diff --git a/curl/face/detect.sh b/curl/face/detect.sh
index f9de348a..5622c3fc 100644
--- a/curl/face/detect.sh
+++ b/curl/face/detect.sh
@@ -1,5 +1,5 @@
#
-curl -H "Ocp-Apim-Subscription-Key: TODO_INSERT_YOUR_FACE_SUBSCRIPTION_KEY_HERE" "TODO_INSERT_YOUR_FACE_ENDPOINT_HERE/face/v1.0/detect?detectionModel=detection_03&returnFaceId=true&returnFaceLandmarks=false" -H "Content-Type: application/json" --data-ascii "{\"url\":\"https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Dad1.jpg\"}"
+curl -v -X POST "https://{resource endpoint}/face/v1.0/detect?detectionModel=detection_03&recognitionModel=recognition_04&returnFaceId=true" -H "Content-Type: application/json" -H "Ocp-Apim-Subscription-Key: {subscription key}" --data-ascii "{\"url\":\"https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Dad1.jpg\"}"
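+# The query string above selects detection model detection_03 with recognition
+# model recognition_04 and requests a faceId; the JSON body passes the image by
+# URL. A successful call returns a JSON array with one element per detected face.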
#
#
@@ -17,7 +17,7 @@ https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-f
#
#
-curl -v -X POST "https:/{endpoint}/face/v1.0/findsimilars" -H "Content-Type: application/json" -H "Ocp-Apim-Subscription-Key: {subscription key}" --data-ascii "{body}"
+curl -v -X POST "https://{resource endpoint}/face/v1.0/findsimilars" -H "Content-Type: application/json" -H "Ocp-Apim-Subscription-Key: {subscription key}" --data-ascii "{body}"
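+# {body} is left as a placeholder: Find Similar expects JSON along the lines of
+# {"faceId": "<source id>", "faceIds": ["<target id>", ...]}, where the IDs come
+# from earlier /detect calls (face IDs expire 24 hours after detection).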
#
#
diff --git a/dotnet/Face/Detect.cs b/dotnet/Face/Detect.cs
index 89be6009..0543bb9a 100644
--- a/dotnet/Face/Detect.cs
+++ b/dotnet/Face/Detect.cs
@@ -7,12 +7,12 @@ namespace FaceQuickstart
{
class Program
{
- static string SUBSCRIPTION_KEY = "PASTE_YOUR_FACE_SUBSCRIPTION_KEY_HERE";
- static string ENDPOINT = "PASTE_YOUR_FACE_ENDPOINT_HERE";
+ static string SubscriptionKey = "PASTE_YOUR_FACE_SUBSCRIPTION_KEY_HERE";
+ static string Endpoint = "PASTE_YOUR_FACE_ENDPOINT_HERE";
async static void Quickstart()
{
- FaceClient faceClient = new FaceClient(new Uri(ENDPOINT), new AzureKeyCredential(SUBSCRIPTION_KEY));
+ FaceClient faceClient = new FaceClient(new Uri(Endpoint), new AzureKeyCredential(SubscriptionKey));
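+ // The endpoint here is the resource URL from the Azure portal (for example,
+ // https://<your-resource-name>.cognitiveservices.azure.com), paired with its key.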
var imageUrl = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/ComputerVision/Images/faces.jpg";
diff --git a/dotnet/Face/FindSimilar.cs b/dotnet/Face/FindSimilar.cs
index 04c8c6dd..0adf1ffc 100644
--- a/dotnet/Face/FindSimilar.cs
+++ b/dotnet/Face/FindSimilar.cs
@@ -8,24 +8,19 @@ namespace FaceQuickstart
class Program
{
//
- const string IMAGE_BASE_URL = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/";
+ const string ImageBaseUrl = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/";
//
//
- static readonly string SUBSCRIPTION_KEY = Environment.GetEnvironmentVariable("FACE_APIKEY") ?? "";
- static readonly string ENDPOINT = Environment.GetEnvironmentVariable("FACE_ENDPOINT") ?? "";
+ static readonly string SubscriptionKey = Environment.GetEnvironmentVariable("FACE_APIKEY") ?? "";
+ static readonly string Endpoint = Environment.GetEnvironmentVariable("FACE_ENDPOINT") ?? "";
//
static void Main(string[] args)
{
-
- //
- FaceRecognitionModel RECOGNITION_MODEL4 = FaceRecognitionModel.Recognition04;
- //
-
//
- FaceClient client = Authenticate(ENDPOINT, SUBSCRIPTION_KEY);
- FindSimilar(client, IMAGE_BASE_URL, RECOGNITION_MODEL4).Wait();
+ FaceClient client = Authenticate(Endpoint, SubscriptionKey);
+ FindSimilar(client, ImageBaseUrl).Wait();
//
}
@@ -37,15 +32,15 @@ public static FaceClient Authenticate(string endpoint, string key)
//
//
- private static async Task<List<FaceDetectionResult>> DetectFaceRecognize(FaceClient faceClient, string url, FaceRecognitionModel recognition_model)
+ private static async Task<List<FaceDetectionResult>> DetectFaceRecognize(FaceClient faceClient, string url)
{
// Detect faces from image URL.
- Response<IReadOnlyList<FaceDetectionResult>> response = await faceClient.DetectAsync(new Uri(url), FaceDetectionModel.Detection03, recognition_model, returnFaceId: true, [FaceAttributeType.QualityForRecognition]);
+ var response = await faceClient.DetectAsync(new Uri(url), FaceDetectionModel.Detection03, FaceRecognitionModel.Recognition04, true, [FaceAttributeType.QualityForRecognition]);
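+ // Requesting the QualityForRecognition attribute lets the loop below drop faces
+ // whose quality is too low to produce a reliable faceId.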
IReadOnlyList<FaceDetectionResult> detectedFaces = response.Value;
List<FaceDetectionResult> sufficientQualityFaces = new List<FaceDetectionResult>();
foreach (FaceDetectionResult detectedFace in detectedFaces)
{
- var faceQualityForRecognition = detectedFace.FaceAttributes.QualityForRecognition;
+ QualityForRecognition? faceQualityForRecognition = detectedFace.FaceAttributes.QualityForRecognition;
if (faceQualityForRecognition.HasValue && (faceQualityForRecognition.Value != QualityForRecognition.Low))
{
sufficientQualityFaces.Add(detectedFace);
@@ -57,7 +52,7 @@ private static async Task<List<FaceDetectionResult>> DetectFaceRecognize(FaceCli
}
//
- public static async Task FindSimilar(FaceClient client, string base_url, FaceRecognitionModel recognition_model)
+ public static async Task FindSimilar(FaceClient client, string baseUrl)
{
//
Console.WriteLine("========FIND SIMILAR========");
@@ -76,27 +71,27 @@ public static async Task FindSimilar(FaceClient client, string base_url, FaceRec
};
string sourceImageFileName = "findsimilar.jpg";
- IList<Guid> targetFaceIds = new List<Guid>();
- foreach (var targetImageFileName in targetImageFileNames)
+ List<Guid> targetFaceIds = new List<Guid>();
+ foreach (string targetImageFileName in targetImageFileNames)
{
// Detect faces from target image url.
- var faces = await DetectFaceRecognize(client, $"{base_url}{targetImageFileName}", recognition_model);
+ List<FaceDetectionResult> faces = await DetectFaceRecognize(client, $"{baseUrl}{targetImageFileName}");
// Add detected faceId to list of GUIDs.
targetFaceIds.Add(faces[0].FaceId.Value);
}
// Detect faces from source image url.
- IList<FaceDetectionResult> detectedFaces = await DetectFaceRecognize(client, $"{base_url}{sourceImageFileName}", recognition_model);
+ List<FaceDetectionResult> detectedFaces = await DetectFaceRecognize(client, $"{baseUrl}{sourceImageFileName}");
Console.WriteLine();
//
//
// Find similar face(s) in the list of IDs. Comparing only the first in the list for testing purposes.
- Response<IReadOnlyList<FaceFindSimilarResult>> response = await client.FindSimilarAsync(detectedFaces[0].FaceId.Value, targetFaceIds);
- IList<FaceFindSimilarResult> similarResults = response.Value.ToList();
+ var response = await client.FindSimilarAsync(detectedFaces[0].FaceId.Value, targetFaceIds);
+ List<FaceFindSimilarResult> similarResults = response.Value.ToList();
//
//
- foreach (var similarResult in similarResults)
+ foreach (FaceFindSimilarResult similarResult in similarResults)
{
Console.WriteLine($"Faces from {sourceImageFileName} & ID:{similarResult.FaceId} are similar with confidence: {similarResult.Confidence}.");
}
diff --git a/dotnet/Face/Quickstart.cs b/dotnet/Face/Quickstart.cs
index af27aba3..204f7781 100644
--- a/dotnet/Face/Quickstart.cs
+++ b/dotnet/Face/Quickstart.cs
@@ -1,24 +1,19 @@
//
-using System.Net.Http.Headers;
-using System.Text;
-
using Azure;
using Azure.AI.Vision.Face;
-using Newtonsoft.Json;
-using Newtonsoft.Json.Linq;
namespace FaceQuickstart
{
class Program
{
- static readonly string largePersonGroupId = Guid.NewGuid().ToString();
+ static readonly string LargePersonGroupId = Guid.NewGuid().ToString();
// URL path for the images.
- const string IMAGE_BASE_URL = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/";
+ const string ImageBaseUrl = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/";
// From your Face subscription in the Azure portal, get your subscription key and endpoint.
- static readonly string SUBSCRIPTION_KEY = Environment.GetEnvironmentVariable("FACE_APIKEY") ?? "";
- static readonly string ENDPOINT = Environment.GetEnvironmentVariable("FACE_ENDPOINT") ?? "";
+ static readonly string SubscriptionKey = Environment.GetEnvironmentVariable("FACE_APIKEY") ?? "";
+ static readonly string Endpoint = Environment.GetEnvironmentVariable("FACE_ENDPOINT") ?? "";
static void Main(string[] args)
{
@@ -27,13 +22,13 @@ static void Main(string[] args)
// on faces wearing masks compared with model 3,
// and its overall accuracy is improved compared
// with models 1 and 2.
- FaceRecognitionModel RECOGNITION_MODEL4 = FaceRecognitionModel.Recognition04;
+ FaceRecognitionModel RecognitionModel4 = FaceRecognitionModel.Recognition04;
// Authenticate.
- FaceClient client = Authenticate(ENDPOINT, SUBSCRIPTION_KEY);
+ FaceClient client = Authenticate(Endpoint, SubscriptionKey);
// Identify - recognize a face(s) in a large person group (a large person group is created in this example).
- IdentifyInLargePersonGroup(client, IMAGE_BASE_URL, RECOGNITION_MODEL4).Wait();
+ IdentifyInLargePersonGroup(client, ImageBaseUrl, RecognitionModel4).Wait();
Console.WriteLine("End of quickstart.");
}
@@ -54,15 +49,15 @@ public static FaceClient Authenticate(string endpoint, string key)
// Result faces with insufficient quality for recognition are filtered out.
// The field `faceId` in returned `DetectedFace`s will be used in Verify and Identify.
// It will expire 24 hours after the detection call.
- private static async Task<List<FaceDetectionResult>> DetectFaceRecognize(FaceClient faceClient, string url, FaceRecognitionModel recognition_model)
+ private static async Task<List<FaceDetectionResult>> DetectFaceRecognize(FaceClient faceClient, string url, FaceRecognitionModel recognitionModel)
{
// Detect faces from image URL.
- Response<IReadOnlyList<FaceDetectionResult>> response = await faceClient.DetectAsync(new Uri(url), FaceDetectionModel.Detection03, recognition_model, returnFaceId: true, [FaceAttributeType.QualityForRecognition]);
+ var response = await faceClient.DetectAsync(new Uri(url), FaceDetectionModel.Detection03, recognitionModel, true, [FaceAttributeType.QualityForRecognition]);
IReadOnlyList<FaceDetectionResult> detectedFaces = response.Value;
List<FaceDetectionResult> sufficientQualityFaces = new List<FaceDetectionResult>();
foreach (FaceDetectionResult detectedFace in detectedFaces)
{
- var faceQualityForRecognition = detectedFace.FaceAttributes.QualityForRecognition;
+ QualityForRecognition? faceQualityForRecognition = detectedFace.FaceAttributes.QualityForRecognition;
if (faceQualityForRecognition.HasValue && (faceQualityForRecognition.Value != QualityForRecognition.Low))
{
sufficientQualityFaces.Add(detectedFace);
@@ -96,41 +91,28 @@ public static async Task IdentifyInLargePersonGroup(FaceClient client, string ur
string sourceImageFileName = "identification1.jpg";
// Create a large person group.
- Console.WriteLine($"Create a person group ({largePersonGroupId}).");
- HttpClient httpClient = new HttpClient();
- httpClient.DefaultRequestHeaders.Add("Ocp-Apim-Subscription-Key", SUBSCRIPTION_KEY);
- using (var content = new ByteArrayContent(Encoding.UTF8.GetBytes(JsonConvert.SerializeObject(new Dictionary<string, object> { ["name"] = largePersonGroupId, ["recognitionModel"] = recognitionModel.ToString() }))))
- {
- content.Headers.ContentType = new MediaTypeHeaderValue("application/json");
- await httpClient.PutAsync($"{ENDPOINT}/face/v1.0/largepersongroups/{largePersonGroupId}", content);
- }
+ Console.WriteLine($"Create a person group ({LargePersonGroupId}).");
+ LargePersonGroupClient largePersonGroupClient = new FaceAdministrationClient(new Uri(Endpoint), new AzureKeyCredential(SubscriptionKey)).GetLargePersonGroupClient(LargePersonGroupId);
+ await largePersonGroupClient.CreateAsync(LargePersonGroupId, recognitionModel: recognitionModel);
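+ // One SDK call replaces the hand-rolled REST PUT to /largepersongroups/{id} removed above.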
// The similar faces will be grouped into a single large person group person.
- foreach (var groupedFace in personDictionary.Keys)
+ foreach (string groupedFace in personDictionary.Keys)
{
// Limit TPS
await Task.Delay(250);
- string? personId = null;
- using (var content = new ByteArrayContent(Encoding.UTF8.GetBytes(JsonConvert.SerializeObject(new Dictionary<string, object> { ["name"] = groupedFace }))))
- {
- content.Headers.ContentType = new MediaTypeHeaderValue("application/json");
- using (var response = await httpClient.PostAsync($"{ENDPOINT}/face/v1.0/largepersongroups/{largePersonGroupId}/persons", content))
- {
- string contentString = await response.Content.ReadAsStringAsync();
- personId = (string?)(JsonConvert.DeserializeObject<Dictionary<string, object>>(contentString)?["personId"]);
- }
- }
+ var createPersonResponse = await largePersonGroupClient.CreatePersonAsync(groupedFace);
+ Guid personId = createPersonResponse.Value.PersonId;
Console.WriteLine($"Create a person group person '{groupedFace}'.");
// Add face to the large person group person.
- foreach (var similarImage in personDictionary[groupedFace])
+ foreach (string similarImage in personDictionary[groupedFace])
{
Console.WriteLine($"Check whether image is of sufficient quality for recognition");
- Response<IReadOnlyList<FaceDetectionResult>> response = await client.DetectAsync(new Uri($"{url}{similarImage}"), FaceDetectionModel.Detection03, recognitionModel, returnFaceId: false, [FaceAttributeType.QualityForRecognition]);
- IReadOnlyList<FaceDetectionResult> detectedFaces1 = response.Value;
+ var detectResponse = await client.DetectAsync(new Uri($"{url}{similarImage}"), FaceDetectionModel.Detection03, recognitionModel, false, [FaceAttributeType.QualityForRecognition]);
+ IReadOnlyList<FaceDetectionResult> facesInImage = detectResponse.Value;
bool sufficientQuality = true;
- foreach (var face1 in detectedFaces1)
+ foreach (FaceDetectionResult face in facesInImage)
{
- var faceQualityForRecognition = face1.FaceAttributes.QualityForRecognition;
+ QualityForRecognition? faceQualityForRecognition = face.FaceAttributes.QualityForRecognition;
// Only "high" quality images are recommended for person enrollment
if (faceQualityForRecognition.HasValue && (faceQualityForRecognition.Value != QualityForRecognition.High))
{
@@ -144,39 +126,25 @@ public static async Task IdentifyInLargePersonGroup(FaceClient client, string ur
continue;
}
- if (detectedFaces1.Count != 1)
+ if (facesInImage.Count != 1)
{
continue;
}
// add face to the large person group
Console.WriteLine($"Add face to the person group person({groupedFace}) from image `{similarImage}`");
- using (var content = new ByteArrayContent(Encoding.UTF8.GetBytes(JsonConvert.SerializeObject(new Dictionary<string, object> { ["url"] = $"{url}{similarImage}" }))))
- {
- content.Headers.ContentType = new MediaTypeHeaderValue("application/json");
- await httpClient.PostAsync($"{ENDPOINT}/face/v1.0/largepersongroups/{largePersonGroupId}/persons/{personId}/persistedfaces?detectionModel=detection_03", content);
- }
+ await largePersonGroupClient.AddFaceAsync(personId, new Uri($"{url}{similarImage}"), detectionModel: FaceDetectionModel.Detection03);
}
}
// Start to train the large person group.
Console.WriteLine();
- Console.WriteLine($"Train person group {largePersonGroupId}.");
- await httpClient.PostAsync($"{ENDPOINT}/face/v1.0/largepersongroups/{largePersonGroupId}/train", null);
+ Console.WriteLine($"Train person group {LargePersonGroupId}.");
+ Operation operation = await largePersonGroupClient.TrainAsync(WaitUntil.Completed);
// Wait until the training is completed.
- while (true)
- {
- await Task.Delay(1000);
- string? trainingStatus = null;
- using (var response = await httpClient.GetAsync($"{ENDPOINT}/face/v1.0/largepersongroups/{largePersonGroupId}/training"))
- {
- string contentString = await response.Content.ReadAsStringAsync();
- trainingStatus = (string?)(JsonConvert.DeserializeObject<Dictionary<string, object>>(contentString)?["status"]);
- }
- Console.WriteLine($"Training status: {trainingStatus}.");
- if ("succeeded".Equals(trainingStatus)) { break; }
- }
+ await operation.WaitForCompletionResponseAsync();
+ Console.WriteLine("Training status: succeeded.");
Console.WriteLine();
Console.WriteLine("Pausing for 60 seconds to avoid triggering rate limit on free account...");
@@ -187,58 +155,35 @@ public static async Task IdentifyInLargePersonGroup(FaceClient client, string ur
List<FaceDetectionResult> detectedFaces = await DetectFaceRecognize(client, $"{url}{sourceImageFileName}", recognitionModel);
// Add detected faceId to sourceFaceIds.
- foreach (var detectedFace in detectedFaces) { sourceFaceIds.Add(detectedFace.FaceId.Value); }
+ foreach (FaceDetectionResult detectedFace in detectedFaces) { sourceFaceIds.Add(detectedFace.FaceId.Value); }
// Identify the faces in a large person group.
- List<Dictionary<string, object>> identifyResults = new List<Dictionary<string, object>>();
- using (var content = new ByteArrayContent(Encoding.UTF8.GetBytes(JsonConvert.SerializeObject(new Dictionary<string, object> { ["faceIds"] = sourceFaceIds, ["largePersonGroupId"] = largePersonGroupId }))))
- {
- content.Headers.ContentType = new MediaTypeHeaderValue("application/json");
- using (var response = await httpClient.PostAsync($"{ENDPOINT}/face/v1.0/identify", content))
- {
- string contentString = await response.Content.ReadAsStringAsync();
- identifyResults = JsonConvert.DeserializeObject<List<Dictionary<string, object>>>(contentString) ?? [];
- }
- }
-
- foreach (var identifyResult in identifyResults)
+ var identifyResponse = await client.IdentifyFromLargePersonGroupAsync(sourceFaceIds, LargePersonGroupId);
+ IReadOnlyList<FaceIdentificationResult> identifyResults = identifyResponse.Value;
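+ // Each FaceIdentificationResult pairs a query faceId with its candidate persons,
+ // ranked by confidence.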
+ foreach (FaceIdentificationResult identifyResult in identifyResults)
{
- string faceId = (string)identifyResult["faceId"];
- List<Dictionary<string, object>> candidates = JsonConvert.DeserializeObject<List<Dictionary<string, object>>>(((JArray)identifyResult["candidates"]).ToString()) ?? [];
- if (candidates.Count == 0)
+ if (identifyResult.Candidates.Count == 0)
{
- Console.WriteLine($"No person is identified for the face in: {sourceImageFileName} - {faceId},");
+ Console.WriteLine($"No person is identified for the face in: {sourceImageFileName} - {identifyResult.FaceId},");
continue;
}
- string? personName = null;
- using (var response = await httpClient.GetAsync($"{ENDPOINT}/face/v1.0/largepersongroups/{largePersonGroupId}/persons/{candidates.First()["personId"]}"))
- {
- string contentString = await response.Content.ReadAsStringAsync();
- personName = (string?)(JsonConvert.DeserializeObject<Dictionary<string, object>>(contentString)?["name"]);
- }
- Console.WriteLine($"Person '{personName}' is identified for the face in: {sourceImageFileName} - {faceId}," +
- $" confidence: {candidates.First()["confidence"]}.");
+ FaceIdentificationCandidate candidate = identifyResult.Candidates.First();
+ var getPersonResponse = await largePersonGroupClient.GetPersonAsync(candidate.PersonId);
+ string personName = getPersonResponse.Value.Name;
+ Console.WriteLine($"Person '{personName}' is identified for the face in: {sourceImageFileName} - {identifyResult.FaceId}," + $" confidence: {candidate.Confidence}.");
- Dictionary<string, object> verifyResult = new Dictionary<string, object>();
- using (var content = new ByteArrayContent(Encoding.UTF8.GetBytes(JsonConvert.SerializeObject(new Dictionary<string, object> { ["faceId"] = faceId, ["personId"] = candidates.First()["personId"], ["largePersonGroupId"] = largePersonGroupId }))))
- {
- content.Headers.ContentType = new MediaTypeHeaderValue("application/json");
- using (var response = await httpClient.PostAsync($"{ENDPOINT}/face/v1.0/verify", content))
- {
- string contentString = await response.Content.ReadAsStringAsync();
- verifyResult = JsonConvert.DeserializeObject<Dictionary<string, object>>(contentString) ?? [];
- }
- }
- Console.WriteLine($"Verification result: is a match? {verifyResult["isIdentical"]}. confidence: {verifyResult["confidence"]}");
+ var verifyResponse = await client.VerifyFromLargePersonGroupAsync(identifyResult.FaceId, LargePersonGroupId, candidate.PersonId);
+ FaceVerificationResult verifyResult = verifyResponse.Value;
+ Console.WriteLine($"Verification result: is a match? {verifyResult.IsIdentical}. confidence: {verifyResult.Confidence}");
}
Console.WriteLine();
// Delete large person group.
Console.WriteLine("========DELETE PERSON GROUP========");
Console.WriteLine();
- await httpClient.DeleteAsync($"{ENDPOINT}/face/v1.0/largepersongroups/{largePersonGroupId}");
- Console.WriteLine($"Deleted the person group {largePersonGroupId}.");
+ await largePersonGroupClient.DeleteAsync();
+ Console.WriteLine($"Deleted the person group {LargePersonGroupId}.");
Console.WriteLine();
}
}
diff --git a/java/Face/Quickstart.java b/java/Face/Quickstart.java
index 4be0a7ae..b1dfd8a5 100644
--- a/java/Face/Quickstart.java
+++ b/java/Face/Quickstart.java
@@ -8,27 +8,21 @@
import com.azure.ai.vision.face.FaceClient;
import com.azure.ai.vision.face.FaceClientBuilder;
+import com.azure.ai.vision.face.administration.FaceAdministrationClient;
+import com.azure.ai.vision.face.administration.FaceAdministrationClientBuilder;
+import com.azure.ai.vision.face.administration.LargePersonGroupClient;
import com.azure.ai.vision.face.models.DetectOptions;
import com.azure.ai.vision.face.models.FaceAttributeType;
import com.azure.ai.vision.face.models.FaceDetectionModel;
import com.azure.ai.vision.face.models.FaceDetectionResult;
+import com.azure.ai.vision.face.models.FaceIdentificationCandidate;
+import com.azure.ai.vision.face.models.FaceIdentificationResult;
import com.azure.ai.vision.face.models.FaceRecognitionModel;
+import com.azure.ai.vision.face.models.FaceTrainingResult;
+import com.azure.ai.vision.face.models.FaceVerificationResult;
import com.azure.ai.vision.face.models.QualityForRecognition;
import com.azure.core.credential.KeyCredential;
-import com.google.gson.Gson;
-import com.google.gson.reflect.TypeToken;
-
-import org.apache.http.HttpHeaders;
-import org.apache.http.client.HttpClient;
-import org.apache.http.client.methods.HttpDelete;
-import org.apache.http.client.methods.HttpGet;
-import org.apache.http.client.methods.HttpPost;
-import org.apache.http.client.methods.HttpPut;
-import org.apache.http.client.utils.URIBuilder;
-import org.apache.http.entity.StringEntity;
-import org.apache.http.impl.client.HttpClients;
-import org.apache.http.message.BasicHeader;
-import org.apache.http.util.EntityUtils;
+import com.azure.core.util.polling.SyncPoller;
public class Quickstart {
// LARGE_PERSON_GROUP_ID should be all lowercase and alphanumeric. For example, 'mygroupname' (dashes are OK).
@@ -105,49 +99,43 @@ public static void identifyInLargePersonGroup(FaceClient client, String url, Fac
// Create a large person group.
System.out.println("Create a person group (" + LARGE_PERSON_GROUP_ID + ").");
- List<Header> headers = Arrays.asList(new BasicHeader("Ocp-Apim-Subscription-Key", SUBSCRIPTION_KEY), new BasicHeader(HttpHeaders.CONTENT_TYPE, "application/json"));
- HttpClient httpClient = HttpClients.custom().setDefaultHeaders(headers).build();
- createLargePersonGroup(httpClient, recognitionModel);
+ FaceAdministrationClient faceAdministrationClient = new FaceAdministrationClientBuilder().endpoint(ENDPOINT).credential(new KeyCredential(SUBSCRIPTION_KEY)).buildClient();
+ LargePersonGroupClient largePersonGroupClient = faceAdministrationClient.getLargePersonGroupClient(LARGE_PERSON_GROUP_ID);
+ largePersonGroupClient.create(LARGE_PERSON_GROUP_ID, null, recognitionModel);
// The similar faces will be grouped into a single large person group person.
for (String groupedFace : personDictionary.keySet()) {
// Limit TPS
Thread.sleep(250);
- String personId = createLargePersonGroupPerson(httpClient, groupedFace);
+ String personId = largePersonGroupClient.createPerson(groupedFace).getPersonId();
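+ // createPerson returns the generated person ID directly, with no JSON parsing needed.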
System.out.println("Create a person group person '" + groupedFace + "'.");
// Add face to the large person group person.
for (String similarImage : personDictionary.get(groupedFace)) {
System.out.println("Check whether image is of sufficient quality for recognition");
DetectOptions options = new DetectOptions(FaceDetectionModel.DETECTION_03, recognitionModel, false).setReturnFaceAttributes(Arrays.asList(FaceAttributeType.QUALITY_FOR_RECOGNITION));
- List<FaceDetectionResult> detectedFaces1 = client.detect(url + similarImage, options);
- if (detectedFaces1.stream().anyMatch(f -> f.getFaceAttributes().getQualityForRecognition() != QualityForRecognition.HIGH)) {
+ List<FaceDetectionResult> facesInImage = client.detect(url + similarImage, options);
+ if (facesInImage.stream().anyMatch(f -> f.getFaceAttributes().getQualityForRecognition() != QualityForRecognition.HIGH)) {
continue;
}
- if (detectedFaces1.size() != 1) {
+ if (facesInImage.size() != 1) {
continue;
}
// add face to the large person group
System.out.println("Add face to the person group person(" + groupedFace + ") from image `" + similarImage + "`");
- addFaceToLargePersonGroup(httpClient, personId, url + similarImage);
+ largePersonGroupClient.addFace(personId, url + similarImage, null, FaceDetectionModel.DETECTION_03, null);
}
}
// Start to train the large person group.
System.out.println();
System.out.println("Train person group " + LARGE_PERSON_GROUP_ID + ".");
- trainLargePersonGroup(httpClient);
+ SyncPoller<FaceTrainingResult, Void> poller = largePersonGroupClient.beginTrain();
// Wait until the training is completed.
- while (true) {
- Thread.sleep(1000);
- String trainingStatus = getLargePersonGroupTrainingStatus(httpClient);
- System.out.println("Training status: " + trainingStatus + ".");
- if ("succeeded".equals(trainingStatus)) {
- break;
- }
- }
+ poller.waitForCompletion();
+ System.out.println("Training status: succeeded.");
System.out.println();
System.out.println("Pausing for 60 seconds to avoid triggering rate limit on free account...");
@@ -159,97 +147,29 @@ public static void identifyInLargePersonGroup(FaceClient client, String url, Fac
List<String> sourceFaceIds = detectedFaces.stream().map(FaceDetectionResult::getFaceId).collect(Collectors.toList());
// Identify the faces in a large person group.
- List
\ No newline at end of file
diff --git a/java/Face/pom.xml b/java/Face/pom.xml
index fe7dfac5..c7adac77 100644
--- a/java/Face/pom.xml
+++ b/java/Face/pom.xml
@@ -10,19 +10,7 @@
<groupId>com.azure</groupId>
<artifactId>azure-ai-vision-face</artifactId>
- <version>1.0.0-beta.1</version>
- </dependency>
-
- <dependency>
- <groupId>org.apache.httpcomponents</groupId>
- <artifactId>httpclient</artifactId>
- <version>4.5.13</version>
- </dependency>
-
- <dependency>
- <groupId>com.google.code.gson</groupId>
- <artifactId>gson</artifactId>
- <version>2.11.0</version>
+ <version>1.0.0-beta.2</version>
\ No newline at end of file
diff --git a/python/Face/Quickstart.py b/python/Face/Quickstart.py
index 61c07522..ae3f82d5 100644
--- a/python/Face/Quickstart.py
+++ b/python/Face/Quickstart.py
@@ -2,16 +2,10 @@
import os
import time
import uuid
-import requests
from azure.core.credentials import AzureKeyCredential
-from azure.ai.vision.face import FaceClient
-from azure.ai.vision.face.models import (
- FaceAttributeTypeRecognition04,
- FaceDetectionModel,
- FaceRecognitionModel,
- QualityForRecognition,
-)
+from azure.ai.vision.face import FaceAdministrationClient, FaceClient
+from azure.ai.vision.face.models import FaceAttributeTypeRecognition04, FaceDetectionModel, FaceRecognitionModel, QualityForRecognition
# This key will serve all examples in this document.
@@ -24,156 +18,161 @@
# LARGE_PERSON_GROUP_ID should be all lowercase and alphanumeric. For example, 'mygroupname' (dashes are OK).
LARGE_PERSON_GROUP_ID = str(uuid.uuid4()) # assign a random ID (or name it anything)
-HEADERS = {"Ocp-Apim-Subscription-Key": KEY, "Content-Type": "application/json"}
-
# Create an authenticated FaceClient.
-with FaceClient(endpoint=ENDPOINT, credential=AzureKeyCredential(KEY)) as face_client:
+with FaceAdministrationClient(endpoint=ENDPOINT, credential=AzureKeyCredential(KEY)) as face_admin_client, \
+ FaceClient(endpoint=ENDPOINT, credential=AzureKeyCredential(KEY)) as face_client:
'''
Create the LargePersonGroup
'''
# Create empty Large Person Group. Large Person Group ID must be lower case, alphanumeric, and/or with '-', '_'.
print("Person group:", LARGE_PERSON_GROUP_ID)
- response = requests.put(
- ENDPOINT + f"/face/v1.0/largepersongroups/{LARGE_PERSON_GROUP_ID}",
- headers=HEADERS,
- json={"name": LARGE_PERSON_GROUP_ID, "recognitionModel": "recognition_04"})
- response.raise_for_status()
+ face_admin_client.large_person_group.create(
+ large_person_group_id=LARGE_PERSON_GROUP_ID,
+ name=LARGE_PERSON_GROUP_ID,
+ recognition_model=FaceRecognitionModel.RECOGNITION04,
+ )
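+ # The SDK raises an HttpResponseError on failure, so the explicit
+ # raise_for_status() checks used with requests are no longer needed.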
# Define woman friend
- response = requests.post(ENDPOINT + f"/face/v1.0/largepersongroups/{LARGE_PERSON_GROUP_ID}/persons", headers=HEADERS, json={"name": "Woman"})
- response.raise_for_status()
- woman = response.json()
+ woman = face_admin_client.large_person_group.create_person(
+ large_person_group_id=LARGE_PERSON_GROUP_ID,
+ name="Woman",
+ )
# Define man friend
- response = requests.post(ENDPOINT + f"/face/v1.0/largepersongroups/{LARGE_PERSON_GROUP_ID}/persons", headers=HEADERS, json={"name": "Man"})
- response.raise_for_status()
- man = response.json()
+ man = face_admin_client.large_person_group.create_person(
+ large_person_group_id=LARGE_PERSON_GROUP_ID,
+ name="Man",
+ )
# Define child friend
- response = requests.post(ENDPOINT + f"/face/v1.0/largepersongroups/{LARGE_PERSON_GROUP_ID}/persons", headers=HEADERS, json={"name": "Child"})
- response.raise_for_status()
- child = response.json()
+ child = face_admin_client.large_person_group.create_person(
+ large_person_group_id=LARGE_PERSON_GROUP_ID,
+ name="Child",
+ )
'''
Detect faces and register them to each person
'''
# Image URLs for each friend, pulled from the sample data repository on the web.
woman_images = [
- "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Mom1.jpg", # noqa: E501
- "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Mom2.jpg", # noqa: E501
+ "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Mom1.jpg",
+ "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Mom2.jpg",
]
man_images = [
- "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Dad1.jpg", # noqa: E501
- "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Dad2.jpg", # noqa: E501
+ "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Dad1.jpg",
+ "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Dad2.jpg",
]
child_images = [
- "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Son1.jpg", # noqa: E501
- "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Son2.jpg", # noqa: E501
+ "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Son1.jpg",
+ "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/Family1-Son2.jpg",
]
# Add to woman person
for image in woman_images:
# Check if the image is of sufficient quality for recognition.
- sufficientQuality = True
+ sufficient_quality = True
detected_faces = face_client.detect_from_url(
url=image,
- detection_model=FaceDetectionModel.DETECTION_03,
- recognition_model=FaceRecognitionModel.RECOGNITION_04,
+ detection_model=FaceDetectionModel.DETECTION03,
+ recognition_model=FaceRecognitionModel.RECOGNITION04,
return_face_id=True,
- return_face_attributes=[FaceAttributeTypeRecognition04.QUALITY_FOR_RECOGNITION])
+ return_face_attributes=[FaceAttributeTypeRecognition04.QUALITY_FOR_RECOGNITION],
+ )
for face in detected_faces:
if face.face_attributes.quality_for_recognition != QualityForRecognition.HIGH:
- sufficientQuality = False
+ sufficient_quality = False
break
- if not sufficientQuality:
+ if not sufficient_quality:
continue
if len(detected_faces) != 1:
continue
- response = requests.post(
- ENDPOINT + f"/face/v1.0/largepersongroups/{LARGE_PERSON_GROUP_ID}/persons/{woman['personId']}/persistedFaces",
- headers=HEADERS,
- json={"url": image})
- response.raise_for_status()
- print(f"face {face.face_id} added to person {woman['personId']}")
+ face_admin_client.large_person_group.add_face_from_url(
+ large_person_group_id=LARGE_PERSON_GROUP_ID,
+ person_id=woman.person_id,
+ url=image,
+ detection_model=FaceDetectionModel.DETECTION03,
+ )
+ print(f"face {face.face_id} added to person {woman.person_id}")
# Add to man person
for image in man_images:
# Check if the image is of sufficient quality for recognition.
- sufficientQuality = True
+ sufficient_quality = True
detected_faces = face_client.detect_from_url(
url=image,
- detection_model=FaceDetectionModel.DETECTION_03,
- recognition_model=FaceRecognitionModel.RECOGNITION_04,
+ detection_model=FaceDetectionModel.DETECTION03,
+ recognition_model=FaceRecognitionModel.RECOGNITION04,
return_face_id=True,
- return_face_attributes=[FaceAttributeTypeRecognition04.QUALITY_FOR_RECOGNITION])
+ return_face_attributes=[FaceAttributeTypeRecognition04.QUALITY_FOR_RECOGNITION],
+ )
for face in detected_faces:
if face.face_attributes.quality_for_recognition != QualityForRecognition.HIGH:
- sufficientQuality = False
+ sufficient_quality = False
break
- if not sufficientQuality:
+ if not sufficient_quality:
continue
if len(detected_faces) != 1:
continue
- response = requests.post(
- ENDPOINT + f"/face/v1.0/largepersongroups/{LARGE_PERSON_GROUP_ID}/persons/{man['personId']}/persistedFaces",
- headers=HEADERS,
- json={"url": image})
- response.raise_for_status()
- print(f"face {face.face_id} added to person {man['personId']}")
+ face_admin_client.large_person_group.add_face_from_url(
+ large_person_group_id=LARGE_PERSON_GROUP_ID,
+ person_id=man.person_id,
+ url=image,
+ detection_model=FaceDetectionModel.DETECTION03,
+ )
+ print(f"face {face.face_id} added to person {man.person_id}")
# Add to child person
for image in child_images:
# Check if the image is of sufficient quality for recognition.
- sufficientQuality = True
+ sufficient_quality = True
detected_faces = face_client.detect_from_url(
url=image,
- detection_model=FaceDetectionModel.DETECTION_03,
- recognition_model=FaceRecognitionModel.RECOGNITION_04,
+ detection_model=FaceDetectionModel.DETECTION03,
+ recognition_model=FaceRecognitionModel.RECOGNITION04,
return_face_id=True,
- return_face_attributes=[FaceAttributeTypeRecognition04.QUALITY_FOR_RECOGNITION])
+ return_face_attributes=[FaceAttributeTypeRecognition04.QUALITY_FOR_RECOGNITION],
+ )
for face in detected_faces:
if face.face_attributes.quality_for_recognition != QualityForRecognition.HIGH:
- sufficientQuality = False
+ sufficient_quality = False
break
- if not sufficientQuality:
+ if not sufficient_quality:
continue
if len(detected_faces) != 1:
continue
- response = requests.post(
- ENDPOINT + f"/face/v1.0/largepersongroups/{LARGE_PERSON_GROUP_ID}/persons/{child['personId']}/persistedFaces",
- headers=HEADERS,
- json={"url": image})
- response.raise_for_status()
- print(f"face {face.face_id} added to person {child['personId']}")
+ face_admin_client.large_person_group.add_face_from_url(
+ large_person_group_id=LARGE_PERSON_GROUP_ID,
+ person_id=child.person_id,
+ url=image,
+ detection_model=FaceDetectionModel.DETECTION03,
+ )
+ print(f"face {face.face_id} added to person {child.person_id}")
'''
Train LargePersonGroup
'''
- # Train the large person group
+ # Train the large person group and set the polling interval to 5s
print(f"Train the person group {LARGE_PERSON_GROUP_ID}")
- response = requests.post(ENDPOINT + f"/face/v1.0/largepersongroups/{LARGE_PERSON_GROUP_ID}/train", headers=HEADERS)
- response.raise_for_status()
-
- while (True):
- response = requests.get(ENDPOINT + f"/face/v1.0/largepersongroups/{LARGE_PERSON_GROUP_ID}/training", headers=HEADERS)
- response.raise_for_status()
- training_status = response.json()["status"]
- if training_status == "succeeded":
- break
+ poller = face_admin_client.large_person_group.begin_train(
+ large_person_group_id=LARGE_PERSON_GROUP_ID,
+ polling_interval=5,
+ )
+
+ poller.wait()
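+ # begin_train returns a long-running-operation poller; wait() blocks until training
+ # completes, replacing the manual loop that polled the /training endpoint.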
print(f"The person group {LARGE_PERSON_GROUP_ID} is trained successfully.")
'''
Identify a face against a defined LargePersonGroup
'''
# Group image for testing against
- test_image = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/identification1.jpg" # noqa: E501
+ test_image = "https://raw.githubusercontent.com/Azure-Samples/cognitive-services-sample-data-files/master/Face/images/identification1.jpg"
print("Pausing for 60 seconds to avoid triggering rate limit on free account...")
time.sleep(60)
@@ -184,46 +183,41 @@
# recognition attribute.
faces = face_client.detect_from_url(
url=test_image,
- detection_model=FaceDetectionModel.DETECTION_03,
- recognition_model=FaceRecognitionModel.RECOGNITION_04,
+ detection_model=FaceDetectionModel.DETECTION03,
+ recognition_model=FaceRecognitionModel.RECOGNITION04,
return_face_id=True,
- return_face_attributes=[FaceAttributeTypeRecognition04.QUALITY_FOR_RECOGNITION])
+ return_face_attributes=[FaceAttributeTypeRecognition04.QUALITY_FOR_RECOGNITION],
+ )
for face in faces:
# Only take the face if it is of sufficient quality.
if face.face_attributes.quality_for_recognition != QualityForRecognition.LOW:
face_ids.append(face.face_id)
# Identify faces
- response = requests.post(
- ENDPOINT + f"/face/v1.0/identify",
- headers=HEADERS,
- json={"faceIds": face_ids, "largePersonGroupId": LARGE_PERSON_GROUP_ID})
- response.raise_for_status()
- results = response.json()
+ identify_results = face_client.identify_from_large_person_group(
+ face_ids=face_ids,
+ large_person_group_id=LARGE_PERSON_GROUP_ID,
+ )
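+ # identify_from_large_person_group returns one result per input face ID, each with
+ # candidates ranked by confidence (highest first).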
print("Identifying faces in image")
- if not results:
- print("No person identified in the person group")
- for identifiedFace in results:
- if len(identifiedFace["candidates"]) > 0:
- print(f"Person is identified for face ID {identifiedFace['faceId']} in image, with a confidence of "
- f"{identifiedFace['candidates'][0]['confidence']}.") # Get topmost confidence score
+ for identify_result in identify_results:
+ if identify_result.candidates:
+ print(f"Person is identified for face ID {identify_result.face_id} in image, with a confidence of "
+ f"{identify_result.candidates[0].confidence}.") # Get topmost confidence score
# Verify faces
- response = requests.post(
- ENDPOINT + f"/face/v1.0/verify",
- headers=HEADERS,
- json={"faceId": identifiedFace["faceId"], "personId": identifiedFace["candidates"][0]["personId"], "largePersonGroupId": LARGE_PERSON_GROUP_ID})
- response.raise_for_status()
- verify_result = response.json()
- print(f"verification result: {verify_result['isIdentical']}. confidence: {verify_result['confidence']}")
+ verify_result = face_client.verify_from_large_person_group(
+ face_id=identify_result.face_id,
+ large_person_group_id=LARGE_PERSON_GROUP_ID,
+ person_id=identify_result.candidates[0].person_id,
+ )
+ print(f"verification result: {verify_result.is_identical}. confidence: {verify_result.confidence}")
else:
- print(f"No person identified for face ID {identifiedFace['faceId']} in image.")
+ print(f"No person identified for face ID {identify_result.face_id} in image.")
print()
# Delete the large person group
- response = requests.delete(ENDPOINT + f"/face/v1.0/largepersongroups/{LARGE_PERSON_GROUP_ID}", headers=HEADERS)
- response.raise_for_status()
+ face_admin_client.large_person_group.delete(LARGE_PERSON_GROUP_ID)
print(f"The person group {LARGE_PERSON_GROUP_ID} is deleted.")
print()