Add FaceLandmarker iOS API
PiperOrigin-RevId: 537424705
This commit is contained in:
parent
5f50ac371f
commit
ace56b502a
|
@ -53,6 +53,7 @@ CALCULATORS_AND_GRAPHS = [
|
||||||
"//mediapipe/tasks/cc/text/text_classifier:text_classifier_graph",
|
"//mediapipe/tasks/cc/text/text_classifier:text_classifier_graph",
|
||||||
"//mediapipe/tasks/cc/text/text_embedder:text_embedder_graph",
|
"//mediapipe/tasks/cc/text/text_embedder:text_embedder_graph",
|
||||||
"//mediapipe/tasks/cc/vision/face_detector:face_detector_graph",
|
"//mediapipe/tasks/cc/vision/face_detector:face_detector_graph",
|
||||||
|
"//mediapipe/tasks/cc/vision/face_landmarker:face_landmarker_graph",
|
||||||
"//mediapipe/tasks/cc/vision/image_classifier:image_classifier_graph",
|
"//mediapipe/tasks/cc/vision/image_classifier:image_classifier_graph",
|
||||||
"//mediapipe/tasks/cc/vision/object_detector:object_detector_graph",
|
"//mediapipe/tasks/cc/vision/object_detector:object_detector_graph",
|
||||||
]
|
]
|
||||||
|
@ -80,6 +81,9 @@ strip_api_include_path_prefix(
|
||||||
"//mediapipe/tasks/ios/vision/face_detector:sources/MPPFaceDetector.h",
|
"//mediapipe/tasks/ios/vision/face_detector:sources/MPPFaceDetector.h",
|
||||||
"//mediapipe/tasks/ios/vision/face_detector:sources/MPPFaceDetectorOptions.h",
|
"//mediapipe/tasks/ios/vision/face_detector:sources/MPPFaceDetectorOptions.h",
|
||||||
"//mediapipe/tasks/ios/vision/face_detector:sources/MPPFaceDetectorResult.h",
|
"//mediapipe/tasks/ios/vision/face_detector:sources/MPPFaceDetectorResult.h",
|
||||||
|
"//mediapipe/tasks/ios/vision/face_landmarker:sources/MPPFaceLandmarker.h",
|
||||||
|
"//mediapipe/tasks/ios/vision/face_landmarker:sources/MPPFaceLandmarkerOptions.h",
|
||||||
|
"//mediapipe/tasks/ios/vision/face_landmarker:sources/MPPFaceLandmarkerResult.h",
|
||||||
"//mediapipe/tasks/ios/vision/image_classifier:sources/MPPImageClassifier.h",
|
"//mediapipe/tasks/ios/vision/image_classifier:sources/MPPImageClassifier.h",
|
||||||
"//mediapipe/tasks/ios/vision/image_classifier:sources/MPPImageClassifierOptions.h",
|
"//mediapipe/tasks/ios/vision/image_classifier:sources/MPPImageClassifierOptions.h",
|
||||||
"//mediapipe/tasks/ios/vision/image_classifier:sources/MPPImageClassifierResult.h",
|
"//mediapipe/tasks/ios/vision/image_classifier:sources/MPPImageClassifierResult.h",
|
||||||
|
@ -164,6 +168,9 @@ apple_static_xcframework(
|
||||||
":MPPFaceDetector.h",
|
":MPPFaceDetector.h",
|
||||||
":MPPFaceDetectorOptions.h",
|
":MPPFaceDetectorOptions.h",
|
||||||
":MPPFaceDetectorResult.h",
|
":MPPFaceDetectorResult.h",
|
||||||
|
":MPPFaceLandmarker.h",
|
||||||
|
":MPPFaceLandmarkerOptions.h",
|
||||||
|
":MPPFaceLandmarkerResult.h",
|
||||||
":MPPImageClassifier.h",
|
":MPPImageClassifier.h",
|
||||||
":MPPImageClassifierOptions.h",
|
":MPPImageClassifierOptions.h",
|
||||||
":MPPImageClassifierResult.h",
|
":MPPImageClassifierResult.h",
|
||||||
|
@ -173,6 +180,7 @@ apple_static_xcframework(
|
||||||
],
|
],
|
||||||
deps = [
|
deps = [
|
||||||
"//mediapipe/tasks/ios/vision/face_detector:MPPFaceDetector",
|
"//mediapipe/tasks/ios/vision/face_detector:MPPFaceDetector",
|
||||||
|
"//mediapipe/tasks/ios/vision/face_landmarker:MPPFaceLandmarker",
|
||||||
"//mediapipe/tasks/ios/vision/image_classifier:MPPImageClassifier",
|
"//mediapipe/tasks/ios/vision/image_classifier:MPPImageClassifier",
|
||||||
"//mediapipe/tasks/ios/vision/object_detector:MPPObjectDetector",
|
"//mediapipe/tasks/ios/vision/object_detector:MPPObjectDetector",
|
||||||
],
|
],
|
||||||
|
|
71
mediapipe/tasks/ios/test/vision/face_landmarker/BUILD
Normal file
71
mediapipe/tasks/ios/test/vision/face_landmarker/BUILD
Normal file
|
@ -0,0 +1,71 @@
|
||||||
|
load("@build_bazel_rules_apple//apple:ios.bzl", "ios_unit_test")
|
||||||
|
load(
|
||||||
|
"//mediapipe/framework/tool:ios.bzl",
|
||||||
|
"MPP_TASK_MINIMUM_OS_VERSION",
|
||||||
|
)
|
||||||
|
load(
|
||||||
|
"@org_tensorflow//tensorflow/lite:special_rules.bzl",
|
||||||
|
"tflite_ios_lab_runner",
|
||||||
|
)
|
||||||
|
|
||||||
|
package(default_visibility = ["//mediapipe/tasks:internal"])
|
||||||
|
|
||||||
|
licenses(["notice"])
|
||||||
|
|
||||||
|
# Default tags for filtering iOS targets. Targets are restricted to Apple platforms.
|
||||||
|
TFL_DEFAULT_TAGS = [
|
||||||
|
"apple",
|
||||||
|
]
|
||||||
|
|
||||||
|
# Following sanitizer tests are not supported by iOS test targets.
|
||||||
|
TFL_DISABLED_SANITIZER_TAGS = [
|
||||||
|
"noasan",
|
||||||
|
"nomsan",
|
||||||
|
"notsan",
|
||||||
|
]
|
||||||
|
|
||||||
|
objc_library(
|
||||||
|
name = "MPPFaceLandmarkerObjcTestLibrary",
|
||||||
|
testonly = 1,
|
||||||
|
srcs = ["MPPFaceLandmarkerTests.mm"],
|
||||||
|
copts = [
|
||||||
|
"-ObjC++",
|
||||||
|
"-std=c++17",
|
||||||
|
"-x objective-c++",
|
||||||
|
],
|
||||||
|
data = [
|
||||||
|
"//mediapipe/tasks/testdata/vision:test_images",
|
||||||
|
"//mediapipe/tasks/testdata/vision:test_models",
|
||||||
|
"//mediapipe/tasks/testdata/vision:test_protos",
|
||||||
|
],
|
||||||
|
deps = [
|
||||||
|
"//mediapipe/framework/formats:classification_cc_proto",
|
||||||
|
"//mediapipe/framework/formats:landmark_cc_proto",
|
||||||
|
"//mediapipe/framework/formats:matrix_data_cc_proto",
|
||||||
|
"//mediapipe/tasks/cc/vision/face_geometry/proto:face_geometry_cc_proto",
|
||||||
|
"//mediapipe/tasks/ios/common:MPPCommon",
|
||||||
|
"//mediapipe/tasks/ios/components/containers/utils:MPPClassificationResultHelpers",
|
||||||
|
"//mediapipe/tasks/ios/components/containers/utils:MPPDetectionHelpers",
|
||||||
|
"//mediapipe/tasks/ios/components/containers/utils:MPPLandmarkHelpers",
|
||||||
|
"//mediapipe/tasks/ios/test/vision/utils:MPPImageTestUtils",
|
||||||
|
"//mediapipe/tasks/ios/test/vision/utils:parse_proto_utils",
|
||||||
|
"//mediapipe/tasks/ios/vision/face_landmarker:MPPFaceLandmarker",
|
||||||
|
"//mediapipe/tasks/ios/vision/face_landmarker:MPPFaceLandmarkerResult",
|
||||||
|
"//third_party/apple_frameworks:UIKit",
|
||||||
|
] + select({
|
||||||
|
"//third_party:opencv_ios_sim_arm64_source_build": ["@ios_opencv_source//:opencv_xcframework"],
|
||||||
|
"//third_party:opencv_ios_arm64_source_build": ["@ios_opencv_source//:opencv_xcframework"],
|
||||||
|
"//third_party:opencv_ios_x86_64_source_build": ["@ios_opencv_source//:opencv_xcframework"],
|
||||||
|
"//conditions:default": ["@ios_opencv//:OpencvFramework"],
|
||||||
|
}),
|
||||||
|
)
|
||||||
|
|
||||||
|
ios_unit_test(
|
||||||
|
name = "MPPFaceLandmarkerObjcTest",
|
||||||
|
minimum_os_version = MPP_TASK_MINIMUM_OS_VERSION,
|
||||||
|
runner = tflite_ios_lab_runner("IOS_LATEST"),
|
||||||
|
tags = TFL_DEFAULT_TAGS + TFL_DISABLED_SANITIZER_TAGS,
|
||||||
|
deps = [
|
||||||
|
":MPPFaceLandmarkerObjcTestLibrary",
|
||||||
|
],
|
||||||
|
)
|
|
@ -0,0 +1,351 @@
|
||||||
|
// Copyright 2023 The MediaPipe Authors.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#import <Foundation/Foundation.h>
|
||||||
|
#import <UIKit/UIKit.h>
|
||||||
|
#import <XCTest/XCTest.h>
|
||||||
|
|
||||||
|
#include "mediapipe/framework/formats/classification.pb.h"
|
||||||
|
#include "mediapipe/framework/formats/landmark.pb.h"
|
||||||
|
#include "mediapipe/framework/formats/matrix_data.pb.h"
|
||||||
|
#include "mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry.pb.h"
|
||||||
|
#import "mediapipe/tasks/ios/common/sources/MPPCommon.h"
|
||||||
|
#import "mediapipe/tasks/ios/components/containers/utils/sources/MPPClassificationResult+Helpers.h"
|
||||||
|
#import "mediapipe/tasks/ios/components/containers/utils/sources/MPPDetection+Helpers.h"
|
||||||
|
#import "mediapipe/tasks/ios/components/containers/utils/sources/MPPLandmark+Helpers.h"
|
||||||
|
#import "mediapipe/tasks/ios/test/vision/utils/sources/MPPImage+TestUtils.h"
|
||||||
|
#include "mediapipe/tasks/ios/test/vision/utils/sources/parse_proto_utils.h"
|
||||||
|
#import "mediapipe/tasks/ios/vision/face_landmarker/sources/MPPFaceLandmarker.h"
|
||||||
|
#import "mediapipe/tasks/ios/vision/face_landmarker/sources/MPPFaceLandmarkerResult.h"
|
||||||
|
|
||||||
|
using NormalizedLandmarkListProto = ::mediapipe::NormalizedLandmarkList;
|
||||||
|
using ClassificationListProto = ::mediapipe::ClassificationList;
|
||||||
|
using FaceGeometryProto = ::mediapipe::tasks::vision::face_geometry::proto::FaceGeometry;
|
||||||
|
using ::mediapipe::tasks::ios::test::vision::utils::get_proto_from_pbtxt;
|
||||||
|
|
||||||
|
static NSString *const kPbFileExtension = @"pbtxt";
|
||||||
|
|
||||||
|
typedef NSDictionary<NSString *, NSString *> ResourceFileInfo;
|
||||||
|
|
||||||
|
static ResourceFileInfo *const kPortraitImage =
|
||||||
|
@{@"name" : @"portrait", @"type" : @"jpg", @"orientation" : @(UIImageOrientationUp)};
|
||||||
|
static ResourceFileInfo *const kPortraitRotatedImage =
|
||||||
|
@{@"name" : @"portrait_rotated", @"type" : @"jpg", @"orientation" : @(UIImageOrientationRight)};
|
||||||
|
static ResourceFileInfo *const kCatImage = @{@"name" : @"cat", @"type" : @"jpg"};
|
||||||
|
static ResourceFileInfo *const kPortraitExpectedLandmarksName =
|
||||||
|
@{@"name" : @"portrait_expected_face_landmarks", @"type" : kPbFileExtension};
|
||||||
|
static ResourceFileInfo *const kPortraitExpectedBlendshapesName =
|
||||||
|
@{@"name" : @"portrait_expected_blendshapes", @"type" : kPbFileExtension};
|
||||||
|
static ResourceFileInfo *const kPortraitExpectedGeometryName =
|
||||||
|
@{@"name" : @"portrait_expected_face_geometry", @"type" : kPbFileExtension};
|
||||||
|
static NSString *const kFaceLandmarkerModelName = @"face_landmarker_v2";
|
||||||
|
static NSString *const kFaceLandmarkerWithBlendshapesModelName =
|
||||||
|
@"face_landmarker_v2_with_blendshapes";
|
||||||
|
static NSString *const kExpectedErrorDomain = @"com.google.mediapipe.tasks";
|
||||||
|
|
||||||
|
constexpr float kLandmarkErrorThreshold = 0.03f;
|
||||||
|
constexpr float kBlendshapesErrorThreshold = 0.1f;
|
||||||
|
constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
|
||||||
|
|
||||||
|
#define AssertEqualErrors(error, expectedError) \
|
||||||
|
XCTAssertNotNil(error); \
|
||||||
|
XCTAssertEqualObjects(error.domain, expectedError.domain); \
|
||||||
|
XCTAssertEqual(error.code, expectedError.code); \
|
||||||
|
XCTAssertEqualObjects(error.localizedDescription, expectedError.localizedDescription)
|
||||||
|
|
||||||
|
@interface MPPFaceLandmarkerTests : XCTestCase {
|
||||||
|
}
|
||||||
|
@end
|
||||||
|
|
||||||
|
@implementation MPPFaceLandmarkerTests
|
||||||
|
|
||||||
|
#pragma mark General Tests
|
||||||
|
|
||||||
|
- (void)testCreateFaceLandmarkerWithMissingModelPathFails {
|
||||||
|
NSString *modelPath = [MPPFaceLandmarkerTests filePathWithName:@"" extension:@""];
|
||||||
|
|
||||||
|
NSError *error = nil;
|
||||||
|
MPPFaceLandmarker *faceLandmarker = [[MPPFaceLandmarker alloc] initWithModelPath:modelPath
|
||||||
|
error:&error];
|
||||||
|
XCTAssertNil(faceLandmarker);
|
||||||
|
|
||||||
|
NSError *expectedError = [NSError
|
||||||
|
errorWithDomain:kExpectedErrorDomain
|
||||||
|
code:MPPTasksErrorCodeInvalidArgumentError
|
||||||
|
userInfo:@{
|
||||||
|
NSLocalizedDescriptionKey :
|
||||||
|
@"INVALID_ARGUMENT: ExternalFile must specify at least one of 'file_content', "
|
||||||
|
@"'file_name', 'file_pointer_meta' or 'file_descriptor_meta'."
|
||||||
|
}];
|
||||||
|
AssertEqualErrors(error, expectedError);
|
||||||
|
}
|
||||||
|
|
||||||
|
#pragma mark Image Mode Tests
|
||||||
|
|
||||||
|
- (void)testDetectWithImageModeAndPotraitSucceeds {
|
||||||
|
NSString *modelPath = [MPPFaceLandmarkerTests filePathWithName:kFaceLandmarkerModelName
|
||||||
|
extension:@"task"];
|
||||||
|
MPPFaceLandmarker *faceLandmarker = [[MPPFaceLandmarker alloc] initWithModelPath:modelPath
|
||||||
|
error:nil];
|
||||||
|
NSArray<MPPNormalizedLandmark *> *expectedLandmarks =
|
||||||
|
[MPPFaceLandmarkerTests expectedLandmarksFromFileInfo:kPortraitExpectedLandmarksName];
|
||||||
|
[self assertResultsOfDetectInImageWithFileInfo:kPortraitImage
|
||||||
|
usingFaceLandmarker:faceLandmarker
|
||||||
|
containsExpectedLandmarks:expectedLandmarks
|
||||||
|
expectedBlendshapes:NULL
|
||||||
|
expectedTransformationMatrix:NULL];
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)testDetectWithImageModeAndPotraitAndFacialTransformationMatrixesSucceeds {
|
||||||
|
MPPFaceLandmarkerOptions *options =
|
||||||
|
[self faceLandmarkerOptionsWithModelName:kFaceLandmarkerModelName];
|
||||||
|
options.outputFacialTransformationMatrixes = YES;
|
||||||
|
MPPFaceLandmarker *faceLandmarker = [[MPPFaceLandmarker alloc] initWithOptions:options error:nil];
|
||||||
|
|
||||||
|
NSArray<MPPNormalizedLandmark *> *expectedLandmarks =
|
||||||
|
[MPPFaceLandmarkerTests expectedLandmarksFromFileInfo:kPortraitExpectedLandmarksName];
|
||||||
|
MPPTransformMatrix *expectedTransformationMatrix = [MPPFaceLandmarkerTests
|
||||||
|
expectedTransformationMatrixFromFileInfo:kPortraitExpectedGeometryName];
|
||||||
|
[self assertResultsOfDetectInImageWithFileInfo:kPortraitImage
|
||||||
|
usingFaceLandmarker:faceLandmarker
|
||||||
|
containsExpectedLandmarks:expectedLandmarks
|
||||||
|
expectedBlendshapes:NULL
|
||||||
|
expectedTransformationMatrix:expectedTransformationMatrix];
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)testDetectWithImageModeAndNoFaceSucceeds {
|
||||||
|
NSString *modelPath = [MPPFaceLandmarkerTests filePathWithName:kFaceLandmarkerModelName
|
||||||
|
extension:@"task"];
|
||||||
|
MPPFaceLandmarker *faceLandmarker = [[MPPFaceLandmarker alloc] initWithModelPath:modelPath
|
||||||
|
error:nil];
|
||||||
|
XCTAssertNotNil(faceLandmarker);
|
||||||
|
|
||||||
|
NSError *error;
|
||||||
|
MPPImage *mppImage = [self imageWithFileInfo:kCatImage];
|
||||||
|
MPPFaceLandmarkerResult *faceLandmarkerResult = [faceLandmarker detectInImage:mppImage
|
||||||
|
error:&error];
|
||||||
|
XCTAssertNil(error);
|
||||||
|
XCTAssertNotNil(faceLandmarkerResult);
|
||||||
|
XCTAssertEqualObjects(faceLandmarkerResult.faceLandmarks, [NSArray array]);
|
||||||
|
XCTAssertEqualObjects(faceLandmarkerResult.faceBlendshapes, [NSArray array]);
|
||||||
|
XCTAssertEqualObjects(faceLandmarkerResult.facialTransformationMatrixes, [NSArray array]);
|
||||||
|
}
|
||||||
|
|
||||||
|
#pragma mark Video Mode Tests
|
||||||
|
|
||||||
|
- (void)testDetectWithVideoModeAndPotraitSucceeds {
|
||||||
|
MPPFaceLandmarkerOptions *options =
|
||||||
|
[self faceLandmarkerOptionsWithModelName:kFaceLandmarkerModelName];
|
||||||
|
options.runningMode = MPPRunningModeVideo;
|
||||||
|
MPPFaceLandmarker *faceLandmarker = [[MPPFaceLandmarker alloc] initWithOptions:options error:nil];
|
||||||
|
|
||||||
|
MPPImage *image = [self imageWithFileInfo:kPortraitImage];
|
||||||
|
NSArray<MPPNormalizedLandmark *> *expectedLandmarks =
|
||||||
|
[MPPFaceLandmarkerTests expectedLandmarksFromFileInfo:kPortraitExpectedLandmarksName];
|
||||||
|
for (int i = 0; i < 3; i++) {
|
||||||
|
MPPFaceLandmarkerResult *faceLandmarkerResult = [faceLandmarker detectInVideoFrame:image
|
||||||
|
timestampInMilliseconds:i
|
||||||
|
error:nil];
|
||||||
|
[self assertFaceLandmarkerResult:faceLandmarkerResult
|
||||||
|
containsExpectedLandmarks:expectedLandmarks
|
||||||
|
expectedBlendshapes:NULL
|
||||||
|
expectedTransformationMatrix:NULL];
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#pragma mark Running Mode Tests
|
||||||
|
|
||||||
|
- (void)testDetectFailsWithCallingWrongAPIInImageMode {
|
||||||
|
MPPFaceLandmarkerOptions *options =
|
||||||
|
[self faceLandmarkerOptionsWithModelName:kFaceLandmarkerModelName];
|
||||||
|
MPPFaceLandmarker *faceLandmarker = [[MPPFaceLandmarker alloc] initWithOptions:options error:nil];
|
||||||
|
|
||||||
|
MPPImage *image = [self imageWithFileInfo:kPortraitImage];
|
||||||
|
NSError *videoAPICallError;
|
||||||
|
XCTAssertFalse([faceLandmarker detectInVideoFrame:image
|
||||||
|
timestampInMilliseconds:0
|
||||||
|
error:&videoAPICallError]);
|
||||||
|
|
||||||
|
NSError *expectedVideoAPICallError =
|
||||||
|
[NSError errorWithDomain:kExpectedErrorDomain
|
||||||
|
code:MPPTasksErrorCodeInvalidArgumentError
|
||||||
|
userInfo:@{
|
||||||
|
NSLocalizedDescriptionKey : @"The vision task is not initialized with "
|
||||||
|
@"video mode. Current Running Mode: Image"
|
||||||
|
}];
|
||||||
|
AssertEqualErrors(videoAPICallError, expectedVideoAPICallError);
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)testDetectFailsWithCallingWrongAPIInVideoMode {
|
||||||
|
MPPFaceLandmarkerOptions *options =
|
||||||
|
[self faceLandmarkerOptionsWithModelName:kFaceLandmarkerModelName];
|
||||||
|
options.runningMode = MPPRunningModeVideo;
|
||||||
|
|
||||||
|
MPPFaceLandmarker *faceLandmarker = [[MPPFaceLandmarker alloc] initWithOptions:options error:nil];
|
||||||
|
|
||||||
|
MPPImage *image = [self imageWithFileInfo:kPortraitImage];
|
||||||
|
NSError *imageAPICallError;
|
||||||
|
XCTAssertFalse([faceLandmarker detectInImage:image error:&imageAPICallError]);
|
||||||
|
|
||||||
|
NSError *expectedImageAPICallError =
|
||||||
|
[NSError errorWithDomain:kExpectedErrorDomain
|
||||||
|
code:MPPTasksErrorCodeInvalidArgumentError
|
||||||
|
userInfo:@{
|
||||||
|
NSLocalizedDescriptionKey : @"The vision task is not initialized with "
|
||||||
|
@"image mode. Current Running Mode: Video"
|
||||||
|
}];
|
||||||
|
AssertEqualErrors(imageAPICallError, expectedImageAPICallError);
|
||||||
|
}
|
||||||
|
|
||||||
|
+ (NSString *)filePathWithName:(NSString *)fileName extension:(NSString *)extension {
|
||||||
|
NSString *filePath =
|
||||||
|
[[NSBundle bundleForClass:[MPPFaceLandmarkerTests class]] pathForResource:fileName
|
||||||
|
ofType:extension];
|
||||||
|
return filePath;
|
||||||
|
}
|
||||||
|
|
||||||
|
+ (NSArray<MPPNormalizedLandmark *> *)expectedLandmarksFromFileInfo:(NSDictionary *)fileInfo {
|
||||||
|
NSString *filePath = [self filePathWithName:fileInfo[@"name"] extension:fileInfo[@"type"]];
|
||||||
|
NormalizedLandmarkListProto proto;
|
||||||
|
if (!get_proto_from_pbtxt([filePath UTF8String], proto).ok()) {
|
||||||
|
return nil;
|
||||||
|
}
|
||||||
|
NSMutableArray<MPPNormalizedLandmark *> *landmarks =
|
||||||
|
[NSMutableArray arrayWithCapacity:(NSUInteger)proto.landmark_size()];
|
||||||
|
for (const auto &landmarkProto : proto.landmark()) {
|
||||||
|
[landmarks addObject:[MPPNormalizedLandmark normalizedLandmarkWithProto:landmarkProto]];
|
||||||
|
}
|
||||||
|
return landmarks;
|
||||||
|
}
|
||||||
|
|
||||||
|
+ (MPPClassifications *)expectedBlendshapesFromFileInfo:(NSDictionary *)fileInfo {
|
||||||
|
NSString *filePath = [self filePathWithName:fileInfo[@"name"] extension:fileInfo[@"type"]];
|
||||||
|
ClassificationListProto proto;
|
||||||
|
if (!get_proto_from_pbtxt([filePath UTF8String], proto).ok()) {
|
||||||
|
return nil;
|
||||||
|
}
|
||||||
|
return [MPPClassifications classificationsWithClassificationListProto:proto
|
||||||
|
headIndex:0
|
||||||
|
headName:[NSString string]];
|
||||||
|
}
|
||||||
|
|
||||||
|
+ (MPPTransformMatrix *)expectedTransformationMatrixFromFileInfo:(NSDictionary *)fileInfo {
|
||||||
|
NSString *filePath = [self filePathWithName:fileInfo[@"name"] extension:fileInfo[@"type"]];
|
||||||
|
FaceGeometryProto proto;
|
||||||
|
if (!get_proto_from_pbtxt([filePath UTF8String], proto).ok()) {
|
||||||
|
return nil;
|
||||||
|
}
|
||||||
|
return [[MPPTransformMatrix alloc] initWithData:proto.pose_transform_matrix().packed_data().data()
|
||||||
|
rows:proto.pose_transform_matrix().rows()
|
||||||
|
columns:proto.pose_transform_matrix().cols()];
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)assertFaceLandmarkerResult:(MPPFaceLandmarkerResult *)faceLandmarkerResult
|
||||||
|
containsExpectedLandmarks:(NSArray<MPPNormalizedLandmark *> *)expectedLandmarks
|
||||||
|
expectedBlendshapes:(nullable MPPClassifications *)expectedBlendshapes
|
||||||
|
expectedTransformationMatrix:(nullable MPPTransformMatrix *)expectedTransformationMatrix {
|
||||||
|
NSArray<MPPNormalizedLandmark *> *landmarks = faceLandmarkerResult.faceLandmarks[0];
|
||||||
|
XCTAssertEqual(landmarks.count, expectedLandmarks.count);
|
||||||
|
for (int i = 0; i < landmarks.count; ++i) {
|
||||||
|
XCTAssertEqualWithAccuracy(landmarks[i].x, expectedLandmarks[i].x, kLandmarkErrorThreshold,
|
||||||
|
@"index i = %d", i);
|
||||||
|
XCTAssertEqualWithAccuracy(landmarks[i].y, expectedLandmarks[i].y, kLandmarkErrorThreshold,
|
||||||
|
@"index i = %d", i);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (expectedBlendshapes == NULL) {
|
||||||
|
XCTAssertEqualObjects(faceLandmarkerResult.faceBlendshapes, [NSArray array]);
|
||||||
|
} else {
|
||||||
|
MPPClassifications *blendshapes = faceLandmarkerResult.faceBlendshapes[0];
|
||||||
|
NSArray<MPPCategory *> *actualCategories = blendshapes.categories;
|
||||||
|
NSArray<MPPCategory *> *expectedCategories = expectedBlendshapes.categories;
|
||||||
|
XCTAssertEqual(actualCategories.count, expectedCategories.count);
|
||||||
|
for (int i = 0; i < actualCategories.count; ++i) {
|
||||||
|
XCTAssertEqual(actualCategories[i].index, expectedCategories[i].index, @"index i = %d", i);
|
||||||
|
XCTAssertEqualWithAccuracy(actualCategories[i].score, expectedCategories[i].score,
|
||||||
|
kBlendshapesErrorThreshold, @"index i = %d", i);
|
||||||
|
XCTAssertEqualObjects(actualCategories[i].categoryName, expectedCategories[i].categoryName,
|
||||||
|
@"index i = %d", i);
|
||||||
|
XCTAssertEqualObjects(actualCategories[i].displayName, expectedCategories[i].displayName,
|
||||||
|
@"index i = %d", i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (expectedTransformationMatrix == NULL) {
|
||||||
|
XCTAssertEqualObjects(faceLandmarkerResult.facialTransformationMatrixes, [NSArray array]);
|
||||||
|
} else {
|
||||||
|
MPPTransformMatrix *actualTransformationMatrix =
|
||||||
|
faceLandmarkerResult.facialTransformationMatrixes[0];
|
||||||
|
XCTAssertEqual(actualTransformationMatrix.rows, expectedTransformationMatrix.rows);
|
||||||
|
XCTAssertEqual(actualTransformationMatrix.columns, expectedTransformationMatrix.columns);
|
||||||
|
for (int i = 0; i < actualTransformationMatrix.rows * actualTransformationMatrix.columns; ++i) {
|
||||||
|
XCTAssertEqualWithAccuracy(actualTransformationMatrix.data[i],
|
||||||
|
expectedTransformationMatrix.data[i],
|
||||||
|
kFacialTransformationMatrixErrorThreshold, @"index i = %d", i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#pragma mark Face Landmarker Initializers
|
||||||
|
|
||||||
|
- (MPPFaceLandmarkerOptions *)faceLandmarkerOptionsWithModelName:(NSString *)modelName {
|
||||||
|
NSString *modelPath = [MPPFaceLandmarkerTests filePathWithName:modelName extension:@"task"];
|
||||||
|
MPPFaceLandmarkerOptions *faceLandmarkerOptions = [[MPPFaceLandmarkerOptions alloc] init];
|
||||||
|
faceLandmarkerOptions.baseOptions.modelAssetPath = modelPath;
|
||||||
|
return faceLandmarkerOptions;
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)assertCreateFaceLandmarkerWithOptions:(MPPFaceLandmarkerOptions *)faceLandmarkerOptions
|
||||||
|
failsWithExpectedError:(NSError *)expectedError {
|
||||||
|
NSError *error = nil;
|
||||||
|
MPPFaceLandmarker *faceLandmarker =
|
||||||
|
[[MPPFaceLandmarker alloc] initWithOptions:faceLandmarkerOptions error:&error];
|
||||||
|
XCTAssertNil(faceLandmarker);
|
||||||
|
AssertEqualErrors(error, expectedError);
|
||||||
|
}
|
||||||
|
|
||||||
|
#pragma mark Assert Detection Results
|
||||||
|
|
||||||
|
- (MPPImage *)imageWithFileInfo:(ResourceFileInfo *)fileInfo {
|
||||||
|
UIImageOrientation orientation = (UIImageOrientation)[fileInfo[@"orientation"] intValue];
|
||||||
|
MPPImage *image = [MPPImage imageFromBundleWithClass:[MPPFaceLandmarkerTests class]
|
||||||
|
fileName:fileInfo[@"name"]
|
||||||
|
ofType:fileInfo[@"type"]
|
||||||
|
orientation:orientation];
|
||||||
|
XCTAssertNotNil(image);
|
||||||
|
return image;
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)assertResultsOfDetectInImageWithFileInfo:(ResourceFileInfo *)fileInfo
|
||||||
|
usingFaceLandmarker:(MPPFaceLandmarker *)faceLandmarker
|
||||||
|
containsExpectedLandmarks:
|
||||||
|
(NSArray<MPPNormalizedLandmark *> *)expectedLandmarks
|
||||||
|
expectedBlendshapes:(nullable MPPClassifications *)expectedBlendshapes
|
||||||
|
expectedTransformationMatrix:
|
||||||
|
(nullable MPPTransformMatrix *)expectedTransformationMatrix {
|
||||||
|
MPPImage *mppImage = [self imageWithFileInfo:fileInfo];
|
||||||
|
|
||||||
|
NSError *error;
|
||||||
|
MPPFaceLandmarkerResult *faceLandmarkerResult = [faceLandmarker detectInImage:mppImage
|
||||||
|
error:&error];
|
||||||
|
XCTAssertNil(error);
|
||||||
|
XCTAssertNotNil(faceLandmarkerResult);
|
||||||
|
|
||||||
|
[self assertFaceLandmarkerResult:faceLandmarkerResult
|
||||||
|
containsExpectedLandmarks:expectedLandmarks
|
||||||
|
expectedBlendshapes:expectedBlendshapes
|
||||||
|
expectedTransformationMatrix:expectedTransformationMatrix];
|
||||||
|
}
|
||||||
|
|
||||||
|
@end
|
|
@ -37,7 +37,32 @@ objc_library(
|
||||||
srcs = ["sources/MPPFaceLandmarkerOptions.m"],
|
srcs = ["sources/MPPFaceLandmarkerOptions.m"],
|
||||||
hdrs = ["sources/MPPFaceLandmarkerOptions.h"],
|
hdrs = ["sources/MPPFaceLandmarkerOptions.h"],
|
||||||
deps = [
|
deps = [
|
||||||
|
":MPPFaceLandmarkerResult",
|
||||||
"//mediapipe/tasks/ios/core:MPPTaskOptions",
|
"//mediapipe/tasks/ios/core:MPPTaskOptions",
|
||||||
"//mediapipe/tasks/ios/vision/core:MPPRunningMode",
|
"//mediapipe/tasks/ios/vision/core:MPPRunningMode",
|
||||||
],
|
],
|
||||||
)
|
)
|
||||||
|
|
||||||
|
objc_library(
|
||||||
|
name = "MPPFaceLandmarker",
|
||||||
|
srcs = ["sources/MPPFaceLandmarker.m"],
|
||||||
|
hdrs = ["sources/MPPFaceLandmarker.h"],
|
||||||
|
copts = [
|
||||||
|
"-ObjC++",
|
||||||
|
"-std=c++17",
|
||||||
|
"-x objective-c++",
|
||||||
|
],
|
||||||
|
deps = [
|
||||||
|
":MPPFaceLandmarkerOptions",
|
||||||
|
":MPPFaceLandmarkerResult",
|
||||||
|
"//mediapipe/tasks/cc/vision/face_landmarker:face_landmarker_graph",
|
||||||
|
"//mediapipe/tasks/ios/common/utils:MPPCommonUtils",
|
||||||
|
"//mediapipe/tasks/ios/common/utils:NSStringHelpers",
|
||||||
|
"//mediapipe/tasks/ios/core:MPPTaskInfo",
|
||||||
|
"//mediapipe/tasks/ios/vision/core:MPPImage",
|
||||||
|
"//mediapipe/tasks/ios/vision/core:MPPVisionPacketCreator",
|
||||||
|
"//mediapipe/tasks/ios/vision/core:MPPVisionTaskRunner",
|
||||||
|
"//mediapipe/tasks/ios/vision/face_landmarker/utils:MPPFaceLandmarkerOptionsHelpers",
|
||||||
|
"//mediapipe/tasks/ios/vision/face_landmarker/utils:MPPFaceLandmarkerResultHelpers",
|
||||||
|
],
|
||||||
|
)
|
||||||
|
|
|
@ -0,0 +1,116 @@
|
||||||
|
// Copyright 2023 The MediaPipe Authors.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#import <Foundation/Foundation.h>
|
||||||
|
|
||||||
|
#import "mediapipe/tasks/ios/vision/core/sources/MPPImage.h"
|
||||||
|
#import "mediapipe/tasks/ios/vision/face_landmarker/sources/MPPFaceLandmarkerOptions.h"
|
||||||
|
#import "mediapipe/tasks/ios/vision/face_landmarker/sources/MPPFaceLandmarkerResult.h"
|
||||||
|
|
||||||
|
NS_ASSUME_NONNULL_BEGIN
|
||||||
|
|
||||||
|
/**
|
||||||
|
* @brief Class that performs face landmark detection on images.
|
||||||
|
*
|
||||||
|
* The API expects a TFLite model with mandatory TFLite Model Metadata.
|
||||||
|
*/
|
||||||
|
NS_SWIFT_NAME(FaceLandmarker)
|
||||||
|
@interface MPPFaceLandmarker : NSObject
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new instance of `MPPFaceLandmarker` from an absolute path to a TensorFlow Lite model
|
||||||
|
* file stored locally on the device and the default `MPPFaceLandmarker`.
|
||||||
|
*
|
||||||
|
* @param modelPath An absolute path to a TensorFlow Lite model file stored locally on the device.
|
||||||
|
* @param error An optional error parameter populated when there is an error in initializing the
|
||||||
|
* face landmaker.
|
||||||
|
*
|
||||||
|
* @return A new instance of `MPPFaceLandmarker` with the given model path. `nil` if there is an
|
||||||
|
* error in initializing the face landmaker.
|
||||||
|
*/
|
||||||
|
- (nullable instancetype)initWithModelPath:(NSString *)modelPath error:(NSError **)error;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Creates a new instance of `MPPFaceLandmarker` from the given `MPPFaceLandmarkerOptions`.
|
||||||
|
*
|
||||||
|
* @param options The options of type `MPPFaceLandmarkerOptions` to use for configuring the
|
||||||
|
* `MPPFaceLandmarker`.
|
||||||
|
* @param error An optional error parameter populated when there is an error in initializing the
|
||||||
|
* face landmaker.
|
||||||
|
*
|
||||||
|
* @return A new instance of `MPPFaceLandmarker` with the given options. `nil` if there is an error
|
||||||
|
* in initializing the face landmaker.
|
||||||
|
*/
|
||||||
|
- (nullable instancetype)initWithOptions:(MPPFaceLandmarkerOptions *)options
|
||||||
|
error:(NSError **)error NS_DESIGNATED_INITIALIZER;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Performs face landmark detection on the provided MPPImage using the whole image as region of
|
||||||
|
* interest. Rotation will be applied according to the `orientation` property of the provided
|
||||||
|
* `MPPImage`. Only use this method when the `MPPFaceLandmarker` is created with
|
||||||
|
* `MPPRunningModeImage`.
|
||||||
|
*
|
||||||
|
* This method supports RGBA images. If your `MPPImage` has a source type of
|
||||||
|
* `MPPImageSourceTypePixelBuffer` or `MPPImageSourceTypeSampleBuffer`, the underlying pixel buffer
|
||||||
|
* must have one of the following pixel format types:
|
||||||
|
* 1. kCVPixelFormatType_32BGRA
|
||||||
|
* 2. kCVPixelFormatType_32RGBA
|
||||||
|
*
|
||||||
|
* If your `MPPImage` has a source type of `MPPImageSourceTypeImage` ensure that the color space is
|
||||||
|
* RGB with an Alpha channel.
|
||||||
|
*
|
||||||
|
* @param image The `MPPImage` on which face landmark detection is to be performed.
|
||||||
|
* @param error An optional error parameter populated when there is an error in performing face
|
||||||
|
* landmark detection on the input image.
|
||||||
|
*
|
||||||
|
* @return An `MPPFaceLandmarkerResult` that contains a list of landmarks.
|
||||||
|
*/
|
||||||
|
- (nullable MPPFaceLandmarkerResult *)detectInImage:(MPPImage *)image
|
||||||
|
error:(NSError **)error NS_SWIFT_NAME(detect(image:));
|
||||||
|
|
||||||
|
/**
 * Performs face landmark detection on the provided video frame of type `MPPImage` using the whole
 * image as region of interest. Rotation will be applied according to the `orientation` property of
 * the provided `MPPImage`. Only use this method when the `MPPFaceLandmarker` is created with
 * `MPPRunningModeVideo`.
 *
 * This method supports RGBA images. If your `MPPImage` has a source type of
 * `MPPImageSourceTypePixelBuffer` or `MPPImageSourceTypeSampleBuffer`, the underlying pixel buffer
 * must have one of the following pixel format types:
 * 1. kCVPixelFormatType_32BGRA
 * 2. kCVPixelFormatType_32RGBA
 *
 * If your `MPPImage` has a source type of `MPPImageSourceTypeImage` ensure that the color space is
 * RGB with an Alpha channel.
 *
 * @param image The `MPPImage` on which face landmark detection is to be performed.
 * @param timestampInMilliseconds The video frame's timestamp (in milliseconds). The input
 * timestamps must be monotonically increasing.
 * @param error An optional error parameter populated when there is an error in performing face
 * landmark detection on the input image.
 *
 * @return An `MPPFaceLandmarkerResult` that contains a list of landmarks.
 */
- (nullable MPPFaceLandmarkerResult *)detectInVideoFrame:(MPPImage *)image
                                 timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                                                   error:(NSError **)error
    NS_SWIFT_NAME(detect(videoFrame:timestampInMilliseconds:));

// A face landmarker must be configured through one of the designated initializers above;
// plain `init`/`new` are therefore unavailable.
- (instancetype)init NS_UNAVAILABLE;

+ (instancetype)new NS_UNAVAILABLE;

@end

NS_ASSUME_NONNULL_END
|
|
@ -0,0 +1,203 @@
|
||||||
|
// Copyright 2023 The MediaPipe Authors.
|
||||||
|
//
|
||||||
|
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
// you may not use this file except in compliance with the License.
|
||||||
|
// You may obtain a copy of the License at
|
||||||
|
//
|
||||||
|
// http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
//
|
||||||
|
// Unless required by applicable law or agreed to in writing, software
|
||||||
|
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
// See the License for the specific language governing permissions and
|
||||||
|
// limitations under the License.
|
||||||
|
|
||||||
|
#import "mediapipe/tasks/ios/vision/face_landmarker/sources/MPPFaceLandmarker.h"
|
||||||
|
#import <Foundation/Foundation.h>
|
||||||
|
|
||||||
|
#import "mediapipe/tasks/ios/common/utils/sources/MPPCommonUtils.h"
|
||||||
|
#import "mediapipe/tasks/ios/common/utils/sources/NSString+Helpers.h"
|
||||||
|
#import "mediapipe/tasks/ios/core/sources/MPPTaskInfo.h"
|
||||||
|
#import "mediapipe/tasks/ios/vision/core/sources/MPPVisionPacketCreator.h"
|
||||||
|
#import "mediapipe/tasks/ios/vision/core/sources/MPPVisionTaskRunner.h"
|
||||||
|
#import "mediapipe/tasks/ios/vision/face_landmarker/utils/sources/MPPFaceLandmarkerOptions+Helpers.h"
|
||||||
|
#import "mediapipe/tasks/ios/vision/face_landmarker/utils/sources/MPPFaceLandmarkerResult+Helpers.h"
|
||||||
|
|
||||||
|
using ::mediapipe::NormalizedRect;
|
||||||
|
using ::mediapipe::Packet;
|
||||||
|
using ::mediapipe::tasks::core::PacketMap;
|
||||||
|
using ::mediapipe::tasks::core::PacketsCallback;
|
||||||
|
|
||||||
|
// Constants for the underlying MP Tasks Graph. See
// https://github.com/google/mediapipe/tree/master/mediapipe/tasks/cc/vision/face_landmarker/face_landmarker_graph.cc
static NSString *const kLandmarksOutStreamName = @"landmarks_out";
static NSString *const kLandmarksOutTag = @"NORM_LANDMARKS";
static NSString *const kBlendshapesOutStreamName = @"blendshapes_out";
static NSString *const kBlendshapesOutTag = @"BLENDSHAPES";
static NSString *const kFaceGeometryOutStreamName = @"face_geometry_out";
static NSString *const kFaceGeometryOutTag = @"FACE_GEOMETRY";
static NSString *const kNormRectStreamName = @"norm_rect_in";
static NSString *const kNormRectTag = @"NORM_RECT";
static NSString *const kImageInStreamName = @"image_in";
static NSString *const kImageOutStreamName = @"image_out";
static NSString *const kImageTag = @"IMAGE";
static NSString *const kTaskGraphName =
    @"mediapipe.tasks.vision.face_landmarker.FaceLandmarkerGraph";
static NSString *const kTaskName = @"faceLandmarker";

// Builds the graph's input PacketMap ({stream name -> Packet}) from the image packet and the
// packet holding the normalized region-of-interest rectangle.
#define InputPacketMap(imagePacket, normalizedRectPacket) \
  {                                                       \
    {kImageInStreamName.cppString, imagePacket}, {        \
      kNormRectStreamName.cppString, normalizedRectPacket \
    }                                                     \
  }
|
||||||
|
|
||||||
|
// Class extension holding private state for MPPFaceLandmarker.
@interface MPPFaceLandmarker () {
  /** iOS Vision Task Runner that owns and drives the underlying MediaPipe graph. */
  MPPVisionTaskRunner *_visionTaskRunner;
}

@end
|
||||||
|
|
||||||
|
@implementation MPPFaceLandmarker

/**
 * Designated initializer: builds the task graph configuration from `options` and spins up the
 * vision task runner. Returns nil (with `*error` populated) if either step fails.
 */
- (instancetype)initWithOptions:(MPPFaceLandmarkerOptions *)options error:(NSError **)error {
  self = [super init];
  if (self) {
    NSArray<NSString *> *inputStreams = @[
      [NSString stringWithFormat:@"%@:%@", kImageTag, kImageInStreamName],
      [NSString stringWithFormat:@"%@:%@", kNormRectTag, kNormRectStreamName]
    ];

    // Landmarks and the loopback image are always produced; blendshapes and facial
    // transformation matrixes are only wired into the graph when requested via `options`.
    NSMutableArray<NSString *> *outputStreams = [NSMutableArray
        arrayWithObjects:[NSString
                             stringWithFormat:@"%@:%@", kLandmarksOutTag, kLandmarksOutStreamName],
                         [NSString stringWithFormat:@"%@:%@", kImageTag, kImageOutStreamName], nil];
    if (options.outputFaceBlendshapes) {
      [outputStreams addObject:[NSString stringWithFormat:@"%@:%@", kBlendshapesOutTag,
                                                          kBlendshapesOutStreamName]];
    }
    if (options.outputFacialTransformationMatrixes) {
      [outputStreams addObject:[NSString stringWithFormat:@"%@:%@", kFaceGeometryOutTag,
                                                          kFaceGeometryOutStreamName]];
    }

    MPPTaskInfo *taskInfo =
        [[MPPTaskInfo alloc] initWithTaskGraphName:kTaskGraphName
                                      inputStreams:inputStreams
                                     outputStreams:outputStreams
                                       taskOptions:options
                                enableFlowLimiting:options.runningMode == MPPRunningModeLiveStream
                                             error:error];
    if (!taskInfo) {
      return nil;
    }

    _visionTaskRunner =
        [[MPPVisionTaskRunner alloc] initWithCalculatorGraphConfig:[taskInfo generateGraphConfig]
                                                       runningMode:options.runningMode
                                                   packetsCallback:nullptr
                                                             error:error];
    if (!_visionTaskRunner) {
      return nil;
    }
  }

  return self;
}

/** Convenience initializer: default options with only the model asset path set. */
- (instancetype)initWithModelPath:(NSString *)modelPath error:(NSError **)error {
  MPPFaceLandmarkerOptions *options = [[MPPFaceLandmarkerOptions alloc] init];
  options.baseOptions.modelAssetPath = modelPath;
  return [self initWithOptions:options error:error];
}

/**
 * Creates the timestamped input PacketMap (image + normalized region-of-interest rect) for one
 * video frame, or `std::nullopt` (with `*error` populated) on failure.
 */
- (std::optional<PacketMap>)inputPacketMapWithMPPImage:(MPPImage *)image
                               timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                                                 error:(NSError **)error {
  // The whole image is used as region of interest; only its `orientation` influences the rect.
  std::optional<NormalizedRect> rect =
      [_visionTaskRunner normalizedRectWithImageOrientation:image.orientation
                                                  imageSize:CGSizeMake(image.width, image.height)
                                                      error:error];
  if (!rect.has_value()) {
    return std::nullopt;
  }

  Packet imagePacket = [MPPVisionPacketCreator createPacketWithMPPImage:image
                                                timestampInMilliseconds:timestampInMilliseconds
                                                                  error:error];
  if (imagePacket.IsEmpty()) {
    return std::nullopt;
  }

  Packet normalizedRectPacket =
      [MPPVisionPacketCreator createPacketWithNormalizedRect:rect.value()
                                     timestampInMilliseconds:timestampInMilliseconds];

  PacketMap inputPacketMap = InputPacketMap(imagePacket, normalizedRectPacket);
  return inputPacketMap;
}

/**
 * Converts the graph's output PacketMap into an `MPPFaceLandmarkerResult`.
 *
 * Shared by the image- and video-mode detect methods (previously duplicated in both). Streams
 * that were not requested (blendshapes/face geometry) have no entry in the map;
 * `std::map::operator[]` then yields a default-constructed empty `Packet`, which the result
 * helper treats as "no data for this output".
 */
- (MPPFaceLandmarkerResult *)faceLandmarkerResultWithOutputPacketMap:(PacketMap &)outputPacketMap {
  return [MPPFaceLandmarkerResult
      faceLandmarkerResultWithLandmarksPacket:outputPacketMap[kLandmarksOutStreamName.cppString]
                            blendshapesPacket:outputPacketMap[kBlendshapesOutStreamName.cppString]
                 transformationMatrixesPacket:outputPacketMap
                                                  [kFaceGeometryOutStreamName.cppString]];
}

- (nullable MPPFaceLandmarkerResult *)detectInImage:(MPPImage *)image error:(NSError **)error {
  std::optional<NormalizedRect> rect =
      [_visionTaskRunner normalizedRectWithImageOrientation:image.orientation
                                                  imageSize:CGSizeMake(image.width, image.height)
                                                      error:error];
  if (!rect.has_value()) {
    return nil;
  }

  // Image mode sends untimestamped packets, hence no shared helper with the video path here.
  Packet imagePacket = [MPPVisionPacketCreator createPacketWithMPPImage:image error:error];
  if (imagePacket.IsEmpty()) {
    return nil;
  }

  Packet normalizedRectPacket =
      [MPPVisionPacketCreator createPacketWithNormalizedRect:rect.value()];

  PacketMap inputPacketMap = InputPacketMap(imagePacket, normalizedRectPacket);

  std::optional<PacketMap> outputPacketMap = [_visionTaskRunner processImagePacketMap:inputPacketMap
                                                                                error:error];
  if (!outputPacketMap.has_value()) {
    return nil;
  }

  return [self faceLandmarkerResultWithOutputPacketMap:outputPacketMap.value()];
}

- (nullable MPPFaceLandmarkerResult *)detectInVideoFrame:(MPPImage *)image
                                 timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                                                   error:(NSError **)error {
  std::optional<PacketMap> inputPacketMap = [self inputPacketMapWithMPPImage:image
                                                     timestampInMilliseconds:timestampInMilliseconds
                                                                       error:error];
  if (!inputPacketMap.has_value()) {
    return nil;
  }

  std::optional<PacketMap> outputPacketMap =
      [_visionTaskRunner processVideoFramePacketMap:inputPacketMap.value() error:error];
  if (!outputPacketMap.has_value()) {
    return nil;
  }

  return [self faceLandmarkerResultWithOutputPacketMap:outputPacketMap.value()];
}

@end
|
|
@ -16,6 +16,7 @@
|
||||||
|
|
||||||
#import "mediapipe/tasks/ios/core/sources/MPPTaskOptions.h"
|
#import "mediapipe/tasks/ios/core/sources/MPPTaskOptions.h"
|
||||||
#import "mediapipe/tasks/ios/vision/core/sources/MPPRunningMode.h"
|
#import "mediapipe/tasks/ios/vision/core/sources/MPPRunningMode.h"
|
||||||
|
#import "mediapipe/tasks/ios/vision/face_landmarker/sources/MPPFaceLandmarkerResult.h"
|
||||||
|
|
||||||
NS_ASSUME_NONNULL_BEGIN
|
NS_ASSUME_NONNULL_BEGIN
|
||||||
|
|
||||||
|
@ -59,6 +60,13 @@ NS_SWIFT_NAME(FaceLandmarkerOptions)
|
||||||
*/
|
*/
|
||||||
@property(nonatomic) BOOL outputFaceBlendshapes;
|
@property(nonatomic) BOOL outputFaceBlendshapes;
|
||||||
|
|
||||||
|
/**
|
||||||
|
* Whether FaceLandmarker outputs facial transformation_matrix. Facial transformation matrix is used
|
||||||
|
* to transform the face landmarks in canonical face to the detected face, so that users can apply
|
||||||
|
* face effects on the detected landmarks.
|
||||||
|
*/
|
||||||
|
@property(nonatomic) BOOL outputFacialTransformationMatrixes;
|
||||||
|
|
||||||
@end
|
@end
|
||||||
|
|
||||||
NS_ASSUME_NONNULL_END
|
NS_ASSUME_NONNULL_END
|
||||||
|
|
|
@ -24,6 +24,7 @@
|
||||||
_minFacePresenceConfidence = 0.5f;
|
_minFacePresenceConfidence = 0.5f;
|
||||||
_minTrackingConfidence = 0.5f;
|
_minTrackingConfidence = 0.5f;
|
||||||
_outputFaceBlendshapes = NO;
|
_outputFaceBlendshapes = NO;
|
||||||
|
_outputFacialTransformationMatrixes = NO;
|
||||||
}
|
}
|
||||||
return self;
|
return self;
|
||||||
}
|
}
|
||||||
|
@ -36,6 +37,8 @@
|
||||||
faceLandmarkerOptions.minFacePresenceConfidence = self.minFacePresenceConfidence;
|
faceLandmarkerOptions.minFacePresenceConfidence = self.minFacePresenceConfidence;
|
||||||
faceLandmarkerOptions.minTrackingConfidence = self.minTrackingConfidence;
|
faceLandmarkerOptions.minTrackingConfidence = self.minTrackingConfidence;
|
||||||
faceLandmarkerOptions.outputFaceBlendshapes = self.outputFaceBlendshapes;
|
faceLandmarkerOptions.outputFaceBlendshapes = self.outputFaceBlendshapes;
|
||||||
|
faceLandmarkerOptions.outputFacialTransformationMatrixes =
|
||||||
|
self.outputFacialTransformationMatrixes;
|
||||||
|
|
||||||
return faceLandmarkerOptions;
|
return faceLandmarkerOptions;
|
||||||
}
|
}
|
||||||
|
|
Loading…
Reference in New Issue
Block a user