Merge branch 'master' of https://github.com/HFVladimir/mediapipe into master2

Commit dfce9154f1
@@ -96,7 +96,7 @@ absl::Status RunMPPGraph() {
       break;
     }
     cv::Mat camera_frame;
-    cv::cvtColor(camera_frame_raw, camera_frame, cv::COLOR_BGR2RGBA);
+    cv::cvtColor(camera_frame_raw, camera_frame, cv::COLOR_BGR2RGB);
     if (!load_video) {
       cv::flip(camera_frame, camera_frame, /*flipcode=HORIZONTAL*/ 1);
     }

mediapipe/examples/ios/beauty/BUILD (new file, 144 lines)
@@ -0,0 +1,144 @@
# Copyright 2019 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

load(
    "@build_bazel_rules_apple//apple:ios.bzl",
    "ios_application",
    "ios_framework")
load("@build_bazel_rules_swift//swift:swift.bzl", "swift_library")
load(
    "//mediapipe/examples/ios:bundle_id.bzl",
    "BUNDLE_ID_PREFIX",
    "example_provisioning",
)

FRAMEWORK_HEADERS = [
    "MediaPipeController.h",
]

IOS_FAMILIES = [
    "iphone",
    "ipad",
]

licenses(["notice"])

MIN_IOS_VERSION = "11.0"

alias(
    name = "beauty",
    actual = "BeautyApp",
)

ios_application(
    name = "BeautyApp",
    app_icons = ["//mediapipe/examples/ios/common:AppIcon"],
    bundle_id = BUNDLE_ID_PREFIX + ".Beauty",
    families = [
        "iphone",
        "ipad",
    ],
    infoplists = [
        "//mediapipe/examples/ios/common:Info.plist",
        "Info.plist",
    ],
    minimum_os_version = MIN_IOS_VERSION,
    provisioning_profile = example_provisioning(),
    deps = [
        ":BeautyAppLibrary",
        "@ios_opencv//:OpencvFramework",
    ],
)

ios_framework(
    name = "MediaPipeFramework",
    hdrs = FRAMEWORK_HEADERS,
    bundle_id = "dh.MediaPipeFramework",
    bundle_name = "MediaPipeFramework",
    families = IOS_FAMILIES,
    infoplists = [
        "//mediapipe/examples/ios/common:Info.plist",
    ],
    minimum_os_version = MIN_IOS_VERSION,
    visibility = ["//visibility:public"],
    deps = [
        ":MediaPipeLib",
        "@ios_opencv//:OpencvFramework",
    ],
)


objc_library(
    name = "MediaPipeLib",
    srcs = [
        "MediaPipeController.mm",
    ],
    hdrs = FRAMEWORK_HEADERS,
    copts = ["-std=c++17"],  # https://github.com/google/mediapipe/issues/2275#issuecomment-877145926
    data = [
        "//mediapipe/graphs/face_effect:face_effect_gpu.binarypb",
        "//mediapipe/modules/face_detection:face_detection_short_range.tflite",
        "//mediapipe/graphs/face_effect/data:axis.binarypb",
        "//mediapipe/graphs/face_effect/data:axis.pngblob",
        "//mediapipe/graphs/face_effect/data:facepaint.pngblob",
        "//mediapipe/graphs/face_effect/data:glasses.binarypb",
        "//mediapipe/graphs/face_effect/data:glasses.pngblob",
        "//mediapipe/modules/face_geometry/data:geometry_pipeline_metadata.binarypb",
        "//mediapipe/modules/face_geometry/data:geometry_pipeline_metadata_detection.binarypb",
        "//mediapipe/modules/face_geometry/data:geometry_pipeline_metadata_landmarks.binarypb",
        "//mediapipe/modules/face_landmark:face_landmark.tflite",
        "//mediapipe/graphs/beauty:beauty_mobile_gpu.binarypb",
        "//mediapipe/modules/face_landmark:face_landmark_with_attention.tflite",
    ],
    deps = [
        "//mediapipe/objc:mediapipe_framework_ios",
        "//mediapipe/objc:mediapipe_input_sources_ios",
        "//mediapipe/calculators/core:packet_presence_calculator",
    ] + select({
        "//conditions:default": [
            "//mediapipe/framework/formats:matrix_data_cc_proto",
            "//mediapipe/graphs/face_effect:face_effect_gpu_deps",
            "//mediapipe/modules/face_geometry/protos:face_geometry_cc_proto",
            "//mediapipe/graphs/beauty:mobile_calculators",
            "//mediapipe/framework/formats:landmark_cc_proto",
        ],
    }),
)


objc_library(
    name = "BeautyAppLibrary",
    srcs = [
        "BeautyViewController.mm",
    ],
    hdrs = [
        "BeautyViewController.h",
    ],
    copts = ["-std=c++17"],
    data = [
        "//mediapipe/graphs/beauty:beauty_mobile_gpu.binarypb",
        "//mediapipe/modules/face_detection:face_detection_short_range.tflite",
        "//mediapipe/modules/face_landmark:face_landmark_with_attention.tflite",
    ],
    deps = [
        "//mediapipe/examples/ios/common:CommonMediaPipeAppLibrary",
    ] + select({
        "//mediapipe:ios_i386": [],
        "//mediapipe:ios_x86_64": [],
        "//conditions:default": [
            "//mediapipe/graphs/beauty:mobile_calculators",
            "//mediapipe/framework/formats:landmark_cc_proto",
        ],
    }),
)

mediapipe/examples/ios/beauty/BeautyViewController.h (new file, 21 lines)
@@ -0,0 +1,21 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#import <UIKit/UIKit.h>

#import "mediapipe/examples/ios/common/CommonViewController.h"

@interface BeautyViewController : CommonViewController

@end

mediapipe/examples/ios/beauty/BeautyViewController.mm (new file, 65 lines)
@@ -0,0 +1,65 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#import "BeautyViewController.h"

#include "mediapipe/framework/formats/landmark.pb.h"

static NSString* const kGraphName = @"beauty_mobile_gpu";

static const char* kNumFacesInputSidePacket = "num_faces";
static const char* kLandmarksOutputStream = "multi_face_landmarks";

// Max number of faces to detect/process.
static const int kNumFaces = 1;

@implementation BeautyViewController

#pragma mark - UIViewController methods

- (void)viewDidLoad {
  [super viewDidLoad];

  [self.mediapipeGraph setSidePacket:(mediapipe::MakePacket<int>(kNumFaces))
                               named:kNumFacesInputSidePacket];
  [self.mediapipeGraph addFrameOutputStream:kLandmarksOutputStream
                           outputPacketType:MPPPacketTypeRaw];
}

#pragma mark - MPPGraphDelegate methods

// Receives a raw packet from the MediaPipe graph. Invoked on a MediaPipe worker thread.
- (void)mediapipeGraph:(MPPGraph*)graph
       didOutputPacket:(const ::mediapipe::Packet&)packet
            fromStream:(const std::string&)streamName {
  if (streamName == kLandmarksOutputStream) {
    if (packet.IsEmpty()) {
      NSLog(@"[TS:%lld] No face landmarks", packet.Timestamp().Value());
      return;
    }
    const auto& multi_face_landmarks = packet.Get<std::vector<::mediapipe::NormalizedLandmarkList>>();
    NSLog(@"[TS:%lld] Number of face instances with landmarks: %lu", packet.Timestamp().Value(),
          multi_face_landmarks.size());
    for (int face_index = 0; face_index < multi_face_landmarks.size(); ++face_index) {
      const auto& landmarks = multi_face_landmarks[face_index];
      NSLog(@"\tNumber of landmarks for face[%d]: %d", face_index, landmarks.landmark_size());
      for (int i = 0; i < landmarks.landmark_size(); ++i) {
        NSLog(@"\t\tLandmark[%d]: (%f, %f, %f)", i, landmarks.landmark(i).x(),
              landmarks.landmark(i).y(), landmarks.landmark(i).z());
      }
    }
  }
}

@end

mediapipe/examples/ios/beauty/Info.plist (new file, 16 lines)
@@ -0,0 +1,16 @@
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
<plist version="1.0">
<dict>
  <key>CameraPosition</key>
  <string>front</string>
  <key>MainViewController</key>
  <string>BeautyViewController</string>
  <key>GraphOutputStream</key>
  <string>output_video</string>
  <key>GraphInputStream</key>
  <string>input_video</string>
  <key>GraphName</key>
  <string>beauty_mobile_gpu</string>
</dict>
</plist>

mediapipe/examples/ios/beauty/MediaPipeController.h (new file, 44 lines)
@@ -0,0 +1,44 @@
#import <CoreVideo/CoreVideo.h>
#import <Foundation/Foundation.h>
#import <AVFoundation/AVFoundation.h>

NS_ASSUME_NONNULL_BEGIN

@class MediaPipeController;
@class MediaPipeFaceLandmarkPoint;
@class MediaPipeNormalizedRect;

typedef void (^MediaPipeCompletionBlock)(CVPixelBufferRef pixelBuffer);

@protocol MediaPipeControllerDelegate <NSObject>
@optional
- (void)mediaPipeController:(MediaPipeController *)controller didReceiveFaces:(NSArray<NSArray<MediaPipeFaceLandmarkPoint *>*>*)faces;
- (void)mediaPipeController:(MediaPipeController *)controller didReceiveFaceBoxes:(NSArray<MediaPipeNormalizedRect *>*)faces;
- (void)mediaPipeController:(MediaPipeController *)controller didOutputPixelBuffer:(CVPixelBufferRef)pixelBuffer;
@end

@interface MediaPipeController : NSObject

+ (instancetype)facemesh;
+ (instancetype)effects;

- (void)startGraph;
- (void)processVideoFrame:(CVPixelBufferRef)imageBuffer timestamp:(CMTime)timestamp completion:(nullable MediaPipeCompletionBlock)completion;
@property (nullable, weak, nonatomic) id<MediaPipeControllerDelegate> delegate;
@end

@interface MediaPipeFaceLandmarkPoint : NSObject
@property (nonatomic) float x;
@property (nonatomic) float y;
@property (nonatomic) float z;
@end

@interface MediaPipeNormalizedRect : NSObject
@property (nonatomic) float centerX;
@property (nonatomic) float centerY;
@property (nonatomic) float height;
@property (nonatomic) float width;
@property (nonatomic) float rotation;
@end

NS_ASSUME_NONNULL_END

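(Aside, not part of the commit: a minimal consumer sketch of the API declared above. It assumes the MediaPipeFramework produced by build_mediapipe.sh is linked into the host app; the FaceTracker class, the import path, and the capture plumbing are illustrative, not taken from this repository.)

// Hypothetical usage of MediaPipeController from a host app.
#import <AVFoundation/AVFoundation.h>
#import <MediaPipeFramework/MediaPipeController.h>  // assumed framework import path

@interface FaceTracker : NSObject <MediaPipeControllerDelegate>
@property (nonatomic, strong) MediaPipeController *mediaPipe;
@end

@implementation FaceTracker

- (instancetype)init {
  if (self = [super init]) {
    _mediaPipe = [MediaPipeController facemesh];  // or [MediaPipeController effects]
    _mediaPipe.delegate = self;
    [_mediaPipe startGraph];
  }
  return self;
}

// Called from e.g. an AVCaptureVideoDataOutputSampleBufferDelegate callback.
- (void)handleSampleBuffer:(CMSampleBufferRef)sampleBuffer {
  CVPixelBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
  CMTime timestamp = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
  [self.mediaPipe processVideoFrame:pixelBuffer
                          timestamp:timestamp
                         completion:^(CVPixelBufferRef outputBuffer) {
    // Rendered frame from the graph's output_video stream; hand it to a preview layer or encoder.
  }];
}

- (void)mediaPipeController:(MediaPipeController *)controller
            didReceiveFaces:(NSArray<NSArray<MediaPipeFaceLandmarkPoint *> *> *)faces {
  NSLog(@"Got landmarks for %lu face(s)", (unsigned long)faces.count);
}

@end
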
mediapipe/examples/ios/beauty/MediaPipeController.mm (new file, 241 lines)
@@ -0,0 +1,241 @@
#import "MediaPipeController.h"
|
||||
#import "mediapipe/objc/MPPCameraInputSource.h"
|
||||
#import "mediapipe/objc/MPPGraph.h"
|
||||
|
||||
#include "mediapipe/framework/formats/landmark.pb.h"
|
||||
#include "mediapipe/framework/formats/rect.pb.h"
|
||||
#include "mediapipe/framework/formats/detection.pb.h"
|
||||
|
||||
//#import "mediapipe/objc/MPPLayerRenderer.h"
|
||||
|
||||
static NSString* const kMeshGraphName = @"face_mesh_mobile_gpu";
|
||||
static NSString* const kEffectsGraphName = @"face_effect_gpu";
|
||||
|
||||
static const char *kInputStream = "input_video";
|
||||
static const char *kOutputStream = "output_video";
|
||||
static const char *kNumFacesInputSidePacket = "num_faces";
|
||||
static const char *kLandmarksOutputStream = "multi_face_landmarks";
|
||||
static const char *kFaceRectsOutputStream = "face_rects_from_landmarks";
|
||||
static const char *kLandmarkPresenceOutputStream = "landmark_presence";
|
||||
static const char *kSelectedEffectIdInputStream = "selected_effect_id";
|
||||
static const char *kMultiFaceGeometryStream = "multi_face_geometry";
|
||||
static const char* kUseFaceDetectionInputSourceInputSidePacket = "use_face_detection_input_source";
|
||||
static const BOOL kUseFaceDetectionInputSource = NO;
|
||||
|
||||
// Max number of faces to detect/process.
|
||||
static const int kNumFaces = 2;
|
||||
|
||||
@interface MediaPipeController () <MPPGraphDelegate>
|
||||
@property (nonatomic) MPPGraph* mediapipeGraph;
|
||||
@property (nonatomic, copy) MediaPipeCompletionBlock completion;
|
||||
@property (nonatomic) size_t timestamp;
|
||||
@end
|
||||
|
||||
@implementation MediaPipeController
|
||||
|
||||
#pragma mark - Cleanup methods
|
||||
|
||||
- (void)dealloc {
|
||||
self.mediapipeGraph.delegate = nil;
|
||||
[self.mediapipeGraph cancel];
|
||||
// Ignore errors since we're cleaning up.
|
||||
[self.mediapipeGraph closeAllInputStreamsWithError:nil];
|
||||
[self.mediapipeGraph waitUntilDoneWithError:nil];
|
||||
|
||||
NSLog(@"dealloc MediaPipeController");
|
||||
}
|
||||
|
||||
#pragma mark - MediaPipe graph methods
|
||||
|
||||
+ (MPPGraph*)loadGraphFromResource:(NSString*)resource {
|
||||
// Load the graph config resource.
|
||||
NSError* configLoadError = nil;
|
||||
NSBundle* bundle = [NSBundle bundleForClass:[self class]];
|
||||
if (!resource || resource.length == 0) {
|
||||
bundle = NSBundle.mainBundle;
|
||||
}
|
||||
NSURL* graphURL = [bundle URLForResource:resource withExtension:@"binarypb"];
|
||||
NSData* data = [NSData dataWithContentsOfURL:graphURL options:0 error:&configLoadError];
|
||||
if (!data) {
|
||||
NSLog(@"MediaPipe: Failed to load graph config: %@", configLoadError);
|
||||
return nil;
|
||||
}
|
||||
|
||||
mediapipe::CalculatorGraphConfig config;
|
||||
config.ParseFromArray(data.bytes, data.length);
|
||||
|
||||
MPPGraph* newGraph = [[MPPGraph alloc] initWithGraphConfig:config];
|
||||
[newGraph setSidePacket:(mediapipe::MakePacket<bool>(kUseFaceDetectionInputSource)) named:kUseFaceDetectionInputSourceInputSidePacket];
|
||||
[newGraph setSidePacket:(mediapipe::MakePacket<int>(kNumFaces)) named:kNumFacesInputSidePacket];
|
||||
|
||||
[newGraph addFrameOutputStream:kOutputStream outputPacketType:MPPPacketTypePixelBuffer];
|
||||
//[newGraph addFrameOutputStream:kMultiFaceGeometryStream outputPacketType:MPPPacketTypeRaw];
|
||||
|
||||
// [newGraph addFrameOutputStream:kLandmarksOutputStream outputPacketType:MPPPacketTypeRaw];
|
||||
// [newGraph addFrameOutputStream:kFaceRectsOutputStream outputPacketType:MPPPacketTypeRaw];
|
||||
// [newGraph addFrameOutputStream:kLandmarkPresenceOutputStream outputPacketType:MPPPacketTypeRaw];
|
||||
return newGraph;
|
||||
}
|
||||
|
||||
- (instancetype)initWithGraphName:(NSString *)graphName {
|
||||
self = [super init];
|
||||
if (self) {
|
||||
self.mediapipeGraph = [[self class] loadGraphFromResource:graphName];
|
||||
self.mediapipeGraph.delegate = self;
|
||||
|
||||
// Set maxFramesInFlight to a small value to avoid memory contention for real-time processing.
|
||||
self.mediapipeGraph.maxFramesInFlight = 2;
|
||||
NSLog(@"MediaPipe: Inited graph %@", graphName);
|
||||
NSLog(@"alloc MediaPipeController");
|
||||
}
|
||||
return self;
|
||||
}
|
||||
|
||||
+ (instancetype)facemesh {
|
||||
return [[MediaPipeController alloc] initWithGraphName:kMeshGraphName];
|
||||
}
|
||||
|
||||
+ (instancetype)effects {
|
||||
return [[MediaPipeController alloc] initWithGraphName:kEffectsGraphName];
|
||||
}
|
||||
|
||||
- (void)startGraph {
|
||||
NSError* error;
|
||||
if (![self.mediapipeGraph startWithError:&error]) {
|
||||
NSLog(@"MediaPipe: Failed to start graph: %@", error);
|
||||
}
|
||||
NSLog(@"MediaPipe: Started graph");
|
||||
}
|
||||
|
||||
#pragma mark - MPPGraphDelegate methods
|
||||
|
||||
- (void)mediapipeGraph:(MPPGraph*)graph
|
||||
didOutputPixelBuffer:(CVPixelBufferRef)pixelBuffer
|
||||
fromStream:(const std::string&)streamName {
|
||||
//NSLog(@"MediaPipe: didOutputPixelBuffer %s %@", streamName.c_str(), self.completion);
|
||||
if (streamName == kOutputStream) {
|
||||
if([self.delegate respondsToSelector:@selector(mediaPipeController:didOutputPixelBuffer:)]) {
|
||||
[_delegate mediaPipeController:self didOutputPixelBuffer:pixelBuffer];
|
||||
}
|
||||
if (self.completion) {
|
||||
self.completion(pixelBuffer);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
- (void)mediapipeGraph:(MPPGraph*)graph
|
||||
didOutputPacket:(const ::mediapipe::Packet&)packet
|
||||
fromStream:(const std::string&)streamName {
|
||||
if (streamName == kLandmarksOutputStream) {
|
||||
if (packet.IsEmpty()) {
|
||||
return;
|
||||
}
|
||||
if(![self.delegate respondsToSelector:@selector(mediaPipeController:didReceiveFaces:)]) {
|
||||
return;
|
||||
}
|
||||
const auto& multi_face_landmarks = packet.Get<std::vector<::mediapipe::NormalizedLandmarkList>>();
|
||||
// NSLog(@"[TS:%lld] Number of face instances with landmarks: %lu", packet.Timestamp().Value(),
|
||||
// multi_face_landmarks.size());
|
||||
NSMutableArray <NSArray <MediaPipeFaceLandmarkPoint *>*>*faceLandmarks = [NSMutableArray new];
|
||||
|
||||
for (int face_index = 0; face_index < multi_face_landmarks.size(); ++face_index) {
|
||||
NSMutableArray *thisFaceLandmarks = [NSMutableArray new];
|
||||
const auto& landmarks = multi_face_landmarks[face_index];
|
||||
// NSLog(@"\tNumber of landmarks for face[%d]: %d", face_index, landmarks.landmark_size());
|
||||
for (int i = 0; i < landmarks.landmark_size(); ++i) {
|
||||
// NSLog(@"\t\tLandmark[%d]: (%f, %f, %f)", i, landmarks.landmark(i).x(),
|
||||
// landmarks.landmark(i).y(), landmarks.landmark(i).z());
|
||||
MediaPipeFaceLandmarkPoint *obj_landmark = [MediaPipeFaceLandmarkPoint new];
|
||||
obj_landmark.x = landmarks.landmark(i).x();
|
||||
obj_landmark.y = landmarks.landmark(i).y();
|
||||
obj_landmark.z = landmarks.landmark(i).z();
|
||||
[thisFaceLandmarks addObject:obj_landmark];
|
||||
}
|
||||
[faceLandmarks addObject:thisFaceLandmarks];
|
||||
}
|
||||
[self.delegate mediaPipeController:self didReceiveFaces:faceLandmarks];
|
||||
}
|
||||
|
||||
else if (streamName == kFaceRectsOutputStream) {
|
||||
if (packet.IsEmpty()) { // This condition never gets called because FaceLandmarkFrontGpu does not process when there are no detections
|
||||
// NSLog(@"[TS:%lld] No face rects", packet.Timestamp().Value());
|
||||
if([self.delegate respondsToSelector:@selector(mediaPipeController:didReceiveFaceBoxes:)]) {
|
||||
[self.delegate mediaPipeController:self didReceiveFaceBoxes:@[]];
|
||||
}
|
||||
return;
|
||||
}
|
||||
if(![self.delegate respondsToSelector:@selector(mediaPipeController:didReceiveFaceBoxes:)]) {
|
||||
return;
|
||||
}
|
||||
const auto& face_rects_from_landmarks = packet.Get<std::vector<::mediapipe::NormalizedRect>>();
|
||||
NSMutableArray <MediaPipeNormalizedRect *>*outRects = [NSMutableArray new];
|
||||
for (int face_index = 0; face_index < face_rects_from_landmarks.size(); ++face_index) {
|
||||
const auto& face = face_rects_from_landmarks[face_index];
|
||||
float centerX = face.x_center();
|
||||
float centerY = face.y_center();
|
||||
float height = face.height();
|
||||
float width = face.width();
|
||||
float rotation = face.rotation();
|
||||
MediaPipeNormalizedRect *rect = [MediaPipeNormalizedRect new];
|
||||
rect.centerX = centerX; rect.centerY = centerY; rect.height = height; rect.width = width; rect.rotation = rotation;
|
||||
[outRects addObject:rect];
|
||||
}
|
||||
[self.delegate mediaPipeController:self didReceiveFaceBoxes:outRects];
|
||||
} else if (streamName == kLandmarkPresenceOutputStream) {
|
||||
bool is_landmark_present = true;
|
||||
if (packet.IsEmpty()) {
|
||||
is_landmark_present = false;
|
||||
} else {
|
||||
is_landmark_present = packet.Get<bool>();
|
||||
}
|
||||
|
||||
if (is_landmark_present) {
|
||||
} else {
|
||||
// NSLog(@"Landmarks not present");
|
||||
if([self.delegate respondsToSelector:@selector(mediaPipeController:didReceiveFaceBoxes:)]) {
|
||||
[self.delegate mediaPipeController:self didReceiveFaceBoxes:@[]];
|
||||
}
|
||||
if([self.delegate respondsToSelector:@selector(mediaPipeController:didReceiveFaces:)]) {
|
||||
[self.delegate mediaPipeController:self didReceiveFaces:@[]];
|
||||
}
|
||||
}
|
||||
} else {
|
||||
//NSLog(@"MediaPipe: Unknown %@ packet with stream name %s", packet.IsEmpty() ? @"EMPTY" : @"NON-EMPTY",streamName.c_str());
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
#pragma mark - MPPInputSourceDelegate methods
|
||||
|
||||
- (void)processVideoFrame:(CVPixelBufferRef)imageBuffer timestamp:(CMTime)timestamp completion:(MediaPipeCompletionBlock)completion {
|
||||
const auto ts = mediapipe::Timestamp(self.timestamp++ * mediapipe::Timestamp::kTimestampUnitsPerSecond);
|
||||
self.completion = completion;
|
||||
|
||||
NSError* err = nil;
|
||||
//NSLog(@"sending imageBuffer @%@ to %s", @(ts.DebugString().c_str()), kInputStream);
|
||||
auto sent = [self.mediapipeGraph sendPixelBuffer:imageBuffer
|
||||
intoStream:kInputStream
|
||||
packetType:MPPPacketTypePixelBuffer
|
||||
timestamp:ts
|
||||
allowOverwrite:NO
|
||||
error:&err
|
||||
];
|
||||
//NSLog(@"imageBuffer %s", sent ? "sent!" : "not sent.");
|
||||
if (err) {
|
||||
NSLog(@"MediaPipe: sendPixelBuffer error: %@", err);
|
||||
}
|
||||
|
||||
mediapipe::Packet selectedEffectIdPacket = mediapipe::MakePacket<int>(2).At(ts);
|
||||
[self.mediapipeGraph movePacket:std::move(selectedEffectIdPacket)
|
||||
intoStream:kSelectedEffectIdInputStream
|
||||
error:nil];
|
||||
}
|
||||
|
||||
@end
|
||||
|
||||
|
||||
@implementation MediaPipeFaceLandmarkPoint
|
||||
@end
|
||||
|
||||
@implementation MediaPipeNormalizedRect
|
||||
@end
|
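(Aside, not part of the commit: the boxes delivered through didReceiveFaceBoxes: come from the face_rects_from_landmarks stream and carry coordinates normalized to the image, so a consumer typically scales them into view coordinates. The helper below is an illustrative sketch; it ignores the rotation field and assumes the rendered video fills the view without letterboxing.)

// Illustrative helper: map a MediaPipeNormalizedRect into view coordinates.
#import <UIKit/UIKit.h>
#import <MediaPipeFramework/MediaPipeController.h>  // assumed framework import path

static CGRect MPRectToViewRect(MediaPipeNormalizedRect *r, CGSize viewSize) {
  CGFloat w = r.width  * viewSize.width;               // normalized width  -> points
  CGFloat h = r.height * viewSize.height;              // normalized height -> points
  CGFloat x = r.centerX * viewSize.width  - w / 2.0;   // center -> origin
  CGFloat y = r.centerY * viewSize.height - h / 2.0;
  return CGRectMake(x, y, w, h);
}
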
mediapipe/examples/ios/beauty/build_mediapipe.sh (new executable file, 19 lines)
@@ -0,0 +1,19 @@
#!/bin/sh

mkdir -p ./frameworkbuild/MediaPipeFramework/arm64
mkdir -p ./frameworkbuild/MediaPipeFramework/x86_64
mkdir -p ./frameworkbuild/MediaPipeFramework/xcframework

bazel build --config=ios_arm64 mediapipe/examples/ios/facemeshgpu:MediaPipeFramework
./mediapipe/examples/ios/facemeshgpu/patch_ios_framework.sh ./bazel-out/applebin_ios-ios_arm64-fastbuild-ST-2967bd56a867/bin/mediapipe/examples/ios/facemeshgpu/MediaPipeFramework.zip MediaPipeController.h
cp -a ./bazel-out/applebin_ios-ios_arm64-fastbuild-ST-2967bd56a867/bin/mediapipe/examples/ios/facemeshgpu/MediaPipeFramework.framework ./frameworkbuild/MediaPipeFramework/arm64

bazel build --config=ios_x86_64 mediapipe/examples/ios/facemeshgpu:MediaPipeFramework
./mediapipe/examples/ios/facemeshgpu/patch_ios_framework.sh ./bazel-out/applebin_ios-ios_x86_64-fastbuild-ST-2967bd56a867/bin/mediapipe/examples/ios/facemeshgpu/MediaPipeFramework.zip MediaPipeController.h
cp -a ./bazel-out/applebin_ios-ios_x86_64-fastbuild-ST-2967bd56a867/bin/mediapipe/examples/ios/facemeshgpu/MediaPipeFramework.framework ./frameworkbuild/MediaPipeFramework/x86_64

xcodebuild -create-xcframework \
  -framework ./frameworkbuild/MediaPipeFramework/x86_64/MediaPipeFramework.framework \
  -framework ./frameworkbuild/MediaPipeFramework/arm64/MediaPipeFramework.framework \
  -output ./frameworkbuild/MediaPipeFramework/xcframework/MediaPipeFramework.xcframework

mediapipe/examples/ios/beauty/patch_ios_framework.sh (new executable file, 66 lines)
@@ -0,0 +1,66 @@
#!/bin/bash
set -eu
set -o pipefail

# Adds modulemap & header files to an iOS Framework
# generated with bazel and encapsulating Mediapipe.
#
# This makes it so that the patched .framework can be imported into Xcode.
# For a long term solution track the following issue:
# https://github.com/bazelbuild/rules_apple/issues/355

[[ $# -lt 2 ]] && echo "Usage: $0 <path/to/zipped .framework> <hdrs>..." && exit 1
zipped=$(python -c "import os; print(os.path.realpath('$1'))"); shift
name=$(basename "$zipped" .zip)
parent=$(dirname "$zipped")
named="$parent"/"$name".framework

unzip "$zipped" -d "$parent"

mkdir "$named"/Modules
cat << EOF >"$named"/Modules/module.modulemap
framework module $name {
  umbrella header "$name.h"

  export *
  module * { export * }

  link framework "AVFoundation"
  link framework "Accelerate"
  link framework "AssetsLibrary"
  link framework "CoreFoundation"
  link framework "CoreGraphics"
  link framework "CoreImage"
  link framework "CoreMedia"
  link framework "CoreVideo"
  link framework "GLKit"
  link framework "Metal"
  link framework "MetalKit"
  link framework "OpenGLES"
  link framework "QuartzCore"
  link framework "UIKit"
}
EOF
# NOTE: All these linked frameworks are required by mediapipe/objc.

cat << EOF >"$named"/Headers/$name.h
//
//  $name.h
//  $name
//

#import <Foundation/Foundation.h>

//! Project version number for $name.
FOUNDATION_EXPORT double ${name}VersionNumber;

//! Project version string for $name.
FOUNDATION_EXPORT const unsigned char ${name}VersionString[];

// In this header, you should import all the public headers of your framework using statements like #import <$name/PublicHeader.h>

EOF
until [[ $# -eq 0 ]]; do
  printf '#import "'"$1"'"\n' "$1" >>"$named"/Headers/$name.h
  shift
done

@@ -0,0 +1 @@
/Users/vladimir.borisov/Library/MobileDevice/Provisioning Profiles/6ef5d4b5-8423-4513-b88d-20e0314cbf37.mobileprovision

@@ -106,18 +106,13 @@ objc_library(
        "//mediapipe/objc:mediapipe_framework_ios",
        "//mediapipe/objc:mediapipe_input_sources_ios",
        "//mediapipe/calculators/core:packet_presence_calculator",
        # "//mediapipe/objc:mediapipe_layer_renderer", # no need for layer renderer since I don't render
    ] + select({
        # "//mediapipe:ios_i386": [],
        # "//mediapipe:ios_x86_64": [],
        "//conditions:default": [
            "//mediapipe/framework/formats:matrix_data_cc_proto",
            "//mediapipe/graphs/face_effect:face_effect_gpu_deps",
            "//mediapipe/modules/face_geometry/protos:face_geometry_cc_proto",
            "//mediapipe/graphs/face_mesh:mobile_calculators",
            "//mediapipe/framework/formats:landmark_cc_proto",
            # "//mediapipe/examples/facial_search/graphs:gpu_calculators",
            # "//mediapipe/examples/facial_search:embeddings_database",
        ],
    }),
)

@@ -68,7 +68,8 @@ cc_library(
         "//mediapipe/gpu:gpu_buffer_to_image_frame_calculator",
         "//mediapipe/gpu:image_frame_to_gpu_buffer_calculator",
         "//mediapipe/calculators/core:flow_limiter_calculator",
-        "//mediapipe/graphs/beauty/subgraphs:face_renderer_cpu",
+        #"//mediapipe/graphs/beauty/subgraphs:face_renderer_cpu",
+        "//mediapipe/graphs/face_mesh/subgraphs:face_renderer_cpu",
         "//mediapipe/modules/face_landmark:face_landmark_front_gpu",
     ],
 )

@@ -87,7 +88,7 @@ cc_library(

 mediapipe_binary_graph(
     name = "beauty_mobile_gpu_binary_graph",
-    graph = "beauty_mobile.pbtxt",
+    graph = "beauty_mobile_gpu.pbtxt",
     output_name = "beauty_mobile_gpu.binarypb",
     deps = [":mobile_calculators"],
 )

@@ -44,6 +44,13 @@ node {
   }
 }

+# Defines side packets for further use in the graph.
+node {
+  calculator: "GpuBufferToImageFrameCalculator"
+  input_stream: "throttled_input_video"
+  output_stream: "throttled_input_video_cpu"
+}
+
 # Subgraph that detects faces and corresponding landmarks.
 node {
   calculator: "FaceLandmarkFrontGpu"

@@ -53,14 +60,6 @@ node {
   output_stream: "LANDMARKS:multi_face_landmarks"
 }

-# Defines side packets for further use in the graph.
-node {
-  calculator: "GpuBufferToImageFrameCalculator"
-  input_stream: "throttled_input_video"
-  output_stream: "throttled_input_video_cpu"
-}
-
-
 # Subgraph that renders face-landmark annotation onto the input image.
 node {
   calculator: "FaceRendererCpu"

@@ -1,68 +0,0 @@
# MediaPipe graph that performs face mesh with TensorFlow Lite on GPU.

# GPU buffer. (GpuBuffer)
input_stream: "input_video"

# Max number of faces to detect/process. (int)
input_side_packet: "num_faces"

# Output image with rendered results. (GpuBuffer)
output_stream: "output_video"
# Collection of detected/processed faces, each represented as a list of
# landmarks. (std::vector<NormalizedLandmarkList>)
output_stream: "multi_face_landmarks"

# Throttles the images flowing downstream for flow control. It passes through
# the very first incoming image unaltered, and waits for downstream nodes
# (calculators and subgraphs) in the graph to finish their tasks before it
# passes through another image. All images that come in while waiting are
# dropped, limiting the number of in-flight images in most part of the graph to
# 1. This prevents the downstream nodes from queuing up incoming images and data
# excessively, which leads to increased latency and memory usage, unwanted in
# real-time mobile applications. It also eliminates unnecessarily computation,
# e.g., the output produced by a node may get dropped downstream if the
# subsequent nodes are still busy processing previous inputs.
node {
  calculator: "FlowLimiterCalculator"
  input_stream: "input_video"
  input_stream: "FINISHED:output_video"
  input_stream_info: {
    tag_index: "FINISHED"
    back_edge: true
  }
  output_stream: "throttled_input_video"
}

# Defines side packets for further use in the graph.
node {
  calculator: "ConstantSidePacketCalculator"
  output_side_packet: "PACKET:with_attention"
  node_options: {
    [type.googleapis.com/mediapipe.ConstantSidePacketCalculatorOptions]: {
      packet { bool_value: true }
    }
  }
}

# Subgraph that detects faces and corresponding landmarks.
node {
  calculator: "FaceLandmarkFrontGpu"
  input_stream: "IMAGE:throttled_input_video"
  input_side_packet: "NUM_FACES:num_faces"
  input_side_packet: "WITH_ATTENTION:with_attention"
  output_stream: "LANDMARKS:multi_face_landmarks"
  output_stream: "ROIS_FROM_LANDMARKS:face_rects_from_landmarks"
  output_stream: "DETECTIONS:face_detections"
  output_stream: "ROIS_FROM_DETECTIONS:face_rects_from_detections"
}

# Subgraph that renders face-landmark annotation onto the input image.
node {
  calculator: "FaceRendererGpuOver"
  input_stream: "IMAGE:throttled_input_video"
  input_stream: "LANDMARKS:multi_face_landmarks"
  input_stream: "NORM_RECTS:face_rects_from_landmarks"
  input_stream: "DETECTIONS:face_detections"
  output_stream: "IMAGE:output_video"
}

@@ -1,66 +0,0 @@
# MediaPipe graph that performs face mesh with TensorFlow Lite on GPU.

# Input image. (GpuBuffer)
input_stream: "input_video"

# Output image with rendered results. (GpuBuffer)
output_stream: "output_video"
# Collection of detected/processed faces, each represented as a list of
# landmarks. (std::vector<NormalizedLandmarkList>)
output_stream: "multi_face_landmarks"

# Throttles the images flowing downstream for flow control. It passes through
# the very first incoming image unaltered, and waits for downstream nodes
# (calculators and subgraphs) in the graph to finish their tasks before it
# passes through another image. All images that come in while waiting are
# dropped, limiting the number of in-flight images in most part of the graph to
# 1. This prevents the downstream nodes from queuing up incoming images and data
# excessively, which leads to increased latency and memory usage, unwanted in
# real-time mobile applications. It also eliminates unnecessarily computation,
# e.g., the output produced by a node may get dropped downstream if the
# subsequent nodes are still busy processing previous inputs.
node {
  calculator: "FlowLimiterCalculator"
  input_stream: "input_video"
  input_stream: "FINISHED:output_video"
  input_stream_info: {
    tag_index: "FINISHED"
    back_edge: true
  }
  output_stream: "throttled_input_video"
}

# Defines side packets for further use in the graph.
node {
  calculator: "ConstantSidePacketCalculator"
  output_side_packet: "PACKET:0:num_faces"
  output_side_packet: "PACKET:1:with_attention"
  node_options: {
    [type.googleapis.com/mediapipe.ConstantSidePacketCalculatorOptions]: {
      packet { int_value: 1 }
      packet { bool_value: true }
    }
  }
}

# Subgraph that detects faces and corresponding landmarks.
node {
  calculator: "FaceLandmarkFrontGpu"
  input_stream: "IMAGE:throttled_input_video"
  input_side_packet: "NUM_FACES:num_faces"
  input_side_packet: "WITH_ATTENTION:with_attention"
  output_stream: "LANDMARKS:multi_face_landmarks"
  output_stream: "ROIS_FROM_LANDMARKS:face_rects_from_landmarks"
  output_stream: "DETECTIONS:face_detections"
  output_stream: "ROIS_FROM_DETECTIONS:face_rects_from_detections"
}

# Subgraph that renders face-landmark annotation onto the input image.
node {
  calculator: "FaceRendererGpu"
  input_stream: "IMAGE:throttled_input_video"
  input_stream: "LANDMARKS:multi_face_landmarks"
  input_stream: "NORM_RECTS:face_rects_from_landmarks"
  input_stream: "DETECTIONS:face_detections"
  output_stream: "IMAGE:output_video"
}

@@ -1,65 +0,0 @@
# MediaPipe face mesh rendering subgraph.

type: "FaceRendererCpu"

# CPU image. (ImageFrame)
input_stream: "IMAGE:input_image"
# Collection of detected/predicted faces, each represented as a list of
# landmarks. (std::vector<NormalizedLandmarkList>)
input_stream: "LANDMARKS:multi_face_landmarks"

# CPU image with rendered data. (ImageFrame)
output_stream: "IMAGE:output_image"

node {
  calculator: "ImagePropertiesCalculator"
  input_stream: "IMAGE:input_image"
  output_stream: "SIZE:image_size"
}

# Outputs each element of multi_face_landmarks at a fake timestamp for the rest
# of the graph to process. At the end of the loop, outputs the BATCH_END
# timestamp for downstream calculators to inform them that all elements in the
# vector have been processed.
node {
  calculator: "BeginLoopNormalizedLandmarkListVectorCalculator"
  input_stream: "ITERABLE:multi_face_landmarks"
  output_stream: "ITEM:face_landmarks"
  output_stream: "BATCH_END:landmark_timestamp"
}

# Converts landmarks to drawing primitives for annotation overlay.
node {
  calculator: "FaceLandmarksToRenderDataCalculator"
  input_stream: "NORM_LANDMARKS:face_landmarks"
  output_stream: "RENDER_DATA:landmarks_render_data"
  node_options: {
    [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] {
      landmark_color { r: 255 g: 0 b: 0 }
      connection_color { r: 0 g: 255 b: 0 }
      thickness: 2
      visualize_landmark_depth: false
    }
  }
}

# Collects a RenderData object for each hand into a vector. Upon receiving the
# BATCH_END timestamp, outputs the vector of RenderData at the BATCH_END
# timestamp.
node {
  calculator: "EndLoopRenderDataCalculator"
  input_stream: "ITEM:landmarks_render_data"
  input_stream: "BATCH_END:landmark_timestamp"
  output_stream: "ITERABLE:multi_face_landmarks_render_data"
}

# Draws annotations and overlays them on top of the input images.
node {
  calculator: "AnnotationOverlayCalculator"
  input_stream: "IMAGE:input_image"
  input_stream: "VECTOR:0:multi_face_landmarks_render_data"
  output_stream: "IMAGE:output_image"
}

@@ -1,96 +0,0 @@
# MediaPipe face mesh rendering subgraph.

type: "FaceRendererGpu"

# GPU image. (GpuBuffer)
input_stream: "IMAGE:input_image"
# Collection of detected/predicted faces, each represented as a list of
# landmarks. (std::vector<NormalizedLandmarkList>)
input_stream: "LANDMARKS:multi_face_landmarks"
# Regions of interest calculated based on palm detections.
# (std::vector<NormalizedRect>)
input_stream: "NORM_RECTS:rects"
# Detected palms. (std::vector<Detection>)
input_stream: "DETECTIONS:detections"

# GPU image with rendered data. (GpuBuffer)
output_stream: "IMAGE:output_image"

node {
  calculator: "ImagePropertiesCalculator"
  input_stream: "IMAGE_GPU:input_image"
  output_stream: "SIZE:image_size"
}

# Converts detections to drawing primitives for annotation overlay.
node {
  calculator: "DetectionsToRenderDataCalculator"
  input_stream: "DETECTIONS:detections"
  output_stream: "RENDER_DATA:detections_render_data"
  node_options: {
    [type.googleapis.com/mediapipe.DetectionsToRenderDataCalculatorOptions] {
      thickness: 4.0
      color { r: 0 g: 255 b: 0 }
    }
  }
}

# Outputs each element of multi_face_landmarks at a fake timestamp for the rest
# of the graph to process. At the end of the loop, outputs the BATCH_END
# timestamp for downstream calculators to inform them that all elements in the
# vector have been processed.
node {
  calculator: "BeginLoopNormalizedLandmarkListVectorCalculator"
  input_stream: "ITERABLE:multi_face_landmarks"
  output_stream: "ITEM:face_landmarks"
  output_stream: "BATCH_END:end_timestamp"
}

# Converts landmarks to drawing primitives for annotation overlay.
node {
  calculator: "FaceLandmarksToRenderDataCalculator"
  input_stream: "NORM_LANDMARKS:face_landmarks"
  output_stream: "RENDER_DATA:landmarks_render_data"
  node_options: {
    [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] {
      landmark_color { r: 255 g: 0 b: 0 }
      connection_color { r: 0 g: 255 b: 0 }
      thickness: 2
      visualize_landmark_depth: false
    }
  }
}

# Collects a RenderData object for each hand into a vector. Upon receiving the
# BATCH_END timestamp, outputs the vector of RenderData at the BATCH_END
# timestamp.
node {
  calculator: "EndLoopRenderDataCalculator"
  input_stream: "ITEM:landmarks_render_data"
  input_stream: "BATCH_END:end_timestamp"
  output_stream: "ITERABLE:multi_face_landmarks_render_data"
}

# Converts normalized rects to drawing primitives for annotation overlay.
node {
  calculator: "RectToRenderDataCalculator"
  input_stream: "NORM_RECTS:rects"
  output_stream: "RENDER_DATA:rects_render_data"
  node_options: {
    [type.googleapis.com/mediapipe.RectToRenderDataCalculatorOptions] {
      filled: false
      color { r: 255 g: 0 b: 0 }
      thickness: 4.0
    }
  }
}

# Draws annotations and overlays them on top of the input images.
node {
  calculator: "AnnotationOverlayCalculator"
  input_stream: "IMAGE_GPU:input_image"
  #input_stream: "detections_render_data"
  input_stream: "VECTOR:0:multi_face_landmarks_render_data"
  #input_stream: "rects_render_data"
  output_stream: "IMAGE_GPU:output_image"
}

@@ -1,96 +0,0 @@
# MediaPipe face mesh rendering subgraph.

type: "FaceRendererGpu"

# GPU image. (GpuBuffer)
input_stream: "IMAGE:input_image"
# Collection of detected/predicted faces, each represented as a list of
# landmarks. (std::vector<NormalizedLandmarkList>)
input_stream: "LANDMARKS:multi_face_landmarks"
# Regions of interest calculated based on palm detections.
# (std::vector<NormalizedRect>)
input_stream: "NORM_RECTS:rects"
# Detected palms. (std::vector<Detection>)
input_stream: "DETECTIONS:detections"

# GPU image with rendered data. (GpuBuffer)
output_stream: "IMAGE:output_image"

node {
  calculator: "ImagePropertiesCalculator"
  input_stream: "IMAGE_GPU:input_image"
  output_stream: "SIZE:image_size"
}

# Converts detections to drawing primitives for annotation overlay.
node {
  calculator: "DetectionsToRenderDataCalculator"
  input_stream: "DETECTIONS:detections"
  output_stream: "RENDER_DATA:detections_render_data"
  node_options: {
    [type.googleapis.com/mediapipe.DetectionsToRenderDataCalculatorOptions] {
      thickness: 4.0
      color { r: 0 g: 255 b: 0 }
    }
  }
}

# Outputs each element of multi_face_landmarks at a fake timestamp for the rest
# of the graph to process. At the end of the loop, outputs the BATCH_END
# timestamp for downstream calculators to inform them that all elements in the
# vector have been processed.
node {
  calculator: "BeginLoopNormalizedLandmarkListVectorCalculator"
  input_stream: "ITERABLE:multi_face_landmarks"
  output_stream: "ITEM:face_landmarks"
  output_stream: "BATCH_END:end_timestamp"
}

# Converts landmarks to drawing primitives for annotation overlay.
node {
  calculator: "FaceLandmarksToRenderDataCalculator"
  input_stream: "NORM_LANDMARKS:face_landmarks"
  output_stream: "RENDER_DATA:landmarks_render_data"
  node_options: {
    [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] {
      landmark_color { r: 255 g: 0 b: 0 }
      connection_color { r: 0 g: 255 b: 0 }
      thickness: 2
      visualize_landmark_depth: false
    }
  }
}

# Collects a RenderData object for each hand into a vector. Upon receiving the
# BATCH_END timestamp, outputs the vector of RenderData at the BATCH_END
# timestamp.
node {
  calculator: "EndLoopRenderDataCalculator"
  input_stream: "ITEM:landmarks_render_data"
  input_stream: "BATCH_END:end_timestamp"
  output_stream: "ITERABLE:multi_face_landmarks_render_data"
}

# Converts normalized rects to drawing primitives for annotation overlay.
#node {
#  calculator: "RectToRenderDataCalculator"
#  input_stream: "NORM_RECTS:rects"
#  output_stream: "RENDER_DATA:rects_render_data"
#  node_options: {
#    [type.googleapis.com/mediapipe.RectToRenderDataCalculatorOptions] {
#      filled: false
#      color { r: 255 g: 0 b: 0 }
#      thickness: 4.0
#    }
#  }
#}

# Draws annotations and overlays them on top of the input images.
node {
  calculator: "AnnotationOverlayCalculator"
  input_stream: "IMAGE_GPU:input_image"
  #input_stream: "detections_render_data"
  input_stream: "VECTOR:0:multi_face_landmarks_render_data"
  #input_stream: "rects_render_data"
  output_stream: "IMAGE_GPU:output_image"
}

Binary file not shown.