Added example for creating face mesh shared library for Android applications
This commit is contained in:
parent 374f5e2e7e
commit e5277c1564

@@ -0,0 +1,97 @@
# Copyright 2019 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

licenses(["notice"])

package(default_visibility = ["//visibility:private"])

cc_binary(
    name = "libmediapipe_jni.so",
    linkshared = 1,
    linkstatic = 1,
    deps = [
        "//mediapipe/graphs/face_mesh:mobile_calculators",
        "//mediapipe/java/com/google/mediapipe/framework/jni:mediapipe_framework_jni",
    ],
)

cc_library(
    name = "mediapipe_jni_lib",
    srcs = [":libmediapipe_jni.so"],
    alwayslink = 1,
)

cc_binary(
    name = "libmediapipe.so",
    linkshared = 1,
    linkstatic = 1,
    srcs = ["face_mesh_lib.cpp", "face_mesh_lib.h"],
    deps = [
        "//mediapipe/framework:calculator_framework",
        "//mediapipe/framework/formats:image_frame",
        "//mediapipe/framework/formats:image_frame_opencv",
        "//mediapipe/framework/formats:landmark_cc_proto",
        "//mediapipe/framework/port:file_helpers",
        "//mediapipe/framework/port:opencv_highgui",
        "//mediapipe/framework/port:opencv_imgproc",
        "//mediapipe/framework/port:opencv_video",
        "//mediapipe/framework/port:parse_text_proto",
        "//mediapipe/framework/port:status",
        "@com_google_absl//absl/flags:flag",
        "@com_google_absl//absl/flags:parse",

        "//mediapipe/calculators/core:constant_side_packet_calculator",
        "//mediapipe/calculators/core:flow_limiter_calculator",
        "//mediapipe/calculators/tflite:tflite_model_calculator",
        "//mediapipe/calculators/util:local_file_contents_calculator",
        "//mediapipe/modules/face_landmark:face_landmark_front_side_model_cpu_with_face_counter",
    ],
)

cc_library(
    name = "mediapipe_lib",
    srcs = [":libmediapipe.so"],
    alwayslink = 1,
)

android_binary(
    name = "facemeshgpu",
    srcs = glob(["*.java"]),
    assets = [
        "//mediapipe/graphs/face_mesh:face_mesh_mobile_gpu.binarypb",
        "//mediapipe/modules/face_landmark:face_landmark.tflite",
        "//mediapipe/modules/face_detection:face_detection_short_range.tflite",
    ],
    assets_dir = "",
    manifest = "//mediapipe/examples/android/src/java/com/google/mediapipe/apps/basic:AndroidManifest.xml",
    manifest_values = {
        "applicationId": "com.google.mediapipe.apps.facemeshgpu",
        "appName": "Face Mesh",
        "mainActivity": ".MainActivity",
        "cameraFacingFront": "True",
        "binaryGraphName": "face_mesh_mobile_gpu.binarypb",
        "inputVideoStreamName": "input_video",
        "outputVideoStreamName": "output_video",
        "flipFramesVertically": "True",
        "converterNumBuffers": "2",
    },
    multidex = "native",
    deps = [
        ":mediapipe_jni_lib",
        ":mediapipe_lib",
        "//mediapipe/examples/android/src/java/com/google/mediapipe/apps/basic:basic_lib",
        "//mediapipe/framework/formats:landmark_java_proto_lite",
        "//mediapipe/java/com/google/mediapipe/framework:android_framework",
    ],
)
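Note: because the libmediapipe.so target above is built with linkshared = 1, the output is an ordinary shared object that native code can also load at runtime. The following is a minimal sketch (not part of this commit) of such a smoke test; the library path is illustrative and assumes the Bazel output has been copied next to the executable.

#include <cstdio>

#include <dlfcn.h>

int main() {
  // Illustrative path; adjust to wherever the bazel-built .so was copied.
  void *handle = dlopen("./libmediapipe.so", RTLD_NOW | RTLD_LOCAL);
  if (handle == nullptr) {
    std::fprintf(stderr, "dlopen failed: %s\n", dlerror());
    return 1;
  }

  // The extern "C" entry points keep unmangled names, so dlsym can find them.
  using LandmarksNumFn = int (*)();
  auto landmarks_num = reinterpret_cast<LandmarksNumFn>(
      dlsym(handle, "MPFaceMeshDetectorLandmarksNum"));
  if (landmarks_num != nullptr) {
    std::printf("Landmarks per face: %d\n", landmarks_num());  // Expected 468.
  }

  dlclose(handle);
  return 0;
}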

@@ -0,0 +1,93 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package com.google.mediapipe.apps.facemeshgpu;

import android.os.Bundle;
import android.util.Log;
import com.google.mediapipe.formats.proto.LandmarkProto.NormalizedLandmark;
import com.google.mediapipe.formats.proto.LandmarkProto.NormalizedLandmarkList;
import com.google.mediapipe.framework.AndroidPacketCreator;
import com.google.mediapipe.framework.Packet;
import com.google.mediapipe.framework.PacketGetter;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

/** Main activity of MediaPipe face mesh app. */
public class MainActivity extends com.google.mediapipe.apps.basic.MainActivity {
  private static final String TAG = "MainActivity";

  private static final String INPUT_NUM_FACES_SIDE_PACKET_NAME = "num_faces";
  private static final String OUTPUT_LANDMARKS_STREAM_NAME = "multi_face_landmarks";
  // Max number of faces to detect/process.
  private static final int NUM_FACES = 1;

  @Override
  protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);

    AndroidPacketCreator packetCreator = processor.getPacketCreator();
    Map<String, Packet> inputSidePackets = new HashMap<>();
    inputSidePackets.put(INPUT_NUM_FACES_SIDE_PACKET_NAME, packetCreator.createInt32(NUM_FACES));
    processor.setInputSidePackets(inputSidePackets);

    // To show verbose logging, run:
    // adb shell setprop log.tag.MainActivity VERBOSE
    if (Log.isLoggable(TAG, Log.VERBOSE)) {
      processor.addPacketCallback(
          OUTPUT_LANDMARKS_STREAM_NAME,
          (packet) -> {
            Log.v(TAG, "Received multi face landmarks packet.");
            List<NormalizedLandmarkList> multiFaceLandmarks =
                PacketGetter.getProtoVector(packet, NormalizedLandmarkList.parser());
            Log.v(
                TAG,
                "[TS:"
                    + packet.getTimestamp()
                    + "] "
                    + getMultiFaceLandmarksDebugString(multiFaceLandmarks));
          });
    }
  }

  private static String getMultiFaceLandmarksDebugString(
      List<NormalizedLandmarkList> multiFaceLandmarks) {
    if (multiFaceLandmarks.isEmpty()) {
      return "No face landmarks";
    }
    String multiFaceLandmarksStr = "Number of faces detected: " + multiFaceLandmarks.size() + "\n";
    int faceIndex = 0;
    for (NormalizedLandmarkList landmarks : multiFaceLandmarks) {
      multiFaceLandmarksStr +=
          "\t#Face landmarks for face[" + faceIndex + "]: " + landmarks.getLandmarkCount() + "\n";
      int landmarkIndex = 0;
      for (NormalizedLandmark landmark : landmarks.getLandmarkList()) {
        multiFaceLandmarksStr +=
            "\t\tLandmark ["
                + landmarkIndex
                + "]: ("
                + landmark.getX()
                + ", "
                + landmark.getY()
                + ", "
                + landmark.getZ()
                + ")\n";
        ++landmarkIndex;
      }
      ++faceIndex;
    }
    return multiFaceLandmarksStr;
  }
}

@@ -0,0 +1,392 @@
#include "face_mesh_lib.h"

MPFaceMeshDetector::MPFaceMeshDetector(int numFaces,
                                       const char *face_detection_model_path,
                                       const char *face_landmark_model_path) {
  const auto status = InitFaceMeshDetector(numFaces, face_detection_model_path,
                                           face_landmark_model_path);
  if (!status.ok()) {
    LOG(INFO) << "Failed constructing FaceMeshDetector.";
    LOG(INFO) << status.message();
  }
}

absl::Status
MPFaceMeshDetector::InitFaceMeshDetector(int numFaces,
                                         const char *face_detection_model_path,
                                         const char *face_landmark_model_path) {
  numFaces = std::max(numFaces, 1);

  if (face_detection_model_path == nullptr) {
    face_detection_model_path =
        "mediapipe/modules/face_detection/face_detection_short_range.tflite";
  }

  if (face_landmark_model_path == nullptr) {
    face_landmark_model_path =
        "mediapipe/modules/face_landmark/face_landmark.tflite";
  }

  // Prepare graph config.
  auto preparedGraphConfig = absl::StrReplaceAll(
      graphConfig, {{"$numFaces", std::to_string(numFaces)}});
  preparedGraphConfig = absl::StrReplaceAll(
      preparedGraphConfig,
      {{"$faceDetectionModelPath", face_detection_model_path}});
  preparedGraphConfig = absl::StrReplaceAll(
      preparedGraphConfig,
      {{"$faceLandmarkModelPath", face_landmark_model_path}});

  LOG(INFO) << "Get calculator graph config contents: " << preparedGraphConfig;

  mediapipe::CalculatorGraphConfig config =
      mediapipe::ParseTextProtoOrDie<mediapipe::CalculatorGraphConfig>(
          preparedGraphConfig);
  LOG(INFO) << "Initialize the calculator graph.";

  MP_RETURN_IF_ERROR(graph.Initialize(config));

  LOG(INFO) << "Start running the calculator graph.";

  ASSIGN_OR_RETURN(mediapipe::OutputStreamPoller landmarks_poller,
                   graph.AddOutputStreamPoller(kOutputStream_landmarks));
  ASSIGN_OR_RETURN(mediapipe::OutputStreamPoller face_count_poller,
                   graph.AddOutputStreamPoller(kOutputStream_faceCount));
  ASSIGN_OR_RETURN(
      mediapipe::OutputStreamPoller face_rects_from_landmarks_poller,
      graph.AddOutputStreamPoller(kOutputStream_face_rects_from_landmarks));

  landmarks_poller_ptr = std::make_unique<mediapipe::OutputStreamPoller>(
      std::move(landmarks_poller));
  face_count_poller_ptr = std::make_unique<mediapipe::OutputStreamPoller>(
      std::move(face_count_poller));
  face_rects_from_landmarks_poller_ptr =
      std::make_unique<mediapipe::OutputStreamPoller>(
          std::move(face_rects_from_landmarks_poller));

  MP_RETURN_IF_ERROR(graph.StartRun({}));

  LOG(INFO) << "MPFaceMeshDetector constructed successfully.";

  return absl::OkStatus();
}

absl::Status
MPFaceMeshDetector::DetectFacesWithStatus(const cv::Mat &camera_frame,
                                          cv::Rect *multi_face_bounding_boxes,
                                          int *numFaces) {
  if (!numFaces || !multi_face_bounding_boxes) {
    return absl::InvalidArgumentError(
        "MPFaceMeshDetector::DetectFacesWithStatus requires non-null pointers "
        "to save results data.");
  }

  // Reset face counts.
  *numFaces = 0;
  face_count = 0;

  // Wrap Mat into an ImageFrame.
  auto input_frame = absl::make_unique<mediapipe::ImageFrame>(
      mediapipe::ImageFormat::SRGB, camera_frame.cols, camera_frame.rows,
      mediapipe::ImageFrame::kDefaultAlignmentBoundary);
  cv::Mat input_frame_mat = mediapipe::formats::MatView(input_frame.get());
  camera_frame.copyTo(input_frame_mat);

  // Send image packet into the graph.
  static size_t timestamp = 0;
  MP_RETURN_IF_ERROR(graph.AddPacketToInputStream(
      kInputStream, mediapipe::Adopt(input_frame.release())
                        .At(mediapipe::Timestamp(timestamp++))));

  // Get face count.
  mediapipe::Packet face_count_packet;
  if (!face_count_poller_ptr ||
      !face_count_poller_ptr->Next(&face_count_packet)) {
    return absl::CancelledError(
        "Failed during getting next face_count_packet.");
  }

  auto &face_count_val = face_count_packet.Get<int>();

  if (face_count_val <= 0) {
    return absl::OkStatus();
  }

  // Get face bounding boxes.
  mediapipe::Packet face_rects_from_landmarks_packet;
  if (!face_rects_from_landmarks_poller_ptr ||
      !face_rects_from_landmarks_poller_ptr->Next(
          &face_rects_from_landmarks_packet)) {
    return absl::CancelledError(
        "Failed during getting next face_rects_from_landmarks_packet.");
  }

  auto &face_bounding_boxes =
      face_rects_from_landmarks_packet
          .Get<::std::vector<::mediapipe::NormalizedRect>>();

  image_width = camera_frame.cols;
  image_height = camera_frame.rows;
  const auto image_width_f = static_cast<float>(image_width);
  const auto image_height_f = static_cast<float>(image_height);

  // Convert vector<NormalizedRect> (center based Rects) to cv::Rect*
  // (leftTop based Rects).
  for (int i = 0; i < face_count_val; ++i) {
    const auto &normalized_bounding_box = face_bounding_boxes[i];
    auto &bounding_box = multi_face_bounding_boxes[i];

    const auto width =
        static_cast<int>(normalized_bounding_box.width() * image_width_f);
    const auto height =
        static_cast<int>(normalized_bounding_box.height() * image_height_f);

    bounding_box.x =
        static_cast<int>(normalized_bounding_box.x_center() * image_width_f) -
        (width >> 1);
    bounding_box.y =
        static_cast<int>(normalized_bounding_box.y_center() * image_height_f) -
        (height >> 1);
    bounding_box.width = width;
    bounding_box.height = height;
  }

  // Get face landmarks.
  if (!landmarks_poller_ptr ||
      !landmarks_poller_ptr->Next(&face_landmarks_packet)) {
    return absl::CancelledError("Failed during getting next landmarks_packet.");
  }

  *numFaces = face_count_val;
  face_count = face_count_val;

  return absl::OkStatus();
}

void MPFaceMeshDetector::DetectFaces(const cv::Mat &camera_frame,
                                     cv::Rect *multi_face_bounding_boxes,
                                     int *numFaces) {
  const auto status =
      DetectFacesWithStatus(camera_frame, multi_face_bounding_boxes, numFaces);
  if (!status.ok()) {
    LOG(INFO) << "MPFaceMeshDetector::DetectFaces failed: " << status.message();
  }
}

absl::Status MPFaceMeshDetector::DetectLandmarksWithStatus(
    cv::Point2f **multi_face_landmarks) {

  if (face_landmarks_packet.IsEmpty()) {
    return absl::CancelledError("Face landmarks packet is empty.");
  }

  auto &face_landmarks =
      face_landmarks_packet
          .Get<::std::vector<::mediapipe::NormalizedLandmarkList>>();

  const auto image_width_f = static_cast<float>(image_width);
  const auto image_height_f = static_cast<float>(image_height);

  // Convert landmarks to cv::Point2f**.
  for (int i = 0; i < face_count; ++i) {
    const auto &normalizedLandmarkList = face_landmarks[i];
    const auto landmarks_num = normalizedLandmarkList.landmark_size();

    if (landmarks_num != kLandmarksNum) {
      return absl::CancelledError("Detected unexpected landmarks number.");
    }

    auto &face_landmarks = multi_face_landmarks[i];

    for (int j = 0; j < landmarks_num; ++j) {
      const auto &landmark = normalizedLandmarkList.landmark(j);
      face_landmarks[j].x = landmark.x() * image_width_f;
      face_landmarks[j].y = landmark.y() * image_height_f;
    }
  }

  return absl::OkStatus();
}

absl::Status MPFaceMeshDetector::DetectLandmarksWithStatus(
    cv::Point3f **multi_face_landmarks) {

  if (face_landmarks_packet.IsEmpty()) {
    return absl::CancelledError("Face landmarks packet is empty.");
  }

  auto &face_landmarks =
      face_landmarks_packet
          .Get<::std::vector<::mediapipe::NormalizedLandmarkList>>();

  const auto image_width_f = static_cast<float>(image_width);
  const auto image_height_f = static_cast<float>(image_height);

  // Convert landmarks to cv::Point3f**.
  for (int i = 0; i < face_count; ++i) {
    const auto &normalized_landmark_list = face_landmarks[i];
    const auto landmarks_num = normalized_landmark_list.landmark_size();

    if (landmarks_num != kLandmarksNum) {
      return absl::CancelledError("Detected unexpected landmarks number.");
    }

    auto &face_landmarks = multi_face_landmarks[i];

    for (int j = 0; j < landmarks_num; ++j) {
      const auto &landmark = normalized_landmark_list.landmark(j);
      face_landmarks[j].x = landmark.x() * image_width_f;
      face_landmarks[j].y = landmark.y() * image_height_f;
      face_landmarks[j].z = landmark.z();
    }
  }

  return absl::OkStatus();
}

void MPFaceMeshDetector::DetectLandmarks(cv::Point2f **multi_face_landmarks,
                                         int *numFaces) {
  *numFaces = 0;
  const auto status = DetectLandmarksWithStatus(multi_face_landmarks);
  if (!status.ok()) {
    LOG(INFO) << "MPFaceMeshDetector::DetectLandmarks failed: "
              << status.message();
  }
  *numFaces = face_count;
}

void MPFaceMeshDetector::DetectLandmarks(cv::Point3f **multi_face_landmarks,
                                         int *numFaces) {
  *numFaces = 0;
  const auto status = DetectLandmarksWithStatus(multi_face_landmarks);
  if (!status.ok()) {
    LOG(INFO) << "MPFaceMeshDetector::DetectLandmarks failed: "
              << status.message();
  }
  *numFaces = face_count;
}

extern "C" {
MPFaceMeshDetector *
MPFaceMeshDetectorConstruct(int numFaces, const char *face_detection_model_path,
                            const char *face_landmark_model_path) {
  return new MPFaceMeshDetector(numFaces, face_detection_model_path,
                                face_landmark_model_path);
}

void MPFaceMeshDetectorDestruct(MPFaceMeshDetector *detector) {
  delete detector;
}

void MPFaceMeshDetectorDetectFaces(
    MPFaceMeshDetector *detector, const cv::Mat &camera_frame,
    cv::Rect *multi_face_bounding_boxes, int *numFaces) {
  detector->DetectFaces(camera_frame, multi_face_bounding_boxes, numFaces);
}

void
MPFaceMeshDetectorDetect2DLandmarks(MPFaceMeshDetector *detector,
                                    cv::Point2f **multi_face_landmarks,
                                    int *numFaces) {
  detector->DetectLandmarks(multi_face_landmarks, numFaces);
}

void
MPFaceMeshDetectorDetect3DLandmarks(MPFaceMeshDetector *detector,
                                    cv::Point3f **multi_face_landmarks,
                                    int *numFaces) {
  detector->DetectLandmarks(multi_face_landmarks, numFaces);
}

int MPFaceMeshDetectorLandmarksNum() {
  return MPFaceMeshDetector::kLandmarksNum;
}
}

const std::string MPFaceMeshDetector::graphConfig = R"pb(
# MediaPipe graph that performs face mesh with TensorFlow Lite on CPU.

# Input image. (ImageFrame)
input_stream: "input_video"

# Collection of detected/processed faces, each represented as a list of
# landmarks. (std::vector<NormalizedLandmarkList>)
output_stream: "multi_face_landmarks"

# Detected faces count. (int)
output_stream: "face_count"

# Regions of interest calculated based on landmarks.
# (std::vector<NormalizedRect>)
output_stream: "face_rects_from_landmarks"

node {
  calculator: "FlowLimiterCalculator"
  input_stream: "input_video"
  input_stream: "FINISHED:face_count"
  input_stream_info: {
    tag_index: "FINISHED"
    back_edge: true
  }
  output_stream: "throttled_input_video"
}

# Defines side packets for further use in the graph.
node {
  calculator: "ConstantSidePacketCalculator"
  output_side_packet: "PACKET:num_faces"
  node_options: {
    [type.googleapis.com/mediapipe.ConstantSidePacketCalculatorOptions]: {
      packet { int_value: $numFaces }
    }
  }
}

# Defines side packets for further use in the graph.
node {
  calculator: "ConstantSidePacketCalculator"
  output_side_packet: "PACKET:face_detection_model_path"
  options: {
    [mediapipe.ConstantSidePacketCalculatorOptions.ext]: {
      packet { string_value: "$faceDetectionModelPath" }
    }
  }
}

# Defines side packets for further use in the graph.
node {
  calculator: "ConstantSidePacketCalculator"
  output_side_packet: "PACKET:face_landmark_model_path"
  node_options: {
    [type.googleapis.com/mediapipe.ConstantSidePacketCalculatorOptions]: {
      packet { string_value: "$faceLandmarkModelPath" }
    }
  }
}

node {
  calculator: "LocalFileContentsCalculator"
  input_side_packet: "FILE_PATH:0:face_detection_model_path"
  input_side_packet: "FILE_PATH:1:face_landmark_model_path"
  output_side_packet: "CONTENTS:0:face_detection_model_blob"
  output_side_packet: "CONTENTS:1:face_landmark_model_blob"
}

node {
  calculator: "TfLiteModelCalculator"
  input_side_packet: "MODEL_BLOB:face_detection_model_blob"
  output_side_packet: "MODEL:face_detection_model"
}
node {
  calculator: "TfLiteModelCalculator"
  input_side_packet: "MODEL_BLOB:face_landmark_model_blob"
  output_side_packet: "MODEL:face_landmark_model"
}

# Subgraph that detects faces and corresponding landmarks.
node {
  calculator: "FaceLandmarkFrontSideModelCpuWithFaceCounter"
  input_stream: "IMAGE:throttled_input_video"
  input_side_packet: "NUM_FACES:num_faces"
  input_side_packet: "MODEL:0:face_detection_model"
  input_side_packet: "MODEL:1:face_landmark_model"
  output_stream: "LANDMARKS:multi_face_landmarks"
  output_stream: "ROIS_FROM_LANDMARKS:face_rects_from_landmarks"
  output_stream: "DETECTIONS:face_detections"
  output_stream: "ROIS_FROM_DETECTIONS:face_rects_from_detections"
  output_stream: "FACE_COUNT_FROM_LANDMARKS:face_count"
}

)pb";

@@ -0,0 +1,93 @@
#ifndef FACE_MESH_LIBRARY_H
#define FACE_MESH_LIBRARY_H

#include <cstdlib>
#include <memory>
#include <string>

#include "absl/flags/flag.h"
#include "absl/flags/parse.h"
#include "absl/strings/str_replace.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/calculator_graph.h"
#include "mediapipe/framework/formats/image_frame.h"
#include "mediapipe/framework/formats/image_frame_opencv.h"
#include "mediapipe/framework/formats/landmark.pb.h"
#include "mediapipe/framework/formats/rect.pb.h"
#include "mediapipe/framework/output_stream_poller.h"
#include "mediapipe/framework/port/file_helpers.h"
#include "mediapipe/framework/port/opencv_highgui_inc.h"
#include "mediapipe/framework/port/opencv_imgproc_inc.h"
#include "mediapipe/framework/port/opencv_video_inc.h"
#include "mediapipe/framework/port/parse_text_proto.h"
#include "mediapipe/framework/port/status.h"

class MPFaceMeshDetector {
public:
  MPFaceMeshDetector(int numFaces, const char *face_detection_model_path,
                     const char *face_landmark_model_path);

  void DetectFaces(const cv::Mat &camera_frame,
                   cv::Rect *multi_face_bounding_boxes, int *numFaces);

  void DetectLandmarks(cv::Point2f **multi_face_landmarks, int *numFaces);
  void DetectLandmarks(cv::Point3f **multi_face_landmarks, int *numFaces);

  static constexpr auto kLandmarksNum = 468;

private:
  absl::Status InitFaceMeshDetector(int numFaces,
                                    const char *face_detection_model_path,
                                    const char *face_landmark_model_path);
  absl::Status DetectFacesWithStatus(const cv::Mat &camera_frame,
                                     cv::Rect *multi_face_bounding_boxes,
                                     int *numFaces);

  static constexpr auto kInputStream = "input_video";
  static constexpr auto kOutputStream_landmarks = "multi_face_landmarks";
  static constexpr auto kOutputStream_faceCount = "face_count";
  static constexpr auto kOutputStream_face_rects_from_landmarks =
      "face_rects_from_landmarks";

  static const std::string graphConfig;

  mediapipe::CalculatorGraph graph;

  std::unique_ptr<mediapipe::OutputStreamPoller> landmarks_poller_ptr;
  std::unique_ptr<mediapipe::OutputStreamPoller> face_count_poller_ptr;
  std::unique_ptr<mediapipe::OutputStreamPoller>
      face_rects_from_landmarks_poller_ptr;

  int face_count;
  int image_width;
  int image_height;
  mediapipe::Packet face_landmarks_packet;
};

#ifdef __cplusplus
extern "C" {
#endif
MPFaceMeshDetector *
MPFaceMeshDetectorConstruct(int numFaces, const char *face_detection_model_path,
                            const char *face_landmark_model_path);

void MPFaceMeshDetectorDestruct(MPFaceMeshDetector *detector);

void MPFaceMeshDetectorDetectFaces(
    MPFaceMeshDetector *detector, const cv::Mat &camera_frame,
    cv::Rect *multi_face_bounding_boxes, int *numFaces);

void
MPFaceMeshDetectorDetect2DLandmarks(MPFaceMeshDetector *detector,
                                    cv::Point2f **multi_face_landmarks,
                                    int *numFaces);
void
MPFaceMeshDetectorDetect3DLandmarks(MPFaceMeshDetector *detector,
                                    cv::Point3f **multi_face_landmarks,
                                    int *numFaces);

int MPFaceMeshDetectorLandmarksNum();

#ifdef __cplusplus
}  // extern "C"
#endif
#endif  // FACE_MESH_LIBRARY_H
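
For reference, a minimal native consumer of this C API might look like the sketch below (not part of this commit). It assumes the caller links directly against the library built from face_mesh_lib.cpp; the image path and the single-face setup are illustrative, and the frame is converted to RGB because the detector wraps it as an SRGB ImageFrame.

#include <vector>

#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>

#include "face_mesh_lib.h"

int main() {
  constexpr int kMaxNumFaces = 1;
  // Null model paths make the detector fall back to its default .tflite files.
  MPFaceMeshDetector *detector =
      MPFaceMeshDetectorConstruct(kMaxNumFaces, nullptr, nullptr);

  // The graph expects RGB frames; OpenCV loads BGR, so convert first.
  cv::Mat frame_bgr = cv::imread("face.jpg");  // Illustrative input image.
  cv::Mat frame_rgb;
  cv::cvtColor(frame_bgr, frame_rgb, cv::COLOR_BGR2RGB);

  // Run face detection; bounding boxes come back in pixel coordinates.
  std::vector<cv::Rect> face_boxes(kMaxNumFaces);
  int num_faces = 0;
  MPFaceMeshDetectorDetectFaces(detector, frame_rgb, face_boxes.data(),
                                &num_faces);

  if (num_faces > 0) {
    // Each face needs a caller-owned buffer of kLandmarksNum (468) 2D points.
    std::vector<cv::Point2f> landmarks(MPFaceMeshDetectorLandmarksNum());
    cv::Point2f *landmark_buffers[kMaxNumFaces] = {landmarks.data()};
    MPFaceMeshDetectorDetect2DLandmarks(detector, landmark_buffers, &num_faces);
  }

  MPFaceMeshDetectorDestruct(detector);
  return 0;
}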