Internal change

PiperOrigin-RevId: 515187906
MediaPipe Team 2023-03-08 17:33:36 -08:00 committed by Copybara-Service
parent 8d9f627fd9
commit c12eae229f
15 changed files with 6124 additions and 79 deletions

View File

@@ -322,10 +322,8 @@ class FaceDetectorGraph : public core::ModelTaskGraph {
// detection bounding boxes.
auto& detection_transformation =
graph.AddNode("DetectionTransformationCalculator");
detection_projection.Out(kDetectionsTag) >>
detection_transformation.In(kDetectionsTag);
preprocessing.Out(kImageSizeTag) >>
detection_transformation.In(kImageSizeTag);
face_detections >> detection_transformation.In(kDetectionsTag);
image_size >> detection_transformation.In(kImageSizeTag);
auto face_pixel_detections =
detection_transformation.Out(kPixelDetectionsTag)
.Cast<std::vector<Detection>>();

View File

@@ -97,7 +97,8 @@ void ConfigureFaceGeometryEnvGeneratorCalculator(
// SideInputs:
// ENVIRONMENT - ENVIRONMENT
// Environment that describes the current virtual scene. If not provided, a
// default environment will be used which can be applied to common webcam.
// default environment will be used which is good enough for most general
// use cases.
//
//
// Outputs:
@@ -142,7 +143,7 @@ class FaceGeometryFromLandmarksGraph : public Subgraph {
std::optional<SidePacket<Environment>> environment, Graph& graph) {
if (!environment.has_value()) {
// If there is no provided Environment, use a default environment which
// is good enough for most general use case.
// is good enough for most general use cases.
auto& env_generator = graph.AddNode(
"mediapipe.tasks.vision.face_geometry."
"FaceGeometryEnvGeneratorCalculator");

View File

@@ -142,3 +142,50 @@ cc_library(
"//mediapipe/tasks/cc/components/containers:landmark",
],
)
cc_library(
name = "face_landmarker_graph",
srcs = ["face_landmarker_graph.cc"],
deps = [
":face_landmarks_detector_graph",
"//mediapipe/calculators/core:begin_loop_calculator",
"//mediapipe/calculators/core:clip_vector_size_calculator_cc_proto",
"//mediapipe/calculators/core:end_loop_calculator",
"//mediapipe/calculators/core:gate_calculator",
"//mediapipe/calculators/core:gate_calculator_cc_proto",
"//mediapipe/calculators/core:pass_through_calculator",
"//mediapipe/calculators/core:previous_loopback_calculator",
"//mediapipe/calculators/image:image_properties_calculator",
"//mediapipe/calculators/util:association_calculator_cc_proto",
"//mediapipe/calculators/util:association_norm_rect_calculator",
"//mediapipe/calculators/util:collection_has_min_size_calculator",
"//mediapipe/calculators/util:collection_has_min_size_calculator_cc_proto",
"//mediapipe/framework/api2:builder",
"//mediapipe/framework/api2:port",
"//mediapipe/framework/formats:classification_cc_proto",
"//mediapipe/framework/formats:detection_cc_proto",
"//mediapipe/framework/formats:image",
"//mediapipe/framework/formats:landmark_cc_proto",
"//mediapipe/framework/formats:rect_cc_proto",
"//mediapipe/framework/formats:tensor",
"//mediapipe/framework/port:status",
"//mediapipe/tasks/cc:common",
"//mediapipe/tasks/cc/components/utils:gate",
"//mediapipe/tasks/cc/core:model_asset_bundle_resources",
"//mediapipe/tasks/cc/core:model_resources_cache",
"//mediapipe/tasks/cc/core:model_task_graph",
"//mediapipe/tasks/cc/core:utils",
"//mediapipe/tasks/cc/metadata/utils:zip_utils",
"//mediapipe/tasks/cc/vision/face_detector:face_detector_graph",
"//mediapipe/tasks/cc/vision/face_detector/proto:face_detector_graph_options_cc_proto",
"//mediapipe/tasks/cc/vision/face_geometry:face_geometry_from_landmarks_graph",
"//mediapipe/tasks/cc/vision/face_geometry/proto:environment_cc_proto",
"//mediapipe/tasks/cc/vision/face_geometry/proto:face_geometry_cc_proto",
"//mediapipe/tasks/cc/vision/face_landmarker/proto:face_blendshapes_graph_options_cc_proto",
"//mediapipe/tasks/cc/vision/face_landmarker/proto:face_landmarker_graph_options_cc_proto",
"//mediapipe/tasks/cc/vision/face_landmarker/proto:face_landmarks_detector_graph_options_cc_proto",
"//mediapipe/util:graph_builder_utils",
"@com_google_absl//absl/strings:str_format",
],
alwayslink = 1,
)

View File

@@ -0,0 +1,520 @@
/* Copyright 2023 The MediaPipe Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <memory>
#include <type_traits>
#include <utility>
#include <vector>
#include "absl/strings/str_format.h"
#include "mediapipe/calculators/core/clip_vector_size_calculator.pb.h"
#include "mediapipe/calculators/core/gate_calculator.pb.h"
#include "mediapipe/calculators/util/association_calculator.pb.h"
#include "mediapipe/calculators/util/collection_has_min_size_calculator.pb.h"
#include "mediapipe/framework/api2/builder.h"
#include "mediapipe/framework/api2/port.h"
#include "mediapipe/framework/formats/classification.pb.h"
#include "mediapipe/framework/formats/detection.pb.h"
#include "mediapipe/framework/formats/image.h"
#include "mediapipe/framework/formats/landmark.pb.h"
#include "mediapipe/framework/formats/rect.pb.h"
#include "mediapipe/framework/formats/tensor.h"
#include "mediapipe/framework/port/status_macros.h"
#include "mediapipe/tasks/cc/common.h"
#include "mediapipe/tasks/cc/components/utils/gate.h"
#include "mediapipe/tasks/cc/core/model_asset_bundle_resources.h"
#include "mediapipe/tasks/cc/core/model_resources_cache.h"
#include "mediapipe/tasks/cc/core/model_task_graph.h"
#include "mediapipe/tasks/cc/core/utils.h"
#include "mediapipe/tasks/cc/metadata/utils/zip_utils.h"
#include "mediapipe/tasks/cc/vision/face_detector/proto/face_detector_graph_options.pb.h"
#include "mediapipe/tasks/cc/vision/face_geometry/proto/environment.pb.h"
#include "mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry.pb.h"
#include "mediapipe/tasks/cc/vision/face_landmarker/proto/face_blendshapes_graph_options.pb.h"
#include "mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarker_graph_options.pb.h"
#include "mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarks_detector_graph_options.pb.h"
#include "mediapipe/util/graph_builder_utils.h"
namespace mediapipe {
namespace tasks {
namespace vision {
namespace face_landmarker {
namespace {
using ::mediapipe::NormalizedRect;
using ::mediapipe::api2::Input;
using ::mediapipe::api2::Output;
using ::mediapipe::api2::builder::Graph;
using ::mediapipe::api2::builder::SidePacket;
using ::mediapipe::api2::builder::Source;
using ::mediapipe::tasks::components::utils::DisallowIf;
using ::mediapipe::tasks::core::ModelAssetBundleResources;
using ::mediapipe::tasks::metadata::SetExternalFile;
using ::mediapipe::tasks::vision::face_detector::proto::
FaceDetectorGraphOptions;
using ::mediapipe::tasks::vision::face_geometry::proto::Environment;
using ::mediapipe::tasks::vision::face_geometry::proto::FaceGeometry;
using ::mediapipe::tasks::vision::face_landmarker::proto::
FaceLandmarkerGraphOptions;
using ::mediapipe::tasks::vision::face_landmarker::proto::
FaceLandmarksDetectorGraphOptions;
constexpr char kImageTag[] = "IMAGE";
constexpr char kNormRectTag[] = "NORM_RECT";
constexpr char kNormLandmarksTag[] = "NORM_LANDMARKS";
constexpr char kFaceRectsTag[] = "FACE_RECTS";
constexpr char kFaceRectsNextFrameTag[] = "FACE_RECTS_NEXT_FRAME";
constexpr char kExpandedFaceRectsTag[] = "EXPANDED_FACE_RECTS";
constexpr char kDetectionsTag[] = "DETECTIONS";
constexpr char kLoopTag[] = "LOOP";
constexpr char kPrevLoopTag[] = "PREV_LOOP";
constexpr char kMainTag[] = "MAIN";
constexpr char kIterableTag[] = "ITERABLE";
constexpr char kFaceLandmarksTag[] = "FACE_LANDMARKS";
constexpr char kFaceGeometryTag[] = "FACE_GEOMETRY";
constexpr char kEnvironmentTag[] = "ENVIRONMENT";
constexpr char kBlendshapesTag[] = "BLENDSHAPES";
constexpr char kImageSizeTag[] = "IMAGE_SIZE";
constexpr char kSizeTag[] = "SIZE";
constexpr char kFaceDetectorTFLiteName[] = "face_detector.tflite";
constexpr char kFaceLandmarksDetectorTFLiteName[] =
"face_landmarks_detector.tflite";
constexpr char kFaceBlendshapeTFLiteName[] = "face_blendshapes.tflite";
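// Holds the output streams produced by BuildFaceLandmarkerGraph below.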
struct FaceLandmarkerOutputs {
Source<std::vector<NormalizedLandmarkList>> landmark_lists;
Source<std::vector<NormalizedRect>> face_rects_next_frame;
Source<std::vector<NormalizedRect>> face_rects;
Source<std::vector<Detection>> detections;
std::optional<Source<std::vector<ClassificationList>>> face_blendshapes;
std::optional<Source<std::vector<FaceGeometry>>> face_geometry;
Source<Image> image;
};
// Sets the base options for the sub-task graphs.
absl::Status SetSubTaskBaseOptions(const ModelAssetBundleResources& resources,
FaceLandmarkerGraphOptions* options,
bool is_copy) {
auto* face_detector_graph_options =
options->mutable_face_detector_graph_options();
if (!face_detector_graph_options->base_options().has_model_asset()) {
ASSIGN_OR_RETURN(const auto face_detector_file,
resources.GetModelFile(kFaceDetectorTFLiteName));
SetExternalFile(face_detector_file,
face_detector_graph_options->mutable_base_options()
->mutable_model_asset(),
is_copy);
}
face_detector_graph_options->mutable_base_options()
->mutable_acceleration()
->CopyFrom(options->base_options().acceleration());
face_detector_graph_options->mutable_base_options()->set_use_stream_mode(
options->base_options().use_stream_mode());
auto* face_landmarks_detector_graph_options =
options->mutable_face_landmarks_detector_graph_options();
if (!face_landmarks_detector_graph_options->base_options()
.has_model_asset()) {
ASSIGN_OR_RETURN(const auto face_landmarks_detector_file,
resources.GetModelFile(kFaceLandmarksDetectorTFLiteName));
SetExternalFile(
face_landmarks_detector_file,
face_landmarks_detector_graph_options->mutable_base_options()
->mutable_model_asset(),
is_copy);
}
face_landmarks_detector_graph_options->mutable_base_options()
->mutable_acceleration()
->CopyFrom(options->base_options().acceleration());
face_landmarks_detector_graph_options->mutable_base_options()
->set_use_stream_mode(options->base_options().use_stream_mode());
absl::StatusOr<absl::string_view> face_blendshape_model =
resources.GetModelFile(kFaceBlendshapeTFLiteName);
if (face_blendshape_model.ok()) {
SetExternalFile(*face_blendshape_model,
face_landmarks_detector_graph_options
->mutable_face_blendshapes_graph_options()
->mutable_base_options()
->mutable_model_asset(),
is_copy);
face_landmarks_detector_graph_options
->mutable_face_blendshapes_graph_options()
->mutable_base_options()
->mutable_acceleration()
->mutable_xnnpack();
LOG(WARNING) << "Face blendshape model contains CPU only ops. Sets "
<< "FaceBlendshapesGraph acceleartion to Xnnpack.";
}
return absl::OkStatus();
}
} // namespace
// A "mediapipe.tasks.vision.face_landmarker.FaceLandmarkerGraph" performs face
// landmarks detection. The FaceLandmarkerGraph consists of three subgraphs:
// FaceDetectorGraph, MultipleFaceLandmarksDetectorGraph and
// FaceGeometryFromLandmarksGraph.
//
// MultipleFaceLandmarksDetectorGraph detects landmarks from bounding boxes
// produced by FaceDetectorGraph. FaceLandmarkerGraph tracks the landmarks over
// time, and skips the FaceDetectorGraph. If the tracking is lost or the
// detected faces are less than configured max number faces, FaceDetectorGraph
// would be triggered to detect faces.
//
// FaceGeometryFromLandmarksGraph finds the transformation from canonical face
// to the detected faces. This transformation is useful for renderring face
// effects on the detected faces. This subgraph is added if users request a
// FaceGeometry Tag.
//
//
// Inputs:
// IMAGE - Image
// Image to perform face landmarks detection on.
// NORM_RECT - NormalizedRect @Optional
// Describes image rotation and region of image to perform landmarks
// detection on. If not provided, whole image is used for face landmarks
// detection.
//
// SideInputs:
// ENVIRONMENT - ENVIRONMENT @optional
// Environment that describes the current virtual scene. If not provided, a
// default environment will be used which is good enough for most general
// use cases.
//
// Outputs:
// NORM_LANDMARKS - std::vector<NormalizedLandmarkList>
// Vector of detected face landmarks.
// BLENDSHAPES - std::vector<ClassificationList> @optional
// Blendshape classification, available when the given model asset contains
// the blendshapes model.
// All 52 blendshape coefficients:
// 0 - _neutral (ignore it)
// 1 - browDownLeft
// 2 - browDownRight
// 3 - browInnerUp
// 4 - browOuterUpLeft
// 5 - browOuterUpRight
// 6 - cheekPuff
// 7 - cheekSquintLeft
// 8 - cheekSquintRight
// 9 - eyeBlinkLeft
// 10 - eyeBlinkRight
// 11 - eyeLookDownLeft
// 12 - eyeLookDownRight
// 13 - eyeLookInLeft
// 14 - eyeLookInRight
// 15 - eyeLookOutLeft
// 16 - eyeLookOutRight
// 17 - eyeLookUpLeft
// 18 - eyeLookUpRight
// 19 - eyeSquintLeft
// 20 - eyeSquintRight
// 21 - eyeWideLeft
// 22 - eyeWideRight
// 23 - jawForward
// 24 - jawLeft
// 25 - jawOpen
// 26 - jawRight
// 27 - mouthClose
// 28 - mouthDimpleLeft
// 29 - mouthDimpleRight
// 30 - mouthFrownLeft
// 31 - mouthFrownRight
// 32 - mouthFunnel
// 33 - mouthLeft
// 34 - mouthLowerDownLeft
// 35 - mouthLowerDownRight
// 36 - mouthPressLeft
// 37 - mouthPressRight
// 38 - mouthPucker
// 39 - mouthRight
// 40 - mouthRollLower
// 41 - mouthRollUpper
// 42 - mouthShrugLower
// 43 - mouthShrugUpper
// 44 - mouthSmileLeft
// 45 - mouthSmileRight
// 46 - mouthStretchLeft
// 47 - mouthStretchRight
// 48 - mouthUpperUpLeft
// 49 - mouthUpperUpRight
// 50 - noseSneerLeft
// 51 - noseSneerRight
// FACE_GEOMETRY - std::vector<FaceGeometry> @optional
// A vector of 3D transform data for each detected face.
// FACE_RECTS_NEXT_FRAME - std::vector<NormalizedRect>
// Vector of the expanded rects enclosing the whole face RoI for landmark
// detection on the next frame.
// FACE_RECTS - std::vector<NormalizedRect>
// Detected face bounding boxes in normalized coordinates from face
// detection.
// DETECTIONS - std::vector<Detection>
// Detected faces, up to the maximum `num_faces` specified in the options.
// IMAGE - Image
// The input image that the face landmarker runs on, with the pixel data
// stored on the target storage (CPU vs GPU).
// All returned coordinates are in the unrotated and uncropped input image
// coordinate system.
//
// Example:
// node {
// calculator: "mediapipe.tasks.vision.face_landmarker.FaceLandmarkerGraph"
// input_stream: "IMAGE:image_in"
// input_stream: "NORM_RECT:norm_rect"
// output_stream: "NORM_LANDMARKS:face_landmarks"
// output_stream: "BLENDSHAPES:face_blendshapes"
// output_stream: "FACE_GEOMETRY:face_geometry"
// output_stream: "FACE_RECTS_NEXT_FRAME:face_rects_next_frame"
// output_stream: "FACE_RECTS:face_rects"
// output_stream: "DETECTIONS:detections"
// output_stream: "IMAGE:image_out"
// options {
// [mediapipe.tasks.vision.face_landmarker.proto.FaceLandmarkerGraphOptions.ext]
// {
// base_options {
// model_asset {
// file_name: "face_landmarker.task"
// }
// }
// face_detector_graph_options {
// min_detection_confidence: 0.5
// num_faces: 2
// }
// face_landmarks_detector_graph_options {
// min_detection_confidence: 0.5
// }
// }
// }
// }
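// Usage sketch for the BLENDSHAPES output (packet names hypothetical): each
// face yields one ClassificationList whose entries carry the index, score and
// label from the table above, with scores in [0, 1]. E.g.:
//   const auto& lists =
//       output_packets["blendshapes"].Get<std::vector<ClassificationList>>();
//   for (const ClassificationList& face : lists) {
//     for (const Classification& c : face.classification()) {
//       if (c.index() == 25 && c.score() > 0.5f) {
//         // 25 - jawOpen: the mouth is likely open for this face.
//       }
//     }
//   }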
class FaceLandmarkerGraph : public core::ModelTaskGraph {
public:
absl::StatusOr<CalculatorGraphConfig> GetConfig(
SubgraphContext* sc) override {
Graph graph;
if (sc->Options<FaceLandmarkerGraphOptions>()
.base_options()
.has_model_asset()) {
ASSIGN_OR_RETURN(
const auto* model_asset_bundle_resources,
CreateModelAssetBundleResources<FaceLandmarkerGraphOptions>(sc));
// Copies the file content instead of passing the pointer of the file in
// memory if the subgraph model resource service is not available.
MP_RETURN_IF_ERROR(SetSubTaskBaseOptions(
*model_asset_bundle_resources,
sc->MutableOptions<FaceLandmarkerGraphOptions>(),
!sc->Service(::mediapipe::tasks::core::kModelResourcesCacheService)
.IsAvailable()));
}
std::optional<SidePacket<Environment>> environment;
if (HasSideInput(sc->OriginalNode(), kEnvironmentTag)) {
environment = std::make_optional<>(
graph.SideIn(kEnvironmentTag).Cast<Environment>());
}
bool output_blendshapes = HasOutput(sc->OriginalNode(), kBlendshapesTag);
if (output_blendshapes && !sc->Options<FaceLandmarkerGraphOptions>()
.face_landmarks_detector_graph_options()
.has_face_blendshapes_graph_options()) {
return absl::InvalidArgumentError(absl::StrFormat(
"BLENDSHAPES Tag and blendshapes model must be both set. Get "
"BLENDSHAPES is set: %v, blendshapes "
"model "
"is set: %v",
output_blendshapes,
sc->Options<FaceLandmarkerGraphOptions>()
.face_landmarks_detector_graph_options()
.has_face_blendshapes_graph_options()));
}
bool output_geometry = HasOutput(sc->OriginalNode(), kFaceGeometryTag);
ASSIGN_OR_RETURN(
auto outs,
BuildFaceLandmarkerGraph(
*sc->MutableOptions<FaceLandmarkerGraphOptions>(),
graph[Input<Image>(kImageTag)],
graph[Input<NormalizedRect>::Optional(kNormRectTag)], environment,
output_blendshapes, output_geometry, graph));
outs.landmark_lists >>
graph[Output<std::vector<NormalizedLandmarkList>>(kNormLandmarksTag)];
outs.face_rects_next_frame >>
graph[Output<std::vector<NormalizedRect>>(kFaceRectsNextFrameTag)];
outs.face_rects >>
graph[Output<std::vector<NormalizedRect>>(kFaceRectsTag)];
outs.detections >> graph[Output<std::vector<Detection>>(kDetectionsTag)];
outs.image >> graph[Output<Image>(kImageTag)];
if (outs.face_blendshapes) {
*outs.face_blendshapes >>
graph[Output<std::vector<ClassificationList>>(kBlendshapesTag)];
}
if (outs.face_geometry) {
*outs.face_geometry >>
graph[Output<std::vector<FaceGeometry>>(kFaceGeometryTag)];
}
// TODO remove when support is fixed.
// As the mediapipe GraphBuilder currently doesn't support configuring
// InputStreamInfo, we modify the CalculatorGraphConfig proto directly.
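// Marking the LOOP input as a back edge tells the framework that this stream
// closes a cycle (face rects computed at frame N gate the detector at frame
// N+1), so the scheduler does not wait on it for the first timestamp.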
CalculatorGraphConfig config = graph.GetConfig();
for (int i = 0; i < config.node_size(); ++i) {
if (config.node(i).calculator() == "PreviousLoopbackCalculator") {
auto* info = config.mutable_node(i)->add_input_stream_info();
info->set_tag_index(kLoopTag);
info->set_back_edge(true);
break;
}
}
return config;
}
private:
// Adds a mediapipe face landmarker graph into the provided builder::Graph
// instance.
//
// tasks_options: the mediapipe tasks module FaceLandmarkerGraphOptions.
// image_in: (mediapipe::Image) stream to run face landmark detection on.
// norm_rect_in: (mediapipe::NormalizedRect) region of interest and rotation
// to run detection on.
// environment: optional side packet describing the virtual scene, used by
// the face geometry subgraph.
// output_blendshapes: whether to output the BLENDSHAPES stream.
// output_geometry: whether to output the FACE_GEOMETRY stream.
// graph: the mediapipe graph instance to be updated.
absl::StatusOr<FaceLandmarkerOutputs> BuildFaceLandmarkerGraph(
FaceLandmarkerGraphOptions& tasks_options, Source<Image> image_in,
Source<NormalizedRect> norm_rect_in,
std::optional<SidePacket<Environment>> environment,
bool output_blendshapes, bool output_geometry, Graph& graph) {
const int max_num_faces =
tasks_options.face_detector_graph_options().num_faces();
auto& face_detector =
graph.AddNode("mediapipe.tasks.vision.face_detector.FaceDetectorGraph");
face_detector.GetOptions<FaceDetectorGraphOptions>().Swap(
tasks_options.mutable_face_detector_graph_options());
auto& clip_face_rects =
graph.AddNode("ClipNormalizedRectVectorSizeCalculator");
clip_face_rects.GetOptions<ClipVectorSizeCalculatorOptions>()
.set_max_vec_size(max_num_faces);
auto clipped_face_rects = clip_face_rects.Out("");
auto& face_landmarks_detector_graph = graph.AddNode(
"mediapipe.tasks.vision.face_landmarker."
"MultiFaceLandmarksDetectorGraph");
face_landmarks_detector_graph
.GetOptions<FaceLandmarksDetectorGraphOptions>()
.Swap(tasks_options.mutable_face_landmarks_detector_graph_options());
image_in >> face_landmarks_detector_graph.In(kImageTag);
clipped_face_rects >> face_landmarks_detector_graph.In(kNormRectTag);
// TODO: add landmarks smoothing calculators.
auto landmarks = face_landmarks_detector_graph.Out(kNormLandmarksTag)
.Cast<std::vector<NormalizedLandmarkList>>();
auto face_rects_for_next_frame =
face_landmarks_detector_graph.Out(kFaceRectsNextFrameTag)
.Cast<std::vector<NormalizedRect>>();
if (tasks_options.base_options().use_stream_mode()) {
auto& previous_loopback = graph.AddNode("PreviousLoopbackCalculator");
image_in >> previous_loopback.In(kMainTag);
auto prev_face_rects_from_landmarks =
previous_loopback[Output<std::vector<NormalizedRect>>(kPrevLoopTag)];
auto& min_size_node =
graph.AddNode("NormalizedRectVectorHasMinSizeCalculator");
prev_face_rects_from_landmarks >> min_size_node.In(kIterableTag);
min_size_node.GetOptions<CollectionHasMinSizeCalculatorOptions>()
.set_min_size(max_num_faces);
auto has_enough_faces = min_size_node.Out("").Cast<bool>();
// While in stream mode, skip face detector graph when we successfully
// track the faces from the last frame.
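// DisallowIf forwards a stream's packets only while the bool condition is
// false, so the detector receives an image (and rect) only on frames where
// tracking yielded fewer than max_num_faces faces.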
auto image_for_face_detector =
DisallowIf(image_in, has_enough_faces, graph);
auto norm_rect_in_for_face_detector =
DisallowIf(norm_rect_in, has_enough_faces, graph);
image_for_face_detector >> face_detector.In(kImageTag);
norm_rect_in_for_face_detector >> face_detector.In(kNormRectTag);
auto expanded_face_rects_from_face_detector =
face_detector.Out(kExpandedFaceRectsTag);
auto& face_association = graph.AddNode("AssociationNormRectCalculator");
face_association.GetOptions<mediapipe::AssociationCalculatorOptions>()
.set_min_similarity_threshold(
tasks_options.min_tracking_confidence());
prev_face_rects_from_landmarks >>
face_association[Input<std::vector<NormalizedRect>>::Multiple("")][0];
expanded_face_rects_from_face_detector >>
face_association[Input<std::vector<NormalizedRect>>::Multiple("")][1];
auto face_rects = face_association.Out("");
face_rects >> clip_face_rects.In("");
// Back edge.
face_rects_for_next_frame >> previous_loopback.In(kLoopTag);
} else {
// While not in stream mode, the input images are not guaranteed to form a
// sequence, so we don't enable tracking or rect association between input
// images. Always run the face detector graph.
image_in >> face_detector.In(kImageTag);
norm_rect_in >> face_detector.In(kNormRectTag);
auto face_rects = face_detector.Out(kExpandedFaceRectsTag);
face_rects >> clip_face_rects.In("");
}
// Optional blendshape output.
std::optional<Source<std::vector<ClassificationList>>> blendshapes;
if (output_blendshapes) {
blendshapes = std::make_optional<>(
face_landmarks_detector_graph.Out(kBlendshapesTag)
.Cast<std::vector<ClassificationList>>());
}
// Optional face geometry output.
std::optional<Source<std::vector<FaceGeometry>>> face_geometry;
if (output_geometry) {
auto& image_properties = graph.AddNode("ImagePropertiesCalculator");
image_in >> image_properties.In(kImageTag);
auto image_size = image_properties.Out(kSizeTag);
auto& face_geometry_from_landmarks = graph.AddNode(
"mediapipe.tasks.vision.face_geometry."
"FaceGeometryFromLandmarksGraph");
if (environment.has_value()) {
*environment >> face_geometry_from_landmarks.SideIn(kEnvironmentTag);
}
landmarks >> face_geometry_from_landmarks.In(kFaceLandmarksTag);
image_size >> face_geometry_from_landmarks.In(kImageSizeTag);
face_geometry = face_geometry_from_landmarks.Out(kFaceGeometryTag)
.Cast<std::vector<FaceGeometry>>();
}
// TODO: Replace PassThroughCalculator with a calculator that
// converts the pixel data to be stored on the target storage (CPU vs GPU).
auto& pass_through = graph.AddNode("PassThroughCalculator");
image_in >> pass_through.In("");
return {{
/* landmark_lists= */ landmarks,
/* face_rects_next_frame= */
face_rects_for_next_frame,
/* face_rects= */
face_detector.Out(kFaceRectsTag).Cast<std::vector<NormalizedRect>>(),
/* detections= */
face_detector.Out(kDetectionsTag).Cast<std::vector<Detection>>(),
/* face_blendshapes= */ blendshapes,
/* face_geometry= */ face_geometry,
/* image= */
pass_through[Output<Image>("")],
}};
}
};
REGISTER_MEDIAPIPE_GRAPH(
::mediapipe::tasks::vision::face_landmarker::FaceLandmarkerGraph);
} // namespace face_landmarker
} // namespace vision
} // namespace tasks
} // namespace mediapipe

View File

@@ -0,0 +1,311 @@
/* Copyright 2023 The MediaPipe Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include <optional>
#include "absl/flags/flag.h"
#include "absl/status/statusor.h"
#include "absl/strings/str_format.h"
#include "absl/strings/string_view.h"
#include "mediapipe/framework/api2/builder.h"
#include "mediapipe/framework/api2/port.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/deps/file_path.h"
#include "mediapipe/framework/formats/classification.pb.h"
#include "mediapipe/framework/formats/image.h"
#include "mediapipe/framework/formats/landmark.pb.h"
#include "mediapipe/framework/formats/rect.pb.h"
#include "mediapipe/framework/packet.h"
#include "mediapipe/framework/port/file_helpers.h"
#include "mediapipe/framework/port/gmock.h"
#include "mediapipe/framework/port/gtest.h"
#include "mediapipe/tasks/cc/core/mediapipe_builtin_op_resolver.h"
#include "mediapipe/tasks/cc/core/proto/base_options.pb.h"
#include "mediapipe/tasks/cc/core/proto/external_file.pb.h"
#include "mediapipe/tasks/cc/core/task_runner.h"
#include "mediapipe/tasks/cc/vision/face_detector/proto/face_detector_graph_options.pb.h"
#include "mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry.pb.h"
#include "mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarker_graph_options.pb.h"
#include "mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarks_detector_graph_options.pb.h"
#include "mediapipe/tasks/cc/vision/utils/image_utils.h"
namespace mediapipe {
namespace tasks {
namespace vision {
namespace face_landmarker {
namespace {
using ::file::Defaults;
using ::file::GetTextProto;
using ::mediapipe::api2::Input;
using ::mediapipe::api2::Output;
using ::mediapipe::api2::builder::Graph;
using ::mediapipe::api2::builder::Source;
using ::mediapipe::file::JoinPath;
using ::mediapipe::tasks::core::TaskRunner;
using ::mediapipe::tasks::vision::DecodeImageFromFile;
using ::mediapipe::tasks::vision::face_geometry::proto::FaceGeometry;
using ::mediapipe::tasks::vision::face_landmarker::proto::
FaceLandmarkerGraphOptions;
using ::testing::EqualsProto;
using ::testing::Pointwise;
using ::testing::TestParamInfo;
using ::testing::TestWithParam;
using ::testing::Values;
using ::testing::proto::Approximately;
using ::testing::proto::Partially;
constexpr char kTestDataDirectory[] = "/mediapipe/tasks/testdata/vision/";
constexpr char kFaceLandmarkerModelBundleName[] = "face_landmarker.task";
constexpr char kFaceLandmarkerWithBlendshapesModelBundleName[] =
"face_landmarker_with_blendshapes.task";
constexpr char kPortraitImageName[] = "portrait.jpg";
constexpr char kCatImageName[] = "cat.jpg";
constexpr char kPortraitExpectedFaceLandmarksName[] =
"portrait_expected_face_landmarks.pbtxt";
constexpr char kPortraitExpectedFaceLandmarksWithAttentionName[] =
"portrait_expected_face_landmarks_with_attention.pbtxt";
constexpr char kPortraitExpectedBlendshapesName[] =
"portrait_expected_blendshapes_with_attention.pbtxt";
constexpr char kPortraitExpectedFaceGeometryName[] =
"portrait_expected_face_geometry_with_attention.pbtxt";
constexpr char kImageTag[] = "IMAGE";
constexpr char kImageName[] = "image";
constexpr char kNormRectTag[] = "NORM_RECT";
constexpr char kNormRectName[] = "norm_rect";
constexpr char kNormLandmarksTag[] = "NORM_LANDMARKS";
constexpr char kNormLandmarksName[] = "norm_landmarks";
constexpr char kBlendshapesTag[] = "BLENDSHAPES";
constexpr char kBlendshapesName[] = "blendshapes";
constexpr char kFaceGeometryTag[] = "FACE_GEOMETRY";
constexpr char kFaceGeometryName[] = "face_geometry";
constexpr float kLandmarksDiffMargin = 0.03;
constexpr float kBlendshapesDiffMargin = 0.1;
constexpr float kFaceGeometryDiffMargin = 0.01;
template <typename ProtoT>
ProtoT GetExpectedProto(absl::string_view filename) {
ProtoT expected_proto;
MP_EXPECT_OK(GetTextProto(file::JoinPath("./", kTestDataDirectory, filename),
&expected_proto, Defaults()));
return expected_proto;
}
// Struct holding the parameters for parameterized FaceLandmarkerGraphTest
// class.
struct FaceLandmarkerGraphTestParams {
// The name of this test, for convenience when displaying test results.
std::string test_name;
// The filename of the model to test.
std::string input_model_name;
// The filename of the test image.
std::string test_image_name;
// The expected output landmarks positions.
std::optional<std::vector<NormalizedLandmarkList>> expected_landmarks_list;
// The expected output blendshape classification.
std::optional<std::vector<ClassificationList>> expected_blendshapes;
// The expected output face geometry.
std::optional<std::vector<FaceGeometry>> expected_face_geometry;
// The max value difference between expected and detected positions.
float landmarks_diff_threshold;
// The max value difference between expected blendshapes and actual
// blendshapes.
float blendshapes_diff_threshold;
// The max value difference between expected and actual face
// geometry.
float face_geometry_diff_threshold;
};
// Helper function to create a FaceLandmarkerGraph TaskRunner.
absl::StatusOr<std::unique_ptr<TaskRunner>> CreateFaceLandmarkerGraphTaskRunner(
absl::string_view model_name, bool output_blendshape,
bool output_face_geometry) {
Graph graph;
auto& face_landmarker = graph.AddNode(
"mediapipe.tasks.vision.face_landmarker."
"FaceLandmarkerGraph");
auto* options = &face_landmarker.GetOptions<FaceLandmarkerGraphOptions>();
options->mutable_base_options()->mutable_model_asset()->set_file_name(
JoinPath("./", kTestDataDirectory, model_name));
options->mutable_face_detector_graph_options()->set_num_faces(1);
options->mutable_base_options()->set_use_stream_mode(true);
graph[Input<Image>(kImageTag)].SetName(kImageName) >>
face_landmarker.In(kImageTag);
graph[Input<NormalizedRect>(kNormRectTag)].SetName(kNormRectName) >>
face_landmarker.In(kNormRectTag);
face_landmarker.Out(kNormLandmarksTag).SetName(kNormLandmarksName) >>
graph[Output<std::vector<NormalizedLandmarkList>>(kNormLandmarksTag)];
if (output_blendshape) {
face_landmarker.Out(kBlendshapesTag).SetName(kBlendshapesName) >>
graph[Output<std::vector<ClassificationList>>(kBlendshapesTag)];
}
if (output_face_geometry) {
face_landmarker.Out(kFaceGeometryTag).SetName(kFaceGeometryName) >>
graph[Output<std::vector<FaceGeometry>>(kFaceGeometryTag)];
}
return TaskRunner::Create(
graph.GetConfig(),
absl::make_unique<tasks::core::MediaPipeBuiltinOpResolver>());
}
// Helper function to construct NormalizedRect proto.
NormalizedRect MakeNormRect(float x_center, float y_center, float width,
float height, float rotation) {
NormalizedRect face_rect;
face_rect.set_x_center(x_center);
face_rect.set_y_center(y_center);
face_rect.set_width(width);
face_rect.set_height(height);
face_rect.set_rotation(rotation);
return face_rect;
}
class FaceLandmarkerGraphTest
: public testing::TestWithParam<FaceLandmarkerGraphTestParams> {};
TEST(FaceLandmarkerGraphTest, FailsWithNoBlendshapesModel) {
MP_ASSERT_OK_AND_ASSIGN(
Image image, DecodeImageFromFile(
JoinPath("./", kTestDataDirectory, kPortraitImageName)));
auto result =
CreateFaceLandmarkerGraphTaskRunner(kFaceLandmarkerModelBundleName,
/*output_blendshape=*/true,
/*output_face_geometry=*/false);
EXPECT_EQ(result.status().code(), absl::StatusCode::kInvalidArgument);
EXPECT_THAT(result.status().message(),
testing::HasSubstr(
"BLENDSHAPES Tag and blendshapes model must be both set."));
}
TEST_P(FaceLandmarkerGraphTest, Succeeds) {
MP_ASSERT_OK_AND_ASSIGN(
Image image, DecodeImageFromFile(JoinPath("./", kTestDataDirectory,
GetParam().test_image_name)));
MP_ASSERT_OK_AND_ASSIGN(auto task_runner,
CreateFaceLandmarkerGraphTaskRunner(
GetParam().input_model_name,
GetParam().expected_blendshapes.has_value(),
GetParam().expected_face_geometry.has_value()));
auto output_packets = task_runner->Process(
{{kImageName, MakePacket<Image>(std::move(image))},
{kNormRectName,
MakePacket<NormalizedRect>(MakeNormRect(0.5, 0.5, 1.0, 1.0, 0))}});
MP_ASSERT_OK(output_packets);
if (GetParam().expected_landmarks_list) {
const std::vector<NormalizedLandmarkList>& landmarks_lists =
(*output_packets)[kNormLandmarksName]
.Get<std::vector<NormalizedLandmarkList>>();
EXPECT_THAT(landmarks_lists,
Pointwise(Approximately(Partially(EqualsProto()),
GetParam().landmarks_diff_threshold),
*GetParam().expected_landmarks_list));
}
if (GetParam().expected_blendshapes) {
const std::vector<ClassificationList>& blendshapes =
(*output_packets)[kBlendshapesName]
.Get<std::vector<ClassificationList>>();
EXPECT_THAT(blendshapes,
Pointwise(Approximately(Partially(EqualsProto()),
GetParam().blendshapes_diff_threshold),
*GetParam().expected_blendshapes));
}
if (GetParam().expected_face_geometry) {
const std::vector<FaceGeometry>& face_geometry =
(*output_packets)[kFaceGeometryName].Get<std::vector<FaceGeometry>>();
EXPECT_THAT(
face_geometry,
Pointwise(Approximately(Partially(EqualsProto()),
GetParam().face_geometry_diff_threshold),
*GetParam().expected_face_geometry));
}
}
INSTANTIATE_TEST_SUITE_P(
FaceLandmarkerGraphTests, FaceLandmarkerGraphTest,
Values(FaceLandmarkerGraphTestParams{
/* test_name= */ "Portrait",
/* input_model_name= */ kFaceLandmarkerModelBundleName,
/* test_image_name= */ kPortraitImageName,
/* expected_landmarks_list= */
{{GetExpectedProto<NormalizedLandmarkList>(
kPortraitExpectedFaceLandmarksName)}},
/* expected_blendshapes= */ std::nullopt,
/* expected_face_geometry= */ std::nullopt,
/* landmarks_diff_threshold= */ kLandmarksDiffMargin,
/* blendshapes_diff_threshold= */ kBlendshapesDiffMargin,
/* face_geometry_diff_threshold= */
kFaceGeometryDiffMargin},
FaceLandmarkerGraphTestParams{
/* test_name= */ "NoFace",
/* input_model_name= */ kFaceLandmarkerModelBundleName,
/* test_image_name= */ kCatImageName,
/* expected_landmarks_list= */ std::nullopt,
/* expected_blendshapes= */ std::nullopt,
/* expected_face_geometry= */ std::nullopt,
/* landmarks_diff_threshold= */ kLandmarksDiffMargin,
/* blendshapes_diff_threshold= */ kBlendshapesDiffMargin,
/* face_geometry_diff_threshold= */
kFaceGeometryDiffMargin},
FaceLandmarkerGraphTestParams{
/* test_name= */ "PortraitWithBlendshape",
/* input_model_name= */
kFaceLandmarkerWithBlendshapesModelBundleName,
/* test_image_name= */ kPortraitImageName,
/* expected_landmarks_list= */
{{GetExpectedProto<NormalizedLandmarkList>(
kPortraitExpectedFaceLandmarksWithAttentionName)}},
/* expected_blendshapes= */
{{GetExpectedProto<ClassificationList>(
kPortraitExpectedBlendshapesName)}},
/* expected_face_geometry= */ std::nullopt,
/* landmarks_diff_threshold= */ kLandmarksDiffMargin,
/* blendshapes_diff_threshold= */ kBlendshapesDiffMargin,
/* face_geometry_diff_threshold= */ kFaceGeometryDiffMargin},
FaceLandmarkerGraphTestParams{
/* test_name= */ "PortraitWithBlendshapeWithFaceGeometry",
/* input_model_name= */
kFaceLandmarkerWithBlendshapesModelBundleName,
/* test_image_name= */ kPortraitImageName,
/* expected_landmarks_list= */
{{GetExpectedProto<NormalizedLandmarkList>(
kPortraitExpectedFaceLandmarksWithAttentionName)}},
/* expected_blendshapes= */
{{GetExpectedProto<ClassificationList>(
kPortraitExpectedBlendshapesName)}},
/* expected_face_geometry= */
{{GetExpectedProto<FaceGeometry>(
kPortraitExpectedFaceGeometryName)}},
/* landmarks_diff_threshold= */ kLandmarksDiffMargin,
/* blendshapes_diff_threshold= */ kBlendshapesDiffMargin,
/* face_geometry_diff_threshold= */ kFaceGeometryDiffMargin}),
[](const TestParamInfo<FaceLandmarkerGraphTest::ParamType>& info) {
return info.param.test_name;
});
} // namespace
} // namespace face_landmarker
} // namespace vision
} // namespace tasks
} // namespace mediapipe

View File

@@ -214,13 +214,13 @@ ClassificationList GetBlendshapes(absl::string_view filename) {
// Helper function to construct NormalizedRect proto.
NormalizedRect MakeNormRect(float x_center, float y_center, float width,
float height, float rotation) {
NormalizedRect hand_rect;
hand_rect.set_x_center(x_center);
hand_rect.set_y_center(y_center);
hand_rect.set_width(width);
hand_rect.set_height(height);
hand_rect.set_rotation(rotation);
return hand_rect;
NormalizedRect face_rect;
face_rect.set_x_center(x_center);
face_rect.set_y_center(y_center);
face_rect.set_width(width);
face_rect.set_height(height);
face_rect.set_rotation(rotation);
return face_rect;
}
// Struct holding the parameters for parameterized FaceLandmarksDetectionTest
@@ -234,9 +234,9 @@ struct SingeFaceTestParams {
std::optional<std::string> blendshape_model_name;
// The filename of the test image.
std::string test_image_name;
// RoI on image to detect hands.
// RoI on image to detect faces.
NormalizedRect norm_rect;
// Expected hand presence value.
// Expected face presence value.
bool expected_presence;
// The expected output landmarks positions.
NormalizedLandmarkList expected_landmarks;
@@ -258,9 +258,9 @@ struct MultiFaceTestParams {
std::optional<std::string> blendshape_model_name;
// The filename of the test image.
std::string test_image_name;
// RoI on image to detect hands.
// RoI on image to detect faces.
std::vector<NormalizedRect> norm_rects;
// Expected hand presence value.
// Expected face presence value.
std::vector<bool> expected_presence;
// The expected output landmarks positions.
std::optional<std::vector<NormalizedLandmarkList>> expected_landmarks_lists;
@@ -299,7 +299,6 @@ TEST_P(SingleFaceLandmarksDetectionTest, Succeeds) {
(*output_packets)[kNormLandmarksName].Get<NormalizedLandmarkList>();
const NormalizedLandmarkList& expected_landmarks =
GetParam().expected_landmarks;
EXPECT_THAT(
landmarks,
Approximately(Partially(EqualsProto(expected_landmarks)),
@@ -395,7 +394,9 @@ INSTANTIATE_TEST_SUITE_P(
kFaceLandmarksDetectionWithAttentionModel,
/* blendshape_model_name= */ kFaceBlendshapesModel,
/* test_image_name= */ kPortraitImageName,
/* norm_rect= */ MakeNormRect(0.4987, 0.2211, 0.2877, 0.2303, 0),
/* norm_rect= */
MakeNormRect(0.48906386, 0.22731927, 0.42905223, 0.34357703,
0.008304443),
/* expected_presence= */ true,
/* expected_landmarks= */
GetExpectedLandmarkList(
@@ -444,7 +445,9 @@ INSTANTIATE_TEST_SUITE_P(
kFaceLandmarksDetectionWithAttentionModel,
/* blendshape_model_name= */ kFaceBlendshapesModel,
/* test_image_name= */ kPortraitImageName,
/* norm_rects= */ {MakeNormRect(0.4987, 0.2211, 0.2877, 0.2303, 0)},
/* norm_rects= */
{MakeNormRect(0.48906386, 0.22731927, 0.42905223, 0.34357703,
0.008304443)},
/* expected_presence= */ {true},
/* expected_landmarks_list= */
{{GetExpectedLandmarkList(

View File

@@ -50,3 +50,15 @@ mediapipe_proto_library(
"//mediapipe/framework:calculator_proto",
],
)
mediapipe_proto_library(
name = "face_landmarker_graph_options_proto",
srcs = ["face_landmarker_graph_options.proto"],
deps = [
":face_landmarks_detector_graph_options_proto",
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
"//mediapipe/tasks/cc/core/proto:base_options_proto",
"//mediapipe/tasks/cc/vision/face_detector/proto:face_detector_graph_options_proto",
],
)

View File

@@ -0,0 +1,48 @@
/* Copyright 2023 The MediaPipe Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
syntax = "proto2";
package mediapipe.tasks.vision.face_landmarker.proto;
import "mediapipe/framework/calculator.proto";
import "mediapipe/framework/calculator_options.proto";
import "mediapipe/tasks/cc/core/proto/base_options.proto";
import "mediapipe/tasks/cc/vision/face_detector/proto/face_detector_graph_options.proto";
import "mediapipe/tasks/cc/vision/face_landmarker/proto/face_landmarks_detector_graph_options.proto";
option java_package = "com.google.mediapipe.tasks.vision.facelandmarker.proto";
option java_outer_classname = "FaceLandmarkerGraphOptionsProto";
message FaceLandmarkerGraphOptions {
extend mediapipe.CalculatorOptions {
optional FaceLandmarkerGraphOptions ext = 508968150;
}
// Base options for configuring the Task library, such as specifying the
// TfLite model file with metadata, accelerator options, etc.
optional core.proto.BaseOptions base_options = 1;
// Options for face detector graph.
optional face_detector.proto.FaceDetectorGraphOptions
face_detector_graph_options = 2;
// Options for face landmarks detector graph.
optional FaceLandmarksDetectorGraphOptions
face_landmarks_detector_graph_options = 3;
// Minimum confidence for face landmarks tracking to be considered
// successful.
optional float min_tracking_confidence = 4 [default = 0.5];
}
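// Usage sketch (illustrative, not part of this message): these options are
// typically populated through the C++ api2 graph builder, e.g.
//   auto& node = graph.AddNode(
//       "mediapipe.tasks.vision.face_landmarker.FaceLandmarkerGraph");
//   auto& opts = node.GetOptions<FaceLandmarkerGraphOptions>();
//   opts.mutable_base_options()->mutable_model_asset()->set_file_name(
//       "face_landmarker.task");  // illustrative path
//   opts.mutable_face_detector_graph_options()->set_num_faces(2);
//   opts.set_min_tracking_confidence(0.5);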

View File

@@ -44,6 +44,7 @@ mediapipe_files(srcs = [
"face_detection_short_range.tflite",
"face_landmark.tflite",
"face_landmark_with_attention.tflite",
"face_landmarker.task",
"fist.jpg",
"fist.png",
"hair_segmentation.tflite",
@@ -92,6 +93,7 @@ exports_files(
"face_geometry_expected_out.pbtxt",
"gesture_recognizer.task",
"portrait_expected_detection.pbtxt",
"portrait_expected_face_geometry_with_attention.pbtxt",
"portrait_rotated_expected_detection.pbtxt",
],
)
@@ -153,6 +155,7 @@ filegroup(
"face_detection_short_range.tflite",
"face_landmark.tflite",
"face_landmark_with_attention.tflite",
"face_landmarker.task",
"hair_segmentation.tflite",
"hand_landmark_full.tflite",
"hand_landmark_lite.tflite",
@@ -166,7 +169,6 @@ filegroup(
"mobilenet_v2_1.0_224.tflite",
"mobilenet_v3_small_100_224_embedder.tflite",
"palm_detection_full.tflite",
"portrait_expected_face_landmarks.pbtxt",
"selfie_segm_128_128_3.tflite",
"selfie_segm_144_256_3.tflite",
],
@@ -189,6 +191,7 @@ filegroup(
"pointing_up_landmarks.pbtxt",
"pointing_up_rotated_landmarks.pbtxt",
"portrait_expected_detection.pbtxt",
"portrait_expected_face_geometry_with_attention.pbtxt",
"portrait_expected_face_landmarks.pbtxt",
"portrait_expected_face_landmarks_with_attention.pbtxt",
"portrait_rotated_expected_detection.pbtxt",

View File

@@ -1,5 +1,5 @@
# proto-file: mediapipe/tasks/cc/vision/face_geometry/proto/environment.proto
# proto-message: Environment
# proto-file: mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry.proto
# proto-message: FaceGeometry
mesh {
vertex_type: VERTEX_PT
primitive_type: TRIANGLE

Binary file not shown.

View File

@@ -2,261 +2,261 @@
# proto-message: ClassificationList
classification {
index: 0
score: 4.9559007e-06
score: 2.922153e-05
label: "_neutral"
}
classification {
index: 1
score: 0.22943014
score: 0.7923543
label: "browDownLeft"
}
classification {
index: 2
score: 0.22297752
score: 0.81483257
label: "browDownRight"
}
classification {
index: 3
score: 0.015948873
score: 0.0002845049
label: "browInnerUp"
}
classification {
index: 4
score: 0.006946607
score: 0.00049632485
label: "browOuterUpLeft"
}
classification {
index: 5
score: 0.0070318673
score: 0.00050830405
label: "browOuterUpRight"
}
classification {
index: 6
score: 0.0013679645
score: 0.0014010799
label: "cheekPuff"
}
classification {
index: 7
score: 7.1003383e-06
score: 3.1523705e-06
label: "cheekSquintLeft"
}
classification {
index: 8
score: 5.78299e-06
score: 4.7487533e-06
label: "cheekSquintRight"
}
classification {
index: 9
score: 0.20132238
score: 0.2633514
label: "eyeBlinkLeft"
}
classification {
index: 10
score: 0.16521452
score: 0.29687604
label: "eyeBlinkRight"
}
classification {
index: 11
score: 0.03764786
score: 0.15565884
label: "eyeLookDownLeft"
}
classification {
index: 12
score: 0.04828824
score: 0.18480143
label: "eyeLookDownRight"
}
classification {
index: 13
score: 0.016539993
score: 0.054997284
label: "eyeLookInLeft"
}
classification {
index: 14
score: 0.20026363
score: 0.10678173
label: "eyeLookInRight"
}
classification {
index: 15
score: 0.21363346
score: 0.099223085
label: "eyeLookOutLeft"
}
classification {
index: 16
score: 0.024430025
score: 0.06485316
label: "eyeLookOutRight"
}
classification {
index: 17
score: 0.30147508
score: 0.09108365
label: "eyeLookUpLeft"
}
classification {
index: 18
score: 0.28701693
score: 0.090983614
label: "eyeLookUpRight"
}
classification {
index: 19
score: 0.67143106
score: 0.7040458
label: "eyeSquintLeft"
}
classification {
index: 20
score: 0.5306328
score: 0.5759947
label: "eyeSquintRight"
}
classification {
index: 21
score: 0.0041342233
score: 0.002923721
label: "eyeWideLeft"
}
classification {
index: 22
score: 0.005231879
score: 0.0027232585
label: "eyeWideRight"
}
classification {
index: 23
score: 0.009427094
score: 0.0028721786
label: "jawForward"
}
classification {
index: 24
score: 0.0015789346
score: 0.0014818686
label: "jawLeft"
}
classification {
index: 25
score: 0.073719256
score: 0.13099338
label: "jawOpen"
}
classification {
index: 26
score: 0.00046979196
score: 0.00025238062
label: "jawRight"
}
classification {
index: 27
score: 0.0011400756
score: 0.013557238
label: "mouthClose"
}
classification {
index: 28
score: 0.0060502808
score: 0.040049534
label: "mouthDimpleLeft"
}
classification {
index: 29
score: 0.013351685
score: 0.017489946
label: "mouthDimpleRight"
}
classification {
index: 30
score: 0.09859665
score: 0.00069562776
label: "mouthFrownLeft"
}
classification {
index: 31
score: 0.08897466
score: 0.00082510535
label: "mouthFrownRight"
}
classification {
index: 32
score: 0.0020718675
score: 0.008079494
label: "mouthFunnel"
}
classification {
index: 33
score: 6.42887e-06
score: 0.000491791
label: "mouthLeft"
}
classification {
index: 34
score: 0.68950605
score: 0.067228556
label: "mouthLowerDownLeft"
}
classification {
index: 35
score: 0.7864029
score: 0.17653553
label: "mouthLowerDownRight"
}
classification {
index: 36
score: 0.056456964
score: 0.14764099
label: "mouthPressLeft"
}
classification {
index: 37
score: 0.037348792
score: 0.11692603
label: "mouthPressRight"
}
classification {
index: 38
score: 0.00067001814
score: 0.0016829089
label: "mouthPucker"
}
classification {
index: 39
score: 0.005189785
score: 0.0010300461
label: "mouthRight"
}
classification {
index: 40
score: 0.018723497
score: 0.01507229
label: "mouthRollLower"
}
classification {
index: 41
score: 0.052819636
score: 0.11626337
label: "mouthRollUpper"
}
classification {
index: 42
score: 0.0033772716
score: 0.004975724
label: "mouthShrugLower"
}
classification {
index: 43
score: 0.0031609535
score: 0.004570691
label: "mouthShrugUpper"
}
classification {
index: 44
score: 0.49639142
score: 0.9271871
label: "mouthSmileLeft"
}
classification {
index: 45
score: 0.4014515
score: 0.9062751
label: "mouthSmileRight"
}
classification {
index: 46
score: 0.5825701
score: 0.037454817
label: "mouthStretchLeft"
}
classification {
index: 47
score: 0.73058575
score: 0.1777958
label: "mouthStretchRight"
}
classification {
index: 48
score: 0.13561466
score: 0.8198947
label: "mouthUpperUpLeft"
}
classification {
index: 49
score: 0.20078722
score: 0.769086
label: "mouthUpperUpRight"
}
classification {
index: 50
score: 3.3396598e-06
score: 7.096563e-07
label: "noseSneerLeft"
}
classification {
index: 51
score: 1.3096546e-05
score: 9.818824e-06
label: "noseSneerRight"
}

File diff suppressed because it is too large

View File

@@ -70,6 +70,12 @@ def external_files():
urls = ["https://storage.googleapis.com/mediapipe-assets/BUILD?generation=1661875663693976"],
)
http_file(
name = "com_google_mediapipe_BUILD_orig",
sha256 = "64d5343a6a5f9be06db0a5074a2260f9ae63a989fe01702832cd215680dc19c1",
urls = ["https://storage.googleapis.com/mediapipe-assets/BUILD.orig?generation=1678323576393653"],
)
http_file(
name = "com_google_mediapipe_burger_crop_jpg",
sha256 = "8f58de573f0bf59a49c3d86cfabb9ad4061481f574aa049177e8da3963dddc50",
@@ -300,8 +306,20 @@ def external_files():
http_file(
name = "com_google_mediapipe_face_geometry_expected_out_pbtxt",
sha256 = "611b203bca40e547ae75bf0822fda0695d512d02940e7af08a70068eaa8524f7",
urls = ["https://storage.googleapis.com/mediapipe-assets/face_geometry_expected_out.pbtxt?generation=1677787710308910"],
sha256 = "4a4ed08055a5bc9281472dd60180d11f0cdc9a15fa1788d87a58af3d06b2c6e4",
urls = ["https://storage.googleapis.com/mediapipe-assets/face_geometry_expected_out.pbtxt?generation=1678323580380646"],
)
http_file(
name = "com_google_mediapipe_face_landmarker_task",
sha256 = "7cf2bbf1842c429e9defee38e7f1c4238978d8a6faf2da145bb19846f86bd2f4",
urls = ["https://storage.googleapis.com/mediapipe-assets/face_landmarker.task?generation=1678323583183024"],
)
http_file(
name = "com_google_mediapipe_face_landmarker_with_blendshapes_task",
sha256 = "a75c1ba70e4b8568000af2ad0b355ed559ab5d5793db50fa9ad241f8dc4fad5f",
urls = ["https://storage.googleapis.com/mediapipe-assets/face_landmarker_with_blendshapes.task?generation=1678323586260800"],
)
http_file(
@@ -316,6 +334,12 @@ def external_files():
urls = ["https://storage.googleapis.com/mediapipe-assets/face_landmark_with_attention.tflite?generation=1676415468821650"],
)
http_file(
name = "com_google_mediapipe_face_stylization_dummy_tflite",
sha256 = "f57fd2d5638def25466f6fec142eb3397d8ad99a9bd0a9344b622bad7c3f0376",
urls = ["https://storage.googleapis.com/mediapipe-assets/face_stylization_dummy.tflite?generation=1678323589048063"],
)
http_file(
name = "com_google_mediapipe_feature_tensor_meta_json",
sha256 = "b2c30ddfd495956ce81085f8a143422f4310b002cfbf1c594ff2ee0576e29d6f",
@@ -508,6 +532,12 @@ def external_files():
urls = ["https://storage.googleapis.com/mediapipe-assets/labels.txt?generation=1667892497527642"],
)
http_file(
name = "com_google_mediapipe_language_detector_tflite",
sha256 = "5f64d821110dd2a3280546e8cd59dff09547e25d5f5c9711ec3f03416414dbb2",
urls = ["https://storage.googleapis.com/mediapipe-assets/language_detector.tflite?generation=1678323592870401"],
)
http_file(
name = "com_google_mediapipe_left_hands_jpg",
sha256 = "4b5134daa4cb60465535239535f9f74c2842aba3aa5fd30bf04ef5678f93d87f",
@@ -780,8 +810,8 @@ def external_files():
http_file(
name = "com_google_mediapipe_portrait_expected_blendshapes_with_attention_pbtxt",
sha256 = "0142d56705093c3d79ea5ee79b8e9454499abee00fc059491e6ca14f5fbab862",
urls = ["https://storage.googleapis.com/mediapipe-assets/portrait_expected_blendshapes_with_attention.pbtxt?generation=1678218364703223"],
sha256 = "3f8f698d8ed81346c6f13d1cc85190fd4a58b021e664d336997d29818b8ffbb6",
urls = ["https://storage.googleapis.com/mediapipe-assets/portrait_expected_blendshapes_with_attention.pbtxt?generation=1678323598426417"],
)
http_file(
@@ -790,6 +820,12 @@ def external_files():
urls = ["https://storage.googleapis.com/mediapipe-assets/portrait_expected_detection.pbtxt?generation=1677044311581104"],
)
http_file(
name = "com_google_mediapipe_portrait_expected_face_geometry_with_attention_pbtxt",
sha256 = "5cc57b8da3ad0527dce581fe1309f6b36043e5837e3f4f5af5e24005a99dc52a",
urls = ["https://storage.googleapis.com/mediapipe-assets/portrait_expected_face_geometry_with_attention.pbtxt?generation=1678323601064393"],
)
http_file(
name = "com_google_mediapipe_portrait_expected_face_landmarks_pbtxt",
sha256 = "4ac8587379bd072c36cda0d7345f5e592fae51b30522475e0b49c18aab108ce7",
@@ -850,6 +886,12 @@ def external_files():
urls = ["https://storage.googleapis.com/mediapipe-assets/pose_landmark_lite.tflite?generation=1661875901231143"],
)
http_file(
name = "com_google_mediapipe_ptm_512_hdt_ptm_woid_tflite",
sha256 = "2baa1c9783d03dd26f91e3c49efbcab11dd1361ff80e40e7209e81f84f281b6a",
urls = ["https://storage.googleapis.com/mediapipe-assets/ptm_512_hdt_ptm_woid.tflite?generation=1678323604771164"],
)
http_file(
name = "com_google_mediapipe_README_md",
sha256 = "a96d08c9c70cd9717207ed72c926e02e5eada751f00bdc5d3a7e82e3492b72cb",