# MediaPipe graph that applies a face effect to the input video stream.

# GPU buffer. (GpuBuffer)
input_stream: "input_video"

# An integer, which indicates which effect is selected. (int)
#
# If `selected_effect_id` is `0`, the Axis effect is selected.
# If `selected_effect_id` is `1`, the Facepaint effect is selected.
# If `selected_effect_id` is `2`, the Glasses effect is selected.
#
# No other values are allowed for `selected_effect_id`.
input_stream: "selected_effect_id"
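# For illustration only (an assumption about the host application, not part of
# the graph itself): with the MediaPipe C++ API, a driver would feed this
# stream one integer packet per frame, e.g.
#
#   graph.AddPacketToInputStream(
#       "selected_effect_id",
#       mediapipe::MakePacket<int>(effect_id).At(mediapipe::Timestamp(frame_us)));
#
# where `graph` is a running mediapipe::CalculatorGraph and `effect_id` /
# `frame_us` are values chosen by the application.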
# Indicates whether to use face detection as the input source. (bool)
#
# If `true`, the face detection pipeline will be used to produce landmarks.
# If `false`, the face landmark pipeline will be used to produce landmarks.
input_side_packet: "use_face_detection_input_source"
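# For illustration only (again assuming a C++ host application built on
# mediapipe::CalculatorGraph): side packets are supplied once, when the run is
# started, rather than per frame, e.g.
#
#   MP_RETURN_IF_ERROR(graph.StartRun(
#       {{"use_face_detection_input_source", mediapipe::MakePacket<bool>(false)}}));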
# Output image with rendered results. (GpuBuffer)
output_stream: "output_video"

# A list of geometry data for a single detected face.
#
# NOTE: there will not be an output packet in this stream for a particular
# timestamp if no faces are detected.
#
# (std::vector<face_geometry::FaceGeometry>)
output_stream: "multi_face_geometry"
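# For illustration only (same assumed C++ host application): because packets on
# this stream may be absent for some timestamps, a driver typically registers a
# callback (before StartRun) and only reacts when a packet actually arrives, e.g.
#
#   MP_RETURN_IF_ERROR(graph.ObserveOutputStream(
#       "multi_face_geometry", [](const mediapipe::Packet& packet) {
#         const auto& faces =
#             packet.Get<std::vector<face_geometry::FaceGeometry>>();
#         // At most one entry, per the stream description above.
#         return absl::OkStatus();
#       }));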
# Throttles the images flowing downstream for flow control. It passes through
# the very first incoming image unaltered, and waits for downstream nodes
# (calculators and subgraphs) in the graph to finish their tasks before it
# passes through another image. All images that come in while waiting are
# dropped, limiting the number of in-flight images in most parts of the graph
# to 1. This prevents the downstream nodes from queuing up incoming images and
# data excessively, which leads to increased latency and memory usage, unwanted
# in real-time mobile applications. It also eliminates unnecessary computation,
# e.g., the output produced by a node may get dropped downstream if the
# subsequent nodes are still busy processing previous inputs.
node {
  calculator: "FlowLimiterCalculator"
  input_stream: "input_video"
  input_stream: "FINISHED:output_video"
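  # The FINISHED stream is a back edge from the graph output: a packet arriving
  # here tells the limiter that downstream processing of an earlier frame has
  # completed, so the next incoming frame may be passed through.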
  input_stream_info: {
    tag_index: "FINISHED"
    back_edge: true
  }
  output_stream: "throttled_input_video"
}
# Generates an environment that describes the current virtual scene.
node {
  calculator: "FaceGeometryEnvGeneratorCalculator"
  output_side_packet: "ENVIRONMENT:environment"
  node_options: {
    [type.googleapis.com/mediapipe.FaceGeometryEnvGeneratorCalculatorOptions] {
      environment: {
        origin_point_location: TOP_LEFT_CORNER
        perspective_camera: {
          vertical_fov_degrees: 63.0  # 63 degrees
          near: 1.0  # 1cm
          far: 10000.0  # 100m
        }
      }
    }
  }
}

# Computes the face geometry for a single face. The input source is defined
# through `use_face_detection_input_source`.
node {
  calculator: "SwitchContainer"
  input_stream: "IMAGE:throttled_input_video"
  input_side_packet: "ENABLE:use_face_detection_input_source"
  input_side_packet: "ENVIRONMENT:environment"
  output_stream: "MULTI_FACE_GEOMETRY:multi_face_geometry"
  node_options: {
    [type.googleapis.com/mediapipe.SwitchContainerOptions] {
      contained_node: {
        calculator: "SingleFaceGeometryFromLandmarksGpu"
      }
      contained_node: {
        calculator: "SingleFaceGeometryFromDetectionGpu"
      }
    }
  }
}
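# NOTE on the SwitchContainer above: with a boolean ENABLE input, the first
# contained node is selected when the packet is false and the second when it is
# true. The order of the contained nodes therefore matches the description of
# `use_face_detection_input_source` at the top of this graph: `false` routes to
# SingleFaceGeometryFromLandmarksGpu, `true` to SingleFaceGeometryFromDetectionGpu.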
# Renders the selected effect based on `selected_effect_id`.
node {
  calculator: "SwitchContainer"
  input_stream: "SELECT:selected_effect_id"
  input_stream: "IMAGE_GPU:throttled_input_video"
  input_stream: "MULTI_FACE_GEOMETRY:multi_face_geometry"
  input_side_packet: "ENVIRONMENT:environment"
  output_stream: "IMAGE_GPU:output_video"
  node_options: {
    [type.googleapis.com/mediapipe.SwitchContainerOptions] {
      contained_node: {
        calculator: "FaceGeometryEffectRendererCalculator"
        node_options: {
          [type.googleapis.com/mediapipe.FaceGeometryEffectRendererCalculatorOptions] {
            effect_texture_path: "mediapipe/graphs/face_effect/data/axis.pngblob"
            effect_mesh_3d_path: "mediapipe/graphs/face_effect/data/axis.binarypb"
          }
        }
      }
      contained_node: {
        calculator: "FaceGeometryEffectRendererCalculator"
        node_options: {
          [type.googleapis.com/mediapipe.FaceGeometryEffectRendererCalculatorOptions] {
            effect_texture_path: "mediapipe/graphs/face_effect/data/facepaint.pngblob"
          }
        }
      }
      contained_node: {
        calculator: "FaceGeometryEffectRendererCalculator"
        node_options: {
          [type.googleapis.com/mediapipe.FaceGeometryEffectRendererCalculatorOptions] {
            effect_texture_path: "mediapipe/graphs/face_effect/data/glasses.pngblob"
            effect_mesh_3d_path: "mediapipe/graphs/face_effect/data/glasses.binarypb"
          }
        }
      }
    }
  }
}
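# NOTE on the SwitchContainer above: the SELECT input picks a contained node by
# index, so `selected_effect_id` values 0, 1 and 2 select the Axis, Facepaint
# and Glasses renderers respectively, matching the description of
# `selected_effect_id` at the top of this graph. The Facepaint entry leaves
# `effect_mesh_3d_path` unset, in which case the renderer uses the
# reconstructed face mesh itself as the effect mesh.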