# MediaPipe graph that performs face detection with TensorFlow Lite on CPU.

# GPU buffer. (GpuBuffer)
input_stream: "input_video"

# Output image with rendered results. (GpuBuffer)
output_stream: "output_video"
# Detected faces. (std::vector<Detection>)
output_stream: "face_detections"
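
# The untagged "input_video", "output_video", and "face_detections" entries
# above declare the graph's public interface: the host application feeds
# camera frames into "input_video" and reads the other two streams back out
# (e.g., via an output stream poller or observer).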

# Throttles the images flowing downstream for flow control. It passes through
# the very first incoming image unaltered, and waits for downstream nodes
# (calculators and subgraphs) in the graph to finish their tasks before it
# passes through another image. All images that come in while waiting are
# dropped, limiting the number of in-flight images in most parts of the graph
# to 1. This prevents the downstream nodes from queuing up incoming images and
# data excessively, which leads to increased latency and memory usage, both
# unwanted in real-time mobile applications. It also eliminates unnecessary
# computation, e.g., the output produced by a node may get dropped downstream
# if the subsequent nodes are still busy processing previous inputs.
node {
  calculator: "FlowLimiterCalculator"
  input_stream: "input_video"
  input_stream: "FINISHED:output_video"
  input_stream_info: {
    tag_index: "FINISHED"
    back_edge: true
  }
  output_stream: "throttled_input_video"
}
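
# Note: "FINISHED:output_video" makes this graph cyclic, since the graph's
# final output feeds back into the throttle. Marking the FINISHED stream as a
# back edge via input_stream_info is what allows MediaPipe to accept the
# cycle during graph validation.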

# Transfers the input image from GPU to CPU memory for the purpose of
# demonstrating a CPU-based pipeline. Note that the input image on GPU has the
# origin defined at the bottom-left corner (OpenGL convention). As a result,
# the transferred image on CPU also shares the same representation.
node: {
  calculator: "GpuBufferToImageFrameCalculator"
  input_stream: "throttled_input_video"
  output_stream: "input_video_cpu"
}
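
# The matching CPU-to-GPU transfer happens at the end of the graph (see the
# ImageFrameToGpuBufferCalculator node below), so the image keeps a
# consistent representation across the CPU section in between.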

# Subgraph that detects faces.
node {
  calculator: "FaceDetectionShortRangeCpu"
  input_stream: "IMAGE:input_video_cpu"
  output_stream: "DETECTIONS:face_detections"
}
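
# FaceDetectionShortRangeCpu is a subgraph defined elsewhere in the MediaPipe
# repository; it wraps the short-range face detection TFLite model (intended
# for faces relatively close to the camera) and emits a
# std::vector<Detection> on its DETECTIONS output.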

# Converts the detections to drawing primitives for annotation overlay.
node {
  calculator: "DetectionsToRenderDataCalculator"
  input_stream: "DETECTIONS:face_detections"
  output_stream: "RENDER_DATA:render_data"
  node_options: {
    [type.googleapis.com/mediapipe.DetectionsToRenderDataCalculatorOptions] {
      thickness: 4.0
      color { r: 255 g: 0 b: 0 }
    }
  }
}
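
# The node_options above style the drawing primitives: the bounding boxes and
# key points derived from each detection are rendered as red
# (r: 255, g: 0, b: 0) lines with thickness 4.0.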

# Draws annotations and overlays them on top of the input images.
node {
  calculator: "AnnotationOverlayCalculator"
  input_stream: "IMAGE:input_video_cpu"
  input_stream: "render_data"
  output_stream: "IMAGE:output_video_cpu"
}
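
# AnnotationOverlayCalculator takes the image on its tagged IMAGE input plus
# any number of untagged render-data streams ("render_data" here), and draws
# each one on top of the image before emitting the annotated copy.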

# Transfers the annotated image from CPU back to GPU memory, to be sent out of
# the graph.
node: {
  calculator: "ImageFrameToGpuBufferCalculator"
  input_stream: "output_video_cpu"
  output_stream: "output_video"
}
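
# This final transfer completes the GPU -> CPU -> GPU round trip; its
# "output_video" stream is the same one declared as the graph output at the
# top of the file, and is also what feeds the FlowLimiterCalculator's
# FINISHED back edge.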