# MediaPipe graph that performs face detection with TensorFlow Lite on CPU.

# CPU buffer. (ImageFrame)
input_stream: "input_video"

# Output image with rendered results. (ImageFrame)
output_stream: "output_video"

# Detected faces. (std::vector<Detection>)
output_stream: "face_detections"

# Throttles the images flowing downstream for flow control. It passes through
# the very first incoming image unaltered, and waits for downstream nodes
# (calculators and subgraphs) in the graph to finish their tasks before it
# passes through another image. All images that come in while waiting are
# dropped, limiting the number of in-flight images in most parts of the graph
# to 1. This prevents the downstream nodes from queuing up incoming images and
# data excessively, which leads to increased latency and memory usage, unwanted
# in real-time mobile applications. It also eliminates unnecessary computation,
# e.g., the output produced by a node may get dropped downstream if the
# subsequent nodes are still busy processing previous inputs.
node {
  calculator: "FlowLimiterCalculator"
  input_stream: "input_video"
  input_stream: "FINISHED:output_video"
  input_stream_info: {
    tag_index: "FINISHED"
    back_edge: true
  }
  output_stream: "throttled_input_video"
}

# Subgraph that detects faces.
node {
  calculator: "FaceDetectionShortRangeCpu"
  input_stream: "IMAGE:throttled_input_video"
  output_stream: "DETECTIONS:face_detections"
}

# Converts the detections to drawing primitives for annotation overlay.
node {
  calculator: "DetectionsToRenderDataCalculator"
  input_stream: "DETECTIONS:face_detections"
  output_stream: "RENDER_DATA:render_data"
  node_options: {
    [type.googleapis.com/mediapipe.DetectionsToRenderDataCalculatorOptions] {
      thickness: 4.0
      color { r: 255 g: 0 b: 0 }
    }
  }
}

# Draws annotations and overlays them on top of the input images.
node {
  calculator: "AnnotationOverlayCalculator"
  input_stream: "IMAGE:throttled_input_video"
  input_stream: "render_data"
  output_stream: "IMAGE:output_video"
}
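
# Optional tuning (a sketch, not wired into this graph): the flow limiter
# above relies on its default of a single in-flight image. If a deeper
# pipeline is ever needed, FlowLimiterCalculator also accepts explicit
# options, a pattern used in other MediaPipe graphs; the max_in_flight and
# max_in_queue values below are illustrative, not recommendations.
#
# node {
#   calculator: "FlowLimiterCalculator"
#   input_stream: "input_video"
#   input_stream: "FINISHED:output_video"
#   input_stream_info: {
#     tag_index: "FINISHED"
#     back_edge: true
#   }
#   output_stream: "throttled_input_video"
#   node_options: {
#     [type.googleapis.com/mediapipe.FlowLimiterCalculatorOptions] {
#       max_in_flight: 1
#       max_in_queue: 1
#     }
#   }
# }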
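
# Optional extension (a sketch, not part of the original graph):
# AnnotationOverlayCalculator draws all of the untagged RenderData input
# streams it receives, so further overlays can be stacked by feeding it more
# render data streams. The "extra_render_data" stream below is hypothetical
# and would have to be produced by another node in the graph.
#
# node {
#   calculator: "AnnotationOverlayCalculator"
#   input_stream: "IMAGE:throttled_input_video"
#   input_stream: "render_data"
#   input_stream: "extra_render_data"
#   output_stream: "IMAGE:output_video"
# }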