# MediaPipe graph to render hand landmarks and some related debug information.

type: "HandRendererSubgraph"

# CPU image. (ImageFrame)
input_stream: "IMAGE:input_image"
# Collection of detected/predicted hands, each represented as a list of
# landmarks. (std::vector<NormalizedLandmarkList>)
input_stream: "LANDMARKS:multi_hand_landmarks"
# Handedness of the detected hand (i.e. whether the hand is left or right).
# (std::vector<ClassificationList>)
input_stream: "HANDEDNESS:multi_handedness"
# Regions of interest calculated based on palm detections.
# (std::vector<NormalizedRect>)
input_stream: "NORM_RECTS:0:multi_palm_rects"
# Regions of interest calculated based on landmarks.
# (std::vector<NormalizedRect>)
input_stream: "NORM_RECTS:1:multi_hand_rects"
# Detected palms. (std::vector<Detection>)
input_stream: "DETECTIONS:palm_detections"

# Updated CPU image. (ImageFrame)
output_stream: "IMAGE:output_image"

# Converts detections to drawing primitives for annotation overlay.
node {
  calculator: "DetectionsToRenderDataCalculator"
  input_stream: "DETECTIONS:palm_detections"
  output_stream: "RENDER_DATA:detection_render_data"
  node_options: {
    [type.googleapis.com/mediapipe.DetectionsToRenderDataCalculatorOptions] {
      thickness: 4.0
      color { r: 0 g: 255 b: 0 }
    }
  }
}

# Converts normalized rects to drawing primitives for annotation overlay.
node {
  calculator: "RectToRenderDataCalculator"
  input_stream: "NORM_RECTS:multi_hand_rects"
  output_stream: "RENDER_DATA:multi_hand_rects_render_data"
  node_options: {
    [type.googleapis.com/mediapipe.RectToRenderDataCalculatorOptions] {
      filled: false
      color { r: 255 g: 0 b: 0 }
      thickness: 4.0
    }
  }
}

# Converts normalized rects to drawing primitives for annotation overlay.
node {
  calculator: "RectToRenderDataCalculator"
  input_stream: "NORM_RECTS:multi_palm_rects"
  output_stream: "RENDER_DATA:multi_palm_rects_render_data"
  node_options: {
    [type.googleapis.com/mediapipe.RectToRenderDataCalculatorOptions] {
      filled: false
      color { r: 125 g: 0 b: 122 }
      thickness: 4.0
    }
  }
}

# Outputs each element of multi_hand_landmarks at a fake timestamp for the rest
# of the graph to process. At the end of the loop, outputs the BATCH_END
# timestamp for downstream calculators to inform them that all elements in the
# vector have been processed.
node {
  calculator: "BeginLoopNormalizedLandmarkListVectorCalculator"
  input_stream: "ITERABLE:multi_hand_landmarks"
  output_stream: "ITEM:single_hand_landmarks"
  output_stream: "BATCH_END:landmark_timestamp"
}
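
# Every node between this BeginLoop calculator and the EndLoopRenderDataCalculator
# below runs once per hand, on the single_hand_landmarks stream.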

# Converts landmarks to drawing primitives for annotation overlay.
node {
  calculator: "LandmarksToRenderDataCalculator"
  input_stream: "NORM_LANDMARKS:single_hand_landmarks"
  output_stream: "RENDER_DATA:single_hand_landmark_render_data"
  node_options: {
    [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] {
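      # The landmark_connections values below are read in consecutive pairs,
      # each pair (start, end) defining one edge of the hand skeleton. With the
      # standard 21-point hand landmark model (0 = wrist, 1-4 thumb, 5-8 index,
      # 9-12 middle, 13-16 ring, 17-20 pinky) they trace the palm and fingers.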
      landmark_connections: 0
      landmark_connections: 1
      landmark_connections: 1
      landmark_connections: 2
      landmark_connections: 2
      landmark_connections: 3
      landmark_connections: 3
      landmark_connections: 4
      landmark_connections: 0
      landmark_connections: 5
      landmark_connections: 5
      landmark_connections: 6
      landmark_connections: 6
      landmark_connections: 7
      landmark_connections: 7
      landmark_connections: 8
      landmark_connections: 5
      landmark_connections: 9
      landmark_connections: 9
      landmark_connections: 10
      landmark_connections: 10
      landmark_connections: 11
      landmark_connections: 11
      landmark_connections: 12
      landmark_connections: 9
      landmark_connections: 13
      landmark_connections: 13
      landmark_connections: 14
      landmark_connections: 14
      landmark_connections: 15
      landmark_connections: 15
      landmark_connections: 16
      landmark_connections: 13
      landmark_connections: 17
      landmark_connections: 0
      landmark_connections: 17
      landmark_connections: 17
      landmark_connections: 18
      landmark_connections: 18
      landmark_connections: 19
      landmark_connections: 19
      landmark_connections: 20
      landmark_color { r: 255 g: 0 b: 0 }
      connection_color { r: 0 g: 255 b: 0 }
      thickness: 4.0
    }
  }
}

# Collects a RenderData object for each hand into a vector. Upon receiving the
# BATCH_END timestamp, outputs the vector of RenderData at the BATCH_END
# timestamp.
node {
  calculator: "EndLoopRenderDataCalculator"
  input_stream: "ITEM:single_hand_landmark_render_data"
  input_stream: "BATCH_END:landmark_timestamp"
  output_stream: "ITERABLE:multi_hand_landmarks_render_data"
}

# Don't render handedness if more than one handedness is reported.
node {
  calculator: "ClassificationListVectorHasMinSizeCalculator"
  input_stream: "ITERABLE:multi_handedness"
  output_stream: "disallow_handedness_rendering"
  node_options: {
    [type.googleapis.com/mediapipe.CollectionHasMinSizeCalculatorOptions] {
      min_size: 2
    }
  }
}
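
# Drops the handedness packets when disallow_handedness_rendering is true,
# i.e. when more than one handedness is reported above.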
node {
  calculator: "GateCalculator"
  input_stream: "multi_handedness"
  input_stream: "DISALLOW:disallow_handedness_rendering"
  output_stream: "allowed_multi_handedness"
  node_options: {
    [type.googleapis.com/mediapipe.GateCalculatorOptions] {
      empty_packets_as_allow: false
    }
  }
}
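
# Keeps only the first ClassificationList (range [0, 1)); with element_only set,
# the output is that element itself rather than a one-element vector.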
node {
  calculator: "SplitClassificationListVectorCalculator"
  input_stream: "allowed_multi_handedness"
  output_stream: "handedness"
  node_options: {
    [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] {
      ranges: { begin: 0 end: 1 }
      element_only: true
    }
  }
}

# Converts classification to drawing primitives for annotation overlay.
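# The handedness label is drawn near the top-left corner of the frame (see
# location and the pixel offsets below).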
node {
  calculator: "LabelsToRenderDataCalculator"
  input_stream: "CLASSIFICATIONS:handedness"
  output_stream: "RENDER_DATA:handedness_render_data"
  node_options: {
    [type.googleapis.com/mediapipe.LabelsToRenderDataCalculatorOptions] {
      color { r: 255 g: 0 b: 0 }
      thickness: 10.0
      font_height_px: 50
      horizontal_offset_px: 30
      vertical_offset_px: 50

      max_num_labels: 1
      location: TOP_LEFT
    }
  }
}

# Draws annotations and overlays them on top of the input images. Consumes
# a vector of RenderData objects and draws each of them on the input frame.
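# The untagged inputs below each carry a single RenderData object, while the
# VECTOR:0 input carries the per-hand vector of RenderData built by the loop
# above.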
node {
  calculator: "AnnotationOverlayCalculator"
  input_stream: "IMAGE:input_image"
  input_stream: "detection_render_data"
  input_stream: "multi_hand_rects_render_data"
  input_stream: "multi_palm_rects_render_data"
  input_stream: "handedness_render_data"
  input_stream: "VECTOR:0:multi_hand_landmarks_render_data"
  output_stream: "IMAGE:output_image"
}