add segmentor
parent 4fc9f7f3f8
commit c9df4410fb
75  src/graphs/holistic_tracking_cpu.pbtxt  Normal file
@@ -0,0 +1,75 @@
+# Tracks and renders pose + hands + face landmarks.
+
+# CPU image. (ImageFrame)
+input_stream: "input_video"
+
+# CPU image with rendered results. (ImageFrame)
+output_stream: "output_video"
+
+# Throttles the images flowing downstream for flow control. It passes through
+# the very first incoming image unaltered, and waits for downstream nodes
+# (calculators and subgraphs) in the graph to finish their tasks before it
+# passes through another image. All images that come in while waiting are
+# dropped, limiting the number of in-flight images in most parts of the graph
+# to 1. This prevents the downstream nodes from queuing up incoming images and
+# data excessively, which leads to increased latency and memory usage, both
+# unwanted in real-time mobile applications. It also eliminates unnecessary
+# computation, e.g., the output produced by a node may get dropped downstream
+# if the subsequent nodes are still busy processing previous inputs.
+node {
+  calculator: "FlowLimiterCalculator"
+  input_stream: "input_video"
+  input_stream: "FINISHED:output_video"
+  input_stream_info: {
+    tag_index: "FINISHED"
+    back_edge: true
+  }
+  output_stream: "throttled_input_video"
+  node_options: {
+    [type.googleapis.com/mediapipe.FlowLimiterCalculatorOptions] {
+      max_in_flight: 1
+      max_in_queue: 1
+      # Timeout is disabled (set to 0) as first frame processing can take more
+      # than 1 second.
+      in_flight_timeout: 0
+    }
+  }
+}
+
+node {
+  calculator: "HolisticLandmarkCpu"
+  input_stream: "IMAGE:throttled_input_video"
+  output_stream: "POSE_LANDMARKS:pose_landmarks"
+  output_stream: "POSE_ROI:pose_roi"
+  output_stream: "POSE_DETECTION:pose_detection"
+  output_stream: "FACE_LANDMARKS:face_landmarks"
+  output_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks"
+  output_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks"
+}
+
+# Gets image size.
+node {
+  calculator: "ImagePropertiesCalculator"
+  input_stream: "IMAGE:throttled_input_video"
+  output_stream: "SIZE:image_size"
+}
+
+# Converts pose, hands and face landmarks to a render data vector.
+node {
+  calculator: "HolisticTrackingToRenderData"
+  input_stream: "IMAGE_SIZE:image_size"
+  input_stream: "POSE_LANDMARKS:pose_landmarks"
+  input_stream: "POSE_ROI:pose_roi"
+  input_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks"
+  input_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks"
+  input_stream: "FACE_LANDMARKS:face_landmarks"
+  output_stream: "RENDER_DATA_VECTOR:render_data_vector"
+}
+
+# Draws annotations and overlays them on top of the input images.
+node {
+  calculator: "AnnotationOverlayCalculator"
+  input_stream: "IMAGE:throttled_input_video"
+  input_stream: "VECTOR:render_data_vector"
+  output_stream: "IMAGE:output_video"
+}
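
Note: the FlowLimiterCalculator block above implements a lossy, bounded pipeline: at most one image in flight, one queued, and everything else dropped. As a rough Rust analogy only (illustrative, not MediaPipe code), the same drop-instead-of-queue behavior looks like this:

    use std::sync::mpsc;
    use std::thread;
    use std::time::Duration;

    fn main() {
        // Capacity 1 loosely models "max_in_flight: 1" plus "max_in_queue: 1":
        // the worker holds one frame while at most one more waits in the channel.
        let (tx, rx) = mpsc::sync_channel::<u32>(1);
        let worker = thread::spawn(move || {
            for frame in rx {
                thread::sleep(Duration::from_millis(50)); // slow downstream work
                println!("processed frame {frame}");
            }
        });
        for frame in 0..20 {
            // try_send never blocks: when the channel is full the frame is
            // simply dropped, mirroring how the FlowLimiter discards images
            // that arrive while downstream nodes are still busy.
            let _ = tx.try_send(frame);
            thread::sleep(Duration::from_millis(5));
        }
        drop(tx); // close the channel so the worker loop ends
        worker.join().unwrap();
    }
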
52  src/graphs/selfie_segmentation_cpu.pbtxt  Normal file
@@ -0,0 +1,52 @@
+# MediaPipe graph that performs selfie segmentation with TensorFlow Lite on CPU.
+
+# CPU buffer. (ImageFrame)
+input_stream: "input_video"
+
+# Output image with rendered results. (ImageFrame)
+output_stream: "output_video"
+
+# Throttles the images flowing downstream for flow control. It passes through
+# the very first incoming image unaltered, and waits for downstream nodes
+# (calculators and subgraphs) in the graph to finish their tasks before it
+# passes through another image. All images that come in while waiting are
+# dropped, limiting the number of in-flight images in most parts of the graph
+# to 1. This prevents the downstream nodes from queuing up incoming images and
+# data excessively, which leads to increased latency and memory usage, both
+# unwanted in real-time mobile applications. It also eliminates unnecessary
+# computation, e.g., the output produced by a node may get dropped downstream
+# if the subsequent nodes are still busy processing previous inputs.
+node {
+  calculator: "FlowLimiterCalculator"
+  input_stream: "input_video"
+  input_stream: "FINISHED:output_video"
+  input_stream_info: {
+    tag_index: "FINISHED"
+    back_edge: true
+  }
+  output_stream: "throttled_input_video"
+}
+
+# Subgraph that performs selfie segmentation.
+node {
+  calculator: "SelfieSegmentationCpu"
+  input_stream: "IMAGE:throttled_input_video"
+  output_stream: "SEGMENTATION_MASK:segmentation_mask"
+}
+
+
+# Colors the selfie segmentation with the color specified in the option.
+node {
+  calculator: "RecolorCalculator"
+  input_stream: "IMAGE:throttled_input_video"
+  input_stream: "MASK:segmentation_mask"
+  output_stream: "IMAGE:output_video"
+  node_options: {
+    [type.googleapis.com/mediapipe.RecolorCalculatorOptions] {
+      color { r: 0 g: 0 b: 255 }
+      mask_channel: RED
+      invert_mask: true
+      adjust_with_luminance: false
+    }
+  }
+}
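
Note: the RecolorCalculator options above tint with blue (b: 255), read the mask from the RED channel, and, because invert_mask is true, apply the color where the mask is low (for a selfie mask, the background), leaving the person untouched. A conceptual per-pixel sketch of that idea in Rust (an approximation, not the calculator's exact blending math):

    /// Conceptual per-pixel model (approximation, not MediaPipe's actual code):
    /// blend each pixel toward `color`, weighted by the (optionally inverted)
    /// segmentation mask value.
    fn recolor_rgb(frame: &mut [u8], mask: &[u8], color: [u8; 3], invert_mask: bool) {
        assert_eq!(frame.len(), mask.len() * 3, "RGB frame, single-channel mask");
        for (px, &m) in frame.chunks_exact_mut(3).zip(mask) {
            // Mask weight in [0, 1]; inverting selects the background instead
            // of the person for a selfie-style mask.
            let mut w = m as f32 / 255.0;
            if invert_mask {
                w = 1.0 - w;
            }
            for c in 0..3 {
                px[c] = (px[c] as f32 * (1.0 - w) + color[c] as f32 * w) as u8;
            }
        }
    }
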
37  src/lib.rs
@@ -220,7 +220,7 @@ pub mod pose {
         pub fn new() -> Self {
             let graph = Detector::new(
                 POSE_GRAPH_TYPE,
-                include_str!("graphs/pose_tracking_cpu.txt"),
+                include_str!("graphs/pose_tracking_cpu.pbtxt"),
                 "pose_landmarks",
             );

@@ -260,7 +260,7 @@ pub mod face_mesh {
         pub fn new() -> Self {
             let graph = Detector::new(
                 FACE_GRAPH_TYPE,
-                include_str!("graphs/face_mesh_desktop_live.txt"),
+                include_str!("graphs/face_mesh_desktop_live.pbtxt"),
                 "multi_face_landmarks",
             );

@@ -325,7 +325,7 @@ pub mod hands {
         pub fn new() -> Self {
             let graph = Detector::new(
                 HANDS_GRAPH_TYPE,
-                include_str!("graphs/hand_tracking_desktop_live.txt"),
+                include_str!("graphs/hand_tracking_desktop_live.pbtxt"),
                 "hand_landmarks",
             );

@@ -355,3 +355,34 @@ pub mod hands {
         }
     }
 }
+
+pub mod segmentation {
+    //! Selfie segmentation utilities.
+    use super::*;
+
+    pub struct Segmentor {
+        graph: Effect,
+    }
+
+    impl Segmentor {
+        pub fn new() -> Self {
+            let graph = Effect::new(
+                include_str!("graphs/selfie_segmentation_cpu.pbtxt"),
+                "output_video",
+            );
+
+            Self { graph }
+        }
+
+        /// Processes the input frame, returns the output frame.
+        pub fn process(&mut self, input: &Mat) -> Mat {
+            self.graph.process(input)
+        }
+    }
+
+    impl Default for Segmentor {
+        fn default() -> Self {
+            Self::new()
+        }
+    }
+}
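
A minimal usage sketch for the new module, assuming the crate is consumed alongside the opencv crate (the Mat type in process suggests as much). The crate path `mediapipe` and the capture loop are illustrative assumptions, not part of this commit:

    use opencv::{highgui, prelude::*, videoio};

    fn main() -> opencv::Result<()> {
        // Hypothetical crate path; the Segmentor itself is the module added
        // to src/lib.rs in this commit.
        let mut segmentor = mediapipe::segmentation::Segmentor::new();

        let mut cap = videoio::VideoCapture::new(0, videoio::CAP_ANY)?;
        let mut frame = Mat::default();
        loop {
            if !cap.read(&mut frame)? {
                break;
            }
            // Runs selfie_segmentation_cpu.pbtxt: the background should come
            // back tinted blue while the person keeps the original pixels.
            let output = segmentor.process(&frame);
            highgui::imshow("selfie segmentation", &output)?;
            if highgui::wait_key(1)? == 27 {
                break; // Esc quits
            }
        }
        Ok(())
    }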