Internal change

PiperOrigin-RevId: 487663454
Authored by MediaPipe Team on 2022-11-10 16:02:04 -08:00; committed by Copybara-Service
parent 1a59f0a217
commit 2ea5184c51


@@ -276,33 +276,44 @@ class HandLandmarkerGraph : public core::ModelTaskGraph {
         .set_min_size(max_num_hands);
     auto has_enough_hands = min_size_node.Out("").Cast<bool>();
-    auto image_for_hand_detector =
-        DisallowIf(image_in, has_enough_hands, graph);
-    auto norm_rect_in_for_hand_detector =
-        DisallowIf(norm_rect_in, has_enough_hands, graph);
     auto& hand_detector =
         graph.AddNode("mediapipe.tasks.vision.hand_detector.HandDetectorGraph");
     hand_detector.GetOptions<HandDetectorGraphOptions>().CopyFrom(
         tasks_options.hand_detector_graph_options());
-    image_for_hand_detector >> hand_detector.In("IMAGE");
-    norm_rect_in_for_hand_detector >> hand_detector.In("NORM_RECT");
-    auto hand_rects_from_hand_detector = hand_detector.Out("HAND_RECTS");
-    auto& hand_association = graph.AddNode("HandAssociationCalculator");
-    hand_association.GetOptions<HandAssociationCalculatorOptions>()
-        .set_min_similarity_threshold(tasks_options.min_tracking_confidence());
-    prev_hand_rects_from_landmarks >>
-        hand_association[Input<std::vector<NormalizedRect>>::Multiple("")][0];
-    hand_rects_from_hand_detector >>
-        hand_association[Input<std::vector<NormalizedRect>>::Multiple("")][1];
-    auto hand_rects = hand_association.Out("");
     auto& clip_hand_rects =
         graph.AddNode("ClipNormalizedRectVectorSizeCalculator");
     clip_hand_rects.GetOptions<ClipVectorSizeCalculatorOptions>()
         .set_max_vec_size(max_num_hands);
-    hand_rects >> clip_hand_rects.In("");
+    if (tasks_options.base_options().use_stream_mode()) {
+      // While in stream mode, skip hand detector graph when we successfully
+      // track the hands from the last frame.
+      auto image_for_hand_detector =
+          DisallowIf(image_in, has_enough_hands, graph);
+      auto norm_rect_in_for_hand_detector =
+          DisallowIf(norm_rect_in, has_enough_hands, graph);
+      image_for_hand_detector >> hand_detector.In("IMAGE");
+      norm_rect_in_for_hand_detector >> hand_detector.In("NORM_RECT");
+      auto hand_rects_from_hand_detector = hand_detector.Out("HAND_RECTS");
+      auto& hand_association = graph.AddNode("HandAssociationCalculator");
+      hand_association.GetOptions<HandAssociationCalculatorOptions>()
+          .set_min_similarity_threshold(
+              tasks_options.min_tracking_confidence());
+      prev_hand_rects_from_landmarks >>
+          hand_association[Input<std::vector<NormalizedRect>>::Multiple("")][0];
+      hand_rects_from_hand_detector >>
+          hand_association[Input<std::vector<NormalizedRect>>::Multiple("")][1];
+      auto hand_rects = hand_association.Out("");
+      hand_rects >> clip_hand_rects.In("");
+    } else {
+      // While not in stream mode, the input images are not guaranteed to be in
+      // series, and we don't want to enable the tracking and hand associations
+      // between input images. Always use the hand detector graph.
+      image_in >> hand_detector.In("IMAGE");
+      norm_rect_in >> hand_detector.In("NORM_RECT");
+      auto hand_rects_from_hand_detector = hand_detector.Out("HAND_RECTS");
+      hand_rects_from_hand_detector >> clip_hand_rects.In("");
+    }
     auto clipped_hand_rects = clip_hand_rects.Out("");
     auto& hand_landmarks_detector_graph = graph.AddNode(
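
For readers unfamiliar with the gating pattern used above: DisallowIf() routes a stream through a gate so that its packets are dropped on timestamps where the condition stream is true, which is what lets the stream-mode branch skip HandDetectorGraph while enough hands are already being tracked from the previous frame. The sketch below illustrates that idea with the api2 graph builder and the core GateCalculator; the helper name GateWhenNotTracking and the exact stream handle types are illustrative assumptions, not the helper actually used in this graph.

// Illustrative sketch only (not MediaPipe's DisallowIf implementation).
// Assumes GateCalculator's DISALLOW semantics: data packets are dropped on
// timestamps where the DISALLOW stream carries `true`.
#include "mediapipe/framework/api2/builder.h"
#include "mediapipe/framework/formats/image.h"

namespace {

using ::mediapipe::Image;
using ::mediapipe::api2::builder::Graph;

// Hypothetical helper: passes `image` through a GateCalculator that blocks
// packets whenever `has_enough_hands` is true, so a downstream detector only
// runs on frames where tracking no longer supplies enough hands.
template <typename ImageStreamT, typename BoolStreamT>
auto GateWhenNotTracking(ImageStreamT image, BoolStreamT has_enough_hands,
                         Graph& graph) {
  auto& gate = graph.AddNode("GateCalculator");
  image >> gate.In("");                     // untagged data stream to be gated
  has_enough_hands >> gate.In("DISALLOW");  // bool control stream
  return gate.Out("").Cast<Image>();        // gated copy of the image stream
}

}  // namespace

In the hunk above, the real helper is applied to both the IMAGE and NORM_RECT inputs, so in stream mode HandDetectorGraph only receives packets on frames where fewer than max_num_hands hand rects were carried over from the previous frame's landmarks.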