# MediaPipe graph that performs LSTM based object detection on desktop with
# TensorFlow Lite on CPU.

# max_queue_size limits the number of packets enqueued on any input stream
# by throttling inputs to the graph. This makes the graph process only one
# frame at a time.
max_queue_size: 1
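
# To run this graph with a MediaPipe desktop example binary (a minimal sketch;
# the exact Bazel target and flags are assumptions based on the standard
# desktop TFLite object-detection demo, not confirmed by this file):
#
#   bazel build -c opt --define MEDIAPIPE_DISABLE_GPU=1 \
#     mediapipe/examples/desktop/object_detection:object_detection_tflite
#   GLOG_logtostderr=1 \
#     bazel-bin/mediapipe/examples/desktop/object_detection/object_detection_tflite \
#     --calculator_graph_config_file=<path to this .pbtxt> \
#     --input_side_packets=input_video_path=<input video>,output_video_path=<output video>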
|
# Decodes an input video file into images and a video header.
node {
  calculator: "OpenCvVideoDecoderCalculator"
  input_side_packet: "INPUT_FILE_PATH:input_video_path"
  output_stream: "VIDEO:input_video"
  output_stream: "VIDEO_PRESTREAM:input_video_header"
}
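
# The VIDEO_PRESTREAM packet carries the video header (e.g., dimensions and
# framerate) ahead of the image frames; the OpenCvVideoEncoderCalculator at
# the end of this graph reads it to configure the output file.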
|
# Transforms the input image on CPU to a 256x256 image. To scale the image, by
# default it uses the STRETCH scale mode that maps the entire input image to the
# entire transformed image. As a result, image aspect ratio may be changed and
# objects in the image may be deformed (stretched or squeezed), but the object
# detection model used in this graph is agnostic to that deformation.
node {
  calculator: "ImageTransformationCalculator"
  input_stream: "IMAGE:input_video"
  output_stream: "IMAGE:transformed_input_video"
  node_options: {
    [type.googleapis.com/mediapipe.ImageTransformationCalculatorOptions] {
      output_width: 256
      output_height: 256
    }
  }
}
|
# Converts the transformed input image on CPU into an image tensor as a
# TfLiteTensor. The zero_center option is set to true to normalize the
# pixel values to [-1.f, 1.f] as opposed to [0.f, 1.f].
node {
  calculator: "TfLiteConverterCalculator"
  input_stream: "IMAGE:transformed_input_video"
  output_stream: "TENSORS:image_tensor"
  node_options: {
    [type.googleapis.com/mediapipe.TfLiteConverterCalculatorOptions] {
      zero_center: true
      use_quantized_tensors: true
    }
  }
}
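
# For reference: with zero_center, an 8-bit pixel value v in [0, 255] maps to
# roughly v / 127.5 - 1.0, so 0 -> -1.0 and 255 -> 1.0 (the exact scaling
# constant is an assumption based on the option's description above). Note
# that with use_quantized_tensors: true the converter may instead emit uint8
# tensors, in which case this float normalization does not apply.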
|
# Joins the input image_tensor and previous lstm state tensors into a single
# output vector of tensors.
node {
  calculator: "ConcatenateTfLiteTensorVectorCalculator"
  input_stream: "image_tensor"
  input_stream: "prev_lstm_tensors"
  output_stream: "concatenated_tensors"
}
|
# Runs a TensorFlow Lite model on CPU that takes an image tensor and state
# tensors, then outputs a vector of tensors representing, for instance,
# detection boxes/keypoints and scores.
node {
  calculator: "TfLiteInferenceCalculator"
  input_stream: "TENSORS:concatenated_tensors"
  output_stream: "TENSORS:inference_tensors"
  node_options: {
    [type.googleapis.com/mediapipe.TfLiteInferenceCalculatorOptions] {
      model_path: "mediapipe/models/lstdlite_object_detection.tflite"
    }
  }
}
|
# Splits the output tensors into two streams: [detection_boxes,
# detection_classes, detection_scores, num_detections] and [lstm_c, lstm_h].
node {
  calculator: "SplitTfLiteTensorVectorCalculator"
  input_stream: "inference_tensors"
  output_stream: "detection_tensors"
  output_stream: "lstm_tensors"
  options {
    [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] {
      ranges: { begin: 0 end: 4 }
      ranges: { begin: 4 end: 6 }
    }
  }
}
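
# The ranges above mirror the model's output order: inference_tensors[0..3]
# (boxes, classes, scores, num_detections) become detection_tensors, while
# inference_tensors[4..5] (the LSTM cell and hidden states, lstm_c and lstm_h)
# become lstm_tensors, which are fed back for the next frame below.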
|
# Waits for the LSTM state output from the TfLite inference to feed back in as
# an input tensor for the next frame.
node {
  calculator: "PreviousLoopbackCalculator"
  input_stream: "MAIN:transformed_input_video"
  input_stream: "LOOP:lstm_tensors"
  input_stream_info: { tag_index: "LOOP" back_edge: true }
  output_stream: "PREV_LOOP:prev_lstm_tensors"
}
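
# Because LOOP is declared a back edge, the graph does not deadlock on this
# cycle: PREV_LOOP at timestamp t carries the lstm_tensors from timestamp
# t-1, and at the very first frame an empty packet is emitted instead
# (first-frame behavior is an assumption based on the calculator's usual
# description).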
|
# Decodes the detection tensors generated by the TensorFlow Lite model, based on
# the specification in the options, into a vector of detections. Each detection
# describes a detected object.
node {
  calculator: "TfLiteTensorsToDetectionsCalculator"
  input_stream: "TENSORS:detection_tensors"
  output_stream: "DETECTIONS:detections"
  node_options: {
    [type.googleapis.com/mediapipe.TfLiteTensorsToDetectionsCalculatorOptions] {
      num_classes: 7
      num_boxes: 40
      num_coords: 4
    }
  }
}
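
# Given these options, each frame yields 40 candidate boxes with 4 coordinates
# each (160 box values) plus 7 class scores per box (280 score values); the
# exact tensor layout is an assumption, only the counts follow from the
# options above.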
|
# Performs non-max suppression to remove excessive detections.
node {
  calculator: "NonMaxSuppressionCalculator"
  input_stream: "detections"
  output_stream: "filtered_detections"
  node_options: {
    [type.googleapis.com/mediapipe.NonMaxSuppressionCalculatorOptions] {
      min_suppression_threshold: 0.4
      min_score_threshold: 0.2
      max_num_detections: 5
      overlap_type: INTERSECTION_OVER_UNION
    }
  }
}
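
# With INTERSECTION_OVER_UNION, a lower-scoring detection is suppressed when
# its overlap with a higher-scoring one, measured as intersection area over
# union area, exceeds min_suppression_threshold (0.4); detections scoring
# below 0.2 are discarded, and at most 5 detections survive per frame.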
|
# Maps detection label IDs to the corresponding label text. The label map is
# provided in the label_map_path option.
node {
  calculator: "DetectionLabelIdToTextCalculator"
  input_stream: "filtered_detections"
  output_stream: "output_detections"
  node_options: {
    [type.googleapis.com/mediapipe.DetectionLabelIdToTextCalculatorOptions] {
      label_map_path: "mediapipe/models/lstdlite_object_detection_labelmap.txt"
    }
  }
}
|
# Converts the detections to drawing primitives for annotation overlay.
node {
  calculator: "DetectionsToRenderDataCalculator"
  input_stream: "DETECTIONS:output_detections"
  output_stream: "RENDER_DATA:render_data"
  node_options: {
    [type.googleapis.com/mediapipe.DetectionsToRenderDataCalculatorOptions] {
      thickness: 2.0
      color { r: 255 g: 255 b: 255 }
    }
  }
}
|
# Draws annotations and overlays them on top of the original image coming into
# the graph.
node {
  calculator: "AnnotationOverlayCalculator"
  input_stream: "INPUT_FRAME:input_video"
  input_stream: "render_data"
  output_stream: "OUTPUT_FRAME:output_video"
}
|
# Encodes the annotated images into a video file, adopting properties specified
# in the input video header, e.g., video framerate.
node {
  calculator: "OpenCvVideoEncoderCalculator"
  input_stream: "VIDEO:output_video"
  input_stream: "VIDEO_PRESTREAM:input_video_header"
  input_side_packet: "OUTPUT_FILE_PATH:output_video_path"
  node_options: {
    [type.googleapis.com/mediapipe.OpenCvVideoEncoderCalculatorOptions] {
      codec: "avc1"
      video_format: "mp4"
    }
  }
}