# MediaPipe graph performing common processing to detect faces using
# face_detection_full_range_sparse.tflite model, currently consisting of tensor
# post processing.
#
# EXAMPLE:
# node {
#   calculator: "FaceDetectionFullRangeCommon"
#   input_stream: "TENSORS:detection_tensors"
#   input_stream: "MATRIX:transform_matrix"
#   output_stream: "DETECTIONS:detections"
# }
type: "FaceDetectionShortRangeCommon"
|
|
|
|

# Detection tensors. (std::vector<Tensor>)
input_stream: "TENSORS:detection_tensors"

# A 4x4 row-major-order matrix that maps a point represented in the detection
# tensors to a desired coordinate system, e.g., in the original input image
# before scaling/cropping. (std::array<float, 16>)
input_stream: "MATRIX:transform_matrix"

# Detected faces. (std::vector<Detection>)
# NOTE: there will not be an output packet in the DETECTIONS stream for this
# particular timestamp if no faces are detected. However, the MediaPipe
# framework will internally inform the downstream calculators of the absence of
# this packet so that they don't wait for it unnecessarily.
output_stream: "DETECTIONS:detections"

# Generates a single side packet containing a vector of SSD anchors based on
# the specification in the options.
node {
  calculator: "SsdAnchorsCalculator"
  output_side_packet: "anchors"
  options: {
    [mediapipe.SsdAnchorsCalculatorOptions.ext] {
      num_layers: 1
      min_scale: 0.1484375
      max_scale: 0.75
      input_size_height: 192
      input_size_width: 192
      anchor_offset_x: 0.5
      anchor_offset_y: 0.5
      strides: 4
      aspect_ratios: 1.0
      fixed_anchor_size: true
      interpolated_scale_aspect_ratio: 0.0
    }
  }
}
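
# NOTE (added for clarity; derived from the options above): with a 192x192
# input and a single layer of stride 4, the anchor grid is 48x48 cells, and
# with one aspect ratio and interpolated_scale_aspect_ratio: 0.0 there is one
# anchor per cell, i.e. 48 * 48 = 2304 anchors. This matches num_boxes: 2304
# in the TensorsToDetectionsCalculator node below.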

# Decodes the detection tensors generated by the TensorFlow Lite model, based on
# the SSD anchors and the specification in the options, into a vector of
# detections. Each detection describes a detected object.
node {
  calculator: "TensorsToDetectionsCalculator"
  input_stream: "TENSORS:detection_tensors"
  input_side_packet: "ANCHORS:anchors"
  output_stream: "DETECTIONS:unfiltered_detections"
  options: {
    [mediapipe.TensorsToDetectionsCalculatorOptions.ext] {
      num_classes: 1
      num_boxes: 2304
      num_coords: 16
      box_coord_offset: 0
      keypoint_coord_offset: 4
      num_keypoints: 6
      num_values_per_keypoint: 2
      sigmoid_score: true
      score_clipping_thresh: 100.0
      reverse_output_order: true
      x_scale: 192.0
      y_scale: 192.0
      h_scale: 192.0
      w_scale: 192.0
      min_score_thresh: 0.6
    }
  }
}
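
# NOTE (added for clarity; derived from the options above): num_coords: 16 is
# 4 box coordinates starting at box_coord_offset: 0, followed by 6 keypoints
# with 2 values each starting at keypoint_coord_offset: 4 (4 + 6 * 2 = 16).
# The x/y/w/h scales of 192.0 map the raw tensor values back to the model's
# 192x192 input resolution.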

# Performs non-max suppression to remove excessive detections.
node {
  calculator: "NonMaxSuppressionCalculator"
  input_stream: "unfiltered_detections"
  output_stream: "filtered_detections"
  options: {
    [mediapipe.NonMaxSuppressionCalculatorOptions.ext] {
      min_suppression_threshold: 0.3
      overlap_type: INTERSECTION_OVER_UNION
      algorithm: WEIGHTED
    }
  }
}
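
# NOTE (added for clarity): algorithm: WEIGHTED selects blending non-max
# suppression, where candidates whose IoU exceeds min_suppression_threshold
# are merged into a single detection weighted by their scores, rather than
# simply discarding all but the highest-scoring one.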

# Projects the detections from the input tensor to the corresponding locations
# on the original image (input to the graph).
node {
  calculator: "DetectionProjectionCalculator"
  input_stream: "DETECTIONS:filtered_detections"
  input_stream: "PROJECTION_MATRIX:transform_matrix"
  output_stream: "DETECTIONS:detections"
}
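
# NOTE (added for clarity; assumption about typical usage): the MATRIX input of
# this subgraph is expected to be the transform produced when the original
# image was mapped to the model's input tensor (for example, the MATRIX output
# of ImageToTensorCalculator), so that the filtered detections can be projected
# back into original-image coordinates here.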