# MediaPipe graph that performs iris distance computation on desktop with
# TensorFlow Lite on CPU.
# Used in the example in
# mediapipe/examples/desktop/iris_tracking:iris_depth_from_image_desktop.

# Raw image bytes. (std::string)
input_stream: "input_image_bytes"

# Image with all the detections rendered. (ImageFrame)
output_stream: "output_image"
# Estimated depth in mm from the camera to the left iris of the face (if any) in
# the image. (float)
output_stream: "left_iris_depth_mm"
# Estimated depth in mm from the camera to the right iris of the face (if any)
# in the image. (float)
output_stream: "right_iris_depth_mm"

# Computes the focal length in pixels based on EXIF information stored in the
# image file. The output is an ImageFileProperties object containing relevant
# image EXIF information along with focal length in pixels.
node {
  calculator: "ImageFilePropertiesCalculator"
  input_stream: "input_image_bytes"
  output_side_packet: "image_file_properties"
}

# Converts a raw string with encoded image bytes into an ImageFrame object
# via OpenCV so that it can be processed by downstream calculators.
node {
  calculator: "OpenCvEncodedImageToImageFrameCalculator"
  input_stream: "input_image_bytes"
  output_stream: "input_image"
}

# Defines how many faces to detect. Iris tracking currently only handles one
# face (left and right eye), and therefore this should always be set to 1.
node {
  calculator: "ConstantSidePacketCalculator"
  output_side_packet: "PACKET:0:num_faces"
  node_options: {
    [type.googleapis.com/mediapipe.ConstantSidePacketCalculatorOptions]: {
      packet { int_value: 1 }
    }
  }
}

# Detects faces and corresponding landmarks.
node {
  calculator: "FaceLandmarkFrontCpu"
  input_stream: "IMAGE:input_image"
  input_side_packet: "NUM_FACES:num_faces"
  output_stream: "LANDMARKS:multi_face_landmarks"
  output_stream: "ROIS_FROM_LANDMARKS:face_rects_from_landmarks"
  output_stream: "DETECTIONS:face_detections"
  output_stream: "ROIS_FROM_DETECTIONS:face_rects_from_detections"
}

# Gets the very first and only face from the "multi_face_landmarks" vector.
node {
  calculator: "SplitNormalizedLandmarkListVectorCalculator"
  input_stream: "multi_face_landmarks"
  output_stream: "face_landmarks"
  node_options: {
    [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] {
      ranges: { begin: 0 end: 1 }
      element_only: true
    }
  }
}

# Gets the very first and only face rect from the "face_rects_from_landmarks"
# vector.
node {
  calculator: "SplitNormalizedRectVectorCalculator"
  input_stream: "face_rects_from_landmarks"
  output_stream: "face_rect"
  node_options: {
    [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] {
      ranges: { begin: 0 end: 1 }
      element_only: true
    }
  }
}

# Gets the two landmarks which define the left eye boundary.
node {
  calculator: "SplitNormalizedLandmarkListCalculator"
  input_stream: "face_landmarks"
  output_stream: "left_eye_boundary_landmarks"
  node_options: {
    [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] {
      ranges: { begin: 33 end: 34 }
      ranges: { begin: 133 end: 134 }
      combine_outputs: true
    }
  }
}

# Gets the two landmarks which define the right eye boundary.
node {
  calculator: "SplitNormalizedLandmarkListCalculator"
  input_stream: "face_landmarks"
  output_stream: "right_eye_boundary_landmarks"
  node_options: {
    [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] {
      ranges: { begin: 362 end: 363 }
      ranges: { begin: 263 end: 264 }
      combine_outputs: true
    }
  }
}

# Detects iris landmarks, eye contour landmarks, and corresponding rect (ROI).
node {
  calculator: "IrisLandmarkLeftAndRightCpu"
  input_stream: "IMAGE:input_image"
  input_stream: "LEFT_EYE_BOUNDARY_LANDMARKS:left_eye_boundary_landmarks"
  input_stream: "RIGHT_EYE_BOUNDARY_LANDMARKS:right_eye_boundary_landmarks"
  output_stream: "LEFT_EYE_CONTOUR_LANDMARKS:left_eye_contour_landmarks"
  output_stream: "LEFT_EYE_IRIS_LANDMARKS:left_iris_landmarks"
  output_stream: "LEFT_EYE_ROI:left_eye_rect_from_landmarks"
  output_stream: "RIGHT_EYE_CONTOUR_LANDMARKS:right_eye_contour_landmarks"
  output_stream: "RIGHT_EYE_IRIS_LANDMARKS:right_iris_landmarks"
  output_stream: "RIGHT_EYE_ROI:right_eye_rect_from_landmarks"
}

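# Concatenates the refined left and right eye contour landmarks into a single
# landmark list.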
node {
  calculator: "ConcatenateNormalizedLandmarkListCalculator"
  input_stream: "left_eye_contour_landmarks"
  input_stream: "right_eye_contour_landmarks"
  output_stream: "refined_eye_landmarks"
}

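# Updates the eye-related landmarks in the face landmark list with the refined
# eye contour landmarks.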
node {
  calculator: "UpdateFaceLandmarksCalculator"
  input_stream: "NEW_EYE_LANDMARKS:refined_eye_landmarks"
  input_stream: "FACE_LANDMARKS:face_landmarks"
  output_stream: "UPDATED_FACE_LANDMARKS:updated_face_landmarks"
}

# Renders annotations and overlays them on top of the input images.
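# Also estimates the depth in mm from the camera to each iris, based on the
# iris landmarks and the focal length from the image file properties.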
node {
  calculator: "IrisAndDepthRendererCpu"
  input_stream: "IMAGE:input_image"
  input_stream: "FACE_LANDMARKS:updated_face_landmarks"
  input_stream: "EYE_LANDMARKS_LEFT:left_eye_contour_landmarks"
  input_stream: "EYE_LANDMARKS_RIGHT:right_eye_contour_landmarks"
  input_stream: "IRIS_LANDMARKS_LEFT:left_iris_landmarks"
  input_stream: "IRIS_LANDMARKS_RIGHT:right_iris_landmarks"
  input_stream: "NORM_RECT:face_rect"
  input_stream: "LEFT_EYE_RECT:left_eye_rect_from_landmarks"
  input_stream: "RIGHT_EYE_RECT:right_eye_rect_from_landmarks"
  input_stream: "DETECTIONS:face_detections"
  input_side_packet: "IMAGE_FILE_PROPERTIES:image_file_properties"
  output_stream: "IRIS_LANDMARKS:iris_landmarks"
  output_stream: "IMAGE:output_image"
  output_stream: "LEFT_IRIS_DEPTH_MM:left_iris_depth_mm"
  output_stream: "RIGHT_IRIS_DEPTH_MM:right_iris_depth_mm"
}