# MediaPipe graph to detect/predict face landmarks on GPU.

type: "FaceLandmarkFrontGpuImage"

# Input image. (Image)
input_stream: "IMAGE:image"

# Maximum number of faces to detect/track. (int)
input_side_packet: "NUM_FACES:num_faces"

# Whether landmarks on the previous image should be used to help localize
# landmarks on the current image. (bool)
input_side_packet: "USE_PREV_LANDMARKS:use_prev_landmarks"

# Whether to run the face mesh model with attention on lips and eyes. (bool)
# Attention provides higher accuracy on the lip and eye regions, as well as
# iris landmarks.
input_side_packet: "WITH_ATTENTION:with_attention"

# The throttled input image. (Image)
output_stream: "IMAGE:throttled_image"

# Collection of detected/predicted faces, each represented as a list of 468
# face landmarks. (std::vector<NormalizedLandmarkList>)
# NOTE: there will not be an output packet in the LANDMARKS stream for a
# particular timestamp if no faces are detected. However, the MediaPipe
# framework will internally inform the downstream calculators of the absence of
# this packet so that they don't wait for it unnecessarily.
output_stream: "LANDMARKS:multi_face_landmarks"

# Extra outputs (for debugging, for instance).
# Detected faces. (std::vector<Detection>)
output_stream: "DETECTIONS:face_detections"
# Regions of interest calculated based on landmarks.
# (std::vector<NormalizedRect>)
output_stream: "ROIS_FROM_LANDMARKS:face_rects_from_landmarks"
# Regions of interest calculated based on face detections.
# (std::vector<NormalizedRect>)
output_stream: "ROIS_FROM_DETECTIONS:face_rects_from_detections"

# Throttles the images flowing downstream for flow control. It feeds a new
# image into the face landmark pipeline only when the previous image has been
# fully processed (signaled through the FINISHED back edge), keeping at most
# one image in flight and one in queue; excess images are dropped.
node {
  calculator: "FlowLimiterCalculator"
  input_stream: "image"
  input_stream: "FINISHED:multi_face_landmarks"
  input_stream_info: {
    tag_index: "FINISHED"
    back_edge: true
  }
  output_stream: "throttled_image"
  options: {
    [mediapipe.FlowLimiterCalculatorOptions.ext] {
      max_in_flight: 1
      max_in_queue: 1
    }
  }
}

# Converts Image to GpuBuffer for FaceLandmarkFrontGpu to consume.
node {
  calculator: "FromImageCalculator"
  input_stream: "IMAGE:throttled_image"
  output_stream: "IMAGE_GPU:raw_gpu_buffer"
  output_stream: "SOURCE_ON_GPU:is_gpu_image"
}

# TODO: Remove the extra flipping once adopting MlImage.
# If the source images are on GPU, flip the data vertically before sending them
# into FaceLandmarkFrontGpu. This may be needed because OpenGL represents images
# assuming the image origin is at the bottom-left corner, whereas MediaPipe in
# general assumes the image origin is at the top-left corner.
node {
  calculator: "ImageTransformationCalculator"
  input_stream: "IMAGE_GPU:raw_gpu_buffer"
  input_stream: "FLIP_VERTICALLY:is_gpu_image"
  output_stream: "IMAGE_GPU:gpu_buffer"
}

# Subgraph that detects faces and predicts landmarks for each face. (GPU input,
# and inference is executed on GPU.)
node {
  calculator: "FaceLandmarkFrontGpu"
  input_stream: "IMAGE:gpu_buffer"
  input_side_packet: "NUM_FACES:num_faces"
  input_side_packet: "USE_PREV_LANDMARKS:use_prev_landmarks"
  input_side_packet: "WITH_ATTENTION:with_attention"
  output_stream: "LANDMARKS:multi_face_landmarks"
  output_stream: "DETECTIONS:face_detections"
  output_stream: "ROIS_FROM_LANDMARKS:face_rects_from_landmarks"
  output_stream: "ROIS_FROM_DETECTIONS:face_rects_from_detections"
}
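
# EXAMPLE (illustrative sketch, not part of the upstream graph definition):
# because this graph declares `type: "FaceLandmarkFrontGpuImage"`, it is
# typically registered as a subgraph and instantiated from a parent graph as a
# node referencing that type name, as sketched below. The parent-side stream
# and side-packet names are placeholders chosen for illustration.
#
# node {
#   calculator: "FaceLandmarkFrontGpuImage"
#   input_stream: "IMAGE:image"
#   input_side_packet: "NUM_FACES:num_faces"
#   input_side_packet: "USE_PREV_LANDMARKS:use_prev_landmarks"
#   input_side_packet: "WITH_ATTENTION:with_attention"
#   output_stream: "LANDMARKS:multi_face_landmarks"
# }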