Merge pull request #5028 from kinaryml:python-holistic-landmarker

PiperOrigin-RevId: 594995636
Copybara-Service 2024-01-01 18:49:45 -08:00
commit e23fa531e1
8 changed files with 1199 additions and 22 deletions


@@ -97,6 +97,7 @@ cc_library(
"//mediapipe/tasks/cc/vision/face_landmarker:face_landmarker_graph",
"//mediapipe/tasks/cc/vision/face_stylizer:face_stylizer_graph",
"//mediapipe/tasks/cc/vision/gesture_recognizer:gesture_recognizer_graph",
"//mediapipe/tasks/cc/vision/holistic_landmarker:holistic_landmarker_graph",
"//mediapipe/tasks/cc/vision/image_classifier:image_classifier_graph",
"//mediapipe/tasks/cc/vision/image_embedder:image_embedder_graph",
"//mediapipe/tasks/cc/vision/image_segmenter:image_segmenter_graph",


@@ -49,5 +49,6 @@ py_library(
"//mediapipe/calculators/core:flow_limiter_calculator_py_pb2",
"//mediapipe/framework:calculator_options_py_pb2",
"//mediapipe/framework:calculator_py_pb2",
"@com_google_protobuf//:protobuf_python",
],
)


@@ -14,9 +14,8 @@
"""MediaPipe Tasks' task info data class."""
import dataclasses
from typing import Any, List
from google.protobuf import any_pb2
from mediapipe.calculators.core import flow_limiter_calculator_pb2
from mediapipe.framework import calculator_options_pb2
from mediapipe.framework import calculator_pb2
@@ -80,21 +79,34 @@ class TaskInfo:
      raise ValueError(
          '`task_options` doesn`t provide `to_pb2()` method to convert itself to be a protobuf object.'
      )
-    task_subgraph_options = calculator_options_pb2.CalculatorOptions()
    task_options_proto = self.task_options.to_pb2()
-    task_subgraph_options.Extensions[task_options_proto.ext].CopyFrom(
-        task_options_proto)
+    node_config = calculator_pb2.CalculatorGraphConfig.Node(
+        calculator=self.task_graph,
+        input_stream=self.input_streams,
+        output_stream=self.output_streams,
+    )
+    if hasattr(task_options_proto, 'ext'):
+      # Use the extension mechanism for task_subgraph_options (proto2)
+      task_subgraph_options = calculator_options_pb2.CalculatorOptions()
+      task_subgraph_options.Extensions[task_options_proto.ext].CopyFrom(
+          task_options_proto
+      )
+      node_config.options.CopyFrom(task_subgraph_options)
+    else:
+      # Use the Any type for task_subgraph_options (proto3)
+      task_subgraph_options = any_pb2.Any()
+      task_subgraph_options.Pack(self.task_options.to_pb2())
+      node_config.node_options.append(task_subgraph_options)
    if not enable_flow_limiting:
      return calculator_pb2.CalculatorGraphConfig(
-          node=[
-              calculator_pb2.CalculatorGraphConfig.Node(
-                  calculator=self.task_graph,
-                  input_stream=self.input_streams,
-                  output_stream=self.output_streams,
-                  options=task_subgraph_options)
-          ],
+          node=[node_config],
          input_stream=self.input_streams,
-          output_stream=self.output_streams)
+          output_stream=self.output_streams,
+      )
    # When a FlowLimiterCalculator is inserted to lower the overall graph
    # latency, the task doesn't guarantee that each input must have the
    # corresponding output.
@@ -120,13 +132,8 @@
        ],
        options=flow_limiter_options)
    config = calculator_pb2.CalculatorGraphConfig(
-        node=[
-            calculator_pb2.CalculatorGraphConfig.Node(
-                calculator=self.task_graph,
-                input_stream=task_subgraph_inputs,
-                output_stream=self.output_streams,
-                options=task_subgraph_options), flow_limiter
-        ],
+        node=[node_config, flow_limiter],
        input_stream=self.input_streams,
-        output_stream=self.output_streams)
+        output_stream=self.output_streams,
+    )
    return config
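# A minimal standalone sketch of the dispatch introduced above (not part of
# this commit): proto2 task options expose an `ext` extension identifier and
# are attached through CalculatorOptions Extensions, while proto3 options are
# packed into a google.protobuf.Any. The helper name is illustrative only.
from google.protobuf import any_pb2
from mediapipe.framework import calculator_options_pb2
from mediapipe.framework import calculator_pb2


def attach_task_options(
    node: calculator_pb2.CalculatorGraphConfig.Node, task_options_proto
) -> None:
  if hasattr(task_options_proto, 'ext'):
    # proto2 path: register via the CalculatorOptions extension mechanism.
    subgraph_options = calculator_options_pb2.CalculatorOptions()
    subgraph_options.Extensions[task_options_proto.ext].CopyFrom(
        task_options_proto
    )
    node.options.CopyFrom(subgraph_options)
  else:
    # proto3 path: pack into an Any and append to the node's node_options.
    packed = any_pb2.Any()
    packed.Pack(task_options_proto)
    node.node_options.append(packed)

# The holistic landmarker's graph options take the proto3 branch here, which
# is presumably why this fallback lands in the same change.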


@@ -194,6 +194,27 @@ py_test(
],
)
py_test(
name = "holistic_landmarker_test",
srcs = ["holistic_landmarker_test.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
"//mediapipe/tasks/testdata/vision:test_protos",
],
tags = ["not_run:arm"],
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/cc/vision/holistic_landmarker/proto:holistic_result_py_pb2",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/test:test_utils",
"//mediapipe/tasks/python/vision:holistic_landmarker",
"//mediapipe/tasks/python/vision/core:image_processing_options",
"//mediapipe/tasks/python/vision/core:vision_task_running_mode",
"@com_google_protobuf//:protobuf_python",
],
)
py_test(
name = "face_aligner_test",
srcs = ["face_aligner_test.py"],


@@ -0,0 +1,544 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for holistic landmarker."""
import enum
from unittest import mock
from absl.testing import absltest
from absl.testing import parameterized
import numpy as np
from google.protobuf import text_format
from mediapipe.python._framework_bindings import image as image_module
from mediapipe.tasks.cc.vision.holistic_landmarker.proto import holistic_result_pb2
from mediapipe.tasks.python.core import base_options as base_options_module
from mediapipe.tasks.python.test import test_utils
from mediapipe.tasks.python.vision import holistic_landmarker
from mediapipe.tasks.python.vision.core import image_processing_options as image_processing_options_module
from mediapipe.tasks.python.vision.core import vision_task_running_mode as running_mode_module
HolisticLandmarkerResult = holistic_landmarker.HolisticLandmarkerResult
_HolisticResultProto = holistic_result_pb2.HolisticResult
_BaseOptions = base_options_module.BaseOptions
_Image = image_module.Image
_HolisticLandmarker = holistic_landmarker.HolisticLandmarker
_HolisticLandmarkerOptions = holistic_landmarker.HolisticLandmarkerOptions
_RUNNING_MODE = running_mode_module.VisionTaskRunningMode
_ImageProcessingOptions = image_processing_options_module.ImageProcessingOptions
_HOLISTIC_LANDMARKER_BUNDLE_ASSET_FILE = 'holistic_landmarker.task'
_POSE_IMAGE = 'male_full_height_hands.jpg'
_CAT_IMAGE = 'cat.jpg'
_EXPECTED_HOLISTIC_RESULT = 'male_full_height_hands_result_cpu.pbtxt'
_IMAGE_WIDTH = 638
_IMAGE_HEIGHT = 1000
_LANDMARKS_MARGIN = 0.03
_BLENDSHAPES_MARGIN = 0.13
_VIDEO_LANDMARKS_MARGIN = 0.03
_VIDEO_BLENDSHAPES_MARGIN = 0.31
_LIVE_STREAM_LANDMARKS_MARGIN = 0.03
_LIVE_STREAM_BLENDSHAPES_MARGIN = 0.31
def _get_expected_holistic_landmarker_result(
file_path: str,
) -> HolisticLandmarkerResult:
holistic_result_file_path = test_utils.get_test_data_path(file_path)
with open(holistic_result_file_path, 'rb') as f:
holistic_result_proto = _HolisticResultProto()
# Use this if a .pb file is available.
# holistic_result_proto.ParseFromString(f.read())
text_format.Parse(f.read(), holistic_result_proto)
holistic_landmarker_result = HolisticLandmarkerResult.create_from_pb2(
holistic_result_proto
)
return holistic_landmarker_result
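# Illustrative sketch only (not part of this test file): the per-coordinate
# margin check used by _expect_landmarks_correct below can equivalently be
# expressed with numpy, which is already imported above.
def _landmarks_within_margin(actual, expected, margin: float) -> bool:
  a = np.array([(lm.x, lm.y) for lm in actual])
  b = np.array([(lm.x, lm.y) for lm in expected])
  return a.shape == b.shape and bool(np.all(np.abs(a - b) <= margin))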
class ModelFileType(enum.Enum):
FILE_CONTENT = 1
FILE_NAME = 2
class HolisticLandmarkerTest(parameterized.TestCase):
def setUp(self):
super().setUp()
self.test_image = _Image.create_from_file(
test_utils.get_test_data_path(_POSE_IMAGE)
)
self.model_path = test_utils.get_test_data_path(
_HOLISTIC_LANDMARKER_BUNDLE_ASSET_FILE
)
def _expect_landmarks_correct(
self, actual_landmarks, expected_landmarks, margin
):
# Expects to have the same number of landmarks detected.
self.assertLen(actual_landmarks, len(expected_landmarks))
for i, elem in enumerate(actual_landmarks):
self.assertAlmostEqual(elem.x, expected_landmarks[i].x, delta=margin)
self.assertAlmostEqual(elem.y, expected_landmarks[i].y, delta=margin)
def _expect_blendshapes_correct(
self, actual_blendshapes, expected_blendshapes, margin
):
# Expects to have the same number of blendshapes.
self.assertLen(actual_blendshapes, len(expected_blendshapes))
for i, elem in enumerate(actual_blendshapes):
self.assertEqual(elem.index, expected_blendshapes[i].index)
self.assertEqual(
elem.category_name, expected_blendshapes[i].category_name
)
self.assertAlmostEqual(
elem.score,
expected_blendshapes[i].score,
delta=margin,
)
def _expect_holistic_landmarker_results_correct(
self,
actual_result: HolisticLandmarkerResult,
expected_result: HolisticLandmarkerResult,
output_segmentation_mask: bool,
landmarks_margin: float,
blendshapes_margin: float,
):
self._expect_landmarks_correct(
actual_result.pose_landmarks,
expected_result.pose_landmarks,
landmarks_margin,
)
self._expect_landmarks_correct(
actual_result.face_landmarks,
expected_result.face_landmarks,
landmarks_margin,
)
self._expect_blendshapes_correct(
actual_result.face_blendshapes,
expected_result.face_blendshapes,
blendshapes_margin,
)
if output_segmentation_mask:
self.assertIsInstance(actual_result.segmentation_mask, _Image)
self.assertEqual(actual_result.segmentation_mask.width, _IMAGE_WIDTH)
self.assertEqual(actual_result.segmentation_mask.height, _IMAGE_HEIGHT)
else:
self.assertIsNone(actual_result.segmentation_mask)
def test_create_from_file_succeeds_with_valid_model_path(self):
# Creates with default option and valid model file successfully.
with _HolisticLandmarker.create_from_model_path(
self.model_path
) as landmarker:
self.assertIsInstance(landmarker, _HolisticLandmarker)
def test_create_from_options_succeeds_with_valid_model_path(self):
# Creates with options containing model file successfully.
base_options = _BaseOptions(model_asset_path=self.model_path)
options = _HolisticLandmarkerOptions(base_options=base_options)
with _HolisticLandmarker.create_from_options(options) as landmarker:
self.assertIsInstance(landmarker, _HolisticLandmarker)
def test_create_from_options_fails_with_invalid_model_path(self):
# Invalid empty model path.
with self.assertRaisesRegex(
RuntimeError, 'Unable to open file at /path/to/invalid/model.tflite'
):
base_options = _BaseOptions(
model_asset_path='/path/to/invalid/model.tflite'
)
options = _HolisticLandmarkerOptions(base_options=base_options)
_HolisticLandmarker.create_from_options(options)
def test_create_from_options_succeeds_with_valid_model_content(self):
# Creates with options containing model content successfully.
with open(self.model_path, 'rb') as f:
base_options = _BaseOptions(model_asset_buffer=f.read())
options = _HolisticLandmarkerOptions(base_options=base_options)
landmarker = _HolisticLandmarker.create_from_options(options)
self.assertIsInstance(landmarker, _HolisticLandmarker)
@parameterized.parameters(
(
ModelFileType.FILE_NAME,
_HOLISTIC_LANDMARKER_BUNDLE_ASSET_FILE,
False,
_get_expected_holistic_landmarker_result(_EXPECTED_HOLISTIC_RESULT),
),
(
ModelFileType.FILE_CONTENT,
_HOLISTIC_LANDMARKER_BUNDLE_ASSET_FILE,
False,
_get_expected_holistic_landmarker_result(_EXPECTED_HOLISTIC_RESULT),
),
(
ModelFileType.FILE_NAME,
_HOLISTIC_LANDMARKER_BUNDLE_ASSET_FILE,
True,
_get_expected_holistic_landmarker_result(_EXPECTED_HOLISTIC_RESULT),
),
(
ModelFileType.FILE_CONTENT,
_HOLISTIC_LANDMARKER_BUNDLE_ASSET_FILE,
True,
_get_expected_holistic_landmarker_result(_EXPECTED_HOLISTIC_RESULT),
),
)
def test_detect(
self,
model_file_type,
model_name,
output_segmentation_mask,
expected_holistic_landmarker_result,
):
# Creates holistic landmarker.
model_path = test_utils.get_test_data_path(model_name)
if model_file_type is ModelFileType.FILE_NAME:
base_options = _BaseOptions(model_asset_path=model_path)
elif model_file_type is ModelFileType.FILE_CONTENT:
with open(model_path, 'rb') as f:
model_content = f.read()
base_options = _BaseOptions(model_asset_buffer=model_content)
else:
# Should never happen
raise ValueError('model_file_type is invalid.')
options = _HolisticLandmarkerOptions(
base_options=base_options,
output_face_blendshapes=True
if expected_holistic_landmarker_result.face_blendshapes
else False,
output_segmentation_mask=output_segmentation_mask,
)
landmarker = _HolisticLandmarker.create_from_options(options)
# Performs holistic landmarks detection on the input.
detection_result = landmarker.detect(self.test_image)
self._expect_holistic_landmarker_results_correct(
detection_result,
expected_holistic_landmarker_result,
output_segmentation_mask,
_LANDMARKS_MARGIN,
_BLENDSHAPES_MARGIN,
)
# Closes the holistic landmarker explicitly when it is not used in a
# `with` context manager.
landmarker.close()
@parameterized.parameters(
(
ModelFileType.FILE_NAME,
_HOLISTIC_LANDMARKER_BUNDLE_ASSET_FILE,
False,
_get_expected_holistic_landmarker_result(_EXPECTED_HOLISTIC_RESULT),
),
(
ModelFileType.FILE_CONTENT,
_HOLISTIC_LANDMARKER_BUNDLE_ASSET_FILE,
True,
_get_expected_holistic_landmarker_result(_EXPECTED_HOLISTIC_RESULT),
),
)
def test_detect_in_context(
self,
model_file_type,
model_name,
output_segmentation_mask,
expected_holistic_landmarker_result,
):
# Creates holistic landmarker.
model_path = test_utils.get_test_data_path(model_name)
if model_file_type is ModelFileType.FILE_NAME:
base_options = _BaseOptions(model_asset_path=model_path)
elif model_file_type is ModelFileType.FILE_CONTENT:
with open(model_path, 'rb') as f:
model_content = f.read()
base_options = _BaseOptions(model_asset_buffer=model_content)
else:
# Should never happen
raise ValueError('model_file_type is invalid.')
options = _HolisticLandmarkerOptions(
base_options=base_options,
output_face_blendshapes=True
if expected_holistic_landmarker_result.face_blendshapes
else False,
output_segmentation_mask=output_segmentation_mask,
)
with _HolisticLandmarker.create_from_options(options) as landmarker:
# Performs holistic landmarks detection on the input.
detection_result = landmarker.detect(self.test_image)
self._expect_holistic_landmarker_results_correct(
detection_result,
expected_holistic_landmarker_result,
output_segmentation_mask,
_LANDMARKS_MARGIN,
_BLENDSHAPES_MARGIN,
)
def test_empty_detection_outputs(self):
options = _HolisticLandmarkerOptions(
base_options=_BaseOptions(model_asset_path=self.model_path)
)
with _HolisticLandmarker.create_from_options(options) as landmarker:
# Load the cat image.
cat_test_image = _Image.create_from_file(
test_utils.get_test_data_path(_CAT_IMAGE)
)
# Performs holistic landmarks detection on the input.
detection_result = landmarker.detect(cat_test_image)
self.assertEmpty(detection_result.face_landmarks)
self.assertEmpty(detection_result.pose_landmarks)
self.assertEmpty(detection_result.pose_world_landmarks)
self.assertEmpty(detection_result.left_hand_landmarks)
self.assertEmpty(detection_result.left_hand_world_landmarks)
self.assertEmpty(detection_result.right_hand_landmarks)
self.assertEmpty(detection_result.right_hand_world_landmarks)
self.assertIsNone(detection_result.face_blendshapes)
self.assertIsNone(detection_result.segmentation_mask)
def test_missing_result_callback(self):
options = _HolisticLandmarkerOptions(
base_options=_BaseOptions(model_asset_path=self.model_path),
running_mode=_RUNNING_MODE.LIVE_STREAM,
)
with self.assertRaisesRegex(
ValueError, r'result callback must be provided'
):
with _HolisticLandmarker.create_from_options(
options
) as unused_landmarker:
pass
@parameterized.parameters((_RUNNING_MODE.IMAGE), (_RUNNING_MODE.VIDEO))
def test_illegal_result_callback(self, running_mode):
options = _HolisticLandmarkerOptions(
base_options=_BaseOptions(model_asset_path=self.model_path),
running_mode=running_mode,
result_callback=mock.MagicMock(),
)
with self.assertRaisesRegex(
ValueError, r'result callback should not be provided'
):
with _HolisticLandmarker.create_from_options(
options
) as unused_landmarker:
pass
def test_calling_detect_for_video_in_image_mode(self):
options = _HolisticLandmarkerOptions(
base_options=_BaseOptions(model_asset_path=self.model_path),
running_mode=_RUNNING_MODE.IMAGE,
)
with _HolisticLandmarker.create_from_options(options) as landmarker:
with self.assertRaisesRegex(
ValueError, r'not initialized with the video mode'
):
landmarker.detect_for_video(self.test_image, 0)
def test_calling_detect_async_in_image_mode(self):
options = _HolisticLandmarkerOptions(
base_options=_BaseOptions(model_asset_path=self.model_path),
running_mode=_RUNNING_MODE.IMAGE,
)
with _HolisticLandmarker.create_from_options(options) as landmarker:
with self.assertRaisesRegex(
ValueError, r'not initialized with the live stream mode'
):
landmarker.detect_async(self.test_image, 0)
def test_calling_detect_in_video_mode(self):
options = _HolisticLandmarkerOptions(
base_options=_BaseOptions(model_asset_path=self.model_path),
running_mode=_RUNNING_MODE.VIDEO,
)
with _HolisticLandmarker.create_from_options(options) as landmarker:
with self.assertRaisesRegex(
ValueError, r'not initialized with the image mode'
):
landmarker.detect(self.test_image)
def test_calling_detect_async_in_video_mode(self):
options = _HolisticLandmarkerOptions(
base_options=_BaseOptions(model_asset_path=self.model_path),
running_mode=_RUNNING_MODE.VIDEO,
)
with _HolisticLandmarker.create_from_options(options) as landmarker:
with self.assertRaisesRegex(
ValueError, r'not initialized with the live stream mode'
):
landmarker.detect_async(self.test_image, 0)
def test_detect_for_video_with_out_of_order_timestamp(self):
options = _HolisticLandmarkerOptions(
base_options=_BaseOptions(model_asset_path=self.model_path),
running_mode=_RUNNING_MODE.VIDEO,
)
with _HolisticLandmarker.create_from_options(options) as landmarker:
unused_result = landmarker.detect_for_video(self.test_image, 1)
with self.assertRaisesRegex(
ValueError, r'Input timestamp must be monotonically increasing'
):
landmarker.detect_for_video(self.test_image, 0)
@parameterized.parameters(
(
_HOLISTIC_LANDMARKER_BUNDLE_ASSET_FILE,
False,
_get_expected_holistic_landmarker_result(_EXPECTED_HOLISTIC_RESULT),
),
(
_HOLISTIC_LANDMARKER_BUNDLE_ASSET_FILE,
True,
_get_expected_holistic_landmarker_result(_EXPECTED_HOLISTIC_RESULT),
),
)
def test_detect_for_video(
self,
model_name,
output_segmentation_mask,
expected_holistic_landmarker_result,
):
# Creates holistic landmarker.
model_path = test_utils.get_test_data_path(model_name)
base_options = _BaseOptions(model_asset_path=model_path)
options = _HolisticLandmarkerOptions(
base_options=base_options,
running_mode=_RUNNING_MODE.VIDEO,
output_face_blendshapes=True
if expected_holistic_landmarker_result.face_blendshapes
else False,
output_segmentation_mask=output_segmentation_mask,
)
with _HolisticLandmarker.create_from_options(options) as landmarker:
for timestamp in range(0, 300, 30):
# Performs holistic landmarks detection on the input.
detection_result = landmarker.detect_for_video(
self.test_image, timestamp
)
# Comparing results.
self._expect_holistic_landmarker_results_correct(
detection_result,
expected_holistic_landmarker_result,
output_segmentation_mask,
_VIDEO_LANDMARKS_MARGIN,
_VIDEO_BLENDSHAPES_MARGIN,
)
def test_calling_detect_in_live_stream_mode(self):
options = _HolisticLandmarkerOptions(
base_options=_BaseOptions(model_asset_path=self.model_path),
running_mode=_RUNNING_MODE.LIVE_STREAM,
result_callback=mock.MagicMock(),
)
with _HolisticLandmarker.create_from_options(options) as landmarker:
with self.assertRaisesRegex(
ValueError, r'not initialized with the image mode'
):
landmarker.detect(self.test_image)
def test_calling_detect_for_video_in_live_stream_mode(self):
options = _HolisticLandmarkerOptions(
base_options=_BaseOptions(model_asset_path=self.model_path),
running_mode=_RUNNING_MODE.LIVE_STREAM,
result_callback=mock.MagicMock(),
)
with _HolisticLandmarker.create_from_options(options) as landmarker:
with self.assertRaisesRegex(
ValueError, r'not initialized with the video mode'
):
landmarker.detect_for_video(self.test_image, 0)
def test_detect_async_calls_with_illegal_timestamp(self):
options = _HolisticLandmarkerOptions(
base_options=_BaseOptions(model_asset_path=self.model_path),
running_mode=_RUNNING_MODE.LIVE_STREAM,
result_callback=mock.MagicMock(),
)
with _HolisticLandmarker.create_from_options(options) as landmarker:
landmarker.detect_async(self.test_image, 100)
with self.assertRaisesRegex(
ValueError, r'Input timestamp must be monotonically increasing'
):
landmarker.detect_async(self.test_image, 0)
@parameterized.parameters(
(
_POSE_IMAGE,
_HOLISTIC_LANDMARKER_BUNDLE_ASSET_FILE,
False,
_get_expected_holistic_landmarker_result(_EXPECTED_HOLISTIC_RESULT),
),
(
_POSE_IMAGE,
_HOLISTIC_LANDMARKER_BUNDLE_ASSET_FILE,
True,
_get_expected_holistic_landmarker_result(_EXPECTED_HOLISTIC_RESULT),
),
)
def test_detect_async_calls(
self,
image_path,
model_name,
output_segmentation_mask,
expected_holistic_landmarker_result,
):
test_image = _Image.create_from_file(
test_utils.get_test_data_path(image_path)
)
self.observed_timestamp_ms = -1
def check_result(
result: HolisticLandmarkerResult,
output_image: _Image,
timestamp_ms: int,
):
# Comparing results.
self._expect_holistic_landmarker_results_correct(
result,
expected_holistic_landmarker_result,
output_segmentation_mask,
_LIVE_STREAM_LANDMARKS_MARGIN,
_LIVE_STREAM_BLENDSHAPES_MARGIN,
)
self.assertTrue(
np.array_equal(output_image.numpy_view(), test_image.numpy_view())
)
self.assertLess(self.observed_timestamp_ms, timestamp_ms)
self.observed_timestamp_ms = timestamp_ms
model_path = test_utils.get_test_data_path(model_name)
options = _HolisticLandmarkerOptions(
base_options=_BaseOptions(model_asset_path=model_path),
running_mode=_RUNNING_MODE.LIVE_STREAM,
output_face_blendshapes=True
if expected_holistic_landmarker_result.face_blendshapes
else False,
output_segmentation_mask=output_segmentation_mask,
result_callback=check_result,
)
with _HolisticLandmarker.create_from_options(options) as landmarker:
for timestamp in range(0, 300, 30):
landmarker.detect_async(test_image, timestamp)
if __name__ == '__main__':
absltest.main()
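# Illustrative sketch (not part of this test file): the golden-result plumbing
# above can also be used outside the test harness. The pbtxt path is an
# assumption matching _EXPECTED_HOLISTIC_RESULT.
from google.protobuf import text_format
from mediapipe.tasks.cc.vision.holistic_landmarker.proto import holistic_result_pb2
from mediapipe.tasks.python.vision import holistic_landmarker

with open('male_full_height_hands_result_cpu.pbtxt', 'rb') as f:
  golden = holistic_result_pb2.HolisticResult()
  text_format.Parse(f.read(), golden)
expected = holistic_landmarker.HolisticLandmarkerResult.create_from_pb2(golden)
print(len(expected.pose_landmarks), len(expected.face_landmarks))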


@@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder: load py_library
# Placeholder for internal Python strict library and test compatibility macro.
package(default_visibility = ["//visibility:public"])
@@ -243,6 +242,30 @@ py_library(
],
)
py_library(
name = "holistic_landmarker",
srcs = [
"holistic_landmarker.py",
],
deps = [
"//mediapipe/framework/formats:classification_py_pb2",
"//mediapipe/framework/formats:landmark_py_pb2",
"//mediapipe/python:_framework_bindings",
"//mediapipe/python:packet_creator",
"//mediapipe/python:packet_getter",
"//mediapipe/tasks/cc/vision/holistic_landmarker/proto:holistic_landmarker_graph_options_py_pb2",
"//mediapipe/tasks/cc/vision/holistic_landmarker/proto:holistic_result_py_pb2",
"//mediapipe/tasks/python/components/containers:category",
"//mediapipe/tasks/python/components/containers:landmark",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/core:optional_dependencies",
"//mediapipe/tasks/python/core:task_info",
"//mediapipe/tasks/python/vision/core:base_vision_task_api",
"//mediapipe/tasks/python/vision/core:image_processing_options",
"//mediapipe/tasks/python/vision/core:vision_task_running_mode",
],
)
py_library(
name = "face_stylizer",
srcs = [


@@ -0,0 +1,576 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe holistic landmarker task."""
import dataclasses
from typing import Callable, List, Mapping, Optional
from mediapipe.framework.formats import classification_pb2
from mediapipe.framework.formats import landmark_pb2
from mediapipe.python import packet_creator
from mediapipe.python import packet_getter
from mediapipe.python._framework_bindings import image as image_module
from mediapipe.python._framework_bindings import packet as packet_module
from mediapipe.tasks.cc.vision.holistic_landmarker.proto import holistic_landmarker_graph_options_pb2
from mediapipe.tasks.cc.vision.holistic_landmarker.proto import holistic_result_pb2
from mediapipe.tasks.python.components.containers import category as category_module
from mediapipe.tasks.python.components.containers import landmark as landmark_module
from mediapipe.tasks.python.core import base_options as base_options_module
from mediapipe.tasks.python.core import task_info as task_info_module
from mediapipe.tasks.python.core.optional_dependencies import doc_controls
from mediapipe.tasks.python.vision.core import base_vision_task_api
from mediapipe.tasks.python.vision.core import image_processing_options as image_processing_options_module
from mediapipe.tasks.python.vision.core import vision_task_running_mode as running_mode_module
_BaseOptions = base_options_module.BaseOptions
_HolisticResultProto = holistic_result_pb2.HolisticResult
_HolisticLandmarkerGraphOptionsProto = (
holistic_landmarker_graph_options_pb2.HolisticLandmarkerGraphOptions
)
_RunningMode = running_mode_module.VisionTaskRunningMode
_ImageProcessingOptions = image_processing_options_module.ImageProcessingOptions
_TaskInfo = task_info_module.TaskInfo
_IMAGE_IN_STREAM_NAME = 'image_in'
_IMAGE_OUT_STREAM_NAME = 'image_out'
_IMAGE_TAG = 'IMAGE'
_POSE_LANDMARKS_STREAM_NAME = 'pose_landmarks'
_POSE_LANDMARKS_TAG_NAME = 'POSE_LANDMARKS'
_POSE_WORLD_LANDMARKS_STREAM_NAME = 'pose_world_landmarks'
_POSE_WORLD_LANDMARKS_TAG = 'POSE_WORLD_LANDMARKS'
_POSE_SEGMENTATION_MASK_STREAM_NAME = 'pose_segmentation_mask'
_POSE_SEGMENTATION_MASK_TAG = 'POSE_SEGMENTATION_MASK'
_FACE_LANDMARKS_STREAM_NAME = 'face_landmarks'
_FACE_LANDMARKS_TAG = 'FACE_LANDMARKS'
_FACE_BLENDSHAPES_STREAM_NAME = 'extra_blendshapes'
_FACE_BLENDSHAPES_TAG = 'FACE_BLENDSHAPES'
_LEFT_HAND_LANDMARKS_STREAM_NAME = 'left_hand_landmarks'
_LEFT_HAND_LANDMARKS_TAG = 'LEFT_HAND_LANDMARKS'
_LEFT_HAND_WORLD_LANDMARKS_STREAM_NAME = 'left_hand_world_landmarks'
_LEFT_HAND_WORLD_LANDMARKS_TAG = 'LEFT_HAND_WORLD_LANDMARKS'
_RIGHT_HAND_LANDMARKS_STREAM_NAME = 'right_hand_landmarks'
_RIGHT_HAND_LANDMARKS_TAG = 'RIGHT_HAND_LANDMARKS'
_RIGHT_HAND_WORLD_LANDMARKS_STREAM_NAME = 'right_hand_world_landmarks'
_RIGHT_HAND_WORLD_LANDMARKS_TAG = 'RIGHT_HAND_WORLD_LANDMARKS'
_TASK_GRAPH_NAME = (
'mediapipe.tasks.vision.holistic_landmarker.HolisticLandmarkerGraph'
)
_MICRO_SECONDS_PER_MILLISECOND = 1000
@dataclasses.dataclass
class HolisticLandmarkerResult:
"""The holistic landmarks result from HolisticLandmarker, where each vector element represents a single holistic detected in the image.
Attributes:
face_landmarks: Detected face landmarks in normalized image coordinates.
pose_landmarks: Detected pose landmarks in normalized image coordinates.
pose_world_landmarks: Detected pose world landmarks in image coordinates.
left_hand_landmarks: Detected left hand landmarks in normalized image
coordinates.
left_hand_world_landmarks: Detected left hand landmarks in image
coordinates.
right_hand_landmarks: Detected right hand landmarks in normalized image
coordinates.
right_hand_world_landmarks: Detected right hand landmarks in image
coordinates.
face_blendshapes: Optional face blendshapes.
segmentation_mask: Optional segmentation mask for pose.
"""
face_landmarks: List[landmark_module.NormalizedLandmark]
pose_landmarks: List[landmark_module.NormalizedLandmark]
pose_world_landmarks: List[landmark_module.Landmark]
left_hand_landmarks: List[landmark_module.NormalizedLandmark]
left_hand_world_landmarks: List[landmark_module.Landmark]
right_hand_landmarks: List[landmark_module.NormalizedLandmark]
right_hand_world_landmarks: List[landmark_module.Landmark]
face_blendshapes: Optional[List[category_module.Category]] = None
segmentation_mask: Optional[image_module.Image] = None
@classmethod
@doc_controls.do_not_generate_docs
def create_from_pb2(
cls, pb2_obj: _HolisticResultProto
) -> 'HolisticLandmarkerResult':
"""Creates a `HolisticLandmarkerResult` object from the given protobuf object."""
face_blendshapes = None
if hasattr(pb2_obj, 'face_blendshapes'):
face_blendshapes = [
category_module.Category(
score=classification.score,
index=classification.index,
category_name=classification.label,
display_name=classification.display_name,
)
for classification in pb2_obj.face_blendshapes.classification
]
return HolisticLandmarkerResult(
face_landmarks=[
landmark_module.NormalizedLandmark.create_from_pb2(landmark)
for landmark in pb2_obj.face_landmarks.landmark
],
pose_landmarks=[
landmark_module.NormalizedLandmark.create_from_pb2(landmark)
for landmark in pb2_obj.pose_landmarks.landmark
],
pose_world_landmarks=[
landmark_module.Landmark.create_from_pb2(landmark)
for landmark in pb2_obj.pose_world_landmarks.landmark
],
left_hand_landmarks=[
landmark_module.NormalizedLandmark.create_from_pb2(landmark)
for landmark in pb2_obj.left_hand_landmarks.landmark
],
left_hand_world_landmarks=[],
right_hand_landmarks=[
landmark_module.NormalizedLandmark.create_from_pb2(landmark)
for landmark in pb2_obj.right_hand_landmarks.landmark
],
right_hand_world_landmarks=[],
face_blendshapes=face_blendshapes,
segmentation_mask=None,
)
def _build_landmarker_result(
output_packets: Mapping[str, packet_module.Packet]
) -> HolisticLandmarkerResult:
"""Constructs a `HolisticLandmarksDetectionResult` from output packets."""
holistic_landmarker_result = HolisticLandmarkerResult(
[], [], [], [], [], [], []
)
face_landmarks_proto_list = packet_getter.get_proto(
output_packets[_FACE_LANDMARKS_STREAM_NAME]
)
pose_landmarks_proto_list = packet_getter.get_proto(
output_packets[_POSE_LANDMARKS_STREAM_NAME]
)
pose_world_landmarks_proto_list = packet_getter.get_proto(
output_packets[_POSE_WORLD_LANDMARKS_STREAM_NAME]
)
left_hand_landmarks_proto_list = packet_getter.get_proto(
output_packets[_LEFT_HAND_LANDMARKS_STREAM_NAME]
)
left_hand_world_landmarks_proto_list = packet_getter.get_proto(
output_packets[_LEFT_HAND_WORLD_LANDMARKS_STREAM_NAME]
)
right_hand_landmarks_proto_list = packet_getter.get_proto(
output_packets[_RIGHT_HAND_LANDMARKS_STREAM_NAME]
)
right_hand_world_landmarks_proto_list = packet_getter.get_proto(
output_packets[_RIGHT_HAND_WORLD_LANDMARKS_STREAM_NAME]
)
face_landmarks = landmark_pb2.NormalizedLandmarkList()
face_landmarks.MergeFrom(face_landmarks_proto_list)
for face_landmark in face_landmarks.landmark:
holistic_landmarker_result.face_landmarks.append(
landmark_module.NormalizedLandmark.create_from_pb2(face_landmark)
)
pose_landmarks = landmark_pb2.NormalizedLandmarkList()
pose_landmarks.MergeFrom(pose_landmarks_proto_list)
for pose_landmark in pose_landmarks.landmark:
holistic_landmarker_result.pose_landmarks.append(
landmark_module.NormalizedLandmark.create_from_pb2(pose_landmark)
)
pose_world_landmarks = landmark_pb2.LandmarkList()
pose_world_landmarks.MergeFrom(pose_world_landmarks_proto_list)
for pose_world_landmark in pose_world_landmarks.landmark:
holistic_landmarker_result.pose_world_landmarks.append(
landmark_module.Landmark.create_from_pb2(pose_world_landmark)
)
left_hand_landmarks = landmark_pb2.NormalizedLandmarkList()
left_hand_landmarks.MergeFrom(left_hand_landmarks_proto_list)
for hand_landmark in left_hand_landmarks.landmark:
holistic_landmarker_result.left_hand_landmarks.append(
landmark_module.NormalizedLandmark.create_from_pb2(hand_landmark)
)
left_hand_world_landmarks = landmark_pb2.LandmarkList()
left_hand_world_landmarks.MergeFrom(left_hand_world_landmarks_proto_list)
for left_hand_world_landmark in left_hand_world_landmarks.landmark:
holistic_landmarker_result.left_hand_world_landmarks.append(
landmark_module.Landmark.create_from_pb2(left_hand_world_landmark)
)
right_hand_landmarks = landmark_pb2.NormalizedLandmarkList()
right_hand_landmarks.MergeFrom(right_hand_landmarks_proto_list)
for hand_landmark in right_hand_landmarks.landmark:
holistic_landmarker_result.right_hand_landmarks.append(
landmark_module.NormalizedLandmark.create_from_pb2(hand_landmark)
)
right_hand_world_landmarks = landmark_pb2.LandmarkList()
right_hand_world_landmarks.MergeFrom(right_hand_world_landmarks_proto_list)
for right_hand_world_landmark in right_hand_world_landmarks.landmark:
holistic_landmarker_result.right_hand_world_landmarks.append(
landmark_module.Landmark.create_from_pb2(right_hand_world_landmark)
)
if _FACE_BLENDSHAPES_STREAM_NAME in output_packets:
face_blendshapes_proto_list = packet_getter.get_proto(
output_packets[_FACE_BLENDSHAPES_STREAM_NAME]
)
face_blendshapes_classifications = classification_pb2.ClassificationList()
face_blendshapes_classifications.MergeFrom(face_blendshapes_proto_list)
holistic_landmarker_result.face_blendshapes = []
for face_blendshapes in face_blendshapes_classifications.classification:
holistic_landmarker_result.face_blendshapes.append(
category_module.Category(
index=face_blendshapes.index,
score=face_blendshapes.score,
display_name=face_blendshapes.display_name,
category_name=face_blendshapes.label,
)
)
if _POSE_SEGMENTATION_MASK_STREAM_NAME in output_packets:
holistic_landmarker_result.segmentation_mask = packet_getter.get_image(
output_packets[_POSE_SEGMENTATION_MASK_STREAM_NAME]
)
return holistic_landmarker_result
@dataclasses.dataclass
class HolisticLandmarkerOptions:
"""Options for the holistic landmarker task.
Attributes:
base_options: Base options for the holistic landmarker task.
running_mode: The running mode of the task. Defaults to the image mode.
HolisticLandmarker has three running modes: 1) The image mode for
detecting holistic landmarks on single image inputs. 2) The video mode for
detecting holistic landmarks on the decoded frames of a video. 3) The live
stream mode for detecting holistic landmarks on a live stream of input
data, such as from a camera. In this mode, the "result_callback" below must
be specified to receive the detection results asynchronously.
min_face_detection_confidence: The minimum confidence score for the face
detection to be considered successful.
min_face_suppression_threshold: The minimum non-maximum-suppression
threshold for face detection to be considered overlapped.
min_face_landmarks_confidence: The minimum confidence score for the face
landmark detection to be considered successful.
min_pose_detection_confidence: The minimum confidence score for the pose
detection to be considered successful.
min_pose_suppression_threshold: The minimum non-maximum-suppression
threshold for pose detection to be considered overlapped.
min_pose_landmarks_confidence: The minimum confidence score for the pose
landmark detection to be considered successful.
min_hand_landmarks_confidence: The minimum confidence score for the hand
landmark detection to be considered successful.
output_face_blendshapes: Whether HolisticLandmarker outputs face blendshapes
classification. Face blendshapes are used for rendering the 3D face model.
output_segmentation_mask: Whether to output segmentation masks.
result_callback: The user-defined result callback for processing live stream
data. The result callback should only be specified when the running mode
is set to the live stream mode.
"""
base_options: _BaseOptions
running_mode: _RunningMode = _RunningMode.IMAGE
min_face_detection_confidence: float = 0.5
min_face_suppression_threshold: float = 0.5
min_face_landmarks_confidence: float = 0.5
min_pose_detection_confidence: float = 0.5
min_pose_suppression_threshold: float = 0.5
min_pose_landmarks_confidence: float = 0.5
min_hand_landmarks_confidence: float = 0.5
output_face_blendshapes: bool = False
output_segmentation_mask: bool = False
result_callback: Optional[
Callable[[HolisticLandmarkerResult, image_module.Image, int], None]
] = None
@doc_controls.do_not_generate_docs
def to_pb2(self) -> _HolisticLandmarkerGraphOptionsProto:
"""Generates an HolisticLandmarkerGraphOptions protobuf object."""
base_options_proto = self.base_options.to_pb2()
base_options_proto.use_stream_mode = (
False if self.running_mode == _RunningMode.IMAGE else True
)
# Initialize the holistic landmarker options from base options.
holistic_landmarker_options_proto = _HolisticLandmarkerGraphOptionsProto(
base_options=base_options_proto
)
# Configure face detector and face landmarks detector options.
holistic_landmarker_options_proto.face_detector_graph_options.min_detection_confidence = (
self.min_face_detection_confidence
)
holistic_landmarker_options_proto.face_detector_graph_options.min_suppression_threshold = (
self.min_face_suppression_threshold
)
holistic_landmarker_options_proto.face_landmarks_detector_graph_options.min_detection_confidence = (
self.min_face_landmarks_confidence
)
# Configure pose detector and pose landmarks detector options.
holistic_landmarker_options_proto.pose_detector_graph_options.min_detection_confidence = (
self.min_pose_detection_confidence
)
holistic_landmarker_options_proto.pose_detector_graph_options.min_suppression_threshold = (
self.min_pose_suppression_threshold
)
holistic_landmarker_options_proto.pose_landmarks_detector_graph_options.min_detection_confidence = (
self.min_pose_landmarks_confidence
)
# Configure hand landmarks detector options.
holistic_landmarker_options_proto.hand_landmarks_detector_graph_options.min_detection_confidence = (
self.min_hand_landmarks_confidence
)
return holistic_landmarker_options_proto
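# Illustrative sketch only (not part of this module): how the per-subtask
# confidence fields above are forwarded into the nested detector graph options
# by to_pb2(). The model path is an assumed placeholder.
def _example_options_to_pb2() -> None:
  options = HolisticLandmarkerOptions(
      base_options=_BaseOptions(model_asset_path='holistic_landmarker.task'),
      min_face_detection_confidence=0.6,
      min_pose_detection_confidence=0.7,
  )
  options_proto = options.to_pb2()
  # The values land on the nested face/pose detector graph options.
  print(options_proto.face_detector_graph_options.min_detection_confidence)  # 0.6
  print(options_proto.pose_detector_graph_options.min_detection_confidence)  # 0.7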
class HolisticLandmarker(base_vision_task_api.BaseVisionTaskApi):
"""Class that performs holistic landmarks detection on images."""
@classmethod
def create_from_model_path(cls, model_path: str) -> 'HolisticLandmarker':
"""Creates an `HolisticLandmarker` object from a TensorFlow Lite model and the default `HolisticLandmarkerOptions`.
Note that the created `HolisticLandmarker` instance is in image mode, for
detecting holistic landmarks on single image inputs.
Args:
model_path: Path to the model.
Returns:
`HolisticLandmarker` object that's created from the model file and the
default `HolisticLandmarkerOptions`.
Raises:
ValueError: If the `HolisticLandmarker` object cannot be created from the
provided file, for example because of an invalid file path.
RuntimeError: If other types of error occurred.
"""
base_options = _BaseOptions(model_asset_path=model_path)
options = HolisticLandmarkerOptions(
base_options=base_options, running_mode=_RunningMode.IMAGE
)
return cls.create_from_options(options)
@classmethod
def create_from_options(
cls, options: HolisticLandmarkerOptions
) -> 'HolisticLandmarker':
"""Creates the `HolisticLandmarker` object from holistic landmarker options.
Args:
options: Options for the holistic landmarker task.
Returns:
`HolisticLandmarker` object that's created from `options`.
Raises:
ValueError: If the `HolisticLandmarker` object cannot be created from
`HolisticLandmarkerOptions`, for example because the model is missing.
RuntimeError: If other types of error occurred.
"""
def packets_callback(output_packets: Mapping[str, packet_module.Packet]):
if output_packets[_IMAGE_OUT_STREAM_NAME].is_empty():
return
image = packet_getter.get_image(output_packets[_IMAGE_OUT_STREAM_NAME])
if output_packets[_FACE_LANDMARKS_STREAM_NAME].is_empty():
empty_packet = output_packets[_FACE_LANDMARKS_STREAM_NAME]
options.result_callback(
HolisticLandmarkerResult([], [], [], [], [], [], []),
image,
empty_packet.timestamp.value // _MICRO_SECONDS_PER_MILLISECOND,
)
return
holistic_landmarks_detection_result = _build_landmarker_result(
output_packets
)
timestamp = output_packets[_FACE_LANDMARKS_STREAM_NAME].timestamp
options.result_callback(
holistic_landmarks_detection_result,
image,
timestamp.value // _MICRO_SECONDS_PER_MILLISECOND,
)
output_streams = [
':'.join([_FACE_LANDMARKS_TAG, _FACE_LANDMARKS_STREAM_NAME]),
':'.join([_POSE_LANDMARKS_TAG_NAME, _POSE_LANDMARKS_STREAM_NAME]),
':'.join(
[_POSE_WORLD_LANDMARKS_TAG, _POSE_WORLD_LANDMARKS_STREAM_NAME]
),
':'.join([_LEFT_HAND_LANDMARKS_TAG, _LEFT_HAND_LANDMARKS_STREAM_NAME]),
':'.join([
_LEFT_HAND_WORLD_LANDMARKS_TAG,
_LEFT_HAND_WORLD_LANDMARKS_STREAM_NAME,
]),
':'.join(
[_RIGHT_HAND_LANDMARKS_TAG, _RIGHT_HAND_LANDMARKS_STREAM_NAME]
),
':'.join([
_RIGHT_HAND_WORLD_LANDMARKS_TAG,
_RIGHT_HAND_WORLD_LANDMARKS_STREAM_NAME,
]),
':'.join([_IMAGE_TAG, _IMAGE_OUT_STREAM_NAME]),
]
if options.output_segmentation_mask:
output_streams.append(
':'.join(
[_POSE_SEGMENTATION_MASK_TAG, _POSE_SEGMENTATION_MASK_STREAM_NAME]
)
)
if options.output_face_blendshapes:
output_streams.append(
':'.join([_FACE_BLENDSHAPES_TAG, _FACE_BLENDSHAPES_STREAM_NAME])
)
task_info = _TaskInfo(
task_graph=_TASK_GRAPH_NAME,
input_streams=[
':'.join([_IMAGE_TAG, _IMAGE_IN_STREAM_NAME]),
],
output_streams=output_streams,
task_options=options,
)
return cls(
task_info.generate_graph_config(
enable_flow_limiting=options.running_mode
== _RunningMode.LIVE_STREAM
),
options.running_mode,
packets_callback if options.result_callback else None,
)
def detect(
self,
image: image_module.Image,
) -> HolisticLandmarkerResult:
"""Performs holistic landmarks detection on the given image.
Only use this method when the HolisticLandmarker is created with the image
running mode.
The image can be of any size with format RGB or RGBA.
Args:
image: MediaPipe Image.
Returns:
The holistic landmarks detection results.
Raises:
ValueError: If any of the input arguments is invalid.
RuntimeError: If holistic landmarker detection failed to run.
"""
output_packets = self._process_image_data({
_IMAGE_IN_STREAM_NAME: packet_creator.create_image(image),
})
if output_packets[_FACE_LANDMARKS_STREAM_NAME].is_empty():
return HolisticLandmarkerResult([], [], [], [], [], [], [])
return _build_landmarker_result(output_packets)
def detect_for_video(
self,
image: image_module.Image,
timestamp_ms: int,
) -> HolisticLandmarkerResult:
"""Performs holistic landmarks detection on the provided video frame.
Only use this method when the HolisticLandmarker is created with the video
running mode.
Only use this method when the HolisticLandmarker is created with the video
running mode. It's required to provide the video frame's timestamp (in
milliseconds) along with the video frame. The input timestamps should be
monotonically increasing for adjacent calls of this method.
Args:
image: MediaPipe Image.
timestamp_ms: The timestamp of the input video frame in milliseconds.
Returns:
The holistic landmarks detection results.
Raises:
ValueError: If any of the input arguments is invalid.
RuntimeError: If holistic landmarker detection failed to run.
"""
output_packets = self._process_video_data({
_IMAGE_IN_STREAM_NAME: packet_creator.create_image(image).at(
timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND
),
})
if output_packets[_FACE_LANDMARKS_STREAM_NAME].is_empty():
return HolisticLandmarkerResult([], [], [], [], [], [], [])
return _build_landmarker_result(output_packets)
def detect_async(
self,
image: image_module.Image,
timestamp_ms: int,
) -> None:
"""Sends live image data to perform holistic landmarks detection.
The results will be available via the "result_callback" provided in the
HolisticLandmarkerOptions. Only use this method when the HolisticLandmarker
is
created with the live stream running mode.
Only use this method when the HolisticLandmarker is created with the live
stream running mode. The input timestamps should be monotonically increasing
for adjacent calls of this method. This method will return immediately after
the input image is accepted. The results will be available via the
`result_callback` provided in the `HolisticLandmarkerOptions`. The
`detect_async` method is designed to process live stream data such as
camera input. To lower the overall latency, holistic landmarker may drop the
input images if needed. In other words, it's not guaranteed to have output
per input image.
The `result_callback` provides:
- The holistic landmarks detection results.
- The input image that the holistic landmarker runs on.
- The input timestamp in milliseconds.
Args:
image: MediaPipe Image.
timestamp_ms: The timestamp of the input image in milliseconds.
Raises:
ValueError: If the current input timestamp is smaller than what the
holistic landmarker has already processed.
"""
self._send_live_stream_data({
_IMAGE_IN_STREAM_NAME: packet_creator.create_image(image).at(
timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND
),
})
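# Illustrative usage sketch (not part of this module): the three running modes
# described in HolisticLandmarkerOptions above. The model and image file names
# are assumptions borrowed from the test data; `on_result` is an arbitrary
# callback name.
def _example_usage() -> None:
  image = image_module.Image.create_from_file('male_full_height_hands.jpg')
  base_options = _BaseOptions(model_asset_path='holistic_landmarker.task')

  # Image mode: one synchronous call per image.
  options = HolisticLandmarkerOptions(
      base_options=base_options, running_mode=_RunningMode.IMAGE
  )
  with HolisticLandmarker.create_from_options(options) as landmarker:
    result = landmarker.detect(image)
    print(len(result.pose_landmarks), len(result.face_landmarks))

  # Video mode: timestamps (in milliseconds) must be monotonically increasing.
  options = HolisticLandmarkerOptions(
      base_options=base_options, running_mode=_RunningMode.VIDEO
  )
  with HolisticLandmarker.create_from_options(options) as landmarker:
    for timestamp_ms in range(0, 300, 33):
      result = landmarker.detect_for_video(image, timestamp_ms)

  # Live stream mode: results arrive asynchronously via result_callback.
  def on_result(result, output_image, timestamp_ms):
    print(timestamp_ms, len(result.pose_landmarks))

  options = HolisticLandmarkerOptions(
      base_options=base_options,
      running_mode=_RunningMode.LIVE_STREAM,
      result_callback=on_result,
  )
  with HolisticLandmarker.create_from_options(options) as landmarker:
    for timestamp_ms in range(0, 300, 33):
      landmarker.detect_async(image, timestamp_ms)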


@@ -58,9 +58,11 @@ mediapipe_files(srcs = [
"hand_landmark_lite.tflite",
"hand_landmarker.task",
"handrecrop_2020_07_21_v0.f16.tflite",
"holistic_landmarker.task",
"left_hands.jpg",
"left_hands_rotated.jpg",
"leopard_bg_removal_result_512x512.png",
"male_full_height_hands.jpg",
"mobilenet_v1_0.25_192_quantized_1_default_1.tflite",
"mobilenet_v1_0.25_224_1_default_1.tflite",
"mobilenet_v1_0.25_224_1_metadata_1.tflite",
@@ -142,6 +144,7 @@ filegroup(
"left_hands.jpg",
"left_hands_rotated.jpg",
"leopard_bg_removal_result_512x512.png",
"male_full_height_hands.jpg",
"mozart_square.jpg",
"multi_objects.jpg",
"multi_objects_rotated.jpg",
@@ -194,6 +197,7 @@ filegroup(
"hand_landmark_lite.tflite",
"hand_landmarker.task",
"handrecrop_2020_07_21_v0.f16.tflite",
"holistic_landmarker.task",
"mobilenet_v1_0.25_192_quantized_1_default_1.tflite",
"mobilenet_v1_0.25_224_1_default_1.tflite",
"mobilenet_v1_0.25_224_1_metadata_1.tflite",