diff --git a/mediapipe/python/BUILD b/mediapipe/python/BUILD index debe5404a..27a98cc9c 100644 --- a/mediapipe/python/BUILD +++ b/mediapipe/python/BUILD @@ -89,6 +89,7 @@ cc_library( deps = [ "//mediapipe/tasks/cc/vision/gesture_recognizer:gesture_recognizer_graph", "//mediapipe/tasks/cc/vision/image_classifier:image_classifier_graph", + "//mediapipe/tasks/cc/vision/image_embedder:image_embedder_graph", "//mediapipe/tasks/cc/vision/image_segmenter:image_segmenter_graph", "//mediapipe/tasks/cc/vision/object_detector:object_detector_graph", ] + select({ diff --git a/mediapipe/tasks/python/components/containers/BUILD b/mediapipe/tasks/python/components/containers/BUILD index b02021acd..d931c26c7 100644 --- a/mediapipe/tasks/python/components/containers/BUILD +++ b/mediapipe/tasks/python/components/containers/BUILD @@ -95,3 +95,12 @@ py_library( "//mediapipe/tasks/python/core:optional_dependencies", ], ) + +py_library( + name = "embedding_result", + srcs = ["embedding_result.py"], + deps = [ + "//mediapipe/tasks/cc/components/containers/proto:embeddings_py_pb2", + "//mediapipe/tasks/python/core:optional_dependencies", + ], +) diff --git a/mediapipe/tasks/python/components/containers/embedding_result.py b/mediapipe/tasks/python/components/containers/embedding_result.py new file mode 100644 index 000000000..8ddbb3ae5 --- /dev/null +++ b/mediapipe/tasks/python/components/containers/embedding_result.py @@ -0,0 +1,125 @@ +# Copyright 2022 The MediaPipe Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +"""Embeddings data class.""" + +import dataclasses +from typing import Optional, List + +import numpy as np +from mediapipe.tasks.cc.components.containers.proto import embeddings_pb2 +from mediapipe.tasks.python.core.optional_dependencies import doc_controls + +_FloatEmbeddingProto = embeddings_pb2.FloatEmbedding +_QuantizedEmbeddingProto = embeddings_pb2.QuantizedEmbedding +_EmbeddingProto = embeddings_pb2.Embedding +_EmbeddingResultProto = embeddings_pb2.EmbeddingResult + + +@dataclasses.dataclass +class FloatEmbedding: + """Defines a dense floating-point embedding. + + Attributes: + values: A NumPy array indicating the raw output of the embedding layer. + """ + + values: np.ndarray + + @classmethod + @doc_controls.do_not_generate_docs + def create_from_pb2(cls, pb2_obj: _FloatEmbeddingProto) -> 'FloatEmbedding': + """Creates a `FloatEmbedding` object from the given protobuf object.""" + return FloatEmbedding(values=np.array(pb2_obj.values, dtype=float)) + + +@dataclasses.dataclass +class QuantizedEmbedding: + """Defines a dense scalar-quantized embedding. + + Attributes: + values: A NumPy array indicating the raw output of the embedding layer. + """ + + values: np.ndarray + + @classmethod + @doc_controls.do_not_generate_docs + def create_from_pb2( + cls, pb2_obj: _QuantizedEmbeddingProto) -> 'QuantizedEmbedding': + """Creates a `QuantizedEmbedding` object from the given protobuf object.""" + return QuantizedEmbedding( + values=np.array(bytearray(pb2_obj.values), dtype=np.uint8)) + + +@dataclasses.dataclass +class Embedding: + """Embedding result for a given embedder head. + + Attributes: + embedding: The actual embedding, either floating-point or scalar-quantized. + head_index: The index of the embedder head that produced this embedding. + This is useful for multi-head models. 
+ head_name: The name of the embedder head, which is the corresponding tensor + metadata name (if any). This is useful for multi-head models. + """ + + embedding: np.ndarray + head_index: Optional[int] = None + head_name: Optional[str] = None + + @classmethod + @doc_controls.do_not_generate_docs + def create_from_pb2(cls, pb2_obj: _EmbeddingProto) -> 'Embedding': + """Creates a `Embedding` object from the given protobuf object.""" + + quantized_embedding = np.array( + bytearray(pb2_obj.quantized_embedding.values)) + float_embedding = np.array(pb2_obj.float_embedding.values, dtype=float) + + if not quantized_embedding: + return Embedding( + embedding=float_embedding, + head_index=pb2_obj.head_index, + head_name=pb2_obj.head_name) + else: + return Embedding( + embedding=quantized_embedding, + head_index=pb2_obj.head_index, + head_name=pb2_obj.head_name) + + +@dataclasses.dataclass +class EmbeddingResult: + """Embedding results for a given embedder model. + + Attributes: + embeddings: A list of `Embedding` objects. + timestamp_ms: The optional timestamp (in milliseconds) of the start of the + chunk of data corresponding to these results. This is only used for + embedding extraction on time series (e.g. audio embedding). In these use + cases, the amount of data to process might exceed the maximum size that + the model can process: to solve this, the input data is split into + multiple chunks starting at different timestamps. 
+ """ + + embeddings: List[Embedding] + timestamp_ms: Optional[int] = None + + @classmethod + @doc_controls.do_not_generate_docs + def create_from_pb2(cls, pb2_obj: _EmbeddingResultProto) -> 'EmbeddingResult': + """Creates a `EmbeddingResult` object from the given protobuf object.""" + return EmbeddingResult(embeddings=[ + Embedding.create_from_pb2(embedding) for embedding in pb2_obj.embeddings + ]) diff --git a/mediapipe/tasks/python/components/processors/BUILD b/mediapipe/tasks/python/components/processors/BUILD index f87a579b0..eef368db0 100644 --- a/mediapipe/tasks/python/components/processors/BUILD +++ b/mediapipe/tasks/python/components/processors/BUILD @@ -28,3 +28,12 @@ py_library( "//mediapipe/tasks/python/core:optional_dependencies", ], ) + +py_library( + name = "embedder_options", + srcs = ["embedder_options.py"], + deps = [ + "//mediapipe/tasks/cc/components/processors/proto:embedder_options_py_pb2", + "//mediapipe/tasks/python/core:optional_dependencies", + ], +) diff --git a/mediapipe/tasks/python/components/processors/embedder_options.py b/mediapipe/tasks/python/components/processors/embedder_options.py new file mode 100644 index 000000000..c86a91105 --- /dev/null +++ b/mediapipe/tasks/python/components/processors/embedder_options.py @@ -0,0 +1,68 @@ +# Copyright 2022 The MediaPipe Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Embedder options data class.""" + +import dataclasses +from typing import Any, Optional + +from mediapipe.tasks.cc.components.processors.proto import embedder_options_pb2 +from mediapipe.tasks.python.core.optional_dependencies import doc_controls + +_EmbedderOptionsProto = embedder_options_pb2.EmbedderOptions + + +@dataclasses.dataclass +class EmbedderOptions: + """Shared options used by all embedding extraction tasks. + + Attributes: + l2_normalize: Whether to normalize the returned feature vector with L2 norm. + Use this option only if the model does not already contain a native + L2_NORMALIZATION TF Lite Op. In most cases, this is already the case and + L2 norm is thus achieved through TF Lite inference. + quantize: Whether the returned embedding should be quantized to bytes via + scalar quantization. Embeddings are implicitly assumed to be unit-norm and + therefore any dimension is guaranteed to have a value in [-1.0, 1.0]. Use + the l2_normalize option if this is not the case. + """ + + l2_normalize: Optional[bool] = None + quantize: Optional[bool] = None + + @doc_controls.do_not_generate_docs + def to_pb2(self) -> _EmbedderOptionsProto: + """Generates a EmbedderOptions protobuf object.""" + return _EmbedderOptionsProto( + l2_normalize=self.l2_normalize, quantize=self.quantize) + + @classmethod + @doc_controls.do_not_generate_docs + def create_from_pb2(cls, pb2_obj: _EmbedderOptionsProto) -> 'EmbedderOptions': + """Creates a `EmbedderOptions` object from the given protobuf object.""" + return EmbedderOptions( + l2_normalize=pb2_obj.l2_normalize, quantize=pb2_obj.quantize) + + def __eq__(self, other: Any) -> bool: + """Checks if this object is equal to the given object. + + Args: + other: The object to be compared with. + + Returns: + True if the objects are equal. 
+ """ + if not isinstance(other, EmbedderOptions): + return False + + return self.to_pb2().__eq__(other.to_pb2()) diff --git a/mediapipe/tasks/python/components/utils/BUILD b/mediapipe/tasks/python/components/utils/BUILD new file mode 100644 index 000000000..b64d04c72 --- /dev/null +++ b/mediapipe/tasks/python/components/utils/BUILD @@ -0,0 +1,30 @@ +# Copyright 2022 The MediaPipe Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Placeholder for internal Python strict library compatibility macro. + +# Placeholder for internal Python strict library and test compatibility macro. + +package(default_visibility = ["//mediapipe/tasks:internal"]) + +licenses(["notice"]) + +py_library( + name = "cosine_similarity", + srcs = ["cosine_similarity.py"], + deps = [ + "//mediapipe/tasks/python/components/containers:embedding_result", + "//mediapipe/tasks/python/components/processors:embedder_options", + ], +) diff --git a/mediapipe/tasks/python/components/utils/__init__.py b/mediapipe/tasks/python/components/utils/__init__.py new file mode 100644 index 000000000..65c1214af --- /dev/null +++ b/mediapipe/tasks/python/components/utils/__init__.py @@ -0,0 +1,13 @@ +# Copyright 2022 The MediaPipe Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. diff --git a/mediapipe/tasks/python/components/utils/cosine_similarity.py b/mediapipe/tasks/python/components/utils/cosine_similarity.py new file mode 100644 index 000000000..486c02ece --- /dev/null +++ b/mediapipe/tasks/python/components/utils/cosine_similarity.py @@ -0,0 +1,67 @@ +# Copyright 2022 The MediaPipe Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Cosine similarity utilities."""
+
+import numpy as np
+
+from mediapipe.tasks.python.components.containers import embedding_result
+from mediapipe.tasks.python.components.processors import embedder_options
+
+_Embedding = embedding_result.Embedding
+_EmbedderOptions = embedder_options.EmbedderOptions
+
+
+def _compute_cosine_similarity(u, v):
+  """Computes cosine similarity between two embeddings."""
+
+  if len(u.embedding) <= 0:
+    raise ValueError("Cannot compute cosine similarity on empty embeddings.")
+
+  norm_u = np.linalg.norm(u.embedding)
+  norm_v = np.linalg.norm(v.embedding)
+
+  if norm_u <= 0 or norm_v <= 0:
+    raise ValueError(
+        "Cannot compute cosine similarity on embedding with 0 norm.")
+
+  return np.dot(u.embedding, v.embedding.T) / (norm_u * norm_v)
+
+
+def cosine_similarity(u: _Embedding, v: _Embedding) -> float:
+  """Utility function to compute cosine similarity between two embeddings.
+
+  May raise a ValueError if e.g. the feature vectors are of
+  different types (quantized vs. float), have different sizes, or have an
+  L2-norm of 0.
+
+  Args:
+    u: An embedding.
+    v: An embedding.
+
+  Returns:
+    Cosine similarity value.
+  """
+  if len(u.embedding) != len(v.embedding):
+    raise ValueError(f"Cannot compute cosine similarity between embeddings "
+                     f"of different sizes "
+                     f"({len(u.embedding)} vs. 
{len(v.embedding)}).") + + if u.embedding.dtype == float and v.embedding.dtype == float: + return _compute_cosine_similarity(u, v) + + if u.embedding.dtype == np.uint8 and v.embedding.dtype == np.uint8: + return _compute_cosine_similarity(u, v) + + raise ValueError("Cannot compute cosine similarity between quantized and " + "float embeddings.") diff --git a/mediapipe/tasks/python/test/vision/BUILD b/mediapipe/tasks/python/test/vision/BUILD index a58474aaf..066107421 100644 --- a/mediapipe/tasks/python/test/vision/BUILD +++ b/mediapipe/tasks/python/test/vision/BUILD @@ -58,6 +58,26 @@ py_test( ], ) +py_test( + name = "image_embedder_test", + srcs = ["image_embedder_test.py"], + data = [ + "//mediapipe/tasks/testdata/vision:test_images", + "//mediapipe/tasks/testdata/vision:test_models", + ], + deps = [ + "//mediapipe/python:_framework_bindings", + "//mediapipe/tasks/python/components/containers:embedding_result", + "//mediapipe/tasks/python/components/containers:rect", + "//mediapipe/tasks/python/components/processors:embedder_options", + "//mediapipe/tasks/python/core:base_options", + "//mediapipe/tasks/python/test:test_utils", + "//mediapipe/tasks/python/vision:image_embedder", + "//mediapipe/tasks/python/vision/core:image_processing_options", + "//mediapipe/tasks/python/vision/core:vision_task_running_mode", + ], +) + py_test( name = "image_segmenter_test", srcs = ["image_segmenter_test.py"], diff --git a/mediapipe/tasks/python/test/vision/image_embedder_test.py b/mediapipe/tasks/python/test/vision/image_embedder_test.py new file mode 100644 index 000000000..d28320d71 --- /dev/null +++ b/mediapipe/tasks/python/test/vision/image_embedder_test.py @@ -0,0 +1,402 @@ +# Copyright 2022 The MediaPipe Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tests for image embedder.""" + +import enum +import os +from unittest import mock + +from absl.testing import absltest +from absl.testing import parameterized +import numpy as np + +from mediapipe.python._framework_bindings import image as image_module +from mediapipe.tasks.python.components.containers import embedding_result as embedding_result_module +from mediapipe.tasks.python.components.containers import rect +from mediapipe.tasks.python.components.processors import embedder_options as embedder_options_module +from mediapipe.tasks.python.core import base_options as base_options_module +from mediapipe.tasks.python.test import test_utils +from mediapipe.tasks.python.vision import image_embedder +from mediapipe.tasks.python.vision.core import image_processing_options as image_processing_options_module +from mediapipe.tasks.python.vision.core import vision_task_running_mode as running_mode_module + +ImageEmbedderResult = embedding_result_module.EmbeddingResult +_Rect = rect.Rect +_BaseOptions = base_options_module.BaseOptions +_EmbedderOptions = embedder_options_module.EmbedderOptions +_FloatEmbedding = embedding_result_module.FloatEmbedding +_QuantizedEmbedding = embedding_result_module.QuantizedEmbedding +_Embedding = embedding_result_module.Embedding +_Image = image_module.Image +_ImageEmbedder = image_embedder.ImageEmbedder +_ImageEmbedderOptions = image_embedder.ImageEmbedderOptions +_RUNNING_MODE = running_mode_module.VisionTaskRunningMode +_ImageProcessingOptions = image_processing_options_module.ImageProcessingOptions + +_MODEL_FILE = 
'mobilenet_v3_small_100_224_embedder.tflite' +_BURGER_IMAGE_FILE = 'burger.jpg' +_BURGER_CROPPED_IMAGE_FILE = 'burger_crop.jpg' +_TEST_DATA_DIR = 'mediapipe/tasks/testdata/vision' +# Tolerance for embedding vector coordinate values. +_EPSILON = 1e-4 +# Tolerance for cosine similarity evaluation. +_SIMILARITY_TOLERANCE = 1e-6 + + +class ModelFileType(enum.Enum): + FILE_CONTENT = 1 + FILE_NAME = 2 + + +class ImageEmbedderTest(parameterized.TestCase): + + def setUp(self): + super().setUp() + self.test_image = _Image.create_from_file( + test_utils.get_test_data_path( + os.path.join(_TEST_DATA_DIR, _BURGER_IMAGE_FILE))) + self.test_cropped_image = _Image.create_from_file( + test_utils.get_test_data_path( + os.path.join(_TEST_DATA_DIR, _BURGER_CROPPED_IMAGE_FILE))) + self.model_path = test_utils.get_test_data_path( + os.path.join(_TEST_DATA_DIR, _MODEL_FILE)) + + def test_create_from_file_succeeds_with_valid_model_path(self): + # Creates with default option and valid model file successfully. + with _ImageEmbedder.create_from_model_path(self.model_path) as embedder: + self.assertIsInstance(embedder, _ImageEmbedder) + + def test_create_from_options_succeeds_with_valid_model_path(self): + # Creates with options containing model file successfully. + base_options = _BaseOptions(model_asset_path=self.model_path) + options = _ImageEmbedderOptions(base_options=base_options) + with _ImageEmbedder.create_from_options(options) as embedder: + self.assertIsInstance(embedder, _ImageEmbedder) + + def test_create_from_options_fails_with_invalid_model_path(self): + with self.assertRaisesRegex( + RuntimeError, 'Unable to open file at /path/to/invalid/model.tflite'): + base_options = _BaseOptions( + model_asset_path='/path/to/invalid/model.tflite') + options = _ImageEmbedderOptions(base_options=base_options) + _ImageEmbedder.create_from_options(options) + + def test_create_from_options_succeeds_with_valid_model_content(self): + # Creates with options containing model content successfully. 
+ with open(self.model_path, 'rb') as f: + base_options = _BaseOptions(model_asset_buffer=f.read()) + options = _ImageEmbedderOptions(base_options=base_options) + embedder = _ImageEmbedder.create_from_options(options) + self.assertIsInstance(embedder, _ImageEmbedder) + + def _check_embedding_value(self, result, expected_first_value): + # Check embedding first value. + self.assertAlmostEqual( + result.embeddings[0].embedding[0], expected_first_value, delta=_EPSILON) + + def _check_embedding_size(self, result, quantize, expected_embedding_size): + # Check embedding size. + self.assertLen(result.embeddings, 1) + embedding_result = result.embeddings[0] + self.assertLen(embedding_result.embedding, expected_embedding_size) + if quantize: + self.assertEqual(embedding_result.embedding.dtype, np.uint8) + else: + self.assertEqual(embedding_result.embedding.dtype, float) + + def _check_cosine_similarity(self, result0, result1, expected_similarity): + # Checks cosine similarity. + similarity = _ImageEmbedder.cosine_similarity(result0.embeddings[0], + result1.embeddings[0]) + self.assertAlmostEqual( + similarity, expected_similarity, delta=_SIMILARITY_TOLERANCE) + + @parameterized.parameters( + (False, False, False, ModelFileType.FILE_NAME, 0.925519, 1024, + (-0.2101883, -0.193027)), + (True, False, False, ModelFileType.FILE_NAME, 0.925519, 1024, + (-0.0142344, -0.0131606)), + # (False, True, False, ModelFileType.FILE_NAME, + # 0.926791, 1024, (229, 231)), + (False, False, True, ModelFileType.FILE_CONTENT, 0.999931, 1024, + (-0.195062, -0.193027))) + def test_embed(self, l2_normalize, quantize, with_roi, model_file_type, + expected_similarity, expected_size, expected_first_values): + # Creates embedder. 
+ if model_file_type is ModelFileType.FILE_NAME: + base_options = _BaseOptions(model_asset_path=self.model_path) + elif model_file_type is ModelFileType.FILE_CONTENT: + with open(self.model_path, 'rb') as f: + model_content = f.read() + base_options = _BaseOptions(model_asset_buffer=model_content) + else: + # Should never happen + raise ValueError('model_file_type is invalid.') + + embedder_options = _EmbedderOptions( + l2_normalize=l2_normalize, quantize=quantize) + options = _ImageEmbedderOptions( + base_options=base_options, embedder_options=embedder_options) + embedder = _ImageEmbedder.create_from_options(options) + + image_processing_options = None + if with_roi: + # Region-of-interest in "burger.jpg" corresponding to "burger_crop.jpg". + roi = _Rect(left=0, top=0, right=0.833333, bottom=1) + image_processing_options = _ImageProcessingOptions(roi) + + # Extracts both embeddings. + image_result = embedder.embed(self.test_image, image_processing_options) + crop_result = embedder.embed(self.test_cropped_image) + + # Checks embeddings and cosine similarity. + expected_result0_value, expected_result1_value = expected_first_values + self._check_embedding_size(image_result, quantize, expected_size) + self._check_embedding_size(crop_result, quantize, expected_size) + self._check_embedding_value(image_result, expected_result0_value) + self._check_embedding_value(crop_result, expected_result1_value) + self._check_cosine_similarity(image_result, crop_result, + expected_similarity) + # Closes the embedder explicitly when the embedder is not used in + # a context. + embedder.close() + + @parameterized.parameters( + (False, False, ModelFileType.FILE_NAME, 0.925519), + (False, False, ModelFileType.FILE_CONTENT, 0.925519)) + def test_embed_in_context(self, l2_normalize, quantize, model_file_type, + expected_similarity): + # Creates embedder. 
+ if model_file_type is ModelFileType.FILE_NAME: + base_options = _BaseOptions(model_asset_path=self.model_path) + elif model_file_type is ModelFileType.FILE_CONTENT: + with open(self.model_path, 'rb') as f: + model_content = f.read() + base_options = _BaseOptions(model_asset_buffer=model_content) + else: + # Should never happen + raise ValueError('model_file_type is invalid.') + + embedder_options = _EmbedderOptions( + l2_normalize=l2_normalize, quantize=quantize) + options = _ImageEmbedderOptions( + base_options=base_options, embedder_options=embedder_options) + + with _ImageEmbedder.create_from_options(options) as embedder: + # Extracts both embeddings. + image_result = embedder.embed(self.test_image) + crop_result = embedder.embed(self.test_cropped_image) + + # Checks cosine similarity. + self._check_cosine_similarity(image_result, crop_result, + expected_similarity) + + def test_missing_result_callback(self): + options = _ImageEmbedderOptions( + base_options=_BaseOptions(model_asset_path=self.model_path), + running_mode=_RUNNING_MODE.LIVE_STREAM) + with self.assertRaisesRegex(ValueError, + r'result callback must be provided'): + with _ImageEmbedder.create_from_options(options) as unused_embedder: + pass + + @parameterized.parameters((_RUNNING_MODE.IMAGE), (_RUNNING_MODE.VIDEO)) + def test_illegal_result_callback(self, running_mode): + options = _ImageEmbedderOptions( + base_options=_BaseOptions(model_asset_path=self.model_path), + running_mode=running_mode, + result_callback=mock.MagicMock()) + with self.assertRaisesRegex(ValueError, + r'result callback should not be provided'): + with _ImageEmbedder.create_from_options(options) as unused_embedder: + pass + + def test_calling_embed_for_video_in_image_mode(self): + options = _ImageEmbedderOptions( + base_options=_BaseOptions(model_asset_path=self.model_path), + running_mode=_RUNNING_MODE.IMAGE) + with _ImageEmbedder.create_from_options(options) as embedder: + with self.assertRaisesRegex(ValueError, + r'not 
initialized with the video mode'): + embedder.embed_for_video(self.test_image, 0) + + def test_calling_embed_async_in_image_mode(self): + options = _ImageEmbedderOptions( + base_options=_BaseOptions(model_asset_path=self.model_path), + running_mode=_RUNNING_MODE.IMAGE) + with _ImageEmbedder.create_from_options(options) as embedder: + with self.assertRaisesRegex(ValueError, + r'not initialized with the live stream mode'): + embedder.embed_async(self.test_image, 0) + + def test_calling_embed_in_video_mode(self): + options = _ImageEmbedderOptions( + base_options=_BaseOptions(model_asset_path=self.model_path), + running_mode=_RUNNING_MODE.VIDEO) + with _ImageEmbedder.create_from_options(options) as embedder: + with self.assertRaisesRegex(ValueError, + r'not initialized with the image mode'): + embedder.embed(self.test_image) + + def test_calling_embed_async_in_video_mode(self): + options = _ImageEmbedderOptions( + base_options=_BaseOptions(model_asset_path=self.model_path), + running_mode=_RUNNING_MODE.VIDEO) + with _ImageEmbedder.create_from_options(options) as embedder: + with self.assertRaisesRegex(ValueError, + r'not initialized with the live stream mode'): + embedder.embed_async(self.test_image, 0) + + def test_embed_for_video_with_out_of_order_timestamp(self): + options = _ImageEmbedderOptions( + base_options=_BaseOptions(model_asset_path=self.model_path), + running_mode=_RUNNING_MODE.VIDEO) + with _ImageEmbedder.create_from_options(options) as embedder: + unused_result = embedder.embed_for_video(self.test_image, 1) + with self.assertRaisesRegex( + ValueError, r'Input timestamp must be monotonically increasing'): + embedder.embed_for_video(self.test_image, 0) + + def test_embed_for_video(self): + options = _ImageEmbedderOptions( + base_options=_BaseOptions(model_asset_path=self.model_path), + running_mode=_RUNNING_MODE.VIDEO) + with _ImageEmbedder.create_from_options(options) as embedder0, \ + _ImageEmbedder.create_from_options(options) as embedder1: + for 
timestamp in range(0, 300, 30): + # Extracts both embeddings. + image_result = embedder0.embed_for_video(self.test_image, timestamp) + crop_result = embedder1.embed_for_video(self.test_cropped_image, + timestamp) + # Checks cosine similarity. + self._check_cosine_similarity( + image_result, crop_result, expected_similarity=0.925519) + + def test_embed_for_video_succeeds_with_region_of_interest(self): + options = _ImageEmbedderOptions( + base_options=_BaseOptions(model_asset_path=self.model_path), + running_mode=_RUNNING_MODE.VIDEO) + with _ImageEmbedder.create_from_options(options) as embedder0, \ + _ImageEmbedder.create_from_options(options) as embedder1: + # Region-of-interest in "burger.jpg" corresponding to "burger_crop.jpg". + roi = _Rect(left=0, top=0, right=0.833333, bottom=1) + image_processing_options = _ImageProcessingOptions(roi) + + for timestamp in range(0, 300, 30): + # Extracts both embeddings. + image_result = embedder0.embed_for_video(self.test_image, timestamp, + image_processing_options) + crop_result = embedder1.embed_for_video(self.test_cropped_image, + timestamp) + + # Checks cosine similarity. 
+ self._check_cosine_similarity( + image_result, crop_result, expected_similarity=0.999931) + + def test_calling_embed_in_live_stream_mode(self): + options = _ImageEmbedderOptions( + base_options=_BaseOptions(model_asset_path=self.model_path), + running_mode=_RUNNING_MODE.LIVE_STREAM, + result_callback=mock.MagicMock()) + with _ImageEmbedder.create_from_options(options) as embedder: + with self.assertRaisesRegex(ValueError, + r'not initialized with the image mode'): + embedder.embed(self.test_image) + + def test_calling_embed_for_video_in_live_stream_mode(self): + options = _ImageEmbedderOptions( + base_options=_BaseOptions(model_asset_path=self.model_path), + running_mode=_RUNNING_MODE.LIVE_STREAM, + result_callback=mock.MagicMock()) + with _ImageEmbedder.create_from_options(options) as embedder: + with self.assertRaisesRegex(ValueError, + r'not initialized with the video mode'): + embedder.embed_for_video(self.test_image, 0) + + def test_embed_async_calls_with_illegal_timestamp(self): + options = _ImageEmbedderOptions( + base_options=_BaseOptions(model_asset_path=self.model_path), + running_mode=_RUNNING_MODE.LIVE_STREAM, + result_callback=mock.MagicMock()) + with _ImageEmbedder.create_from_options(options) as embedder: + embedder.embed_async(self.test_image, 100) + with self.assertRaisesRegex( + ValueError, r'Input timestamp must be monotonically increasing'): + embedder.embed_async(self.test_image, 0) + + def test_embed_async_calls(self): + # Get the embedding result for the cropped image. + options = _ImageEmbedderOptions( + base_options=_BaseOptions(model_asset_path=self.model_path), + running_mode=_RUNNING_MODE.IMAGE) + with _ImageEmbedder.create_from_options(options) as embedder: + crop_result = embedder.embed(self.test_cropped_image) + + observed_timestamp_ms = -1 + + def check_result(result: ImageEmbedderResult, output_image: _Image, + timestamp_ms: int): + # Checks cosine similarity. 
+ self._check_cosine_similarity( + result, crop_result, expected_similarity=0.925519) + self.assertTrue( + np.array_equal(output_image.numpy_view(), + self.test_image.numpy_view())) + self.assertLess(observed_timestamp_ms, timestamp_ms) + self.observed_timestamp_ms = timestamp_ms + + options = _ImageEmbedderOptions( + base_options=_BaseOptions(model_asset_path=self.model_path), + running_mode=_RUNNING_MODE.LIVE_STREAM, + result_callback=check_result) + with _ImageEmbedder.create_from_options(options) as embedder: + for timestamp in range(0, 300, 30): + embedder.embed_async(self.test_image, timestamp) + + def test_embed_async_succeeds_with_region_of_interest(self): + # Get the embedding result for the cropped image. + options = _ImageEmbedderOptions( + base_options=_BaseOptions(model_asset_path=self.model_path), + running_mode=_RUNNING_MODE.IMAGE) + with _ImageEmbedder.create_from_options(options) as embedder: + crop_result = embedder.embed(self.test_cropped_image) + + # Region-of-interest in "burger.jpg" corresponding to "burger_crop.jpg". + roi = _Rect(left=0, top=0, right=0.833333, bottom=1) + image_processing_options = _ImageProcessingOptions(roi) + observed_timestamp_ms = -1 + + def check_result(result: ImageEmbedderResult, output_image: _Image, + timestamp_ms: int): + # Checks cosine similarity. 
+ self._check_cosine_similarity( + result, crop_result, expected_similarity=0.999931) + self.assertTrue( + np.array_equal(output_image.numpy_view(), + self.test_image.numpy_view())) + self.assertLess(observed_timestamp_ms, timestamp_ms) + self.observed_timestamp_ms = timestamp_ms + + options = _ImageEmbedderOptions( + base_options=_BaseOptions(model_asset_path=self.model_path), + running_mode=_RUNNING_MODE.LIVE_STREAM, + result_callback=check_result) + with _ImageEmbedder.create_from_options(options) as embedder: + for timestamp in range(0, 300, 30): + embedder.embed_async(self.test_image, timestamp, + image_processing_options) + + +if __name__ == '__main__': + absltest.main() diff --git a/mediapipe/tasks/python/vision/BUILD b/mediapipe/tasks/python/vision/BUILD index 97b92f2b5..e94507eed 100644 --- a/mediapipe/tasks/python/vision/BUILD +++ b/mediapipe/tasks/python/vision/BUILD @@ -79,6 +79,29 @@ py_library( ], ) +py_library( + name = "image_embedder", + srcs = [ + "image_embedder.py", + ], + deps = [ + "//mediapipe/python:_framework_bindings", + "//mediapipe/python:packet_creator", + "//mediapipe/python:packet_getter", + "//mediapipe/tasks/cc/components/containers/proto:embeddings_py_pb2", + "//mediapipe/tasks/cc/vision/image_embedder/proto:image_embedder_graph_options_py_pb2", + "//mediapipe/tasks/python/components/containers:embedding_result", + "//mediapipe/tasks/python/components/processors:embedder_options", + "//mediapipe/tasks/python/components/utils:cosine_similarity", + "//mediapipe/tasks/python/core:base_options", + "//mediapipe/tasks/python/core:optional_dependencies", + "//mediapipe/tasks/python/core:task_info", + "//mediapipe/tasks/python/vision/core:base_vision_task_api", + "//mediapipe/tasks/python/vision/core:image_processing_options", + "//mediapipe/tasks/python/vision/core:vision_task_running_mode", + ], +) + py_library( name = "gesture_recognizer", srcs = [ diff --git a/mediapipe/tasks/python/vision/image_embedder.py 
b/mediapipe/tasks/python/vision/image_embedder.py new file mode 100644 index 000000000..922040397 --- /dev/null +++ b/mediapipe/tasks/python/vision/image_embedder.py @@ -0,0 +1,309 @@ +# Copyright 2022 The MediaPipe Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""MediaPipe image embedder task.""" + +import dataclasses +from typing import Callable, Mapping, Optional + +from mediapipe.python import packet_creator +from mediapipe.python import packet_getter +from mediapipe.python._framework_bindings import image as image_module +from mediapipe.python._framework_bindings import packet as packet_module +from mediapipe.tasks.cc.components.containers.proto import embeddings_pb2 +from mediapipe.tasks.cc.vision.image_embedder.proto import image_embedder_graph_options_pb2 +from mediapipe.tasks.python.components.containers import embedding_result as embedding_result_module +from mediapipe.tasks.python.components.processors import embedder_options +from mediapipe.tasks.python.components.utils import cosine_similarity +from mediapipe.tasks.python.core import base_options as base_options_module +from mediapipe.tasks.python.core import task_info as task_info_module +from mediapipe.tasks.python.core.optional_dependencies import doc_controls +from mediapipe.tasks.python.vision.core import base_vision_task_api +from mediapipe.tasks.python.vision.core import image_processing_options as image_processing_options_module +from 
mediapipe.tasks.python.vision.core import vision_task_running_mode as running_mode_module + +ImageEmbedderResult = embedding_result_module.EmbeddingResult +_BaseOptions = base_options_module.BaseOptions +_ImageEmbedderGraphOptionsProto = image_embedder_graph_options_pb2.ImageEmbedderGraphOptions +_EmbedderOptions = embedder_options.EmbedderOptions +_RunningMode = running_mode_module.VisionTaskRunningMode +_TaskInfo = task_info_module.TaskInfo +_ImageProcessingOptions = image_processing_options_module.ImageProcessingOptions + +_EMBEDDINGS_OUT_STREAM_NAME = 'embeddings_out' +_EMBEDDINGS_TAG = 'EMBEDDINGS' +_IMAGE_IN_STREAM_NAME = 'image_in' +_IMAGE_OUT_STREAM_NAME = 'image_out' +_IMAGE_TAG = 'IMAGE' +_NORM_RECT_STREAM_NAME = 'norm_rect_in' +_NORM_RECT_TAG = 'NORM_RECT' +_TASK_GRAPH_NAME = 'mediapipe.tasks.vision.image_embedder.ImageEmbedderGraph' +_MICRO_SECONDS_PER_MILLISECOND = 1000 + + +@dataclasses.dataclass +class ImageEmbedderOptions: + """Options for the image embedder task. + + Attributes: + base_options: Base options for the image embedder task. + running_mode: The running mode of the task. Default to the image mode. Image + embedder task has three running modes: 1) The image mode for embedding + image on single image inputs. 2) The video mode for embedding image on the + decoded frames of a video. 3) The live stream mode for embedding image on + a live stream of input data, such as from camera. + embedder_options: Options for the image embedder task. + result_callback: The user-defined result callback for processing live stream + data. The result callback should only be specified when the running mode + is set to the live stream mode. 
+ """ + base_options: _BaseOptions + running_mode: _RunningMode = _RunningMode.IMAGE + embedder_options: _EmbedderOptions = _EmbedderOptions() + result_callback: Optional[Callable[ + [ImageEmbedderResult, image_module.Image, int], None]] = None + + @doc_controls.do_not_generate_docs + def to_pb2(self) -> _ImageEmbedderGraphOptionsProto: + """Generates an ImageEmbedderOptions protobuf object.""" + base_options_proto = self.base_options.to_pb2() + base_options_proto.use_stream_mode = False if self.running_mode == _RunningMode.IMAGE else True + embedder_options_proto = self.embedder_options.to_pb2() + + return _ImageEmbedderGraphOptionsProto( + base_options=base_options_proto, + embedder_options=embedder_options_proto) + + +class ImageEmbedder(base_vision_task_api.BaseVisionTaskApi): + """Class that performs embedding extraction on images.""" + + @classmethod + def create_from_model_path(cls, model_path: str) -> 'ImageEmbedder': + """Creates an `ImageEmbedder` object from a TensorFlow Lite model and the default `ImageEmbedderOptions`. + + Note that the created `ImageEmbedder` instance is in image mode, for + embedding image on single image inputs. + + Args: + model_path: Path to the model. + + Returns: + `ImageEmbedder` object that's created from the model file and the default + `ImageEmbedderOptions`. + + Raises: + ValueError: If failed to create `ImageEmbedder` object from the provided + file such as invalid file path. + RuntimeError: If other types of error occurred. + """ + base_options = _BaseOptions(model_asset_path=model_path) + options = ImageEmbedderOptions( + base_options=base_options, running_mode=_RunningMode.IMAGE) + return cls.create_from_options(options) + + @classmethod + def create_from_options(cls, + options: ImageEmbedderOptions) -> 'ImageEmbedder': + """Creates the `ImageEmbedder` object from image embedder options. + + Args: + options: Options for the image embedder task. + + Returns: + `ImageEmbedder` object that's created from `options`. 
+ + Raises: + ValueError: If failed to create `ImageEmbedder` object from + `ImageEmbedderOptions` such as missing the model. + RuntimeError: If other types of error occurred. + """ + + def packets_callback(output_packets: Mapping[str, packet_module.Packet]): + if output_packets[_IMAGE_OUT_STREAM_NAME].is_empty(): + return + + embedding_result_proto = embeddings_pb2.EmbeddingResult() + embedding_result_proto.CopyFrom( + packet_getter.get_proto(output_packets[_EMBEDDINGS_OUT_STREAM_NAME])) + + image = packet_getter.get_image(output_packets[_IMAGE_OUT_STREAM_NAME]) + timestamp = output_packets[_IMAGE_OUT_STREAM_NAME].timestamp + options.result_callback( + ImageEmbedderResult.create_from_pb2(embedding_result_proto), image, + timestamp.value // _MICRO_SECONDS_PER_MILLISECOND) + + task_info = _TaskInfo( + task_graph=_TASK_GRAPH_NAME, + input_streams=[ + ':'.join([_IMAGE_TAG, _IMAGE_IN_STREAM_NAME]), + ':'.join([_NORM_RECT_TAG, _NORM_RECT_STREAM_NAME]), + ], + output_streams=[ + ':'.join([_EMBEDDINGS_TAG, _EMBEDDINGS_OUT_STREAM_NAME]), + ':'.join([_IMAGE_TAG, _IMAGE_OUT_STREAM_NAME]) + ], + task_options=options) + return cls( + task_info.generate_graph_config( + enable_flow_limiting=options.running_mode == + _RunningMode.LIVE_STREAM), options.running_mode, + packets_callback if options.result_callback else None) + + def embed( + self, + image: image_module.Image, + image_processing_options: Optional[_ImageProcessingOptions] = None + ) -> ImageEmbedderResult: + """Performs image embedding extraction on the provided MediaPipe Image. + + Extraction is performed on the region of interest specified by the `roi` + argument if provided, or on the entire image otherwise. + + Args: + image: MediaPipe Image. + image_processing_options: Options for image processing. + + Returns: + An embedding result object that contains a list of embeddings. + + Raises: + ValueError: If any of the input arguments is invalid. + RuntimeError: If image embedder failed to run. 
+ """ + normalized_rect = self.convert_to_normalized_rect(image_processing_options) + output_packets = self._process_image_data({ + _IMAGE_IN_STREAM_NAME: + packet_creator.create_image(image), + _NORM_RECT_STREAM_NAME: + packet_creator.create_proto(normalized_rect.to_pb2()) + }) + + embedding_result_proto = embeddings_pb2.EmbeddingResult() + embedding_result_proto.CopyFrom( + packet_getter.get_proto(output_packets[_EMBEDDINGS_OUT_STREAM_NAME])) + + return ImageEmbedderResult.create_from_pb2(embedding_result_proto) + + def embed_for_video( + self, + image: image_module.Image, + timestamp_ms: int, + image_processing_options: Optional[_ImageProcessingOptions] = None + ) -> ImageEmbedderResult: + """Performs image embedding extraction on the provided video frames. + + Extraction is performed on the region of interested specified by the `roi` + argument if provided, or on the entire image otherwise. + + Only use this method when the ImageEmbedder is created with the video + running mode. It's required to provide the video frame's timestamp (in + milliseconds) along with the video frame. The input timestamps should be + monotonically increasing for adjacent calls of this method. + + Args: + image: MediaPipe Image. + timestamp_ms: The timestamp of the input video frame in milliseconds. + image_processing_options: Options for image processing. + + Returns: + An embedding result object that contains a list of embeddings. + + Raises: + ValueError: If any of the input arguments is invalid. + RuntimeError: If image embedder failed to run. 
+ """ + normalized_rect = self.convert_to_normalized_rect(image_processing_options) + output_packets = self._process_video_data({ + _IMAGE_IN_STREAM_NAME: + packet_creator.create_image(image).at( + timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND), + _NORM_RECT_STREAM_NAME: + packet_creator.create_proto(normalized_rect.to_pb2()).at( + timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND) + }) + embedding_result_proto = embeddings_pb2.EmbeddingResult() + embedding_result_proto.CopyFrom( + packet_getter.get_proto(output_packets[_EMBEDDINGS_OUT_STREAM_NAME])) + + return ImageEmbedderResult.create_from_pb2(embedding_result_proto) + + def embed_async( + self, + image: image_module.Image, + timestamp_ms: int, + image_processing_options: Optional[_ImageProcessingOptions] = None + ) -> None: + """Sends live image data to embedder. + + The results will be available via the "result_callback" provided in the + ImageEmbedderOptions. Embedding extraction is performed on the region of + interested specified by the `roi` argument if provided, or on the entire + image otherwise. + + Only use this method when the ImageEmbedder is created with the live + stream running mode. The input timestamps should be monotonically increasing + for adjacent calls of this method. This method will return immediately after + the input image is accepted. The results will be available via the + `result_callback` provided in the `ImageEmbedderOptions`. The + `embed_async` method is designed to process live stream data such as + camera input. To lower the overall latency, image embedder may drop the + input images if needed. In other words, it's not guaranteed to have output + per input image. + + The `result_callback` provides: + - An embedding result object that contains a list of embeddings. + - The input image that the image embedder runs on. + - The input timestamp in milliseconds. + + Args: + image: MediaPipe Image. + timestamp_ms: The timestamp of the input image in milliseconds. 
+ image_processing_options: Options for image processing. + + Raises: + ValueError: If the current input timestamp is smaller than what the image + embedder has already processed. + """ + normalized_rect = self.convert_to_normalized_rect(image_processing_options) + self._send_live_stream_data({ + _IMAGE_IN_STREAM_NAME: + packet_creator.create_image(image).at( + timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND), + _NORM_RECT_STREAM_NAME: + packet_creator.create_proto(normalized_rect.to_pb2()).at( + timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND) + }) + + @classmethod + def cosine_similarity(cls, u: embedding_result_module.Embedding, + v: embedding_result_module.Embedding) -> float: + """Utility function to compute cosine similarity between two embedding entries. + + May raise a ValueError if e.g. the feature vectors are of + different types (quantized vs. float), have different sizes, or have an + L2-norm of 0. + + Args: + u: An embedding entry. + v: An embedding entry. + + Returns: + The cosine similarity for the two embeddings. + + Raises: + ValueError: Raised if e.g. the feature vectors are of + different types (quantized vs. float), have different sizes, or have + an L2-norm of 0. + """ + return cosine_similarity.cosine_similarity(u, v)