Added some files necessary for the Face Stylizer implementation

kinaryml 2023-03-09 02:39:21 -08:00
parent 5398b8881d
commit 7463e48fd4
5 changed files with 409 additions and 0 deletions

View File

@@ -94,6 +94,7 @@ cc_library(
        "//mediapipe/tasks/cc/vision/image_embedder:image_embedder_graph",
        "//mediapipe/tasks/cc/vision/image_segmenter:image_segmenter_graph",
        "//mediapipe/tasks/cc/vision/object_detector:object_detector_graph",
        "//mediapipe/tasks/cc/vision/face_stylizer:face_stylizer_graph",
    ] + select({
        # TODO: Build text_classifier_graph and text_embedder_graph on Windows.
        "//mediapipe:windows": [],

View File

@@ -114,3 +114,20 @@ py_test(
        "@com_google_protobuf//:protobuf_python",
    ],
)
py_test(
    name = "face_stylizer_test",
    srcs = ["face_stylizer_test.py"],
    data = [
        "//mediapipe/tasks/testdata/vision:test_images",
        "//mediapipe/tasks/testdata/vision:test_models",
    ],
    deps = [
        "//mediapipe/python:_framework_bindings",
        "//mediapipe/tasks/python/core:base_options",
        "//mediapipe/tasks/python/test:test_utils",
        "//mediapipe/tasks/python/vision:face_stylizer",
        "//mediapipe/tasks/python/vision/core:image_processing_options",
        "//mediapipe/tasks/python/vision/core:vision_task_running_mode",
    ],
)

View File

@@ -0,0 +1,118 @@
# Copyright 2022 The MediaPipe Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for face stylizer."""
import enum
import os
from unittest import mock
import numpy as np
from absl.testing import absltest
from absl.testing import parameterized
from mediapipe.python._framework_bindings import image as image_module
from mediapipe.tasks.python.core import base_options as base_options_module
from mediapipe.tasks.python.test import test_utils
from mediapipe.tasks.python.vision import face_stylizer
from mediapipe.tasks.python.vision.core import vision_task_running_mode as running_mode_module
from mediapipe.tasks.python.vision.core import image_processing_options as image_processing_options_module
_BaseOptions = base_options_module.BaseOptions
_Image = image_module.Image
_FaceStylizer = face_stylizer.FaceStylizer
_FaceStylizerOptions = face_stylizer.FaceStylizerOptions
_RUNNING_MODE = running_mode_module.VisionTaskRunningMode
_ImageProcessingOptions = image_processing_options_module.ImageProcessingOptions
_MODEL = 'face_stylizer_model_placeholder.tflite'
_IMAGE = 'cats_and_dogs.jpg'
_STYLIZED_IMAGE = 'stylized_image_placeholder.jpg'
_TEST_DATA_DIR = 'mediapipe/tasks/testdata/vision'


class ModelFileType(enum.Enum):
  FILE_CONTENT = 1
  FILE_NAME = 2


class FaceStylizerTest(parameterized.TestCase):

  def setUp(self):
    super().setUp()
    self.test_image = _Image.create_from_file(
        test_utils.get_test_data_path(
            os.path.join(_TEST_DATA_DIR, _IMAGE)))
    self.model_path = test_utils.get_test_data_path(
        os.path.join(_TEST_DATA_DIR, _MODEL))

  def test_create_from_file_succeeds_with_valid_model_path(self):
    # Creates with default option and valid model file successfully.
    with _FaceStylizer.create_from_model_path(self.model_path) as stylizer:
      self.assertIsInstance(stylizer, _FaceStylizer)

  def test_create_from_options_succeeds_with_valid_model_path(self):
    # Creates with options containing model file successfully.
    base_options = _BaseOptions(model_asset_path=self.model_path)
    options = _FaceStylizerOptions(base_options=base_options)
    with _FaceStylizer.create_from_options(options) as stylizer:
      self.assertIsInstance(stylizer, _FaceStylizer)

  def test_create_from_options_fails_with_invalid_model_path(self):
    with self.assertRaisesRegex(
        RuntimeError, 'Unable to open file at /path/to/invalid/model.tflite'):
      base_options = _BaseOptions(
          model_asset_path='/path/to/invalid/model.tflite')
      options = _FaceStylizerOptions(base_options=base_options)
      _FaceStylizer.create_from_options(options)

  def test_create_from_options_succeeds_with_valid_model_content(self):
    # Creates with options containing model content successfully.
    with open(self.model_path, 'rb') as f:
      base_options = _BaseOptions(model_asset_buffer=f.read())
      options = _FaceStylizerOptions(base_options=base_options)
      stylizer = _FaceStylizer.create_from_options(options)
      self.assertIsInstance(stylizer, _FaceStylizer)

  @parameterized.parameters(
      (ModelFileType.FILE_NAME, _STYLIZED_IMAGE),
      (ModelFileType.FILE_CONTENT, _STYLIZED_IMAGE))
  def test_stylize(self, model_file_type, expected_stylized_image_file):
    # Creates stylizer.
    if model_file_type is ModelFileType.FILE_NAME:
      base_options = _BaseOptions(model_asset_path=self.model_path)
    elif model_file_type is ModelFileType.FILE_CONTENT:
      with open(self.model_path, 'rb') as f:
        model_content = f.read()
      base_options = _BaseOptions(model_asset_buffer=model_content)
    else:
      # Should never happen.
      raise ValueError('model_file_type is invalid.')
    options = _FaceStylizerOptions(base_options=base_options)
    stylizer = _FaceStylizer.create_from_options(options)

    # Performs face stylization on the input.
    stylized_image = stylizer.stylize(self.test_image)
    # Comparing results.
    self.assertTrue(
        np.array_equal(stylized_image.numpy_view(),
                       self.test_image.numpy_view()))
    # Closes the stylizer explicitly when the stylizer is not used in
    # a context.
    stylizer.close()


if __name__ == '__main__':
  absltest.main()
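
Outside the test harness, the two model-loading paths exercised by the ModelFileType parameterization above look like this in plain application code. This is a sketch only; 'face_stylizer_model_placeholder.tflite' is the placeholder asset name used by the test, not a released model.

from mediapipe.tasks.python.core import base_options as base_options_module
from mediapipe.tasks.python.vision import face_stylizer

# Placeholder model path taken from the test constants above; swap in a real
# face stylizer model file.
_MODEL_PATH = 'face_stylizer_model_placeholder.tflite'

# Path-based loading: the task reads the model file itself.
options_from_path = face_stylizer.FaceStylizerOptions(
    base_options=base_options_module.BaseOptions(model_asset_path=_MODEL_PATH))

# Buffer-based loading: the caller reads the model bytes and hands them over.
with open(_MODEL_PATH, 'rb') as f:
  options_from_buffer = face_stylizer.FaceStylizerOptions(
      base_options=base_options_module.BaseOptions(model_asset_buffer=f.read()))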

View File

@@ -152,3 +152,22 @@ py_library(
        "//mediapipe/tasks/python/vision/core:vision_task_running_mode",
    ],
)
py_library(
    name = "face_stylizer",
    srcs = [
        "face_stylizer.py",
    ],
    deps = [
        "//mediapipe/python:_framework_bindings",
        "//mediapipe/python:packet_creator",
        "//mediapipe/python:packet_getter",
        "//mediapipe/tasks/cc/vision/face_stylizer/proto:face_stylizer_graph_options_py_pb2",
        "//mediapipe/tasks/python/core:base_options",
        "//mediapipe/tasks/python/core:optional_dependencies",
        "//mediapipe/tasks/python/core:task_info",
        "//mediapipe/tasks/python/vision/core:base_vision_task_api",
        "//mediapipe/tasks/python/vision/core:image_processing_options",
        "//mediapipe/tasks/python/vision/core:vision_task_running_mode",
    ],
)
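
With this py_library in place, application code can depend on //mediapipe/tasks/python/vision:face_stylizer and import the module directly. A minimal smoke check (a sketch only; it assumes the MediaPipe Python package is built and on the import path):

# Verifies that the new module and its public symbols resolve.
from mediapipe.tasks.python.vision import face_stylizer

assert hasattr(face_stylizer, 'FaceStylizer')
assert hasattr(face_stylizer, 'FaceStylizerOptions')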

View File

@@ -0,0 +1,254 @@
# Copyright 2022 The MediaPipe Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe face stylizer task."""
import dataclasses
from typing import Callable, Mapping, Optional
from mediapipe.python import packet_creator
from mediapipe.python import packet_getter
from mediapipe.python._framework_bindings import image as image_module
from mediapipe.python._framework_bindings import packet as packet_module
from mediapipe.tasks.cc.vision.face_stylizer.proto import face_stylizer_graph_options_pb2
from mediapipe.tasks.python.core import base_options as base_options_module
from mediapipe.tasks.python.core import task_info as task_info_module
from mediapipe.tasks.python.core.optional_dependencies import doc_controls
from mediapipe.tasks.python.vision.core import base_vision_task_api
from mediapipe.tasks.python.vision.core import image_processing_options as image_processing_options_module
from mediapipe.tasks.python.vision.core import vision_task_running_mode as running_mode_module
_BaseOptions = base_options_module.BaseOptions
_FaceStylizerGraphOptionsProto = face_stylizer_graph_options_pb2.FaceStylizerGraphOptions
_RunningMode = running_mode_module.VisionTaskRunningMode
_ImageProcessingOptions = image_processing_options_module.ImageProcessingOptions
_TaskInfo = task_info_module.TaskInfo
_STYLIZED_IMAGE_NAME = 'stylized_image'
_STYLIZED_IMAGE_TAG = 'STYLIZED_IMAGE'
_NORM_RECT_STREAM_NAME = 'norm_rect_in'
_NORM_RECT_TAG = 'NORM_RECT'
_IMAGE_IN_STREAM_NAME = 'image_in'
_IMAGE_OUT_STREAM_NAME = 'image_out'
_IMAGE_TAG = 'IMAGE'
_TASK_GRAPH_NAME = 'mediapipe.tasks.vision.face_stylizer.FaceStylizerGraph'
_MICRO_SECONDS_PER_MILLISECOND = 1000


@dataclasses.dataclass
class FaceStylizerOptions:
  """Options for the face stylizer task.

  Attributes:
    base_options: Base options for the face stylizer task.
    running_mode: The running mode of the task. Defaults to the image mode.
      The face stylizer task has three running modes:
      1) The image mode for stylizing faces on single image inputs.
      2) The video mode for stylizing faces on the decoded frames of a video.
      3) The live stream mode for stylizing faces on a live stream of input
         data, such as from a camera.
    result_callback: The user-defined result callback for processing live
      stream data. The result callback should only be specified when the
      running mode is set to the live stream mode.
  """
  base_options: _BaseOptions
  running_mode: _RunningMode = _RunningMode.IMAGE
  result_callback: Optional[
      Callable[[image_module.Image, image_module.Image, int], None]] = None

  @doc_controls.do_not_generate_docs
  def to_pb2(self) -> _FaceStylizerGraphOptionsProto:
    """Generates a FaceStylizerGraphOptions protobuf object."""
    base_options_proto = self.base_options.to_pb2()
    base_options_proto.use_stream_mode = (
        self.running_mode != _RunningMode.IMAGE)
    return _FaceStylizerGraphOptionsProto(base_options=base_options_proto)


class FaceStylizer(base_vision_task_api.BaseVisionTaskApi):
  """Class that performs face stylization on images."""

  @classmethod
  def create_from_model_path(cls, model_path: str) -> 'FaceStylizer':
    """Creates a `FaceStylizer` object from a TensorFlow Lite model and the default `FaceStylizerOptions`.

    Note that the created `FaceStylizer` instance is in image mode, for
    stylizing faces on single image inputs.

    Args:
      model_path: Path to the model.

    Returns:
      `FaceStylizer` object that's created from the model file and the default
      `FaceStylizerOptions`.

    Raises:
      ValueError: If failed to create `FaceStylizer` object from the provided
        file such as invalid file path.
      RuntimeError: If other types of error occurred.
    """
    base_options = _BaseOptions(model_asset_path=model_path)
    options = FaceStylizerOptions(
        base_options=base_options, running_mode=_RunningMode.IMAGE)
    return cls.create_from_options(options)

  @classmethod
  def create_from_options(cls,
                          options: FaceStylizerOptions) -> 'FaceStylizer':
    """Creates the `FaceStylizer` object from face stylizer options.

    Args:
      options: Options for the face stylizer task.

    Returns:
      `FaceStylizer` object that's created from `options`.

    Raises:
      ValueError: If failed to create `FaceStylizer` object from
        `FaceStylizerOptions` such as missing the model.
      RuntimeError: If other types of error occurred.
    """

    def packets_callback(output_packets: Mapping[str, packet_module.Packet]):
      if output_packets[_IMAGE_OUT_STREAM_NAME].is_empty():
        return
      image = packet_getter.get_image(output_packets[_IMAGE_OUT_STREAM_NAME])
      stylized_image_packet = output_packets[_STYLIZED_IMAGE_NAME]
      # The callback receives the stylized image itself, not the raw packet.
      stylized_image = packet_getter.get_image(stylized_image_packet)
      options.result_callback(
          stylized_image, image,
          stylized_image_packet.timestamp.value //
          _MICRO_SECONDS_PER_MILLISECOND)

    task_info = _TaskInfo(
        task_graph=_TASK_GRAPH_NAME,
        input_streams=[
            ':'.join([_IMAGE_TAG, _IMAGE_IN_STREAM_NAME]),
            ':'.join([_NORM_RECT_TAG, _NORM_RECT_STREAM_NAME]),
        ],
        output_streams=[
            ':'.join([_STYLIZED_IMAGE_TAG, _STYLIZED_IMAGE_NAME]),
            ':'.join([_IMAGE_TAG, _IMAGE_OUT_STREAM_NAME])
        ],
        task_options=options)
    return cls(
        task_info.generate_graph_config(
            enable_flow_limiting=options.running_mode ==
            _RunningMode.LIVE_STREAM), options.running_mode,
        packets_callback if options.result_callback else None)

  def stylize(
      self,
      image: image_module.Image,
      image_processing_options: Optional[_ImageProcessingOptions] = None
  ) -> image_module.Image:
    """Performs face stylization on the provided MediaPipe Image.

    Only use this method when the FaceStylizer is created with the image
    running mode.

    Args:
      image: MediaPipe Image.
      image_processing_options: Options for image processing.

    Returns:
      The stylized image.

    Raises:
      ValueError: If any of the input arguments is invalid.
      RuntimeError: If face stylization failed to run.
    """
    normalized_rect = self.convert_to_normalized_rect(image_processing_options)
    output_packets = self._process_image_data({
        _IMAGE_IN_STREAM_NAME:
            packet_creator.create_image(image),
        _NORM_RECT_STREAM_NAME:
            packet_creator.create_proto(normalized_rect.to_pb2())
    })
    # Unpack the stylized image from its output packet so the declared
    # return type (an Image) is honored.
    return packet_getter.get_image(output_packets[_STYLIZED_IMAGE_NAME])

  def stylize_for_video(
      self,
      image: image_module.Image,
      timestamp_ms: int,
      image_processing_options: Optional[_ImageProcessingOptions] = None
  ) -> image_module.Image:
    """Performs face stylization on the provided video frames.

    Only use this method when the FaceStylizer is created with the video
    running mode. It's required to provide the video frame's timestamp (in
    milliseconds) along with the video frame. The input timestamps should be
    monotonically increasing for adjacent calls of this method.

    Args:
      image: MediaPipe Image.
      timestamp_ms: The timestamp of the input video frame in milliseconds.
      image_processing_options: Options for image processing.

    Returns:
      The stylized image.

    Raises:
      ValueError: If any of the input arguments is invalid.
      RuntimeError: If face stylization failed to run.
    """
    normalized_rect = self.convert_to_normalized_rect(image_processing_options)
    output_packets = self._process_video_data({
        _IMAGE_IN_STREAM_NAME:
            packet_creator.create_image(image).at(
                timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND),
        _NORM_RECT_STREAM_NAME:
            packet_creator.create_proto(normalized_rect.to_pb2()).at(
                timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND)
    })
    return packet_getter.get_image(output_packets[_STYLIZED_IMAGE_NAME])

  def stylize_async(
      self,
      image: image_module.Image,
      timestamp_ms: int,
      image_processing_options: Optional[_ImageProcessingOptions] = None
  ) -> None:
    """Sends live image data (an Image with a unique timestamp) to perform face stylization.

    Only use this method when the FaceStylizer is created with the live stream
    running mode. The input timestamps should be monotonically increasing for
    adjacent calls of this method. This method will return immediately after
    the input image is accepted. The results will be available via the
    `result_callback` provided in the `FaceStylizerOptions`. The
    `stylize_async` method is designed to process live stream data such as
    camera input. To lower the overall latency, the face stylizer may drop
    input images if needed. In other words, it's not guaranteed to have output
    per input image.

    The `result_callback` provides:
      - The stylized image.
      - The input image that the face stylizer runs on.
      - The input timestamp in milliseconds.

    Args:
      image: MediaPipe Image.
      timestamp_ms: The timestamp of the input image in milliseconds.
      image_processing_options: Options for image processing.

    Raises:
      ValueError: If the current input timestamp is smaller than what the face
        stylizer has already processed.
    """
    normalized_rect = self.convert_to_normalized_rect(image_processing_options)
    self._send_live_stream_data({
        _IMAGE_IN_STREAM_NAME:
            packet_creator.create_image(image).at(
                timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND),
        _NORM_RECT_STREAM_NAME:
            packet_creator.create_proto(normalized_rect.to_pb2()).at(
                timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND)
    })
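
Taken together, the three running modes map onto the three public methods above. A minimal usage sketch, assuming a valid face stylizer model at 'face_stylizer.tflite' and an input image 'portrait.jpg' (both names are placeholders, not assets shipped with this commit):

from mediapipe.python._framework_bindings import image as image_module
from mediapipe.tasks.python.core import base_options as base_options_module
from mediapipe.tasks.python.vision import face_stylizer
from mediapipe.tasks.python.vision.core import vision_task_running_mode as running_mode_module

# Image mode: one synchronous call per image.
# 'face_stylizer.tflite' and 'portrait.jpg' are placeholder paths for illustration.
with face_stylizer.FaceStylizer.create_from_model_path(
    'face_stylizer.tflite') as stylizer:
  image = image_module.Image.create_from_file('portrait.jpg')
  stylized = stylizer.stylize(image)

# Live stream mode: results arrive through result_callback instead of being
# returned; input timestamps must be monotonically increasing across calls.
def on_result(stylized_image, input_image, timestamp_ms):
  print('Stylized frame at', timestamp_ms, 'ms')

options = face_stylizer.FaceStylizerOptions(
    base_options=base_options_module.BaseOptions(
        model_asset_path='face_stylizer.tflite'),
    running_mode=running_mode_module.VisionTaskRunningMode.LIVE_STREAM,
    result_callback=on_result)
with face_stylizer.FaceStylizer.create_from_options(options) as stylizer:
  stylizer.stylize_async(image, timestamp_ms=0)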