From 8f32fda6d82aa66c85ce9f5bde1017c4fcdd0079 Mon Sep 17 00:00:00 2001 From: Kinar Date: Thu, 16 Nov 2023 12:53:36 -0800 Subject: [PATCH 1/3] Added more benchmark scripts for the Tasks Python API --- mediapipe/tasks/python/benchmark/BUILD | 24 ++++ .../tasks/python/benchmark/benchmark_utils.py | 58 ++++++++ .../tasks/python/benchmark/vision/core/BUILD | 22 +++ .../python/benchmark/vision/core/__init__.py | 14 ++ .../vision/core/base_vision_benchmark_api.py | 44 ++++++ .../benchmark/vision/face_aligner/BUILD | 34 +++++ .../face_aligner/face_aligner_benchmark.py | 120 ++++++++++++++++ .../benchmark/vision/face_detector/BUILD | 34 +++++ .../face_detector/face_detector_benchmark.py | 120 ++++++++++++++++ .../benchmark/vision/face_landmarker/BUILD | 34 +++++ .../face_landmarker_benchmark.py | 120 ++++++++++++++++ .../benchmark/vision/hand_landmarker/BUILD | 34 +++++ .../hand_landmarker_benchmark.py | 120 ++++++++++++++++ .../benchmark/vision/image_classifier/BUILD | 9 +- .../image_classifier_benchmark.py | 38 +++--- .../benchmark/vision/image_embedder/BUILD | 34 +++++ .../image_embedder_benchmark.py | 120 ++++++++++++++++ .../benchmark/vision/image_segmenter/BUILD | 34 +++++ .../image_segmenter_benchmark.py | 121 +++++++++++++++++ .../vision/interactive_segmenter/BUILD | 34 +++++ .../interactive_segmenter_benchmark.py | 128 ++++++++++++++++++ .../benchmark/vision/object_detector/BUILD | 34 +++++ .../object_detector_benchmark.py | 120 ++++++++++++++++ .../benchmark/vision/pose_landmarker/BUILD | 34 +++++ .../pose_landmarker_benchmark.py | 120 ++++++++++++++++ 25 files changed, 1586 insertions(+), 18 deletions(-) create mode 100644 mediapipe/tasks/python/benchmark/BUILD create mode 100644 mediapipe/tasks/python/benchmark/benchmark_utils.py create mode 100644 mediapipe/tasks/python/benchmark/vision/core/BUILD create mode 100644 mediapipe/tasks/python/benchmark/vision/core/__init__.py create mode 100644 mediapipe/tasks/python/benchmark/vision/core/base_vision_benchmark_api.py create mode 100644 mediapipe/tasks/python/benchmark/vision/face_aligner/BUILD create mode 100644 mediapipe/tasks/python/benchmark/vision/face_aligner/face_aligner_benchmark.py create mode 100644 mediapipe/tasks/python/benchmark/vision/face_detector/BUILD create mode 100644 mediapipe/tasks/python/benchmark/vision/face_detector/face_detector_benchmark.py create mode 100644 mediapipe/tasks/python/benchmark/vision/face_landmarker/BUILD create mode 100644 mediapipe/tasks/python/benchmark/vision/face_landmarker/face_landmarker_benchmark.py create mode 100644 mediapipe/tasks/python/benchmark/vision/hand_landmarker/BUILD create mode 100644 mediapipe/tasks/python/benchmark/vision/hand_landmarker/hand_landmarker_benchmark.py create mode 100644 mediapipe/tasks/python/benchmark/vision/image_embedder/BUILD create mode 100644 mediapipe/tasks/python/benchmark/vision/image_embedder/image_embedder_benchmark.py create mode 100644 mediapipe/tasks/python/benchmark/vision/image_segmenter/BUILD create mode 100644 mediapipe/tasks/python/benchmark/vision/image_segmenter/image_segmenter_benchmark.py create mode 100644 mediapipe/tasks/python/benchmark/vision/interactive_segmenter/BUILD create mode 100644 mediapipe/tasks/python/benchmark/vision/interactive_segmenter/interactive_segmenter_benchmark.py create mode 100644 mediapipe/tasks/python/benchmark/vision/object_detector/BUILD create mode 100644 mediapipe/tasks/python/benchmark/vision/object_detector/object_detector_benchmark.py create mode 100644 
mediapipe/tasks/python/benchmark/vision/pose_landmarker/BUILD create mode 100644 mediapipe/tasks/python/benchmark/vision/pose_landmarker/pose_landmarker_benchmark.py diff --git a/mediapipe/tasks/python/benchmark/BUILD b/mediapipe/tasks/python/benchmark/BUILD new file mode 100644 index 000000000..fc5836da5 --- /dev/null +++ b/mediapipe/tasks/python/benchmark/BUILD @@ -0,0 +1,24 @@ +# Copyright 2023 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Placeholder for internal Python strict library and test compatibility macro. + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +py_library( + name = "benchmark_utils", + srcs = ["benchmark_utils.py"] +) diff --git a/mediapipe/tasks/python/benchmark/benchmark_utils.py b/mediapipe/tasks/python/benchmark/benchmark_utils.py new file mode 100644 index 000000000..5c1b102a2 --- /dev/null +++ b/mediapipe/tasks/python/benchmark/benchmark_utils.py @@ -0,0 +1,58 @@ +# Copyright 2023 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Benchmark utils for MediaPipe Tasks.""" + +import os + + +def get_test_data_path(test_srcdir, file_or_dirname_path: str) -> str: + """Determine the test data path. + + Args: + test_srcdir: The path to the test source directory. + file_or_dirname_path: The path to the file or directory. + + Returns: + The full test data path. + """ + """Returns full test data path.""" + for directory, subdirs, files in os.walk(test_srcdir): + for f in subdirs + files: + path = os.path.join(directory, f) + if path.endswith(file_or_dirname_path): + return path + raise ValueError( + "No %s in test directory: %s." % (file_or_dirname_path, test_srcdir) + ) + + +def get_model_path(custom_model, default_model_path): + """Determine the model path based on the existence of the custom model. + + Args: + custom_model: The path to the custom model provided by the user. + default_model_path: The path to the default model. + + Returns: + The path to the model to be used. + """ + if custom_model is not None and os.path.exists(custom_model): + print(f"Using provided model: {custom_model}") + return custom_model + else: + if custom_model is not None: + print(f"Warning: Provided model '{custom_model}' not found. 
" + f"Using default model instead.") + print(f"Using default model: {default_model_path}") + return default_model_path diff --git a/mediapipe/tasks/python/benchmark/vision/core/BUILD b/mediapipe/tasks/python/benchmark/vision/core/BUILD new file mode 100644 index 000000000..48bfc6522 --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/core/BUILD @@ -0,0 +1,22 @@ +# Copyright 2023 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Placeholder for internal Python strict library and test compatibility macro. + +package(default_visibility = ["//visibility:public"]) + +py_library( + name = "base_vision_benchmark_api", + srcs = ["base_vision_benchmark_api.py"] +) diff --git a/mediapipe/tasks/python/benchmark/vision/core/__init__.py b/mediapipe/tasks/python/benchmark/vision/core/__init__.py new file mode 100644 index 000000000..b87aebd51 --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/core/__init__.py @@ -0,0 +1,14 @@ +"""Copyright 2023 The MediaPipe Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +""" diff --git a/mediapipe/tasks/python/benchmark/vision/core/base_vision_benchmark_api.py b/mediapipe/tasks/python/benchmark/vision/core/base_vision_benchmark_api.py new file mode 100644 index 000000000..65c460613 --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/core/base_vision_benchmark_api.py @@ -0,0 +1,44 @@ +# Copyright 2023 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""MediaPipe vision benchmark base api.""" +import os +import time +import numpy as np + +VISION_TEST_DATA_DIR = 'mediapipe/tasks/testdata/vision' + + +def nth_percentile(func, image, n_iterations, percentile): + """Run a nth percentile benchmark for a given task using the function. + + Args: + func: The method associated with a given task used for benchmarking. + image: The input MediaPipe Image. + n_iterations: Number of iterations to run the benchmark. + percentile: Percentage for the percentiles to compute. Values must be + between 0 and 100 inclusive. 
+ + Returns: + The n-th percentile of the inference times in milliseconds. + """ + inference_times = [] + + for _ in range(n_iterations): + start_time_ns = time.time_ns() + # Run the method for the task (e.g., classify) + func(image) + end_time_ns = time.time_ns() + inference_times.append((end_time_ns - start_time_ns) / 1_000_000) + + return np.percentile(inference_times, percentile) diff --git a/mediapipe/tasks/python/benchmark/vision/face_aligner/BUILD b/mediapipe/tasks/python/benchmark/vision/face_aligner/BUILD new file mode 100644 index 000000000..61791070f --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/face_aligner/BUILD @@ -0,0 +1,34 @@ +# Copyright 2022 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Placeholder for internal Python strict library and test compatibility macro. + +package(default_visibility = ["//visibility:public"]) + +py_binary( + name = "face_aligner_benchmark", + main = "face_aligner_benchmark.py", + srcs = ["face_aligner_benchmark.py"], + data = [ + "//mediapipe/tasks/testdata/vision:test_images", + "//mediapipe/tasks/testdata/vision:test_models", + ], + deps = [ + "//mediapipe/python:_framework_bindings", + "//mediapipe/tasks/python/core:base_options", + "//mediapipe/tasks/python/vision:face_aligner", + "//mediapipe/tasks/python/benchmark:benchmark_utils", + "//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api", + ], +) diff --git a/mediapipe/tasks/python/benchmark/vision/face_aligner/face_aligner_benchmark.py b/mediapipe/tasks/python/benchmark/vision/face_aligner/face_aligner_benchmark.py new file mode 100644 index 000000000..e51ae056a --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/face_aligner/face_aligner_benchmark.py @@ -0,0 +1,120 @@ +# Copyright 2023 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""MediaPipe face aligner benchmark.""" + +import argparse + +from mediapipe.python._framework_bindings import image +from mediapipe.tasks.python.core import base_options +from mediapipe.tasks.python.vision import face_aligner +from mediapipe.tasks.python.benchmark import benchmark_utils +from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api + +_MODEL_FILE = 'face_landmarker_v2.task' +_IMAGE_FILE = 'portrait.jpg' + + +def run( + model: str, + n_iterations: int, + delegate: base_options.BaseOptions.Delegate, + percentile: float, +): + """Run an face aligner benchmark. + + Args: + model: Path to the TFLite model. 
+ n_iterations: Number of iterations to run the benchmark. + delegate: CPU or GPU delegate for inference. + percentile: Percentage for the percentiles to compute. Values must be + between 0 and 100 inclusive. + + Returns: + The n-th percentile of the inference times. + """ + # Initialize the face aligner + default_model_path = benchmark_utils.get_test_data_path( + base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE + ) + model_path = benchmark_utils.get_model_path(model, default_model_path) + options = face_aligner.FaceAlignerOptions( + base_options=base_options.BaseOptions( + model_asset_path=model_path, delegate=delegate + ) + ) + + with face_aligner.FaceAligner.create_from_options(options) as aligner: + mp_image = image.Image.create_from_file( + benchmark_utils.get_test_data_path( + base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE + ) + ) + # Run the benchmark and return the nth percentile of the inference times + nth_percentile = base_vision_benchmark_api.nth_percentile( + aligner.align, mp_image, n_iterations, percentile + ) + return nth_percentile + + +def main(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--model', + help='Path to face aligner task.', + required=False, + default=None, + ) + parser.add_argument( + '--iterations', + help='Number of iterations for benchmarking.', + type=int, + default=100, + ) + parser.add_argument( + '--percentile', + help='Percentile for benchmarking statistics.', + type=float, + default=95.0, + ) + args = parser.parse_args() + + # Run benchmark on CPU + cpu_time = run( + args.model, + args.iterations, + base_options.BaseOptions.Delegate.CPU, + args.percentile, + ) + print( + f'{args.percentile}th Percentile Inference Time on CPU: ' + f'{cpu_time:.6f} milliseconds' + ) + + # Run benchmark on GPU + gpu_time = run( + args.model, + args.iterations, + base_options.BaseOptions.Delegate.GPU, + args.percentile, + ) + print( + f'{args.percentile}th Percentile Inference Time on GPU: ' + f'{gpu_time:.6f} milliseconds' + ) + + +if __name__ == '__main__': + main() diff --git a/mediapipe/tasks/python/benchmark/vision/face_detector/BUILD b/mediapipe/tasks/python/benchmark/vision/face_detector/BUILD new file mode 100644 index 000000000..9eb67c19f --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/face_detector/BUILD @@ -0,0 +1,34 @@ +# Copyright 2022 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Placeholder for internal Python strict library and test compatibility macro. 
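+# A hypothetical example invocation of this benchmark target (the flags are the
+# ones defined in face_detector_benchmark.py; exact Bazel build options may vary):
+#
+#   bazel run -c opt \
+#     //mediapipe/tasks/python/benchmark/vision/face_detector:face_detector_benchmark -- \
+#     --model=/path/to/face_detection_short_range.tflite --iterations=100 --percentile=95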
+ +package(default_visibility = ["//visibility:public"]) + +py_binary( + name = "face_detector_benchmark", + main = "face_detector_benchmark.py", + srcs = ["face_detector_benchmark.py"], + data = [ + "//mediapipe/tasks/testdata/vision:test_images", + "//mediapipe/tasks/testdata/vision:test_models", + ], + deps = [ + "//mediapipe/python:_framework_bindings", + "//mediapipe/tasks/python/core:base_options", + "//mediapipe/tasks/python/vision:face_detector", + "//mediapipe/tasks/python/benchmark:benchmark_utils", + "//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api", + ], +) diff --git a/mediapipe/tasks/python/benchmark/vision/face_detector/face_detector_benchmark.py b/mediapipe/tasks/python/benchmark/vision/face_detector/face_detector_benchmark.py new file mode 100644 index 000000000..c4cd75100 --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/face_detector/face_detector_benchmark.py @@ -0,0 +1,120 @@ +# Copyright 2023 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""MediaPipe face detector benchmark.""" + +import argparse + +from mediapipe.python._framework_bindings import image +from mediapipe.tasks.python.core import base_options +from mediapipe.tasks.python.vision import face_detector +from mediapipe.tasks.python.benchmark import benchmark_utils +from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api + +_MODEL_FILE = 'face_detection_short_range.tflite' +_IMAGE_FILE = 'portrait.jpg' + + +def run( + model: str, + n_iterations: int, + delegate: base_options.BaseOptions.Delegate, + percentile: float, +): + """Run an face detector benchmark. + + Args: + model: Path to the TFLite model. + n_iterations: Number of iterations to run the benchmark. + delegate: CPU or GPU delegate for inference. + percentile: Percentage for the percentiles to compute. Values must be + between 0 and 100 inclusive. + + Returns: + The n-th percentile of the inference times. 
+ """ + # Initialize the face detector + default_model_path = benchmark_utils.get_test_data_path( + base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE + ) + model_path = benchmark_utils.get_model_path(model, default_model_path) + options = face_detector.FaceDetectorOptions( + base_options=base_options.BaseOptions( + model_asset_path=model_path, delegate=delegate + ) + ) + + with face_detector.FaceDetector.create_from_options(options) as detector: + mp_image = image.Image.create_from_file( + benchmark_utils.get_test_data_path( + base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE + ) + ) + # Run the benchmark and return the nth percentile of the inference times + nth_percentile = base_vision_benchmark_api.nth_percentile( + detector.detect, mp_image, n_iterations, percentile + ) + return nth_percentile + + +def main(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--model', + help='Path to face detector task.', + required=False, + default=None, + ) + parser.add_argument( + '--iterations', + help='Number of iterations for benchmarking.', + type=int, + default=100, + ) + parser.add_argument( + '--percentile', + help='Percentile for benchmarking statistics.', + type=float, + default=95.0, + ) + args = parser.parse_args() + + # Run benchmark on CPU + cpu_time = run( + args.model, + args.iterations, + base_options.BaseOptions.Delegate.CPU, + args.percentile, + ) + print( + f'{args.percentile}th Percentile Inference Time on CPU: ' + f'{cpu_time:.6f} milliseconds' + ) + + # Run benchmark on GPU + gpu_time = run( + args.model, + args.iterations, + base_options.BaseOptions.Delegate.GPU, + args.percentile, + ) + print( + f'{args.percentile}th Percentile Inference Time on GPU: ' + f'{gpu_time:.6f} milliseconds' + ) + + +if __name__ == '__main__': + main() diff --git a/mediapipe/tasks/python/benchmark/vision/face_landmarker/BUILD b/mediapipe/tasks/python/benchmark/vision/face_landmarker/BUILD new file mode 100644 index 000000000..70dd311e4 --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/face_landmarker/BUILD @@ -0,0 +1,34 @@ +# Copyright 2022 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Placeholder for internal Python strict library and test compatibility macro. 
+ +package(default_visibility = ["//visibility:public"]) + +py_binary( + name = "face_landmarker_benchmark", + main = "face_landmarker_benchmark.py", + srcs = ["face_landmarker_benchmark.py"], + data = [ + "//mediapipe/tasks/testdata/vision:test_images", + "//mediapipe/tasks/testdata/vision:test_models", + ], + deps = [ + "//mediapipe/python:_framework_bindings", + "//mediapipe/tasks/python/core:base_options", + "//mediapipe/tasks/python/vision:face_landmarker", + "//mediapipe/tasks/python/benchmark:benchmark_utils", + "//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api", + ], +) diff --git a/mediapipe/tasks/python/benchmark/vision/face_landmarker/face_landmarker_benchmark.py b/mediapipe/tasks/python/benchmark/vision/face_landmarker/face_landmarker_benchmark.py new file mode 100644 index 000000000..7a1f3f817 --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/face_landmarker/face_landmarker_benchmark.py @@ -0,0 +1,120 @@ +# Copyright 2023 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""MediaPipe face landmarker benchmark.""" + +import argparse + +from mediapipe.python._framework_bindings import image +from mediapipe.tasks.python.core import base_options +from mediapipe.tasks.python.vision import face_landmarker +from mediapipe.tasks.python.benchmark import benchmark_utils +from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api + +_MODEL_FILE = 'face_landmarker_v2.task' +_IMAGE_FILE = 'portrait.jpg' + + +def run( + model: str, + n_iterations: int, + delegate: base_options.BaseOptions.Delegate, + percentile: float, +): + """Run an face landmarker benchmark. + + Args: + model: Path to the TFLite model. + n_iterations: Number of iterations to run the benchmark. + delegate: CPU or GPU delegate for inference. + percentile: Percentage for the percentiles to compute. Values must be + between 0 and 100 inclusive. + + Returns: + The n-th percentile of the inference times. 
+ """ + # Initialize the face landmarker + default_model_path = benchmark_utils.get_test_data_path( + base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE + ) + model_path = benchmark_utils.get_model_path(model, default_model_path) + options = face_landmarker.FaceLandmarkerOptions( + base_options=base_options.BaseOptions( + model_asset_path=model_path, delegate=delegate + ) + ) + + with face_landmarker.FaceLandmarker.create_from_options(options) as landmarker: + mp_image = image.Image.create_from_file( + benchmark_utils.get_test_data_path( + base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE + ) + ) + # Run the benchmark and return the nth percentile of the inference times + nth_percentile = base_vision_benchmark_api.nth_percentile( + landmarker.detect, mp_image, n_iterations, percentile + ) + return nth_percentile + + +def main(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--model', + help='Path to face landmarker task.', + required=False, + default=None, + ) + parser.add_argument( + '--iterations', + help='Number of iterations for benchmarking.', + type=int, + default=100, + ) + parser.add_argument( + '--percentile', + help='Percentile for benchmarking statistics.', + type=float, + default=95.0, + ) + args = parser.parse_args() + + # Run benchmark on CPU + cpu_time = run( + args.model, + args.iterations, + base_options.BaseOptions.Delegate.CPU, + args.percentile, + ) + print( + f'{args.percentile}th Percentile Inference Time on CPU: ' + f'{cpu_time:.6f} milliseconds' + ) + + # Run benchmark on GPU + gpu_time = run( + args.model, + args.iterations, + base_options.BaseOptions.Delegate.GPU, + args.percentile, + ) + print( + f'{args.percentile}th Percentile Inference Time on GPU: ' + f'{gpu_time:.6f} milliseconds' + ) + + +if __name__ == '__main__': + main() diff --git a/mediapipe/tasks/python/benchmark/vision/hand_landmarker/BUILD b/mediapipe/tasks/python/benchmark/vision/hand_landmarker/BUILD new file mode 100644 index 000000000..693b42faf --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/hand_landmarker/BUILD @@ -0,0 +1,34 @@ +# Copyright 2022 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Placeholder for internal Python strict library and test compatibility macro. 
+ +package(default_visibility = ["//visibility:public"]) + +py_binary( + name = "hand_landmarker_benchmark", + main = "hand_landmarker_benchmark.py", + srcs = ["hand_landmarker_benchmark.py"], + data = [ + "//mediapipe/tasks/testdata/vision:test_images", + "//mediapipe/tasks/testdata/vision:test_models", + ], + deps = [ + "//mediapipe/python:_framework_bindings", + "//mediapipe/tasks/python/core:base_options", + "//mediapipe/tasks/python/vision:hand_landmarker", + "//mediapipe/tasks/python/benchmark:benchmark_utils", + "//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api", + ], +) diff --git a/mediapipe/tasks/python/benchmark/vision/hand_landmarker/hand_landmarker_benchmark.py b/mediapipe/tasks/python/benchmark/vision/hand_landmarker/hand_landmarker_benchmark.py new file mode 100644 index 000000000..8fd8fc210 --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/hand_landmarker/hand_landmarker_benchmark.py @@ -0,0 +1,120 @@ +# Copyright 2023 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""MediaPipe hand landmarker benchmark.""" + +import argparse + +from mediapipe.python._framework_bindings import image +from mediapipe.tasks.python.core import base_options +from mediapipe.tasks.python.vision import hand_landmarker +from mediapipe.tasks.python.benchmark import benchmark_utils +from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api + +_MODEL_FILE = 'hand_landmarker.task' +_IMAGE_FILE = 'thumb_up.jpg' + + +def run( + model: str, + n_iterations: int, + delegate: base_options.BaseOptions.Delegate, + percentile: float, +): + """Run an hand landmarker benchmark. + + Args: + model: Path to the TFLite model. + n_iterations: Number of iterations to run the benchmark. + delegate: CPU or GPU delegate for inference. + percentile: Percentage for the percentiles to compute. Values must be + between 0 and 100 inclusive. + + Returns: + The n-th percentile of the inference times. 
+ """ + # Initialize the hand landmarker + default_model_path = benchmark_utils.get_test_data_path( + base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE + ) + model_path = benchmark_utils.get_model_path(model, default_model_path) + options = hand_landmarker.HandLandmarkerOptions( + base_options=base_options.BaseOptions( + model_asset_path=model_path, delegate=delegate + ) + ) + + with hand_landmarker.HandLandmarker.create_from_options(options) as landmarker: + mp_image = image.Image.create_from_file( + benchmark_utils.get_test_data_path( + base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE + ) + ) + # Run the benchmark and return the nth percentile of the inference times + nth_percentile = base_vision_benchmark_api.nth_percentile( + landmarker.detect, mp_image, n_iterations, percentile + ) + return nth_percentile + + +def main(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--model', + help='Path to hand landmarker task.', + required=False, + default=None, + ) + parser.add_argument( + '--iterations', + help='Number of iterations for benchmarking.', + type=int, + default=100, + ) + parser.add_argument( + '--percentile', + help='Percentile for benchmarking statistics.', + type=float, + default=95.0, + ) + args = parser.parse_args() + + # Run benchmark on CPU + cpu_time = run( + args.model, + args.iterations, + base_options.BaseOptions.Delegate.CPU, + args.percentile, + ) + print( + f'{args.percentile}th Percentile Inference Time on CPU: ' + f'{cpu_time:.6f} milliseconds' + ) + + # Run benchmark on GPU + gpu_time = run( + args.model, + args.iterations, + base_options.BaseOptions.Delegate.GPU, + args.percentile, + ) + print( + f'{args.percentile}th Percentile Inference Time on GPU: ' + f'{gpu_time:.6f} milliseconds' + ) + + +if __name__ == '__main__': + main() diff --git a/mediapipe/tasks/python/benchmark/vision/image_classifier/BUILD b/mediapipe/tasks/python/benchmark/vision/image_classifier/BUILD index aec17ea9d..738b9738a 100644 --- a/mediapipe/tasks/python/benchmark/vision/image_classifier/BUILD +++ b/mediapipe/tasks/python/benchmark/vision/image_classifier/BUILD @@ -16,12 +16,19 @@ package(default_visibility = ["//visibility:public"]) -py_library( +py_binary( name = "image_classifier_benchmark", + main = "image_classifier_benchmark.py", srcs = ["image_classifier_benchmark.py"], + data = [ + "//mediapipe/tasks/testdata/vision:test_images", + "//mediapipe/tasks/testdata/vision:test_models", + ], deps = [ "//mediapipe/python:_framework_bindings", "//mediapipe/tasks/python/core:base_options", "//mediapipe/tasks/python/vision:image_classifier", + "//mediapipe/tasks/python/benchmark:benchmark_utils", + "//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api", ], ) diff --git a/mediapipe/tasks/python/benchmark/vision/image_classifier/image_classifier_benchmark.py b/mediapipe/tasks/python/benchmark/vision/image_classifier/image_classifier_benchmark.py index dffd616fd..f24b7dcd5 100644 --- a/mediapipe/tasks/python/benchmark/vision/image_classifier/image_classifier_benchmark.py +++ b/mediapipe/tasks/python/benchmark/vision/image_classifier/image_classifier_benchmark.py @@ -14,12 +14,14 @@ """MediaPipe image classsifier benchmark.""" import argparse -import time -import numpy as np + from mediapipe.python._framework_bindings import image from mediapipe.tasks.python.core import base_options from mediapipe.tasks.python.vision import image_classifier +from mediapipe.tasks.python.benchmark 
import benchmark_utils +from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api +_MODEL_FILE = 'mobilenet_v2_1.0_224.tflite' _IMAGE_FILE = 'burger.jpg' @@ -41,27 +43,29 @@ def run( Returns: The n-th percentile of the inference times. """ - inference_times = [] - # Initialize the image classifier + default_model_path = benchmark_utils.get_test_data_path( + base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE + ) + model_path = benchmark_utils.get_model_path(model, default_model_path) options = image_classifier.ImageClassifierOptions( base_options=base_options.BaseOptions( - model_asset_path=model, delegate=delegate + model_asset_path=model_path, delegate=delegate ), max_results=1, ) - classifier = image_classifier.ImageClassifier.create_from_options(options) - mp_image = image.Image.create_from_file(_IMAGE_FILE) - for _ in range(n_iterations): - start_time_ns = time.time_ns() - classifier.classify(mp_image) - end_time_ns = time.time_ns() - # Convert to milliseconds - inference_times.append((end_time_ns - start_time_ns) / 1_000_000) - - classifier.close() - return np.percentile(inference_times, percentile) + with image_classifier.ImageClassifier.create_from_options(options) as classifier: + mp_image = image.Image.create_from_file( + benchmark_utils.get_test_data_path( + base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE + ) + ) + # Run the benchmark and return the nth percentile of the inference times + nth_percentile = base_vision_benchmark_api.nth_percentile( + classifier.classify, mp_image, n_iterations, percentile + ) + return nth_percentile def main(): @@ -72,7 +76,7 @@ def main(): '--model', help='Path to image classification model.', required=False, - default='classifier.tflite', + default=None, ) parser.add_argument( '--iterations', diff --git a/mediapipe/tasks/python/benchmark/vision/image_embedder/BUILD b/mediapipe/tasks/python/benchmark/vision/image_embedder/BUILD new file mode 100644 index 000000000..059bdd095 --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/image_embedder/BUILD @@ -0,0 +1,34 @@ +# Copyright 2022 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Placeholder for internal Python strict library and test compatibility macro. 
+ +package(default_visibility = ["//visibility:public"]) + +py_binary( + name = "image_embedder_benchmark", + main = "image_embedder_benchmark.py", + srcs = ["image_embedder_benchmark.py"], + data = [ + "//mediapipe/tasks/testdata/vision:test_images", + "//mediapipe/tasks/testdata/vision:test_models", + ], + deps = [ + "//mediapipe/python:_framework_bindings", + "//mediapipe/tasks/python/core:base_options", + "//mediapipe/tasks/python/vision:image_embedder", + "//mediapipe/tasks/python/benchmark:benchmark_utils", + "//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api", + ], +) diff --git a/mediapipe/tasks/python/benchmark/vision/image_embedder/image_embedder_benchmark.py b/mediapipe/tasks/python/benchmark/vision/image_embedder/image_embedder_benchmark.py new file mode 100644 index 000000000..1da3b8e89 --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/image_embedder/image_embedder_benchmark.py @@ -0,0 +1,120 @@ +# Copyright 2023 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""MediaPipe image embedder benchmark.""" + +import argparse + +from mediapipe.python._framework_bindings import image +from mediapipe.tasks.python.core import base_options +from mediapipe.tasks.python.vision import image_embedder +from mediapipe.tasks.python.benchmark import benchmark_utils +from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api + +_MODEL_FILE = 'mobilenet_v3_small_100_224_embedder.tflite' +_IMAGE_FILE = 'burger.jpg' + + +def run( + model: str, + n_iterations: int, + delegate: base_options.BaseOptions.Delegate, + percentile: float, +): + """Run an image embedding extraction benchmark. + + Args: + model: Path to the TFLite model. + n_iterations: Number of iterations to run the benchmark. + delegate: CPU or GPU delegate for inference. + percentile: Percentage for the percentiles to compute. Values must be + between 0 and 100 inclusive. + + Returns: + The n-th percentile of the inference times. 
+ """ + # Initialize the image embedder + default_model_path = benchmark_utils.get_test_data_path( + base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE + ) + model_path = benchmark_utils.get_model_path(model, default_model_path) + options = image_embedder.ImageEmbedderOptions( + base_options=base_options.BaseOptions( + model_asset_path=model_path, delegate=delegate + ) + ) + + with image_embedder.ImageEmbedder.create_from_options(options) as embedder: + mp_image = image.Image.create_from_file( + benchmark_utils.get_test_data_path( + base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE + ) + ) + # Run the benchmark and return the nth percentile of the inference times + nth_percentile = base_vision_benchmark_api.nth_percentile( + embedder.embed, mp_image, n_iterations, percentile + ) + return nth_percentile + + +def main(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--model', + help='Path to image embedding extraction model.', + required=False, + default=None, + ) + parser.add_argument( + '--iterations', + help='Number of iterations for benchmarking.', + type=int, + default=100, + ) + parser.add_argument( + '--percentile', + help='Percentile for benchmarking statistics.', + type=float, + default=95.0, + ) + args = parser.parse_args() + + # Run benchmark on CPU + cpu_time = run( + args.model, + args.iterations, + base_options.BaseOptions.Delegate.CPU, + args.percentile, + ) + print( + f'{args.percentile}th Percentile Inference Time on CPU: ' + f'{cpu_time:.6f} milliseconds' + ) + + # Run benchmark on GPU + gpu_time = run( + args.model, + args.iterations, + base_options.BaseOptions.Delegate.GPU, + args.percentile, + ) + print( + f'{args.percentile}th Percentile Inference Time on GPU: ' + f'{gpu_time:.6f} milliseconds' + ) + + +if __name__ == '__main__': + main() diff --git a/mediapipe/tasks/python/benchmark/vision/image_segmenter/BUILD b/mediapipe/tasks/python/benchmark/vision/image_segmenter/BUILD new file mode 100644 index 000000000..bfb2ee763 --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/image_segmenter/BUILD @@ -0,0 +1,34 @@ +# Copyright 2022 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Placeholder for internal Python strict library and test compatibility macro. 
+ +package(default_visibility = ["//visibility:public"]) + +py_binary( + name = "image_segmenter_benchmark", + main = "image_segmenter_benchmark.py", + srcs = ["image_segmenter_benchmark.py"], + data = [ + "//mediapipe/tasks/testdata/vision:test_images", + "//mediapipe/tasks/testdata/vision:test_models", + ], + deps = [ + "//mediapipe/python:_framework_bindings", + "//mediapipe/tasks/python/core:base_options", + "//mediapipe/tasks/python/vision:image_segmenter", + "//mediapipe/tasks/python/benchmark:benchmark_utils", + "//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api", + ], +) diff --git a/mediapipe/tasks/python/benchmark/vision/image_segmenter/image_segmenter_benchmark.py b/mediapipe/tasks/python/benchmark/vision/image_segmenter/image_segmenter_benchmark.py new file mode 100644 index 000000000..ef885b11c --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/image_segmenter/image_segmenter_benchmark.py @@ -0,0 +1,121 @@ +# Copyright 2023 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""MediaPipe image segmenter benchmark.""" + +import argparse + +from mediapipe.python._framework_bindings import image +from mediapipe.tasks.python.core import base_options +from mediapipe.tasks.python.vision import image_segmenter +from mediapipe.tasks.python.benchmark import benchmark_utils +from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api + +_MODEL_FILE = 'deeplabv3.tflite' +_IMAGE_FILE = 'segmentation_input_rotation0.jpg' + + +def run( + model: str, + n_iterations: int, + delegate: base_options.BaseOptions.Delegate, + percentile: float, +): + """Run an image segmentation benchmark. + + Args: + model: Path to the TFLite model. + n_iterations: Number of iterations to run the benchmark. + delegate: CPU or GPU delegate for inference. + percentile: Percentage for the percentiles to compute. Values must be + between 0 and 100 inclusive. + + Returns: + The n-th percentile of the inference times. 
+ """ + # Initialize the image segmenter + default_model_path = benchmark_utils.get_test_data_path( + base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE + ) + model_path = benchmark_utils.get_model_path(model, default_model_path) + options = image_segmenter.ImageSegmenterOptions( + base_options=base_options.BaseOptions( + model_asset_path=model_path, delegate=delegate + ), + output_confidence_masks=True, output_category_mask=True + ) + + with image_segmenter.ImageSegmenter.create_from_options(options) as segmenter: + mp_image = image.Image.create_from_file( + benchmark_utils.get_test_data_path( + base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE + ) + ) + # Run the benchmark and return the nth percentile of the inference times + nth_percentile = base_vision_benchmark_api.nth_percentile( + segmenter.segment, mp_image, n_iterations, percentile + ) + return nth_percentile + + +def main(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--model', + help='Path to image segmentation model.', + required=False, + default=None, + ) + parser.add_argument( + '--iterations', + help='Number of iterations for benchmarking.', + type=int, + default=100, + ) + parser.add_argument( + '--percentile', + help='Percentile for benchmarking statistics.', + type=float, + default=95.0, + ) + args = parser.parse_args() + + # Run benchmark on CPU + cpu_time = run( + args.model, + args.iterations, + base_options.BaseOptions.Delegate.CPU, + args.percentile, + ) + print( + f'{args.percentile}th Percentile Inference Time on CPU: ' + f'{cpu_time:.6f} milliseconds' + ) + + # Run benchmark on GPU + gpu_time = run( + args.model, + args.iterations, + base_options.BaseOptions.Delegate.GPU, + args.percentile, + ) + print( + f'{args.percentile}th Percentile Inference Time on GPU: ' + f'{gpu_time:.6f} milliseconds' + ) + + +if __name__ == '__main__': + main() diff --git a/mediapipe/tasks/python/benchmark/vision/interactive_segmenter/BUILD b/mediapipe/tasks/python/benchmark/vision/interactive_segmenter/BUILD new file mode 100644 index 000000000..1a0bab418 --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/interactive_segmenter/BUILD @@ -0,0 +1,34 @@ +# Copyright 2022 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Placeholder for internal Python strict library and test compatibility macro. 
+ +package(default_visibility = ["//visibility:public"]) + +py_binary( + name = "interactive_segmenter_benchmark", + main = "interactive_segmenter_benchmark.py", + srcs = ["interactive_segmenter_benchmark.py"], + data = [ + "//mediapipe/tasks/testdata/vision:test_images", + "//mediapipe/tasks/testdata/vision:test_models", + ], + deps = [ + "//mediapipe/python:_framework_bindings", + "//mediapipe/tasks/python/core:base_options", + "//mediapipe/tasks/python/vision:interactive_segmenter", + "//mediapipe/tasks/python/benchmark:benchmark_utils", + "//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api", + ], +) diff --git a/mediapipe/tasks/python/benchmark/vision/interactive_segmenter/interactive_segmenter_benchmark.py b/mediapipe/tasks/python/benchmark/vision/interactive_segmenter/interactive_segmenter_benchmark.py new file mode 100644 index 000000000..3ecc7f661 --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/interactive_segmenter/interactive_segmenter_benchmark.py @@ -0,0 +1,128 @@ +# Copyright 2023 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""MediaPipe interactive segmenter benchmark.""" + +from functools import partial +import argparse + +from mediapipe.python._framework_bindings import image +from mediapipe.tasks.python.core import base_options +from mediapipe.tasks.python.components.containers import keypoint +from mediapipe.tasks.python.vision import interactive_segmenter +from mediapipe.tasks.python.benchmark import benchmark_utils +from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api + +_MODEL_FILE = 'deeplabv3.tflite' +_IMAGE_FILE = 'segmentation_input_rotation0.jpg' + + +def run( + model: str, + n_iterations: int, + delegate: base_options.BaseOptions.Delegate, + percentile: float, +): + """Run an interactive segmentation benchmark. + + Args: + model: Path to the TFLite model. + n_iterations: Number of iterations to run the benchmark. + delegate: CPU or GPU delegate for inference. + percentile: Percentage for the percentiles to compute. Values must be + between 0 and 100 inclusive. + + Returns: + The n-th percentile of the inference times. 
+ """ + # Initialize the interactive segmenter + default_model_path = benchmark_utils.get_test_data_path( + base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE + ) + model_path = benchmark_utils.get_model_path(model, default_model_path) + + options = interactive_segmenter.InteractiveSegmenterOptions( + base_options=base_options.BaseOptions( + model_asset_path=model_path, delegate=delegate + ), + output_category_mask=True, output_confidence_masks=False + ) + roi = interactive_segmenter.RegionOfInterest( + format=interactive_segmenter.RegionOfInterest.Format.KEYPOINT, + keypoint=keypoint.NormalizedKeypoint(0.44, 0.7) + ) + + with interactive_segmenter.InteractiveSegmenter.create_from_options(options) as segmenter: + mp_image = image.Image.create_from_file( + benchmark_utils.get_test_data_path( + base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE + ) + ) + # Run the benchmark and return the nth percentile of the inference times + nth_percentile = base_vision_benchmark_api.nth_percentile( + partial(segmenter.segment, roi=roi), mp_image, n_iterations, percentile + ) + return nth_percentile + + +def main(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--model', + help='Path to interactive segmentation model.', + required=False, + default=None, + ) + parser.add_argument( + '--iterations', + help='Number of iterations for benchmarking.', + type=int, + default=100, + ) + parser.add_argument( + '--percentile', + help='Percentile for benchmarking statistics.', + type=float, + default=95.0, + ) + args = parser.parse_args() + + # Run benchmark on CPU + cpu_time = run( + args.model, + args.iterations, + base_options.BaseOptions.Delegate.CPU, + args.percentile, + ) + print( + f'{args.percentile}th Percentile Inference Time on CPU: ' + f'{cpu_time:.6f} milliseconds' + ) + + # Run benchmark on GPU + gpu_time = run( + args.model, + args.iterations, + base_options.BaseOptions.Delegate.GPU, + args.percentile, + ) + print( + f'{args.percentile}th Percentile Inference Time on GPU: ' + f'{gpu_time:.6f} milliseconds' + ) + + +if __name__ == '__main__': + main() diff --git a/mediapipe/tasks/python/benchmark/vision/object_detector/BUILD b/mediapipe/tasks/python/benchmark/vision/object_detector/BUILD new file mode 100644 index 000000000..2db19db5e --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/object_detector/BUILD @@ -0,0 +1,34 @@ +# Copyright 2022 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Placeholder for internal Python strict library and test compatibility macro. 
+ +package(default_visibility = ["//visibility:public"]) + +py_binary( + name = "object_detector_benchmark", + main = "object_detector_benchmark.py", + srcs = ["object_detector_benchmark.py"], + data = [ + "//mediapipe/tasks/testdata/vision:test_images", + "//mediapipe/tasks/testdata/vision:test_models", + ], + deps = [ + "//mediapipe/python:_framework_bindings", + "//mediapipe/tasks/python/core:base_options", + "//mediapipe/tasks/python/vision:object_detector", + "//mediapipe/tasks/python/benchmark:benchmark_utils", + "//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api", + ], +) diff --git a/mediapipe/tasks/python/benchmark/vision/object_detector/object_detector_benchmark.py b/mediapipe/tasks/python/benchmark/vision/object_detector/object_detector_benchmark.py new file mode 100644 index 000000000..80f8302a2 --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/object_detector/object_detector_benchmark.py @@ -0,0 +1,120 @@ +# Copyright 2023 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""MediaPipe object detector benchmark.""" + +import argparse + +from mediapipe.python._framework_bindings import image +from mediapipe.tasks.python.core import base_options +from mediapipe.tasks.python.vision import object_detector +from mediapipe.tasks.python.benchmark import benchmark_utils +from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api + +_MODEL_FILE = 'coco_efficientdet_lite0_v1_1.0_quant_2021_09_06.tflite' +_IMAGE_FILE = 'cats_and_dogs.jpg' + + +def run( + model: str, + n_iterations: int, + delegate: base_options.BaseOptions.Delegate, + percentile: float, +): + """Run an object detector benchmark. + + Args: + model: Path to the TFLite model. + n_iterations: Number of iterations to run the benchmark. + delegate: CPU or GPU delegate for inference. + percentile: Percentage for the percentiles to compute. Values must be + between 0 and 100 inclusive. + + Returns: + The n-th percentile of the inference times. 
+ """ + # Initialize the object detector + default_model_path = benchmark_utils.get_test_data_path( + base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE + ) + model_path = benchmark_utils.get_model_path(model, default_model_path) + options = object_detector.ObjectDetectorOptions( + base_options=base_options.BaseOptions( + model_asset_path=model_path, delegate=delegate + ) + ) + + with object_detector.ObjectDetector.create_from_options(options) as detector: + mp_image = image.Image.create_from_file( + benchmark_utils.get_test_data_path( + base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE + ) + ) + # Run the benchmark and return the nth percentile of the inference times + nth_percentile = base_vision_benchmark_api.nth_percentile( + detector.detect, mp_image, n_iterations, percentile + ) + return nth_percentile + + +def main(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--model', + help='Path to object detector model.', + required=False, + default=None, + ) + parser.add_argument( + '--iterations', + help='Number of iterations for benchmarking.', + type=int, + default=100, + ) + parser.add_argument( + '--percentile', + help='Percentile for benchmarking statistics.', + type=float, + default=95.0, + ) + args = parser.parse_args() + + # Run benchmark on CPU + cpu_time = run( + args.model, + args.iterations, + base_options.BaseOptions.Delegate.CPU, + args.percentile, + ) + print( + f'{args.percentile}th Percentile Inference Time on CPU: ' + f'{cpu_time:.6f} milliseconds' + ) + + # Run benchmark on GPU + gpu_time = run( + args.model, + args.iterations, + base_options.BaseOptions.Delegate.GPU, + args.percentile, + ) + print( + f'{args.percentile}th Percentile Inference Time on GPU: ' + f'{gpu_time:.6f} milliseconds' + ) + + +if __name__ == '__main__': + main() diff --git a/mediapipe/tasks/python/benchmark/vision/pose_landmarker/BUILD b/mediapipe/tasks/python/benchmark/vision/pose_landmarker/BUILD new file mode 100644 index 000000000..38c778c00 --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/pose_landmarker/BUILD @@ -0,0 +1,34 @@ +# Copyright 2022 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Placeholder for internal Python strict library and test compatibility macro. 
+ +package(default_visibility = ["//visibility:public"]) + +py_binary( + name = "pose_landmarker_benchmark", + main = "pose_landmarker_benchmark.py", + srcs = ["pose_landmarker_benchmark.py"], + data = [ + "//mediapipe/tasks/testdata/vision:test_images", + "//mediapipe/tasks/testdata/vision:test_models", + ], + deps = [ + "//mediapipe/python:_framework_bindings", + "//mediapipe/tasks/python/core:base_options", + "//mediapipe/tasks/python/vision:pose_landmarker", + "//mediapipe/tasks/python/benchmark:benchmark_utils", + "//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api", + ], +) diff --git a/mediapipe/tasks/python/benchmark/vision/pose_landmarker/pose_landmarker_benchmark.py b/mediapipe/tasks/python/benchmark/vision/pose_landmarker/pose_landmarker_benchmark.py new file mode 100644 index 000000000..a1c55f0b8 --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/pose_landmarker/pose_landmarker_benchmark.py @@ -0,0 +1,120 @@ +# Copyright 2023 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""MediaPipe pose landmarker benchmark.""" + +import argparse + +from mediapipe.python._framework_bindings import image +from mediapipe.tasks.python.core import base_options +from mediapipe.tasks.python.vision import pose_landmarker +from mediapipe.tasks.python.benchmark import benchmark_utils +from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api + +_MODEL_FILE = 'pose_landmarker.task' +_IMAGE_FILE = 'pose.jpg' + + +def run( + model: str, + n_iterations: int, + delegate: base_options.BaseOptions.Delegate, + percentile: float, +): + """Run an pose landmarker benchmark. + + Args: + model: Path to the TFLite model. + n_iterations: Number of iterations to run the benchmark. + delegate: CPU or GPU delegate for inference. + percentile: Percentage for the percentiles to compute. Values must be + between 0 and 100 inclusive. + + Returns: + The n-th percentile of the inference times. 
+ """ + # Initialize the pose landmarker + default_model_path = benchmark_utils.get_test_data_path( + base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE + ) + model_path = benchmark_utils.get_model_path(model, default_model_path) + options = pose_landmarker.PoseLandmarkerOptions( + base_options=base_options.BaseOptions( + model_asset_path=model_path, delegate=delegate + ) + ) + + with pose_landmarker.PoseLandmarker.create_from_options(options) as landmarker: + mp_image = image.Image.create_from_file( + benchmark_utils.get_test_data_path( + base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE + ) + ) + # Run the benchmark and return the nth percentile of the inference times + nth_percentile = base_vision_benchmark_api.nth_percentile( + landmarker.detect, mp_image, n_iterations, percentile + ) + return nth_percentile + + +def main(): + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + parser.add_argument( + '--model', + help='Path to pose landmarker task.', + required=False, + default=None, + ) + parser.add_argument( + '--iterations', + help='Number of iterations for benchmarking.', + type=int, + default=100, + ) + parser.add_argument( + '--percentile', + help='Percentile for benchmarking statistics.', + type=float, + default=95.0, + ) + args = parser.parse_args() + + # Run benchmark on CPU + cpu_time = run( + args.model, + args.iterations, + base_options.BaseOptions.Delegate.CPU, + args.percentile, + ) + print( + f'{args.percentile}th Percentile Inference Time on CPU: ' + f'{cpu_time:.6f} milliseconds' + ) + + # Run benchmark on GPU + gpu_time = run( + args.model, + args.iterations, + base_options.BaseOptions.Delegate.GPU, + args.percentile, + ) + print( + f'{args.percentile}th Percentile Inference Time on GPU: ' + f'{gpu_time:.6f} milliseconds' + ) + + +if __name__ == '__main__': + main() From 46c6c9403c661c555229e75a2eb647304cc714dd Mon Sep 17 00:00:00 2001 From: Kinar Date: Thu, 16 Nov 2023 16:26:29 -0800 Subject: [PATCH 2/3] Code cleanup and revised benchmarking API --- .../tasks/python/benchmark/benchmark_utils.py | 11 ++ mediapipe/tasks/python/benchmark/vision/BUILD | 33 ++++++ .../python/benchmark/vision/benchmark.py | 104 ++++++++++++++++++ .../vision/core/base_vision_benchmark_api.py | 14 +-- .../benchmark/vision/face_aligner/BUILD | 3 +- .../face_aligner/face_aligner_benchmark.py | 87 ++------------- .../benchmark/vision/face_detector/BUILD | 3 +- .../face_detector/face_detector_benchmark.py | 88 +++------------ .../benchmark/vision/face_landmarker/BUILD | 3 +- .../face_landmarker_benchmark.py | 80 ++------------ .../benchmark/vision/hand_landmarker/BUILD | 3 +- .../hand_landmarker_benchmark.py | 86 ++------------- .../benchmark/vision/image_classifier/BUILD | 3 +- .../vision/image_classifier/README.md | 5 +- .../image_classifier_benchmark.py | 90 +++------------ .../benchmark/vision/image_embedder/BUILD | 3 +- .../image_embedder_benchmark.py | 80 ++------------ .../benchmark/vision/image_segmenter/BUILD | 1 + .../image_segmenter_benchmark.py | 80 ++------------ .../vision/interactive_segmenter/BUILD | 3 +- .../interactive_segmenter_benchmark.py | 89 +++------------ .../benchmark/vision/object_detector/BUILD | 3 +- .../object_detector_benchmark.py | 84 ++------------ .../benchmark/vision/pose_landmarker/BUILD | 3 +- .../pose_landmarker_benchmark.py | 84 ++------------ mediapipe/tasks/testdata/vision/BUILD | 2 + 26 files changed, 289 insertions(+), 756 deletions(-) create mode 100644 
mediapipe/tasks/python/benchmark/vision/BUILD create mode 100644 mediapipe/tasks/python/benchmark/vision/benchmark.py diff --git a/mediapipe/tasks/python/benchmark/benchmark_utils.py b/mediapipe/tasks/python/benchmark/benchmark_utils.py index 5c1b102a2..338bd0ea7 100644 --- a/mediapipe/tasks/python/benchmark/benchmark_utils.py +++ b/mediapipe/tasks/python/benchmark/benchmark_utils.py @@ -14,6 +14,17 @@ """Benchmark utils for MediaPipe Tasks.""" import os +import numpy as np + + +def nth_percentile(inference_times, percentile): + """Calculate the nth percentile of the inference times.""" + return np.percentile(inference_times, percentile) + + +def average(inference_times): + """Calculate the average of the inference times.""" + return np.mean(inference_times) def get_test_data_path(test_srcdir, file_or_dirname_path: str) -> str: diff --git a/mediapipe/tasks/python/benchmark/vision/BUILD b/mediapipe/tasks/python/benchmark/vision/BUILD new file mode 100644 index 000000000..591ebb2d2 --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/BUILD @@ -0,0 +1,33 @@ +# Copyright 2022 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Placeholder for internal Python strict library and test compatibility macro. + +package(default_visibility = ["//visibility:public"]) + +py_binary( + name = "benchmark", + main = "benchmark.py", + srcs = ["benchmark.py"], + data = [ + "//mediapipe/tasks/testdata/vision:test_images", + "//mediapipe/tasks/testdata/vision:test_models", + ], + deps = [ + "//mediapipe/python:_framework_bindings", + "//mediapipe/tasks/python/core:base_options", + "//mediapipe/tasks/python/benchmark:benchmark_utils", + "//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api", + ], +) diff --git a/mediapipe/tasks/python/benchmark/vision/benchmark.py b/mediapipe/tasks/python/benchmark/vision/benchmark.py new file mode 100644 index 000000000..273668c3c --- /dev/null +++ b/mediapipe/tasks/python/benchmark/vision/benchmark.py @@ -0,0 +1,104 @@ +# Copyright 2023 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""MediaPipe vision benchmarker.""" + +import argparse + +from mediapipe.tasks.python.core import base_options +from mediapipe.tasks.python.benchmark import benchmark_utils as bu +from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api + + +def benchmarker(benchmark_function, default_model_name, default_image_name): + """Executes a benchmarking process using a specified function and + a default model (or a specified model) and reports the benchmarking + statistics. + + Args: + benchmark_function: A callable function to be executed for benchmarking. + This function should contain the logic of the task to be benchmarked and + should be capable of utilizing a model specified by its name. + default_model_name: The name or path of the default model to be used in + the benchmarking process. This is useful when the benchmarking function + requires a model and no other model is explicitly specified. + """ + parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter + ) + + parser.add_argument( + '--mode', + help='Benchmarking mode (e.g., "nth_percentile").', + required=False, + default='nth_percentile' + ) + parser.add_argument( + '--model', + help='Path to the model.', + default=None + ) + parser.add_argument( + '--iterations', + help='Number of iterations for benchmarking.', + type=int, + default=100 + ) + parser.add_argument( + '--percentile', + help='Percentile for benchmarking statistics.', + type=float, + default=95.0 + ) + + args = parser.parse_args() + + # Get the model path + default_model_path = bu.get_test_data_path( + base_vision_benchmark_api.VISION_TEST_DATA_DIR, + default_model_name + ) + model_path = bu.get_model_path(args.model, default_model_path) + + # Define a mapping of modes to their respective function argument lists + mode_args_mapping = { + 'nth_percentile': {'percentile': args.percentile}, + 'average': {} + # Add other modes and their arguments here + } + + # Check if the mode is supported and get the argument dictionary + if args.mode not in mode_args_mapping: + raise ValueError(f"Unsupported benchmarking mode: {args.mode}") + + mode_args = mode_args_mapping[args.mode] + + # Run the benchmark for both CPU and GPU and calculate results based on mode + results = {} + for delegate_type in [ + base_options.BaseOptions.Delegate.CPU, + base_options.BaseOptions.Delegate.GPU + ]: + inference_times = benchmark_function(model_path, args.iterations, + delegate_type) + + # Calculate the benchmark result based on the mode + if args.mode == 'nth_percentile': + results[delegate_type] = bu.nth_percentile(inference_times, **mode_args) + elif args.mode == 'average': + results[delegate_type] = bu.average(inference_times) + + # Report benchmarking results + for delegate_type, result in results.items(): + print(f'Inference time {delegate_type} {mode_args_mapping[args.mode]}: ' + f'{result:.6f} milliseconds') diff --git a/mediapipe/tasks/python/benchmark/vision/core/base_vision_benchmark_api.py b/mediapipe/tasks/python/benchmark/vision/core/base_vision_benchmark_api.py index 65c460613..ed3a0c9b7 100644 --- a/mediapipe/tasks/python/benchmark/vision/core/base_vision_benchmark_api.py +++ b/mediapipe/tasks/python/benchmark/vision/core/base_vision_benchmark_api.py @@ -12,25 +12,21 @@ # See the License for the specific language governing permissions and # limitations under the License. 
"""MediaPipe vision benchmark base api.""" -import os import time -import numpy as np VISION_TEST_DATA_DIR = 'mediapipe/tasks/testdata/vision' -def nth_percentile(func, image, n_iterations, percentile): - """Run a nth percentile benchmark for a given task using the function. +def benchmark_task(func, image, n_iterations): + """Collect inference times for a given task after benchmarking Args: - func: The method associated with a given task used for benchmarking. + func: The task function used for benchmarking. image: The input MediaPipe Image. n_iterations: Number of iterations to run the benchmark. - percentile: Percentage for the percentiles to compute. Values must be - between 0 and 100 inclusive. Returns: - The n-th percentile of the inference times in milliseconds. + List of inference times in milliseconds. """ inference_times = [] @@ -41,4 +37,4 @@ def nth_percentile(func, image, n_iterations, percentile): end_time_ns = time.time_ns() inference_times.append((end_time_ns - start_time_ns) / 1_000_000) - return np.percentile(inference_times, percentile) + return inference_times diff --git a/mediapipe/tasks/python/benchmark/vision/face_aligner/BUILD b/mediapipe/tasks/python/benchmark/vision/face_aligner/BUILD index 61791070f..95ccaeba6 100644 --- a/mediapipe/tasks/python/benchmark/vision/face_aligner/BUILD +++ b/mediapipe/tasks/python/benchmark/vision/face_aligner/BUILD @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Placeholder for internal Python strict library and test compatibility macro. +# Placeholder for internal Python strict binary compatibility macro. package(default_visibility = ["//visibility:public"]) @@ -30,5 +30,6 @@ py_binary( "//mediapipe/tasks/python/vision:face_aligner", "//mediapipe/tasks/python/benchmark:benchmark_utils", "//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api", + "//mediapipe/tasks/python/benchmark/vision:benchmark", ], ) diff --git a/mediapipe/tasks/python/benchmark/vision/face_aligner/face_aligner_benchmark.py b/mediapipe/tasks/python/benchmark/vision/face_aligner/face_aligner_benchmark.py index e51ae056a..83ead70eb 100644 --- a/mediapipe/tasks/python/benchmark/vision/face_aligner/face_aligner_benchmark.py +++ b/mediapipe/tasks/python/benchmark/vision/face_aligner/face_aligner_benchmark.py @@ -13,45 +13,32 @@ # limitations under the License. """MediaPipe face aligner benchmark.""" -import argparse - from mediapipe.python._framework_bindings import image from mediapipe.tasks.python.core import base_options from mediapipe.tasks.python.vision import face_aligner from mediapipe.tasks.python.benchmark import benchmark_utils from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api +from mediapipe.tasks.python.benchmark.vision import benchmark _MODEL_FILE = 'face_landmarker_v2.task' _IMAGE_FILE = 'portrait.jpg' -def run( - model: str, - n_iterations: int, - delegate: base_options.BaseOptions.Delegate, - percentile: float, -): - """Run an face aligner benchmark. - +def run(model_path, n_iterations, delegate): + """Run a face aligner benchmark. Args: - model: Path to the TFLite model. + model_path: Path to the TFLite model. n_iterations: Number of iterations to run the benchmark. delegate: CPU or GPU delegate for inference. - percentile: Percentage for the percentiles to compute. Values must be - between 0 and 100 inclusive. Returns: - The n-th percentile of the inference times. + List of inference times. 
""" # Initialize the face aligner - default_model_path = benchmark_utils.get_test_data_path( - base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE - ) - model_path = benchmark_utils.get_model_path(model, default_model_path) options = face_aligner.FaceAlignerOptions( - base_options=base_options.BaseOptions( - model_asset_path=model_path, delegate=delegate - ) + base_options=base_options.BaseOptions( + model_asset_path=model_path, delegate=delegate + ) ) with face_aligner.FaceAligner.create_from_options(options) as aligner: @@ -60,61 +47,11 @@ def run( base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE ) ) - # Run the benchmark and return the nth percentile of the inference times - nth_percentile = base_vision_benchmark_api.nth_percentile( - aligner.align, mp_image, n_iterations, percentile + inference_times = base_vision_benchmark_api.benchmark_task( + aligner.align, mp_image, n_iterations ) - return nth_percentile - - -def main(): - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument( - '--model', - help='Path to face aligner task.', - required=False, - default=None, - ) - parser.add_argument( - '--iterations', - help='Number of iterations for benchmarking.', - type=int, - default=100, - ) - parser.add_argument( - '--percentile', - help='Percentile for benchmarking statistics.', - type=float, - default=95.0, - ) - args = parser.parse_args() - - # Run benchmark on CPU - cpu_time = run( - args.model, - args.iterations, - base_options.BaseOptions.Delegate.CPU, - args.percentile, - ) - print( - f'{args.percentile}th Percentile Inference Time on CPU: ' - f'{cpu_time:.6f} milliseconds' - ) - - # Run benchmark on GPU - gpu_time = run( - args.model, - args.iterations, - base_options.BaseOptions.Delegate.GPU, - args.percentile, - ) - print( - f'{args.percentile}th Percentile Inference Time on GPU: ' - f'{gpu_time:.6f} milliseconds' - ) + return inference_times if __name__ == '__main__': - main() + benchmark.benchmarker(run, _MODEL_FILE) diff --git a/mediapipe/tasks/python/benchmark/vision/face_detector/BUILD b/mediapipe/tasks/python/benchmark/vision/face_detector/BUILD index 9eb67c19f..9941e904b 100644 --- a/mediapipe/tasks/python/benchmark/vision/face_detector/BUILD +++ b/mediapipe/tasks/python/benchmark/vision/face_detector/BUILD @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Placeholder for internal Python strict library and test compatibility macro. +# Placeholder for internal Python strict binary compatibility macro. package(default_visibility = ["//visibility:public"]) @@ -30,5 +30,6 @@ py_binary( "//mediapipe/tasks/python/vision:face_detector", "//mediapipe/tasks/python/benchmark:benchmark_utils", "//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api", + "//mediapipe/tasks/python/benchmark/vision:benchmark", ], ) diff --git a/mediapipe/tasks/python/benchmark/vision/face_detector/face_detector_benchmark.py b/mediapipe/tasks/python/benchmark/vision/face_detector/face_detector_benchmark.py index c4cd75100..cff31b035 100644 --- a/mediapipe/tasks/python/benchmark/vision/face_detector/face_detector_benchmark.py +++ b/mediapipe/tasks/python/benchmark/vision/face_detector/face_detector_benchmark.py @@ -11,47 +11,35 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
-"""MediaPipe face detector benchmark.""" - -import argparse +"""MediaPipe image embedder benchmark.""" from mediapipe.python._framework_bindings import image from mediapipe.tasks.python.core import base_options from mediapipe.tasks.python.vision import face_detector from mediapipe.tasks.python.benchmark import benchmark_utils from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api +from mediapipe.tasks.python.benchmark.vision import benchmark _MODEL_FILE = 'face_detection_short_range.tflite' _IMAGE_FILE = 'portrait.jpg' -def run( - model: str, - n_iterations: int, - delegate: base_options.BaseOptions.Delegate, - percentile: float, -): - """Run an face detector benchmark. +def run(model_path, n_iterations, delegate): + """Run a face detector benchmark. Args: - model: Path to the TFLite model. + model_path: Path to the TFLite model. n_iterations: Number of iterations to run the benchmark. delegate: CPU or GPU delegate for inference. - percentile: Percentage for the percentiles to compute. Values must be - between 0 and 100 inclusive. Returns: - The n-th percentile of the inference times. + List of inference times. """ # Initialize the face detector - default_model_path = benchmark_utils.get_test_data_path( - base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE - ) - model_path = benchmark_utils.get_model_path(model, default_model_path) options = face_detector.FaceDetectorOptions( - base_options=base_options.BaseOptions( - model_asset_path=model_path, delegate=delegate - ) + base_options=base_options.BaseOptions( + model_asset_path=model_path, delegate=delegate + ) ) with face_detector.FaceDetector.create_from_options(options) as detector: @@ -60,61 +48,11 @@ def run( base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE ) ) - # Run the benchmark and return the nth percentile of the inference times - nth_percentile = base_vision_benchmark_api.nth_percentile( - detector.detect, mp_image, n_iterations, percentile + inference_times = base_vision_benchmark_api.benchmark_task( + detector.detect, mp_image, n_iterations ) - return nth_percentile - - -def main(): - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument( - '--model', - help='Path to face detector task.', - required=False, - default=None, - ) - parser.add_argument( - '--iterations', - help='Number of iterations for benchmarking.', - type=int, - default=100, - ) - parser.add_argument( - '--percentile', - help='Percentile for benchmarking statistics.', - type=float, - default=95.0, - ) - args = parser.parse_args() - - # Run benchmark on CPU - cpu_time = run( - args.model, - args.iterations, - base_options.BaseOptions.Delegate.CPU, - args.percentile, - ) - print( - f'{args.percentile}th Percentile Inference Time on CPU: ' - f'{cpu_time:.6f} milliseconds' - ) - - # Run benchmark on GPU - gpu_time = run( - args.model, - args.iterations, - base_options.BaseOptions.Delegate.GPU, - args.percentile, - ) - print( - f'{args.percentile}th Percentile Inference Time on GPU: ' - f'{gpu_time:.6f} milliseconds' - ) + return inference_times if __name__ == '__main__': - main() + benchmark.benchmarker(run, _MODEL_FILE) diff --git a/mediapipe/tasks/python/benchmark/vision/face_landmarker/BUILD b/mediapipe/tasks/python/benchmark/vision/face_landmarker/BUILD index 70dd311e4..5500c3683 100644 --- a/mediapipe/tasks/python/benchmark/vision/face_landmarker/BUILD +++ b/mediapipe/tasks/python/benchmark/vision/face_landmarker/BUILD @@ -12,7 +12,7 @@ # See the 
License for the specific language governing permissions and # limitations under the License. -# Placeholder for internal Python strict library and test compatibility macro. +# Placeholder for internal Python strict binary compatibility macro. package(default_visibility = ["//visibility:public"]) @@ -30,5 +30,6 @@ py_binary( "//mediapipe/tasks/python/vision:face_landmarker", "//mediapipe/tasks/python/benchmark:benchmark_utils", "//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api", + "//mediapipe/tasks/python/benchmark/vision:benchmark", ], ) diff --git a/mediapipe/tasks/python/benchmark/vision/face_landmarker/face_landmarker_benchmark.py b/mediapipe/tasks/python/benchmark/vision/face_landmarker/face_landmarker_benchmark.py index 7a1f3f817..dc584b2e3 100644 --- a/mediapipe/tasks/python/benchmark/vision/face_landmarker/face_landmarker_benchmark.py +++ b/mediapipe/tasks/python/benchmark/vision/face_landmarker/face_landmarker_benchmark.py @@ -13,41 +13,29 @@ # limitations under the License. """MediaPipe face landmarker benchmark.""" -import argparse - from mediapipe.python._framework_bindings import image from mediapipe.tasks.python.core import base_options from mediapipe.tasks.python.vision import face_landmarker from mediapipe.tasks.python.benchmark import benchmark_utils from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api +from mediapipe.tasks.python.benchmark.vision import benchmark _MODEL_FILE = 'face_landmarker_v2.task' _IMAGE_FILE = 'portrait.jpg' -def run( - model: str, - n_iterations: int, - delegate: base_options.BaseOptions.Delegate, - percentile: float, -): - """Run an face landmarker benchmark. +def run(model_path, n_iterations, delegate): + """Run a face landmarker benchmark. Args: - model: Path to the TFLite model. + model_path: Path to the TFLite model. n_iterations: Number of iterations to run the benchmark. delegate: CPU or GPU delegate for inference. - percentile: Percentage for the percentiles to compute. Values must be - between 0 and 100 inclusive. Returns: - The n-th percentile of the inference times. + List of inference times. 
""" # Initialize the face landmarker - default_model_path = benchmark_utils.get_test_data_path( - base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE - ) - model_path = benchmark_utils.get_model_path(model, default_model_path) options = face_landmarker.FaceLandmarkerOptions( base_options=base_options.BaseOptions( model_asset_path=model_path, delegate=delegate @@ -60,61 +48,11 @@ def run( base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE ) ) - # Run the benchmark and return the nth percentile of the inference times - nth_percentile = base_vision_benchmark_api.nth_percentile( - landmarker.detect, mp_image, n_iterations, percentile + inference_times = base_vision_benchmark_api.benchmark_task( + landmarker.detect, mp_image, n_iterations ) - return nth_percentile - - -def main(): - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument( - '--model', - help='Path to face landmarker task.', - required=False, - default=None, - ) - parser.add_argument( - '--iterations', - help='Number of iterations for benchmarking.', - type=int, - default=100, - ) - parser.add_argument( - '--percentile', - help='Percentile for benchmarking statistics.', - type=float, - default=95.0, - ) - args = parser.parse_args() - - # Run benchmark on CPU - cpu_time = run( - args.model, - args.iterations, - base_options.BaseOptions.Delegate.CPU, - args.percentile, - ) - print( - f'{args.percentile}th Percentile Inference Time on CPU: ' - f'{cpu_time:.6f} milliseconds' - ) - - # Run benchmark on GPU - gpu_time = run( - args.model, - args.iterations, - base_options.BaseOptions.Delegate.GPU, - args.percentile, - ) - print( - f'{args.percentile}th Percentile Inference Time on GPU: ' - f'{gpu_time:.6f} milliseconds' - ) + return inference_times if __name__ == '__main__': - main() + benchmark.benchmarker(run, _MODEL_FILE) diff --git a/mediapipe/tasks/python/benchmark/vision/hand_landmarker/BUILD b/mediapipe/tasks/python/benchmark/vision/hand_landmarker/BUILD index 693b42faf..425df549f 100644 --- a/mediapipe/tasks/python/benchmark/vision/hand_landmarker/BUILD +++ b/mediapipe/tasks/python/benchmark/vision/hand_landmarker/BUILD @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Placeholder for internal Python strict library and test compatibility macro. +# Placeholder for internal Python strict binary compatibility macro. package(default_visibility = ["//visibility:public"]) @@ -30,5 +30,6 @@ py_binary( "//mediapipe/tasks/python/vision:hand_landmarker", "//mediapipe/tasks/python/benchmark:benchmark_utils", "//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api", + "//mediapipe/tasks/python/benchmark/vision:benchmark", ], ) diff --git a/mediapipe/tasks/python/benchmark/vision/hand_landmarker/hand_landmarker_benchmark.py b/mediapipe/tasks/python/benchmark/vision/hand_landmarker/hand_landmarker_benchmark.py index 8fd8fc210..9669af715 100644 --- a/mediapipe/tasks/python/benchmark/vision/hand_landmarker/hand_landmarker_benchmark.py +++ b/mediapipe/tasks/python/benchmark/vision/hand_landmarker/hand_landmarker_benchmark.py @@ -13,45 +13,33 @@ # limitations under the License. 
"""MediaPipe hand landmarker benchmark.""" -import argparse - from mediapipe.python._framework_bindings import image from mediapipe.tasks.python.core import base_options from mediapipe.tasks.python.vision import hand_landmarker from mediapipe.tasks.python.benchmark import benchmark_utils from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api +from mediapipe.tasks.python.benchmark.vision import benchmark _MODEL_FILE = 'hand_landmarker.task' _IMAGE_FILE = 'thumb_up.jpg' -def run( - model: str, - n_iterations: int, - delegate: base_options.BaseOptions.Delegate, - percentile: float, -): - """Run an hand landmarker benchmark. +def run(model_path, n_iterations, delegate): + """Run a hand landmarker benchmark. Args: - model: Path to the TFLite model. + model_path: Path to the TFLite model. n_iterations: Number of iterations to run the benchmark. delegate: CPU or GPU delegate for inference. - percentile: Percentage for the percentiles to compute. Values must be - between 0 and 100 inclusive. Returns: - The n-th percentile of the inference times. + List of inference times. """ # Initialize the hand landmarker - default_model_path = benchmark_utils.get_test_data_path( - base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE - ) - model_path = benchmark_utils.get_model_path(model, default_model_path) options = hand_landmarker.HandLandmarkerOptions( - base_options=base_options.BaseOptions( - model_asset_path=model_path, delegate=delegate - ) + base_options=base_options.BaseOptions( + model_asset_path=model_path, delegate=delegate + ) ) with hand_landmarker.HandLandmarker.create_from_options(options) as landmarker: @@ -60,61 +48,11 @@ def run( base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE ) ) - # Run the benchmark and return the nth percentile of the inference times - nth_percentile = base_vision_benchmark_api.nth_percentile( - landmarker.detect, mp_image, n_iterations, percentile + inference_times = base_vision_benchmark_api.benchmark_task( + landmarker.detect, mp_image, n_iterations ) - return nth_percentile - - -def main(): - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument( - '--model', - help='Path to hand landmarker task.', - required=False, - default=None, - ) - parser.add_argument( - '--iterations', - help='Number of iterations for benchmarking.', - type=int, - default=100, - ) - parser.add_argument( - '--percentile', - help='Percentile for benchmarking statistics.', - type=float, - default=95.0, - ) - args = parser.parse_args() - - # Run benchmark on CPU - cpu_time = run( - args.model, - args.iterations, - base_options.BaseOptions.Delegate.CPU, - args.percentile, - ) - print( - f'{args.percentile}th Percentile Inference Time on CPU: ' - f'{cpu_time:.6f} milliseconds' - ) - - # Run benchmark on GPU - gpu_time = run( - args.model, - args.iterations, - base_options.BaseOptions.Delegate.GPU, - args.percentile, - ) - print( - f'{args.percentile}th Percentile Inference Time on GPU: ' - f'{gpu_time:.6f} milliseconds' - ) + return inference_times if __name__ == '__main__': - main() + benchmark.benchmarker(run, _MODEL_FILE) diff --git a/mediapipe/tasks/python/benchmark/vision/image_classifier/BUILD b/mediapipe/tasks/python/benchmark/vision/image_classifier/BUILD index 738b9738a..c4cbe55dd 100644 --- a/mediapipe/tasks/python/benchmark/vision/image_classifier/BUILD +++ b/mediapipe/tasks/python/benchmark/vision/image_classifier/BUILD @@ -12,7 +12,7 @@ # See the License for the specific 
language governing permissions and # limitations under the License. -# Placeholder for internal Python strict library and test compatibility macro. +# Placeholder for internal Python strict binary compatibility macro. package(default_visibility = ["//visibility:public"]) @@ -30,5 +30,6 @@ py_binary( "//mediapipe/tasks/python/vision:image_classifier", "//mediapipe/tasks/python/benchmark:benchmark_utils", "//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api", + "//mediapipe/tasks/python/benchmark/vision:benchmark", ], ) diff --git a/mediapipe/tasks/python/benchmark/vision/image_classifier/README.md b/mediapipe/tasks/python/benchmark/vision/image_classifier/README.md index b444fd5b8..67ab6350d 100644 --- a/mediapipe/tasks/python/benchmark/vision/image_classifier/README.md +++ b/mediapipe/tasks/python/benchmark/vision/image_classifier/README.md @@ -9,7 +9,6 @@ Run this commands to download the TFLite models and image files: ``` cd mediapipe/mediapipe/tasks/python/benchmark/vision/image_classifier wget -O classifier.tflite -q https://storage.googleapis.com/mediapipe-models/image_classifier/efficientnet_lite0/float32/1/efficientnet_lite0.tflite -wget -O burger.jpg https://storage.googleapis.com/mediapipe-assets/burger.jpg ``` ## Run the benchmark @@ -18,7 +17,7 @@ bazel run -c opt //mediapipe/tasks/python/benchmark/vision/image_classifier:imag ``` * You can optionally specify the `model` parameter to set the TensorFlow Lite model to be used: - * The default value is `classifier.tflite` + * The default value is `mobilenet_v2_1.0_224.tflite` * TensorFlow Lite image classification models **with metadata** * Models from [TensorFlow Hub](https://tfhub.dev/tensorflow/collections/lite/task-library/image-classifier/1) * Models from [MediaPipe Models](https://developers.google.com/mediapipe/solutions/vision/image_classifier/index#models) @@ -29,7 +28,7 @@ bazel run -c opt //mediapipe/tasks/python/benchmark/vision/image_classifier:imag * Default value: `100` * Example usage: ``` - bazel run -c opt :image_classifier_benchmark \ + bazel run -c opt :image_classifier_benchmark -- \ --model classifier.tflite \ --iterations 200 ``` diff --git a/mediapipe/tasks/python/benchmark/vision/image_classifier/image_classifier_benchmark.py b/mediapipe/tasks/python/benchmark/vision/image_classifier/image_classifier_benchmark.py index f24b7dcd5..a49ccf46e 100644 --- a/mediapipe/tasks/python/benchmark/vision/image_classifier/image_classifier_benchmark.py +++ b/mediapipe/tasks/python/benchmark/vision/image_classifier/image_classifier_benchmark.py @@ -11,48 +11,36 @@ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. -"""MediaPipe image classsifier benchmark.""" - -import argparse +"""MediaPipe image classifier benchmark.""" from mediapipe.python._framework_bindings import image from mediapipe.tasks.python.core import base_options from mediapipe.tasks.python.vision import image_classifier from mediapipe.tasks.python.benchmark import benchmark_utils from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api +from mediapipe.tasks.python.benchmark.vision import benchmark _MODEL_FILE = 'mobilenet_v2_1.0_224.tflite' _IMAGE_FILE = 'burger.jpg' -def run( - model: str, - n_iterations: int, - delegate: base_options.BaseOptions.Delegate, - percentile: float, -): - """Run an image classification benchmark. 
+def run(model_path, n_iterations, delegate): + """Run an image classifier benchmark. Args: - model: Path to the TFLite model. + model_path: Path to the TFLite model. n_iterations: Number of iterations to run the benchmark. delegate: CPU or GPU delegate for inference. - percentile: Percentage for the percentiles to compute. Values must be - between 0 and 100 inclusive. Returns: - The n-th percentile of the inference times. + List of inference times. """ # Initialize the image classifier - default_model_path = benchmark_utils.get_test_data_path( - base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE - ) - model_path = benchmark_utils.get_model_path(model, default_model_path) options = image_classifier.ImageClassifierOptions( - base_options=base_options.BaseOptions( - model_asset_path=model_path, delegate=delegate - ), - max_results=1, + base_options=base_options.BaseOptions( + model_asset_path=model_path, delegate=delegate + ), + max_results=1, ) with image_classifier.ImageClassifier.create_from_options(options) as classifier: @@ -61,61 +49,11 @@ def run( base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE ) ) - # Run the benchmark and return the nth percentile of the inference times - nth_percentile = base_vision_benchmark_api.nth_percentile( - classifier.classify, mp_image, n_iterations, percentile + inference_times = base_vision_benchmark_api.benchmark_task( + classifier.classify, mp_image, n_iterations ) - return nth_percentile - - -def main(): - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument( - '--model', - help='Path to image classification model.', - required=False, - default=None, - ) - parser.add_argument( - '--iterations', - help='Number of iterations for benchmarking.', - type=int, - default=100, - ) - parser.add_argument( - '--percentile', - help='Percentile for benchmarking statistics.', - type=float, - default=95.0, - ) - args = parser.parse_args() - - # Run benchmark on CPU - cpu_time = run( - args.model, - args.iterations, - base_options.BaseOptions.Delegate.CPU, - args.percentile, - ) - print( - f'{args.percentile}th Percentile Inference Time on CPU: ' - f'{cpu_time:.6f} milliseconds' - ) - - # Run benchmark on GPU - gpu_time = run( - args.model, - args.iterations, - base_options.BaseOptions.Delegate.GPU, - args.percentile, - ) - print( - f'{args.percentile}th Percentile Inference Time on GPU: ' - f'{gpu_time:.6f} milliseconds' - ) + return inference_times if __name__ == '__main__': - main() + benchmark.benchmarker(run, _MODEL_FILE) diff --git a/mediapipe/tasks/python/benchmark/vision/image_embedder/BUILD b/mediapipe/tasks/python/benchmark/vision/image_embedder/BUILD index 059bdd095..9e00ead3e 100644 --- a/mediapipe/tasks/python/benchmark/vision/image_embedder/BUILD +++ b/mediapipe/tasks/python/benchmark/vision/image_embedder/BUILD @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Placeholder for internal Python strict library and test compatibility macro. +# Placeholder for internal Python strict binary compatibility macro. 
package(default_visibility = ["//visibility:public"]) @@ -30,5 +30,6 @@ py_binary( "//mediapipe/tasks/python/vision:image_embedder", "//mediapipe/tasks/python/benchmark:benchmark_utils", "//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api", + "//mediapipe/tasks/python/benchmark/vision:benchmark", ], ) diff --git a/mediapipe/tasks/python/benchmark/vision/image_embedder/image_embedder_benchmark.py b/mediapipe/tasks/python/benchmark/vision/image_embedder/image_embedder_benchmark.py index 1da3b8e89..331cbb77b 100644 --- a/mediapipe/tasks/python/benchmark/vision/image_embedder/image_embedder_benchmark.py +++ b/mediapipe/tasks/python/benchmark/vision/image_embedder/image_embedder_benchmark.py @@ -13,41 +13,29 @@ # limitations under the License. """MediaPipe image embedder benchmark.""" -import argparse - from mediapipe.python._framework_bindings import image from mediapipe.tasks.python.core import base_options from mediapipe.tasks.python.vision import image_embedder from mediapipe.tasks.python.benchmark import benchmark_utils from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api +from mediapipe.tasks.python.benchmark.vision import benchmark _MODEL_FILE = 'mobilenet_v3_small_100_224_embedder.tflite' _IMAGE_FILE = 'burger.jpg' -def run( - model: str, - n_iterations: int, - delegate: base_options.BaseOptions.Delegate, - percentile: float, -): - """Run an image embedding extraction benchmark. +def run(model_path, n_iterations, delegate): + """Run an image embedding benchmark. Args: - model: Path to the TFLite model. + model_path: Path to the TFLite model. n_iterations: Number of iterations to run the benchmark. delegate: CPU or GPU delegate for inference. - percentile: Percentage for the percentiles to compute. Values must be - between 0 and 100 inclusive. Returns: - The n-th percentile of the inference times. + List of inference times. 
""" # Initialize the image embedder - default_model_path = benchmark_utils.get_test_data_path( - base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE - ) - model_path = benchmark_utils.get_model_path(model, default_model_path) options = image_embedder.ImageEmbedderOptions( base_options=base_options.BaseOptions( model_asset_path=model_path, delegate=delegate @@ -60,61 +48,11 @@ def run( base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE ) ) - # Run the benchmark and return the nth percentile of the inference times - nth_percentile = base_vision_benchmark_api.nth_percentile( - embedder.embed, mp_image, n_iterations, percentile + inference_times = base_vision_benchmark_api.benchmark_task( + embedder.embed, mp_image, n_iterations ) - return nth_percentile - - -def main(): - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument( - '--model', - help='Path to image embedding extraction model.', - required=False, - default=None, - ) - parser.add_argument( - '--iterations', - help='Number of iterations for benchmarking.', - type=int, - default=100, - ) - parser.add_argument( - '--percentile', - help='Percentile for benchmarking statistics.', - type=float, - default=95.0, - ) - args = parser.parse_args() - - # Run benchmark on CPU - cpu_time = run( - args.model, - args.iterations, - base_options.BaseOptions.Delegate.CPU, - args.percentile, - ) - print( - f'{args.percentile}th Percentile Inference Time on CPU: ' - f'{cpu_time:.6f} milliseconds' - ) - - # Run benchmark on GPU - gpu_time = run( - args.model, - args.iterations, - base_options.BaseOptions.Delegate.GPU, - args.percentile, - ) - print( - f'{args.percentile}th Percentile Inference Time on GPU: ' - f'{gpu_time:.6f} milliseconds' - ) + return inference_times if __name__ == '__main__': - main() + benchmark.benchmarker(run, _MODEL_FILE) diff --git a/mediapipe/tasks/python/benchmark/vision/image_segmenter/BUILD b/mediapipe/tasks/python/benchmark/vision/image_segmenter/BUILD index bfb2ee763..fc8d87239 100644 --- a/mediapipe/tasks/python/benchmark/vision/image_segmenter/BUILD +++ b/mediapipe/tasks/python/benchmark/vision/image_segmenter/BUILD @@ -30,5 +30,6 @@ py_binary( "//mediapipe/tasks/python/vision:image_segmenter", "//mediapipe/tasks/python/benchmark:benchmark_utils", "//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api", + "//mediapipe/tasks/python/benchmark/vision:benchmark", ], ) diff --git a/mediapipe/tasks/python/benchmark/vision/image_segmenter/image_segmenter_benchmark.py b/mediapipe/tasks/python/benchmark/vision/image_segmenter/image_segmenter_benchmark.py index ef885b11c..4cff3419a 100644 --- a/mediapipe/tasks/python/benchmark/vision/image_segmenter/image_segmenter_benchmark.py +++ b/mediapipe/tasks/python/benchmark/vision/image_segmenter/image_segmenter_benchmark.py @@ -13,41 +13,29 @@ # limitations under the License. 
"""MediaPipe image segmenter benchmark.""" -import argparse - from mediapipe.python._framework_bindings import image from mediapipe.tasks.python.core import base_options from mediapipe.tasks.python.vision import image_segmenter from mediapipe.tasks.python.benchmark import benchmark_utils from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api +from mediapipe.tasks.python.benchmark.vision import benchmark _MODEL_FILE = 'deeplabv3.tflite' _IMAGE_FILE = 'segmentation_input_rotation0.jpg' -def run( - model: str, - n_iterations: int, - delegate: base_options.BaseOptions.Delegate, - percentile: float, -): - """Run an image segmentation benchmark. +def run(model_path, n_iterations, delegate): + """Run an image segmenter benchmark. Args: - model: Path to the TFLite model. + model_path: Path to the TFLite model. n_iterations: Number of iterations to run the benchmark. delegate: CPU or GPU delegate for inference. - percentile: Percentage for the percentiles to compute. Values must be - between 0 and 100 inclusive. Returns: - The n-th percentile of the inference times. + List of inference times. """ # Initialize the image segmenter - default_model_path = benchmark_utils.get_test_data_path( - base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE - ) - model_path = benchmark_utils.get_model_path(model, default_model_path) options = image_segmenter.ImageSegmenterOptions( base_options=base_options.BaseOptions( model_asset_path=model_path, delegate=delegate @@ -61,61 +49,11 @@ def run( base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE ) ) - # Run the benchmark and return the nth percentile of the inference times - nth_percentile = base_vision_benchmark_api.nth_percentile( - segmenter.segment, mp_image, n_iterations, percentile + inference_times = base_vision_benchmark_api.benchmark_task( + segmenter.segment, mp_image, n_iterations ) - return nth_percentile - - -def main(): - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument( - '--model', - help='Path to image segmentation model.', - required=False, - default=None, - ) - parser.add_argument( - '--iterations', - help='Number of iterations for benchmarking.', - type=int, - default=100, - ) - parser.add_argument( - '--percentile', - help='Percentile for benchmarking statistics.', - type=float, - default=95.0, - ) - args = parser.parse_args() - - # Run benchmark on CPU - cpu_time = run( - args.model, - args.iterations, - base_options.BaseOptions.Delegate.CPU, - args.percentile, - ) - print( - f'{args.percentile}th Percentile Inference Time on CPU: ' - f'{cpu_time:.6f} milliseconds' - ) - - # Run benchmark on GPU - gpu_time = run( - args.model, - args.iterations, - base_options.BaseOptions.Delegate.GPU, - args.percentile, - ) - print( - f'{args.percentile}th Percentile Inference Time on GPU: ' - f'{gpu_time:.6f} milliseconds' - ) + return inference_times if __name__ == '__main__': - main() + benchmark.benchmarker(run, _MODEL_FILE) diff --git a/mediapipe/tasks/python/benchmark/vision/interactive_segmenter/BUILD b/mediapipe/tasks/python/benchmark/vision/interactive_segmenter/BUILD index 1a0bab418..d238529f1 100644 --- a/mediapipe/tasks/python/benchmark/vision/interactive_segmenter/BUILD +++ b/mediapipe/tasks/python/benchmark/vision/interactive_segmenter/BUILD @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-# Placeholder for internal Python strict library and test compatibility macro. +# Placeholder for internal Python strict binary compatibility macro. package(default_visibility = ["//visibility:public"]) @@ -30,5 +30,6 @@ py_binary( "//mediapipe/tasks/python/vision:interactive_segmenter", "//mediapipe/tasks/python/benchmark:benchmark_utils", "//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api", + "//mediapipe/tasks/python/benchmark/vision:benchmark", ], ) diff --git a/mediapipe/tasks/python/benchmark/vision/interactive_segmenter/interactive_segmenter_benchmark.py b/mediapipe/tasks/python/benchmark/vision/interactive_segmenter/interactive_segmenter_benchmark.py index 3ecc7f661..e13fac456 100644 --- a/mediapipe/tasks/python/benchmark/vision/interactive_segmenter/interactive_segmenter_benchmark.py +++ b/mediapipe/tasks/python/benchmark/vision/interactive_segmenter/interactive_segmenter_benchmark.py @@ -12,45 +12,32 @@ # See the License for the specific language governing permissions and # limitations under the License. """MediaPipe interactive segmenter benchmark.""" - from functools import partial -import argparse from mediapipe.python._framework_bindings import image from mediapipe.tasks.python.core import base_options -from mediapipe.tasks.python.components.containers import keypoint from mediapipe.tasks.python.vision import interactive_segmenter +from mediapipe.tasks.python.components.containers import keypoint from mediapipe.tasks.python.benchmark import benchmark_utils from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api +from mediapipe.tasks.python.benchmark.vision import benchmark -_MODEL_FILE = 'deeplabv3.tflite' -_IMAGE_FILE = 'segmentation_input_rotation0.jpg' +_MODEL_FILE = 'ptm_512_hdt_ptm_woid.tflite' +_IMAGE_FILE = 'cats_and_dogs.jpg' -def run( - model: str, - n_iterations: int, - delegate: base_options.BaseOptions.Delegate, - percentile: float, -): - """Run an interactive segmentation benchmark. +def run(model_path, n_iterations, delegate): + """Run an interactive segmenter benchmark. Args: - model: Path to the TFLite model. + model_path: Path to the TFLite model. n_iterations: Number of iterations to run the benchmark. delegate: CPU or GPU delegate for inference. - percentile: Percentage for the percentiles to compute. Values must be - between 0 and 100 inclusive. Returns: - The n-th percentile of the inference times. + List of inference times. 
""" - # Initialize the interactive segmenter - default_model_path = benchmark_utils.get_test_data_path( - base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE - ) - model_path = benchmark_utils.get_model_path(model, default_model_path) - + # Initialize the image segmenter options = interactive_segmenter.InteractiveSegmenterOptions( base_options=base_options.BaseOptions( model_asset_path=model_path, delegate=delegate @@ -68,61 +55,11 @@ def run( base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE ) ) - # Run the benchmark and return the nth percentile of the inference times - nth_percentile = base_vision_benchmark_api.nth_percentile( - partial(segmenter.segment, roi=roi), mp_image, n_iterations, percentile + inference_times = base_vision_benchmark_api.benchmark_task( + partial(segmenter.segment, roi=roi), mp_image, n_iterations ) - return nth_percentile - - -def main(): - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument( - '--model', - help='Path to interactive segmentation model.', - required=False, - default=None, - ) - parser.add_argument( - '--iterations', - help='Number of iterations for benchmarking.', - type=int, - default=100, - ) - parser.add_argument( - '--percentile', - help='Percentile for benchmarking statistics.', - type=float, - default=95.0, - ) - args = parser.parse_args() - - # Run benchmark on CPU - cpu_time = run( - args.model, - args.iterations, - base_options.BaseOptions.Delegate.CPU, - args.percentile, - ) - print( - f'{args.percentile}th Percentile Inference Time on CPU: ' - f'{cpu_time:.6f} milliseconds' - ) - - # Run benchmark on GPU - gpu_time = run( - args.model, - args.iterations, - base_options.BaseOptions.Delegate.GPU, - args.percentile, - ) - print( - f'{args.percentile}th Percentile Inference Time on GPU: ' - f'{gpu_time:.6f} milliseconds' - ) + return inference_times if __name__ == '__main__': - main() + benchmark.benchmarker(run, _MODEL_FILE) diff --git a/mediapipe/tasks/python/benchmark/vision/object_detector/BUILD b/mediapipe/tasks/python/benchmark/vision/object_detector/BUILD index 2db19db5e..f91bdb891 100644 --- a/mediapipe/tasks/python/benchmark/vision/object_detector/BUILD +++ b/mediapipe/tasks/python/benchmark/vision/object_detector/BUILD @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Placeholder for internal Python strict library and test compatibility macro. +# Placeholder for internal Python strict binary compatibility macro. package(default_visibility = ["//visibility:public"]) @@ -30,5 +30,6 @@ py_binary( "//mediapipe/tasks/python/vision:object_detector", "//mediapipe/tasks/python/benchmark:benchmark_utils", "//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api", + "//mediapipe/tasks/python/benchmark/vision:benchmark", ], ) diff --git a/mediapipe/tasks/python/benchmark/vision/object_detector/object_detector_benchmark.py b/mediapipe/tasks/python/benchmark/vision/object_detector/object_detector_benchmark.py index 80f8302a2..8829a9410 100644 --- a/mediapipe/tasks/python/benchmark/vision/object_detector/object_detector_benchmark.py +++ b/mediapipe/tasks/python/benchmark/vision/object_detector/object_detector_benchmark.py @@ -13,45 +13,33 @@ # limitations under the License. 
"""MediaPipe object detector benchmark.""" -import argparse - from mediapipe.python._framework_bindings import image from mediapipe.tasks.python.core import base_options from mediapipe.tasks.python.vision import object_detector from mediapipe.tasks.python.benchmark import benchmark_utils from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api +from mediapipe.tasks.python.benchmark.vision import benchmark _MODEL_FILE = 'coco_efficientdet_lite0_v1_1.0_quant_2021_09_06.tflite' _IMAGE_FILE = 'cats_and_dogs.jpg' -def run( - model: str, - n_iterations: int, - delegate: base_options.BaseOptions.Delegate, - percentile: float, -): +def run(model_path, n_iterations, delegate): """Run an object detector benchmark. Args: - model: Path to the TFLite model. + model_path: Path to the TFLite model. n_iterations: Number of iterations to run the benchmark. delegate: CPU or GPU delegate for inference. - percentile: Percentage for the percentiles to compute. Values must be - between 0 and 100 inclusive. Returns: - The n-th percentile of the inference times. + List of inference times. """ # Initialize the object detector - default_model_path = benchmark_utils.get_test_data_path( - base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE - ) - model_path = benchmark_utils.get_model_path(model, default_model_path) options = object_detector.ObjectDetectorOptions( - base_options=base_options.BaseOptions( - model_asset_path=model_path, delegate=delegate - ) + base_options=base_options.BaseOptions( + model_asset_path=model_path, delegate=delegate + ) ) with object_detector.ObjectDetector.create_from_options(options) as detector: @@ -60,61 +48,11 @@ def run( base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE ) ) - # Run the benchmark and return the nth percentile of the inference times - nth_percentile = base_vision_benchmark_api.nth_percentile( - detector.detect, mp_image, n_iterations, percentile + inference_times = base_vision_benchmark_api.benchmark_task( + detector.detect, mp_image, n_iterations ) - return nth_percentile - - -def main(): - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument( - '--model', - help='Path to object detector model.', - required=False, - default=None, - ) - parser.add_argument( - '--iterations', - help='Number of iterations for benchmarking.', - type=int, - default=100, - ) - parser.add_argument( - '--percentile', - help='Percentile for benchmarking statistics.', - type=float, - default=95.0, - ) - args = parser.parse_args() - - # Run benchmark on CPU - cpu_time = run( - args.model, - args.iterations, - base_options.BaseOptions.Delegate.CPU, - args.percentile, - ) - print( - f'{args.percentile}th Percentile Inference Time on CPU: ' - f'{cpu_time:.6f} milliseconds' - ) - - # Run benchmark on GPU - gpu_time = run( - args.model, - args.iterations, - base_options.BaseOptions.Delegate.GPU, - args.percentile, - ) - print( - f'{args.percentile}th Percentile Inference Time on GPU: ' - f'{gpu_time:.6f} milliseconds' - ) + return inference_times if __name__ == '__main__': - main() + benchmark.benchmarker(run, _MODEL_FILE) diff --git a/mediapipe/tasks/python/benchmark/vision/pose_landmarker/BUILD b/mediapipe/tasks/python/benchmark/vision/pose_landmarker/BUILD index 38c778c00..75e0dad18 100644 --- a/mediapipe/tasks/python/benchmark/vision/pose_landmarker/BUILD +++ b/mediapipe/tasks/python/benchmark/vision/pose_landmarker/BUILD @@ -12,7 +12,7 @@ # See the License for the specific language 
governing permissions and # limitations under the License. -# Placeholder for internal Python strict library and test compatibility macro. +# Placeholder for internal Python strict binary compatibility macro. package(default_visibility = ["//visibility:public"]) @@ -30,5 +30,6 @@ py_binary( "//mediapipe/tasks/python/vision:pose_landmarker", "//mediapipe/tasks/python/benchmark:benchmark_utils", "//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api", + "//mediapipe/tasks/python/benchmark/vision:benchmark", ], ) diff --git a/mediapipe/tasks/python/benchmark/vision/pose_landmarker/pose_landmarker_benchmark.py b/mediapipe/tasks/python/benchmark/vision/pose_landmarker/pose_landmarker_benchmark.py index a1c55f0b8..38ac0e546 100644 --- a/mediapipe/tasks/python/benchmark/vision/pose_landmarker/pose_landmarker_benchmark.py +++ b/mediapipe/tasks/python/benchmark/vision/pose_landmarker/pose_landmarker_benchmark.py @@ -13,45 +13,33 @@ # limitations under the License. """MediaPipe pose landmarker benchmark.""" -import argparse - from mediapipe.python._framework_bindings import image from mediapipe.tasks.python.core import base_options from mediapipe.tasks.python.vision import pose_landmarker from mediapipe.tasks.python.benchmark import benchmark_utils from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api +from mediapipe.tasks.python.benchmark.vision import benchmark _MODEL_FILE = 'pose_landmarker.task' _IMAGE_FILE = 'pose.jpg' -def run( - model: str, - n_iterations: int, - delegate: base_options.BaseOptions.Delegate, - percentile: float, -): +def run(model_path, n_iterations, delegate): """Run an pose landmarker benchmark. Args: - model: Path to the TFLite model. + model_path: Path to the TFLite model. n_iterations: Number of iterations to run the benchmark. delegate: CPU or GPU delegate for inference. - percentile: Percentage for the percentiles to compute. Values must be - between 0 and 100 inclusive. Returns: - The n-th percentile of the inference times. + List of inference times. 
""" # Initialize the pose landmarker - default_model_path = benchmark_utils.get_test_data_path( - base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE - ) - model_path = benchmark_utils.get_model_path(model, default_model_path) options = pose_landmarker.PoseLandmarkerOptions( - base_options=base_options.BaseOptions( - model_asset_path=model_path, delegate=delegate - ) + base_options=base_options.BaseOptions( + model_asset_path=model_path, delegate=delegate + ) ) with pose_landmarker.PoseLandmarker.create_from_options(options) as landmarker: @@ -60,61 +48,11 @@ def run( base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE ) ) - # Run the benchmark and return the nth percentile of the inference times - nth_percentile = base_vision_benchmark_api.nth_percentile( - landmarker.detect, mp_image, n_iterations, percentile + inference_times = base_vision_benchmark_api.benchmark_task( + landmarker.detect, mp_image, n_iterations ) - return nth_percentile - - -def main(): - parser = argparse.ArgumentParser( - formatter_class=argparse.ArgumentDefaultsHelpFormatter - ) - parser.add_argument( - '--model', - help='Path to pose landmarker task.', - required=False, - default=None, - ) - parser.add_argument( - '--iterations', - help='Number of iterations for benchmarking.', - type=int, - default=100, - ) - parser.add_argument( - '--percentile', - help='Percentile for benchmarking statistics.', - type=float, - default=95.0, - ) - args = parser.parse_args() - - # Run benchmark on CPU - cpu_time = run( - args.model, - args.iterations, - base_options.BaseOptions.Delegate.CPU, - args.percentile, - ) - print( - f'{args.percentile}th Percentile Inference Time on CPU: ' - f'{cpu_time:.6f} milliseconds' - ) - - # Run benchmark on GPU - gpu_time = run( - args.model, - args.iterations, - base_options.BaseOptions.Delegate.GPU, - args.percentile, - ) - print( - f'{args.percentile}th Percentile Inference Time on GPU: ' - f'{gpu_time:.6f} milliseconds' - ) + return inference_times if __name__ == '__main__': - main() + benchmark.benchmarker(run, _MODEL_FILE) diff --git a/mediapipe/tasks/testdata/vision/BUILD b/mediapipe/tasks/testdata/vision/BUILD index 3f83118b0..117da6eaa 100644 --- a/mediapipe/tasks/testdata/vision/BUILD +++ b/mediapipe/tasks/testdata/vision/BUILD @@ -90,6 +90,7 @@ mediapipe_files(srcs = [ "pose_landmark_lite.tflite", "pose_landmarker.task", "pose_segmentation_mask_golden.png", + "ptm_512_hdt_ptm_woid.tflite", "right_hands.jpg", "right_hands_rotated.jpg", "segmentation_golden_rotation0.png", @@ -202,6 +203,7 @@ filegroup( "pose_detection.tflite", "pose_landmark_lite.tflite", "pose_landmarker.task", + "ptm_512_hdt_ptm_woid.tflite", "selfie_segm_128_128_3.tflite", "selfie_segm_144_256_3.tflite", "selfie_segmentation.tflite", From 6bdc7ce016b70db9e1b0e8e58351d028b9a1c790 Mon Sep 17 00:00:00 2001 From: Kinar Date: Thu, 16 Nov 2023 16:39:21 -0800 Subject: [PATCH 3/3] Removed unused param --- mediapipe/tasks/python/benchmark/vision/benchmark.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/mediapipe/tasks/python/benchmark/vision/benchmark.py b/mediapipe/tasks/python/benchmark/vision/benchmark.py index 273668c3c..66bee680b 100644 --- a/mediapipe/tasks/python/benchmark/vision/benchmark.py +++ b/mediapipe/tasks/python/benchmark/vision/benchmark.py @@ -20,7 +20,7 @@ from mediapipe.tasks.python.benchmark import benchmark_utils as bu from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api -def benchmarker(benchmark_function, default_model_name, 
default_image_name): +def benchmarker(benchmark_function, default_model_name): """Executes a benchmarking process using a specified function and a default model (or a specified model) and reports the benchmarking statistics.
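
Note on the shared timing helper: each refactored run() in this series now returns the list produced by base_vision_benchmark_api.benchmark_task, but that helper's body is not part of this excerpt. The sketch below is only an illustration of what such a loop could look like given the call sites (a callable, an image, and an iteration count in; per-iteration milliseconds out); it is not the actual MediaPipe source.

# Illustrative sketch only -- not the real base_vision_benchmark_api code.
import time


def benchmark_task(task_func, mp_image, n_iterations):
  """Times n_iterations single-image calls; returns latencies in ms."""
  inference_times = []
  for _ in range(n_iterations):
    start = time.perf_counter()
    task_func(mp_image)  # e.g. detector.detect or landmarker.detect
    inference_times.append((time.perf_counter() - start) * 1000)
  return inference_times

Whatever the exact implementation, the contract this series relies on is that the helper returns the full list of timings, so that percentile computation can move out of the per-task scripts.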
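The scripts likewise hand flag parsing, CPU/GPU dispatch, and reporting to benchmark.benchmarker(run, _MODEL_FILE), whose two-parameter signature is what PATCH 3/3 settles on (the unused default_image_name is dropped because each script resolves its own _IMAGE_FILE). Its implementation is also outside this excerpt; the following is a hedged sketch of the overall flow, reusing the --model/--iterations/--percentile flags and defaults from the argparse blocks this series deletes and assuming numpy for the percentile statistic. The real benchmark.py may differ.

# Hedged sketch of a possible benchmark.benchmarker; not the actual module.
import argparse

import numpy as np

from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.benchmark import benchmark_utils as bu
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api


def benchmarker(benchmark_function, default_model_name):
  parser = argparse.ArgumentParser(
      formatter_class=argparse.ArgumentDefaultsHelpFormatter
  )
  parser.add_argument('--model', default=None, help='Path to a custom model.')
  parser.add_argument(
      '--iterations', type=int, default=100, help='Benchmark iterations.'
  )
  parser.add_argument(
      '--percentile', type=float, default=95.0, help='Reported percentile.'
  )
  args = parser.parse_args()

  # Fall back to the task's bundled test model when no custom model is given.
  default_model_path = bu.get_test_data_path(
      base_vision_benchmark_api.VISION_TEST_DATA_DIR, default_model_name
  )
  model_path = bu.get_model_path(args.model, default_model_path)

  # Run the task-specific benchmark on both delegates and report the
  # requested percentile of the per-iteration inference times.
  for delegate in (
      base_options.BaseOptions.Delegate.CPU,
      base_options.BaseOptions.Delegate.GPU,
  ):
    inference_times = benchmark_function(model_path, args.iterations, delegate)
    value = np.percentile(inference_times, args.percentile)
    print(
        f'{args.percentile}th percentile inference time on {delegate.name}: '
        f'{value:.6f} ms'
    )

Under this shape, a script such as object_detector_benchmark.py only has to supply its run() callable and default model file name, exactly as the __main__ blocks added in this series do.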