Merge pull request #4966 from kinaryml:python-vision-benchmark-scripts

PiperOrigin-RevId: 586349225
Copybara-Service 2023-11-29 08:27:59 -08:00
commit bb4906bcd3
29 changed files with 1194 additions and 84 deletions

@@ -0,0 +1,24 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder: load py_library
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
py_library(
name = "benchmark_utils",
srcs = ["benchmark_utils.py"],
)

@@ -0,0 +1,70 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark utils for MediaPipe Tasks."""
import os
import numpy as np
def nth_percentile(inference_times, percentile):
"""Calculate the nth percentile of the inference times."""
return np.percentile(inference_times, percentile)
def average(inference_times):
"""Calculate the average of the inference times."""
return np.mean(inference_times)
def get_test_data_path(test_srcdir, file_or_dirname_path: str) -> str:
"""Determine the test data path.
Args:
test_srcdir: The path to the test source directory.
file_or_dirname_path: The path to the file or directory.
Returns:
The full test data path.
"""
for directory, subdirs, files in os.walk(test_srcdir):
for f in subdirs + files:
path = os.path.join(directory, f)
if path.endswith(file_or_dirname_path):
return path
raise ValueError(
"No %s in test directory: %s." % (file_or_dirname_path, test_srcdir)
)
def get_model_path(custom_model, default_model_path):
"""Determine the model path based on the existence of the custom model.
Args:
custom_model: The path to the custom model provided by the user.
default_model_path: The path to the default model.
Returns:
The path to the model to be used.
"""
if custom_model is not None and os.path.exists(custom_model):
print(f"Using provided model: {custom_model}")
return custom_model
else:
if custom_model is not None:
print(
f"Warning: Provided model '{custom_model}' not found. "
"Using default model instead."
)
print(f"Using default model: {default_model_path}")
return default_model_path
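
As a quick usage sketch (not part of the change itself), assuming the Bazel runfiles root is exposed via the `TEST_SRCDIR` environment variable and `my_model.tflite` is a hypothetical user-supplied path:

```
import os

from mediapipe.tasks.python.benchmark import benchmark_utils

# Illustrative per-iteration latencies in milliseconds.
inference_times = [12.1, 11.8, 13.4, 12.6]
print(benchmark_utils.average(inference_times))             # mean latency
print(benchmark_utils.nth_percentile(inference_times, 95))  # p95 latency

# Locate a bundled test model under the runfiles tree, then fall back to it
# when the user-supplied path does not exist.
default_model = benchmark_utils.get_test_data_path(
    os.environ.get('TEST_SRCDIR', '.'), 'mobilenet_v2_1.0_224.tflite'
)
model_path = benchmark_utils.get_model_path('my_model.tflite', default_model)
```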

@@ -0,0 +1,33 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder: load py_binary
package(default_visibility = ["//visibility:public"])
py_binary(
name = "benchmark",
srcs = ["benchmark.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
],
main = "benchmark.py",
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
"//mediapipe/tasks/python/core:base_options",
],
)

@@ -0,0 +1,99 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe vision benchmarker."""
import argparse
from mediapipe.tasks.python.benchmark import benchmark_utils as bu
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
from mediapipe.tasks.python.core import base_options
def benchmarker(benchmark_function, default_model_name):
"""Executes a benchmarking process using a specified function ann model.
Args:
benchmark_function: A callable function to be executed for benchmarking.
This function should contain the logic of the task to be benchmarked and
should be capable of utilizing a model specified by its name.
default_model_name: The name or path of the default model to be used in
the benchmarking process. This is useful when the benchmarking function
requires a model and no other model is explicitly specified.
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--mode',
help='Benchmarking mode (e.g., "nth_percentile").',
required=False,
default='nth_percentile',
)
parser.add_argument('--model', help='Path to the model.', default=None)
parser.add_argument(
'--iterations',
help='Number of iterations for benchmarking.',
type=int,
default=100,
)
parser.add_argument(
'--percentile',
help='Percentile for benchmarking statistics.',
type=float,
default=95.0,
)
args = parser.parse_args()
# Get the model path
default_model_path = bu.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, default_model_name
)
model_path = bu.get_model_path(args.model, default_model_path)
# Define a mapping of modes to their respective function argument lists
mode_args_mapping = {
'nth_percentile': {'percentile': args.percentile},
'average': {},
}
# Check if the mode is supported and get the argument dictionary
if args.mode not in mode_args_mapping:
raise ValueError(f'Unsupported benchmarking mode: {args.mode}')
mode_args = mode_args_mapping[args.mode]
# Run the benchmark for both CPU and GPU and calculate results based on mode
results = {}
for delegate_type in [
base_options.BaseOptions.Delegate.CPU,
base_options.BaseOptions.Delegate.GPU,
]:
inference_times = benchmark_function(
model_path, args.iterations, delegate_type
)
# Calculate the benchmark result based on the mode
if args.mode == 'nth_percentile':
results[delegate_type] = bu.nth_percentile(inference_times, **mode_args)
elif args.mode == 'average':
results[delegate_type] = bu.average(inference_times)
# Report benchmarking results
for delegate_type, result in results.items():
print(
f'Inference time {delegate_type} {mode_args_mapping[args.mode]}: '
f'{result:.6f} milliseconds'
)

@@ -0,0 +1,22 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder: load py_library
package(default_visibility = ["//visibility:public"])
py_library(
name = "base_vision_benchmark_api",
srcs = ["base_vision_benchmark_api.py"],
)

@@ -0,0 +1,14 @@
"""Copyright 2023 The MediaPipe Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

@@ -0,0 +1,40 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe vision benchmark base api."""
import time
VISION_TEST_DATA_DIR = 'mediapipe/tasks/testdata/vision'
def benchmark_task(func, image, n_iterations):
"""Collect inference times for a given task after benchmarking.
Args:
func: The task function used for benchmarking.
image: The input MediaPipe Image.
n_iterations: Number of iterations to run the benchmark.
Returns:
List of inference times in milliseconds.
"""
inference_times = []
for _ in range(n_iterations):
start_time_ns = time.time_ns()
# Run the method for the task (e.g., classify)
func(image)
end_time_ns = time.time_ns()
inference_times.append((end_time_ns - start_time_ns) / 1_000_000)
return inference_times
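
A self-contained sketch of how `benchmark_task` is driven; `fake_task` stands in for a real task method such as `classifier.classify`, and real callers pass an `mp.Image` instead of `None`:

```
import time

from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api


def fake_task(_image):
  # Stand-in for e.g. classifier.classify(mp_image); sleeps for roughly 1 ms.
  time.sleep(0.001)


# The image argument is ignored here only because fake_task never reads it.
times_ms = base_vision_benchmark_api.benchmark_task(fake_task, None, 10)
print(f'average: {sum(times_ms) / len(times_ms):.3f} ms')
```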

@@ -0,0 +1,35 @@
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder: load py_binary
package(default_visibility = ["//visibility:public"])
py_binary(
name = "face_aligner_benchmark",
srcs = ["face_aligner_benchmark.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
],
main = "face_aligner_benchmark.py",
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision:benchmark",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/vision:face_aligner",
],
)

@@ -0,0 +1,58 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe face aligner benchmark."""
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision import benchmark
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import face_aligner
_MODEL_FILE = 'face_landmarker_v2.task'
_IMAGE_FILE = 'portrait.jpg'
def run(model_path, n_iterations, delegate):
"""Run a face aligner benchmark.
Args:
model_path: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
Returns:
List of inference times.
"""
# Initialize the face aligner
options = face_aligner.FaceAlignerOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
)
with face_aligner.FaceAligner.create_from_options(options) as aligner:
mp_image = image.Image.create_from_file(
benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
inference_times = base_vision_benchmark_api.benchmark_task(
aligner.align, mp_image, n_iterations
)
return inference_times
if __name__ == '__main__':
benchmark.benchmarker(run, _MODEL_FILE)

@@ -0,0 +1,35 @@
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder: load py_binary
package(default_visibility = ["//visibility:public"])
py_binary(
name = "face_detector_benchmark",
srcs = ["face_detector_benchmark.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
],
main = "face_detector_benchmark.py",
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision:benchmark",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/vision:face_detector",
],
)

@@ -0,0 +1,58 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe image embedder benchmark."""
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision import benchmark
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import face_detector
_MODEL_FILE = 'face_detection_short_range.tflite'
_IMAGE_FILE = 'portrait.jpg'
def run(model_path, n_iterations, delegate):
"""Run a face detector benchmark.
Args:
model_path: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
Returns:
List of inference times.
"""
# Initialize the face detector
options = face_detector.FaceDetectorOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
)
with face_detector.FaceDetector.create_from_options(options) as detector:
mp_image = image.Image.create_from_file(
benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
inference_times = base_vision_benchmark_api.benchmark_task(
detector.detect, mp_image, n_iterations
)
return inference_times
if __name__ == '__main__':
benchmark.benchmarker(run, _MODEL_FILE)

@@ -0,0 +1,35 @@
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder: load py_binary
package(default_visibility = ["//visibility:public"])
py_binary(
name = "face_landmarker_benchmark",
srcs = ["face_landmarker_benchmark.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
],
main = "face_landmarker_benchmark.py",
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision:benchmark",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/vision:face_landmarker",
],
)

@@ -0,0 +1,60 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe face landmarker benchmark."""
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision import benchmark
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import face_landmarker
_MODEL_FILE = 'face_landmarker_v2.task'
_IMAGE_FILE = 'portrait.jpg'
def run(model_path, n_iterations, delegate):
"""Run a face landmarker benchmark.
Args:
model_path: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
Returns:
List of inference times.
"""
# Initialize the face landmarker
options = face_landmarker.FaceLandmarkerOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
)
with face_landmarker.FaceLandmarker.create_from_options(
options
) as landmarker:
mp_image = image.Image.create_from_file(
benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
inference_times = base_vision_benchmark_api.benchmark_task(
landmarker.detect, mp_image, n_iterations
)
return inference_times
if __name__ == '__main__':
benchmark.benchmarker(run, _MODEL_FILE)

@@ -0,0 +1,35 @@
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder: load py_binary
package(default_visibility = ["//visibility:public"])
py_binary(
name = "hand_landmarker_benchmark",
srcs = ["hand_landmarker_benchmark.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
],
main = "hand_landmarker_benchmark.py",
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision:benchmark",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/vision:hand_landmarker",
],
)

@@ -0,0 +1,60 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe hand landmarker benchmark."""
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision import benchmark
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import hand_landmarker
_MODEL_FILE = 'hand_landmarker.task'
_IMAGE_FILE = 'thumb_up.jpg'
def run(model_path, n_iterations, delegate):
"""Run a hand landmarker benchmark.
Args:
model_path: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
Returns:
List of inference times.
"""
# Initialize the hand landmarker
options = hand_landmarker.HandLandmarkerOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
)
with hand_landmarker.HandLandmarker.create_from_options(
options
) as landmarker:
mp_image = image.Image.create_from_file(
benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
inference_times = base_vision_benchmark_api.benchmark_task(
landmarker.detect, mp_image, n_iterations
)
return inference_times
if __name__ == '__main__':
benchmark.benchmarker(run, _MODEL_FILE)

@@ -12,15 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder for internal Python strict library and test compatibility macro.
# Placeholder: load py_binary
package(default_visibility = ["//visibility:public"])
py_library(
py_binary(
name = "image_classifier_benchmark",
srcs = ["image_classifier_benchmark.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
],
main = "image_classifier_benchmark.py",
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision:benchmark",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/vision:image_classifier",
],

@@ -9,7 +9,6 @@ Run this commands to download the TFLite models and image files:
```
cd mediapipe/mediapipe/tasks/python/benchmark/vision/image_classifier
wget -O classifier.tflite -q https://storage.googleapis.com/mediapipe-models/image_classifier/efficientnet_lite0/float32/1/efficientnet_lite0.tflite
wget -O burger.jpg https://storage.googleapis.com/mediapipe-assets/burger.jpg
```
## Run the benchmark
@@ -18,7 +17,7 @@ bazel run -c opt //mediapipe/tasks/python/benchmark/vision/image_classifier:imag
```
* You can optionally specify the `model` parameter to set the TensorFlow Lite
model to be used:
* The default value is `classifier.tflite`
* The default value is `mobilenet_v2_1.0_224.tflite`
* TensorFlow Lite image classification models **with metadata**
* Models from [TensorFlow Hub](https://tfhub.dev/tensorflow/collections/lite/task-library/image-classifier/1)
* Models from [MediaPipe Models](https://developers.google.com/mediapipe/solutions/vision/image_classifier/index#models)
@@ -29,7 +28,7 @@ bazel run -c opt //mediapipe/tasks/python/benchmark/vision/image_classifier:imag
* Default value: `100`
* Example usage:
```
bazel run -c opt :image_classifier_benchmark \
bazel run -c opt :image_classifier_benchmark -- \
--model classifier.tflite \
--iterations 200
```

@@ -11,107 +11,51 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe image classsifier benchmark."""
"""MediaPipe image classifier benchmark."""
import argparse
import time
import numpy as np
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision import benchmark
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import image_classifier
_MODEL_FILE = 'mobilenet_v2_1.0_224.tflite'
_IMAGE_FILE = 'burger.jpg'
def run(
model: str,
n_iterations: int,
delegate: base_options.BaseOptions.Delegate,
percentile: float,
):
"""Run an image classification benchmark.
def run(model_path, n_iterations, delegate):
"""Run an image classifier benchmark.
Args:
model: Path to the TFLite model.
model_path: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
percentile: Percentage for the percentiles to compute. Values must be
between 0 and 100 inclusive.
Returns:
The n-th percentile of the inference times.
List of inference times.
"""
inference_times = []
# Initialize the image classifier
options = image_classifier.ImageClassifierOptions(
base_options=base_options.BaseOptions(
model_asset_path=model, delegate=delegate
model_asset_path=model_path, delegate=delegate
),
max_results=1,
)
classifier = image_classifier.ImageClassifier.create_from_options(options)
mp_image = image.Image.create_from_file(_IMAGE_FILE)
for _ in range(n_iterations):
start_time_ns = time.time_ns()
classifier.classify(mp_image)
end_time_ns = time.time_ns()
# Convert to milliseconds
inference_times.append((end_time_ns - start_time_ns) / 1_000_000)
classifier.close()
return np.percentile(inference_times, percentile)
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
with image_classifier.ImageClassifier.create_from_options(
options
) as classifier:
mp_image = image.Image.create_from_file(
benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
parser.add_argument(
'--model',
help='Path to image classification model.',
required=False,
default='classifier.tflite',
)
parser.add_argument(
'--iterations',
help='Number of iterations for benchmarking.',
type=int,
default=100,
)
parser.add_argument(
'--percentile',
help='Percentile for benchmarking statistics.',
type=float,
default=95.0,
)
args = parser.parse_args()
# Run benchmark on CPU
cpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.CPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on CPU: '
f'{cpu_time:.6f} milliseconds'
)
# Run benchmark on GPU
gpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.GPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on GPU: '
f'{gpu_time:.6f} milliseconds'
inference_times = base_vision_benchmark_api.benchmark_task(
classifier.classify, mp_image, n_iterations
)
return inference_times
if __name__ == '__main__':
main()
benchmark.benchmarker(run, _MODEL_FILE)

@@ -0,0 +1,35 @@
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder: load py_binary
package(default_visibility = ["//visibility:public"])
py_binary(
name = "image_embedder_benchmark",
srcs = ["image_embedder_benchmark.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
],
main = "image_embedder_benchmark.py",
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision:benchmark",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/vision:image_embedder",
],
)

@@ -0,0 +1,58 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe image embedder benchmark."""
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision import benchmark
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import image_embedder
_MODEL_FILE = 'mobilenet_v3_small_100_224_embedder.tflite'
_IMAGE_FILE = 'burger.jpg'
def run(model_path, n_iterations, delegate):
"""Run an image embedding benchmark.
Args:
model_path: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
Returns:
List of inference times.
"""
# Initialize the image embedder
options = image_embedder.ImageEmbedderOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
)
with image_embedder.ImageEmbedder.create_from_options(options) as embedder:
mp_image = image.Image.create_from_file(
benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
inference_times = base_vision_benchmark_api.benchmark_task(
embedder.embed, mp_image, n_iterations
)
return inference_times
if __name__ == '__main__':
benchmark.benchmarker(run, _MODEL_FILE)

@@ -0,0 +1,35 @@
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder: load py_binary
package(default_visibility = ["//visibility:public"])
py_binary(
name = "image_segmenter_benchmark",
srcs = ["image_segmenter_benchmark.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
],
main = "image_segmenter_benchmark.py",
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision:benchmark",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/vision:image_segmenter",
],
)

@@ -0,0 +1,60 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe image segmenter benchmark."""
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision import benchmark
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import image_segmenter
_MODEL_FILE = 'deeplabv3.tflite'
_IMAGE_FILE = 'segmentation_input_rotation0.jpg'
def run(model_path, n_iterations, delegate):
"""Run an image segmenter benchmark.
Args:
model_path: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
Returns:
List of inference times.
"""
# Initialize the image segmenter
options = image_segmenter.ImageSegmenterOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
),
output_confidence_masks=True,
output_category_mask=True,
)
with image_segmenter.ImageSegmenter.create_from_options(options) as segmenter:
mp_image = image.Image.create_from_file(
benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
inference_times = base_vision_benchmark_api.benchmark_task(
segmenter.segment, mp_image, n_iterations
)
return inference_times
if __name__ == '__main__':
benchmark.benchmarker(run, _MODEL_FILE)

@@ -0,0 +1,35 @@
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder: load py_binary
package(default_visibility = ["//visibility:public"])
py_binary(
name = "interactive_segmenter_benchmark",
srcs = ["interactive_segmenter_benchmark.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
],
main = "interactive_segmenter_benchmark.py",
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision:benchmark",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/vision:interactive_segmenter",
],
)

@@ -0,0 +1,68 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe interactive segmenter benchmark."""
import functools
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision import benchmark
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
from mediapipe.tasks.python.components.containers import keypoint
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import interactive_segmenter
_MODEL_FILE = 'ptm_512_hdt_ptm_woid.tflite'
_IMAGE_FILE = 'cats_and_dogs.jpg'
def run(model_path, n_iterations, delegate):
"""Run an interactive segmenter benchmark.
Args:
model_path: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
Returns:
List of inference times.
"""
# Initialize the image segmenter
options = interactive_segmenter.InteractiveSegmenterOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
),
output_category_mask=True,
output_confidence_masks=False,
)
roi = interactive_segmenter.RegionOfInterest(
format=interactive_segmenter.RegionOfInterest.Format.KEYPOINT,
keypoint=keypoint.NormalizedKeypoint(0.44, 0.7),
)
with interactive_segmenter.InteractiveSegmenter.create_from_options(
options
) as segmenter:
mp_image = image.Image.create_from_file(
benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
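# segmenter.segment() also needs the region of interest, so functools.partial
# binds `roi` to form the single-image callable that benchmark_task expects.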
inference_times = base_vision_benchmark_api.benchmark_task(
functools.partial(segmenter.segment, roi=roi), mp_image, n_iterations
)
return inference_times
if __name__ == '__main__':
benchmark.benchmarker(run, _MODEL_FILE)

@@ -0,0 +1,35 @@
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder: load py_binary
package(default_visibility = ["//visibility:public"])
py_binary(
name = "object_detector_benchmark",
srcs = ["object_detector_benchmark.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
],
main = "object_detector_benchmark.py",
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision:benchmark",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/vision:object_detector",
],
)

@@ -0,0 +1,58 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe object detector benchmark."""
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision import benchmark
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import object_detector
_MODEL_FILE = 'coco_efficientdet_lite0_v1_1.0_quant_2021_09_06.tflite'
_IMAGE_FILE = 'cats_and_dogs.jpg'
def run(model_path, n_iterations, delegate):
"""Run an object detector benchmark.
Args:
model_path: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
Returns:
List of inference times.
"""
# Initialize the object detector
options = object_detector.ObjectDetectorOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
)
with object_detector.ObjectDetector.create_from_options(options) as detector:
mp_image = image.Image.create_from_file(
benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
inference_times = base_vision_benchmark_api.benchmark_task(
detector.detect, mp_image, n_iterations
)
return inference_times
if __name__ == '__main__':
benchmark.benchmarker(run, _MODEL_FILE)

@@ -0,0 +1,35 @@
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder: load py_binary
package(default_visibility = ["//visibility:public"])
py_binary(
name = "pose_landmarker_benchmark",
srcs = ["pose_landmarker_benchmark.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
],
main = "pose_landmarker_benchmark.py",
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision:benchmark",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/vision:pose_landmarker",
],
)

@@ -0,0 +1,60 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe pose landmarker benchmark."""
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision import benchmark
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import pose_landmarker
_MODEL_FILE = 'pose_landmarker.task'
_IMAGE_FILE = 'pose.jpg'
def run(model_path, n_iterations, delegate):
"""Run an pose landmarker benchmark.
Args:
model_path: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
Returns:
List of inference times.
"""
# Initialize the pose landmarker
options = pose_landmarker.PoseLandmarkerOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
)
with pose_landmarker.PoseLandmarker.create_from_options(
options
) as landmarker:
mp_image = image.Image.create_from_file(
benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
inference_times = base_vision_benchmark_api.benchmark_task(
landmarker.detect, mp_image, n_iterations
)
return inference_times
if __name__ == '__main__':
benchmark.benchmarker(run, _MODEL_FILE)

@@ -90,6 +90,7 @@ mediapipe_files(srcs = [
"pose_landmark_lite.tflite",
"pose_landmarker.task",
"pose_segmentation_mask_golden.png",
"ptm_512_hdt_ptm_woid.tflite",
"right_hands.jpg",
"right_hands_rotated.jpg",
"segmentation_golden_rotation0.png",
@@ -202,6 +203,7 @@ filegroup(
"pose_detection.tflite",
"pose_landmark_lite.tflite",
"pose_landmarker.task",
"ptm_512_hdt_ptm_woid.tflite",
"selfie_segm_128_128_3.tflite",
"selfie_segm_144_256_3.tflite",
"selfie_segmentation.tflite",