Code cleanup and revised benchmarking API

Kinar 2023-11-16 16:26:29 -08:00
parent 8f32fda6d8
commit 46c6c9403c
26 changed files with 289 additions and 756 deletions

View File

@@ -14,6 +14,17 @@
"""Benchmark utils for MediaPipe Tasks."""
import os
import numpy as np
def nth_percentile(inference_times, percentile):
"""Calculate the nth percentile of the inference times."""
return np.percentile(inference_times, percentile)
def average(inference_times):
"""Calculate the average of the inference times."""
return np.mean(inference_times)
def get_test_data_path(test_srcdir, file_or_dirname_path: str) -> str:
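For context, the `get_model_path` helper that `benchmark.py` calls below is not part of this hunk; a minimal sketch of the behavior its call sites imply (hypothetical — the committed implementation in `benchmark_utils.py` may differ, e.g. by validating that the file exists):

```python
def get_model_path(custom_model, default_model_path):
    """Returns the user-supplied model path if given, else the default.

    Sketch inferred from call sites such as
    get_model_path(args.model, default_model_path); not the committed code.
    """
    return custom_model if custom_model else default_model_path
```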

View File

@@ -0,0 +1,33 @@
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder for internal Python strict binary compatibility macro.
package(default_visibility = ["//visibility:public"])
py_binary(
name = "benchmark",
main = "benchmark.py",
srcs = ["benchmark.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
],
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
],
)

View File

@@ -0,0 +1,104 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe vision benchmarker."""
import argparse
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.benchmark import benchmark_utils as bu
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
def benchmarker(benchmark_function, default_model_name):
"""Runs a benchmark with the given task function and model, then reports
the benchmarking statistics.
Args:
benchmark_function: Callable that executes the task being benchmarked. It
is invoked with a model path, an iteration count, and a delegate, and
must return the list of per-inference times.
default_model_name: Name or path of the model to benchmark when no model
is specified via the --model flag.
"""
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--mode',
help='Benchmarking mode (e.g., "nth_percentile").',
required=False,
default='nth_percentile'
)
parser.add_argument(
'--model',
help='Path to the model.',
default=None
)
parser.add_argument(
'--iterations',
help='Number of iterations for benchmarking.',
type=int,
default=100
)
parser.add_argument(
'--percentile',
help='Percentile for benchmarking statistics.',
type=float,
default=95.0
)
args = parser.parse_args()
# Get the model path
default_model_path = bu.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR,
default_model_name
)
model_path = bu.get_model_path(args.model, default_model_path)
# Define a mapping of modes to their respective function argument lists
mode_args_mapping = {
'nth_percentile': {'percentile': args.percentile},
'average': {}
# Add other modes and their arguments here
}
# Check if the mode is supported and get the argument dictionary
if args.mode not in mode_args_mapping:
raise ValueError(f'Unsupported benchmarking mode: {args.mode}')
mode_args = mode_args_mapping[args.mode]
# Run the benchmark for both CPU and GPU and calculate results based on mode
results = {}
for delegate_type in [
base_options.BaseOptions.Delegate.CPU,
base_options.BaseOptions.Delegate.GPU
]:
inference_times = benchmark_function(model_path, args.iterations,
delegate_type)
# Calculate the benchmark result based on the mode
if args.mode == 'nth_percentile':
results[delegate_type] = bu.nth_percentile(inference_times, **mode_args)
elif args.mode == 'average':
results[delegate_type] = bu.average(inference_times)
# Report benchmarking results
for delegate_type, result in results.items():
print(f'Inference time {delegate_type} {args.mode} {mode_args}: '
f'{result:.6f} milliseconds')
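With this harness in place, a task-specific benchmark module only needs to expose a `run(model_path, n_iterations, delegate)` callable that returns the per-inference times in milliseconds and hand it to `benchmarker`, as the task files below do. A minimal sketch of the pattern (`my_model.tflite` is a hypothetical asset and the `run` body is elided):

```python
from mediapipe.tasks.python.benchmark.vision import benchmark

_MODEL_FILE = 'my_model.tflite'  # hypothetical model in the vision test data dir


def run(model_path, n_iterations, delegate):
  """Creates the task with the given delegate, runs inference n_iterations
  times, and returns the list of inference times in milliseconds."""
  ...  # body elided in this sketch; see the task benchmarks below


if __name__ == '__main__':
  benchmark.benchmarker(run, _MODEL_FILE)
```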

View File

@@ -12,25 +12,21 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe vision benchmark base api."""
import os
import time
import numpy as np
VISION_TEST_DATA_DIR = 'mediapipe/tasks/testdata/vision'
def nth_percentile(func, image, n_iterations, percentile):
"""Run a nth percentile benchmark for a given task using the function.
def benchmark_task(func, image, n_iterations):
"""Collect inference times for a given task after benchmarking
Args:
func: The method associated with a given task used for benchmarking.
func: The task function used for benchmarking.
image: The input MediaPipe Image.
n_iterations: Number of iterations to run the benchmark.
percentile: Percentage for the percentiles to compute. Values must be
between 0 and 100 inclusive.
Returns:
The n-th percentile of the inference times in milliseconds.
List of inference times in milliseconds.
"""
inference_times = []
@@ -41,4 +37,4 @@ def nth_percentile(func, image, n_iterations, percentile):
end_time_ns = time.time_ns()
inference_times.append((end_time_ns - start_time_ns) / 1_000_000)
return np.percentile(inference_times, percentile)
return inference_times
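Assembled from the two hunks above, the helper after this commit reads approximately as follows (the loop header sits in the elided context and is inferred from the timing lines shown):

```python
import time


def benchmark_task(func, image, n_iterations):
  """Runs func(image) n_iterations times; returns per-call times in ms."""
  inference_times = []
  for _ in range(n_iterations):  # inferred; not visible in the hunks
    start_time_ns = time.time_ns()
    func(image)
    end_time_ns = time.time_ns()
    inference_times.append((end_time_ns - start_time_ns) / 1_000_000)
  return inference_times
```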

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder for internal Python strict library and test compatibility macro.
# Placeholder for internal Python strict binary compatibility macro.
package(default_visibility = ["//visibility:public"])
@@ -30,5 +30,6 @@ py_binary(
"//mediapipe/tasks/python/vision:face_aligner",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
"//mediapipe/tasks/python/benchmark/vision:benchmark",
],
)

View File

@@ -13,45 +13,32 @@
# limitations under the License.
"""MediaPipe face aligner benchmark."""
import argparse
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import face_aligner
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
from mediapipe.tasks.python.benchmark.vision import benchmark
_MODEL_FILE = 'face_landmarker_v2.task'
_IMAGE_FILE = 'portrait.jpg'
def run(
model: str,
n_iterations: int,
delegate: base_options.BaseOptions.Delegate,
percentile: float,
):
"""Run an face aligner benchmark.
def run(model_path, n_iterations, delegate):
"""Run a face aligner benchmark.
Args:
model: Path to the TFLite model.
model_path: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
percentile: Percentage for the percentiles to compute. Values must be
between 0 and 100 inclusive.
Returns:
The n-th percentile of the inference times.
List of inference times.
"""
# Initialize the face aligner
default_model_path = benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE
)
model_path = benchmark_utils.get_model_path(model, default_model_path)
options = face_aligner.FaceAlignerOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
)
with face_aligner.FaceAligner.create_from_options(options) as aligner:
@@ -60,61 +47,11 @@ def run(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
# Run the benchmark and return the nth percentile of the inference times
nth_percentile = base_vision_benchmark_api.nth_percentile(
aligner.align, mp_image, n_iterations, percentile
inference_times = base_vision_benchmark_api.benchmark_task(
aligner.align, mp_image, n_iterations
)
return nth_percentile
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--model',
help='Path to face aligner task.',
required=False,
default=None,
)
parser.add_argument(
'--iterations',
help='Number of iterations for benchmarking.',
type=int,
default=100,
)
parser.add_argument(
'--percentile',
help='Percentile for benchmarking statistics.',
type=float,
default=95.0,
)
args = parser.parse_args()
# Run benchmark on CPU
cpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.CPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on CPU: '
f'{cpu_time:.6f} milliseconds'
)
# Run benchmark on GPU
gpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.GPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on GPU: '
f'{gpu_time:.6f} milliseconds'
)
return inference_times
if __name__ == '__main__':
main()
benchmark.benchmarker(run, _MODEL_FILE)

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder for internal Python strict library and test compatibility macro.
# Placeholder for internal Python strict binary compatibility macro.
package(default_visibility = ["//visibility:public"])
@@ -30,5 +30,6 @@ py_binary(
"//mediapipe/tasks/python/vision:face_detector",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
"//mediapipe/tasks/python/benchmark/vision:benchmark",
],
)

View File

@@ -11,47 +11,35 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe face detector benchmark."""
import argparse
"""MediaPipe image embedder benchmark."""
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import face_detector
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
from mediapipe.tasks.python.benchmark.vision import benchmark
_MODEL_FILE = 'face_detection_short_range.tflite'
_IMAGE_FILE = 'portrait.jpg'
def run(
model: str,
n_iterations: int,
delegate: base_options.BaseOptions.Delegate,
percentile: float,
):
"""Run an face detector benchmark.
def run(model_path, n_iterations, delegate):
"""Run a face detector benchmark.
Args:
model: Path to the TFLite model.
model_path: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
percentile: Percentage for the percentiles to compute. Values must be
between 0 and 100 inclusive.
Returns:
The n-th percentile of the inference times.
List of inference times.
"""
# Initialize the face detector
default_model_path = benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE
)
model_path = benchmark_utils.get_model_path(model, default_model_path)
options = face_detector.FaceDetectorOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
)
with face_detector.FaceDetector.create_from_options(options) as detector:
@@ -60,61 +48,11 @@ def run(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
# Run the benchmark and return the nth percentile of the inference times
nth_percentile = base_vision_benchmark_api.nth_percentile(
detector.detect, mp_image, n_iterations, percentile
inference_times = base_vision_benchmark_api.benchmark_task(
detector.detect, mp_image, n_iterations
)
return nth_percentile
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--model',
help='Path to face detector task.',
required=False,
default=None,
)
parser.add_argument(
'--iterations',
help='Number of iterations for benchmarking.',
type=int,
default=100,
)
parser.add_argument(
'--percentile',
help='Percentile for benchmarking statistics.',
type=float,
default=95.0,
)
args = parser.parse_args()
# Run benchmark on CPU
cpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.CPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on CPU: '
f'{cpu_time:.6f} milliseconds'
)
# Run benchmark on GPU
gpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.GPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on GPU: '
f'{gpu_time:.6f} milliseconds'
)
return inference_times
if __name__ == '__main__':
main()
benchmark.benchmarker(run, _MODEL_FILE)

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder for internal Python strict library and test compatibility macro.
# Placeholder for internal Python strict binary compatibility macro.
package(default_visibility = ["//visibility:public"])
@@ -30,5 +30,6 @@ py_binary(
"//mediapipe/tasks/python/vision:face_landmarker",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
"//mediapipe/tasks/python/benchmark/vision:benchmark",
],
)

View File

@@ -13,41 +13,29 @@
# limitations under the License.
"""MediaPipe face landmarker benchmark."""
import argparse
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import face_landmarker
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
from mediapipe.tasks.python.benchmark.vision import benchmark
_MODEL_FILE = 'face_landmarker_v2.task'
_IMAGE_FILE = 'portrait.jpg'
def run(
model: str,
n_iterations: int,
delegate: base_options.BaseOptions.Delegate,
percentile: float,
):
"""Run an face landmarker benchmark.
def run(model_path, n_iterations, delegate):
"""Run a face landmarker benchmark.
Args:
model: Path to the TFLite model.
model_path: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
percentile: Percentage for the percentiles to compute. Values must be
between 0 and 100 inclusive.
Returns:
The n-th percentile of the inference times.
List of inference times.
"""
# Initialize the face landmarker
default_model_path = benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE
)
model_path = benchmark_utils.get_model_path(model, default_model_path)
options = face_landmarker.FaceLandmarkerOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
@@ -60,61 +48,11 @@ def run(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
# Run the benchmark and return the nth percentile of the inference times
nth_percentile = base_vision_benchmark_api.nth_percentile(
landmarker.detect, mp_image, n_iterations, percentile
inference_times = base_vision_benchmark_api.benchmark_task(
landmarker.detect, mp_image, n_iterations
)
return nth_percentile
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--model',
help='Path to face landmarker task.',
required=False,
default=None,
)
parser.add_argument(
'--iterations',
help='Number of iterations for benchmarking.',
type=int,
default=100,
)
parser.add_argument(
'--percentile',
help='Percentile for benchmarking statistics.',
type=float,
default=95.0,
)
args = parser.parse_args()
# Run benchmark on CPU
cpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.CPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on CPU: '
f'{cpu_time:.6f} milliseconds'
)
# Run benchmark on GPU
gpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.GPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on GPU: '
f'{gpu_time:.6f} milliseconds'
)
return inference_times
if __name__ == '__main__':
main()
benchmark.benchmarker(run, _MODEL_FILE)

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder for internal Python strict library and test compatibility macro.
# Placeholder for internal Python strict binary compatibility macro.
package(default_visibility = ["//visibility:public"])
@@ -30,5 +30,6 @@ py_binary(
"//mediapipe/tasks/python/vision:hand_landmarker",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
"//mediapipe/tasks/python/benchmark/vision:benchmark",
],
)

View File

@@ -13,45 +13,33 @@
# limitations under the License.
"""MediaPipe hand landmarker benchmark."""
import argparse
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import hand_landmarker
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
from mediapipe.tasks.python.benchmark.vision import benchmark
_MODEL_FILE = 'hand_landmarker.task'
_IMAGE_FILE = 'thumb_up.jpg'
def run(
model: str,
n_iterations: int,
delegate: base_options.BaseOptions.Delegate,
percentile: float,
):
"""Run an hand landmarker benchmark.
def run(model_path, n_iterations, delegate):
"""Run a hand landmarker benchmark.
Args:
model: Path to the TFLite model.
model_path: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
percentile: Percentage for the percentiles to compute. Values must be
between 0 and 100 inclusive.
Returns:
The n-th percentile of the inference times.
List of inference times.
"""
# Initialize the hand landmarker
default_model_path = benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE
)
model_path = benchmark_utils.get_model_path(model, default_model_path)
options = hand_landmarker.HandLandmarkerOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
)
with hand_landmarker.HandLandmarker.create_from_options(options) as landmarker:
@@ -60,61 +48,11 @@ def run(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
# Run the benchmark and return the nth percentile of the inference times
nth_percentile = base_vision_benchmark_api.nth_percentile(
landmarker.detect, mp_image, n_iterations, percentile
inference_times = base_vision_benchmark_api.benchmark_task(
landmarker.detect, mp_image, n_iterations
)
return nth_percentile
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--model',
help='Path to hand landmarker task.',
required=False,
default=None,
)
parser.add_argument(
'--iterations',
help='Number of iterations for benchmarking.',
type=int,
default=100,
)
parser.add_argument(
'--percentile',
help='Percentile for benchmarking statistics.',
type=float,
default=95.0,
)
args = parser.parse_args()
# Run benchmark on CPU
cpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.CPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on CPU: '
f'{cpu_time:.6f} milliseconds'
)
# Run benchmark on GPU
gpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.GPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on GPU: '
f'{gpu_time:.6f} milliseconds'
)
return inference_times
if __name__ == '__main__':
main()
benchmark.benchmarker(run, _MODEL_FILE)

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder for internal Python strict library and test compatibility macro.
# Placeholder for internal Python strict binary compatibility macro.
package(default_visibility = ["//visibility:public"])
@@ -30,5 +30,6 @@ py_binary(
"//mediapipe/tasks/python/vision:image_classifier",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
"//mediapipe/tasks/python/benchmark/vision:benchmark",
],
)

View File

@@ -9,7 +9,6 @@ Run these commands to download the TFLite models and image files:
```
cd mediapipe/mediapipe/tasks/python/benchmark/vision/image_classifier
wget -O classifier.tflite -q https://storage.googleapis.com/mediapipe-models/image_classifier/efficientnet_lite0/float32/1/efficientnet_lite0.tflite
wget -O burger.jpg https://storage.googleapis.com/mediapipe-assets/burger.jpg
```
## Run the benchmark
@@ -18,7 +17,7 @@ bazel run -c opt //mediapipe/tasks/python/benchmark/vision/image_classifier:imag
```
* You can optionally specify the `model` parameter to set the TensorFlow Lite
model to be used:
* The default value is `classifier.tflite`
* The default value is `mobilenet_v2_1.0_224.tflite`
* TensorFlow Lite image classification models **with metadata**
* Models from [TensorFlow Hub](https://tfhub.dev/tensorflow/collections/lite/task-library/image-classifier/1)
* Models from [MediaPipe Models](https://developers.google.com/mediapipe/solutions/vision/image_classifier/index#models)
@@ -29,7 +28,7 @@ bazel run -c opt //mediapipe/tasks/python/benchmark/vision/image_classifier:imag
* Default value: `100`
* Example usage:
```
bazel run -c opt :image_classifier_benchmark \
bazel run -c opt :image_classifier_benchmark -- \
--model classifier.tflite \
--iterations 200
```
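With the revised API, the shared flags defined in `benchmark.py` (`--mode`, `--percentile`) should apply here as well, e.g.:

```
bazel run -c opt :image_classifier_benchmark -- \
    --mode nth_percentile \
    --percentile 95 \
    --iterations 200
```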

View File

@@ -11,48 +11,36 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe image classsifier benchmark."""
import argparse
"""MediaPipe image classifier benchmark."""
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import image_classifier
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
from mediapipe.tasks.python.benchmark.vision import benchmark
_MODEL_FILE = 'mobilenet_v2_1.0_224.tflite'
_IMAGE_FILE = 'burger.jpg'
def run(
model: str,
n_iterations: int,
delegate: base_options.BaseOptions.Delegate,
percentile: float,
):
"""Run an image classification benchmark.
def run(model_path, n_iterations, delegate):
"""Run an image classifier benchmark.
Args:
model: Path to the TFLite model.
model_path: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
percentile: Percentage for the percentiles to compute. Values must be
between 0 and 100 inclusive.
Returns:
The n-th percentile of the inference times.
List of inference times.
"""
# Initialize the image classifier
default_model_path = benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE
)
model_path = benchmark_utils.get_model_path(model, default_model_path)
options = image_classifier.ImageClassifierOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
),
max_results=1,
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
),
max_results=1,
)
with image_classifier.ImageClassifier.create_from_options(options) as classifier:
@@ -61,61 +49,11 @@ def run(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
# Run the benchmark and return the nth percentile of the inference times
nth_percentile = base_vision_benchmark_api.nth_percentile(
classifier.classify, mp_image, n_iterations, percentile
inference_times = base_vision_benchmark_api.benchmark_task(
classifier.classify, mp_image, n_iterations
)
return nth_percentile
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--model',
help='Path to image classification model.',
required=False,
default=None,
)
parser.add_argument(
'--iterations',
help='Number of iterations for benchmarking.',
type=int,
default=100,
)
parser.add_argument(
'--percentile',
help='Percentile for benchmarking statistics.',
type=float,
default=95.0,
)
args = parser.parse_args()
# Run benchmark on CPU
cpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.CPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on CPU: '
f'{cpu_time:.6f} milliseconds'
)
# Run benchmark on GPU
gpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.GPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on GPU: '
f'{gpu_time:.6f} milliseconds'
)
return inference_times
if __name__ == '__main__':
main()
benchmark.benchmarker(run, _MODEL_FILE)

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder for internal Python strict library and test compatibility macro.
# Placeholder for internal Python strict binary compatibility macro.
package(default_visibility = ["//visibility:public"])
@@ -30,5 +30,6 @@ py_binary(
"//mediapipe/tasks/python/vision:image_embedder",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
"//mediapipe/tasks/python/benchmark/vision:benchmark",
],
)

View File

@@ -13,41 +13,29 @@
# limitations under the License.
"""MediaPipe image embedder benchmark."""
import argparse
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import image_embedder
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
from mediapipe.tasks.python.benchmark.vision import benchmark
_MODEL_FILE = 'mobilenet_v3_small_100_224_embedder.tflite'
_IMAGE_FILE = 'burger.jpg'
def run(
model: str,
n_iterations: int,
delegate: base_options.BaseOptions.Delegate,
percentile: float,
):
"""Run an image embedding extraction benchmark.
def run(model_path, n_iterations, delegate):
"""Run an image embedding benchmark.
Args:
model: Path to the TFLite model.
model_path: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
percentile: Percentage for the percentiles to compute. Values must be
between 0 and 100 inclusive.
Returns:
The n-th percentile of the inference times.
List of inference times.
"""
# Initialize the image embedder
default_model_path = benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE
)
model_path = benchmark_utils.get_model_path(model, default_model_path)
options = image_embedder.ImageEmbedderOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
@@ -60,61 +48,11 @@ def run(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
# Run the benchmark and return the nth percentile of the inference times
nth_percentile = base_vision_benchmark_api.nth_percentile(
embedder.embed, mp_image, n_iterations, percentile
inference_times = base_vision_benchmark_api.benchmark_task(
embedder.embed, mp_image, n_iterations
)
return nth_percentile
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--model',
help='Path to image embedding extraction model.',
required=False,
default=None,
)
parser.add_argument(
'--iterations',
help='Number of iterations for benchmarking.',
type=int,
default=100,
)
parser.add_argument(
'--percentile',
help='Percentile for benchmarking statistics.',
type=float,
default=95.0,
)
args = parser.parse_args()
# Run benchmark on CPU
cpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.CPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on CPU: '
f'{cpu_time:.6f} milliseconds'
)
# Run benchmark on GPU
gpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.GPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on GPU: '
f'{gpu_time:.6f} milliseconds'
)
return inference_times
if __name__ == '__main__':
main()
benchmark.benchmarker(run, _MODEL_FILE)

View File

@@ -30,5 +30,6 @@ py_binary(
"//mediapipe/tasks/python/vision:image_segmenter",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
"//mediapipe/tasks/python/benchmark/vision:benchmark",
],
)

View File

@@ -13,41 +13,29 @@
# limitations under the License.
"""MediaPipe image segmenter benchmark."""
import argparse
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import image_segmenter
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
from mediapipe.tasks.python.benchmark.vision import benchmark
_MODEL_FILE = 'deeplabv3.tflite'
_IMAGE_FILE = 'segmentation_input_rotation0.jpg'
def run(
model: str,
n_iterations: int,
delegate: base_options.BaseOptions.Delegate,
percentile: float,
):
"""Run an image segmentation benchmark.
def run(model_path, n_iterations, delegate):
"""Run an image segmenter benchmark.
Args:
model: Path to the TFLite model.
model_path: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
percentile: Percentage for the percentiles to compute. Values must be
between 0 and 100 inclusive.
Returns:
The n-th percentile of the inference times.
List of inference times.
"""
# Initialize the image segmenter
default_model_path = benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE
)
model_path = benchmark_utils.get_model_path(model, default_model_path)
options = image_segmenter.ImageSegmenterOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
@@ -61,61 +49,11 @@ def run(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
# Run the benchmark and return the nth percentile of the inference times
nth_percentile = base_vision_benchmark_api.nth_percentile(
segmenter.segment, mp_image, n_iterations, percentile
inference_times = base_vision_benchmark_api.benchmark_task(
segmenter.segment, mp_image, n_iterations
)
return nth_percentile
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--model',
help='Path to image segmentation model.',
required=False,
default=None,
)
parser.add_argument(
'--iterations',
help='Number of iterations for benchmarking.',
type=int,
default=100,
)
parser.add_argument(
'--percentile',
help='Percentile for benchmarking statistics.',
type=float,
default=95.0,
)
args = parser.parse_args()
# Run benchmark on CPU
cpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.CPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on CPU: '
f'{cpu_time:.6f} milliseconds'
)
# Run benchmark on GPU
gpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.GPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on GPU: '
f'{gpu_time:.6f} milliseconds'
)
return inference_times
if __name__ == '__main__':
main()
benchmark.benchmarker(run, _MODEL_FILE)

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder for internal Python strict library and test compatibility macro.
# Placeholder for internal Python strict binary compatibility macro.
package(default_visibility = ["//visibility:public"])
@@ -30,5 +30,6 @@ py_binary(
"//mediapipe/tasks/python/vision:interactive_segmenter",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
"//mediapipe/tasks/python/benchmark/vision:benchmark",
],
)

View File

@@ -12,45 +12,32 @@
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe interactive segmenter benchmark."""
from functools import partial
import argparse
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.components.containers import keypoint
from mediapipe.tasks.python.vision import interactive_segmenter
from mediapipe.tasks.python.components.containers import keypoint
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
from mediapipe.tasks.python.benchmark.vision import benchmark
_MODEL_FILE = 'deeplabv3.tflite'
_IMAGE_FILE = 'segmentation_input_rotation0.jpg'
_MODEL_FILE = 'ptm_512_hdt_ptm_woid.tflite'
_IMAGE_FILE = 'cats_and_dogs.jpg'
def run(
model: str,
n_iterations: int,
delegate: base_options.BaseOptions.Delegate,
percentile: float,
):
"""Run an interactive segmentation benchmark.
def run(model_path, n_iterations, delegate):
"""Run an interactive segmenter benchmark.
Args:
model: Path to the TFLite model.
model_path: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
percentile: Percentage for the percentiles to compute. Values must be
between 0 and 100 inclusive.
Returns:
The n-th percentile of the inference times.
List of inference times.
"""
# Initialize the interactive segmenter
default_model_path = benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE
)
model_path = benchmark_utils.get_model_path(model, default_model_path)
# Initialize the interactive segmenter
options = interactive_segmenter.InteractiveSegmenterOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
@@ -68,61 +55,11 @@ def run(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
# Run the benchmark and return the nth percentile of the inference times
nth_percentile = base_vision_benchmark_api.nth_percentile(
partial(segmenter.segment, roi=roi), mp_image, n_iterations, percentile
inference_times = base_vision_benchmark_api.benchmark_task(
partial(segmenter.segment, roi=roi), mp_image, n_iterations
)
return nth_percentile
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--model',
help='Path to interactive segmentation model.',
required=False,
default=None,
)
parser.add_argument(
'--iterations',
help='Number of iterations for benchmarking.',
type=int,
default=100,
)
parser.add_argument(
'--percentile',
help='Percentile for benchmarking statistics.',
type=float,
default=95.0,
)
args = parser.parse_args()
# Run benchmark on CPU
cpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.CPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on CPU: '
f'{cpu_time:.6f} milliseconds'
)
# Run benchmark on GPU
gpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.GPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on GPU: '
f'{gpu_time:.6f} milliseconds'
)
return inference_times
if __name__ == '__main__':
main()
benchmark.benchmarker(run, _MODEL_FILE)
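One detail worth noting: `benchmark_task` invokes its callable with the image as the sole argument, so extra arguments must be bound ahead of time, which is why this file uses `functools.partial`. Schematically (with `roi` standing in for the keypoint-based region built in the elided context):

```python
from functools import partial

# benchmark_task calls func(image); pre-bind the region of interest.
segment_with_roi = partial(segmenter.segment, roi=roi)
inference_times = base_vision_benchmark_api.benchmark_task(
    segment_with_roi, mp_image, n_iterations
)
```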

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder for internal Python strict library and test compatibility macro.
# Placeholder for internal Python strict binary compatibility macro.
package(default_visibility = ["//visibility:public"])
@@ -30,5 +30,6 @@ py_binary(
"//mediapipe/tasks/python/vision:object_detector",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
"//mediapipe/tasks/python/benchmark/vision:benchmark",
],
)

View File

@@ -13,45 +13,33 @@
# limitations under the License.
"""MediaPipe object detector benchmark."""
import argparse
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import object_detector
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
from mediapipe.tasks.python.benchmark.vision import benchmark
_MODEL_FILE = 'coco_efficientdet_lite0_v1_1.0_quant_2021_09_06.tflite'
_IMAGE_FILE = 'cats_and_dogs.jpg'
def run(
model: str,
n_iterations: int,
delegate: base_options.BaseOptions.Delegate,
percentile: float,
):
def run(model_path, n_iterations, delegate):
"""Run an object detector benchmark.
Args:
model: Path to the TFLite model.
model_path: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
percentile: Percentage for the percentiles to compute. Values must be
between 0 and 100 inclusive.
Returns:
The n-th percentile of the inference times.
List of inference times.
"""
# Initialize the object detector
default_model_path = benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE
)
model_path = benchmark_utils.get_model_path(model, default_model_path)
options = object_detector.ObjectDetectorOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
)
with object_detector.ObjectDetector.create_from_options(options) as detector:
@@ -60,61 +48,11 @@ def run(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
# Run the benchmark and return the nth percentile of the inference times
nth_percentile = base_vision_benchmark_api.nth_percentile(
detector.detect, mp_image, n_iterations, percentile
inference_times = base_vision_benchmark_api.benchmark_task(
detector.detect, mp_image, n_iterations
)
return nth_percentile
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--model',
help='Path to object detector model.',
required=False,
default=None,
)
parser.add_argument(
'--iterations',
help='Number of iterations for benchmarking.',
type=int,
default=100,
)
parser.add_argument(
'--percentile',
help='Percentile for benchmarking statistics.',
type=float,
default=95.0,
)
args = parser.parse_args()
# Run benchmark on CPU
cpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.CPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on CPU: '
f'{cpu_time:.6f} milliseconds'
)
# Run benchmark on GPU
gpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.GPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on GPU: '
f'{gpu_time:.6f} milliseconds'
)
return inference_times
if __name__ == '__main__':
main()
benchmark.benchmarker(run, _MODEL_FILE)

View File

@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder for internal Python strict library and test compatibility macro.
# Placeholder for internal Python strict binary compatibility macro.
package(default_visibility = ["//visibility:public"])
@@ -30,5 +30,6 @@ py_binary(
"//mediapipe/tasks/python/vision:pose_landmarker",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
"//mediapipe/tasks/python/benchmark/vision:benchmark",
],
)

View File

@@ -13,45 +13,33 @@
# limitations under the License.
"""MediaPipe pose landmarker benchmark."""
import argparse
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import pose_landmarker
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
from mediapipe.tasks.python.benchmark.vision import benchmark
_MODEL_FILE = 'pose_landmarker.task'
_IMAGE_FILE = 'pose.jpg'
def run(
model: str,
n_iterations: int,
delegate: base_options.BaseOptions.Delegate,
percentile: float,
):
def run(model_path, n_iterations, delegate):
"""Run an pose landmarker benchmark.
Args:
model: Path to the TFLite model.
model_path: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
percentile: Percentage for the percentiles to compute. Values must be
between 0 and 100 inclusive.
Returns:
The n-th percentile of the inference times.
List of inference times.
"""
# Initialize the pose landmarker
default_model_path = benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE
)
model_path = benchmark_utils.get_model_path(model, default_model_path)
options = pose_landmarker.PoseLandmarkerOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
)
with pose_landmarker.PoseLandmarker.create_from_options(options) as landmarker:
@@ -60,61 +48,11 @@ def run(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
# Run the benchmark and return the nth percentile of the inference times
nth_percentile = base_vision_benchmark_api.nth_percentile(
landmarker.detect, mp_image, n_iterations, percentile
inference_times = base_vision_benchmark_api.benchmark_task(
landmarker.detect, mp_image, n_iterations
)
return nth_percentile
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--model',
help='Path to pose landmarker task.',
required=False,
default=None,
)
parser.add_argument(
'--iterations',
help='Number of iterations for benchmarking.',
type=int,
default=100,
)
parser.add_argument(
'--percentile',
help='Percentile for benchmarking statistics.',
type=float,
default=95.0,
)
args = parser.parse_args()
# Run benchmark on CPU
cpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.CPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on CPU: '
f'{cpu_time:.6f} milliseconds'
)
# Run benchmark on GPU
gpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.GPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on GPU: '
f'{gpu_time:.6f} milliseconds'
)
return inference_times
if __name__ == '__main__':
main()
benchmark.benchmarker(run, _MODEL_FILE)

View File

@@ -90,6 +90,7 @@ mediapipe_files(srcs = [
"pose_landmark_lite.tflite",
"pose_landmarker.task",
"pose_segmentation_mask_golden.png",
"ptm_512_hdt_ptm_woid.tflite",
"right_hands.jpg",
"right_hands_rotated.jpg",
"segmentation_golden_rotation0.png",
@@ -202,6 +203,7 @@ filegroup(
"pose_detection.tflite",
"pose_landmark_lite.tflite",
"pose_landmarker.task",
"ptm_512_hdt_ptm_woid.tflite",
"selfie_segm_128_128_3.tflite",
"selfie_segm_144_256_3.tflite",
"selfie_segmentation.tflite",