Added more benchmark scripts for the Tasks Python API

This commit is contained in:
Kinar 2023-11-16 12:53:36 -08:00
parent 7287056674
commit 8f32fda6d8
25 changed files with 1586 additions and 18 deletions

View File

@ -0,0 +1,24 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder for internal Python strict library and test compatibility macro.
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
py_library(
name = "benchmark_utils",
srcs = ["benchmark_utils.py"]
)

View File

@ -0,0 +1,58 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark utils for MediaPipe Tasks."""
import os
def get_test_data_path(test_srcdir, file_or_dirname_path: str) -> str:
  """Determine the test data path.
  Args:
    test_srcdir: The path to the test source directory.
    file_or_dirname_path: The path to the file or directory.
  Returns:
    The full test data path.
  """
  for directory, subdirs, files in os.walk(test_srcdir):
    for f in subdirs + files:
      path = os.path.join(directory, f)
      if path.endswith(file_or_dirname_path):
        return path
  raise ValueError(
      "No %s in test directory: %s." % (file_or_dirname_path, test_srcdir)
  )
def get_model_path(custom_model, default_model_path):
  """Determine the model path based on the existence of the custom model.
  Args:
    custom_model: The path to the custom model provided by the user.
    default_model_path: The path to the default model.
  Returns:
    The path to the model to be used.
  """
  if custom_model is not None and os.path.exists(custom_model):
    print(f"Using provided model: {custom_model}")
    return custom_model
  else:
    if custom_model is not None:
      print(f"Warning: Provided model '{custom_model}' not found. "
            f"Using default model instead.")
    print(f"Using default model: {default_model_path}")
    return default_model_path
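For orientation, a minimal usage sketch of these helpers; the TEST_SRCDIR fallback and the model file name below are illustrative assumptions, not part of this commit:

# Usage sketch (hypothetical values; expects a Bazel test/runfiles layout).
import os
from mediapipe.tasks.python.benchmark import benchmark_utils

test_srcdir = os.environ.get("TEST_SRCDIR", ".")  # assumed Bazel-provided root
default_model = benchmark_utils.get_test_data_path(
    test_srcdir, "mobilenet_v2_1.0_224.tflite"
)
# Falls back to the default when the user-supplied model is None or missing.
model_path = benchmark_utils.get_model_path(None, default_model)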

View File

@ -0,0 +1,22 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder for internal Python strict library and test compatibility macro.
package(default_visibility = ["//visibility:public"])
py_library(
name = "base_vision_benchmark_api",
srcs = ["base_vision_benchmark_api.py"]
)

View File

@ -0,0 +1,14 @@
"""Copyright 2023 The MediaPipe Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

View File

@ -0,0 +1,44 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe vision benchmark base api."""
import os
import time
import numpy as np
VISION_TEST_DATA_DIR = 'mediapipe/tasks/testdata/vision'
def nth_percentile(func, image, n_iterations, percentile):
  """Run an n-th percentile latency benchmark for a given task function.
  Args:
    func: The method associated with a given task used for benchmarking.
    image: The input MediaPipe Image.
    n_iterations: Number of iterations to run the benchmark.
    percentile: Percentage for the percentiles to compute. Values must be
      between 0 and 100 inclusive.
  Returns:
    The n-th percentile of the inference times in milliseconds.
  """
  inference_times = []
  for _ in range(n_iterations):
    start_time_ns = time.time_ns()
    # Run the method for the task (e.g., classify).
    func(image)
    end_time_ns = time.time_ns()
    # Convert nanoseconds to milliseconds.
    inference_times.append((end_time_ns - start_time_ns) / 1_000_000)
  return np.percentile(inference_times, percentile)
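A self-contained smoke test of this helper; the dummy task below is an assumption used only so the example runs without loading a model:

# Hypothetical check of nth_percentile; the fake task just sleeps ~1 ms.
import time
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api

def fake_task(_image):
  time.sleep(0.001)  # stand-in for e.g. classifier.classify(image)

p95_ms = base_vision_benchmark_api.nth_percentile(
    fake_task, image=None, n_iterations=20, percentile=95
)
print(f"p95 latency: {p95_ms:.3f} ms")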

View File

@ -0,0 +1,34 @@
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder for internal Python strict library and test compatibility macro.
package(default_visibility = ["//visibility:public"])
py_binary(
name = "face_aligner_benchmark",
main = "face_aligner_benchmark.py",
srcs = ["face_aligner_benchmark.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
],
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/vision:face_aligner",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
],
)

View File

@ -0,0 +1,120 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe face aligner benchmark."""
import argparse
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import face_aligner
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
_MODEL_FILE = 'face_landmarker_v2.task'
_IMAGE_FILE = 'portrait.jpg'
def run(
model: str,
n_iterations: int,
delegate: base_options.BaseOptions.Delegate,
percentile: float,
):
"""Run an face aligner benchmark.
Args:
model: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
percentile: Percentage for the percentiles to compute. Values must be
between 0 and 100 inclusive.
Returns:
The n-th percentile of the inference times.
"""
# Initialize the face aligner
default_model_path = benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE
)
model_path = benchmark_utils.get_model_path(model, default_model_path)
options = face_aligner.FaceAlignerOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
)
with face_aligner.FaceAligner.create_from_options(options) as aligner:
mp_image = image.Image.create_from_file(
benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
# Run the benchmark and return the nth percentile of the inference times
nth_percentile = base_vision_benchmark_api.nth_percentile(
aligner.align, mp_image, n_iterations, percentile
)
return nth_percentile
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--model',
help='Path to face aligner task.',
required=False,
default=None,
)
parser.add_argument(
'--iterations',
help='Number of iterations for benchmarking.',
type=int,
default=100,
)
parser.add_argument(
'--percentile',
help='Percentile for benchmarking statistics.',
type=float,
default=95.0,
)
args = parser.parse_args()
# Run benchmark on CPU
cpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.CPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on CPU: '
f'{cpu_time:.6f} milliseconds'
)
# Run benchmark on GPU
gpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.GPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on GPU: '
f'{gpu_time:.6f} milliseconds'
)
if __name__ == '__main__':
main()
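As a usage note, run() can also be driven directly rather than through the CLI flags; the module import path below is an assumption about where this script sits in the source tree:

# Hypothetical programmatic use of run(); requires the vision test data
# (mediapipe/tasks/testdata/vision) to be resolvable, e.g. under Bazel runfiles.
from mediapipe.tasks.python.benchmark.vision.face_aligner import face_aligner_benchmark
from mediapipe.tasks.python.core import base_options

p95_cpu_ms = face_aligner_benchmark.run(
    model=None,  # None -> fall back to the bundled face_landmarker_v2.task
    n_iterations=50,
    delegate=base_options.BaseOptions.Delegate.CPU,
    percentile=95.0,
)
print(f"Face aligner CPU p95: {p95_cpu_ms:.3f} ms")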

View File

@ -0,0 +1,34 @@
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder for internal Python strict library and test compatibility macro.
package(default_visibility = ["//visibility:public"])
py_binary(
name = "face_detector_benchmark",
main = "face_detector_benchmark.py",
srcs = ["face_detector_benchmark.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
],
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/vision:face_detector",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
],
)

View File

@ -0,0 +1,120 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe face detector benchmark."""
import argparse
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import face_detector
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
_MODEL_FILE = 'face_detection_short_range.tflite'
_IMAGE_FILE = 'portrait.jpg'
def run(
model: str,
n_iterations: int,
delegate: base_options.BaseOptions.Delegate,
percentile: float,
):
"""Run an face detector benchmark.
Args:
model: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
percentile: Percentage for the percentiles to compute. Values must be
between 0 and 100 inclusive.
Returns:
The n-th percentile of the inference times.
"""
# Initialize the face detector
default_model_path = benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE
)
model_path = benchmark_utils.get_model_path(model, default_model_path)
options = face_detector.FaceDetectorOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
)
with face_detector.FaceDetector.create_from_options(options) as detector:
mp_image = image.Image.create_from_file(
benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
# Run the benchmark and return the nth percentile of the inference times
nth_percentile = base_vision_benchmark_api.nth_percentile(
detector.detect, mp_image, n_iterations, percentile
)
return nth_percentile
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--model',
help='Path to face detector task.',
required=False,
default=None,
)
parser.add_argument(
'--iterations',
help='Number of iterations for benchmarking.',
type=int,
default=100,
)
parser.add_argument(
'--percentile',
help='Percentile for benchmarking statistics.',
type=float,
default=95.0,
)
args = parser.parse_args()
# Run benchmark on CPU
cpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.CPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on CPU: '
f'{cpu_time:.6f} milliseconds'
)
# Run benchmark on GPU
gpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.GPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on GPU: '
f'{gpu_time:.6f} milliseconds'
)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,34 @@
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder for internal Python strict library and test compatibility macro.
package(default_visibility = ["//visibility:public"])
py_binary(
name = "face_landmarker_benchmark",
main = "face_landmarker_benchmark.py",
srcs = ["face_landmarker_benchmark.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
],
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/vision:face_landmarker",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
],
)

View File

@ -0,0 +1,120 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe face landmarker benchmark."""
import argparse
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import face_landmarker
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
_MODEL_FILE = 'face_landmarker_v2.task'
_IMAGE_FILE = 'portrait.jpg'
def run(
model: str,
n_iterations: int,
delegate: base_options.BaseOptions.Delegate,
percentile: float,
):
"""Run an face landmarker benchmark.
Args:
model: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
percentile: Percentage for the percentiles to compute. Values must be
between 0 and 100 inclusive.
Returns:
The n-th percentile of the inference times.
"""
# Initialize the face landmarker
default_model_path = benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE
)
model_path = benchmark_utils.get_model_path(model, default_model_path)
options = face_landmarker.FaceLandmarkerOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
)
with face_landmarker.FaceLandmarker.create_from_options(options) as landmarker:
mp_image = image.Image.create_from_file(
benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
# Run the benchmark and return the nth percentile of the inference times
nth_percentile = base_vision_benchmark_api.nth_percentile(
landmarker.detect, mp_image, n_iterations, percentile
)
return nth_percentile
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--model',
help='Path to face landmarker task.',
required=False,
default=None,
)
parser.add_argument(
'--iterations',
help='Number of iterations for benchmarking.',
type=int,
default=100,
)
parser.add_argument(
'--percentile',
help='Percentile for benchmarking statistics.',
type=float,
default=95.0,
)
args = parser.parse_args()
# Run benchmark on CPU
cpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.CPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on CPU: '
f'{cpu_time:.6f} milliseconds'
)
# Run benchmark on GPU
gpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.GPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on GPU: '
f'{gpu_time:.6f} milliseconds'
)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,34 @@
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder for internal Python strict library and test compatibility macro.
package(default_visibility = ["//visibility:public"])
py_binary(
name = "hand_landmarker_benchmark",
main = "hand_landmarker_benchmark.py",
srcs = ["hand_landmarker_benchmark.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
],
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/vision:hand_landmarker",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
],
)

View File

@ -0,0 +1,120 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe hand landmarker benchmark."""
import argparse
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import hand_landmarker
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
_MODEL_FILE = 'hand_landmarker.task'
_IMAGE_FILE = 'thumb_up.jpg'
def run(
model: str,
n_iterations: int,
delegate: base_options.BaseOptions.Delegate,
percentile: float,
):
"""Run an hand landmarker benchmark.
Args:
model: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
percentile: Percentage for the percentiles to compute. Values must be
between 0 and 100 inclusive.
Returns:
The n-th percentile of the inference times.
"""
# Initialize the hand landmarker
default_model_path = benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE
)
model_path = benchmark_utils.get_model_path(model, default_model_path)
options = hand_landmarker.HandLandmarkerOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
)
with hand_landmarker.HandLandmarker.create_from_options(options) as landmarker:
mp_image = image.Image.create_from_file(
benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
# Run the benchmark and return the nth percentile of the inference times
nth_percentile = base_vision_benchmark_api.nth_percentile(
landmarker.detect, mp_image, n_iterations, percentile
)
return nth_percentile
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--model',
help='Path to hand landmarker task.',
required=False,
default=None,
)
parser.add_argument(
'--iterations',
help='Number of iterations for benchmarking.',
type=int,
default=100,
)
parser.add_argument(
'--percentile',
help='Percentile for benchmarking statistics.',
type=float,
default=95.0,
)
args = parser.parse_args()
# Run benchmark on CPU
cpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.CPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on CPU: '
f'{cpu_time:.6f} milliseconds'
)
# Run benchmark on GPU
gpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.GPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on GPU: '
f'{gpu_time:.6f} milliseconds'
)
if __name__ == '__main__':
main()

View File

@ -16,12 +16,19 @@
package(default_visibility = ["//visibility:public"])
py_library(
py_binary(
name = "image_classifier_benchmark",
main = "image_classifier_benchmark.py",
srcs = ["image_classifier_benchmark.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
],
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/vision:image_classifier",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
],
)

View File

@ -14,12 +14,14 @@
"""MediaPipe image classsifier benchmark."""
import argparse
import time
import numpy as np
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import image_classifier
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
_MODEL_FILE = 'mobilenet_v2_1.0_224.tflite'
_IMAGE_FILE = 'burger.jpg'
@ -41,27 +43,29 @@ def run(
Returns:
The n-th percentile of the inference times.
"""
inference_times = []
# Initialize the image classifier
default_model_path = benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE
)
model_path = benchmark_utils.get_model_path(model, default_model_path)
options = image_classifier.ImageClassifierOptions(
base_options=base_options.BaseOptions(
model_asset_path=model, delegate=delegate
model_asset_path=model_path, delegate=delegate
),
max_results=1,
)
classifier = image_classifier.ImageClassifier.create_from_options(options)
mp_image = image.Image.create_from_file(_IMAGE_FILE)
for _ in range(n_iterations):
start_time_ns = time.time_ns()
classifier.classify(mp_image)
end_time_ns = time.time_ns()
# Convert to milliseconds
inference_times.append((end_time_ns - start_time_ns) / 1_000_000)
classifier.close()
return np.percentile(inference_times, percentile)
with image_classifier.ImageClassifier.create_from_options(options) as classifier:
mp_image = image.Image.create_from_file(
benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
# Run the benchmark and return the nth percentile of the inference times
nth_percentile = base_vision_benchmark_api.nth_percentile(
classifier.classify, mp_image, n_iterations, percentile
)
return nth_percentile
def main():
@ -72,7 +76,7 @@ def main():
'--model',
help='Path to image classification model.',
required=False,
default='classifier.tflite',
default=None,
)
parser.add_argument(
'--iterations',

View File

@ -0,0 +1,34 @@
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder for internal Python strict library and test compatibility macro.
package(default_visibility = ["//visibility:public"])
py_binary(
name = "image_embedder_benchmark",
main = "image_embedder_benchmark.py",
srcs = ["image_embedder_benchmark.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
],
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/vision:image_embedder",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
],
)

View File

@ -0,0 +1,120 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe image embedder benchmark."""
import argparse
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import image_embedder
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
_MODEL_FILE = 'mobilenet_v3_small_100_224_embedder.tflite'
_IMAGE_FILE = 'burger.jpg'
def run(
model: str,
n_iterations: int,
delegate: base_options.BaseOptions.Delegate,
percentile: float,
):
"""Run an image embedding extraction benchmark.
Args:
model: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
percentile: Percentage for the percentiles to compute. Values must be
between 0 and 100 inclusive.
Returns:
The n-th percentile of the inference times.
"""
# Initialize the image embedder
default_model_path = benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE
)
model_path = benchmark_utils.get_model_path(model, default_model_path)
options = image_embedder.ImageEmbedderOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
)
with image_embedder.ImageEmbedder.create_from_options(options) as embedder:
mp_image = image.Image.create_from_file(
benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
# Run the benchmark and return the nth percentile of the inference times
nth_percentile = base_vision_benchmark_api.nth_percentile(
embedder.embed, mp_image, n_iterations, percentile
)
return nth_percentile
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--model',
help='Path to image embedding extraction model.',
required=False,
default=None,
)
parser.add_argument(
'--iterations',
help='Number of iterations for benchmarking.',
type=int,
default=100,
)
parser.add_argument(
'--percentile',
help='Percentile for benchmarking statistics.',
type=float,
default=95.0,
)
args = parser.parse_args()
# Run benchmark on CPU
cpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.CPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on CPU: '
f'{cpu_time:.6f} milliseconds'
)
# Run benchmark on GPU
gpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.GPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on GPU: '
f'{gpu_time:.6f} milliseconds'
)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,34 @@
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder for internal Python strict library and test compatibility macro.
package(default_visibility = ["//visibility:public"])
py_binary(
name = "image_segmenter_benchmark",
main = "image_segmenter_benchmark.py",
srcs = ["image_segmenter_benchmark.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
],
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/vision:image_segmenter",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
],
)

View File

@ -0,0 +1,121 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe image segmenter benchmark."""
import argparse
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import image_segmenter
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
_MODEL_FILE = 'deeplabv3.tflite'
_IMAGE_FILE = 'segmentation_input_rotation0.jpg'
def run(
model: str,
n_iterations: int,
delegate: base_options.BaseOptions.Delegate,
percentile: float,
):
"""Run an image segmentation benchmark.
Args:
model: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
percentile: Percentage for the percentiles to compute. Values must be
between 0 and 100 inclusive.
Returns:
The n-th percentile of the inference times.
"""
# Initialize the image segmenter
default_model_path = benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE
)
model_path = benchmark_utils.get_model_path(model, default_model_path)
options = image_segmenter.ImageSegmenterOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
),
output_confidence_masks=True, output_category_mask=True
)
with image_segmenter.ImageSegmenter.create_from_options(options) as segmenter:
mp_image = image.Image.create_from_file(
benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
# Run the benchmark and return the nth percentile of the inference times
nth_percentile = base_vision_benchmark_api.nth_percentile(
segmenter.segment, mp_image, n_iterations, percentile
)
return nth_percentile
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--model',
help='Path to image segmentation model.',
required=False,
default=None,
)
parser.add_argument(
'--iterations',
help='Number of iterations for benchmarking.',
type=int,
default=100,
)
parser.add_argument(
'--percentile',
help='Percentile for benchmarking statistics.',
type=float,
default=95.0,
)
args = parser.parse_args()
# Run benchmark on CPU
cpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.CPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on CPU: '
f'{cpu_time:.6f} milliseconds'
)
# Run benchmark on GPU
gpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.GPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on GPU: '
f'{gpu_time:.6f} milliseconds'
)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,34 @@
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder for internal Python strict library and test compatibility macro.
package(default_visibility = ["//visibility:public"])
py_binary(
name = "interactive_segmenter_benchmark",
main = "interactive_segmenter_benchmark.py",
srcs = ["interactive_segmenter_benchmark.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
],
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/vision:interactive_segmenter",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
],
)

View File

@ -0,0 +1,128 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe interactive segmenter benchmark."""
from functools import partial
import argparse
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.components.containers import keypoint
from mediapipe.tasks.python.vision import interactive_segmenter
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
_MODEL_FILE = 'deeplabv3.tflite'
_IMAGE_FILE = 'segmentation_input_rotation0.jpg'
def run(
model: str,
n_iterations: int,
delegate: base_options.BaseOptions.Delegate,
percentile: float,
):
"""Run an interactive segmentation benchmark.
Args:
model: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
percentile: Percentage for the percentiles to compute. Values must be
between 0 and 100 inclusive.
Returns:
The n-th percentile of the inference times.
"""
# Initialize the interactive segmenter
default_model_path = benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE
)
model_path = benchmark_utils.get_model_path(model, default_model_path)
options = interactive_segmenter.InteractiveSegmenterOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
),
output_category_mask=True, output_confidence_masks=False
)
roi = interactive_segmenter.RegionOfInterest(
format=interactive_segmenter.RegionOfInterest.Format.KEYPOINT,
keypoint=keypoint.NormalizedKeypoint(0.44, 0.7)
)
with interactive_segmenter.InteractiveSegmenter.create_from_options(options) as segmenter:
mp_image = image.Image.create_from_file(
benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
# Run the benchmark and return the nth percentile of the inference times
nth_percentile = base_vision_benchmark_api.nth_percentile(
partial(segmenter.segment, roi=roi), mp_image, n_iterations, percentile
)
return nth_percentile
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--model',
help='Path to interactive segmentation model.',
required=False,
default=None,
)
parser.add_argument(
'--iterations',
help='Number of iterations for benchmarking.',
type=int,
default=100,
)
parser.add_argument(
'--percentile',
help='Percentile for benchmarking statistics.',
type=float,
default=95.0,
)
args = parser.parse_args()
# Run benchmark on CPU
cpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.CPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on CPU: '
f'{cpu_time:.6f} milliseconds'
)
# Run benchmark on GPU
gpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.GPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on GPU: '
f'{gpu_time:.6f} milliseconds'
)
if __name__ == '__main__':
main()
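A brief note on the design choice above: nth_percentile only forwards the image to the benchmarked callable, so functools.partial is used to bind the extra roi argument up front; the same trick works for any task method that takes additional keyword arguments. A self-contained sketch with a stand-in segment function (the real code binds segmenter.segment the same way):

# Sketch of the partial-binding pattern; fake_segment is a hypothetical stand-in.
from functools import partial
import time
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api

def fake_segment(image, roi=None):
  time.sleep(0.001)  # stand-in for interactive segmentation with an ROI

p95_ms = base_vision_benchmark_api.nth_percentile(
    partial(fake_segment, roi="normalized-keypoint-roi"), None, 20, 95.0
)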

View File

@ -0,0 +1,34 @@
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder for internal Python strict library and test compatibility macro.
package(default_visibility = ["//visibility:public"])
py_binary(
name = "object_detector_benchmark",
main = "object_detector_benchmark.py",
srcs = ["object_detector_benchmark.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
],
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/vision:object_detector",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
],
)

View File

@ -0,0 +1,120 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe object detector benchmark."""
import argparse
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import object_detector
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
_MODEL_FILE = 'coco_efficientdet_lite0_v1_1.0_quant_2021_09_06.tflite'
_IMAGE_FILE = 'cats_and_dogs.jpg'
def run(
model: str,
n_iterations: int,
delegate: base_options.BaseOptions.Delegate,
percentile: float,
):
"""Run an object detector benchmark.
Args:
model: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
percentile: Percentage for the percentiles to compute. Values must be
between 0 and 100 inclusive.
Returns:
The n-th percentile of the inference times.
"""
# Initialize the object detector
default_model_path = benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE
)
model_path = benchmark_utils.get_model_path(model, default_model_path)
options = object_detector.ObjectDetectorOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
)
with object_detector.ObjectDetector.create_from_options(options) as detector:
mp_image = image.Image.create_from_file(
benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
# Run the benchmark and return the nth percentile of the inference times
nth_percentile = base_vision_benchmark_api.nth_percentile(
detector.detect, mp_image, n_iterations, percentile
)
return nth_percentile
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--model',
help='Path to object detector model.',
required=False,
default=None,
)
parser.add_argument(
'--iterations',
help='Number of iterations for benchmarking.',
type=int,
default=100,
)
parser.add_argument(
'--percentile',
help='Percentile for benchmarking statistics.',
type=float,
default=95.0,
)
args = parser.parse_args()
# Run benchmark on CPU
cpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.CPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on CPU: '
f'{cpu_time:.6f} milliseconds'
)
# Run benchmark on GPU
gpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.GPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on GPU: '
f'{gpu_time:.6f} milliseconds'
)
if __name__ == '__main__':
main()

View File

@ -0,0 +1,34 @@
# Copyright 2022 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Placeholder for internal Python strict library and test compatibility macro.
package(default_visibility = ["//visibility:public"])
py_binary(
name = "pose_landmarker_benchmark",
main = "pose_landmarker_benchmark.py",
srcs = ["pose_landmarker_benchmark.py"],
data = [
"//mediapipe/tasks/testdata/vision:test_images",
"//mediapipe/tasks/testdata/vision:test_models",
],
deps = [
"//mediapipe/python:_framework_bindings",
"//mediapipe/tasks/python/core:base_options",
"//mediapipe/tasks/python/vision:pose_landmarker",
"//mediapipe/tasks/python/benchmark:benchmark_utils",
"//mediapipe/tasks/python/benchmark/vision/core:base_vision_benchmark_api",
],
)

View File

@ -0,0 +1,120 @@
# Copyright 2023 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe pose landmarker benchmark."""
import argparse
from mediapipe.python._framework_bindings import image
from mediapipe.tasks.python.core import base_options
from mediapipe.tasks.python.vision import pose_landmarker
from mediapipe.tasks.python.benchmark import benchmark_utils
from mediapipe.tasks.python.benchmark.vision.core import base_vision_benchmark_api
_MODEL_FILE = 'pose_landmarker.task'
_IMAGE_FILE = 'pose.jpg'
def run(
model: str,
n_iterations: int,
delegate: base_options.BaseOptions.Delegate,
percentile: float,
):
"""Run an pose landmarker benchmark.
Args:
model: Path to the TFLite model.
n_iterations: Number of iterations to run the benchmark.
delegate: CPU or GPU delegate for inference.
percentile: Percentage for the percentiles to compute. Values must be
between 0 and 100 inclusive.
Returns:
The n-th percentile of the inference times.
"""
# Initialize the pose landmarker
default_model_path = benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _MODEL_FILE
)
model_path = benchmark_utils.get_model_path(model, default_model_path)
options = pose_landmarker.PoseLandmarkerOptions(
base_options=base_options.BaseOptions(
model_asset_path=model_path, delegate=delegate
)
)
with pose_landmarker.PoseLandmarker.create_from_options(options) as landmarker:
mp_image = image.Image.create_from_file(
benchmark_utils.get_test_data_path(
base_vision_benchmark_api.VISION_TEST_DATA_DIR, _IMAGE_FILE
)
)
# Run the benchmark and return the nth percentile of the inference times
nth_percentile = base_vision_benchmark_api.nth_percentile(
landmarker.detect, mp_image, n_iterations, percentile
)
return nth_percentile
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument(
'--model',
help='Path to pose landmarker task.',
required=False,
default=None,
)
parser.add_argument(
'--iterations',
help='Number of iterations for benchmarking.',
type=int,
default=100,
)
parser.add_argument(
'--percentile',
help='Percentile for benchmarking statistics.',
type=float,
default=95.0,
)
args = parser.parse_args()
# Run benchmark on CPU
cpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.CPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on CPU: '
f'{cpu_time:.6f} milliseconds'
)
# Run benchmark on GPU
gpu_time = run(
args.model,
args.iterations,
base_options.BaseOptions.Delegate.GPU,
args.percentile,
)
print(
f'{args.percentile}th Percentile Inference Time on GPU: '
f'{gpu_time:.6f} milliseconds'
)
if __name__ == '__main__':
main()