Project import generated by Copybara.

GitOrigin-RevId: d8caa66de45839696f5bd0786ad3bfbcb9cff632
commit 2b58cceec9 (parent f15da632de)
MediaPipe Team authored 2020-12-09 22:13:05 -05:00; committed by chuoling
750 changed files with 22901 additions and 9478 deletions


@@ -9,21 +9,21 @@ build --define='absl=1'
 build --enable_platform_specific_config
 # Linux
-build:linux --cxxopt=-std=c++14
-build:linux --host_cxxopt=-std=c++14
+build:linux --cxxopt=-std=c++17
+build:linux --host_cxxopt=-std=c++17
 build:linux --copt=-w
 # windows
-build:windows --cxxopt=/std:c++14
-build:windows --host_cxxopt=/std:c++14
+build:windows --cxxopt=/std:c++17
+build:windows --host_cxxopt=/std:c++17
 build:windows --copt=/w
 # For using M_* math constants on Windows with MSVC.
 build:windows --copt=/D_USE_MATH_DEFINES
 build:windows --host_copt=/D_USE_MATH_DEFINES
 # macOS
-build:macos --cxxopt=-std=c++14
-build:macos --host_cxxopt=-std=c++14
+build:macos --cxxopt=-std=c++17
+build:macos --host_cxxopt=-std=c++17
 build:macos --copt=-w
 # Sets the default Apple platform to macOS.
@@ -83,3 +83,9 @@ build:ios_fat --watchos_cpus=armv7k
 build:darwin_x86_64 --apple_platform_type=macos
 build:darwin_x86_64 --macos_minimum_os=10.12
 build:darwin_x86_64 --cpu=darwin_x86_64
+
+# This bazelrc file is meant to be written by a setup script.
+try-import %workspace%/.configure.bazelrc
+
+# This bazelrc file can be used for user-specific custom build settings.
+try-import %workspace%/.user.bazelrc
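The two `try-import` lines make Bazel read optional, per-user rc files when they exist and silently skip them otherwise. As a rough illustration (hypothetical contents, not part of this commit), a developer's `.user.bazelrc` might hold personal defaults such as:

```
# .user.bazelrc -- kept out of version control (see the .gitignore change
# below), so these settings stay local to one machine.

# Always build optimized binaries.
build -c opt

# Reuse action outputs across workspaces via a local disk cache.
build --disk_cache=~/.bazel-disk
```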

.gitignore

@@ -2,3 +2,5 @@ bazel-*
 mediapipe/MediaPipe.xcodeproj
 mediapipe/MediaPipe.tulsiproj/*.tulsiconf-user
 mediapipe/provisioning_profile.mobileprovision
+.configure.bazelrc
+.user.bazelrc


@@ -21,25 +21,26 @@ ML solutions for live and streaming media.

 ## ML solutions in MediaPipe

-Face Detection | Face Mesh | Iris | Hands | Pose | Hair Segmentation
+Face Detection | Face Mesh | Iris | Hands | Pose | Holistic
 :---: | :---: | :---: | :---: | :---: | :---:
-[![face_detection](docs/images/mobile/face_detection_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/face_detection) | [![face_mesh](docs/images/mobile/face_mesh_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/face_mesh) | [![iris](docs/images/mobile/iris_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/iris) | [![hand](docs/images/mobile/hand_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/hands) | [![pose](docs/images/mobile/pose_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/pose) | [![hair_segmentation](docs/images/mobile/hair_segmentation_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/hair_segmentation)
+[![face_detection](docs/images/mobile/face_detection_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/face_detection) | [![face_mesh](docs/images/mobile/face_mesh_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/face_mesh) | [![iris](docs/images/mobile/iris_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/iris) | [![hand](docs/images/mobile/hand_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/hands) | [![pose](docs/images/mobile/pose_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/pose) | [![hair_segmentation](docs/images/mobile/holistic_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/holistic)

-Object Detection | Box Tracking | Instant Motion Tracking | Objectron | KNIFT
-:---: | :---: | :---: | :---: | :---:
-[![object_detection](docs/images/mobile/object_detection_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/object_detection) | [![box_tracking](docs/images/mobile/object_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/box_tracking) | [![instant_motion_tracking](docs/images/mobile/instant_motion_tracking_android_small.gif)](https://google.github.io/mediapipe/solutions/instant_motion_tracking) | [![objectron](docs/images/mobile/objectron_chair_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/objectron) | [![knift](docs/images/mobile/template_matching_android_cpu_small.gif)](https://google.github.io/mediapipe/solutions/knift)
+Hair Segmentation | Object Detection | Box Tracking | Instant Motion Tracking | Objectron | KNIFT
+:---: | :---: | :---: | :---: | :---: | :---:
+[![hair_segmentation](docs/images/mobile/hair_segmentation_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/hair_segmentation) | [![object_detection](docs/images/mobile/object_detection_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/object_detection) | [![box_tracking](docs/images/mobile/object_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/box_tracking) | [![instant_motion_tracking](docs/images/mobile/instant_motion_tracking_android_small.gif)](https://google.github.io/mediapipe/solutions/instant_motion_tracking) | [![objectron](docs/images/mobile/objectron_chair_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/objectron) | [![knift](docs/images/mobile/template_matching_android_cpu_small.gif)](https://google.github.io/mediapipe/solutions/knift)

 <!-- []() in the first cell is needed to preserve table formatting in GitHub Pages. -->
 <!-- Whenever this table is updated, paste a copy to solutions/solutions.md. -->

-[]() | Android | iOS | Desktop | Python | Web | Coral
+[]() | [Android](https://google.github.io/mediapipe/getting_started/android) | [iOS](https://google.github.io/mediapipe/getting_started/ios) | [C++](https://google.github.io/mediapipe/getting_started/cpp) | [Python](https://google.github.io/mediapipe/getting_started/python) | [JS](https://google.github.io/mediapipe/getting_started/javascript) | [Coral](https://github.com/google/mediapipe/tree/master/mediapipe/examples/coral/README.md)
 :--- | :---: | :---: | :---: | :---: | :---: | :---:
-[Face Detection](https://google.github.io/mediapipe/solutions/face_detection) | ✅ | ✅ | ✅ | | ✅ | ✅
+[Face Detection](https://google.github.io/mediapipe/solutions/face_detection) | ✅ | ✅ | ✅ | | | ✅
 [Face Mesh](https://google.github.io/mediapipe/solutions/face_mesh) | ✅ | ✅ | ✅ | ✅ | |
-[Iris](https://google.github.io/mediapipe/solutions/iris) | ✅ | ✅ | ✅ | | ✅ |
+[Iris](https://google.github.io/mediapipe/solutions/iris) | ✅ | ✅ | ✅ | | |
 [Hands](https://google.github.io/mediapipe/solutions/hands) | ✅ | ✅ | ✅ | ✅ | ✅ |
 [Pose](https://google.github.io/mediapipe/solutions/pose) | ✅ | ✅ | ✅ | ✅ | ✅ |
-[Hair Segmentation](https://google.github.io/mediapipe/solutions/hair_segmentation) | ✅ | | ✅ | | ✅ |
+[Holistic](https://google.github.io/mediapipe/solutions/holistic) | ✅ | ✅ | ✅ | ✅ | ✅ |
+[Hair Segmentation](https://google.github.io/mediapipe/solutions/hair_segmentation) | ✅ | | ✅ | | |
 [Object Detection](https://google.github.io/mediapipe/solutions/object_detection) | ✅ | ✅ | ✅ | | | ✅
 [Box Tracking](https://google.github.io/mediapipe/solutions/box_tracking) | ✅ | ✅ | ✅ | | |
 [Instant Motion Tracking](https://google.github.io/mediapipe/solutions/instant_motion_tracking) | ✅ | | | | |
@@ -55,16 +56,12 @@ for ML models released in MediaPipe.

 ## MediaPipe in Python

-MediaPipe Python package is available on
-[PyPI](https://pypi.org/project/mediapipe/), and can be installed simply by `pip
-install mediapipe` on Linux and macOS, as described in:
-
-* [MediaPipe Face Mesh](https://google.github.io/mediapipe/solutions/face_mesh#python)
-  ([colab](https://mediapipe.page.link/face_mesh_py_colab))
-* [MediaPipe Hands](https://google.github.io/mediapipe/solutions/hands#python)
-  ([colab](https://mediapipe.page.link/hands_py_colab))
-* [MediaPipe Pose](https://google.github.io/mediapipe/solutions/pose#python)
-  ([colab](https://mediapipe.page.link/pose_py_colab))
+MediaPipe offers customizable Python solutions as a prebuilt Python package on
+[PyPI](https://pypi.org/project/mediapipe/), which can be installed simply with
+`pip install mediapipe`. It also provides tools for users to build their own
+solutions. Please see
+[MediaPipe in Python](https://google.github.io/mediapipe/getting_started/python.md)
+for more info.
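The paragraph above points to the prebuilt package on PyPI. As a minimal sketch of driving one of those Python solutions (assuming `pip install mediapipe` has been run; `hand.jpg` is a hypothetical input image, and OpenCV is used here only to read and convert it):

```python
import cv2
import mediapipe as mp

# Static-image mode runs detection on every input instead of tracking a stream.
hands = mp.solutions.hands.Hands(static_image_mode=True, max_num_hands=2)

image = cv2.imread("hand.jpg")  # hypothetical input image
# MediaPipe expects RGB input; OpenCV loads images as BGR.
results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

if results.multi_hand_landmarks:
    for hand_landmarks in results.multi_hand_landmarks:
        print(hand_landmarks)

hands.close()
```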
 ## MediaPipe on the Web
@@ -105,6 +102,8 @@ run code search using

 ## Publications

+* [Background Features in Google Meet, Powered by Web ML](https://ai.googleblog.com/2020/10/background-features-in-google-meet.html)
+  in Google AI Blog
 * [MediaPipe 3D Face Transform](https://developers.googleblog.com/2020/09/mediapipe-3d-face-transform.html)
   in Google Developers Blog
 * [Instant Motion Tracking With MediaPipe](https://developers.googleblog.com/2020/08/instant-motion-tracking-with-mediapipe.html)


@@ -13,11 +13,11 @@ load("@bazel_skylib//lib:versions.bzl", "versions")
 versions.check(minimum_bazel_version = "3.4.0")

-# ABSL cpp library lts_2020_02_25
+# ABSL cpp library lts_2020_09_23
 http_archive(
     name = "com_google_absl",
     urls = [
-        "https://github.com/abseil/abseil-cpp/archive/20200225.tar.gz",
+        "https://github.com/abseil/abseil-cpp/archive/20200923.tar.gz",
     ],
     # Remove after https://github.com/abseil/abseil-cpp/issues/326 is solved.
     patches = [
@@ -26,8 +26,8 @@ http_archive(
     patch_args = [
         "-p1",
     ],
-    strip_prefix = "abseil-cpp-20200225",
-    sha256 = "728a813291bdec2aa46eab8356ace9f75ac2ed9dfe2df5ab603c4e6c09f1c353"
+    strip_prefix = "abseil-cpp-20200923",
+    sha256 = "b3744a4f7a249d5eaf2309daad597631ce77ea62e0fc6abffbab4b4c3dc0fc08"
 )

 http_archive(
@@ -99,7 +99,7 @@ http_archive(
         "https://github.com/google/glog/archive/0a2e5931bd5ff22fd3bf8999eb8ce776f159cda6.zip",
     ],
     patches = [
-        "@//third_party:com_github_glog_glog_9779e5ea6ef59562b030248947f787d1256132ae.diff"
+        "@//third_party:com_github_glog_glog_9779e5ea6ef59562b030248947f787d1256132ae.diff",
     ],
     patch_args = [
         "-p1",
@@ -170,15 +170,15 @@ http_archive(
 http_archive(
     name = "ceres_solver",
-    url = "https://github.com/ceres-solver/ceres-solver/archive/1.14.0.zip",
+    url = "https://github.com/ceres-solver/ceres-solver/archive/2.0.0.zip",
     patches = [
         "@//third_party:ceres_solver_compatibility_fixes.diff"
     ],
     patch_args = [
         "-p1",
     ],
-    strip_prefix = "ceres-solver-1.14.0",
-    sha256 = "5ba6d0db4e784621fda44a50c58bb23b0892684692f0c623e2063f9c19f192f1"
+    strip_prefix = "ceres-solver-2.0.0",
+    sha256 = "db12d37b4cebb26353ae5b7746c7985e00877baa8e7b12dc4d3a1512252fff3b"
 )

 http_archive(
@@ -364,9 +364,9 @@ http_archive(
 )

 #Tensorflow repo should always go after the other external dependencies.
-# 2020-10-30
-_TENSORFLOW_GIT_COMMIT = "84384703c0d8b502e33ff6fd7eefd219dca5ff8e"
-_TENSORFLOW_SHA256= "23fb322fc15a20f7a7838d9a31f8b16f60700a494ea654311a0aa8621769df98"
+# 2020-12-07
+_TENSORFLOW_GIT_COMMIT = "f556709f4df005ad57fd24d5eaa0d9380128d3ba"
+_TENSORFLOW_SHA256= "9e157d4723921b48a974f645f70d07c8fd3c363569a0ac6ee85fec114d6459ea"
 http_archive(
     name = "org_tensorflow",
     urls = [
@@ -374,6 +374,7 @@ http_archive(
     ],
     patches = [
         "@//third_party:org_tensorflow_compatibility_fixes.diff",
+        "@//third_party:org_tensorflow_objc_cxx17.diff",
     ],
     patch_args = [
         "-p1",


@@ -89,33 +89,34 @@ for app in ${apps}; do
   fi
   target="${app}:${target_name}"
   bin="${bin_dir}/${app}/${target_name}.apk"
-  apk="${out_dir}/${target_name}.apk"

   echo "=== Target: ${target}"

-  if [[ ${app_name} == "objectdetection3d" ]]; then
-    categories=("shoe" "chair" "cup" "camera" "shoe_1stage" "chair_1stage")
-    for category in ${categories[@]}; do
-      apk="${out_dir}/${target_name}_${category}.apk"
-      if [[ $install_only == false ]]; then
-        bazel_flags_extended=("${bazel_flags[@]}")
-        if [[ ${category} != "shoe" ]]; then
-          bazel_flags_extended+=(--define ${category}=true)
-        fi
-        echo "bazel ${bazel_flags_extended[@]}"
-        bazel "${bazel_flags_extended[@]}"
-        cp -f "${bin}" "${apk}"
-      fi
-      apks+=(${apk})
-    done
-  else
   if [[ $install_only == false ]]; then
     bazel_flags=("${default_bazel_flags[@]}")
     bazel_flags+=(${target})
     if [[ $strip == true ]]; then
       bazel_flags+=(--linkopt=-s)
     fi
+  fi
+
+  if [[ ${app_name} == "objectdetection3d" ]]; then
+    categories=("shoe" "chair" "cup" "camera" "shoe_1stage" "chair_1stage")
+    for category in "${categories[@]}"; do
+      apk="${out_dir}/${target_name}_${category}.apk"
+      if [[ $install_only == false ]]; then
+        bazel_flags_extended=("${bazel_flags[@]}")
+        if [[ ${category} != "shoe" ]]; then
+          bazel_flags_extended+=(--define ${category}=true)
+        fi
+        bazel "${bazel_flags_extended[@]}"
+        cp -f "${bin}" "${apk}"
+      fi
+      apks+=(${apk})
+    done
+  else
+    apk="${out_dir}/${target_name}.apk"
+    if [[ $install_only == false ]]; then
       if [[ ${app_name} == "templatematchingcpu" ]]; then
         switch_to_opencv_4
       fi


@@ -70,6 +70,7 @@ for app in ${apps}; do
   if [[ "${target_name}" == "autoflip" ||
         "${target_name}" == "hello_world" ||
         "${target_name}" == "media_sequence" ||
+        "${target_name}" == "object_detection_3d" ||
         "${target_name}" == "template_matching" ||
         "${target_name}" == "youtube8m" ]]; then
     continue
@@ -94,6 +95,7 @@ for app in ${apps}; do
     graph_name="${target_name}/${target_name}"
   fi
   if [[ ${target_name} == "iris_tracking" ||
+        ${target_name} == "pose_tracking" ||
        ${target_name} == "upper_body_pose_tracking" ]]; then
     graph_suffix="cpu"
   else


@@ -0,0 +1,191 @@
---
layout: default
title: MediaPipe on Android
parent: Getting Started
has_children: true
has_toc: false
nav_order: 1
---
# MediaPipe on Android
{: .no_toc }
1. TOC
{:toc}
---
Please follow instructions below to build Android example apps in the supported
MediaPipe [solutions](../solutions/solutions.md). To learn more about these
example apps, start from [Hello World! on Android](./hello_world_android.md). To
incorporate MediaPipe into an existing Android Studio project, see these
[instructions](./android_archive_library.md) that use Android Archive (AAR) and
Gradle.
## Building Android example apps
### Prerequisite
* Install MediaPipe following these [instructions](./install.md).
* Set up Java Runtime.
* Set up Android SDK release 28.0.3 and above.
* Set up Android NDK r18b and above.
MediaPipe recommends setting up Android SDK and NDK via Android Studio (see
below for Android Studio setup). However, if you prefer using MediaPipe without
Android Studio, please run
[`setup_android_sdk_and_ndk.sh`](https://github.com/google/mediapipe/blob/master/setup_android_sdk_and_ndk.sh)
to download and set up Android SDK and NDK before building any Android example
apps.
If Android SDK and NDK are already installed (e.g., by Android Studio), set
$ANDROID_HOME and $ANDROID_NDK_HOME to point to the installed SDK and NDK.
```bash
export ANDROID_HOME=<path to the Android SDK>
export ANDROID_NDK_HOME=<path to the Android NDK>
```
In order to use MediaPipe on earlier Android versions, MediaPipe needs to switch
to a lower Android API level. You can achieve this by specifying `api_level =
$YOUR_INTENDED_API_LEVEL` in android_ndk_repository() and/or
android_sdk_repository() in the
[`WORKSPACE`](https://github.com/google/mediapipe/blob/master/WORKSPACE) file.
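For example, the relevant `WORKSPACE` rules might then look like this (the API level shown is only an illustrative placeholder; the rule names already appear in the MediaPipe `WORKSPACE`):

```
android_sdk_repository(
    name = "androidsdk",
    api_level = 21,
)

android_ndk_repository(
    name = "androidndk",
    api_level = 21,
)
```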
Please verify all the necessary packages are installed.
* Android SDK Platform API Level 28 or 29
* Android SDK Build-Tools 28 or 29
* Android SDK Platform-Tools 28 or 29
* Android SDK Tools 26.1.1
* Android NDK 17c or above
### Option 1: Build with Bazel in Command Line
Tip: You can run this
[script](https://github.com/google/mediapipe/blob/master/build_android_examples.sh)
to build (and install) all MediaPipe Android example apps.
1. To build an Android example app, build against the corresponding
`android_binary` build target. For instance, for
[MediaPipe Hands](../solutions/hands.md) the target is `handtrackinggpu` in
the
[BUILD](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu/BUILD)
file:
Note: To reduce the binary size, consider appending `--linkopt="-s"` to the
command below to strip symbols.
```bash
bazel build -c opt --config=android_arm64 mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu:handtrackinggpu
```
2. Install it on a device with:
```bash
adb install bazel-bin/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu/handtrackinggpu.apk
```
### Option 2: Build with Bazel in Android Studio
The MediaPipe project can be imported into Android Studio using the Bazel
plugins. This allows the MediaPipe examples to be built and modified in Android
Studio.
To incorporate MediaPipe into an existing Android Studio project, see these
[instructions](./android_archive_library.md) that use Android Archive (AAR) and
Gradle.
The steps below use Android Studio 3.5 to build and install a MediaPipe example
app:
1. Install and launch Android Studio 3.5.
2. Select `Configure` -> `SDK Manager` -> `SDK Platforms`.
* Verify that Android SDK Platform API Level 28 or 29 is installed.
* Take note of the Android SDK Location, e.g.,
`/usr/local/home/Android/Sdk`.
3. Select `Configure` -> `SDK Manager` -> `SDK Tools`.
* Verify that Android SDK Build-Tools 28 or 29 is installed.
* Verify that Android SDK Platform-Tools 28 or 29 is installed.
* Verify that Android SDK Tools 26.1.1 is installed.
* Verify that Android NDK 17c or above is installed.
* Take note of the Android NDK Location, e.g.,
`/usr/local/home/Android/Sdk/ndk-bundle` or
`/usr/local/home/Android/Sdk/ndk/20.0.5594570`.
4. Set environment variables `$ANDROID_HOME` and `$ANDROID_NDK_HOME` to point
to the installed SDK and NDK.
```bash
export ANDROID_HOME=/usr/local/home/Android/Sdk
# If the NDK libraries are installed by a previous version of Android Studio, do
export ANDROID_NDK_HOME=/usr/local/home/Android/Sdk/ndk-bundle
# If the NDK libraries are installed by Android Studio 3.5, do
export ANDROID_NDK_HOME=/usr/local/home/Android/Sdk/ndk/<version number>
```
5. Select `Configure` -> `Plugins` to install `Bazel`.
6. On Linux, select `File` -> `Settings` -> `Bazel settings`. On macOS, select
`Android Studio` -> `Preferences` -> `Bazel settings`. Then, modify `Bazel
binary location` to be the same as the output of `$ which bazel`.
7. Select `Import Bazel Project`.
* Select `Workspace`: `/path/to/mediapipe` and select `Next`.
* Select `Generate from BUILD file`: `/path/to/mediapipe/BUILD` and select
`Next`.
* Modify `Project View` to be the following and select `Finish`.
```
directories:
# read project settings, e.g., .bazelrc
.
-mediapipe/objc
-mediapipe/examples/ios
targets:
//mediapipe/examples/android/...:all
//mediapipe/java/...:all
android_sdk_platform: android-29
sync_flags:
--host_crosstool_top=@bazel_tools//tools/cpp:toolchain
```
8. Select `Bazel` -> `Sync` -> `Sync project with Build files`.
Note: Even after doing step 4, if you still see the error: `"no such package
'@androidsdk//': Either the path attribute of android_sdk_repository or the
ANDROID_HOME environment variable must be set."`, please modify the
[`WORKSPACE`](https://github.com/google/mediapipe/blob/master/WORKSPACE)
file to point to your SDK and NDK library locations, as below:
```
android_sdk_repository(
name = "androidsdk",
path = "/path/to/android/sdk"
)
android_ndk_repository(
name = "androidndk",
path = "/path/to/android/ndk"
)
```
9. Connect an Android device to the workstation.
10. Select `Run...` -> `Edit Configurations...`.
* Select `Templates` -> `Bazel Command`.
* Enter Target Expression:
`//mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu:handtrackinggpu`
* Enter Bazel command: `mobile-install`.
* Enter Bazel flags: `-c opt --config=android_arm64`.
* Press the `[+]` button to add the new configuration.
* Select `Run` to run the example app on the connected Android device.


@@ -1,8 +1,9 @@
 ---
 layout: default
 title: MediaPipe Android Archive
-parent: Getting Started
-nav_order: 7
+parent: MediaPipe on Android
+grand_parent: Getting Started
+nav_order: 2
 ---

 # MediaPipe Android Archive


@@ -2,7 +2,7 @@
 layout: default
 title: Building MediaPipe Examples
 parent: Getting Started
-nav_order: 2
+nav_exclude: true
 ---

 # Building MediaPipe Examples
@@ -12,464 +12,22 @@ nav_order: 2
 {:toc}
 ---
+### Android
+
+Please see these [instructions](./android.md).
+
+### iOS
+
+Please see these [instructions](./ios.md).
+
+### Python
+
+Please see these [instructions](./python.md).
+
+### JavaScript
+
+Please see these [instructions](./javascript.md).
+
+### C++
+
+Please see these [instructions](./cpp.md).

## Android

### Prerequisite

* Java Runtime.
* Android SDK release 28.0.3 and above.
* Android NDK r18b and above.

MediaPipe recommends setting up Android SDK and NDK via Android Studio (and see
below for Android Studio setup). However, if you prefer using MediaPipe without
Android Studio, please run
[`setup_android_sdk_and_ndk.sh`](https://github.com/google/mediapipe/blob/master/setup_android_sdk_and_ndk.sh)
to download and setup Android SDK and NDK before building any Android example
apps.

If Android SDK and NDK are already installed (e.g., by Android Studio), set
$ANDROID_HOME and $ANDROID_NDK_HOME to point to the installed SDK and NDK.

```bash
export ANDROID_HOME=<path to the Android SDK>
export ANDROID_NDK_HOME=<path to the Android NDK>
```

In order to use MediaPipe on earlier Android versions, MediaPipe needs to switch
to a lower Android API level. You can achieve this by specifying `api_level =
$YOUR_INTENDED_API_LEVEL` in android_ndk_repository() and/or
android_sdk_repository() in the
[`WORKSPACE`](https://github.com/google/mediapipe/blob/master/WORKSPACE) file.

Please verify all the necessary packages are installed.

* Android SDK Platform API Level 28 or 29
* Android SDK Build-Tools 28 or 29
* Android SDK Platform-Tools 28 or 29
* Android SDK Tools 26.1.1
* Android NDK 17c or above

### Option 1: Build with Bazel in Command Line
Tip: You can run this
[script](https://github.com/google/mediapipe/blob/master/build_android_examples.sh)
to build (and install) all MediaPipe Android example apps.
1. To build an Android example app, build against the corresponding
`android_binary` build target. For instance, for
[MediaPipe Hands](../solutions/hands.md) the target is `handtrackinggpu` in
the
[BUILD](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu/BUILD)
file:
Note: To reduce the binary size, consider appending `--linkopt="-s"` to the
command below to strip symbols.
```bash
bazel build -c opt --config=android_arm64 mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu:handtrackinggpu
```
2. Install it on a device with:
```bash
adb install bazel-bin/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu/handtrackinggpu.apk
```
### Option 2: Build with Bazel in Android Studio
The MediaPipe project can be imported into Android Studio using the Bazel
plugins. This allows the MediaPipe examples to be built and modified in Android
Studio.
To incorporate MediaPipe into an existing Android Studio project, see these
[instructions](./android_archive_library.md) that use Android Archive (AAR) and
Gradle.
The steps below use Android Studio 3.5 to build and install a MediaPipe example
app:
1. Install and launch Android Studio 3.5.
2. Select `Configure` -> `SDK Manager` -> `SDK Platforms`.
* Verify that Android SDK Platform API Level 28 or 29 is installed.
* Take note of the Android SDK Location, e.g.,
`/usr/local/home/Android/Sdk`.
3. Select `Configure` -> `SDK Manager` -> `SDK Tools`.
* Verify that Android SDK Build-Tools 28 or 29 is installed.
* Verify that Android SDK Platform-Tools 28 or 29 is installed.
* Verify that Android SDK Tools 26.1.1 is installed.
* Verify that Android NDK 17c or above is installed.
* Take note of the Android NDK Location, e.g.,
`/usr/local/home/Android/Sdk/ndk-bundle` or
`/usr/local/home/Android/Sdk/ndk/20.0.5594570`.
4. Set environment variables `$ANDROID_HOME` and `$ANDROID_NDK_HOME` to point
to the installed SDK and NDK.
```bash
export ANDROID_HOME=/usr/local/home/Android/Sdk
# If the NDK libraries are installed by a previous version of Android Studio, do
export ANDROID_NDK_HOME=/usr/local/home/Android/Sdk/ndk-bundle
# If the NDK libraries are installed by Android Studio 3.5, do
export ANDROID_NDK_HOME=/usr/local/home/Android/Sdk/ndk/<version number>
```
5. Select `Configure` -> `Plugins` to install `Bazel`.
6. On Linux, select `File` -> `Settings` -> `Bazel settings`. On macOS, select
`Android Studio` -> `Preferences` -> `Bazel settings`. Then, modify `Bazel
binary location` to be the same as the output of `$ which bazel`.
7. Select `Import Bazel Project`.
* Select `Workspace`: `/path/to/mediapipe` and select `Next`.
* Select `Generate from BUILD file`: `/path/to/mediapipe/BUILD` and select
`Next`.
* Modify `Project View` to be the following and select `Finish`.
```
directories:
# read project settings, e.g., .bazelrc
.
-mediapipe/objc
-mediapipe/examples/ios
targets:
//mediapipe/examples/android/...:all
//mediapipe/java/...:all
android_sdk_platform: android-29
sync_flags:
--host_crosstool_top=@bazel_tools//tools/cpp:toolchain
```
8. Select `Bazel` -> `Sync` -> `Sync project with Build files`.
Note: Even after doing step 4, if you still see the error: `"no such package
'@androidsdk//': Either the path attribute of android_sdk_repository or the
ANDROID_HOME environment variable must be set."`, please modify the
[`WORKSPACE`](https://github.com/google/mediapipe/blob/master/WORKSPACE)
file to point to your SDK and NDK library locations, as below:
```
android_sdk_repository(
name = "androidsdk",
path = "/path/to/android/sdk"
)
android_ndk_repository(
name = "androidndk",
path = "/path/to/android/ndk"
)
```
9. Connect an Android device to the workstation.
10. Select `Run...` -> `Edit Configurations...`.
* Select `Templates` -> `Bazel Command`.
* Enter Target Expression:
`//mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu:handtrackinggpu`
* Enter Bazel command: `mobile-install`.
* Enter Bazel flags: `-c opt --config=android_arm64`.
* Press the `[+]` button to add the new configuration.
* Select `Run` to run the example app on the connected Android device.
## iOS
### Prerequisite
1. Install [Xcode](https://developer.apple.com/xcode/), then install the
Command Line Tools using:
```bash
xcode-select --install
```
2. Install [Bazel](https://bazel.build/).
We recommend using [Homebrew](https://brew.sh/) to get the latest version.
3. Set Python 3.7 as the default Python version and install the Python "six"
library. This is needed for TensorFlow.
```bash
pip3 install --user six
```
4. Clone the MediaPipe repository.
```bash
git clone https://github.com/google/mediapipe.git
```
### Set up a bundle ID prefix
All iOS apps must have a bundle ID, and you must have a provisioning profile
that lets you install an app with that ID onto your phone. To avoid clashes
between different MediaPipe users, you need to configure a unique prefix for the
bundle IDs of our iOS demo apps.
If you have a custom provisioning profile, see
[Custom provisioning](#custom-provisioning) below.
Otherwise, run this command to generate a unique prefix:
```bash
python3 mediapipe/examples/ios/link_local_profiles.py
```
### Create an Xcode project
This allows you to edit and debug one of the example apps in Xcode. It also
allows you to make use of automatic provisioning (see later section).
1. We will use a tool called [Tulsi](https://tulsi.bazel.build/) for generating
Xcode projects from Bazel build configurations.
```bash
# cd out of the mediapipe directory, then:
git clone https://github.com/bazelbuild/tulsi.git
cd tulsi
# remove Xcode version from Tulsi's .bazelrc (see http://github.com/bazelbuild/tulsi#building-and-installing):
sed -i .orig '/xcode_version/d' .bazelrc
# build and run Tulsi:
sh build_and_run.sh
```
This will install `Tulsi.app` inside the `Applications` directory in your
home directory.
2. Open `mediapipe/Mediapipe.tulsiproj` using the Tulsi app.
Tip: If Tulsi displays an error saying "Bazel could not be found", press the
"Bazel..." button in the Packages tab and select the `bazel` executable in
your homebrew `/bin/` directory.
3. Select the MediaPipe config in the Configs tab, then press the Generate
button below. You will be asked for a location to save the Xcode project.
Once the project is generated, it will be opened in Xcode.
If you get an error about bundle IDs, see the
[previous section](#set-up-a-bundle-id-prefix).
### Set up provisioning
To install applications on an iOS device, you need a provisioning profile. There
are two options:
1. Automatic provisioning. This allows you to build and install an app to your
personal device. The provisioning profile is managed by Xcode, and has to be
updated often (it is valid for about a week).
2. Custom provisioning. This uses a provisioning profile associated with an
Apple developer account. These profiles have a longer validity period and
can target multiple devices, but you need a paid developer account with
Apple to obtain one.
#### Automatic provisioning
1. Create an Xcode project for MediaPipe, as discussed
[earlier](#create-an-xcode-project).
2. In the project navigator in the left sidebar, select the "Mediapipe"
project.
3. Select one of the application targets, e.g. HandTrackingGpuApp.
4. Select the "Signing & Capabilities" tab.
5. Check "Automatically manage signing", and confirm the dialog box.
6. Select "_Your Name_ (Personal Team)" in the Team pop-up menu.
7. This set-up needs to be done once for each application you want to install.
Repeat steps 3-6 as needed.
This generates provisioning profiles for each app you have selected. Now we need
to tell Bazel to use them. We have provided a script to make this easier.
1. In the terminal, go to the `mediapipe` directory where you cloned the
repository.
2. Run this command:
```bash
python3 mediapipe/examples/ios/link_local_profiles.py
```
This will find and link the provisioning profile for all applications for which
you have enabled automatic provisioning in Xcode.
Note: once a profile expires, Xcode will generate a new one; you must then run
this script again to link the updated profiles.
#### Custom provisioning
1. Obtain a provisioning profile from Apple.
Tip: You can use this command to see the provisioning profiles you have
previously downloaded using Xcode: `open ~/Library/MobileDevice/"Provisioning
Profiles"`. If there are none, generate and download a profile on
[Apple's developer site](https://developer.apple.com/account/resources/).
1. Symlink or copy your provisioning profile to
`mediapipe/mediapipe/provisioning_profile.mobileprovision`.
```bash
cd mediapipe
ln -s ~/Downloads/MyProvisioningProfile.mobileprovision mediapipe/provisioning_profile.mobileprovision
```
Note: if you had previously set up automatic provisioning, you should remove the
`provisioning_profile.mobileprovision` symlink in each example's directory,
since it will take precedence over the common one. You can also overwrite it
with your own profile if you need a different profile for different apps.
1. Open `mediapipe/examples/ios/bundle_id.bzl`, and change the
`BUNDLE_ID_PREFIX` to a prefix associated with your provisioning profile.
### Build and run an app using Xcode
1. Create the Xcode project, and make sure you have set up either automatic or
custom provisioning.
2. You can now select any of the MediaPipe demos in the target menu, and build
and run them as normal.
Note: When you ask Xcode to run an app, by default it will use the Debug
configuration. Some of our demos are computationally heavy; you may want to use
the Release configuration for better performance.
Tip: To switch build configuration in Xcode, click on the target menu, choose
"Edit Scheme...", select the Run action, and switch the Build Configuration from
Debug to Release. Note that this is set independently for each target.
Tip: On the device, in Settings > General > Device Management, make sure the
developer (yourself) is trusted.
### Build an app using the command line
1. Make sure you have set up either automatic or custom provisioning.
2. Using [MediaPipe Hands](../solutions/hands.md) for example, run:
```bash
bazel build -c opt --config=ios_arm64 mediapipe/examples/ios/handtrackinggpu:HandTrackingGpuApp
```
You may see a permission request from `codesign` in order to sign the app.
Tip: If you are using custom provisioning, you can run this
[script](https://github.com/google/mediapipe/blob/master/build_ios_examples.sh)
to build all MediaPipe iOS example apps.
3. In Xcode, open the `Devices and Simulators` window (command-shift-2).
4. Make sure your device is connected. You will see a list of installed apps.
Press the "+" button under the list, and select the `.ipa` file built by
Bazel.
5. You can now run the app on your device.
Tip: On the device, in Settings > General > Device Management, make sure the
developer (yourself) is trusted.
## Desktop
### Option 1: Running on CPU
1. To build, for example, [MediaPipe Hands](../solutions/hands.md), run:
```bash
bazel build -c opt --define MEDIAPIPE_DISABLE_GPU=1 mediapipe/examples/desktop/hand_tracking:hand_tracking_cpu
```
2. To run the application:
```bash
GLOG_logtostderr=1 bazel-bin/mediapipe/examples/desktop/hand_tracking/hand_tracking_cpu \
--calculator_graph_config_file=mediapipe/graphs/hand_tracking/hand_tracking_desktop_live.pbtxt
```
This will open up your webcam as long as it is connected and on. Any errors
are likely due to your webcam not being accessible.
### Option 2: Running on GPU
Note: This currently works only on Linux. Please first follow
[OpenGL ES Setup on Linux Desktop](./gpu_support.md#opengl-es-setup-on-linux-desktop).
1. To build, for example, [MediaPipe Hands](../solutions/hands.md), run:
```bash
bazel build -c opt --copt -DMESA_EGL_NO_X11_HEADERS --copt -DEGL_NO_X11 \
mediapipe/examples/desktop/hand_tracking:hand_tracking_gpu
```
2. To run the application:
```bash
GLOG_logtostderr=1 bazel-bin/mediapipe/examples/desktop/hand_tracking/hand_tracking_gpu \
--calculator_graph_config_file=mediapipe/graphs/hand_tracking/hand_tracking_mobile.pbtxt
```
This will open up your webcam as long as it is connected and on. Any errors
are likely due to your webcam not being accessible, or GPU drivers not being set
up properly.
## Python
MediaPipe Python package is available on
[PyPI](https://pypi.org/project/mediapipe/), and can be installed simply by `pip
install mediapipe` on Linux and macOS, as described in, for instance,
[Python section in MediaPipe Pose](../solutions/pose.md#python) and in this
[colab](https://mediapipe.page.link/pose_py_colab).
Follow the steps below only if you have local changes and need to build the
Python package from source. Otherwise, we strongly encourage our users to simply
run `pip install mediapipe`, which is more convenient and much faster.
1. Make sure that Bazel and OpenCV are correctly installed and configured for
MediaPipe. Please see [Installation](./install.md) for how to setup Bazel
and OpenCV for MediaPipe on Linux and macOS.
2. Install the following dependencies.
```bash
# Debian or Ubuntu
$ sudo apt install python3-dev
$ sudo apt install python3-venv
$ sudo apt install -y protobuf-compiler
```
```bash
# macOS
$ brew install protobuf
```
3. Activate a Python virtual environment.
```bash
$ python3 -m venv mp_env && source mp_env/bin/activate
```
4. In the virtual environment, go to the MediaPipe repo directory.
5. Install the required Python packages.
```bash
(mp_env)mediapipe$ pip3 install -r requirements.txt
```
6. Generate and install MediaPipe package.
```bash
(mp_env)mediapipe$ python3 setup.py gen_protos
(mp_env)mediapipe$ python3 setup.py install --link-opencv
```


@@ -0,0 +1,62 @@
---
layout: default
title: MediaPipe in C++
parent: Getting Started
has_children: true
has_toc: false
nav_order: 5
---
# MediaPipe in C++
{: .no_toc }
1. TOC
{:toc}
---
Please follow instructions below to build C++ command-line example apps in the
supported MediaPipe [solutions](../solutions/solutions.md). To learn more about
these example apps, start from [Hello World! in C++](./hello_world_cpp.md).
## Building C++ command-line example apps
### Option 1: Running on CPU
1. To build, for example, [MediaPipe Hands](../solutions/hands.md), run:
```bash
bazel build -c opt --define MEDIAPIPE_DISABLE_GPU=1 mediapipe/examples/desktop/hand_tracking:hand_tracking_cpu
```
2. To run the application:
```bash
GLOG_logtostderr=1 bazel-bin/mediapipe/examples/desktop/hand_tracking/hand_tracking_cpu \
--calculator_graph_config_file=mediapipe/graphs/hand_tracking/hand_tracking_desktop_live.pbtxt
```
This will open up your webcam as long as it is connected and on. Any errors
are likely due to your webcam not being accessible.
### Option 2: Running on GPU
Note: This currently works only on Linux. Please first follow
[OpenGL ES Setup on Linux Desktop](./gpu_support.md#opengl-es-setup-on-linux-desktop).
1. To build, for example, [MediaPipe Hands](../solutions/hands.md), run:
```bash
bazel build -c opt --copt -DMESA_EGL_NO_X11_HEADERS --copt -DEGL_NO_X11 \
mediapipe/examples/desktop/hand_tracking:hand_tracking_gpu
```
2. To run the application:
```bash
GLOG_logtostderr=1 bazel-bin/mediapipe/examples/desktop/hand_tracking/hand_tracking_gpu \
--calculator_graph_config_file=mediapipe/graphs/hand_tracking/hand_tracking_mobile.pbtxt
```
This will open up your webcam as long as it is connected and on. Any errors
are likely due to your webcam not being accessible, or GPU drivers not being set
up properly.


@@ -2,7 +2,7 @@
 layout: default
 title: GPU Support
 parent: Getting Started
-nav_order: 6
+nav_order: 7
 ---

 # GPU Support


@@ -1,8 +1,9 @@
 ---
 layout: default
 title: Hello World! on Android
-parent: Getting Started
-nav_order: 3
+parent: MediaPipe on Android
+grand_parent: Getting Started
+nav_order: 1
 ---

 # Hello World! on Android
@@ -496,7 +497,7 @@ CameraHelper.CameraFacing cameraFacing =
     applicationInfo.metaData.getBoolean("cameraFacingFront", false)
         ? CameraHelper.CameraFacing.FRONT
         : CameraHelper.CameraFacing.BACK;
-cameraHelper.startCamera(this, cameraFacing, /*surfaceTexture=*/ null);
+cameraHelper.startCamera(this, cameraFacing, /*unusedSurfaceTexture=*/ null);
 ```

 At this point, the application should build successfully. However, when you run


@@ -1,11 +1,12 @@
 ---
 layout: default
-title: Hello World! on Desktop (C++)
-parent: Getting Started
-nav_order: 5
+title: Hello World! in C++
+parent: MediaPipe in C++
+grand_parent: Getting Started
+nav_order: 1
 ---

-# Hello World! on Desktop (C++)
+# Hello World! in C++
 {: .no_toc }

 1. TOC


@@ -1,8 +1,9 @@
 ---
 layout: default
 title: Hello World! on iOS
-parent: Getting Started
-nav_order: 4
+parent: MediaPipe on iOS
+grand_parent: Getting Started
+nav_order: 1
 ---

 # Hello World! on iOS
@@ -193,8 +194,7 @@ bazel build -c opt --config=ios_arm64 mediapipe/examples/ios/helloworld:HelloWor
 Then, go back to XCode, open Window > Devices and Simulators, select your
 device, and add the `.ipa` file generated by the command above to your device.
-Here is the document on [setting up and compiling](./building_examples.md#ios)
-iOS MediaPipe apps.
+Here is the document on [setting up and compiling](./ios.md) iOS MediaPipe apps.

 Open the application on your device. Since it is empty, it should display a
 blank white screen.


@@ -2,7 +2,7 @@
 layout: default
 title: Installation
 parent: Getting Started
-nav_order: 1
+nav_order: 6
 ---

 # Installation
@@ -23,32 +23,21 @@ Note: To make Mediapipe work with TensorFlow, please set Python 3.7 as the
 default Python version and install the Python "six" library by running `pip3
 install --user six`.

-Note: To build and run Android example apps, see these
-[instructions](./building_examples.md#android). To build and run iOS example
-apps, see these [instructions](./building_examples.md#ios).
-
 ## Installing on Debian and Ubuntu

-1. Checkout MediaPipe repository.
-
-   ```bash
-   $ git clone https://github.com/google/mediapipe.git
-
-   # Change directory into MediaPipe root directory
-   $ cd mediapipe
-   ```
-
-2. Install Bazel.
+1. Install Bazel.

    Follow the official
    [Bazel documentation](https://docs.bazel.build/versions/master/install-ubuntu.html)
    to install Bazel 3.4 or higher.

-   For Nvidia Jetson and Raspberry Pi devices with ARM Ubuntu, Bazel needs to
-   be built from source.
+   For Nvidia Jetson and Raspberry Pi devices with ARM Ubuntu only, Bazel needs
+   to be built from source:

    ```bash
    # For Bazel 3.4.0
-   mkdir $HOME/bazel-3.4.0
-   cd $HOME/bazel-3.4.0
    wget https://github.com/bazelbuild/bazel/releases/download/3.4.0/bazel-3.4.0-dist.zip
    sudo apt-get install build-essential openjdk-8-jdk python zip unzip
    unzip bazel-3.4.0-dist.zip
@@ -56,6 +45,16 @@ apps, see these [instructions](./building_examples.md#ios).
    sudo cp output/bazel /usr/local/bin/
    ```

+2. Checkout MediaPipe repository.
+
+   ```bash
+   $ cd $HOME
+   $ git clone https://github.com/google/mediapipe.git
+
+   # Change directory into MediaPipe root directory
+   $ cd mediapipe
+   ```
+
 3. Install OpenCV and FFmpeg.

    Option 1. Use package manager tool to install the pre-compiled OpenCV
@@ -174,7 +173,7 @@ apps, see these [instructions](./building_examples.md#ios).
    # when building GPU examples.
    ```

-5. Run the [Hello World desktop example](./hello_world_desktop.md).
+5. Run the [Hello World! in C++ example](./hello_world_cpp.md).

    ```bash
    $ export GLOG_logtostderr=1
@@ -208,7 +207,13 @@ build issues.
 **Disclaimer**: Running MediaPipe on CentOS is experimental.

-1. Checkout MediaPipe repository.
+1. Install Bazel.
+
+   Follow the official
+   [Bazel documentation](https://docs.bazel.build/versions/master/install-redhat.html)
+   to install Bazel 3.4 or higher.
+
+2. Checkout MediaPipe repository.

    ```bash
    $ git clone https://github.com/google/mediapipe.git
@@ -217,12 +222,6 @@ build issues.
    $ cd mediapipe
    ```

-2. Install Bazel.
-
-   Follow the official
-   [Bazel documentation](https://docs.bazel.build/versions/master/install-redhat.html)
-   to install Bazel 3.4 or higher.
-
 3. Install OpenCV.

    Option 1. Use package manager tool to install the pre-compiled version.
@@ -304,7 +303,7 @@ build issues.
    )
    ```

-4. Run the [Hello World desktop example](./hello_world_desktop.md).
+4. Run the [Hello World! in C++ example](./hello_world_cpp.md).

    ```bash
    $ export GLOG_logtostderr=1
@@ -337,15 +336,7 @@ build issues.
    * Install [Xcode](https://developer.apple.com/xcode/) and its Command Line
      Tools by `xcode-select --install`.

-2. Checkout MediaPipe repository.
-
-   ```bash
-   $ git clone https://github.com/google/mediapipe.git
-   $ cd mediapipe
-   ```
-
-3. Install Bazel.
+2. Install Bazel.

    Option 1. Use package manager tool to install Bazel
@@ -358,6 +349,14 @@ build issues.
    [Bazel documentation](https://docs.bazel.build/versions/master/install-os-x.html#install-with-installer-mac-os-x)
    to install Bazel 3.4 or higher.

+3. Checkout MediaPipe repository.
+
+   ```bash
+   $ git clone https://github.com/google/mediapipe.git
+   $ cd mediapipe
+   ```
+
 4. Install OpenCV and FFmpeg.

    Option 1. Use HomeBrew package manager tool to install the pre-compiled
@@ -439,7 +438,7 @@ build issues.
    $ pip3 install --user six
    ```

-6. Run the [Hello World desktop example](./hello_world_desktop.md).
+6. Run the [Hello World! in C++ example](./hello_world_cpp.md).

    ```bash
    $ export GLOG_logtostderr=1
@@ -540,7 +539,7 @@ next section.
    )
    ```

-9. Run the [Hello World desktop example](./hello_world_desktop.md).
+9. Run the [Hello World! in C++ example](./hello_world_cpp.md).

    Note: For building MediaPipe on Windows, please add `--action_env
    PYTHON_BIN_PATH="C://path//to//python.exe"` to the build command.
@@ -673,7 +672,7 @@ cameras. Alternatively, you use a video file as input.
    )
    ```

-8. Run the [Hello World desktop example](./hello_world_desktop.md).
+8. Run the [Hello World! in C++ example](./hello_world_cpp.md).

    ```bash
    username@DESKTOP-TMVLBJ1:~/mediapipe$ export GLOG_logtostderr=1
@@ -729,7 +728,7 @@ This will use a Docker image that will isolate mediapipe's installation from the
    # Successfully tagged mediapipe:latest
    ```

-3. Run the [Hello World desktop example](./hello_world_desktop.md).
+3. Run the [Hello World! in C++ example](./hello_world_cpp.md).

    ```bash
    $ docker run -it --name mediapipe mediapipe:latest

docs/getting_started/ios.md

@@ -0,0 +1,222 @@
---
layout: default
title: MediaPipe on iOS
parent: Getting Started
has_children: true
has_toc: false
nav_order: 2
---
# MediaPipe on iOS
{: .no_toc }
1. TOC
{:toc}
---
Please follow instructions below to build iOS example apps in the supported
MediaPipe [solutions](../solutions/solutions.md). To learn more about these
example apps, start from
[Hello World! on iOS](./hello_world_ios.md).
## Building iOS example apps
### Prerequisite
1. Install MediaPipe following these [instructions](./install.md).
2. Install [Xcode](https://developer.apple.com/xcode/), then install the
Command Line Tools using:
```bash
xcode-select --install
```
3. Install [Bazel](https://bazel.build/).
We recommend using [Homebrew](https://brew.sh/) to get the latest version.
4. Set Python 3.7 as the default Python version and install the Python "six"
library. This is needed for TensorFlow.
```bash
pip3 install --user six
```
5. Clone the MediaPipe repository.
```bash
git clone https://github.com/google/mediapipe.git
```
### Set up a bundle ID prefix
All iOS apps must have a bundle ID, and you must have a provisioning profile
that lets you install an app with that ID onto your phone. To avoid clashes
between different MediaPipe users, you need to configure a unique prefix for the
bundle IDs of our iOS demo apps.
If you have a custom provisioning profile, see
[Custom provisioning](#custom-provisioning) below.
Otherwise, run this command to generate a unique prefix:
```bash
python3 mediapipe/examples/ios/link_local_profiles.py
```
### Create an Xcode project
This allows you to edit and debug one of the example apps in Xcode. It also
allows you to make use of automatic provisioning (see later section).
1. We will use a tool called [Tulsi](https://tulsi.bazel.build/) for generating
Xcode projects from Bazel build configurations.
```bash
# cd out of the mediapipe directory, then:
git clone https://github.com/bazelbuild/tulsi.git
cd tulsi
# remove Xcode version from Tulsi's .bazelrc (see http://github.com/bazelbuild/tulsi#building-and-installing):
sed -i .orig '/xcode_version/d' .bazelrc
# build and run Tulsi:
sh build_and_run.sh
```
This will install `Tulsi.app` inside the `Applications` directory in your
home directory.
2. Open `mediapipe/Mediapipe.tulsiproj` using the Tulsi app.
Tip: If Tulsi displays an error saying "Bazel could not be found", press the
"Bazel..." button in the Packages tab and select the `bazel` executable in
your homebrew `/bin/` directory.
3. Select the MediaPipe config in the Configs tab, then press the Generate
button below. You will be asked for a location to save the Xcode project.
Once the project is generated, it will be opened in Xcode.
If you get an error about bundle IDs, see the
[previous section](#set-up-a-bundle-id-prefix).
### Set up provisioning
To install applications on an iOS device, you need a provisioning profile. There
are two options:
1. Automatic provisioning. This allows you to build and install an app to your
personal device. The provisioning profile is managed by Xcode, and has to be
updated often (it is valid for about a week).
2. Custom provisioning. This uses a provisioning profile associated with an
Apple developer account. These profiles have a longer validity period and
can target multiple devices, but you need a paid developer account with
Apple to obtain one.
#### Automatic provisioning
1. Create an Xcode project for MediaPipe, as discussed
[earlier](#create-an-xcode-project).
2. In the project navigator in the left sidebar, select the "Mediapipe"
project.
3. Select one of the application targets, e.g. HandTrackingGpuApp.
4. Select the "Signing & Capabilities" tab.
5. Check "Automatically manage signing", and confirm the dialog box.
6. Select "_Your Name_ (Personal Team)" in the Team pop-up menu.
7. This set-up needs to be done once for each application you want to install.
Repeat steps 3-6 as needed.
This generates provisioning profiles for each app you have selected. Now we need
to tell Bazel to use them. We have provided a script to make this easier.
1. In the terminal, go to the `mediapipe` directory where you cloned the
repository.
2. Run this command:
```bash
python3 mediapipe/examples/ios/link_local_profiles.py
```
This will find and link the provisioning profile for all applications for which
you have enabled automatic provisioning in Xcode.
Note: once a profile expires, Xcode will generate a new one; you must then run
this script again to link the updated profiles.
#### Custom provisioning
1. Obtain a provisioning profile from Apple.
Tip: You can use this command to see the provisioning profiles you have
previously downloaded using Xcode: `open ~/Library/MobileDevice/"Provisioning
Profiles"`. If there are none, generate and download a profile on
[Apple's developer site](https://developer.apple.com/account/resources/).
1. Symlink or copy your provisioning profile to
`mediapipe/mediapipe/provisioning_profile.mobileprovision`.
```bash
cd mediapipe
ln -s ~/Downloads/MyProvisioningProfile.mobileprovision mediapipe/provisioning_profile.mobileprovision
```
Note: if you had previously set up automatic provisioning, you should remove the
`provisioning_profile.mobileprovision` symlink in each example's directory,
since it will take precedence over the common one. You can also overwrite it
with your own profile if you need a different profile for different apps.
1. Open `mediapipe/examples/ios/bundle_id.bzl`, and change the
`BUNDLE_ID_PREFIX` to a prefix associated with your provisioning profile.
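   For example, if your provisioning profile covers bundle IDs under
   `com.example`, you could point `BUNDLE_ID_PREFIX` at it like this (the
   prefix value below is purely illustrative; substitute one that your own
   profile actually covers):
   ```bash
   # Illustrative only: use a prefix covered by your provisioning profile.
   sed -i .orig 's/^BUNDLE_ID_PREFIX = .*/BUNDLE_ID_PREFIX = "com.example.mediapipe"/' mediapipe/examples/ios/bundle_id.bzl
   ```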
### Build and run an app using Xcode
1. Create the Xcode project, and make sure you have set up either automatic or
custom provisioning.
2. You can now select any of the MediaPipe demos in the target menu, and build
and run them as normal.
Note: When you ask Xcode to run an app, by default it will use the Debug
configuration. Some of our demos are computationally heavy; you may want to use
the Release configuration for better performance.
Tip: To switch build configuration in Xcode, click on the target menu, choose
"Edit Scheme...", select the Run action, and switch the Build Configuration from
Debug to Release. Note that this is set independently for each target.
Tip: On the device, in Settings > General > Device Management, make sure the
developer (yourself) is trusted.
### Build an app using the command line
1. Make sure you have set up either automatic or custom provisioning.
2. Using [MediaPipe Hands](../solutions/hands.md) for example, run:
```bash
bazel build -c opt --config=ios_arm64 mediapipe/examples/ios/handtrackinggpu:HandTrackingGpuApp
```
You may see a permission request from `codesign` in order to sign the app.
Tip: If you are using custom provisioning, you can run this
[script](https://github.com/google/mediapipe/blob/master/build_ios_examples.sh)
to build all MediaPipe iOS example apps.
3. In Xcode, open the `Devices and Simulators` window (command-shift-2).
4. Make sure your device is connected. You will see a list of installed apps.
Press the "+" button under the list, and select the `.ipa` file built by
Bazel.
5. You can now run the app on your device.
Tip: On the device, in Settings > General > Device Management, make sure the
developer (yourself) is trusted.
@ -0,0 +1,88 @@
---
layout: default
title: MediaPipe in JavaScript
parent: Getting Started
nav_order: 4
---
# MediaPipe in JavaScript
{: .no_toc }
1. TOC
{:toc}
---
## Ready-to-use JavaScript Solutions
MediaPipe currently offers the following solutions:
Solution | NPM Package | Example
----------------- | ----------------------------- | -------
[Face Mesh][F-pg] | [@mediapipe/face_mesh][F-npm] | [mediapipe.dev/demo/face_mesh][F-demo]
[Hands][H-pg] | [@mediapipe/hands][H-npm] | [mediapipe.dev/demo/hands][H-demo]
[Pose][P-pg] | [@mediapipe/pose][P-npm] | [mediapipe.dev/demo/pose][P-demo]
[Holistic][Ho-pg] | [@mediapipe/holistic][Ho-npm] | [mediapipe.dev/demo/holistic][Ho-demo]
Click on a solution link above for more information, including API and code
snippets.
The quickest way to get acclimated is to look at the examples above. Each demo
has a link to a [CodePen][codepen] so that you can edit the code and try it
yourself. We have included a number of utility packages to help you get started:
* [@mediapipe/drawing_utils][draw-npm] - Utilities to draw landmarks and
connectors.
* [@mediapipe/camera_utils][cam-npm] - Utilities to operate the camera.
* [@mediapipe/control_utils][ctrl-npm] - Utilities to show sliders and FPS
widgets.
Note: See these demos and more at [MediaPipe on CodePen][codepen]
All of these solutions are available on [NPM][npm]. You can install any package
locally with `npm install`, for example:
```
npm install @mediapipe/holistic
```
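The utility packages listed above can be installed the same way, for example:
```
npm install @mediapipe/drawing_utils @mediapipe/camera_utils @mediapipe/control_utils
```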
If you would rather not install these locally, you can rely on a CDN (e.g.,
[jsDelivr](https://www.jsdelivr.com/)), which allows you to add the scripts
directly to your HTML:
```
<head>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/drawing_utils@0.1/drawing_utils.js" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/holistic@0.1/holistic.js" crossorigin="anonymous"></script>
</head>
```
Note: You can specify version numbers for both NPM and jsDelivr. Versions are
structured as `<major>.<minor>.<build>`. To prevent breaking changes from
affecting your work, pin your request to a `<major>.<minor>` version, e.g.,
`@mediapipe/holistic@0.1`.
[Ho-pg]: ../solutions/holistic#javascript-solution-api
[F-pg]: ../solutions/face_mesh#javascript-solution-api
[H-pg]: ../solutions/hands#javascript-solution-api
[P-pg]: ../solutions/pose#javascript-solution-api
[Ho-npm]: https://www.npmjs.com/package/@mediapipe/holistic
[F-npm]: https://www.npmjs.com/package/@mediapipe/face_mesh
[H-npm]: https://www.npmjs.com/package/@mediapipe/hands
[P-npm]: https://www.npmjs.com/package/@mediapipe/pose
[draw-npm]: https://www.npmjs.com/package/@mediapipe/drawing_utils
[cam-npm]: https://www.npmjs.com/package/@mediapipe/camera_utils
[ctrl-npm]: https://www.npmjs.com/package/@mediapipe/control_utils
[Ho-jsd]: https://www.jsdelivr.com/package/npm/@mediapipe/holistic
[F-jsd]: https://www.jsdelivr.com/package/npm/@mediapipe/face_mesh
[H-jsd]: https://www.jsdelivr.com/package/npm/@mediapipe/hands
[P-jsd]: https://www.jsdelivr.com/package/npm/@mediapipe/pose
[Ho-pen]: https://code.mediapipe.dev/codepen/holistic
[F-pen]: https://code.mediapipe.dev/codepen/face_mesh
[H-pen]: https://code.mediapipe.dev/codepen/hands
[P-pen]: https://code.mediapipe.dev/codepen/pose
[Ho-demo]: https://mediapipe.dev/demo/holistic
[F-demo]: https://mediapipe.dev/demo/face_mesh
[H-demo]: https://mediapipe.dev/demo/hands
[P-demo]: https://mediapipe.dev/demo/pose
[npm]: https://www.npmjs.com/package/@mediapipe
[codepen]: https://code.mediapipe.dev/codepen
@ -0,0 +1,120 @@
---
layout: default
title: MediaPipe in Python
parent: Getting Started
has_children: true
has_toc: false
nav_order: 3
---
# MediaPipe in Python
{: .no_toc }
1. TOC
{:toc}
---
## Ready-to-use Python Solutions
MediaPipe offers ready-to-use yet customizable Python solutions as a prebuilt
Python package. The MediaPipe Python package is available on
[PyPI](https://pypi.org/project/mediapipe/) for Linux, macOS and Windows.
You can, for instance, activate a Python virtual environment:
```bash
$ python3 -m venv mp_env && source mp_env/bin/activate
```
Install the MediaPipe Python package and start the Python interpreter:
```bash
(mp_env)$ pip install mediapipe
(mp_env)$ python3
```
In the Python interpreter, import the package and start using one of the solutions:
```python
import mediapipe as mp
mp_face_mesh = mp.solutions.face_mesh
```
Tip: Use command `deactivate` to later exit the Python virtual environment.
To learn more about configuration options and usage examples, please find
details in each solution via the links below:
* [MediaPipe Face Mesh](../solutions/face_mesh#python-solution-api)
* [MediaPipe Hands](../solutions/hands#python-solution-api)
* [MediaPipe Pose](../solutions/pose#python-solution-api)
* [MediaPipe Holistic](../solutions/holistic#python-solution-api)
## MediaPipe on Google Colab
* [MediaPipe Face Mesh Colab](https://mediapipe.page.link/face_mesh_py_colab)
* [MediaPipe Hands Colab](https://mediapipe.page.link/hands_py_colab)
* [MediaPipe Pose Colab](https://mediapipe.page.link/pose_py_colab)
* [MediaPipe Holistic Colab](https://mediapipe.page.link/holistic_py_colab)
## MediaPipe Python Framework
The ready-to-use solutions are built upon the MediaPipe Python framework, which
can be used by advanced users to run their own MediaPipe graphs in Python.
Please see [here](./python_framework.md) for more info.
## Building MediaPipe Python Package
Follow the steps below only if you have local changes and need to build the
Python package from source. Otherwise, we strongly encourage our users to simply
run `pip install mediapipe` to use the ready-to-use solutions, which is more
convenient and much faster.
1. Make sure that Bazel and OpenCV are correctly installed and configured for
   MediaPipe. Please see [Installation](./install.md) for how to set up Bazel
and OpenCV for MediaPipe on Linux and macOS.
2. Install the following dependencies.
Debian or Ubuntu:
```bash
$ sudo apt install python3-dev
$ sudo apt install python3-venv
$ sudo apt install -y protobuf-compiler
```
macOS:
```bash
$ brew install protobuf
```
Windows:
Download the latest protoc win64 zip from
[the Protobuf GitHub repo](https://github.com/protocolbuffers/protobuf/releases),
unzip the file, and copy the protoc.exe executable to a preferred
location. Please ensure that location is added into the Path environment
variable.
3. Activate a Python virtual environment.
```bash
$ python3 -m venv mp_env && source mp_env/bin/activate
```
4. In the virtual environment, go to the MediaPipe repo directory.
5. Install the required Python packages.
```bash
(mp_env)mediapipe$ pip3 install -r requirements.txt
```
6. Generate and install MediaPipe package.
```bash
(mp_env)mediapipe$ python3 setup.py gen_protos
(mp_env)mediapipe$ python3 setup.py install --link-opencv
```
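   As a quick sanity check (a sketch, not an official step), you can confirm
   that the locally built package imports cleanly from within the virtual
   environment:
   ```bash
   (mp_env)mediapipe$ python3 -c 'import mediapipe as mp; print(mp.solutions.hands)'
   ```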
@ -0,0 +1,268 @@
---
layout: default
title: MediaPipe Python Framework
parent: MediaPipe in Python
grand_parent: Getting Started
nav_order: 1
---
# MediaPipe Python Framework
{: .no_toc }
1. TOC
{:toc}
---
The MediaPipe Python framework grants direct access to the core components of
the MediaPipe C++ framework such as Timestamp, Packet, and CalculatorGraph,
whereas the
[ready-to-use Python solutions](./python.md#ready-to-use-python-solutions) hide
the technical details of the framework and simply return the readable model
inference results back to the callers.
MediaPipe framework sits on top of
[the pybind11 library](https://pybind11.readthedocs.io/en/stable/index.html).
The C++ core framework is exposed in Python via a C++/Python language binding.
The content below assumes that the reader already has a basic understanding of
the MediaPipe C++ framework. Otherwise, you can find useful information in
[Framework Concepts](../framework_concepts/framework_concepts.md).
### Packet
The packet is the basic data flow unit in MediaPipe. A packet consists of a
numeric timestamp and a shared pointer to an immutable payload. In Python, a
MediaPipe packet can be created by calling one of the packet creator methods in
the
[`mp.packet_creator`](https://github.com/google/mediapipe/tree/master/mediapipe/python/pybind/packet_creator.cc)
module. Correspondingly, the packet payload can be retrieved by using one of the
packet getter methods in the
[`mp.packet_getter`](https://github.com/google/mediapipe/tree/master/mediapipe/python/pybind/packet_getter.cc)
module. Note that the packet payload becomes **immutable** after packet
creation. Thus, the modification of the retrieved packet content doesn't affect
the actual payload in the packet. MediaPipe framework Python API supports the
most commonly used data types of MediaPipe (e.g., ImageFrame, Matrix, Protocol
Buffers, and the primitive data types) in the core binding. The comprehensive
table below shows the type mappings between the Python and the C++ data type
along with the packet creator and the content getter method for each data type
supported by the MediaPipe Python framework API.
Python Data Type | C++ Data Type | Packet Creator | Content Getter
------------------------------------ | ----------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------ | --------------
bool | bool | create_bool(True) | get_bool(packet)
int or np.intc | int_t | create_int(1) | get_int(packet)
int or np.int8 | int8_t | create_int8(2**7-1) | get_int(packet)
int or np.int16 | int16_t | create_int16(2**15-1) | get_int(packet)
int or np.int32 | int32_t | create_int32(2**31-1) | get_int(packet)
int or np.int64 | int64_t | create_int64(2**63-1) | get_int(packet)
int or np.uint8 | uint8_t | create_uint8(2**8-1) | get_uint(packet)
int or np.uint16 | uint16_t | create_uint16(2**16-1) | get_uint(packet)
int or np.uint32 | uint32_t | create_uint32(2**32-1) | get_uint(packet)
int or np.uint64 | uint64_t | create_uint64(2**64-1) | get_uint(packet)
float or np.float32 | float | create_float(1.1) | get_float(packet)
float or np.double | double | create_double(1.1) | get_float(packet)
str (UTF-8) | std::string | create_string('abc') | get_str(packet)
bytes | std::string | create_string(b'\xd0\xd0\xd0') | get_bytes(packet)
mp.Packet | mp::Packet | create_packet(p) | get_packet(packet)
List\[bool\] | std::vector\<bool\> | create_bool_vector(\[True, False\]) | get_bool_list(packet)
List\[int\] or List\[np.intc\] | int\[\] | create_int_array(\[1, 2, 3\]) | get_int_list(packet, size=10)
List\[int\] or List\[np.intc\] | std::vector\<int\> | create_int_vector(\[1, 2, 3\]) | get_int_list(packet)
List\[float\] or List\[np.float\] | float\[\] | create_float_array(\[0.1, 0.2\]) | get_float_list(packet, size=10)
List\[float\] or List\[np.float\] | std::vector\<float\> | create_float_vector(\[0.1, 0.2\]) | get_float_list(packet, size=10)
List\[str\] | std::vector\<std::string\> | create_string_vector(\['a'\]) | get_str_list(packet)
List\[mp.Packet\] | std::vector\<mp::Packet\> | create_packet_vector(<br>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;\[packet1, packet2\]) | get_packet_list(p)
Mapping\[str, Packet\] | std::map<std::string, Packet> | create_string_to_packet_map(<br>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;{'a': packet1, 'b': packet2}) | get_str_to_packet_dict(packet)
np.ndarray<br>(cv.mat and PIL.Image) | mp::ImageFrame | create_image_frame(<br>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;format=ImageFormat.SRGB,<br>&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;&nbsp;data=mat) | get_image_frame(packet)
np.ndarray | mp::Matrix | create_matrix(data) | get_matrix(packet)
Google Proto Message | Google Proto Message | create_proto(proto) | get_proto(packet)
List\[Proto\] | std::vector\<Proto\> | create_proto_vector(proto_list) | get_proto_list(packet)
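For example, a minimal round trip through a creator and its matching getter
from the table above looks like this:

```python
import mediapipe as mp

# Create packets and read their (immutable) payloads back.
int_packet = mp.packet_creator.create_int(42)
print(mp.packet_getter.get_int(int_packet))  # 42

str_packet = mp.packet_creator.create_string('hello')
print(mp.packet_getter.get_str(str_packet))  # 'hello'
```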
It's not uncommon for users to create custom C++ classes and send those into
the graphs and calculators. To allow the custom classes to be used in Python
with MediaPipe, you may extend the Packet API for a new data type in the
following steps:
1. Write the pybind11
[class binding code](https://pybind11.readthedocs.io/en/stable/advanced/classes.html)
or
[a custom type caster](https://pybind11.readthedocs.io/en/stable/advanced/cast/custom.html?highlight=custom%20type%20caster)
for the custom type in a cc file.
```c++
#include "path/to/my_type/header/file.h"
#include "pybind11/pybind11.h"
namespace py = pybind11;
PYBIND11_MODULE(my_type_binding, m) {
// Write binding code or a custom type caster for MyType.
py::class_<MyType>(m, "MyType")
.def(py::init<>())
.def(...);
}
```
2. Create a new packet creator and getter method of the custom type in a
separate cc file.
```c++
#include "path/to/my_type/header/file.h"
#include "mediapipe/framework/packet.h"
#include "pybind11/pybind11.h"
namespace mediapipe {
namespace py = pybind11;
PYBIND11_MODULE(my_packet_methods, m) {
  m.def(
      "create_my_type",
      [](const MyType& my_type) { return MakePacket<MyType>(my_type); });

  m.def(
      "get_my_type",
      [](const Packet& packet) {
        if (!packet.ValidateAsType<MyType>().ok()) {
          // Raise a Python ValueError if the packet holds a different type.
          PyErr_SetString(PyExc_ValueError, "Packet data type mismatch.");
          throw py::error_already_set();
        }
        return packet.Get<MyType>();
      });
}
}  // namespace mediapipe
```
3. Add two bazel build rules for the custom type binding and the new packet
methods in the BUILD file.
```
load("@pybind11_bazel//:build_defs.bzl", "pybind_extension")
pybind_extension(
name = "my_type_binding",
srcs = ["my_type_binding.cc"],
deps = [":my_type"],
)
pybind_extension(
name = "my_packet_methods",
srcs = ["my_packet_methods.cc"],
deps = [
":my_type",
"//mediapipe/framework:packet"
],
)
```
4. Build the pybind extension targets (with the suffix `.so`) with Bazel and
   move the generated dynamic libraries into one of the `$LD_LIBRARY_PATH`
   directories (see the example commands after this list).
5. Use the binding modules in Python.
```python
import my_type_binding
import my_packet_methods
packet = my_packet_methods.create_my_type(my_type_binding.MyType())
my_type = my_packet_methods.get_my_type(packet)
```
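As a sketch of step 4, assuming the two targets above live in a placeholder
package `//path/to/my_type`, the build-and-copy step might look like:

```bash
# Build the pybind extensions (the package path is a placeholder).
bazel build -c opt //path/to/my_type:my_type_binding //path/to/my_type:my_packet_methods

# Copy the generated .so files into a directory that is on $LD_LIBRARY_PATH
# (the destination below is only an example).
cp -f bazel-bin/path/to/my_type/*.so /usr/local/lib/
```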
### Timestamp
Each packet contains a timestamp that is in units of microseconds. In Python,
the Packet API provides a convenience method `packet.at()` to define the numeric
timestamp of a packet. More generally, `packet.timestamp` is the packet class
property for accessing the underlying timestamp. To convert a Unix epoch time
to a MediaPipe timestamp,
[the Timestamp API](https://github.com/google/mediapipe/tree/master/mediapipe/python/pybind/timestamp.cc)
offers the method `mp.Timestamp.from_seconds()`.
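For instance (a short sketch; the string packet is just an example payload):

```python
import time
import mediapipe as mp

# Assign a numeric timestamp, in microseconds, to a packet.
packet = mp.packet_creator.create_string('abc').at(100)
print(packet.timestamp)  # 100

# Convert a Unix epoch time (in seconds) to a MediaPipe timestamp.
ts = mp.Timestamp.from_seconds(time.time())
print(ts)
```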
### ImageFrame
ImageFrame is the container for storing an image or a video frame. Formats
supported by ImageFrame are listed in
[the ImageFormat enum](https://github.com/google/mediapipe/tree/master/mediapipe/python/pybind/image_frame.cc#l=170).
Pixels are encoded row-major with interleaved color components, and ImageFrame
supports uint8, uint16, and float as its data types. MediaPipe provides
[an ImageFrame Python API](https://github.com/google/mediapipe/tree/master/mediapipe/python/pybind/image_frame.cc)
to access the ImageFrame C++ class. In Python, the easiest way to retrieve the
pixel data is to call `image_frame.numpy_view()` to get a numpy ndarray. Note
that the returned numpy ndarray, a reference to the internal pixel data, is not
writable. If you need to modify the numpy ndarray, make an explicit copy of it
first. When MediaPipe takes a numpy
ndarray to make an ImageFrame, it assumes that the data is stored contiguously.
Correspondingly, the pixel data of an ImageFrame will be realigned to be
contiguous when it's returned to the Python side.
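A minimal sketch of wrapping a numpy array into an ImageFrame packet and
reading it back:

```python
import numpy as np
import mediapipe as mp

# Wrap a contiguous RGB uint8 array into an ImageFrame packet.
rgb = np.zeros((480, 640, 3), dtype=np.uint8)
packet = mp.packet_creator.create_image_frame(
    format=mp.ImageFormat.SRGB, data=rgb)

# The numpy view references the internal pixel data and is read-only;
# make an explicit copy before modifying it.
image_frame = mp.packet_getter.get_image_frame(packet)
pixels = np.copy(image_frame.numpy_view())
```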
### Graph
In MediaPipe, all processing takes place within the context of a
CalculatorGraph.
[The CalculatorGraph Python API](https://github.com/google/mediapipe/tree/master/mediapipe/python/pybind/calculator_graph.cc)
is a direct binding to the C++ CalculatorGraph class. The major difference is
that the CalculatorGraph Python API raises a Python error instead of returning
a non-OK Status when an error occurs. Therefore, as a Python user, you can handle
the exceptions as you normally do. The life cycle of a CalculatorGraph contains
three stages: initialization and setup, graph run, and graph shutdown.
1. Initialize a CalculatorGraph with a CalculatorGraphConfig protobuf or binary
protobuf file, and provide callback method(s) to observe the output
stream(s).
Option 1. Initialize a CalculatorGraph with a CalculatorGraphConfig protobuf
or its text representation, and observe the output stream(s):
```python
import mediapipe as mp
config_text = """
input_stream: 'in_stream'
output_stream: 'out_stream'
node {
calculator: 'PassThroughCalculator'
input_stream: 'in_stream'
output_stream: 'out_stream'
}
"""
graph = mp.CalculatorGraph(graph_config=config_text)
output_packets = []
graph.observe_output_stream(
'out_stream',
lambda stream_name, packet:
output_packets.append(mp.packet_getter.get_str(packet)))
```
Option 2. Initialize a CalculatorGraph with a binary protobuf file, and
observe the output stream(s).
```python
import mediapipe as mp
# resources dependency
graph = mp.CalculatorGraph(
binary_graph=os.path.join(
resources.GetRunfilesDir(), 'path/to/your/graph.binarypb'))
graph.observe_output_stream(
'out_stream',
lambda stream_name, packet: print(f'Get {packet} from {stream_name}'))
```
2. Start the graph run and feed packets into the graph.
```python
graph.start_run()
graph.add_packet_to_input_stream(
'in_stream', mp.packet_creator.create_str('abc').at(0))
rgb_img = cv2.cvtColor(cv2.imread('/path/to/your/image.png'), cv2.COLOR_BGR2RGB)
graph.add_packet_to_input_stream(
'in_stream',
mp.packet_creator.create_image_frame(format=mp.ImageFormat.SRGB,
data=rgb_img).at(1))
```
3. Close the graph when finished. You may restart the graph for another run
   after the call to `close()`.
```python
graph.close()
```
The Python script can be run with your local Python runtime.
@ -21,25 +21,26 @@ ML solutions for live and streaming media.
## ML solutions in MediaPipe

Face Detection | Face Mesh | Iris | Hands | Pose | Holistic
:------------: | :-------: | :--: | :---: | :--: | :------:
[![face_detection](images/mobile/face_detection_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/face_detection) | [![face_mesh](images/mobile/face_mesh_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/face_mesh) | [![iris](images/mobile/iris_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/iris) | [![hand](images/mobile/hand_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/hands) | [![pose](images/mobile/pose_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/pose) | [![hair_segmentation](images/mobile/holistic_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/holistic)

Hair Segmentation | Object Detection | Box Tracking | Instant Motion Tracking | Objectron | KNIFT
:---------------: | :--------------: | :----------: | :---------------------: | :-------: | :---:
[![hair_segmentation](images/mobile/hair_segmentation_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/hair_segmentation) | [![object_detection](images/mobile/object_detection_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/object_detection) | [![box_tracking](images/mobile/object_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/box_tracking) | [![instant_motion_tracking](images/mobile/instant_motion_tracking_android_small.gif)](https://google.github.io/mediapipe/solutions/instant_motion_tracking) | [![objectron](images/mobile/objectron_chair_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/objectron) | [![knift](images/mobile/template_matching_android_cpu_small.gif)](https://google.github.io/mediapipe/solutions/knift)

<!-- []() in the first cell is needed to preserve table formatting in GitHub Pages. -->
<!-- Whenever this table is updated, paste a copy to solutions/solutions.md. -->

[]() | [Android](https://google.github.io/mediapipe/getting_started/android) | [iOS](https://google.github.io/mediapipe/getting_started/ios) | [C++](https://google.github.io/mediapipe/getting_started/cpp) | [Python](https://google.github.io/mediapipe/getting_started/python) | [JS](https://google.github.io/mediapipe/getting_started/javascript) | [Coral](https://github.com/google/mediapipe/tree/master/mediapipe/examples/coral/README.md)
:---------------------------------------------------------------------------------------------- | :-----: | :-: | :-: | :----: | :-: | :---:
[Face Detection](https://google.github.io/mediapipe/solutions/face_detection) | ✅ | ✅ | ✅ | | | ✅
[Face Mesh](https://google.github.io/mediapipe/solutions/face_mesh) | ✅ | ✅ | ✅ | ✅ | |
[Iris](https://google.github.io/mediapipe/solutions/iris) | ✅ | ✅ | ✅ | | |
[Hands](https://google.github.io/mediapipe/solutions/hands) | ✅ | ✅ | ✅ | ✅ | ✅ |
[Pose](https://google.github.io/mediapipe/solutions/pose) | ✅ | ✅ | ✅ | ✅ | ✅ |
[Holistic](https://google.github.io/mediapipe/solutions/holistic) | ✅ | ✅ | ✅ | ✅ | ✅ |
[Hair Segmentation](https://google.github.io/mediapipe/solutions/hair_segmentation) | ✅ | | ✅ | | |
[Object Detection](https://google.github.io/mediapipe/solutions/object_detection) | ✅ | ✅ | ✅ | | | ✅
[Box Tracking](https://google.github.io/mediapipe/solutions/box_tracking) | ✅ | ✅ | ✅ | | |
[Instant Motion Tracking](https://google.github.io/mediapipe/solutions/instant_motion_tracking) | ✅ | | | | |
@ -55,16 +56,12 @@ for ML models released in MediaPipe.
## MediaPipe in Python

MediaPipe offers customizable Python solutions as a prebuilt Python package on
[PyPI](https://pypi.org/project/mediapipe/), which can be installed simply with
`pip install mediapipe`. It also provides tools for users to build their own
solutions. Please see
[MediaPipe in Python](https://google.github.io/mediapipe/getting_started/python.md)
for more info.

## MediaPipe on the Web
@ -105,6 +102,8 @@ run code search using
## Publications

*   [Background Features in Google Meet, Powered by Web ML](https://ai.googleblog.com/2020/10/background-features-in-google-meet.html)
    in Google AI Blog
*   [MediaPipe 3D Face Transform](https://developers.googleblog.com/2020/09/mediapipe-3d-face-transform.html)
    in Google Developers Blog
*   [Instant Motion Tracking With MediaPipe](https://developers.googleblog.com/2020/08/instant-motion-tracking-with-mediapipe.html)
@ -2,14 +2,20 @@
layout: default
title: AutoFlip (Saliency-aware Video Cropping)
parent: Solutions
nav_order: 13
---

# AutoFlip: Saliency-aware Video Cropping
{: .no_toc }

<details close markdown="block">
  <summary>
    Table of contents
  </summary>
  {: .text-delta }
1. TOC
{:toc}
</details>
---

## Overview
@ -2,14 +2,20 @@
layout: default
title: Box Tracking
parent: Solutions
nav_order: 9
---

# MediaPipe Box Tracking
{: .no_toc }

<details close markdown="block">
  <summary>
    Table of contents
  </summary>
  {: .text-delta }
1. TOC
{:toc}
</details>
---

## Overview
@ -105,9 +111,8 @@ new detections to remove obsolete or duplicated boxes.
## Example Apps

Please first see general instructions for
[Android](../getting_started/android.md), [iOS](../getting_started/ios.md) and
[desktop](../getting_started/cpp.md) on how to build MediaPipe examples.

Note: To visualize a graph, copy the graph and paste it into
[MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how
@ -8,8 +8,14 @@ nav_order: 1
# MediaPipe Face Detection
{: .no_toc }

<details close markdown="block">
  <summary>
    Table of contents
  </summary>
  {: .text-delta }
1. TOC
{:toc}
</details>
---

## Overview
@ -36,9 +42,8 @@ section.
## Example Apps

Please first see general instructions for
[Android](../getting_started/android.md), [iOS](../getting_started/ios.md) and
[desktop](../getting_started/cpp.md) on how to build MediaPipe examples.

Note: To visualize a graph, copy the graph and paste it into
[MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how
@ -8,8 +8,14 @@ nav_order: 2
# MediaPipe Face Mesh
{: .no_toc }

<details close markdown="block">
  <summary>
    Table of contents
  </summary>
  {: .text-delta }
1. TOC
{:toc}
</details>
---

## Overview
@ -206,11 +212,222 @@ The effect renderer is implemented as a MediaPipe
| :---------------------------------------------------------------------: |
| *Fig 4. An example of face effects rendered by the Face Geometry Effect Renderer.* |
## Solution APIs
### Configuration Options
Naming style and availability may differ slightly across platforms/languages.
#### static_image_mode
If set to `false`, the solution treats the input images as a video stream. It
will try to detect faces in the first input images, and upon a successful
detection further localizes the face landmarks. In subsequent images, once all
[max_num_faces](#max_num_faces) faces are detected and the corresponding face
landmarks are localized, it simply tracks those landmarks without invoking
another detection until it loses track of any of the faces. This reduces latency
and is ideal for processing video frames. If set to `true`, face detection runs
on every input image, ideal for processing a batch of static, possibly
unrelated, images. Default to `false`.
#### max_num_faces
Maximum number of faces to detect. Default to `1`.
#### min_detection_confidence
Minimum confidence value (`[0.0, 1.0]`) from the face detection model for the
detection to be considered successful. Default to `0.5`.
#### min_tracking_confidence
Minimum confidence value (`[0.0, 1.0]`) from the landmark-tracking model for the
face landmarks to be considered tracked successfully, or otherwise face
detection will be invoked automatically on the next input image. Setting it to a
higher value can increase robustness of the solution, at the expense of a higher
latency. Ignored if [static_image_mode](#static_image_mode) is `true`, where
face detection simply runs on every image. Default to `0.5`.
### Output
Naming style may differ slightly across platforms/languages.
#### multi_face_landmarks
Collection of detected/tracked faces, where each face is represented as a list
of 468 face landmarks and each landmark is composed of `x`, `y` and `z`. `x` and
`y` are normalized to `[0.0, 1.0]` by the image width and height respectively.
`z` represents the landmark depth with the depth at center of the head being the
origin, and the smaller the value the closer the landmark is to the camera. The
magnitude of `z` uses roughly the same scale as `x`.
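As an illustrative sketch (assuming `results` comes from `FaceMesh.process()`
as in the Python example below, and that at least one face was detected), an
individual landmark can be read like this:

```python
# Index 0 here is just an arbitrary landmark index for illustration.
face_landmarks = results.multi_face_landmarks[0]
landmark = face_landmarks.landmark[0]
print(landmark.x, landmark.y, landmark.z)
```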
### Python Solution API
Please first follow general [instructions](../getting_started/python.md) to
install MediaPipe Python package, then learn more in the companion [Colab] and
the following usage example.
Supported configuration options:
* [static_image_mode](#static_image_mode)
* [max_num_faces](#max_num_faces)
* [min_detection_confidence](#min_detection_confidence)
* [min_tracking_confidence](#min_tracking_confidence)
```python
import cv2
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_face_mesh = mp.solutions.face_mesh
# For static images:
face_mesh = mp_face_mesh.FaceMesh(
static_image_mode=True,
max_num_faces=1,
min_detection_confidence=0.5)
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
for idx, file in enumerate(file_list):
image = cv2.imread(file)
# Convert the BGR image to RGB before processing.
results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
# Print and draw face mesh landmarks on the image.
if not results.multi_face_landmarks:
continue
annotated_image = image.copy()
for face_landmarks in results.multi_face_landmarks:
print('face_landmarks:', face_landmarks)
mp_drawing.draw_landmarks(
image=annotated_image,
landmark_list=face_landmarks,
connections=mp_face_mesh.FACE_CONNECTIONS,
landmark_drawing_spec=drawing_spec,
connection_drawing_spec=drawing_spec)
cv2.imwrite('/tmp/annotated_image' + str(idx) + '.png', annotated_image)
face_mesh.close()
# For webcam input:
face_mesh = mp_face_mesh.FaceMesh(
min_detection_confidence=0.5, min_tracking_confidence=0.5)
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
cap = cv2.VideoCapture(0)
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
continue
# Flip the image horizontally for a later selfie-view display, and convert
# the BGR image to RGB.
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = face_mesh.process(image)
# Draw the face mesh annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if results.multi_face_landmarks:
for face_landmarks in results.multi_face_landmarks:
mp_drawing.draw_landmarks(
image=image,
landmark_list=face_landmarks,
connections=mp_face_mesh.FACE_CONNECTIONS,
landmark_drawing_spec=drawing_spec,
connection_drawing_spec=drawing_spec)
cv2.imshow('MediaPipe FaceMesh', image)
if cv2.waitKey(5) & 0xFF == 27:
break
face_mesh.close()
cap.release()
```
### JavaScript Solution API
Please first see general [introduction](../getting_started/javascript.md) on
MediaPipe in JavaScript, then learn more in the companion [web demo] and the
following usage example.
Supported configuration options:
* [maxNumFaces](#max_num_faces)
* [minDetectionConfidence](#min_detection_confidence)
* [minTrackingConfidence](#min_tracking_confidence)
```html
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/camera_utils/camera_utils.js" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/control_utils/control_utils.js" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/drawing_utils/drawing_utils.js" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/face_mesh.js" crossorigin="anonymous"></script>
</head>
<body>
<div class="container">
<video class="input_video"></video>
<canvas class="output_canvas" width="1280px" height="720px"></canvas>
</div>
</body>
</html>
```
```javascript
<script type="module">
const videoElement = document.getElementsByClassName('input_video')[0];
const canvasElement = document.getElementsByClassName('output_canvas')[0];
const canvasCtx = canvasElement.getContext('2d');
function onResults(results) {
canvasCtx.save();
canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
canvasCtx.drawImage(
results.image, 0, 0, canvasElement.width, canvasElement.height);
if (results.multiFaceLandmarks) {
for (const landmarks of results.multiFaceLandmarks) {
drawConnectors(canvasCtx, landmarks, FACEMESH_TESSELATION,
{color: '#C0C0C070', lineWidth: 1});
drawConnectors(canvasCtx, landmarks, FACEMESH_RIGHT_EYE, {color: '#FF3030'});
drawConnectors(canvasCtx, landmarks, FACEMESH_RIGHT_EYEBROW, {color: '#FF3030'});
drawConnectors(canvasCtx, landmarks, FACEMESH_LEFT_EYE, {color: '#30FF30'});
drawConnectors(canvasCtx, landmarks, FACEMESH_LEFT_EYEBROW, {color: '#30FF30'});
drawConnectors(canvasCtx, landmarks, FACEMESH_FACE_OVAL, {color: '#E0E0E0'});
drawConnectors(canvasCtx, landmarks, FACEMESH_LIPS, {color: '#E0E0E0'});
}
}
canvasCtx.restore();
}
const faceMesh = new FaceMesh({locateFile: (file) => {
return `https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/${file}`;
}});
faceMesh.setOptions({
maxNumFaces: 1,
minDetectionConfidence: 0.5,
minTrackingConfidence: 0.5
});
faceMesh.onResults(onResults);
const camera = new Camera(videoElement, {
onFrame: async () => {
await faceMesh.send({image: videoElement});
},
width: 1280,
height: 720
});
camera.start();
</script>
```
## Example Apps

Please first see general instructions for
[Android](../getting_started/android.md), [iOS](../getting_started/ios.md) and
[desktop](../getting_started/cpp.md) on how to build MediaPipe examples.

Note: To visualize a graph, copy the graph and paste it into
[MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how
@ -254,99 +471,6 @@ and for iOS modify `kNumFaces` in
Tip: Maximum number of faces to detect/process is set to 1 by default. To change
it, in the graph file modify the option of `ConstantSidePacketCalculator`.
#### Python
MediaPipe Python package is available on
[PyPI](https://pypi.org/project/mediapipe/), and can be installed simply by `pip
install mediapipe` on Linux and macOS, as described below and in this
[colab](https://mediapipe.page.link/face_mesh_py_colab). If you do need to build
the Python package from source, see
[additional instructions](../getting_started/building_examples.md#python).
Activate a Python virtual environment:
```bash
$ python3 -m venv mp_env && source mp_env/bin/activate
```
Install MediaPipe Python package:
```bash
(mp_env)$ pip install mediapipe
```
Run the following Python code:
<!-- Do not change the example code below directly. Change the corresponding example in mediapipe/python/solutions/face_mesh.py and copy it over. -->
```python
import cv2
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_face_mesh = mp.solutions.face_mesh
# For static images:
face_mesh = mp_face_mesh.FaceMesh(
static_image_mode=True,
max_num_faces=1,
min_detection_confidence=0.5)
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
for idx, file in enumerate(file_list):
image = cv2.imread(file)
# Convert the BGR image to RGB before processing.
results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
# Print and draw face mesh landmarks on the image.
if not results.multi_face_landmarks:
continue
annotated_image = image.copy()
for face_landmarks in results.multi_face_landmarks:
print('face_landmarks:', face_landmarks)
mp_drawing.draw_landmarks(
image=annotated_image,
landmark_list=face_landmarks,
connections=mp_face_mesh.FACE_CONNECTIONS,
landmark_drawing_spec=drawing_spec,
connection_drawing_spec=drawing_spec)
cv2.imwrite('/tmp/annotated_image' + str(idx) + '.png', image)
face_mesh.close()
# For webcam input:
face_mesh = mp_face_mesh.FaceMesh(
min_detection_confidence=0.5, min_tracking_confidence=0.5)
drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1)
cap = cv2.VideoCapture(0)
while cap.isOpened():
success, image = cap.read()
if not success:
break
# Flip the image horizontally for a later selfie-view display, and convert
# the BGR image to RGB.
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = face_mesh.process(image)
# Draw the face mesh annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if results.multi_face_landmarks:
for face_landmarks in results.multi_face_landmarks:
mp_drawing.draw_landmarks(
image=image,
landmark_list=face_landmarks,
connections=mp_face_mesh.FACE_CONNECTIONS,
landmark_drawing_spec=drawing_spec,
connection_drawing_spec=drawing_spec)
cv2.imshow('MediaPipe FaceMesh', image)
if cv2.waitKey(5) & 0xFF == 27:
break
face_mesh.close()
cap.release()
```
### Face Effect Example

Face effect example showcases real-time mobile face effect application use case
@ -379,3 +503,7 @@ only works for a single face. For visual reference, please refer to *Fig. 4*.
  [OBJ](https://github.com/google/mediapipe/tree/master/mediapipe/modules/face_geometry/data/canonical_face_model.obj),
  [UV visualization](https://github.com/google/mediapipe/tree/master/mediapipe/modules/face_geometry/data/canonical_face_model_uv_visualization.png)
* [Models and model cards](./models.md#face_mesh)
[Colab]:https://mediapipe.page.link/face_mesh_py_colab
[web demo]:https://code.mediapipe.dev/codepen/face_mesh
@ -2,14 +2,20 @@
layout: default
title: Hair Segmentation
parent: Solutions
nav_order: 7
---

# MediaPipe Hair Segmentation
{: .no_toc }

<details close markdown="block">
  <summary>
    Table of contents
  </summary>
  {: .text-delta }
1. TOC
{:toc}
</details>
---

![hair_segmentation_android_gpu_gif](../images/mobile/hair_segmentation_android_gpu.gif)
@ -17,9 +23,8 @@ nav_order: 6
## Example Apps

Please first see general instructions for
[Android](../getting_started/android.md), [iOS](../getting_started/ios.md) and
[desktop](../getting_started/cpp.md) on how to build MediaPipe examples.

Note: To visualize a graph, copy the graph and paste it into
[MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how
@ -8,8 +8,14 @@ nav_order: 4
# MediaPipe Hands
{: .no_toc }

<details close markdown="block">
  <summary>
    Table of contents
  </summary>
  {: .text-delta }
1. TOC
{:toc}
</details>
---

## Overview
@ -126,16 +132,239 @@ and provide additional supervision on the nature of hand geometry, we also
render a high-quality synthetic hand model over various backgrounds and map it
to the corresponding 3D coordinates.
![hand_landmarks.png](../images/mobile/hand_landmarks.png) |
:--------------------------------------------------------: |
*Fig 2. 21 hand landmarks.* |
| ![hand_crops.png](../images/mobile/hand_crops.png) |
| :-------------------------------------------------------------------------: |
| *Fig 3. Top: Aligned hand crops passed to the tracking network with ground truth annotation. Bottom: Rendered synthetic hand images with ground truth annotation.* |
## Solution APIs
### Configuration Options
Naming style and availability may differ slightly across platforms/languages.
#### static_image_mode
If set to `false`, the solution treats the input images as a video stream. It
will try to detect hands in the first input images, and upon a successful
detection further localizes the hand landmarks. In subsequent images, once all
[max_num_hands](#max_num_hands) hands are detected and the corresponding hand
landmarks are localized, it simply tracks those landmarks without invoking
another detection until it loses track of any of the hands. This reduces latency
and is ideal for processing video frames. If set to `true`, hand detection runs
on every input image, ideal for processing a batch of static, possibly
unrelated, images. Default to `false`.
#### max_num_hands
Maximum number of hands to detect. Default to `2`.
#### min_detection_confidence
Minimum confidence value (`[0.0, 1.0]`) from the hand detection model for the
detection to be considered successful. Default to `0.5`.
#### min_tracking_confidence
Minimum confidence value (`[0.0, 1.0]`) from the landmark-tracking model for the
hand landmarks to be considered tracked successfully, or otherwise hand
detection will be invoked automatically on the next input image. Setting it to a
higher value can increase robustness of the solution, at the expense of a higher
latency. Ignored if [static_image_mode](#static_image_mode) is `true`, where
hand detection simply runs on every image. Default to `0.5`.
### Output
Naming style may differ slightly across platforms/languages.
#### multi_hand_landmarks
Collection of detected/tracked hands, where each hand is represented as a list
of 21 hand landmarks and each landmark is composed of `x`, `y` and `z`. `x` and
`y` are normalized to `[0.0, 1.0]` by the image width and height respectively.
`z` represents the landmark depth with the depth at the wrist being the origin,
and the smaller the value the closer the landmark is to the camera. The
magnitude of `z` uses roughly the same scale as `x`.
#### multi_handedness
Collection of handedness of the detected/tracked hands (i.e. is it a left or
right hand). Each hand is composed of `label` and `score`. `label` is a string
of value either `"Left"` or `"Right"`. `score` is the estimated probability of
the predicted handedness and is always greater than or equal to `0.5` (and the
opposite handedness has an estimated probability of `1 - score`).
Note that handedness is determined assuming the input image is mirrored, i.e.,
taken with a front-facing/selfie camera with images flipped horizontally. If it
is not the case, please swap the handedness output in the application.
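As an illustrative sketch (assuming `results` comes from `Hands.process()` as
in the Python example below, and that at least one hand was detected), the
handedness of the first detected hand can be read like this:

```python
# Each entry holds a classification with the predicted label and score.
handedness = results.multi_handedness[0].classification[0]
print(handedness.label, handedness.score)  # e.g. Right 0.98
```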
### Python Solution API
Please first follow general [instructions](../getting_started/python.md) to
install MediaPipe Python package, then learn more in the companion [Colab] and
the following usage example.
Supported configuration options:
* [static_image_mode](#static_image_mode)
* [max_num_hands](#max_num_hands)
* [min_detection_confidence](#min_detection_confidence)
* [min_tracking_confidence](#min_tracking_confidence)
```python
import cv2
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_hands = mp.solutions.hands
# For static images:
hands = mp_hands.Hands(
static_image_mode=True,
max_num_hands=2,
min_detection_confidence=0.5)
for idx, file in enumerate(file_list):
# Read an image, flip it around y-axis for correct handedness output (see
# above).
image = cv2.flip(cv2.imread(file), 1)
# Convert the BGR image to RGB before processing.
results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
# Print handedness and draw hand landmarks on the image.
print('Handedness:', results.multi_handedness)
if not results.multi_hand_landmarks:
continue
image_hight, image_width, _ = image.shape
annotated_image = image.copy()
for hand_landmarks in results.multi_hand_landmarks:
print('hand_landmarks:', hand_landmarks)
print(
f'Index finger tip coordinates: (',
f'{hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].x * image_width}, '
f'{hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].y * image_hight})'
)
mp_drawing.draw_landmarks(
annotated_image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
cv2.imwrite(
'/tmp/annotated_image' + str(idx) + '.png', cv2.flip(annotated_image, 1))
hands.close()
# For webcam input:
hands = mp_hands.Hands(
min_detection_confidence=0.5, min_tracking_confidence=0.5)
cap = cv2.VideoCapture(0)
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
continue
# Flip the image horizontally for a later selfie-view display, and convert
# the BGR image to RGB.
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = hands.process(image)
# Draw the hand annotations on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
if results.multi_hand_landmarks:
for hand_landmarks in results.multi_hand_landmarks:
mp_drawing.draw_landmarks(
image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
cv2.imshow('MediaPipe Hands', image)
if cv2.waitKey(5) & 0xFF == 27:
break
hands.close()
cap.release()
```
### JavaScript Solution API
Please first see general [introduction](../getting_started/javascript.md) on
MediaPipe in JavaScript, then learn more in the companion [web demo] and a
[fun application], and the following usage example.
Supported configuration options:
* [maxNumHands](#max_num_hands)
* [minDetectionConfidence](#min_detection_confidence)
* [minTrackingConfidence](#min_tracking_confidence)
```html
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/camera_utils/camera_utils.js" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/control_utils/control_utils.js" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/drawing_utils/drawing_utils.js" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/hands/hands.js" crossorigin="anonymous"></script>
</head>
<body>
<div class="container">
<video class="input_video"></video>
<canvas class="output_canvas" width="1280px" height="720px"></canvas>
</div>
</body>
</html>
```
```javascript
<script type="module">
const videoElement = document.getElementsByClassName('input_video')[0];
const canvasElement = document.getElementsByClassName('output_canvas')[0];
const canvasCtx = canvasElement.getContext('2d');
function onResults(results) {
canvasCtx.save();
canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
canvasCtx.drawImage(
results.image, 0, 0, canvasElement.width, canvasElement.height);
if (results.multiHandLandmarks) {
for (const landmarks of results.multiHandLandmarks) {
drawConnectors(canvasCtx, landmarks, HAND_CONNECTIONS,
{color: '#00FF00', lineWidth: 5});
drawLandmarks(canvasCtx, landmarks, {color: '#FF0000', lineWidth: 2});
}
}
canvasCtx.restore();
}
const hands = new Hands({locateFile: (file) => {
return `https://cdn.jsdelivr.net/npm/@mediapipe/hands/${file}`;
}});
hands.setOptions({
maxNumHands: 2,
minDetectionConfidence: 0.5,
minTrackingConfidence: 0.5
});
hands.onResults(onResults);
const camera = new Camera(videoElement, {
onFrame: async () => {
await hands.send({image: videoElement});
},
width: 1280,
height: 720
});
camera.start();
</script>
```
## Example Apps

Please first see general instructions for
[Android](../getting_started/android.md), [iOS](../getting_started/ios.md) and
[desktop](../getting_started/cpp.md) on how to build MediaPipe examples.

Note: To visualize a graph, copy the graph and paste it into
[MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how
to visualize its associated subgraphs, please see
[visualizer documentation](../tools/visualizer.md).

Tip: Maximum number of hands to detect/process is set to 2 by default. To change
it, in the graph file modify the option of `ConstantSidePacketCalculator`.
## Resources

* Paper:
  [MediaPipe Hands: On-device Real-time Hand Tracking](https://arxiv.org/abs/2006.10214)
  ([presentation](https://www.youtube.com/watch?v=I-UOrvxxXEk))
* [Models and model cards](./models.md#hands)
[Colab]:https://mediapipe.page.link/hands_py_colab
[web demo]:https://code.mediapipe.dev/codepen/hands
[fun application]:https://code.mediapipe.dev/codepen/defrost

---
layout: default
title: Holistic
parent: Solutions
nav_order: 6
---
# MediaPipe Holistic
{: .no_toc }
<details close markdown="block">
<summary>
Table of contents
</summary>
{: .text-delta }
1. TOC
{:toc}
</details>
---
## Overview
Live perception of simultaneous [human pose](./pose.md),
[face landmarks](./face_mesh.md), and [hand tracking](./hands.md) in real-time
on mobile devices can enable various modern life applications: fitness and sport
analysis, gesture control and sign language recognition, augmented reality
try-on and effects. MediaPipe already offers fast and accurate, yet separate,
solutions for these tasks. Combining them all in real-time into a semantically
consistent end-to-end solution is a uniquely difficult problem requiring
simultaneous inference of multiple, dependent neural networks.
![holistic_sports_and_gestures_example.gif](../images/mobile/holistic_sports_and_gestures_example.gif) |
:----------------------------------------------------------------------------------------------------: |
*Fig 1. Example of MediaPipe Holistic.* |
## ML Pipeline
The MediaPipe Holistic pipeline integrates separate models for
[pose](./pose.md), [face](./face_mesh.md) and [hand](./hands.md) components,
each of which are optimized for their particular domain. However, because of
their different specializations, the input to one component is not well-suited
for the others. The pose estimation model, for example, takes a lower, fixed
resolution video frame (256x256) as input. But if one were to crop the hand and
face regions from that image to pass to their respective models, the image
resolution would be too low for accurate articulation. Therefore, we designed
MediaPipe Holistic as a multi-stage pipeline, which treats the different regions
using a region appropriate image resolution.
First, we estimate the human pose (top of Fig 2) with [BlazePose](./pose.md)'s
pose detector and subsequent landmark model. Then, using the inferred pose
landmarks we derive three regions of interest (ROI) crops for each hand (2x) and
the face, and employ a re-crop model to improve the ROI. We then crop the
full-resolution input frame to these ROIs and apply task-specific face and hand
models to estimate their corresponding landmarks. Finally, we merge all
landmarks with those of the pose model to yield the full 540+ landmarks.
![holistic_pipeline_example.jpg](../images/mobile/holistic_pipeline_example.jpg) |
:------------------------------------------------------------------------------: |
*Fig 2. MediaPipe Holistic Pipeline Overview.* |
To streamline the identification of ROIs for face and hands, we utilize a
tracking approach similar to the one we use for standalone
[face](./face_mesh.md) and [hand](./hands.md) pipelines. It assumes that the
object doesn't move significantly between frames and uses estimation from the
previous frame as a guide to the object region on the current one. However,
during fast movements, the tracker can lose the target, which requires the
detector to re-localize it in the image. MediaPipe Holistic uses
[pose](./pose.md) prediction (on every frame) as an additional ROI prior to
reduce the response time of the pipeline when reacting to fast movements. This
also enables the model to retain semantic consistency across the body and its
parts by preventing a mixup between left and right hands or body parts of one
person in the frame with another.
In addition, the resolution of the input frame to the pose model is low enough
that the resulting ROIs for face and hands are still too inaccurate to guide the
re-cropping of those regions, which require a precise input crop to remain
lightweight. To close this accuracy gap we use lightweight face and hand re-crop
models that play the role of
[spatial transformers](https://arxiv.org/abs/1506.02025) and cost only ~10% of
the corresponding model's inference time.
The pipeline is implemented as a MediaPipe
[graph](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/holistic_tracking/holistic_tracking_gpu.pbtxt)
that uses a
[holistic landmark subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/modules/holistic_landmark/holistic_landmark_gpu.pbtxt)
from the
[holistic landmark module](https://github.com/google/mediapipe/tree/master/mediapipe/modules/holistic_landmark)
and renders using a dedicated
[holistic renderer subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/holistic_tracking/holistic_tracking_to_render_data.pbtxt).
The
[holistic landmark subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/modules/holistic_landmark/holistic_landmark_gpu.pbtxt)
internally uses a
[pose landmark module](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark),
[hand landmark module](https://github.com/google/mediapipe/tree/master/mediapipe/modules/hand_landmark)
and
[face landmark module](https://github.com/google/mediapipe/tree/master/mediapipe/modules/face_landmark/).
Please check them for implementation details.
Note: To visualize a graph, copy the graph and paste it into
[MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how
to visualize its associated subgraphs, please see
[visualizer documentation](../tools/visualizer.md).
## Models
### Landmark Models
MediaPipe Holistic utilizes the pose, face and hand landmark models in
[MediaPipe Pose](./pose.md), [MediaPipe Face Mesh](./face_mesh.md) and
[MediaPipe Hands](./hands.md) respectively to generate a total of 543 landmarks
(33 pose landmarks, 468 face landmarks, and 21 hand landmarks per hand).
### Hand Recrop Model
For cases when the accuracy of the pose model is low enough that the resulting
ROIs for hands are still too inaccurate, we run an additional lightweight hand
re-crop model that plays the role of a
[spatial transformer](https://arxiv.org/abs/1506.02025) and costs only ~10% of
the hand model's inference time.
## Solution APIs
### Cross-platform Configuration Options
Naming style and availability may differ slightly across platforms/languages.
#### static_image_mode
If set to `false`, the solution treats the input images as a video stream. It
will try to detect the most prominent person in the very first images, and upon
a successful detection further localizes the pose and other landmarks. In
subsequent images, it then simply tracks those landmarks without invoking
another detection until it loses track, thus reducing computation and latency.
If set to `true`, person detection runs on every input image, which is ideal for
processing a batch of static, possibly unrelated, images. Default to `false`.
#### upper_body_only
If set to `true`, the solution outputs only the 25 upper-body pose landmarks
(535 in total) instead of the full set of 33 pose landmarks (543 in total). Note
that upper-body-only prediction may be more accurate for use cases where the
lower-body parts are mostly out of view. Default to `false`.
#### smooth_landmarks
If set to `true`, the solution filters pose landmarks across different input
images to reduce jitter, but ignored if [static_image_mode](#static_image_mode)
is also set to `true`. Default to `true`.
#### min_detection_confidence
Minimum confidence value (`[0.0, 1.0]`) from the person-detection model for the
detection to be considered successful. Default to `0.5`.
#### min_tracking_confidence
Minimum confidence value (`[0.0, 1.0]`) from the landmark-tracking model for the
pose landmarks to be considered tracked successfully, or otherwise person
detection will be invoked automatically on the next input image. Setting it to a
higher value can increase robustness of the solution, at the expense of a higher
latency. Ignored if [static_image_mode](#static_image_mode) is `true`, where
person detection simply runs on every image. Default to `0.5`.
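To see how these options come together in code, here is a minimal sketch using the Python Solution API described below; the constructor argument names are assumed to mirror the option names listed in this section:

```python
import mediapipe as mp

mp_holistic = mp.solutions.holistic

# Illustrative sketch: set every cross-platform option listed above explicitly
# (argument names are assumed to mirror the option names).
holistic = mp_holistic.Holistic(
    static_image_mode=False,
    upper_body_only=False,
    smooth_landmarks=True,
    min_detection_confidence=0.5,
    min_tracking_confidence=0.5)
# ... call holistic.process(...) on RGB images, as in the examples below ...
holistic.close()
```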
### Output
Naming style may differ slightly across platforms/languages.
#### pose_landmarks
A list of pose landmarks. Each landmark consists of the following:
* `x` and `y`: Landmark coordinates normalized to `[0.0, 1.0]` by the image
width and height respectively.
* `z`: Should be discarded as currently the model is not fully trained to
predict depth, but this is something on the roadmap.
* `visibility`: A value in `[0.0, 1.0]` indicating the likelihood of the
landmark being visible (present and not occluded) in the image.
#### face_landmarks
A list of 468 face landmarks. Each landmark consists of `x`, `y` and `z`. `x`
and `y` are normalized to `[0.0, 1.0]` by the image width and height
respectively. `z` represents the landmark depth with the depth at center of the
head being the origin, and the smaller the value the closer the landmark is to
the camera. The magnitude of `z` uses roughly the same scale as `x`.
#### left_hand_landmarks
A list of 21 hand landmarks on the left hand. Each landmark consists of `x`, `y`
and `z`. `x` and `y` are normalized to `[0.0, 1.0]` by the image width and
height respectively. `z` represents the landmark depth with the depth at the
wrist being the origin, and the smaller the value the closer the landmark is to
the camera. The magnitude of `z` uses roughly the same scale as `x`.
#### right_hand_landmarks
A list of 21 hand landmarks on the right hand, in the same representation as
[left_hand_landmarks](#left_hand_landmarks).
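As a rough sketch of how these outputs can be consumed (here `results` stands for the object returned by `process()` in the Python example below, and it is assumed, not stated above, that a landmark field may be unset when the corresponding body part is not detected):

```python
def describe_holistic_results(results, image_width, image_height):
  """Sketch only: inspects the four landmark outputs described above."""
  if results.pose_landmarks:
    # Convert the first pose landmark from normalized to pixel coordinates.
    landmark = results.pose_landmarks.landmark[0]
    print('First pose landmark (px):',
          landmark.x * image_width, landmark.y * image_height)
  for name in ('face_landmarks', 'left_hand_landmarks', 'right_hand_landmarks'):
    print(name, 'present' if getattr(results, name) else 'absent')
```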
### Python Solution API
Please first follow general [instructions](../getting_started/python.md) to
install MediaPipe Python package, then learn more in the companion [Colab] and
the following usage example.
Supported configuration options:
* [static_image_mode](#static_image_mode)
* [upper_body_only](#upper_body_only)
* [smooth_landmarks](#smooth_landmarks)
* [min_detection_confidence](#min_detection_confidence)
* [min_tracking_confidence](#min_tracking_confidence)
```python
import cv2
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_holistic = mp.solutions.holistic
# For static images:
holistic = mp_holistic.Holistic(static_image_mode=True)
for idx, file in enumerate(file_list):
image = cv2.imread(file)
  image_height, image_width, _ = image.shape
# Convert the BGR image to RGB before processing.
results = holistic.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
if results.pose_landmarks:
print(
f'Nose coordinates: ('
f'{results.pose_landmarks.landmark[mp_holistic.PoseLandmark.NOSE].x * image_width}, '
        f'{results.pose_landmarks.landmark[mp_holistic.PoseLandmark.NOSE].y * image_height})'
)
# Draw pose, left and right hands, and face landmarks on the image.
annotated_image = image.copy()
mp_drawing.draw_landmarks(
annotated_image, results.face_landmarks, mp_holistic.FACE_CONNECTIONS)
mp_drawing.draw_landmarks(
annotated_image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS)
mp_drawing.draw_landmarks(
annotated_image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS)
mp_drawing.draw_landmarks(
annotated_image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS)
cv2.imwrite('/tmp/annotated_image' + str(idx) + '.png', annotated_image)
holistic.close()
# For webcam input:
holistic = mp_holistic.Holistic(
min_detection_confidence=0.5, min_tracking_confidence=0.5)
cap = cv2.VideoCapture(0)
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
continue
# Flip the image horizontally for a later selfie-view display, and convert
# the BGR image to RGB.
image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
# To improve performance, optionally mark the image as not writeable to
# pass by reference.
image.flags.writeable = False
results = holistic.process(image)
# Draw landmark annotation on the image.
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
mp_drawing.draw_landmarks(
image, results.face_landmarks, mp_holistic.FACE_CONNECTIONS)
mp_drawing.draw_landmarks(
image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS)
mp_drawing.draw_landmarks(
image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS)
mp_drawing.draw_landmarks(
image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS)
cv2.imshow('MediaPipe Holistic', image)
if cv2.waitKey(5) & 0xFF == 27:
break
holistic.close()
cap.release()
```
### JavaScript Solution API
Please first see general [introduction](../getting_started/javascript.md) on
MediaPipe in JavaScript, then learn more in the companion [web demo] and the
following usage example.
Supported configuration options:
* [upperBodyOnly](#upper_body_only)
* [smoothLandmarks](#smooth_landmarks)
* [minDetectionConfidence](#min_detection_confidence)
* [minTrackingConfidence](#min_tracking_confidence)
```html
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/camera_utils/camera_utils.js" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/control_utils/control_utils.js" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/drawing_utils/drawing_utils.js" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/holistic/holistic.js" crossorigin="anonymous"></script>
</head>
<body>
<div class="container">
<video class="input_video"></video>
<canvas class="output_canvas" width="1280px" height="720px"></canvas>
</div>
</body>
</html>
```
```javascript
<script type="module">
const videoElement = document.getElementsByClassName('input_video')[0];
const canvasElement = document.getElementsByClassName('output_canvas')[0];
const canvasCtx = canvasElement.getContext('2d');
function onResults(results) {
canvasCtx.save();
canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
canvasCtx.drawImage(
results.image, 0, 0, canvasElement.width, canvasElement.height);
drawConnectors(canvasCtx, results.poseLandmarks, POSE_CONNECTIONS,
{color: '#00FF00', lineWidth: 4});
drawLandmarks(canvasCtx, results.poseLandmarks,
{color: '#FF0000', lineWidth: 2});
drawConnectors(canvasCtx, results.faceLandmarks, FACEMESH_TESSELATION,
{color: '#C0C0C070', lineWidth: 1});
drawConnectors(canvasCtx, results.leftHandLandmarks, HAND_CONNECTIONS,
{color: '#CC0000', lineWidth: 5});
drawLandmarks(canvasCtx, results.leftHandLandmarks,
{color: '#00FF00', lineWidth: 2});
drawConnectors(canvasCtx, results.rightHandLandmarks, HAND_CONNECTIONS,
{color: '#00CC00', lineWidth: 5});
drawLandmarks(canvasCtx, results.rightHandLandmarks,
{color: '#FF0000', lineWidth: 2});
canvasCtx.restore();
}
const holistic = new Holistic({locateFile: (file) => {
return `https://cdn.jsdelivr.net/npm/@mediapipe/holistic/${file}`;
}});
holistic.setOptions({
upperBodyOnly: false,
smoothLandmarks: true,
minDetectionConfidence: 0.5,
minTrackingConfidence: 0.5
});
holistic.onResults(onResults);
const camera = new Camera(videoElement, {
onFrame: async () => {
await holistic.send({image: videoElement});
},
width: 1280,
height: 720
});
camera.start();
</script>
```
## Example Apps
Please first see general instructions for
[Android](../getting_started/android.md), [iOS](../getting_started/ios.md), and
[desktop](../getting_started/cpp.md) on how to build MediaPipe examples.
Note: To visualize a graph, copy the graph and paste it into
[MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how
to visualize its associated subgraphs, please see
[visualizer documentation](../tools/visualizer.md).
### Mobile
* Graph:
[`mediapipe/graphs/holistic_tracking/holistic_tracking_gpu.pbtxt`](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/holistic_tracking/holistic_tracking_gpu.pbtxt)
* Android target:
[(or download prebuilt ARM64 APK)](https://drive.google.com/file/d/1o-Trp2GIRitA0OvmZWUQjVMa476xpfgK/view?usp=sharing)
[`mediapipe/examples/android/src/java/com/google/mediapipe/apps/holistictrackinggpu:holistictrackinggpu`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/holistictrackinggpu/BUILD)
* iOS target:
[`mediapipe/examples/ios/holistictrackinggpu:HolisticTrackingGpuApp`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/holistictrackinggpu/BUILD)
### Desktop
Please first see general instructions for [desktop](../getting_started/cpp.md)
on how to build MediaPipe examples.
* Running on CPU
* Graph:
[`mediapipe/graphs/holistic_tracking/holistic_tracking_cpu.pbtxt`](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/holistic_tracking/holistic_tracking_cpu.pbtxt)
* Target:
[`mediapipe/examples/desktop/holistic_tracking:holistic_tracking_cpu`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/desktop/holistic_tracking/BUILD)
* Running on GPU
* Graph:
[`mediapipe/graphs/holistic_tracking/holistic_tracking_gpu.pbtxt`](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/holistic_tracking/holistic_tracking_gpu.pbtxt)
* Target:
[`mediapipe/examples/desktop/holistic_tracking:holistic_tracking_gpu`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/desktop/holistic_tracking/BUILD)
## Resources
* Google AI Blog:
[MediaPipe Holistic - Simultaneous Face, Hand and Pose Prediction on Device](https://ai.googleblog.com/2020/08/on-device-real-time-body-pose-tracking.html)
* [Models and model cards](./models.md#holistic)
[Colab]:https://mediapipe.page.link/holistic_py_colab
[web demo]:https://code.mediapipe.dev/codepen/holistic

---
layout: default
title: Instant Motion Tracking
parent: Solutions
nav_order: 10
---

# MediaPipe Instant Motion Tracking
{: .no_toc }

<details close markdown="block">
<summary>
Table of contents
</summary>
{: .text-delta }
1. TOC
{:toc}
</details>
---

## Overview

## Example Apps

Please first see general instructions for
[Android](../getting_started/android.md) on how to build MediaPipe examples.

* Graph: [mediapipe/graphs/instant_motion_tracking/instant_motion_tracking.pbtxt](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/instant_motion_tracking/instant_motion_tracking.pbtxt)
# MediaPipe Iris
{: .no_toc }

<details close markdown="block">
<summary>
Table of contents
</summary>
{: .text-delta }
1. TOC
{:toc}
</details>
---

## Overview

## Example Apps

Please first see general instructions for
[Android](../getting_started/android.md), [iOS](../getting_started/ios.md) and
[desktop](../getting_started/cpp.md) on how to build MediaPipe examples.

Note: To visualize a graph, copy the graph and paste it into
[MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how
to visualize its associated subgraphs, please see
[visualizer documentation](../tools/visualizer.md).

#### Live Camera Input

Please first see general instructions for [desktop](../getting_started/cpp.md)
on how to build MediaPipe examples.
---
layout: default
title: KNIFT (Template-based Feature Matching)
parent: Solutions
nav_order: 12
---

# MediaPipe KNIFT
{: .no_toc }

<details close markdown="block">
<summary>
Table of contents
</summary>
{: .text-delta }
1. TOC
{:toc}
</details>
---

## Overview

![template_matching_mobile_template.jpg](../images/mobile/template_matching_mobile_template.jpg)

Please first see general instructions for
[Android](../getting_started/android.md) on how to build MediaPipe examples.

Note: To visualize a graph, copy the graph and paste it into
[MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how
to visualize its associated subgraphs, please see
[visualizer documentation](../tools/visualizer.md).
---
layout: default
title: Dataset Preparation with MediaSequence
parent: Solutions
nav_order: 14
---

# Dataset Preparation with MediaSequence
{: .no_toc }

<details close markdown="block">
<summary>
Table of contents
</summary>
{: .text-delta }
1. TOC
{:toc}
</details>
---

## Overview
* Pose detection model:
  [TFLite model](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_detection/pose_detection.tflite)
* Full-body pose landmark model:
  [TFLite model](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_full_body.tflite)
* Upper-body pose landmark model:
  [TFLite model](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_upper_body.tflite)
* [Model card](https://mediapipe.page.link/blazepose-mc)

### [Holistic](https://google.github.io/mediapipe/solutions/holistic)

* Hand recrop model:
  [TFLite model](https://github.com/google/mediapipe/tree/master/mediapipe/modules/holistic_landmark/hand_recrop.tflite)

### [Hair Segmentation](https://google.github.io/mediapipe/solutions/hair_segmentation)

* [TFLite model](https://github.com/google/mediapipe/tree/master/mediapipe/models/hair_segmentation.tflite)
---
layout: default
title: Object Detection
parent: Solutions
nav_order: 8
---

# MediaPipe Object Detection
{: .no_toc }

<details close markdown="block">
<summary>
Table of contents
</summary>
{: .text-delta }
1. TOC
{:toc}
</details>
---

![object_detection_android_gpu.gif](../images/mobile/object_detection_android_gpu.gif)

### Mobile

Please first see general instructions for
[Android](../getting_started/android.md) and [iOS](../getting_started/ios.md) on
how to build MediaPipe examples.

#### GPU Pipeline

#### Live Camera Input

Please first see general instructions for [desktop](../getting_started/cpp.md)
on how to build MediaPipe examples.

* Graph:
  [`mediapipe/graphs/object_detection/object_detection_desktop_live.pbtxt`](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/object_detection/object_detection_desktop_live.pbtxt)
---
layout: default
title: Objectron (3D Object Detection)
parent: Solutions
nav_order: 11
---

# MediaPipe Objectron
{: .no_toc }

<details close markdown="block">
<summary>
Table of contents
</summary>
{: .text-delta }
1. TOC
{:toc}
</details>
---

## Overview

MediaPipe Objectron is a mobile real-time 3D object detection solution for
everyday objects. It detects objects in 2D images, and estimates their poses
through a machine learning (ML) model, trained on the
[Objectron dataset](https://github.com/google-research-datasets/Objectron).

![objectron_shoe_android_gpu.gif](../images/mobile/objectron_shoe_android_gpu.gif) | ![objectron_chair_android_gpu.gif](../images/mobile/objectron_chair_android_gpu.gif) | ![objectron_camera_android_gpu.gif](../images/mobile/objectron_camera_android_gpu.gif) | ![objectron_cup_android_gpu.gif](../images/mobile/objectron_cup_android_gpu.gif)
:--------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------:

*Fig 5. Network architecture and post-processing for two-stage 3D object detection.* |

We can use any 2D object detector for the first stage. In this solution, we use
[TensorFlow Object Detection](https://github.com/tensorflow/models/tree/master/research/object_detection) trained
with the [Open Images dataset](https://storage.googleapis.com/openimages/web/index.html).
The second stage 3D bounding box predictor we released runs 83FPS on Adreno 650
mobile GPU.

The Objectron 3D object detection and tracking pipeline is implemented as a
MediaPipe
[graph](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/object_detection_3d/object_occlusion_tracking_1stage.pbtxt),
which internally uses a
[detection subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/modules/objectron/objectron_detection_1stage_gpu.pbtxt)
and a
[tracking subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/modules/objectron/objectron_tracking_1stage_gpu.pbtxt).
The detection subgraph performs ML inference only once every few frames to
reduce computation load, and decodes the output tensor to a FrameAnnotation that
contains nine keypoints: the 3D bounding box's center and its eight vertices.

We also released our [Objectron dataset](http://objectron.dev), with which we
trained our 3D object detection models. The technical details of the Objectron
dataset, including usage and tutorials, are available on
the [dataset website](https://github.com/google-research-datasets/Objectron/).

## Example Apps

Please first see general instructions for
[Android](../getting_started/android.md) and [iOS](../getting_started/ios.md) on
how to build MediaPipe examples.

Note: To visualize a graph, copy the graph and paste it into
[MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how
to visualize its associated subgraphs, please see
[visualizer documentation](../tools/visualizer.md).

## Resources

* Google AI Blog:
  [Announcing the Objectron Dataset](https://ai.googleblog.com/2020/11/announcing-objectron-dataset.html)
* Google AI Blog:
  [Real-Time 3D Object Detection on Mobile Devices with MediaPipe](https://ai.googleblog.com/2020/03/real-time-3d-object-detection-on-mobile.html)
* Paper: [MobilePose: Real-Time Pose Estimation for Unseen Objects with Weak
# MediaPipe Pose
{: .no_toc }

<details close markdown="block">
<summary>
Table of contents
</summary>
{: .text-delta }
1. TOC
{:toc}
</details>
---

## Overview

gesture control. For example, it can form the basis for yoga, dance, and fitness
applications. It can also enable the overlay of digital content and information
on top of the physical world in augmented reality.

MediaPipe Pose is an ML solution for high-fidelity body pose tracking, inferring
33 2D landmarks on the whole body (or 25 upper-body landmarks) from RGB video
frames utilizing our
[BlazePose](https://ai.googleblog.com/2020/08/on-device-real-time-body-pose-tracking.html)
research that also powers the
[ML Kit Pose Detection API](https://developers.google.com/ml-kit/vision/pose-detection).
Current state-of-the-art approaches rely primarily on powerful desktop
environments for inference, whereas our method achieves real-time performance on
most modern [mobile phones](#mobile), [desktops/laptops](#desktop), in
[python](#python) and even on the [web](#web).

![pose_tracking_upper_body_example.gif](../images/mobile/pose_tracking_upper_body_example.gif) |
:--------------------------------------------------------------------------------------------: |

The solution utilizes a two-step detector-tracker ML pipeline, proven to be
effective in our [MediaPipe Hands](./hands.md) and
[MediaPipe Face Mesh](./face_mesh.md) solutions. Using a detector, the pipeline
first locates the person/pose region-of-interest (ROI) within the frame. The
tracker subsequently predicts the pose landmarks within the ROI using the
ROI-cropped frame as input. Note that for video use cases the detector is
invoked only as needed, i.e., for the very first frame and when the tracker
could no longer identify body pose presence in the previous frame. For other
frames the pipeline simply derives the ROI from the previous frame's pose
landmarks.

The pipeline is implemented as a MediaPipe
[graph](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/pose_tracking/pose_tracking_gpu.pbtxt)
that uses a
[pose landmark subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_gpu.pbtxt)
from the
[pose landmark module](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark)
and renders using a dedicated
[pose renderer subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/pose_tracking/subgraphs/pose_renderer_gpu.pbtxt).
The
[pose landmark subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_gpu.pbtxt)
internally uses a
[pose detection subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_detection/pose_detection_gpu.pbtxt)
from the
[pose detection module](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_detection).

Note: To visualize a graph, copy the graph and paste it into
[MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how
to visualize its associated subgraphs, please see
[visualizer documentation](../tools/visualizer.md).

## Models

### Person/pose Detection Model (BlazePose Detector)

The detector is inspired by our own lightweight
[BlazeFace](https://arxiv.org/abs/1907.05047) model, used in

### Pose Landmark Model (BlazePose Tracker)

The landmark model in MediaPipe Pose comes in two versions: a full-body model
that predicts the location of 33 pose landmarks (see figure below), and an
upper-body version that only predicts the first 25. The latter may be more
accurate than the former in scenarios where the lower-body parts are mostly out
of view.

Please find more detail in the
[BlazePose Google AI Blog](https://ai.googleblog.com/2020/08/on-device-real-time-body-pose-tracking.html),
this [paper](https://arxiv.org/abs/2006.10204) and
[the model card](./models.md#pose), and the attributes in each landmark
[below](#pose_landmarks).

![pose_tracking_full_body_landmarks.png](../images/mobile/pose_tracking_full_body_landmarks.png) |
:----------------------------------------------------------------------------------------------: |
*Fig 3. 33 pose landmarks.* |

## Solution APIs

### Cross-platform Configuration Options

Naming style and availability may differ slightly across platforms/languages.

#### static_image_mode

If set to `false`, the solution treats the input images as a video stream. It
will try to detect the most prominent person in the very first images, and upon
a successful detection further localizes the pose landmarks. In subsequent
images, it then simply tracks those landmarks without invoking another detection
until it loses track, thus reducing computation and latency. If set to `true`,
person detection runs on every input image, which is ideal for processing a
batch of static, possibly unrelated, images. Default to `false`.

#### upper_body_only

If set to `true`, the solution outputs only the 25 upper-body pose landmarks.
Otherwise, it outputs the full set of 33 pose landmarks. Note that
upper-body-only prediction may be more accurate for use cases where the
lower-body parts are mostly out of view. Default to `false`.

#### smooth_landmarks

If set to `true`, the solution filters pose landmarks across different input
images to reduce jitter, but ignored if [static_image_mode](#static_image_mode)
is also set to `true`. Default to `true`.

#### min_detection_confidence

Minimum confidence value (`[0.0, 1.0]`) from the person-detection model for the
detection to be considered successful. Default to `0.5`.

#### min_tracking_confidence

Minimum confidence value (`[0.0, 1.0]`) from the landmark-tracking model for the
pose landmarks to be considered tracked successfully, or otherwise person
detection will be invoked automatically on the next input image. Setting it to a
higher value can increase robustness of the solution, at the expense of a higher
latency. Ignored if [static_image_mode](#static_image_mode) is `true`, where
person detection simply runs on every image. Default to `0.5`.

### Output

Naming style may differ slightly across platforms/languages.

#### pose_landmarks

A list of pose landmarks. Each landmark consists of the following:

* `x` and `y`: Landmark coordinates normalized to `[0.0, 1.0]` by the image
  width and height respectively.
* `z`: Should be discarded as currently the model is not fully trained to
  predict depth, but this is something on the roadmap.
* `visibility`: A value in `[0.0, 1.0]` indicating the likelihood of the
  landmark being visible (present and not occluded) in the image.
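
Because the coordinates are normalized, converting a landmark to a pixel position only requires the image dimensions. The helper below is an illustrative sketch (the function name and the `0.5` visibility threshold are arbitrary, not part of the API):

```python
def to_pixel_coordinates(landmark, image_width, image_height):
  """Converts a normalized landmark to integer (x, y) pixel coordinates."""
  return int(landmark.x * image_width), int(landmark.y * image_height)

# Usage sketch, e.g. with the Python example below:
# for landmark in results.pose_landmarks.landmark:
#   if landmark.visibility > 0.5:  # arbitrary example threshold
#     x_px, y_px = to_pixel_coordinates(landmark, 1280, 720)
```
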
### Python Solution API
Please first follow general [instructions](../getting_started/python.md) to
install MediaPipe Python package, then learn more in the companion [Colab] and
the following usage example.
Supported configuration options:
* [static_image_mode](#static_image_mode)
* [upper_body_only](#upper_body_only)
* [smooth_landmarks](#smooth_landmarks)
* [min_detection_confidence](#min_detection_confidence)
* [min_tracking_confidence](#min_tracking_confidence)
```python
import cv2
import mediapipe as mp
mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose

# For static images:
pose = mp_pose.Pose(
    static_image_mode=True, min_detection_confidence=0.5)
for idx, file in enumerate(file_list):
  image = cv2.imread(file)
  image_height, image_width, _ = image.shape
  # Convert the BGR image to RGB before processing.
  results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
  if not results.pose_landmarks:
    continue
  print(
      f'Nose coordinates: ('
      f'{results.pose_landmarks.landmark[mp_pose.PoseLandmark.NOSE].x * image_width}, '
      f'{results.pose_landmarks.landmark[mp_pose.PoseLandmark.NOSE].y * image_height})'
  )
  # Draw pose landmarks on the image.
  annotated_image = image.copy()
  mp_drawing.draw_landmarks(
      annotated_image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
  cv2.imwrite('/tmp/annotated_image' + str(idx) + '.png', annotated_image)
pose.close()

# For webcam input:
pose = mp_pose.Pose(
    min_detection_confidence=0.5, min_tracking_confidence=0.5)
cap = cv2.VideoCapture(0)
while cap.isOpened():
  success, image = cap.read()
  if not success:
    print("Ignoring empty camera frame.")
    # If loading a video, use 'break' instead of 'continue'.
    continue

  # Flip the image horizontally for a later selfie-view display, and convert
  # the BGR image to RGB.
  image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
  # To improve performance, optionally mark the image as not writeable to
  # pass by reference.
  image.flags.writeable = False
  results = pose.process(image)

  # Draw the pose annotation on the image.
  image.flags.writeable = True
  image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
  mp_drawing.draw_landmarks(
      image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
  cv2.imshow('MediaPipe Pose', image)
  if cv2.waitKey(5) & 0xFF == 27:
    break
pose.close()
cap.release()
```

### JavaScript Solution API

Please first see general [introduction](../getting_started/javascript.md) on
MediaPipe in JavaScript, then learn more in the companion [web demo] and the
following usage example.

Supported configuration options:

* [upperBodyOnly](#upper_body_only)
* [smoothLandmarks](#smooth_landmarks)
* [minDetectionConfidence](#min_detection_confidence)
* [minTrackingConfidence](#min_tracking_confidence)
```html
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/camera_utils/camera_utils.js" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/control_utils/control_utils.js" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/drawing_utils/drawing_utils.js" crossorigin="anonymous"></script>
<script src="https://cdn.jsdelivr.net/npm/@mediapipe/pose/pose.js" crossorigin="anonymous"></script>
</head>
<body>
<div class="container">
<video class="input_video"></video>
<canvas class="output_canvas" width="1280px" height="720px"></canvas>
</div>
</body>
</html>
```
```javascript
<script type="module">
const videoElement = document.getElementsByClassName('input_video')[0];
const canvasElement = document.getElementsByClassName('output_canvas')[0];
const canvasCtx = canvasElement.getContext('2d');
function onResults(results) {
canvasCtx.save();
canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
canvasCtx.drawImage(
results.image, 0, 0, canvasElement.width, canvasElement.height);
drawConnectors(canvasCtx, results.poseLandmarks, POSE_CONNECTIONS,
{color: '#00FF00', lineWidth: 4});
drawLandmarks(canvasCtx, results.poseLandmarks,
{color: '#FF0000', lineWidth: 2});
canvasCtx.restore();
}
const pose = new Pose({locateFile: (file) => {
return `https://cdn.jsdelivr.net/npm/@mediapipe/pose/${file}`;
}});
pose.setOptions({
upperBodyOnly: false,
smoothLandmarks: true,
minDetectionConfidence: 0.5,
minTrackingConfidence: 0.5
});
pose.onResults(onResults);
const camera = new Camera(videoElement, {
onFrame: async () => {
await pose.send({image: videoElement});
},
width: 1280,
height: 720
});
camera.start();
</script>
```
## Example Apps
Please first see general instructions for
[Android](../getting_started/android.md), [iOS](../getting_started/ios.md), and
[desktop](../getting_started/cpp.md) on how to build MediaPipe examples.
Note: To visualize a graph, copy the graph and paste it into
[MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how
to visualize its associated subgraphs, please see
[visualizer documentation](../tools/visualizer.md).
### Mobile
#### Main Example
* Graph:
[`mediapipe/graphs/pose_tracking/pose_tracking_gpu.pbtxt`](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/pose_tracking/pose_tracking_gpu.pbtxt)
* Android target:
[(or download prebuilt ARM64 APK)](https://drive.google.com/file/d/17GFIrqEJS6W8UHKXlYevTtSCLxN9pWlY/view?usp=sharing)
[`mediapipe/examples/android/src/java/com/google/mediapipe/apps/posetrackinggpu:posetrackinggpu`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/posetrackinggpu/BUILD)
* iOS target:
[`mediapipe/examples/ios/posetrackinggpu:PoseTrackingGpuApp`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/posetrackinggpu/BUILD)
#### Upper-body Only
* Graph:
[`mediapipe/graphs/pose_tracking/upper_body_pose_tracking_gpu.pbtxt`](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/pose_tracking/upper_body_pose_tracking_gpu.pbtxt)
* Android target:
[(or download prebuilt ARM64 APK)](https://drive.google.com/file/d/1uKc6T7KSuA0Mlq2URi5YookHu0U3yoh_/view?usp=sharing)
[`mediapipe/examples/android/src/java/com/google/mediapipe/apps/upperbodyposetrackinggpu:upperbodyposetrackinggpu`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/upperbodyposetrackinggpu/BUILD)
* iOS target:
[`mediapipe/examples/ios/upperbodyposetrackinggpu:UpperBodyPoseTrackingGpuApp`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/upperbodyposetrackinggpu/BUILD)
### Desktop
Please first see the general instructions for [desktop](../getting_started/cpp.md)
on how to build MediaPipe examples.
#### Main Example
* Running on CPU
* Graph:
[`mediapipe/graphs/pose_tracking/pose_tracking_cpu.pbtxt`](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/pose_tracking/pose_tracking_cpu.pbtxt)
* Target:
[`mediapipe/examples/desktop/pose_tracking:pose_tracking_cpu`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/desktop/pose_tracking/BUILD)
* Running on GPU
* Graph:
[`mediapipe/graphs/pose_tracking/pose_tracking_gpu.pbtxt`](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/pose_tracking/pose_tracking_gpu.pbtxt)
* Target:
[`mediapipe/examples/desktop/pose_tracking:pose_tracking_gpu`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/desktop/pose_tracking/BUILD)
#### Upper-body Only
* Running on CPU
* Graph:
[`mediapipe/graphs/pose_tracking/upper_body_pose_tracking_cpu.pbtxt`](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/pose_tracking/upper_body_pose_tracking_cpu.pbtxt)
* Target:
[`mediapipe/examples/desktop/upper_body_pose_tracking:upper_body_pose_tracking_cpu`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/desktop/upper_body_pose_tracking/BUILD)
* Running on GPU
* Graph:
[`mediapipe/graphs/pose_tracking/upper_body_pose_tracking_gpu.pbtxt`](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/pose_tracking/upper_body_pose_tracking_gpu.pbtxt)
* Target:
[`mediapipe/examples/desktop/upper_body_pose_tracking:upper_body_pose_tracking_gpu`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/desktop/upper_body_pose_tracking/BUILD)
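Each of the desktop targets listed above builds a small C++ driver that loads the corresponding `.pbtxt` graph and runs it. As a rough, simplified sketch of that pattern (not the actual demo source; video input and error handling are omitted):

```cpp
// Hedged sketch: load a CalculatorGraphConfig from text and run it to
// completion. The real desktop demos also feed camera frames into the graph.
#include <string>

#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/port/parse_text_proto.h"
#include "mediapipe/framework/port/status.h"

mediapipe::Status RunGraph(const std::string& graph_config_text) {
  mediapipe::CalculatorGraphConfig config =
      mediapipe::ParseTextProtoOrDie<mediapipe::CalculatorGraphConfig>(
          graph_config_text);
  mediapipe::CalculatorGraph graph;
  MP_RETURN_IF_ERROR(graph.Initialize(config));
  MP_RETURN_IF_ERROR(graph.StartRun({}));
  // For a real pose graph, packets would be added to the "input_video"
  // stream here before closing the sources.
  MP_RETURN_IF_ERROR(graph.CloseAllPacketSources());
  return graph.WaitUntilDone();
}
```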
## Resources ## Resources
@ -233,3 +387,7 @@ Please refer to [these instructions](../index.md#mediapipe-on-the-web).
[BlazePose: On-device Real-time Body Pose Tracking](https://arxiv.org/abs/2006.10204) [BlazePose: On-device Real-time Body Pose Tracking](https://arxiv.org/abs/2006.10204)
([presentation](https://youtu.be/YPpUOTRn5tA)) ([presentation](https://youtu.be/YPpUOTRn5tA))
* [Models and model cards](./models.md#pose) * [Models and model cards](./models.md#pose)
[Colab]:https://mediapipe.page.link/pose_py_colab
[web demo]:https://code.mediapipe.dev/codepen/pose
@ -16,14 +16,15 @@ has_toc: false
<!-- []() in the first cell is needed to preserve table formatting in GitHub Pages. --> <!-- []() in the first cell is needed to preserve table formatting in GitHub Pages. -->
<!-- Whenever this table is updated, paste a copy to ../external_index.md. --> <!-- Whenever this table is updated, paste a copy to ../external_index.md. -->
[]() | Android | iOS | Desktop | Python | Web | Coral []() | [Android](https://google.github.io/mediapipe/getting_started/android) | [iOS](https://google.github.io/mediapipe/getting_started/ios) | [C++](https://google.github.io/mediapipe/getting_started/cpp) | [Python](https://google.github.io/mediapipe/getting_started/python) | [JS](https://google.github.io/mediapipe/getting_started/javascript) | [Coral](https://github.com/google/mediapipe/tree/master/mediapipe/examples/coral/README.md)
:---------------------------------------------------------------------------------------- | :-----: | :-: | :-----: | :----: | :-: | :---: :---------------------------------------------------------------------------------------- | :-------------------------------------------------------------: | :-----------------------------------------------------: | :-----------------------------------------------------: | :-----------------------------------------------------------: | :-----------------------------------------------------------: | :--------------------------------------------------------------------:
[Face Detection](https://google.github.io/mediapipe/solutions/face_detection) | ✅ | ✅ | ✅ | | ✅ | ✅ [Face Detection](https://google.github.io/mediapipe/solutions/face_detection) | ✅ | ✅ | ✅ | | | ✅
[Face Mesh](https://google.github.io/mediapipe/solutions/face_mesh) | ✅ | ✅ | ✅ | ✅ | | [Face Mesh](https://google.github.io/mediapipe/solutions/face_mesh) | ✅ | ✅ | ✅ | ✅ | |
[Iris](https://google.github.io/mediapipe/solutions/iris) | ✅ | ✅ | ✅ | | ✅ | [Iris](https://google.github.io/mediapipe/solutions/iris) | ✅ | ✅ | ✅ | | |
[Hands](https://google.github.io/mediapipe/solutions/hands) | ✅ | ✅ | ✅ | ✅ | ✅ | [Hands](https://google.github.io/mediapipe/solutions/hands) | ✅ | ✅ | ✅ | ✅ | ✅ |
[Pose](https://google.github.io/mediapipe/solutions/pose) | ✅ | ✅ | ✅ | ✅ | ✅ | [Pose](https://google.github.io/mediapipe/solutions/pose) | ✅ | ✅ | ✅ | ✅ | ✅ |
[Hair Segmentation](https://google.github.io/mediapipe/solutions/hair_segmentation) | ✅ | | ✅ | | ✅ | [Holistic](https://google.github.io/mediapipe/solutions/holistic) | ✅ | ✅ | ✅ | ✅ | ✅ |
[Hair Segmentation](https://google.github.io/mediapipe/solutions/hair_segmentation) | ✅ | | ✅ | | |
[Object Detection](https://google.github.io/mediapipe/solutions/object_detection) | ✅ | ✅ | ✅ | | | ✅ [Object Detection](https://google.github.io/mediapipe/solutions/object_detection) | ✅ | ✅ | ✅ | | | ✅
[Box Tracking](https://google.github.io/mediapipe/solutions/box_tracking) | ✅ | ✅ | ✅ | | | [Box Tracking](https://google.github.io/mediapipe/solutions/box_tracking) | ✅ | ✅ | ✅ | | |
[Instant Motion Tracking](https://google.github.io/mediapipe/solutions/instant_motion_tracking) | ✅ | | | | | [Instant Motion Tracking](https://google.github.io/mediapipe/solutions/instant_motion_tracking) | ✅ | | | | |
@ -2,14 +2,20 @@
layout: default layout: default
title: YouTube-8M Feature Extraction and Model Inference title: YouTube-8M Feature Extraction and Model Inference
parent: Solutions parent: Solutions
nav_order: 14 nav_order: 15
--- ---
# YouTube-8M Feature Extraction and Model Inference # YouTube-8M Feature Extraction and Model Inference
{: .no_toc } {: .no_toc }
<details close markdown="block">
<summary>
Table of contents
</summary>
{: .text-delta }
1. TOC 1. TOC
{:toc} {:toc}
</details>
--- ---
MediaPipe is a useful and general framework for media processing that can assist MediaPipe is a useful and general framework for media processing that can assist
@ -14,9 +14,11 @@
"mediapipe/examples/ios/facemeshgpu/BUILD", "mediapipe/examples/ios/facemeshgpu/BUILD",
"mediapipe/examples/ios/handdetectiongpu/BUILD", "mediapipe/examples/ios/handdetectiongpu/BUILD",
"mediapipe/examples/ios/handtrackinggpu/BUILD", "mediapipe/examples/ios/handtrackinggpu/BUILD",
"mediapipe/examples/ios/holistictrackinggpu/BUILD",
"mediapipe/examples/ios/iristrackinggpu/BUILD", "mediapipe/examples/ios/iristrackinggpu/BUILD",
"mediapipe/examples/ios/objectdetectioncpu/BUILD", "mediapipe/examples/ios/objectdetectioncpu/BUILD",
"mediapipe/examples/ios/objectdetectiongpu/BUILD", "mediapipe/examples/ios/objectdetectiongpu/BUILD",
"mediapipe/examples/ios/posetrackinggpu/BUILD",
"mediapipe/examples/ios/upperbodyposetrackinggpu/BUILD" "mediapipe/examples/ios/upperbodyposetrackinggpu/BUILD"
], ],
"buildTargets" : [ "buildTargets" : [
@ -27,9 +29,11 @@
"//mediapipe/examples/ios/facemeshgpu:FaceMeshGpuApp", "//mediapipe/examples/ios/facemeshgpu:FaceMeshGpuApp",
"//mediapipe/examples/ios/handdetectiongpu:HandDetectionGpuApp", "//mediapipe/examples/ios/handdetectiongpu:HandDetectionGpuApp",
"//mediapipe/examples/ios/handtrackinggpu:HandTrackingGpuApp", "//mediapipe/examples/ios/handtrackinggpu:HandTrackingGpuApp",
"//mediapipe/examples/ios/holistictrackinggpu:HolisticTrackingGpuApp",
"//mediapipe/examples/ios/iristrackinggpu:IrisTrackingGpuApp", "//mediapipe/examples/ios/iristrackinggpu:IrisTrackingGpuApp",
"//mediapipe/examples/ios/objectdetectioncpu:ObjectDetectionCpuApp", "//mediapipe/examples/ios/objectdetectioncpu:ObjectDetectionCpuApp",
"//mediapipe/examples/ios/objectdetectiongpu:ObjectDetectionGpuApp", "//mediapipe/examples/ios/objectdetectiongpu:ObjectDetectionGpuApp",
"//mediapipe/examples/ios/posetrackinggpu:PoseTrackingGpuApp",
"//mediapipe/examples/ios/upperbodyposetrackinggpu:UpperBodyPoseTrackingGpuApp", "//mediapipe/examples/ios/upperbodyposetrackinggpu:UpperBodyPoseTrackingGpuApp",
"//mediapipe/objc:mediapipe_framework_ios" "//mediapipe/objc:mediapipe_framework_ios"
], ],
@ -94,9 +98,11 @@
"mediapipe/examples/ios/faceeffect/Base.lproj", "mediapipe/examples/ios/faceeffect/Base.lproj",
"mediapipe/examples/ios/handdetectiongpu", "mediapipe/examples/ios/handdetectiongpu",
"mediapipe/examples/ios/handtrackinggpu", "mediapipe/examples/ios/handtrackinggpu",
"mediapipe/examples/ios/holistictrackinggpu",
"mediapipe/examples/ios/iristrackinggpu", "mediapipe/examples/ios/iristrackinggpu",
"mediapipe/examples/ios/objectdetectioncpu", "mediapipe/examples/ios/objectdetectioncpu",
"mediapipe/examples/ios/objectdetectiongpu", "mediapipe/examples/ios/objectdetectiongpu",
"mediapipe/examples/ios/posetrackinggpu",
"mediapipe/examples/ios/upperbodyposetrackinggpu", "mediapipe/examples/ios/upperbodyposetrackinggpu",
"mediapipe/framework", "mediapipe/framework",
"mediapipe/framework/deps", "mediapipe/framework/deps",
@ -17,9 +17,11 @@
"mediapipe/examples/ios/facemeshgpu", "mediapipe/examples/ios/facemeshgpu",
"mediapipe/examples/ios/handdetectiongpu", "mediapipe/examples/ios/handdetectiongpu",
"mediapipe/examples/ios/handtrackinggpu", "mediapipe/examples/ios/handtrackinggpu",
"mediapipe/examples/ios/holistictrackinggpu",
"mediapipe/examples/ios/iristrackinggpu", "mediapipe/examples/ios/iristrackinggpu",
"mediapipe/examples/ios/objectdetectioncpu", "mediapipe/examples/ios/objectdetectioncpu",
"mediapipe/examples/ios/objectdetectiongpu", "mediapipe/examples/ios/objectdetectiongpu",
"mediapipe/examples/ios/posetrackinggpu",
"mediapipe/examples/ios/upperbodyposetrackinggpu" "mediapipe/examples/ios/upperbodyposetrackinggpu"
], ],
"projectName" : "Mediapipe", "projectName" : "Mediapipe",
@ -48,18 +48,17 @@ namespace mediapipe {
// TODO: support decoding multiple streams. // TODO: support decoding multiple streams.
class AudioDecoderCalculator : public CalculatorBase { class AudioDecoderCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc); static mediapipe::Status GetContract(CalculatorContract* cc);
::mediapipe::Status Open(CalculatorContext* cc) override; mediapipe::Status Open(CalculatorContext* cc) override;
::mediapipe::Status Process(CalculatorContext* cc) override; mediapipe::Status Process(CalculatorContext* cc) override;
::mediapipe::Status Close(CalculatorContext* cc) override; mediapipe::Status Close(CalculatorContext* cc) override;
private: private:
std::unique_ptr<AudioDecoder> decoder_; std::unique_ptr<AudioDecoder> decoder_;
}; };
::mediapipe::Status AudioDecoderCalculator::GetContract( mediapipe::Status AudioDecoderCalculator::GetContract(CalculatorContract* cc) {
CalculatorContract* cc) {
cc->InputSidePackets().Tag("INPUT_FILE_PATH").Set<std::string>(); cc->InputSidePackets().Tag("INPUT_FILE_PATH").Set<std::string>();
if (cc->InputSidePackets().HasTag("OPTIONS")) { if (cc->InputSidePackets().HasTag("OPTIONS")) {
cc->InputSidePackets().Tag("OPTIONS").Set<mediapipe::AudioDecoderOptions>(); cc->InputSidePackets().Tag("OPTIONS").Set<mediapipe::AudioDecoderOptions>();
@ -68,10 +67,10 @@ class AudioDecoderCalculator : public CalculatorBase {
if (cc->Outputs().HasTag("AUDIO_HEADER")) { if (cc->Outputs().HasTag("AUDIO_HEADER")) {
cc->Outputs().Tag("AUDIO_HEADER").SetNone(); cc->Outputs().Tag("AUDIO_HEADER").SetNone();
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status AudioDecoderCalculator::Open(CalculatorContext* cc) { mediapipe::Status AudioDecoderCalculator::Open(CalculatorContext* cc) {
const std::string& input_file_path = const std::string& input_file_path =
cc->InputSidePackets().Tag("INPUT_FILE_PATH").Get<std::string>(); cc->InputSidePackets().Tag("INPUT_FILE_PATH").Get<std::string>();
const auto& decoder_options = const auto& decoder_options =
@ -88,10 +87,10 @@ class AudioDecoderCalculator : public CalculatorBase {
cc->Outputs().Tag("AUDIO_HEADER").SetHeader(Adopt(header.release())); cc->Outputs().Tag("AUDIO_HEADER").SetHeader(Adopt(header.release()));
} }
cc->Outputs().Tag("AUDIO_HEADER").Close(); cc->Outputs().Tag("AUDIO_HEADER").Close();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status AudioDecoderCalculator::Process(CalculatorContext* cc) { mediapipe::Status AudioDecoderCalculator::Process(CalculatorContext* cc) {
Packet data; Packet data;
int options_index = -1; int options_index = -1;
auto status = decoder_->GetData(&options_index, &data); auto status = decoder_->GetData(&options_index, &data);
@ -101,7 +100,7 @@ class AudioDecoderCalculator : public CalculatorBase {
return status; return status;
} }
::mediapipe::Status AudioDecoderCalculator::Close(CalculatorContext* cc) { mediapipe::Status AudioDecoderCalculator::Close(CalculatorContext* cc) {
return decoder_->Close(); return decoder_->Close();
} }
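Every hunk in this file, and in the calculator sources that follow, makes the same mechanical change: the fully qualified `::mediapipe::Status` becomes the unqualified `mediapipe::Status`, which suffices because the code already sits inside `namespace mediapipe`. Purely as an illustration of the updated convention (this calculator is made up and is not part of the commit), a minimal calculator now reads:

```cpp
// Hypothetical example only: a pass-through calculator using the unqualified
// mediapipe::Status spelling adopted throughout this commit.
#include "mediapipe/framework/calculator_framework.h"

namespace mediapipe {

class PassThroughExampleCalculator : public CalculatorBase {
 public:
  static mediapipe::Status GetContract(CalculatorContract* cc) {
    // Accept any packet type and promise to emit the same type.
    cc->Inputs().Index(0).SetAny();
    cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0));
    return mediapipe::OkStatus();
  }

  mediapipe::Status Open(CalculatorContext* cc) override {
    return mediapipe::OkStatus();
  }

  mediapipe::Status Process(CalculatorContext* cc) override {
    // Forward the input packet unchanged at the same timestamp.
    cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value());
    return mediapipe::OkStatus();
  }
};
REGISTER_CALCULATOR(PassThroughExampleCalculator);

}  // namespace mediapipe
```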
@ -38,7 +38,7 @@ static bool SafeMultiply(int x, int y, int* result) {
} }
} // namespace } // namespace
::mediapipe::Status BasicTimeSeriesCalculatorBase::GetContract( mediapipe::Status BasicTimeSeriesCalculatorBase::GetContract(
CalculatorContract* cc) { CalculatorContract* cc) {
cc->Inputs().Index(0).Set<Matrix>( cc->Inputs().Index(0).Set<Matrix>(
// Input stream with TimeSeriesHeader. // Input stream with TimeSeriesHeader.
@ -46,10 +46,10 @@ static bool SafeMultiply(int x, int y, int* result) {
cc->Outputs().Index(0).Set<Matrix>( cc->Outputs().Index(0).Set<Matrix>(
// Output stream with TimeSeriesHeader. // Output stream with TimeSeriesHeader.
); );
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status BasicTimeSeriesCalculatorBase::Open(CalculatorContext* cc) { mediapipe::Status BasicTimeSeriesCalculatorBase::Open(CalculatorContext* cc) {
TimeSeriesHeader input_header; TimeSeriesHeader input_header;
MP_RETURN_IF_ERROR(time_series_util::FillTimeSeriesHeaderIfValid( MP_RETURN_IF_ERROR(time_series_util::FillTimeSeriesHeaderIfValid(
cc->Inputs().Index(0).Header(), &input_header)); cc->Inputs().Index(0).Header(), &input_header));
@ -57,10 +57,10 @@ static bool SafeMultiply(int x, int y, int* result) {
auto output_header = new TimeSeriesHeader(input_header); auto output_header = new TimeSeriesHeader(input_header);
MP_RETURN_IF_ERROR(MutateHeader(output_header)); MP_RETURN_IF_ERROR(MutateHeader(output_header));
cc->Outputs().Index(0).SetHeader(Adopt(output_header)); cc->Outputs().Index(0).SetHeader(Adopt(output_header));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status BasicTimeSeriesCalculatorBase::Process( mediapipe::Status BasicTimeSeriesCalculatorBase::Process(
CalculatorContext* cc) { CalculatorContext* cc) {
const Matrix& input = cc->Inputs().Index(0).Get<Matrix>(); const Matrix& input = cc->Inputs().Index(0).Get<Matrix>();
MP_RETURN_IF_ERROR(time_series_util::IsMatrixShapeConsistentWithHeader( MP_RETURN_IF_ERROR(time_series_util::IsMatrixShapeConsistentWithHeader(
@ -71,12 +71,12 @@ static bool SafeMultiply(int x, int y, int* result) {
*output, cc->Outputs().Index(0).Header().Get<TimeSeriesHeader>())); *output, cc->Outputs().Index(0).Header().Get<TimeSeriesHeader>()));
cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp()); cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status BasicTimeSeriesCalculatorBase::MutateHeader( mediapipe::Status BasicTimeSeriesCalculatorBase::MutateHeader(
TimeSeriesHeader* output_header) { TimeSeriesHeader* output_header) {
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
// Calculator to sum an input time series across channels. This is // Calculator to sum an input time series across channels. This is
@ -86,9 +86,9 @@ static bool SafeMultiply(int x, int y, int* result) {
class SumTimeSeriesAcrossChannelsCalculator class SumTimeSeriesAcrossChannelsCalculator
: public BasicTimeSeriesCalculatorBase { : public BasicTimeSeriesCalculatorBase {
protected: protected:
::mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final {
output_header->set_num_channels(1); output_header->set_num_channels(1);
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
Matrix ProcessMatrix(const Matrix& input_matrix) final { Matrix ProcessMatrix(const Matrix& input_matrix) final {
@ -104,9 +104,9 @@ REGISTER_CALCULATOR(SumTimeSeriesAcrossChannelsCalculator);
class AverageTimeSeriesAcrossChannelsCalculator class AverageTimeSeriesAcrossChannelsCalculator
: public BasicTimeSeriesCalculatorBase { : public BasicTimeSeriesCalculatorBase {
protected: protected:
::mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final {
output_header->set_num_channels(1); output_header->set_num_channels(1);
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
Matrix ProcessMatrix(const Matrix& input_matrix) final { Matrix ProcessMatrix(const Matrix& input_matrix) final {
@ -122,7 +122,7 @@ REGISTER_CALCULATOR(AverageTimeSeriesAcrossChannelsCalculator);
// Options proto: None. // Options proto: None.
class SummarySaiToPitchogramCalculator : public BasicTimeSeriesCalculatorBase { class SummarySaiToPitchogramCalculator : public BasicTimeSeriesCalculatorBase {
protected: protected:
::mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final {
if (output_header->num_channels() != 1) { if (output_header->num_channels() != 1) {
return tool::StatusInvalid( return tool::StatusInvalid(
absl::StrCat("Expected single-channel input, got ", absl::StrCat("Expected single-channel input, got ",
@ -131,7 +131,7 @@ class SummarySaiToPitchogramCalculator : public BasicTimeSeriesCalculatorBase {
output_header->set_num_channels(output_header->num_samples()); output_header->set_num_channels(output_header->num_samples());
output_header->set_num_samples(1); output_header->set_num_samples(1);
output_header->set_sample_rate(output_header->packet_rate()); output_header->set_sample_rate(output_header->packet_rate());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
Matrix ProcessMatrix(const Matrix& input_matrix) final { Matrix ProcessMatrix(const Matrix& input_matrix) final {
@ -160,7 +160,7 @@ REGISTER_CALCULATOR(ReverseChannelOrderCalculator);
// Options proto: None. // Options proto: None.
class FlattenPacketCalculator : public BasicTimeSeriesCalculatorBase { class FlattenPacketCalculator : public BasicTimeSeriesCalculatorBase {
protected: protected:
::mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final {
const int num_input_channels = output_header->num_channels(); const int num_input_channels = output_header->num_channels();
const int num_input_samples = output_header->num_samples(); const int num_input_samples = output_header->num_samples();
RET_CHECK(num_input_channels >= 0) RET_CHECK(num_input_channels >= 0)
@ -174,7 +174,7 @@ class FlattenPacketCalculator : public BasicTimeSeriesCalculatorBase {
output_header->set_num_channels(output_num_channels); output_header->set_num_channels(output_num_channels);
output_header->set_num_samples(1); output_header->set_num_samples(1);
output_header->set_sample_rate(output_header->packet_rate()); output_header->set_sample_rate(output_header->packet_rate());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
Matrix ProcessMatrix(const Matrix& input_matrix) final { Matrix ProcessMatrix(const Matrix& input_matrix) final {
@ -253,10 +253,10 @@ REGISTER_CALCULATOR(DivideByMeanAcrossChannelsCalculator);
// Options proto: None. // Options proto: None.
class MeanCalculator : public BasicTimeSeriesCalculatorBase { class MeanCalculator : public BasicTimeSeriesCalculatorBase {
protected: protected:
::mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final {
output_header->set_num_samples(1); output_header->set_num_samples(1);
output_header->set_sample_rate(output_header->packet_rate()); output_header->set_sample_rate(output_header->packet_rate());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
Matrix ProcessMatrix(const Matrix& input_matrix) final { Matrix ProcessMatrix(const Matrix& input_matrix) final {
@ -272,10 +272,10 @@ REGISTER_CALCULATOR(MeanCalculator);
// Options proto: None. // Options proto: None.
class StandardDeviationCalculator : public BasicTimeSeriesCalculatorBase { class StandardDeviationCalculator : public BasicTimeSeriesCalculatorBase {
protected: protected:
::mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final {
output_header->set_num_samples(1); output_header->set_num_samples(1);
output_header->set_sample_rate(output_header->packet_rate()); output_header->set_sample_rate(output_header->packet_rate());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
Matrix ProcessMatrix(const Matrix& input_matrix) final { Matrix ProcessMatrix(const Matrix& input_matrix) final {
@ -293,9 +293,9 @@ REGISTER_CALCULATOR(StandardDeviationCalculator);
// Options proto: None. // Options proto: None.
class CovarianceCalculator : public BasicTimeSeriesCalculatorBase { class CovarianceCalculator : public BasicTimeSeriesCalculatorBase {
protected: protected:
::mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final {
output_header->set_num_samples(output_header->num_channels()); output_header->set_num_samples(output_header->num_channels());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
Matrix ProcessMatrix(const Matrix& input_matrix) final { Matrix ProcessMatrix(const Matrix& input_matrix) final {
@ -313,9 +313,9 @@ REGISTER_CALCULATOR(CovarianceCalculator);
// Options proto: None. // Options proto: None.
class L2NormCalculator : public BasicTimeSeriesCalculatorBase { class L2NormCalculator : public BasicTimeSeriesCalculatorBase {
protected: protected:
::mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final {
output_header->set_num_channels(1); output_header->set_num_channels(1);
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
Matrix ProcessMatrix(const Matrix& input_matrix) final { Matrix ProcessMatrix(const Matrix& input_matrix) final {
@ -385,12 +385,12 @@ REGISTER_CALCULATOR(ElementwiseSquareCalculator);
// Options proto: None. // Options proto: None.
class FirstHalfSlicerCalculator : public BasicTimeSeriesCalculatorBase { class FirstHalfSlicerCalculator : public BasicTimeSeriesCalculatorBase {
protected: protected:
::mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final {
const int num_input_samples = output_header->num_samples(); const int num_input_samples = output_header->num_samples();
RET_CHECK(num_input_samples >= 0) RET_CHECK(num_input_samples >= 0)
<< "FirstHalfSlicerCalculator: num_input_samples < 0"; << "FirstHalfSlicerCalculator: num_input_samples < 0";
output_header->set_num_samples(num_input_samples / 2); output_header->set_num_samples(num_input_samples / 2);
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
Matrix ProcessMatrix(const Matrix& input_matrix) final { Matrix ProcessMatrix(const Matrix& input_matrix) final {

class BasicTimeSeriesCalculatorBase : public CalculatorBase { class BasicTimeSeriesCalculatorBase : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc); static mediapipe::Status GetContract(CalculatorContract* cc);
::mediapipe::Status Open(CalculatorContext* cc) override; mediapipe::Status Open(CalculatorContext* cc) override;
::mediapipe::Status Process(CalculatorContext* cc) override; mediapipe::Status Process(CalculatorContext* cc) override;
protected: protected:
// Open() calls this method to mutate the output stream header. The input // Open() calls this method to mutate the output stream header. The input
// to this function will contain a copy of the input stream header, so // to this function will contain a copy of the input stream header, so
// subclasses that do not need to mutate the header do not need to override // subclasses that do not need to mutate the header do not need to override
// it. // it.
virtual ::mediapipe::Status MutateHeader(TimeSeriesHeader* output_header); virtual mediapipe::Status MutateHeader(TimeSeriesHeader* output_header);
// Process() calls this method on each packet to compute the output matrix. // Process() calls this method on each packet to compute the output matrix.
virtual Matrix ProcessMatrix(const Matrix& input_matrix) = 0; virtual Matrix ProcessMatrix(const Matrix& input_matrix) = 0;
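The comments above spell out the extension contract: subclasses of `BasicTimeSeriesCalculatorBase` must implement `ProcessMatrix()`, and only need to override `MutateHeader()` when the output header differs from the input. A hedged sketch of such a subclass (the class name and behavior are invented, and the include path is assumed):

```cpp
// Hypothetical subclass: negates every sample. The output has the same shape
// and rate as the input, so the default no-op MutateHeader() is kept,
// matching the comment above about subclasses that do not mutate the header.
#include "mediapipe/calculators/audio/basic_time_series_calculators.h"

namespace mediapipe {

class NegateTimeSeriesCalculator : public BasicTimeSeriesCalculatorBase {
 protected:
  Matrix ProcessMatrix(const Matrix& input_matrix) final {
    return -input_matrix;
  }
};
REGISTER_CALCULATOR(NegateTimeSeriesCalculator);

}  // namespace mediapipe
```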
@ -66,7 +66,7 @@ std::string PortableDebugString(const TimeSeriesHeader& header) {
// rows corresponding to the new feature space). // rows corresponding to the new feature space).
class FramewiseTransformCalculatorBase : public CalculatorBase { class FramewiseTransformCalculatorBase : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
cc->Inputs().Index(0).Set<Matrix>( cc->Inputs().Index(0).Set<Matrix>(
// Sequence of Matrices, each column describing a particular time frame, // Sequence of Matrices, each column describing a particular time frame,
// each row a feature dimension, with TimeSeriesHeader. // each row a feature dimension, with TimeSeriesHeader.
@ -75,11 +75,11 @@ class FramewiseTransformCalculatorBase : public CalculatorBase {
// Sequence of Matrices, each column describing a particular time frame, // Sequence of Matrices, each column describing a particular time frame,
// each row a feature dimension, with TimeSeriesHeader. // each row a feature dimension, with TimeSeriesHeader.
); );
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) override; mediapipe::Status Open(CalculatorContext* cc) override;
::mediapipe::Status Process(CalculatorContext* cc) override; mediapipe::Status Process(CalculatorContext* cc) override;
int num_output_channels(void) { return num_output_channels_; } int num_output_channels(void) { return num_output_channels_; }
@ -90,7 +90,7 @@ class FramewiseTransformCalculatorBase : public CalculatorBase {
private: private:
// Takes header and options, and sets up state including calling // Takes header and options, and sets up state including calling
// set_num_output_channels() on the base object. // set_num_output_channels() on the base object.
virtual ::mediapipe::Status ConfigureTransform(const TimeSeriesHeader& header, virtual mediapipe::Status ConfigureTransform(const TimeSeriesHeader& header,
CalculatorContext* cc) = 0; CalculatorContext* cc) = 0;
// Takes a vector<double> corresponding to an input frame, and // Takes a vector<double> corresponding to an input frame, and
@ -102,13 +102,13 @@ class FramewiseTransformCalculatorBase : public CalculatorBase {
int num_output_channels_; int num_output_channels_;
}; };
::mediapipe::Status FramewiseTransformCalculatorBase::Open( mediapipe::Status FramewiseTransformCalculatorBase::Open(
CalculatorContext* cc) { CalculatorContext* cc) {
TimeSeriesHeader input_header; TimeSeriesHeader input_header;
MP_RETURN_IF_ERROR(time_series_util::FillTimeSeriesHeaderIfValid( MP_RETURN_IF_ERROR(time_series_util::FillTimeSeriesHeaderIfValid(
cc->Inputs().Index(0).Header(), &input_header)); cc->Inputs().Index(0).Header(), &input_header));
::mediapipe::Status status = ConfigureTransform(input_header, cc); mediapipe::Status status = ConfigureTransform(input_header, cc);
auto output_header = new TimeSeriesHeader(input_header); auto output_header = new TimeSeriesHeader(input_header);
output_header->set_num_channels(num_output_channels_); output_header->set_num_channels(num_output_channels_);
@ -117,7 +117,7 @@ class FramewiseTransformCalculatorBase : public CalculatorBase {
return status; return status;
} }
::mediapipe::Status FramewiseTransformCalculatorBase::Process( mediapipe::Status FramewiseTransformCalculatorBase::Process(
CalculatorContext* cc) { CalculatorContext* cc) {
const Matrix& input = cc->Inputs().Index(0).Get<Matrix>(); const Matrix& input = cc->Inputs().Index(0).Get<Matrix>();
const int num_frames = input.cols(); const int num_frames = input.cols();
@ -145,7 +145,7 @@ class FramewiseTransformCalculatorBase : public CalculatorBase {
} }
cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp()); cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
// Calculator wrapper around the dsp/mfcc/mfcc.cc routine. // Calculator wrapper around the dsp/mfcc/mfcc.cc routine.
@ -170,12 +170,12 @@ class FramewiseTransformCalculatorBase : public CalculatorBase {
// } // }
class MfccCalculator : public FramewiseTransformCalculatorBase { class MfccCalculator : public FramewiseTransformCalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
return FramewiseTransformCalculatorBase::GetContract(cc); return FramewiseTransformCalculatorBase::GetContract(cc);
} }
private: private:
::mediapipe::Status ConfigureTransform(const TimeSeriesHeader& header, mediapipe::Status ConfigureTransform(const TimeSeriesHeader& header,
CalculatorContext* cc) override { CalculatorContext* cc) override {
MfccCalculatorOptions mfcc_options = cc->Options<MfccCalculatorOptions>(); MfccCalculatorOptions mfcc_options = cc->Options<MfccCalculatorOptions>();
mfcc_.reset(new audio_dsp::Mfcc()); mfcc_.reset(new audio_dsp::Mfcc());
@ -194,7 +194,7 @@ class MfccCalculator : public FramewiseTransformCalculatorBase {
// audio_dsp::MelFilterBank needs to know this to // audio_dsp::MelFilterBank needs to know this to
// correctly interpret the spectrogram bins. // correctly interpret the spectrogram bins.
if (!header.has_audio_sample_rate()) { if (!header.has_audio_sample_rate()) {
return ::mediapipe::InvalidArgumentError( return mediapipe::InvalidArgumentError(
absl::StrCat("No audio_sample_rate in input TimeSeriesHeader ", absl::StrCat("No audio_sample_rate in input TimeSeriesHeader ",
PortableDebugString(header))); PortableDebugString(header)));
} }
@ -203,9 +203,9 @@ class MfccCalculator : public FramewiseTransformCalculatorBase {
mfcc_->Initialize(input_length, header.audio_sample_rate()); mfcc_->Initialize(input_length, header.audio_sample_rate());
if (initialized) { if (initialized) {
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} else { } else {
return ::mediapipe::Status(mediapipe::StatusCode::kInternal, return mediapipe::Status(mediapipe::StatusCode::kInternal,
"Mfcc::Initialize returned uninitialized"); "Mfcc::Initialize returned uninitialized");
} }
} }
@ -228,12 +228,12 @@ REGISTER_CALCULATOR(MfccCalculator);
// if you ask for too many channels. // if you ask for too many channels.
class MelSpectrumCalculator : public FramewiseTransformCalculatorBase { class MelSpectrumCalculator : public FramewiseTransformCalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
return FramewiseTransformCalculatorBase::GetContract(cc); return FramewiseTransformCalculatorBase::GetContract(cc);
} }
private: private:
::mediapipe::Status ConfigureTransform(const TimeSeriesHeader& header, mediapipe::Status ConfigureTransform(const TimeSeriesHeader& header,
CalculatorContext* cc) override { CalculatorContext* cc) override {
MelSpectrumCalculatorOptions mel_spectrum_options = MelSpectrumCalculatorOptions mel_spectrum_options =
cc->Options<MelSpectrumCalculatorOptions>(); cc->Options<MelSpectrumCalculatorOptions>();
@ -245,7 +245,7 @@ class MelSpectrumCalculator : public FramewiseTransformCalculatorBase {
// audio_dsp::MelFilterBank needs to know this to // audio_dsp::MelFilterBank needs to know this to
// correctly interpret the spectrogram bins. // correctly interpret the spectrogram bins.
if (!header.has_audio_sample_rate()) { if (!header.has_audio_sample_rate()) {
return ::mediapipe::InvalidArgumentError( return mediapipe::InvalidArgumentError(
absl::StrCat("No audio_sample_rate in input TimeSeriesHeader ", absl::StrCat("No audio_sample_rate in input TimeSeriesHeader ",
PortableDebugString(header))); PortableDebugString(header)));
} }
@ -255,9 +255,9 @@ class MelSpectrumCalculator : public FramewiseTransformCalculatorBase {
mel_spectrum_options.max_frequency_hertz()); mel_spectrum_options.max_frequency_hertz());
if (initialized) { if (initialized) {
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} else { } else {
return ::mediapipe::Status(mediapipe::StatusCode::kInternal, return mediapipe::Status(mediapipe::StatusCode::kInternal,
"mfcc::Initialize returned uninitialized"); "mfcc::Initialize returned uninitialized");
} }
} }
@ -84,7 +84,7 @@ class FramewiseTransformCalculatorTest
num_samples_per_packet_ = GenerateRandomNonnegInputStream(kNumPackets); num_samples_per_packet_ = GenerateRandomNonnegInputStream(kNumPackets);
} }
::mediapipe::Status Run() { return this->RunGraph(); } mediapipe::Status Run() { return this->RunGraph(); }
void CheckResults(int expected_num_channels) { void CheckResults(int expected_num_channels) {
const auto& output_header = const auto& output_header =
@ -23,15 +23,15 @@ using audio_dsp::RationalFactorResampler;
using audio_dsp::Resampler; using audio_dsp::Resampler;
namespace mediapipe { namespace mediapipe {
::mediapipe::Status RationalFactorResampleCalculator::Process( mediapipe::Status RationalFactorResampleCalculator::Process(
CalculatorContext* cc) { CalculatorContext* cc) {
return ProcessInternal(cc->Inputs().Index(0).Get<Matrix>(), false, cc); return ProcessInternal(cc->Inputs().Index(0).Get<Matrix>(), false, cc);
} }
::mediapipe::Status RationalFactorResampleCalculator::Close( mediapipe::Status RationalFactorResampleCalculator::Close(
CalculatorContext* cc) { CalculatorContext* cc) {
if (initial_timestamp_ == Timestamp::Unstarted()) { if (initial_timestamp_ == Timestamp::Unstarted()) {
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
Matrix empty_input_frame(num_channels_, 0); Matrix empty_input_frame(num_channels_, 0);
return ProcessInternal(empty_input_frame, true, cc); return ProcessInternal(empty_input_frame, true, cc);
@ -62,7 +62,7 @@ void CopyVectorToChannel(const std::vector<float>& vec, Matrix* matrix,
} // namespace } // namespace
::mediapipe::Status RationalFactorResampleCalculator::Open( mediapipe::Status RationalFactorResampleCalculator::Open(
CalculatorContext* cc) { CalculatorContext* cc) {
RationalFactorResampleCalculatorOptions resample_options = RationalFactorResampleCalculatorOptions resample_options =
cc->Options<RationalFactorResampleCalculatorOptions>(); cc->Options<RationalFactorResampleCalculatorOptions>();
@ -88,7 +88,7 @@ void CopyVectorToChannel(const std::vector<float>& vec, Matrix* matrix,
resample_options); resample_options);
if (!r) { if (!r) {
LOG(ERROR) << "Failed to initialize resampler."; LOG(ERROR) << "Failed to initialize resampler.";
return ::mediapipe::UnknownError("Failed to initialize resampler."); return mediapipe::UnknownError("Failed to initialize resampler.");
} }
} }
} }
@ -106,10 +106,10 @@ void CopyVectorToChannel(const std::vector<float>& vec, Matrix* matrix,
initial_timestamp_ = Timestamp::Unstarted(); initial_timestamp_ = Timestamp::Unstarted();
check_inconsistent_timestamps_ = check_inconsistent_timestamps_ =
resample_options.check_inconsistent_timestamps(); resample_options.check_inconsistent_timestamps();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status RationalFactorResampleCalculator::ProcessInternal( mediapipe::Status RationalFactorResampleCalculator::ProcessInternal(
const Matrix& input_frame, bool should_flush, CalculatorContext* cc) { const Matrix& input_frame, bool should_flush, CalculatorContext* cc) {
if (initial_timestamp_ == Timestamp::Unstarted()) { if (initial_timestamp_ == Timestamp::Unstarted()) {
initial_timestamp_ = cc->InputTimestamp(); initial_timestamp_ = cc->InputTimestamp();
@ -131,7 +131,7 @@ void CopyVectorToChannel(const std::vector<float>& vec, Matrix* matrix,
*output_frame = input_frame; *output_frame = input_frame;
} else { } else {
if (!Resample(input_frame, output_frame.get(), should_flush)) { if (!Resample(input_frame, output_frame.get(), should_flush)) {
return ::mediapipe::UnknownError("Resample() failed."); return mediapipe::UnknownError("Resample() failed.");
} }
} }
cumulative_output_samples_ += output_frame->cols(); cumulative_output_samples_ += output_frame->cols();
@ -139,7 +139,7 @@ void CopyVectorToChannel(const std::vector<float>& vec, Matrix* matrix,
if (output_frame->cols() > 0) { if (output_frame->cols() > 0) {
cc->Outputs().Index(0).Add(output_frame.release(), output_timestamp); cc->Outputs().Index(0).Add(output_frame.release(), output_timestamp);
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
bool RationalFactorResampleCalculator::Resample(const Matrix& input_frame, bool RationalFactorResampleCalculator::Resample(const Matrix& input_frame,
@ -40,24 +40,24 @@ class RationalFactorResampleCalculator : public CalculatorBase {
public: public:
struct TestAccess; struct TestAccess;
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
cc->Inputs().Index(0).Set<Matrix>( cc->Inputs().Index(0).Set<Matrix>(
// Single input stream with TimeSeriesHeader. // Single input stream with TimeSeriesHeader.
); );
cc->Outputs().Index(0).Set<Matrix>( cc->Outputs().Index(0).Set<Matrix>(
// Resampled stream with TimeSeriesHeader. // Resampled stream with TimeSeriesHeader.
); );
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
// Returns FAIL if the input stream header is invalid or if the // Returns FAIL if the input stream header is invalid or if the
// resampler cannot be initialized. // resampler cannot be initialized.
::mediapipe::Status Open(CalculatorContext* cc) override; mediapipe::Status Open(CalculatorContext* cc) override;
// Resamples a packet of TimeSeries data. Returns FAIL if the // Resamples a packet of TimeSeries data. Returns FAIL if the
// resampler state becomes inconsistent. // resampler state becomes inconsistent.
::mediapipe::Status Process(CalculatorContext* cc) override; mediapipe::Status Process(CalculatorContext* cc) override;
// Flushes any remaining state. Returns FAIL if the resampler state // Flushes any remaining state. Returns FAIL if the resampler state
// becomes inconsistent. // becomes inconsistent.
::mediapipe::Status Close(CalculatorContext* cc) override; mediapipe::Status Close(CalculatorContext* cc) override;
protected: protected:
typedef audio_dsp::Resampler<float> ResamplerType; typedef audio_dsp::Resampler<float> ResamplerType;
@ -72,7 +72,7 @@ class RationalFactorResampleCalculator : public CalculatorBase {
// Does Timestamp bookkeeping and resampling common to Process() and // Does Timestamp bookkeeping and resampling common to Process() and
// Close(). Returns FAIL if the resampler state becomes // Close(). Returns FAIL if the resampler state becomes
// inconsistent. // inconsistent.
::mediapipe::Status ProcessInternal(const Matrix& input_frame, mediapipe::Status ProcessInternal(const Matrix& input_frame,
bool should_flush, CalculatorContext* cc); bool should_flush, CalculatorContext* cc);
// Uses the internal resampler_ objects to actually resample each // Uses the internal resampler_ objects to actually resample each
@ -80,7 +80,7 @@ class RationalFactorResampleCalculatorTest
} }
// Initializes and runs the test graph. // Initializes and runs the test graph.
::mediapipe::Status Run(double output_sample_rate) { mediapipe::Status Run(double output_sample_rate) {
options_.set_target_sample_rate(output_sample_rate); options_.set_target_sample_rate(output_sample_rate);
InitializeGraph(); InitializeGraph();
@ -66,7 +66,7 @@ namespace mediapipe {
// analysis frame will advance from its predecessor by the same time step. // analysis frame will advance from its predecessor by the same time step.
class SpectrogramCalculator : public CalculatorBase { class SpectrogramCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
cc->Inputs().Index(0).Set<Matrix>( cc->Inputs().Index(0).Set<Matrix>(
// Input stream with TimeSeriesHeader. // Input stream with TimeSeriesHeader.
); );
@ -96,21 +96,21 @@ class SpectrogramCalculator : public CalculatorBase {
); );
} }
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
// Returns FAIL if the input stream header is invalid. // Returns FAIL if the input stream header is invalid.
::mediapipe::Status Open(CalculatorContext* cc) override; mediapipe::Status Open(CalculatorContext* cc) override;
// Outputs at most one packet consisting of a single Matrix with one or // Outputs at most one packet consisting of a single Matrix with one or
// more columns containing the spectral values from as many input frames // more columns containing the spectral values from as many input frames
// as are completed by the input samples. Always returns OK. // as are completed by the input samples. Always returns OK.
::mediapipe::Status Process(CalculatorContext* cc) override; mediapipe::Status Process(CalculatorContext* cc) override;
// Performs zero-padding and processing of any remaining samples // Performs zero-padding and processing of any remaining samples
// if pad_final_packet is set. // if pad_final_packet is set.
// Returns OK. // Returns OK.
::mediapipe::Status Close(CalculatorContext* cc) override; mediapipe::Status Close(CalculatorContext* cc) override;
private: private:
Timestamp CurrentOutputTimestamp(CalculatorContext* cc) { Timestamp CurrentOutputTimestamp(CalculatorContext* cc) {
@ -138,12 +138,12 @@ class SpectrogramCalculator : public CalculatorBase {
// Convert the output of the spectrogram object into a Matrix (or an // Convert the output of the spectrogram object into a Matrix (or an
// Eigen::MatrixXcf if complex-valued output is requested) and pass to // Eigen::MatrixXcf if complex-valued output is requested) and pass to
// MediaPipe output. // MediaPipe output.
::mediapipe::Status ProcessVector(const Matrix& input_stream, mediapipe::Status ProcessVector(const Matrix& input_stream,
CalculatorContext* cc); CalculatorContext* cc);
// Templated function to process either real- or complex-output spectrogram. // Templated function to process either real- or complex-output spectrogram.
template <class OutputMatrixType> template <class OutputMatrixType>
::mediapipe::Status ProcessVectorToOutput( mediapipe::Status ProcessVectorToOutput(
const Matrix& input_stream, const Matrix& input_stream,
const OutputMatrixType postprocess_output_fn(const OutputMatrixType&), const OutputMatrixType postprocess_output_fn(const OutputMatrixType&),
CalculatorContext* cc); CalculatorContext* cc);
@ -177,7 +177,7 @@ REGISTER_CALCULATOR(SpectrogramCalculator);
// Factor to convert ln(magnitude_squared) to deciBels = 10.0/ln(10.0). // Factor to convert ln(magnitude_squared) to deciBels = 10.0/ln(10.0).
const float SpectrogramCalculator::kLnPowerToDb = 4.342944819032518; const float SpectrogramCalculator::kLnPowerToDb = 4.342944819032518;
::mediapipe::Status SpectrogramCalculator::Open(CalculatorContext* cc) { mediapipe::Status SpectrogramCalculator::Open(CalculatorContext* cc) {
SpectrogramCalculatorOptions spectrogram_options = SpectrogramCalculatorOptions spectrogram_options =
cc->Options<SpectrogramCalculatorOptions>(); cc->Options<SpectrogramCalculatorOptions>();
@ -272,10 +272,10 @@ const float SpectrogramCalculator::kLnPowerToDb = 4.342944819032518;
} }
cumulative_completed_frames_ = 0; cumulative_completed_frames_ = 0;
initial_input_timestamp_ = Timestamp::Unstarted(); initial_input_timestamp_ = Timestamp::Unstarted();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status SpectrogramCalculator::Process(CalculatorContext* cc) { mediapipe::Status SpectrogramCalculator::Process(CalculatorContext* cc) {
if (initial_input_timestamp_ == Timestamp::Unstarted()) { if (initial_input_timestamp_ == Timestamp::Unstarted()) {
initial_input_timestamp_ = cc->InputTimestamp(); initial_input_timestamp_ = cc->InputTimestamp();
} }
@ -291,7 +291,7 @@ const float SpectrogramCalculator::kLnPowerToDb = 4.342944819032518;
} }
template <class OutputMatrixType> template <class OutputMatrixType>
::mediapipe::Status SpectrogramCalculator::ProcessVectorToOutput( mediapipe::Status SpectrogramCalculator::ProcessVectorToOutput(
const Matrix& input_stream, const Matrix& input_stream,
const OutputMatrixType postprocess_output_fn(const OutputMatrixType&), const OutputMatrixType postprocess_output_fn(const OutputMatrixType&),
CalculatorContext* cc) { CalculatorContext* cc) {
@ -311,7 +311,7 @@ template <class OutputMatrixType>
if (!spectrogram_generators_[channel]->ComputeSpectrogram( if (!spectrogram_generators_[channel]->ComputeSpectrogram(
input_vector, &output_vectors)) { input_vector, &output_vectors)) {
return ::mediapipe::Status(mediapipe::StatusCode::kInternal, return mediapipe::Status(mediapipe::StatusCode::kInternal,
"Spectrogram returned failure"); "Spectrogram returned failure");
} }
if (channel == 0) { if (channel == 0) {
@ -355,10 +355,10 @@ template <class OutputMatrixType>
} }
cumulative_completed_frames_ += output_vectors.size(); cumulative_completed_frames_ += output_vectors.size();
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status SpectrogramCalculator::ProcessVector( mediapipe::Status SpectrogramCalculator::ProcessVector(
const Matrix& input_stream, CalculatorContext* cc) { const Matrix& input_stream, CalculatorContext* cc) {
switch (output_type_) { switch (output_type_) {
// These blocks deliberately ignore clang-format to preserve the // These blocks deliberately ignore clang-format to preserve the
@ -394,13 +394,13 @@ template <class OutputMatrixType>
} }
// clang-format on // clang-format on
default: { default: {
return ::mediapipe::Status(mediapipe::StatusCode::kInvalidArgument, return mediapipe::Status(mediapipe::StatusCode::kInvalidArgument,
"Unrecognized spectrogram output type."); "Unrecognized spectrogram output type.");
} }
} }
} }
::mediapipe::Status SpectrogramCalculator::Close(CalculatorContext* cc) { mediapipe::Status SpectrogramCalculator::Close(CalculatorContext* cc) {
if (cumulative_input_samples_ > 0 && pad_final_packet_) { if (cumulative_input_samples_ > 0 && pad_final_packet_) {
// We can flush any remaining samples by sending frame_step_samples - 1 // We can flush any remaining samples by sending frame_step_samples - 1
// zeros to the Process method, and letting it do its thing, // zeros to the Process method, and letting it do its thing,
@ -416,7 +416,7 @@ template <class OutputMatrixType>
Matrix::Zero(num_input_channels_, required_padding_samples), cc); Matrix::Zero(num_input_channels_, required_padding_samples), cc);
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
} // namespace mediapipe } // namespace mediapipe
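One constant in the file above is worth unpacking: `kLnPowerToDb = 4.342944819032518` is simply `10 / ln(10)`, since `dB = 10 * log10(p) = (10 / ln(10)) * ln(p)`. A tiny standalone check (my own sketch, not part of the commit):

```cpp
// Verifies that 10 / ln(10) matches SpectrogramCalculator::kLnPowerToDb above.
#include <cmath>
#include <cstdio>

int main() {
  const double ln_power_to_db = 10.0 / std::log(10.0);
  std::printf("%.15f\n", ln_power_to_db);  // ~4.342944819032518
  return 0;
}
```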
@ -50,7 +50,7 @@ class SpectrogramCalculatorTest
} }
// Initializes and runs the test graph. // Initializes and runs the test graph.
::mediapipe::Status Run() { mediapipe::Status Run() {
// Now that options are set, we can set up some internal constants. // Now that options are set, we can set up some internal constants.
frame_duration_samples_ = frame_duration_samples_ =
round(options_.frame_duration_seconds() * input_sample_rate_); round(options_.frame_duration_seconds() * input_sample_rate_);
@ -41,17 +41,17 @@ namespace mediapipe {
// } // }
class StabilizedLogCalculator : public CalculatorBase { class StabilizedLogCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
cc->Inputs().Index(0).Set<Matrix>( cc->Inputs().Index(0).Set<Matrix>(
// Input stream with TimeSeriesHeader. // Input stream with TimeSeriesHeader.
); );
cc->Outputs().Index(0).Set<Matrix>( cc->Outputs().Index(0).Set<Matrix>(
// Output stabilized log stream with TimeSeriesHeader. // Output stabilized log stream with TimeSeriesHeader.
); );
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) override { mediapipe::Status Open(CalculatorContext* cc) override {
StabilizedLogCalculatorOptions stabilized_log_calculator_options = StabilizedLogCalculatorOptions stabilized_log_calculator_options =
cc->Options<StabilizedLogCalculatorOptions>(); cc->Options<StabilizedLogCalculatorOptions>();
@ -70,23 +70,23 @@ class StabilizedLogCalculator : public CalculatorBase {
cc->Outputs().Index(0).SetHeader( cc->Outputs().Index(0).SetHeader(
Adopt(new TimeSeriesHeader(input_header))); Adopt(new TimeSeriesHeader(input_header)));
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) override { mediapipe::Status Process(CalculatorContext* cc) override {
auto input_matrix = cc->Inputs().Index(0).Get<Matrix>(); auto input_matrix = cc->Inputs().Index(0).Get<Matrix>();
if (input_matrix.array().isNaN().any()) { if (input_matrix.array().isNaN().any()) {
return ::mediapipe::InvalidArgumentError("NaN input to log operation."); return mediapipe::InvalidArgumentError("NaN input to log operation.");
} }
if (check_nonnegativity_) { if (check_nonnegativity_) {
if (input_matrix.minCoeff() < 0.0) { if (input_matrix.minCoeff() < 0.0) {
return ::mediapipe::OutOfRangeError("Negative input to log operation."); return mediapipe::OutOfRangeError("Negative input to log operation.");
} }
} }
std::unique_ptr<Matrix> output_frame(new Matrix( std::unique_ptr<Matrix> output_frame(new Matrix(
output_scale_ * (input_matrix.array() + stabilizer_).log().matrix())); output_scale_ * (input_matrix.array() + stabilizer_).log().matrix()));
cc->Outputs().Index(0).Add(output_frame.release(), cc->InputTimestamp()); cc->Outputs().Index(0).Add(output_frame.release(), cc->InputTimestamp());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
private: private:
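For reference, the `Process()` method above computes `output = output_scale * ln(input + stabilizer)` element-wise, so zero-valued inputs stay finite. A small numeric illustration (the scale and stabilizer values below are assumptions, not the calculator's defaults):

```cpp
// Hypothetical numeric check of the stabilized-log formula shown above.
#include <cmath>
#include <cstdio>

int main() {
  const double output_scale = 1.0;  // assumed
  const double stabilizer = 1e-5;   // assumed
  for (double x : {0.0, 1e-5, 1.0}) {
    std::printf("x = %g -> %g\n", x, output_scale * std::log(x + stabilizer));
  }
  return 0;
}
```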
@ -66,26 +66,26 @@ namespace mediapipe {
// cumulative_completed_samples / sample_rate_. // cumulative_completed_samples / sample_rate_.
class TimeSeriesFramerCalculator : public CalculatorBase { class TimeSeriesFramerCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
cc->Inputs().Index(0).Set<Matrix>( cc->Inputs().Index(0).Set<Matrix>(
// Input stream with TimeSeriesHeader. // Input stream with TimeSeriesHeader.
); );
cc->Outputs().Index(0).Set<Matrix>( cc->Outputs().Index(0).Set<Matrix>(
// Fixed length time series Packets with TimeSeriesHeader. // Fixed length time series Packets with TimeSeriesHeader.
); );
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
// Returns FAIL if the input stream header is invalid. // Returns FAIL if the input stream header is invalid.
::mediapipe::Status Open(CalculatorContext* cc) override; mediapipe::Status Open(CalculatorContext* cc) override;
// Outputs as many framed packets as possible given the accumulated // Outputs as many framed packets as possible given the accumulated
// input. Always returns OK. // input. Always returns OK.
::mediapipe::Status Process(CalculatorContext* cc) override; mediapipe::Status Process(CalculatorContext* cc) override;
// Flushes any remaining samples in a zero-padded packet. Always // Flushes any remaining samples in a zero-padded packet. Always
// returns OK. // returns OK.
::mediapipe::Status Close(CalculatorContext* cc) override; mediapipe::Status Close(CalculatorContext* cc) override;
private: private:
// Adds input data to the internal buffer. // Adds input data to the internal buffer.
@ -205,7 +205,7 @@ void TimeSeriesFramerCalculator::FrameOutput(CalculatorContext* cc) {
} }
} }
::mediapipe::Status TimeSeriesFramerCalculator::Process(CalculatorContext* cc) { mediapipe::Status TimeSeriesFramerCalculator::Process(CalculatorContext* cc) {
if (initial_input_timestamp_ == Timestamp::Unstarted()) { if (initial_input_timestamp_ == Timestamp::Unstarted()) {
initial_input_timestamp_ = cc->InputTimestamp(); initial_input_timestamp_ = cc->InputTimestamp();
current_timestamp_ = initial_input_timestamp_; current_timestamp_ = initial_input_timestamp_;
@ -214,10 +214,10 @@ void TimeSeriesFramerCalculator::FrameOutput(CalculatorContext* cc) {
EnqueueInput(cc); EnqueueInput(cc);
FrameOutput(cc); FrameOutput(cc);
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status TimeSeriesFramerCalculator::Close(CalculatorContext* cc) { mediapipe::Status TimeSeriesFramerCalculator::Close(CalculatorContext* cc) {
while (samples_still_to_drop_ > 0 && !sample_buffer_.empty()) { while (samples_still_to_drop_ > 0 && !sample_buffer_.empty()) {
sample_buffer_.pop_front(); sample_buffer_.pop_front();
--samples_still_to_drop_; --samples_still_to_drop_;
@ -234,10 +234,10 @@ void TimeSeriesFramerCalculator::FrameOutput(CalculatorContext* cc) {
CurrentOutputTimestamp()); CurrentOutputTimestamp());
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status TimeSeriesFramerCalculator::Open(CalculatorContext* cc) { mediapipe::Status TimeSeriesFramerCalculator::Open(CalculatorContext* cc) {
TimeSeriesFramerCalculatorOptions framer_options = TimeSeriesFramerCalculatorOptions framer_options =
cc->Options<TimeSeriesFramerCalculatorOptions>(); cc->Options<TimeSeriesFramerCalculatorOptions>();
@ -317,7 +317,7 @@ void TimeSeriesFramerCalculator::FrameOutput(CalculatorContext* cc) {
} }
use_local_timestamp_ = framer_options.use_local_timestamp(); use_local_timestamp_ = framer_options.use_local_timestamp();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
} // namespace mediapipe } // namespace mediapipe
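The truncated comment near the top of this file refers to how output packets are stamped: the offset from the initial input timestamp grows as `cumulative_completed_samples / sample_rate_`. A worked example under that assumption (the numbers are my own, and MediaPipe timestamps are in microseconds):

```cpp
// Hypothetical timestamp arithmetic for the framer: after 400 samples at
// 16 kHz, the output timestamp sits 25 ms (25000 us) past the initial one.
#include <cstdint>
#include <cstdio>

int main() {
  const double sample_rate_hz = 16000.0;  // assumed input rate
  const int64_t completed_samples = 400;  // assumed completed sample count
  const double offset_us = completed_samples / sample_rate_hz * 1e6;
  std::printf("offset = %.0f us\n", offset_us);  // 25000 us
  return 0;
}
```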
@ -69,7 +69,7 @@ class TimeSeriesFramerCalculatorTest
} }
// Initializes and runs the test graph. // Initializes and runs the test graph.
::mediapipe::Status Run() { mediapipe::Status Run() {
InitializeGraph(); InitializeGraph();
FillInputHeader(); FillInputHeader();
@ -441,7 +441,7 @@ class TimeSeriesFramerCalculatorTimestampingTest
} }
} }
::mediapipe::Status RunTimestampTest() { mediapipe::Status RunTimestampTest() {
InitializeGraph(); InitializeGraph();
InitializeInputForTimeStampingTest(); InitializeInputForTimeStampingTest();
FillInputHeader(); FillInputHeader();
@ -130,6 +130,16 @@ mediapipe_proto_library(
], ],
) )
mediapipe_proto_library(
name = "flow_limiter_calculator_proto",
srcs = ["flow_limiter_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
],
)
cc_library( cc_library(
name = "add_header_calculator", name = "add_header_calculator",
srcs = ["add_header_calculator.cc"], srcs = ["add_header_calculator.cc"],
@ -238,13 +248,14 @@ cc_library(
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
deps = [ deps = [
":concatenate_vector_calculator_cc_proto", ":concatenate_vector_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:classification_cc_proto", "//mediapipe/framework/formats:classification_cc_proto",
"//mediapipe/framework/formats:landmark_cc_proto", "//mediapipe/framework/formats:landmark_cc_proto",
"//mediapipe/framework/formats:tensor", "//mediapipe/framework/formats:tensor",
"//mediapipe/framework/port:integral_types", "//mediapipe/framework/port:integral_types",
"//mediapipe/framework/port:ret_check", "//mediapipe/framework/port:ret_check",
"//mediapipe/framework/port:status", "//mediapipe/framework/port:status",
"//mediapipe/framework:calculator_framework",
"//mediapipe/util:render_data_cc_proto",
"@org_tensorflow//tensorflow/lite:framework", "@org_tensorflow//tensorflow/lite:framework",
] + select({ ] + select({
"//mediapipe/gpu:disable_gpu": [], "//mediapipe/gpu:disable_gpu": [],
@ -607,6 +618,7 @@ cc_library(
srcs = ["flow_limiter_calculator.cc"], srcs = ["flow_limiter_calculator.cc"],
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
deps = [ deps = [
":flow_limiter_calculator_cc_proto",
"//mediapipe/framework:calculator_framework", "//mediapipe/framework:calculator_framework",
"//mediapipe/framework:packet", "//mediapipe/framework:packet",
"//mediapipe/framework:timestamp", "//mediapipe/framework:timestamp",
@ -782,6 +794,7 @@ cc_test(
srcs = ["flow_limiter_calculator_test.cc"], srcs = ["flow_limiter_calculator_test.cc"],
deps = [ deps = [
":flow_limiter_calculator", ":flow_limiter_calculator",
":flow_limiter_calculator_cc_proto",
"//mediapipe/calculators/core:counting_source_calculator", "//mediapipe/calculators/core:counting_source_calculator",
"//mediapipe/calculators/core:pass_through_calculator", "//mediapipe/calculators/core:pass_through_calculator",
"//mediapipe/framework:calculator_framework", "//mediapipe/framework:calculator_framework",
@ -793,6 +806,8 @@ cc_test(
"//mediapipe/framework/port:integral_types", "//mediapipe/framework/port:integral_types",
"//mediapipe/framework/port:parse_text_proto", "//mediapipe/framework/port:parse_text_proto",
"//mediapipe/framework/stream_handler:immediate_input_stream_handler", "//mediapipe/framework/stream_handler:immediate_input_stream_handler",
"//mediapipe/framework/tool:simulation_clock",
"//mediapipe/framework/tool:simulation_clock_executor",
"//mediapipe/framework/tool:sink", "//mediapipe/framework/tool:sink",
"@com_google_absl//absl/time", "@com_google_absl//absl/time",
], ],
View File
@ -44,7 +44,7 @@ namespace mediapipe {
// //
class AddHeaderCalculator : public CalculatorBase { class AddHeaderCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
bool has_side_input = false; bool has_side_input = false;
bool has_header_stream = false; bool has_header_stream = false;
if (cc->InputSidePackets().HasTag("HEADER")) { if (cc->InputSidePackets().HasTag("HEADER")) {
@ -62,10 +62,10 @@ class AddHeaderCalculator : public CalculatorBase {
} }
cc->Inputs().Tag("DATA").SetAny(); cc->Inputs().Tag("DATA").SetAny();
cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Tag("DATA")); cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Tag("DATA"));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) override { mediapipe::Status Open(CalculatorContext* cc) override {
Packet header; Packet header;
if (cc->InputSidePackets().HasTag("HEADER")) { if (cc->InputSidePackets().HasTag("HEADER")) {
header = cc->InputSidePackets().Tag("HEADER"); header = cc->InputSidePackets().Tag("HEADER");
@ -77,12 +77,12 @@ class AddHeaderCalculator : public CalculatorBase {
cc->Outputs().Index(0).SetHeader(header); cc->Outputs().Index(0).SetHeader(header);
} }
cc->SetOffset(TimestampDiff(0)); cc->SetOffset(TimestampDiff(0));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) override { mediapipe::Status Process(CalculatorContext* cc) override {
cc->Outputs().Index(0).AddPacket(cc->Inputs().Tag("DATA").Value()); cc->Outputs().Index(0).AddPacket(cc->Inputs().Tag("DATA").Value());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
}; };
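A minimal sketch of how AddHeaderCalculator might be wired, based only on the HEADER and DATA tags visible in GetContract above; the stream and side-packet names are illustrative, not taken from this change.

#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/port/parse_text_proto.h"

namespace mediapipe {

// Illustrative node config: the header is supplied once as a side packet,
// and the DATA packets are forwarded unchanged with the header attached.
CalculatorGraphConfig::Node MakeAddHeaderNode() {
  return ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"pb(
    calculator: "AddHeaderCalculator"
    input_side_packet: "HEADER:stream_header"
    input_stream: "DATA:payload_packets"
    output_stream: "payload_with_header"
  )pb");
}

}  // namespace mediapipe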
View File
@ -153,7 +153,7 @@ TEST_F(AddHeaderCalculatorTest, UsingBothSideInputAndStream) {
} }
// Run should fail because header can only be provided one way. // Run should fail because header can only be provided one way.
EXPECT_EQ(runner.Run().code(), ::mediapipe::InvalidArgumentError("").code()); EXPECT_EQ(runner.Run().code(), mediapipe::InvalidArgumentError("").code());
} }
} // namespace mediapipe } // namespace mediapipe
View File
@ -42,22 +42,22 @@ REGISTER_CALCULATOR(BeginLoopIntegerCalculator);
class IncrementCalculator : public CalculatorBase { class IncrementCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
cc->Inputs().Index(0).Set<int>(); cc->Inputs().Index(0).Set<int>();
cc->Outputs().Index(0).Set<int>(); cc->Outputs().Index(0).Set<int>();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) override { mediapipe::Status Open(CalculatorContext* cc) override {
cc->SetOffset(TimestampDiff(0)); cc->SetOffset(TimestampDiff(0));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) override { mediapipe::Status Process(CalculatorContext* cc) override {
const int& input_int = cc->Inputs().Index(0).Get<int>(); const int& input_int = cc->Inputs().Index(0).Get<int>();
auto output_int = absl::make_unique<int>(input_int + 1); auto output_int = absl::make_unique<int>(input_int + 1);
cc->Outputs().Index(0).Add(output_int.release(), cc->InputTimestamp()); cc->Outputs().Index(0).Add(output_int.release(), cc->InputTimestamp());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
}; };
@ -166,19 +166,19 @@ TEST_F(BeginEndLoopCalculatorGraphTest, MultipleVectors) {
// bound update. // bound update.
class PassThroughOrEmptyVectorCalculator : public CalculatorBase { class PassThroughOrEmptyVectorCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
cc->SetProcessTimestampBounds(true); cc->SetProcessTimestampBounds(true);
cc->Inputs().Index(0).Set<std::vector<int>>(); cc->Inputs().Index(0).Set<std::vector<int>>();
cc->Outputs().Index(0).Set<std::vector<int>>(); cc->Outputs().Index(0).Set<std::vector<int>>();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) override { mediapipe::Status Open(CalculatorContext* cc) override {
cc->SetOffset(TimestampDiff(0)); cc->SetOffset(TimestampDiff(0));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) override { mediapipe::Status Process(CalculatorContext* cc) override {
if (!cc->Inputs().Index(0).IsEmpty()) { if (!cc->Inputs().Index(0).IsEmpty()) {
cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value()); cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value());
} else { } else {
@ -186,7 +186,7 @@ class PassThroughOrEmptyVectorCalculator : public CalculatorBase {
MakePacket<std::vector<int>>(std::vector<int>()) MakePacket<std::vector<int>>(std::vector<int>())
.At(cc->InputTimestamp())); .At(cc->InputTimestamp()));
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
}; };
@ -311,24 +311,24 @@ TEST_F(BeginEndLoopCalculatorGraphProcessingEmptyPacketsTest, MultipleVectors) {
class MultiplierCalculator : public CalculatorBase { class MultiplierCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
cc->Inputs().Index(0).Set<int>(); cc->Inputs().Index(0).Set<int>();
cc->Inputs().Index(1).Set<int>(); cc->Inputs().Index(1).Set<int>();
cc->Outputs().Index(0).Set<int>(); cc->Outputs().Index(0).Set<int>();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) override { mediapipe::Status Open(CalculatorContext* cc) override {
cc->SetOffset(TimestampDiff(0)); cc->SetOffset(TimestampDiff(0));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) override { mediapipe::Status Process(CalculatorContext* cc) override {
const int& input_int = cc->Inputs().Index(0).Get<int>(); const int& input_int = cc->Inputs().Index(0).Get<int>();
const int& multiplier_int = cc->Inputs().Index(1).Get<int>(); const int& multiplier_int = cc->Inputs().Index(1).Get<int>();
auto output_int = absl::make_unique<int>(input_int * multiplier_int); auto output_int = absl::make_unique<int>(input_int * multiplier_int);
cc->Outputs().Index(0).Add(output_int.release(), cc->InputTimestamp()); cc->Outputs().Index(0).Add(output_int.release(), cc->InputTimestamp());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
}; };
View File
@ -61,7 +61,7 @@ class BeginLoopCalculator : public CalculatorBase {
using ItemT = typename IterableT::value_type; using ItemT = typename IterableT::value_type;
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
// The below enables processing of timestamp bound updates, and that enables // The below enables processing of timestamp bound updates, and that enables
// correct timestamp propagation by the companion EndLoopCalculator. // correct timestamp propagation by the companion EndLoopCalculator.
// //
@ -106,10 +106,10 @@ class BeginLoopCalculator : public CalculatorBase {
} }
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) final { mediapipe::Status Process(CalculatorContext* cc) final {
Timestamp last_timestamp = loop_internal_timestamp_; Timestamp last_timestamp = loop_internal_timestamp_;
if (!cc->Inputs().Tag("ITERABLE").IsEmpty()) { if (!cc->Inputs().Tag("ITERABLE").IsEmpty()) {
const IterableT& collection = const IterableT& collection =
@ -139,7 +139,7 @@ class BeginLoopCalculator : public CalculatorBase {
.AddPacket(MakePacket<Timestamp>(cc->InputTimestamp()) .AddPacket(MakePacket<Timestamp>(cc->InputTimestamp())
.At(Timestamp(loop_internal_timestamp_ - 1))); .At(Timestamp(loop_internal_timestamp_ - 1)));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
private: private:
View File
@ -33,7 +33,7 @@ namespace mediapipe {
// input_stream: "input_vector" // input_stream: "input_vector"
// output_stream: "output_vector" // output_stream: "output_vector"
// options { // options {
// [mediapipe.ClipIntVectorSizeCalculatorOptions.ext] { // [mediapipe.ClipVectorSizeCalculatorOptions.ext] {
// max_vec_size: 5 // max_vec_size: 5
// } // }
// } // }
@ -43,13 +43,13 @@ namespace mediapipe {
template <typename T> template <typename T>
class ClipVectorSizeCalculator : public CalculatorBase { class ClipVectorSizeCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
RET_CHECK(cc->Inputs().NumEntries() == 1); RET_CHECK(cc->Inputs().NumEntries() == 1);
RET_CHECK(cc->Outputs().NumEntries() == 1); RET_CHECK(cc->Outputs().NumEntries() == 1);
if (cc->Options<::mediapipe::ClipVectorSizeCalculatorOptions>() if (cc->Options<::mediapipe::ClipVectorSizeCalculatorOptions>()
.max_vec_size() < 1) { .max_vec_size() < 1) {
return ::mediapipe::InternalError( return mediapipe::InternalError(
"max_vec_size should be greater than or equal to 1."); "max_vec_size should be greater than or equal to 1.");
} }
@ -60,10 +60,10 @@ class ClipVectorSizeCalculator : public CalculatorBase {
cc->InputSidePackets().Index(0).Set<int>(); cc->InputSidePackets().Index(0).Set<int>();
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) override { mediapipe::Status Open(CalculatorContext* cc) override {
cc->SetOffset(TimestampDiff(0)); cc->SetOffset(TimestampDiff(0));
max_vec_size_ = cc->Options<::mediapipe::ClipVectorSizeCalculatorOptions>() max_vec_size_ = cc->Options<::mediapipe::ClipVectorSizeCalculatorOptions>()
.max_vec_size(); .max_vec_size();
@ -72,23 +72,23 @@ class ClipVectorSizeCalculator : public CalculatorBase {
!cc->InputSidePackets().Index(0).IsEmpty()) { !cc->InputSidePackets().Index(0).IsEmpty()) {
max_vec_size_ = cc->InputSidePackets().Index(0).Get<int>(); max_vec_size_ = cc->InputSidePackets().Index(0).Get<int>();
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) override { mediapipe::Status Process(CalculatorContext* cc) override {
if (max_vec_size_ < 1) { if (max_vec_size_ < 1) {
return ::mediapipe::InternalError( return mediapipe::InternalError(
"max_vec_size should be greater than or equal to 1."); "max_vec_size should be greater than or equal to 1.");
} }
if (cc->Inputs().Index(0).IsEmpty()) { if (cc->Inputs().Index(0).IsEmpty()) {
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
return ClipVectorSize<T>(std::is_copy_constructible<T>(), cc); return ClipVectorSize<T>(std::is_copy_constructible<T>(), cc);
} }
template <typename U> template <typename U>
::mediapipe::Status ClipVectorSize(std::true_type, CalculatorContext* cc) { mediapipe::Status ClipVectorSize(std::true_type, CalculatorContext* cc) {
auto output = absl::make_unique<std::vector<U>>(); auto output = absl::make_unique<std::vector<U>>();
const std::vector<U>& input_vector = const std::vector<U>& input_vector =
cc->Inputs().Index(0).Get<std::vector<U>>(); cc->Inputs().Index(0).Get<std::vector<U>>();
@ -100,19 +100,19 @@ class ClipVectorSizeCalculator : public CalculatorBase {
} }
} }
cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp()); cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
template <typename U> template <typename U>
::mediapipe::Status ClipVectorSize(std::false_type, CalculatorContext* cc) { mediapipe::Status ClipVectorSize(std::false_type, CalculatorContext* cc) {
return ConsumeAndClipVectorSize<T>(std::is_move_constructible<U>(), cc); return ConsumeAndClipVectorSize<T>(std::is_move_constructible<U>(), cc);
} }
template <typename U> template <typename U>
::mediapipe::Status ConsumeAndClipVectorSize(std::true_type, mediapipe::Status ConsumeAndClipVectorSize(std::true_type,
CalculatorContext* cc) { CalculatorContext* cc) {
auto output = absl::make_unique<std::vector<U>>(); auto output = absl::make_unique<std::vector<U>>();
::mediapipe::StatusOr<std::unique_ptr<std::vector<U>>> input_status = mediapipe::StatusOr<std::unique_ptr<std::vector<U>>> input_status =
cc->Inputs().Index(0).Value().Consume<std::vector<U>>(); cc->Inputs().Index(0).Value().Consume<std::vector<U>>();
if (input_status.ok()) { if (input_status.ok()) {
@ -129,13 +129,13 @@ class ClipVectorSizeCalculator : public CalculatorBase {
return input_status.status(); return input_status.status();
} }
cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp()); cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
template <typename U> template <typename U>
::mediapipe::Status ConsumeAndClipVectorSize(std::false_type, mediapipe::Status ConsumeAndClipVectorSize(std::false_type,
CalculatorContext* cc) { CalculatorContext* cc) {
return ::mediapipe::InternalError( return mediapipe::InternalError(
"Cannot copy or move input vectors and clip their size."); "Cannot copy or move input vectors and clip their size.");
} }
View File
@ -29,7 +29,7 @@ namespace mediapipe {
// NormalizedLandmarkList proto object. // NormalizedLandmarkList proto object.
class ConcatenateNormalizedLandmarkListCalculator : public CalculatorBase { class ConcatenateNormalizedLandmarkListCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
RET_CHECK(cc->Inputs().NumEntries() != 0); RET_CHECK(cc->Inputs().NumEntries() != 0);
RET_CHECK(cc->Outputs().NumEntries() == 1); RET_CHECK(cc->Outputs().NumEntries() == 1);
@ -39,21 +39,21 @@ class ConcatenateNormalizedLandmarkListCalculator : public CalculatorBase {
cc->Outputs().Index(0).Set<NormalizedLandmarkList>(); cc->Outputs().Index(0).Set<NormalizedLandmarkList>();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) override { mediapipe::Status Open(CalculatorContext* cc) override {
cc->SetOffset(TimestampDiff(0)); cc->SetOffset(TimestampDiff(0));
only_emit_if_all_present_ = only_emit_if_all_present_ =
cc->Options<::mediapipe::ConcatenateVectorCalculatorOptions>() cc->Options<::mediapipe::ConcatenateVectorCalculatorOptions>()
.only_emit_if_all_present(); .only_emit_if_all_present();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) override { mediapipe::Status Process(CalculatorContext* cc) override {
if (only_emit_if_all_present_) { if (only_emit_if_all_present_) {
for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { for (int i = 0; i < cc->Inputs().NumEntries(); ++i) {
if (cc->Inputs().Index(i).IsEmpty()) return ::mediapipe::OkStatus(); if (cc->Inputs().Index(i).IsEmpty()) return mediapipe::OkStatus();
} }
} }
@ -69,7 +69,7 @@ class ConcatenateNormalizedLandmarkListCalculator : public CalculatorBase {
} }
cc->Outputs().Index(0).AddPacket( cc->Outputs().Index(0).AddPacket(
MakePacket<NormalizedLandmarkList>(output).At(cc->InputTimestamp())); MakePacket<NormalizedLandmarkList>(output).At(cc->InputTimestamp()));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
private: private:
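A hedged sketch of a node using ConcatenateNormalizedLandmarkListCalculator with two inputs and the only_emit_if_all_present option read in Open above; the stream names are illustrative.

#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/port/parse_text_proto.h"

namespace mediapipe {

// Illustrative node config: both landmark lists must be present at a
// timestamp before the concatenated list is emitted.
CalculatorGraphConfig::Node MakeConcatenateLandmarksNode() {
  return ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"pb(
    calculator: "ConcatenateNormalizedLandmarkListCalculator"
    input_stream: "face_landmarks"
    input_stream: "hand_landmarks"
    output_stream: "all_landmarks"
    options {
      [mediapipe.ConcatenateVectorCalculatorOptions.ext] {
        only_emit_if_all_present: true
      }
    }
  )pb");
}

}  // namespace mediapipe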
View File
@ -20,6 +20,7 @@
#include "mediapipe/framework/formats/landmark.pb.h" #include "mediapipe/framework/formats/landmark.pb.h"
#include "mediapipe/framework/formats/tensor.h" #include "mediapipe/framework/formats/tensor.h"
#include "mediapipe/framework/port/integral_types.h" #include "mediapipe/framework/port/integral_types.h"
#include "mediapipe/util/render_data.pb.h"
#include "tensorflow/lite/interpreter.h" #include "tensorflow/lite/interpreter.h"
#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE) #if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
@ -86,4 +87,8 @@ typedef ConcatenateVectorCalculator<::tflite::gpu::gl::GlBuffer>
REGISTER_CALCULATOR(ConcatenateGlBufferVectorCalculator); REGISTER_CALCULATOR(ConcatenateGlBufferVectorCalculator);
#endif #endif
typedef ConcatenateVectorCalculator<mediapipe::RenderData>
ConcatenateRenderDataVectorCalculator;
REGISTER_CALCULATOR(ConcatenateRenderDataVectorCalculator);
} // namespace mediapipe } // namespace mediapipe
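The RenderData registration above follows the usual pattern for adding a concrete element type to the templated ConcatenateVectorCalculator; a hedged sketch of the same pattern for another proto type (Detection is only an illustration here) follows.

#include "mediapipe/calculators/core/concatenate_vector_calculator.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/formats/detection.pb.h"

namespace mediapipe {

// Hypothetical typed instantiation, mirroring ConcatenateRenderDataVectorCalculator.
typedef ConcatenateVectorCalculator<::mediapipe::Detection>
    ConcatenateDetectionVectorExampleCalculator;
REGISTER_CALCULATOR(ConcatenateDetectionVectorExampleCalculator);

}  // namespace mediapipe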
View File
@ -34,7 +34,7 @@ namespace mediapipe {
template <typename T> template <typename T>
class ConcatenateVectorCalculator : public CalculatorBase { class ConcatenateVectorCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
RET_CHECK(cc->Inputs().NumEntries() != 0); RET_CHECK(cc->Inputs().NumEntries() != 0);
RET_CHECK(cc->Outputs().NumEntries() == 1); RET_CHECK(cc->Outputs().NumEntries() == 1);
@ -45,21 +45,21 @@ class ConcatenateVectorCalculator : public CalculatorBase {
cc->Outputs().Index(0).Set<std::vector<T>>(); cc->Outputs().Index(0).Set<std::vector<T>>();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) override { mediapipe::Status Open(CalculatorContext* cc) override {
cc->SetOffset(TimestampDiff(0)); cc->SetOffset(TimestampDiff(0));
only_emit_if_all_present_ = only_emit_if_all_present_ =
cc->Options<::mediapipe::ConcatenateVectorCalculatorOptions>() cc->Options<::mediapipe::ConcatenateVectorCalculatorOptions>()
.only_emit_if_all_present(); .only_emit_if_all_present();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) override { mediapipe::Status Process(CalculatorContext* cc) override {
if (only_emit_if_all_present_) { if (only_emit_if_all_present_) {
for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { for (int i = 0; i < cc->Inputs().NumEntries(); ++i) {
if (cc->Inputs().Index(i).IsEmpty()) return ::mediapipe::OkStatus(); if (cc->Inputs().Index(i).IsEmpty()) return mediapipe::OkStatus();
} }
} }
@ -67,8 +67,7 @@ class ConcatenateVectorCalculator : public CalculatorBase {
} }
template <typename U> template <typename U>
::mediapipe::Status ConcatenateVectors(std::true_type, mediapipe::Status ConcatenateVectors(std::true_type, CalculatorContext* cc) {
CalculatorContext* cc) {
auto output = absl::make_unique<std::vector<U>>(); auto output = absl::make_unique<std::vector<U>>();
for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { for (int i = 0; i < cc->Inputs().NumEntries(); ++i) {
auto& input = cc->Inputs().Index(i); auto& input = cc->Inputs().Index(i);
@ -82,21 +81,20 @@ class ConcatenateVectorCalculator : public CalculatorBase {
const std::vector<U>& value = input.Get<std::vector<U>>(); const std::vector<U>& value = input.Get<std::vector<U>>();
output->insert(output->end(), value.begin(), value.end()); output->insert(output->end(), value.begin(), value.end());
} else { } else {
return ::mediapipe::InvalidArgumentError("Invalid input stream type."); return mediapipe::InvalidArgumentError("Invalid input stream type.");
} }
} }
cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp()); cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
template <typename U> template <typename U>
::mediapipe::Status ConcatenateVectors(std::false_type, mediapipe::Status ConcatenateVectors(std::false_type, CalculatorContext* cc) {
CalculatorContext* cc) {
return ConsumeAndConcatenateVectors<T>(std::is_move_constructible<U>(), cc); return ConsumeAndConcatenateVectors<T>(std::is_move_constructible<U>(), cc);
} }
template <typename U> template <typename U>
::mediapipe::Status ConsumeAndConcatenateVectors(std::true_type, mediapipe::Status ConsumeAndConcatenateVectors(std::true_type,
CalculatorContext* cc) { CalculatorContext* cc) {
auto output = absl::make_unique<std::vector<U>>(); auto output = absl::make_unique<std::vector<U>>();
for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { for (int i = 0; i < cc->Inputs().NumEntries(); ++i) {
@ -105,7 +103,7 @@ class ConcatenateVectorCalculator : public CalculatorBase {
if (input.IsEmpty()) continue; if (input.IsEmpty()) continue;
if (input.Value().ValidateAsType<U>().ok()) { if (input.Value().ValidateAsType<U>().ok()) {
::mediapipe::StatusOr<std::unique_ptr<U>> value_status = mediapipe::StatusOr<std::unique_ptr<U>> value_status =
input.Value().Consume<U>(); input.Value().Consume<U>();
if (value_status.ok()) { if (value_status.ok()) {
std::unique_ptr<U> value = std::move(value_status).ValueOrDie(); std::unique_ptr<U> value = std::move(value_status).ValueOrDie();
@ -114,7 +112,7 @@ class ConcatenateVectorCalculator : public CalculatorBase {
return value_status.status(); return value_status.status();
} }
} else if (input.Value().ValidateAsType<std::vector<U>>().ok()) { } else if (input.Value().ValidateAsType<std::vector<U>>().ok()) {
::mediapipe::StatusOr<std::unique_ptr<std::vector<U>>> value_status = mediapipe::StatusOr<std::unique_ptr<std::vector<U>>> value_status =
input.Value().Consume<std::vector<U>>(); input.Value().Consume<std::vector<U>>();
if (value_status.ok()) { if (value_status.ok()) {
std::unique_ptr<std::vector<U>> value = std::unique_ptr<std::vector<U>> value =
@ -125,17 +123,17 @@ class ConcatenateVectorCalculator : public CalculatorBase {
return value_status.status(); return value_status.status();
} }
} else { } else {
return ::mediapipe::InvalidArgumentError("Invalid input stream type."); return mediapipe::InvalidArgumentError("Invalid input stream type.");
} }
} }
cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp()); cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
template <typename U> template <typename U>
::mediapipe::Status ConsumeAndConcatenateVectors(std::false_type, mediapipe::Status ConsumeAndConcatenateVectors(std::false_type,
CalculatorContext* cc) { CalculatorContext* cc) {
return ::mediapipe::InternalError( return mediapipe::InternalError(
"Cannot copy or move inputs to concatenate them"); "Cannot copy or move inputs to concatenate them");
} }
View File
@ -54,7 +54,7 @@ namespace {} // namespace
// } // }
class ConstantSidePacketCalculator : public CalculatorBase { class ConstantSidePacketCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
const auto& options = const auto& options =
cc->Options<::mediapipe::ConstantSidePacketCalculatorOptions>(); cc->Options<::mediapipe::ConstantSidePacketCalculatorOptions>();
RET_CHECK_EQ(cc->OutputSidePackets().NumEntries(kPacketTag), RET_CHECK_EQ(cc->OutputSidePackets().NumEntries(kPacketTag),
@ -80,14 +80,14 @@ class ConstantSidePacketCalculator : public CalculatorBase {
} else if (packet_options.has_classification_list_value()) { } else if (packet_options.has_classification_list_value()) {
packet.Set<ClassificationList>(); packet.Set<ClassificationList>();
} else { } else {
return ::mediapipe::InvalidArgumentError( return mediapipe::InvalidArgumentError(
"None of supported values were specified in options."); "None of supported values were specified in options.");
} }
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) override { mediapipe::Status Open(CalculatorContext* cc) override {
const auto& options = const auto& options =
cc->Options<::mediapipe::ConstantSidePacketCalculatorOptions>(); cc->Options<::mediapipe::ConstantSidePacketCalculatorOptions>();
int index = 0; int index = 0;
@ -109,15 +109,15 @@ class ConstantSidePacketCalculator : public CalculatorBase {
packet.Set(MakePacket<ClassificationList>( packet.Set(MakePacket<ClassificationList>(
packet_options.classification_list_value())); packet_options.classification_list_value()));
} else { } else {
return ::mediapipe::InvalidArgumentError( return mediapipe::InvalidArgumentError(
"None of supported values were specified in options."); "None of supported values were specified in options.");
} }
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) override { mediapipe::Status Process(CalculatorContext* cc) override {
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
private: private:
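A hedged sketch of a node emitting one constant side packet; the int_value field name is assumed from the pattern of classification_list_value handled above, and the packet name is illustrative.

#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/port/parse_text_proto.h"

namespace mediapipe {

// Illustrative node config: the side packet is produced once in Open and
// Process is a no-op, as shown above.
CalculatorGraphConfig::Node MakeConstantSidePacketNode() {
  return ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"pb(
    calculator: "ConstantSidePacketCalculator"
    output_side_packet: "PACKET:num_hands"
    options {
      [mediapipe.ConstantSidePacketCalculatorOptions.ext] {
        packet { int_value: 2 }  # assumed field name; see the options proto
      }
    }
  )pb");
}

}  // namespace mediapipe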
View File
@ -40,7 +40,7 @@ void DoTestSingleSidePacket(absl::string_view packet_spec,
} }
)"; )";
CalculatorGraphConfig graph_config = CalculatorGraphConfig graph_config =
::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>( mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(
absl::Substitute(graph_config_template, packet_spec)); absl::Substitute(graph_config_template, packet_spec));
CalculatorGraph graph; CalculatorGraph graph;
MP_ASSERT_OK(graph.Initialize(graph_config)); MP_ASSERT_OK(graph.Initialize(graph_config));
@ -62,7 +62,7 @@ TEST(ConstantSidePacketCalculatorTest, EveryPossibleType) {
TEST(ConstantSidePacketCalculatorTest, MultiplePackets) { TEST(ConstantSidePacketCalculatorTest, MultiplePackets) {
CalculatorGraphConfig graph_config = CalculatorGraphConfig graph_config =
::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"( mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
node { node {
calculator: "ConstantSidePacketCalculator" calculator: "ConstantSidePacketCalculator"
output_side_packet: "PACKET:0:int_packet" output_side_packet: "PACKET:0:int_packet"
@ -115,7 +115,7 @@ TEST(ConstantSidePacketCalculatorTest, MultiplePackets) {
TEST(ConstantSidePacketCalculatorTest, ProcessingPacketsWithCorrectTagOnly) { TEST(ConstantSidePacketCalculatorTest, ProcessingPacketsWithCorrectTagOnly) {
CalculatorGraphConfig graph_config = CalculatorGraphConfig graph_config =
::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"( mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
node { node {
calculator: "ConstantSidePacketCalculator" calculator: "ConstantSidePacketCalculator"
output_side_packet: "PACKET:0:int_packet" output_side_packet: "PACKET:0:int_packet"
@ -159,7 +159,7 @@ TEST(ConstantSidePacketCalculatorTest, ProcessingPacketsWithCorrectTagOnly) {
TEST(ConstantSidePacketCalculatorTest, IncorrectConfig_MoreOptionsThanPackets) { TEST(ConstantSidePacketCalculatorTest, IncorrectConfig_MoreOptionsThanPackets) {
CalculatorGraphConfig graph_config = CalculatorGraphConfig graph_config =
::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"( mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
node { node {
calculator: "ConstantSidePacketCalculator" calculator: "ConstantSidePacketCalculator"
output_side_packet: "PACKET:int_packet" output_side_packet: "PACKET:int_packet"
@ -177,7 +177,7 @@ TEST(ConstantSidePacketCalculatorTest, IncorrectConfig_MoreOptionsThanPackets) {
TEST(ConstantSidePacketCalculatorTest, IncorrectConfig_MorePacketsThanOptions) { TEST(ConstantSidePacketCalculatorTest, IncorrectConfig_MorePacketsThanOptions) {
CalculatorGraphConfig graph_config = CalculatorGraphConfig graph_config =
::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"( mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
node { node {
calculator: "ConstantSidePacketCalculator" calculator: "ConstantSidePacketCalculator"
output_side_packet: "PACKET:0:int_packet" output_side_packet: "PACKET:0:int_packet"
View File
@ -30,7 +30,7 @@ namespace mediapipe {
// provided, then batches are of size 1. // provided, then batches are of size 1.
class CountingSourceCalculator : public CalculatorBase { class CountingSourceCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
cc->Outputs().Index(0).Set<int>(); cc->Outputs().Index(0).Set<int>();
if (cc->InputSidePackets().HasTag("ERROR_ON_OPEN")) { if (cc->InputSidePackets().HasTag("ERROR_ON_OPEN")) {
@ -55,13 +55,13 @@ class CountingSourceCalculator : public CalculatorBase {
if (cc->InputSidePackets().HasTag("INCREMENT")) { if (cc->InputSidePackets().HasTag("INCREMENT")) {
cc->InputSidePackets().Tag("INCREMENT").Set<int>(); cc->InputSidePackets().Tag("INCREMENT").Set<int>();
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) override { mediapipe::Status Open(CalculatorContext* cc) override {
if (cc->InputSidePackets().HasTag("ERROR_ON_OPEN") && if (cc->InputSidePackets().HasTag("ERROR_ON_OPEN") &&
cc->InputSidePackets().Tag("ERROR_ON_OPEN").Get<bool>()) { cc->InputSidePackets().Tag("ERROR_ON_OPEN").Get<bool>()) {
return ::mediapipe::NotFoundError("expected error"); return mediapipe::NotFoundError("expected error");
} }
if (cc->InputSidePackets().HasTag("ERROR_COUNT")) { if (cc->InputSidePackets().HasTag("ERROR_COUNT")) {
error_count_ = cc->InputSidePackets().Tag("ERROR_COUNT").Get<int>(); error_count_ = cc->InputSidePackets().Tag("ERROR_COUNT").Get<int>();
@ -83,12 +83,12 @@ class CountingSourceCalculator : public CalculatorBase {
RET_CHECK_LT(0, increment_); RET_CHECK_LT(0, increment_);
} }
RET_CHECK(error_count_ >= 0 || max_count_ >= 0); RET_CHECK(error_count_ >= 0 || max_count_ >= 0);
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) override { mediapipe::Status Process(CalculatorContext* cc) override {
if (error_count_ >= 0 && batch_counter_ >= error_count_) { if (error_count_ >= 0 && batch_counter_ >= error_count_) {
return ::mediapipe::InternalError("expected error"); return mediapipe::InternalError("expected error");
} }
if (max_count_ >= 0 && batch_counter_ >= max_count_) { if (max_count_ >= 0 && batch_counter_ >= max_count_) {
return tool::StatusStop(); return tool::StatusStop();
@ -98,7 +98,7 @@ class CountingSourceCalculator : public CalculatorBase {
counter_ += increment_; counter_ += increment_;
} }
++batch_counter_; ++batch_counter_;
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
private: private:
View File
@ -37,34 +37,34 @@ namespace mediapipe {
class DequantizeByteArrayCalculator : public CalculatorBase { class DequantizeByteArrayCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
cc->Inputs().Tag("ENCODED").Set<std::string>(); cc->Inputs().Tag("ENCODED").Set<std::string>();
cc->Outputs().Tag("FLOAT_VECTOR").Set<std::vector<float>>(); cc->Outputs().Tag("FLOAT_VECTOR").Set<std::vector<float>>();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) final { mediapipe::Status Open(CalculatorContext* cc) final {
const auto options = const auto options =
cc->Options<::mediapipe::DequantizeByteArrayCalculatorOptions>(); cc->Options<::mediapipe::DequantizeByteArrayCalculatorOptions>();
if (!options.has_max_quantized_value() || if (!options.has_max_quantized_value() ||
!options.has_min_quantized_value()) { !options.has_min_quantized_value()) {
return ::mediapipe::InvalidArgumentError( return mediapipe::InvalidArgumentError(
"Both max_quantized_value and min_quantized_value must be provided " "Both max_quantized_value and min_quantized_value must be provided "
"in DequantizeByteArrayCalculatorOptions."); "in DequantizeByteArrayCalculatorOptions.");
} }
float max_quantized_value = options.max_quantized_value(); float max_quantized_value = options.max_quantized_value();
float min_quantized_value = options.min_quantized_value(); float min_quantized_value = options.min_quantized_value();
if (max_quantized_value < min_quantized_value + FLT_EPSILON) { if (max_quantized_value < min_quantized_value + FLT_EPSILON) {
return ::mediapipe::InvalidArgumentError( return mediapipe::InvalidArgumentError(
"max_quantized_value must be greater than min_quantized_value."); "max_quantized_value must be greater than min_quantized_value.");
} }
float range = max_quantized_value - min_quantized_value; float range = max_quantized_value - min_quantized_value;
scalar_ = range / 255.0; scalar_ = range / 255.0;
bias_ = (range / 512.0) + min_quantized_value; bias_ = (range / 512.0) + min_quantized_value;
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) final { mediapipe::Status Process(CalculatorContext* cc) final {
const std::string& encoded = const std::string& encoded =
cc->Inputs().Tag("ENCODED").Value().Get<std::string>(); cc->Inputs().Tag("ENCODED").Value().Get<std::string>();
std::vector<float> float_vector; std::vector<float> float_vector;
@ -77,7 +77,7 @@ class DequantizeByteArrayCalculator : public CalculatorBase {
.Tag("FLOAT_VECTOR") .Tag("FLOAT_VECTOR")
.AddPacket(MakePacket<std::vector<float>>(float_vector) .AddPacket(MakePacket<std::vector<float>>(float_vector)
.At(cc->InputTimestamp())); .At(cc->InputTimestamp()));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
private: private:
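A standalone numeric check of the scalar and bias arithmetic in Open above, assuming the decoded value is computed as byte * scalar + bias (the decoding loop is outside this excerpt) and options of min_quantized_value -1 and max_quantized_value 1.

#include <cstdio>

int main() {
  const float min_q = -1.0f, max_q = 1.0f;      // example option values
  const float range = max_q - min_q;            // 2.0
  const float scalar = range / 255.0f;          // ~0.007843
  const float bias = (range / 512.0f) + min_q;  // ~-0.996094
  const int bytes[] = {0, 128, 255};
  for (int byte : bytes) {
    // Prints approximately -0.9961, +0.0078, and +1.0039.
    std::printf("%3d -> %+.4f\n", byte, byte * scalar + bias);
  }
  return 0;
}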
View File
@ -57,7 +57,7 @@ class EndLoopCalculator : public CalculatorBase {
using ItemT = typename IterableT::value_type; using ItemT = typename IterableT::value_type;
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
RET_CHECK(cc->Inputs().HasTag("BATCH_END")) RET_CHECK(cc->Inputs().HasTag("BATCH_END"))
<< "Missing BATCH_END tagged input_stream."; << "Missing BATCH_END tagged input_stream.";
cc->Inputs().Tag("BATCH_END").Set<Timestamp>(); cc->Inputs().Tag("BATCH_END").Set<Timestamp>();
@ -67,10 +67,10 @@ class EndLoopCalculator : public CalculatorBase {
RET_CHECK(cc->Outputs().HasTag("ITERABLE")); RET_CHECK(cc->Outputs().HasTag("ITERABLE"));
cc->Outputs().Tag("ITERABLE").Set<IterableT>(); cc->Outputs().Tag("ITERABLE").Set<IterableT>();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) override { mediapipe::Status Process(CalculatorContext* cc) override {
if (!cc->Inputs().Tag("ITEM").IsEmpty()) { if (!cc->Inputs().Tag("ITEM").IsEmpty()) {
if (!input_stream_collection_) { if (!input_stream_collection_) {
input_stream_collection_.reset(new IterableT); input_stream_collection_.reset(new IterableT);
@ -94,7 +94,7 @@ class EndLoopCalculator : public CalculatorBase {
.SetNextTimestampBound(Timestamp(loop_control_ts.Value() + 1)); .SetNextTimestampBound(Timestamp(loop_control_ts.Value() + 1));
} }
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
private: private:
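A hedged sketch tying a Begin/End loop pair around a per-element calculator; BeginLoopIntegerCalculator and IncrementCalculator appear in the test excerpt above, while EndLoopIntegersCalculator and the ITEM output tag are assumed from the EndLoopCalculator contract, and all stream names are illustrative.

#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/port/parse_text_proto.h"

namespace mediapipe {

// Illustrative graph: the vector is unpacked, each element is processed at
// its own loop timestamp, and the results are collected back into a vector.
CalculatorGraphConfig MakeLoopGraphConfig() {
  return ParseTextProtoOrDie<CalculatorGraphConfig>(R"pb(
    input_stream: "int_vector"
    output_stream: "incremented_int_vector"
    node {
      calculator: "BeginLoopIntegerCalculator"
      input_stream: "ITERABLE:int_vector"
      output_stream: "ITEM:int_at_loop"
      output_stream: "BATCH_END:loop_end_ts"
    }
    node {
      calculator: "IncrementCalculator"
      input_stream: "int_at_loop"
      output_stream: "incremented_int_at_loop"
    }
    node {
      calculator: "EndLoopIntegersCalculator"
      input_stream: "ITEM:incremented_int_at_loop"
      input_stream: "BATCH_END:loop_end_ts"
      output_stream: "ITERABLE:incremented_int_vector"
    }
  )pb");
}

}  // namespace mediapipe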
View File
@ -16,6 +16,7 @@
#include <utility> #include <utility>
#include <vector> #include <vector>
#include "mediapipe/calculators/core/flow_limiter_calculator.pb.h"
#include "mediapipe/framework/calculator_framework.h" #include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/port/ret_check.h" #include "mediapipe/framework/port/ret_check.h"
#include "mediapipe/framework/port/status.h" #include "mediapipe/framework/port/status.h"
@ -23,41 +24,23 @@
namespace mediapipe { namespace mediapipe {
// FlowLimiterCalculator is used to limit the number of pipelined processing // FlowLimiterCalculator is used to limit the number of frames in flight
// operations in a section of the graph. // by dropping input frames when necessary.
// //
// Typical topology: // The input stream "FINISH" is used to signal the FlowLimiterCalculator
// when a frame is finished processing. Either a non-empty "FINISH" packet
// or a timestamp bound should be received for each processed frame.
// //
// in ->-[FLC]-[foo]-...-[bar]-+->- out // The combination of `max_in_flight: 1` and `max_in_queue: 1` generally gives
// ^_____________________| // best throughput/latency balance. Throughput is nearly optimal as the
// FINISHED // graph is never idle as there is always something in the queue. Latency is
// nearly optimal as the queue always stores the latest available frame.
// //
// By connecting the output of the graph section to this calculator's FINISHED // Increasing `max_in_flight` to 2 or more can yield the better throughput
// input with a backwards edge, this allows FLC to keep track of how many // when the graph exhibits a high degree of pipeline parallelism. Decreasing
// timestamps are currently being processed. // `max_in_flight` to 0 can yield a better average latency, but at the cost of
// // lower throughput (lower framerate) due to the time during which the graph
// The limit defaults to 1, and can be overridden with the MAX_IN_FLIGHT side // is idle awaiting the next input frame.
// packet.
//
// As long as the number of timestamps being processed ("in flight") is below
// the limit, FLC allows input to pass through. When the limit is reached,
// FLC starts dropping input packets, keeping only the most recent. When the
// processing count decreases again, as signaled by the receipt of a packet on
// FINISHED, FLC allows packets to flow again, releasing the most recently
// queued packet, if any.
//
// If there are multiple input streams, packet dropping is synchronized.
//
// IMPORTANT: for each timestamp where FLC forwards a packet (or a set of
// packets, if using multiple data streams), a packet must eventually arrive on
// the FINISHED stream. Dropping packets in the section between FLC and
// FINISHED will make the in-flight count incorrect.
//
// TODO: Remove this comment when graph-level ISH has been removed.
// NOTE: this calculator should always use the ImmediateInputStreamHandler and
// uses it by default. However, if the graph specifies a graph-level
// InputStreamHandler, to override that setting, the InputStreamHandler must
// be explicitly specified as shown below.
// //
// Example config: // Example config:
// node { // node {
@ -68,131 +51,178 @@ namespace mediapipe {
// tag_index: 'FINISHED' // tag_index: 'FINISHED'
// back_edge: true // back_edge: true
// } // }
// input_stream_handler { // output_stream: "sampled_frames"
// input_stream_handler: 'ImmediateInputStreamHandler' // output_stream: "ALLOW:allowed_timestamps"
// }
// output_stream: "gated_frames"
// } // }
//
// The "ALLOW" stream indicates the transition between accepting frames and
// dropping frames. "ALLOW = true" indicates the start of accepting frames
// including the current timestamp, and "ALLOW = false" indicates the start of
// dropping frames including the current timestamp.
//
// FlowLimiterCalculator provides limited support for multiple input streams.
// The first input stream is treated as the main input stream and successive
// input streams are treated as auxiliary input streams. The auxiliary input
// streams are limited to timestamps passed on the main input stream.
//
class FlowLimiterCalculator : public CalculatorBase { class FlowLimiterCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
int num_data_streams = cc->Inputs().NumEntries(""); auto& side_inputs = cc->InputSidePackets();
RET_CHECK_GE(num_data_streams, 1); side_inputs.Tag("OPTIONS").Set<FlowLimiterCalculatorOptions>().Optional();
RET_CHECK_EQ(cc->Outputs().NumEntries(""), num_data_streams) cc->Inputs().Tag("OPTIONS").Set<FlowLimiterCalculatorOptions>().Optional();
<< "Output streams must correspond input streams except for the " RET_CHECK_GE(cc->Inputs().NumEntries(""), 1);
"finish indicator input stream."; for (int i = 0; i < cc->Inputs().NumEntries(""); ++i) {
for (int i = 0; i < num_data_streams; ++i) {
cc->Inputs().Get("", i).SetAny(); cc->Inputs().Get("", i).SetAny();
cc->Outputs().Get("", i).SetSameAs(&(cc->Inputs().Get("", i))); cc->Outputs().Get("", i).SetSameAs(&(cc->Inputs().Get("", i)));
} }
cc->Inputs().Get("FINISHED", 0).SetAny(); cc->Inputs().Get("FINISHED", 0).SetAny();
if (cc->InputSidePackets().HasTag("MAX_IN_FLIGHT")) { cc->InputSidePackets().Tag("MAX_IN_FLIGHT").Set<int>().Optional();
cc->InputSidePackets().Tag("MAX_IN_FLIGHT").Set<int>(); cc->Outputs().Tag("ALLOW").Set<bool>().Optional();
}
if (cc->Outputs().HasTag("ALLOW")) {
cc->Outputs().Tag("ALLOW").Set<bool>();
}
cc->SetInputStreamHandler("ImmediateInputStreamHandler"); cc->SetInputStreamHandler("ImmediateInputStreamHandler");
cc->SetProcessTimestampBounds(true);
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) final { mediapipe::Status Open(CalculatorContext* cc) final {
finished_id_ = cc->Inputs().GetId("FINISHED", 0); options_ = cc->Options<FlowLimiterCalculatorOptions>();
max_in_flight_ = 1; options_ = tool::RetrieveOptions(options_, cc->InputSidePackets());
if (cc->InputSidePackets().HasTag("MAX_IN_FLIGHT")) { if (cc->InputSidePackets().HasTag("MAX_IN_FLIGHT")) {
max_in_flight_ = cc->InputSidePackets().Tag("MAX_IN_FLIGHT").Get<int>(); options_.set_max_in_flight(
cc->InputSidePackets().Tag("MAX_IN_FLIGHT").Get<int>());
} }
RET_CHECK_GE(max_in_flight_, 1); input_queues_.resize(cc->Inputs().NumEntries(""));
num_in_flight_ = 0;
allowed_id_ = cc->Outputs().GetId("ALLOW", 0);
allow_ctr_ts_ = Timestamp(0);
num_data_streams_ = cc->Inputs().NumEntries("");
data_stream_bound_ts_.resize(num_data_streams_);
RET_CHECK_OK(CopyInputHeadersToOutputs(cc->Inputs(), &(cc->Outputs()))); RET_CHECK_OK(CopyInputHeadersToOutputs(cc->Inputs(), &(cc->Outputs())));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
bool Allow() { return num_in_flight_ < max_in_flight_; } // Returns true if an additional frame can be released for processing.
// The "ALLOW" output stream indicates this condition at each input frame.
::mediapipe::Status Process(CalculatorContext* cc) final { bool ProcessingAllowed() {
bool old_allow = Allow(); return frames_in_flight_.size() < options_.max_in_flight();
Timestamp lowest_incomplete_ts = Timestamp::Done();
// Process FINISHED stream.
if (!cc->Inputs().Get(finished_id_).Value().IsEmpty()) {
RET_CHECK_GT(num_in_flight_, 0)
<< "Received a FINISHED packet, but we had none in flight.";
--num_in_flight_;
} }
// Process data streams. // Outputs a packet indicating whether a frame was sent or dropped.
for (int i = 0; i < num_data_streams_; ++i) { void SendAllow(bool allow, Timestamp ts, CalculatorContext* cc) {
auto& stream = cc->Inputs().Get("", i); if (cc->Outputs().HasTag("ALLOW")) {
auto& out = cc->Outputs().Get("", i); cc->Outputs().Tag("ALLOW").AddPacket(MakePacket<bool>(allow).At(ts));
Packet& packet = stream.Value(); }
auto ts = packet.Timestamp();
if (ts.IsRangeValue() && data_stream_bound_ts_[i] <= ts) {
data_stream_bound_ts_[i] = ts + 1;
// Note: it's ok to update the output bound here, before sending the
// packet, because updates are batched during the Process function.
out.SetNextTimestampBound(data_stream_bound_ts_[i]);
} }
lowest_incomplete_ts =
std::min(lowest_incomplete_ts, data_stream_bound_ts_[i]);
if (packet.IsEmpty()) { // Sets the timestamp bound or closes an output stream.
// If the input stream is closed, close the corresponding output. void SetNextTimestampBound(Timestamp bound, OutputStream* stream) {
if (stream.IsDone() && !out.IsClosed()) { if (bound > Timestamp::Max()) {
out.Close(); stream->Close();
}
// TODO: if the packet is empty, the ts is unset, and we
// cannot read the timestamp bound, even though we'd like to propagate
// it.
} else if (mediapipe::ContainsKey(pending_ts_, ts)) {
// If we have already sent this timestamp (on another stream), send it
// on this stream too.
out.AddPacket(std::move(packet));
} else if (Allow() && (ts > last_dropped_ts_)) {
// If the in-flight is under the limit, and if we have not already
// dropped this or a later timestamp on another stream, then send
// the packet and add an in-flight timestamp.
out.AddPacket(std::move(packet));
pending_ts_.insert(ts);
++num_in_flight_;
} else { } else {
// Otherwise, we'll drop the packet. stream->SetNextTimestampBound(bound);
last_dropped_ts_ = std::max(last_dropped_ts_, ts);
} }
} }
// Remove old pending_ts_ entries. // Returns true if a certain timestamp is being processed.
auto it = std::lower_bound(pending_ts_.begin(), pending_ts_.end(), bool IsInFlight(Timestamp timestamp) {
lowest_incomplete_ts); return std::find(frames_in_flight_.begin(), frames_in_flight_.end(),
pending_ts_.erase(pending_ts_.begin(), it); timestamp) != frames_in_flight_.end();
// Update ALLOW signal.
if ((old_allow != Allow()) && allowed_id_.IsValid()) {
cc->Outputs()
.Get(allowed_id_)
.AddPacket(MakePacket<bool>(Allow()).At(++allow_ctr_ts_));
} }
return ::mediapipe::OkStatus();
// Releases input packets up to the latest settled input timestamp.
void ProcessAuxiliaryInputs(CalculatorContext* cc) {
Timestamp settled_bound = cc->Outputs().Get("", 0).NextTimestampBound();
for (int i = 1; i < cc->Inputs().NumEntries(""); ++i) {
// Release settled frames from each input queue.
while (!input_queues_[i].empty() &&
input_queues_[i].front().Timestamp() < settled_bound) {
Packet packet = input_queues_[i].front();
input_queues_[i].pop_front();
if (IsInFlight(packet.Timestamp())) {
cc->Outputs().Get("", i).AddPacket(packet);
}
}
// Propagate each input timestamp bound.
if (!input_queues_[i].empty()) {
Timestamp bound = input_queues_[i].front().Timestamp();
SetNextTimestampBound(bound, &cc->Outputs().Get("", i));
} else {
Timestamp bound =
cc->Inputs().Get("", i).Value().Timestamp().NextAllowedInStream();
SetNextTimestampBound(bound, &cc->Outputs().Get("", i));
}
}
}
// Releases input packets allowed by the max_in_flight constraint.
mediapipe::Status Process(CalculatorContext* cc) final {
options_ = tool::RetrieveOptions(options_, cc->Inputs());
// Process the FINISHED input stream.
Packet finished_packet = cc->Inputs().Tag("FINISHED").Value();
if (finished_packet.Timestamp() == cc->InputTimestamp()) {
while (!frames_in_flight_.empty() &&
frames_in_flight_.front() <= finished_packet.Timestamp()) {
frames_in_flight_.pop_front();
}
}
// Process the frame input streams.
for (int i = 0; i < cc->Inputs().NumEntries(""); ++i) {
Packet packet = cc->Inputs().Get("", i).Value();
if (!packet.IsEmpty()) {
input_queues_[i].push_back(packet);
}
}
// Abandon expired frames in flight. Note that old frames are abandoned
// when much newer frame timestamps arrive regardless of elapsed time.
TimestampDiff timeout = options_.in_flight_timeout();
Timestamp latest_ts = cc->Inputs().Get("", 0).Value().Timestamp();
if (timeout > 0 && latest_ts == cc->InputTimestamp() &&
latest_ts < Timestamp::Max()) {
while (!frames_in_flight_.empty() &&
(latest_ts - frames_in_flight_.front()) > timeout) {
frames_in_flight_.pop_front();
}
}
// Release allowed frames from the main input queue.
auto& input_queue = input_queues_[0];
while (ProcessingAllowed() && !input_queue.empty()) {
Packet packet = input_queue.front();
input_queue.pop_front();
cc->Outputs().Get("", 0).AddPacket(packet);
SendAllow(true, packet.Timestamp(), cc);
frames_in_flight_.push_back(packet.Timestamp());
}
// Limit the number of queued frames.
// Note that frames can be dropped after frames are released because
// frame-packets and FINISH-packets never arrive in the same Process call.
while (input_queue.size() > options_.max_in_queue()) {
Packet packet = input_queue.front();
input_queue.pop_front();
SendAllow(false, packet.Timestamp(), cc);
}
// Propagate the input timestamp bound.
if (!input_queue.empty()) {
Timestamp bound = input_queue.front().Timestamp();
SetNextTimestampBound(bound, &cc->Outputs().Get("", 0));
} else {
Timestamp bound =
cc->Inputs().Get("", 0).Value().Timestamp().NextAllowedInStream();
SetNextTimestampBound(bound, &cc->Outputs().Get("", 0));
if (cc->Outputs().HasTag("ALLOW")) {
SetNextTimestampBound(bound, &cc->Outputs().Tag("ALLOW"));
}
}
ProcessAuxiliaryInputs(cc);
return mediapipe::OkStatus();
} }
private: private:
std::set<Timestamp> pending_ts_; FlowLimiterCalculatorOptions options_;
Timestamp last_dropped_ts_; std::vector<std::deque<Packet>> input_queues_;
int num_data_streams_; std::deque<Timestamp> frames_in_flight_;
int num_in_flight_;
int max_in_flight_;
CollectionItemId finished_id_;
CollectionItemId allowed_id_;
Timestamp allow_ctr_ts_;
std::vector<Timestamp> data_stream_bound_ts_;
}; };
REGISTER_CALCULATOR(FlowLimiterCalculator); REGISTER_CALCULATOR(FlowLimiterCalculator);
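A hedged, fuller version of the example config from the comment above, showing the FINISHED back edge, the optional ALLOW output, and options set inline; the stream names are illustrative.

#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/port/parse_text_proto.h"

namespace mediapipe {

// Illustrative node config: "downstream_output" is whatever stream marks the
// end of the throttled section, fed back here as the FINISHED back edge.
CalculatorGraphConfig::Node MakeFlowLimiterNode() {
  return ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"pb(
    calculator: "FlowLimiterCalculator"
    input_stream: "raw_frames"
    input_stream: "FINISHED:downstream_output"
    input_stream_info: { tag_index: "FINISHED" back_edge: true }
    output_stream: "sampled_frames"
    output_stream: "ALLOW:allowed_timestamps"
    options {
      [mediapipe.FlowLimiterCalculatorOptions.ext] {
        max_in_flight: 1
        max_in_queue: 1
      }
    }
  )pb");
}

}  // namespace mediapipe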
View File
@ -0,0 +1,40 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";
package mediapipe;
import "mediapipe/framework/calculator.proto";
option objc_class_prefix = "MediaPipe";
message FlowLimiterCalculatorOptions {
extend mediapipe.CalculatorOptions {
optional FlowLimiterCalculatorOptions ext = 326963320;
}
// The maximum number of frames released for processing at one time.
// The default value limits to 1 frame processing at a time.
optional int32 max_in_flight = 1 [default = 1];
// The maximum number of frames queued waiting for processing.
// The default value of 0 allows no frames to wait in the queue.
optional int32 max_in_queue = 2 [default = 0];
// The maximum time in microseconds to wait for a frame to finish processing.
// The default value stops waiting after 1 sec.
// The value 0 specifies no timeout.
optional int64 in_flight_timeout = 3 [default = 1000000];
}
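A hedged sketch of building these options in C++ and packaging them as a packet, for example to feed the optional "OPTIONS" input side packet declared in the calculator's GetContract above (the test above passes such a packet as "limiter_options"); the setter names follow standard protobuf codegen for the fields defined here.

#include "mediapipe/calculators/core/flow_limiter_calculator.pb.h"
#include "mediapipe/framework/packet.h"

namespace mediapipe {

Packet MakeLimiterOptionsPacket() {
  FlowLimiterCalculatorOptions options;
  options.set_max_in_flight(1);            // at most one frame being processed
  options.set_max_in_queue(1);             // keep only the most recent waiting frame
  options.set_in_flight_timeout(1000000);  // give up on a frame after 1 s (microseconds)
  return MakePacket<FlowLimiterCalculatorOptions>(options);
}

}  // namespace mediapipe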
View File
@ -19,6 +19,7 @@
#include "absl/time/clock.h" #include "absl/time/clock.h"
#include "absl/time/time.h" #include "absl/time/time.h"
#include "mediapipe/calculators/core/flow_limiter_calculator.pb.h"
#include "mediapipe/framework/calculator_framework.h" #include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/calculator_runner.h" #include "mediapipe/framework/calculator_runner.h"
#include "mediapipe/framework/formats/image_frame.h" #include "mediapipe/framework/formats/image_frame.h"
@ -28,6 +29,8 @@
#include "mediapipe/framework/port/parse_text_proto.h" #include "mediapipe/framework/port/parse_text_proto.h"
#include "mediapipe/framework/port/status_matchers.h" #include "mediapipe/framework/port/status_matchers.h"
#include "mediapipe/framework/timestamp.h" #include "mediapipe/framework/timestamp.h"
#include "mediapipe/framework/tool/simulation_clock.h"
#include "mediapipe/framework/tool/simulation_clock_executor.h"
#include "mediapipe/framework/tool/sink.h" #include "mediapipe/framework/tool/sink.h"
namespace mediapipe { namespace mediapipe {
@ -67,144 +70,49 @@ std::vector<T> PacketValues(const std::vector<Packet>& packets) {
return result; return result;
} }
constexpr int kNumImageFrames = 5;
constexpr int kNumFinished = 3;
CalculatorGraphConfig::Node GetDefaultNode() {
return ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"(
calculator: "FlowLimiterCalculator"
input_stream: "raw_frames"
input_stream: "FINISHED:finished"
input_stream_info: { tag_index: "FINISHED" back_edge: true }
output_stream: "gated_frames"
)");
}
// Simple test to make sure that the FlowLimiterCalculator outputs just one
// packet when MAX_IN_FLIGHT is 1.
TEST(FlowLimiterCalculator, OneOutputTest) {
// Setup the calculator runner and add only ImageFrame packets.
CalculatorRunner runner(GetDefaultNode());
for (int i = 0; i < kNumImageFrames; ++i) {
Timestamp timestamp = Timestamp(i * Timestamp::kTimestampUnitsPerSecond);
runner.MutableInputs()->Index(0).packets.push_back(
MakePacket<ImageFrame>().At(timestamp));
}
// Run the calculator.
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& frame_output_packets =
runner.Outputs().Index(0).packets;
EXPECT_EQ(frame_output_packets.size(), 1);
}
// Simple test to make sure that the FlowLimiterCalculator waits for all
// input streams to have at least one packet available before publishing.
TEST(FlowLimiterCalculator, BasicTest) {
// Setup the calculator runner and add both ImageFrame and finish packets.
CalculatorRunner runner(GetDefaultNode());
for (int i = 0; i < kNumImageFrames; ++i) {
Timestamp timestamp = Timestamp(i * Timestamp::kTimestampUnitsPerSecond);
runner.MutableInputs()->Index(0).packets.push_back(
MakePacket<ImageFrame>().At(timestamp));
}
for (int i = 0; i < kNumFinished; ++i) {
Timestamp timestamp =
Timestamp((i + 1) * Timestamp::kTimestampUnitsPerSecond);
runner.MutableInputs()
->Tag("FINISHED")
.packets.push_back(MakePacket<bool>(true).At(timestamp));
}
// Run the calculator.
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& frame_output_packets =
runner.Outputs().Index(0).packets;
// Only outputs packets if both input streams are available.
int expected_num_packets = std::min(kNumImageFrames, kNumFinished + 1);
EXPECT_EQ(frame_output_packets.size(), expected_num_packets);
}
// A Calculator::Process callback function. // A Calculator::Process callback function.
typedef std::function<::mediapipe::Status(const InputStreamShardSet&, typedef std::function<mediapipe::Status(const InputStreamShardSet&,
OutputStreamShardSet*)> OutputStreamShardSet*)>
ProcessFunction; ProcessFunction;
// A testing callback function that passes through all packets. // A testing callback function that passes through all packets.
::mediapipe::Status PassthroughFunction(const InputStreamShardSet& inputs, mediapipe::Status PassthroughFunction(const InputStreamShardSet& inputs,
OutputStreamShardSet* outputs) { OutputStreamShardSet* outputs) {
for (int i = 0; i < inputs.NumEntries(); ++i) { for (int i = 0; i < inputs.NumEntries(); ++i) {
if (!inputs.Index(i).Value().IsEmpty()) { if (!inputs.Index(i).Value().IsEmpty()) {
outputs->Index(i).AddPacket(inputs.Index(i).Value()); outputs->Index(i).AddPacket(inputs.Index(i).Value());
} }
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
// A Calculator that runs a testing callback function in Close. // Tests demonstrating an FlowLimiterCalculator operating in a cyclic graph.
class CloseCallbackCalculator : public CalculatorBase { class FlowLimiterCalculatorSemaphoreTest : public testing::Test {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { FlowLimiterCalculatorSemaphoreTest() : exit_semaphore_(0) {}
for (CollectionItemId id = cc->Inputs().BeginId();
id < cc->Inputs().EndId(); ++id) {
cc->Inputs().Get(id).SetAny();
}
for (CollectionItemId id = cc->Outputs().BeginId();
id < cc->Outputs().EndId(); ++id) {
cc->Outputs().Get(id).SetAny();
}
cc->InputSidePackets().Index(0).Set<std::function<::mediapipe::Status()>>();
return ::mediapipe::OkStatus();
}
::mediapipe::Status Process(CalculatorContext* cc) override {
return PassthroughFunction(cc->Inputs(), &(cc->Outputs()));
}
::mediapipe::Status Close(CalculatorContext* cc) override {
const auto& callback = cc->InputSidePackets()
.Index(0)
.Get<std::function<::mediapipe::Status()>>();
return callback();
}
};
REGISTER_CALCULATOR(CloseCallbackCalculator);
// Tests demonstrating an FlowLimiterCalculator operating in a cyclic graph.
// TODO: clean up these tests.
class FlowLimiterCalculatorTest : public testing::Test {
public:
FlowLimiterCalculatorTest() : enter_semaphore_(0), exit_semaphore_(0) {}
void SetUp() override { void SetUp() override {
graph_config_ = InflightGraphConfig(); graph_config_ = InflightGraphConfig();
tool::AddVectorSink("out_1", &graph_config_, &out_1_packets_); tool::AddVectorSink("out_1", &graph_config_, &out_1_packets_);
tool::AddVectorSink("out_2", &graph_config_, &out_2_packets_);
} }
void InitializeGraph(int max_in_flight) { void InitializeGraph(int max_in_flight) {
ProcessFunction semaphore_0_func = [&](const InputStreamShardSet& inputs,
OutputStreamShardSet* outputs) {
enter_semaphore_.Release(1);
return PassthroughFunction(inputs, outputs);
};
ProcessFunction semaphore_1_func = [&](const InputStreamShardSet& inputs, ProcessFunction semaphore_1_func = [&](const InputStreamShardSet& inputs,
OutputStreamShardSet* outputs) { OutputStreamShardSet* outputs) {
exit_semaphore_.Acquire(1); exit_semaphore_.Acquire(1);
return PassthroughFunction(inputs, outputs); return PassthroughFunction(inputs, outputs);
}; };
std::function<::mediapipe::Status()> close_func = [this]() { FlowLimiterCalculatorOptions options;
close_count_++; options.set_max_in_flight(max_in_flight);
return ::mediapipe::OkStatus(); options.set_max_in_queue(1);
};
MP_ASSERT_OK(graph_.Initialize( MP_ASSERT_OK(graph_.Initialize(
graph_config_, { graph_config_, {
{"max_in_flight", MakePacket<int>(max_in_flight)}, {"limiter_options", Adopt(new auto(options))},
{"callback_0", Adopt(new auto(semaphore_0_func))},
{"callback_1", Adopt(new auto(semaphore_1_func))}, {"callback_1", Adopt(new auto(semaphore_1_func))},
{"callback_2", Adopt(new auto(close_func))},
})); }));
allow_poller_.reset(new OutputStreamPoller(
graph_.AddOutputStreamPoller("allow").ValueOrDie()));
} }
// Adds a packet to a graph input stream. // Adds a packet to a graph input stream.
@ -216,44 +124,24 @@ class FlowLimiterCalculatorTest : public testing::Test {
// A calculator graph starting with an FlowLimiterCalculator and // A calculator graph starting with an FlowLimiterCalculator and
// ending with a InFlightFinishCalculator. // ending with a InFlightFinishCalculator.
// Back-edge "finished" limits processing to one frame in-flight. // Back-edge "finished" limits processing to one frame in-flight.
// The two LambdaCalculators are used to keep certain packet sets in flight. // The LambdaCalculator is used to keep certain frames in flight.
CalculatorGraphConfig InflightGraphConfig() { CalculatorGraphConfig InflightGraphConfig() {
return ParseTextProtoOrDie<CalculatorGraphConfig>(R"( return ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
input_stream: 'in_1' input_stream: 'in_1'
input_stream: 'in_2'
node { node {
calculator: 'FlowLimiterCalculator' calculator: 'FlowLimiterCalculator'
input_side_packet: 'MAX_IN_FLIGHT:max_in_flight' input_side_packet: 'OPTIONS:limiter_options'
input_stream: 'in_1' input_stream: 'in_1'
input_stream: 'in_2'
input_stream: 'FINISHED:out_1' input_stream: 'FINISHED:out_1'
input_stream_info: { tag_index: 'FINISHED' back_edge: true } input_stream_info: { tag_index: 'FINISHED' back_edge: true }
output_stream: 'in_1_sampled' output_stream: 'in_1_sampled'
output_stream: 'in_2_sampled' output_stream: 'ALLOW:allow'
}
node {
calculator: 'LambdaCalculator'
input_side_packet: 'callback_0'
input_stream: 'in_1_sampled'
input_stream: 'in_2_sampled'
output_stream: 'queue_1'
output_stream: 'queue_2'
} }
node { node {
calculator: 'LambdaCalculator' calculator: 'LambdaCalculator'
input_side_packet: 'callback_1' input_side_packet: 'callback_1'
input_stream: 'queue_1' input_stream: 'in_1_sampled'
input_stream: 'queue_2'
output_stream: 'close_1'
output_stream: 'close_2'
}
node {
calculator: 'CloseCallbackCalculator'
input_side_packet: 'callback_2'
input_stream: 'close_1'
input_stream: 'close_2'
output_stream: 'out_1' output_stream: 'out_1'
output_stream: 'out_2'
} }
)"); )");
} }
@ -261,21 +149,19 @@ class FlowLimiterCalculatorTest : public testing::Test {
protected: protected:
CalculatorGraphConfig graph_config_; CalculatorGraphConfig graph_config_;
CalculatorGraph graph_; CalculatorGraph graph_;
AtomicSemaphore enter_semaphore_;
AtomicSemaphore exit_semaphore_; AtomicSemaphore exit_semaphore_;
std::vector<Packet> out_1_packets_; std::vector<Packet> out_1_packets_;
std::vector<Packet> out_2_packets_; std::unique_ptr<OutputStreamPoller> allow_poller_;
int close_count_ = 0;
}; };
// A test demonstrating an FlowLimiterCalculator operating in a cyclic // A test demonstrating an FlowLimiterCalculator operating in a cyclic
// graph. This test shows that: // graph. This test shows that:
// //
// (1) Timestamps are passed through unaltered. // (1) Frames exceeding the queue size are dropped.
// (2) All output streams including the back_edge stream are closed when // (2) The "ALLOW" signal is produced.
// the first input stream is closed. // (3) Timestamps are passed through unaltered.
// //
TEST_F(FlowLimiterCalculatorTest, BackEdgeCloses) { TEST_F(FlowLimiterCalculatorSemaphoreTest, FramesDropped) {
InitializeGraph(1); InitializeGraph(1);
MP_ASSERT_OK(graph_.StartRun({})); MP_ASSERT_OK(graph_.StartRun({}));
@ -284,210 +170,590 @@ TEST_F(FlowLimiterCalculatorTest, BackEdgeCloses) {
input_name, MakePacket<int64>(n).At(Timestamp(n)))); input_name, MakePacket<int64>(n).At(Timestamp(n))));
}; };
for (int i = 0; i < 10; i++) { Packet allow_packet;
send_packet("in_1", i * 10); send_packet("in_1", 0);
// This next input should be dropped. for (int i = 0; i < 9; i++) {
EXPECT_TRUE(allow_poller_->Next(&allow_packet));
EXPECT_TRUE(allow_packet.Get<bool>());
// This input should wait in the limiter input queue.
send_packet("in_1", i * 10 + 5); send_packet("in_1", i * 10 + 5);
MP_EXPECT_OK(graph_.WaitUntilIdle()); // This input should drop the previous input.
send_packet("in_2", i * 10); send_packet("in_1", i * 10 + 10);
EXPECT_TRUE(allow_poller_->Next(&allow_packet));
EXPECT_FALSE(allow_packet.Get<bool>());
exit_semaphore_.Release(1); exit_semaphore_.Release(1);
MP_EXPECT_OK(graph_.WaitUntilIdle());
} }
exit_semaphore_.Release(1);
MP_EXPECT_OK(graph_.CloseInputStream("in_1")); MP_EXPECT_OK(graph_.CloseInputStream("in_1"));
MP_EXPECT_OK(graph_.CloseInputStream("in_2"));
MP_EXPECT_OK(graph_.WaitUntilIdle()); MP_EXPECT_OK(graph_.WaitUntilIdle());
// All output streams are closed and all output packets are delivered, // All output streams are closed and all output packets are delivered,
// with stream "in_1" and stream "in_2" closed. // with stream "in_1" closed.
EXPECT_EQ(10, out_1_packets_.size()); EXPECT_EQ(10, out_1_packets_.size());
EXPECT_EQ(10, out_2_packets_.size());
// Timestamps have not been messed with. // Timestamps have not been altered.
EXPECT_EQ(PacketValues<int64>(out_1_packets_), EXPECT_EQ(PacketValues<int64>(out_1_packets_),
TimestampValues(out_1_packets_)); TimestampValues(out_1_packets_));
EXPECT_EQ(PacketValues<int64>(out_2_packets_),
TimestampValues(out_2_packets_));
// Extra inputs on in_1 have been dropped // Extra inputs on in_1 have been dropped.
EXPECT_EQ(TimestampValues(out_1_packets_), EXPECT_EQ(TimestampValues(out_1_packets_),
(std::vector<int64>{0, 10, 20, 30, 40, 50, 60, 70, 80, 90})); (std::vector<int64>{0, 10, 20, 30, 40, 50, 60, 70, 80, 90}));
EXPECT_EQ(TimestampValues(out_1_packets_), TimestampValues(out_2_packets_));
// The closing of the stream has been propagated.
EXPECT_EQ(1, close_count_);
} }
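In case the new wiring is easier to read outside the diff: a minimal sketch, assuming only the option fields and stream tags exercised by the tests in this file, of how the limiter node is now configured with an OPTIONS side packet instead of the old MAX_IN_FLIGHT side packet (illustrative only, not part of this change; stream names are placeholders).

// Illustrative sketch only; not part of this change.
CalculatorGraphConfig::Node LimiterNodeSketch() {
  return ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"(
    calculator: 'FlowLimiterCalculator'
    input_side_packet: 'OPTIONS:limiter_options'  # FlowLimiterCalculatorOptions
    input_stream: 'in_1'                          # frames to throttle
    input_stream: 'FINISHED:out_1'                # back edge from the end of the pipeline
    input_stream_info: { tag_index: 'FINISHED' back_edge: true }
    output_stream: 'in_1_sampled'                 # frames admitted in flight
    output_stream: 'ALLOW:allow'                  # true = admitted, false = dropped
  )");
}
// The options side packet is supplied at run time, e.g.:
//   FlowLimiterCalculatorOptions options;
//   options.set_max_in_flight(1);
//   options.set_max_in_queue(1);
//   graph.StartRun({{"limiter_options",
//                    MakePacket<FlowLimiterCalculatorOptions>(options)}});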
// A test demonstrating that all output streams are closed when all // A calculator that sleeps during Process.
// input streams are closed after the last input packet has been processed. class SleepCalculator : public CalculatorBase {
TEST_F(FlowLimiterCalculatorTest, AllStreamsClose) { public:
InitializeGraph(1); static mediapipe::Status GetContract(CalculatorContract* cc) {
MP_ASSERT_OK(graph_.StartRun({})); cc->Inputs().Tag("PACKET").SetAny();
cc->Outputs().Tag("PACKET").SetSameAs(&cc->Inputs().Tag("PACKET"));
exit_semaphore_.Release(10); cc->InputSidePackets().Tag("SLEEP_TIME").Set<int64>();
for (int i = 0; i < 10; i++) { cc->InputSidePackets().Tag("WARMUP_TIME").Set<int64>();
AddPacket("in_1", i); cc->InputSidePackets().Tag("CLOCK").Set<mediapipe::Clock*>();
MP_EXPECT_OK(graph_.WaitUntilIdle()); cc->SetTimestampOffset(0);
AddPacket("in_2", i); return mediapipe::OkStatus();
MP_EXPECT_OK(graph_.WaitUntilIdle());
}
MP_EXPECT_OK(graph_.CloseAllInputStreams());
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(out_1_packets_), TimestampValues(out_2_packets_));
EXPECT_EQ(TimestampValues(out_1_packets_),
(std::vector<int64>{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}));
EXPECT_EQ(1, close_count_);
} }
TEST(FlowLimiterCalculator, TwoStreams) { mediapipe::Status Open(CalculatorContext* cc) final {
std::vector<Packet> a_passed; clock_ = cc->InputSidePackets().Tag("CLOCK").Get<mediapipe::Clock*>();
std::vector<Packet> b_passed; return mediapipe::OkStatus();
CalculatorGraphConfig graph_config_ = }
ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
input_stream: 'in_a' mediapipe::Status Process(CalculatorContext* cc) final {
input_stream: 'in_b' ++packet_count;
input_stream: 'finished' absl::Duration sleep_time = absl::Microseconds(
packet_count == 1
? cc->InputSidePackets().Tag("WARMUP_TIME").Get<int64>()
: cc->InputSidePackets().Tag("SLEEP_TIME").Get<int64>());
clock_->Sleep(sleep_time);
cc->Outputs().Tag("PACKET").AddPacket(cc->Inputs().Tag("PACKET").Value());
return mediapipe::OkStatus();
}
private:
::mediapipe::Clock* clock_ = nullptr;
int packet_count = 0;
};
REGISTER_CALCULATOR(SleepCalculator);
// A calculator that drops a packet occasionally.
// Drops the 3rd packet, and optionally the corresponding timestamp bound.
class DropCalculator : public CalculatorBase {
public:
static mediapipe::Status GetContract(CalculatorContract* cc) {
cc->Inputs().Tag("PACKET").SetAny();
cc->Outputs().Tag("PACKET").SetSameAs(&cc->Inputs().Tag("PACKET"));
cc->InputSidePackets().Tag("DROP_TIMESTAMPS").Set<bool>();
cc->SetProcessTimestampBounds(true);
return mediapipe::OkStatus();
}
mediapipe::Status Process(CalculatorContext* cc) final {
if (!cc->Inputs().Tag("PACKET").Value().IsEmpty()) {
++packet_count;
}
bool drop = (packet_count == 3);
if (!drop && !cc->Inputs().Tag("PACKET").Value().IsEmpty()) {
cc->Outputs().Tag("PACKET").AddPacket(cc->Inputs().Tag("PACKET").Value());
}
if (!drop || !cc->InputSidePackets().Tag("DROP_TIMESTAMPS").Get<bool>()) {
cc->Outputs().Tag("PACKET").SetNextTimestampBound(
cc->InputTimestamp().NextAllowedInStream());
}
return mediapipe::OkStatus();
}
private:
int packet_count = 0;
};
REGISTER_CALCULATOR(DropCalculator);
// Tests demonstrating an FlowLimiterCalculator processing FINISHED timestamps.
class FlowLimiterCalculatorTest : public testing::Test {
protected:
CalculatorGraphConfig InflightGraphConfig() {
return ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
input_stream: 'in_1'
node { node {
name: 'input_dropper'
calculator: 'FlowLimiterCalculator' calculator: 'FlowLimiterCalculator'
input_side_packet: 'MAX_IN_FLIGHT:max_in_flight' input_side_packet: 'OPTIONS:limiter_options'
input_stream: 'in_a' input_stream: 'in_1'
input_stream: 'in_b' input_stream: 'FINISHED:out_1'
input_stream: 'FINISHED:finished'
input_stream_info: { tag_index: 'FINISHED' back_edge: true } input_stream_info: { tag_index: 'FINISHED' back_edge: true }
output_stream: 'in_a_sampled' output_stream: 'in_1_sampled'
output_stream: 'in_b_sampled'
output_stream: 'ALLOW:allow' output_stream: 'ALLOW:allow'
} }
node {
calculator: 'SleepCalculator'
input_side_packet: 'WARMUP_TIME:warmup_time'
input_side_packet: 'SLEEP_TIME:sleep_time'
input_side_packet: 'CLOCK:clock'
input_stream: 'PACKET:in_1_sampled'
output_stream: 'PACKET:out_1_sampled'
}
node {
calculator: 'DropCalculator'
input_side_packet: "DROP_TIMESTAMPS:drop_timesamps"
input_stream: 'PACKET:out_1_sampled'
output_stream: 'PACKET:out_1'
}
)"); )");
std::string allow_cb_name;
tool::AddVectorSink("in_a_sampled", &graph_config_, &a_passed);
tool::AddVectorSink("in_b_sampled", &graph_config_, &b_passed);
tool::AddCallbackCalculator("allow", &graph_config_, &allow_cb_name, true);
bool allow = true;
auto allow_cb = [&allow](const Packet& packet) {
allow = packet.Get<bool>();
};
CalculatorGraph graph_;
MP_EXPECT_OK(graph_.Initialize(
graph_config_,
{
{"max_in_flight", MakePacket<int>(1)},
{allow_cb_name,
MakePacket<std::function<void(const Packet&)>>(allow_cb)},
}));
MP_EXPECT_OK(graph_.StartRun({}));
auto send_packet = [&graph_](const std::string& input_name, int n) {
MP_EXPECT_OK(graph_.AddPacketToInputStream(
input_name, MakePacket<int>(n).At(Timestamp(n))));
};
send_packet("in_a", 1);
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(allow, false);
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{}));
send_packet("in_a", 2);
send_packet("in_b", 1);
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1}));
EXPECT_EQ(allow, false);
send_packet("finished", 1);
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1}));
EXPECT_EQ(allow, true);
send_packet("in_b", 2);
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1}));
EXPECT_EQ(allow, true);
send_packet("in_b", 3);
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1, 3}));
EXPECT_EQ(allow, false);
send_packet("in_b", 4);
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1, 3}));
EXPECT_EQ(allow, false);
send_packet("in_a", 3);
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1, 3}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1, 3}));
EXPECT_EQ(allow, false);
send_packet("finished", 3);
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1, 3}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1, 3}));
EXPECT_EQ(allow, true);
MP_EXPECT_OK(graph_.CloseAllInputStreams());
MP_EXPECT_OK(graph_.WaitUntilDone());
} }
TEST(FlowLimiterCalculator, CanConsume) { // Parse an absl::Time from RFC3339 format.
std::vector<Packet> in_sampled_packets_; absl::Time ParseTime(const std::string& date_time_str) {
CalculatorGraphConfig graph_config_ = absl::Time result;
absl::ParseTime(absl::RFC3339_sec, date_time_str, &result, nullptr);
return result;
}
// The point in simulated time when the test starts.
absl::Time StartTime() { return ParseTime("2020-11-03T20:00:00Z"); }
// Initialize the test clock to follow simulated time.
void SetUpSimulationClock() {
auto executor = std::make_shared<SimulationClockExecutor>(8);
simulation_clock_ = executor->GetClock();
clock_ = simulation_clock_.get();
simulation_clock_->ThreadStart();
clock_->SleepUntil(StartTime());
simulation_clock_->ThreadFinish();
MP_ASSERT_OK(graph_.SetExecutor("", executor));
}
// Initialize the test clock to follow wall time.
void SetUpRealClock() { clock_ = mediapipe::Clock::RealClock(); }
// Create a few mediapipe input Packets holding ints.
void SetUpInputData() {
for (int i = 0; i < 100; ++i) {
input_packets_.push_back(MakePacket<int>(i).At(Timestamp(i * 10000)));
}
}
protected:
CalculatorGraph graph_;
mediapipe::Clock* clock_;
std::shared_ptr<SimulationClock> simulation_clock_;
std::vector<Packet> input_packets_;
std::vector<Packet> out_1_packets_;
std::vector<Packet> allow_packets_;
};
// Shows that "FINISHED" can be indicated with either a packet or a timestamp
// bound. DropCalculator periodically drops one packet but always propagates
// the timestamp bound. Input packets are released or dropped promptly after
// each "FINISH" packet or a timestamp bound arrives.
TEST_F(FlowLimiterCalculatorTest, FinishedTimestamps) {
// Configure the test.
SetUpInputData();
SetUpSimulationClock();
CalculatorGraphConfig graph_config = InflightGraphConfig();
auto limiter_options = ParseTextProtoOrDie<FlowLimiterCalculatorOptions>(R"(
max_in_flight: 1
max_in_queue: 1
)");
std::map<std::string, Packet> side_packets = {
{"limiter_options",
MakePacket<FlowLimiterCalculatorOptions>(limiter_options)},
{"warmup_time", MakePacket<int64>(22000)},
{"sleep_time", MakePacket<int64>(22000)},
{"drop_timesamps", MakePacket<bool>(false)},
{"clock", MakePacket<mediapipe::Clock*>(clock_)},
};
// Start the graph.
MP_ASSERT_OK(graph_.Initialize(graph_config));
MP_EXPECT_OK(graph_.ObserveOutputStream("out_1", [this](Packet p) {
out_1_packets_.push_back(p);
return mediapipe::OkStatus();
}));
MP_EXPECT_OK(graph_.ObserveOutputStream("allow", [this](Packet p) {
allow_packets_.push_back(p);
return mediapipe::OkStatus();
}));
simulation_clock_->ThreadStart();
MP_ASSERT_OK(graph_.StartRun(side_packets));
// Add 9 input packets.
// 1. packet-0 is released,
// 2. packet-1 is queued,
// 3. packet-2 is queued and packet-1 is dropped,
// 4. packet-2 is released, and so forth.
MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[0]));
clock_->Sleep(absl::Microseconds(1));
EXPECT_EQ(allow_packets_.size(), 1);
EXPECT_EQ(allow_packets_.back().Get<bool>(), true);
clock_->Sleep(absl::Microseconds(10000));
for (int i = 1; i < 8; i += 2) {
MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[i]));
clock_->Sleep(absl::Microseconds(10000));
EXPECT_EQ(allow_packets_.size(), i);
MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[i + 1]));
clock_->Sleep(absl::Microseconds(1));
EXPECT_EQ(allow_packets_.size(), i + 1);
EXPECT_EQ(allow_packets_.back().Get<bool>(), false);
clock_->Sleep(absl::Microseconds(10000));
EXPECT_EQ(allow_packets_.size(), i + 2);
EXPECT_EQ(allow_packets_.back().Get<bool>(), true);
}
// Finish the graph.
MP_EXPECT_OK(graph_.CloseAllPacketSources());
clock_->Sleep(absl::Microseconds(40000));
MP_EXPECT_OK(graph_.WaitUntilDone());
simulation_clock_->ThreadFinish();
// Validate the output.
// input_packets_[4] is dropped by the DropCalculator.
std::vector<Packet> expected_output = {input_packets_[0], input_packets_[2],
input_packets_[6], input_packets_[8]};
EXPECT_EQ(out_1_packets_, expected_output);
}
// Shows that an output packet can be lost completely, and the
// FlowLimiterCalculator will stop waiting for it after in_flight_timeout.
// DropCalculator completely loses one packet including its timestamp bound.
// FlowLimiterCalculator waits 100 ms, and then starts releasing packets again.
TEST_F(FlowLimiterCalculatorTest, FinishedLost) {
// Configure the test.
SetUpInputData();
SetUpSimulationClock();
CalculatorGraphConfig graph_config = InflightGraphConfig();
auto limiter_options = ParseTextProtoOrDie<FlowLimiterCalculatorOptions>(R"(
max_in_flight: 1
max_in_queue: 1
in_flight_timeout: 100000 # 100 ms
)");
std::map<std::string, Packet> side_packets = {
{"limiter_options",
MakePacket<FlowLimiterCalculatorOptions>(limiter_options)},
{"warmup_time", MakePacket<int64>(22000)},
{"sleep_time", MakePacket<int64>(22000)},
{"drop_timesamps", MakePacket<bool>(true)},
{"clock", MakePacket<mediapipe::Clock*>(clock_)},
};
// Start the graph.
MP_ASSERT_OK(graph_.Initialize(graph_config));
MP_EXPECT_OK(graph_.ObserveOutputStream("out_1", [this](Packet p) {
out_1_packets_.push_back(p);
return mediapipe::OkStatus();
}));
MP_EXPECT_OK(graph_.ObserveOutputStream("allow", [this](Packet p) {
allow_packets_.push_back(p);
return mediapipe::OkStatus();
}));
simulation_clock_->ThreadStart();
MP_ASSERT_OK(graph_.StartRun(side_packets));
// Add 21 input packets.
// 1. packet-0 is released, packet-1 queued and dropped, and so forth.
// 2. packet-4 is lost by DropCalculator.
// 3. packet-5 through 13 are dropped while waiting for packet-4.
// 4. packet-4 expires and queued packet-14 is released.
// 5. packet-17, 19, and 20 are released on time.
MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[0]));
clock_->Sleep(absl::Microseconds(10000));
for (int i = 1; i < 21; ++i) {
MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[i]));
clock_->Sleep(absl::Microseconds(10000));
}
// Finish the graph.
MP_EXPECT_OK(graph_.CloseAllPacketSources());
clock_->Sleep(absl::Microseconds(40000));
MP_EXPECT_OK(graph_.WaitUntilDone());
simulation_clock_->ThreadFinish();
// Validate the output.
// input_packets_[4] is lost by the DropCalculator.
std::vector<Packet> expected_output = {
input_packets_[0], input_packets_[2], input_packets_[14],
input_packets_[17], input_packets_[19], input_packets_[20],
};
EXPECT_EQ(out_1_packets_, expected_output);
}
// Shows what happens when a finish packet is delayed beyond in_flight_timeout.
// After in_flight_timeout, FlowLimiterCalculator continues releasing packets.
// Temporarily, more than max_in_flight frames are in flight.
// Eventually, the number of frames in flight returns to max_in_flight.
TEST_F(FlowLimiterCalculatorTest, FinishedDelayed) {
// Configure the test.
SetUpInputData();
SetUpSimulationClock();
CalculatorGraphConfig graph_config = InflightGraphConfig();
auto limiter_options = ParseTextProtoOrDie<FlowLimiterCalculatorOptions>(R"(
max_in_flight: 1
max_in_queue: 1
in_flight_timeout: 100000 # 100 ms
)");
std::map<std::string, Packet> side_packets = {
{"limiter_options",
MakePacket<FlowLimiterCalculatorOptions>(limiter_options)},
{"warmup_time", MakePacket<int64>(500000)},
{"sleep_time", MakePacket<int64>(22000)},
{"drop_timesamps", MakePacket<bool>(false)},
{"clock", MakePacket<mediapipe::Clock*>(clock_)},
};
// Start the graph.
MP_ASSERT_OK(graph_.Initialize(graph_config));
MP_EXPECT_OK(graph_.ObserveOutputStream("out_1", [this](Packet p) {
out_1_packets_.push_back(p);
return mediapipe::OkStatus();
}));
MP_EXPECT_OK(graph_.ObserveOutputStream("allow", [this](Packet p) {
allow_packets_.push_back(p);
return mediapipe::OkStatus();
}));
simulation_clock_->ThreadStart();
MP_ASSERT_OK(graph_.StartRun(side_packets));
// Add 71 input packets.
// 1. During the 500 ms WARMUP_TIME, the in_flight_timeout releases
// packets 0, 10, 20, 30, 40, 50, which are queued at the SleepCalculator.
// 2. During the next 120 ms, these 6 packets are processed.
// 3. After the graph is finally finished with warmup and the backlog packets,
// packets 60 through 70 are released and processed on time.
MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[0]));
clock_->Sleep(absl::Microseconds(10000));
for (int i = 1; i < 71; ++i) {
MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[i]));
clock_->Sleep(absl::Microseconds(10000));
}
// Finish the graph.
MP_EXPECT_OK(graph_.CloseAllPacketSources());
clock_->Sleep(absl::Microseconds(40000));
MP_EXPECT_OK(graph_.WaitUntilDone());
simulation_clock_->ThreadFinish();
// Validate the output.
// The graph is warming up or backlogged until packet 60.
std::vector<Packet> expected_output = {
input_packets_[0], input_packets_[10], input_packets_[30],
input_packets_[40], input_packets_[50], input_packets_[60],
input_packets_[63], input_packets_[65], input_packets_[67],
input_packets_[69], input_packets_[70],
};
EXPECT_EQ(out_1_packets_, expected_output);
}
// Shows that packets on auxiliary input streams are released for the same
// timestamps as the main input stream, whether the auxiliary packets arrive
// early or late.
TEST_F(FlowLimiterCalculatorTest, TwoInputStreams) {
// Configure the test.
SetUpInputData();
SetUpSimulationClock();
CalculatorGraphConfig graph_config =
ParseTextProtoOrDie<CalculatorGraphConfig>(R"( ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
input_stream: 'in' input_stream: 'in_1'
input_stream: 'finished' input_stream: 'in_2'
node { node {
name: 'input_dropper'
calculator: 'FlowLimiterCalculator' calculator: 'FlowLimiterCalculator'
input_side_packet: 'MAX_IN_FLIGHT:max_in_flight' input_side_packet: 'OPTIONS:limiter_options'
input_stream: 'in' input_stream: 'in_1'
input_stream: 'FINISHED:finished' input_stream: 'in_2'
input_stream: 'FINISHED:out_1'
input_stream_info: { tag_index: 'FINISHED' back_edge: true } input_stream_info: { tag_index: 'FINISHED' back_edge: true }
output_stream: 'in_sampled' output_stream: 'in_1_sampled'
output_stream: 'in_2_sampled'
output_stream: 'ALLOW:allow' output_stream: 'ALLOW:allow'
} }
node {
calculator: 'SleepCalculator'
input_side_packet: 'WARMUP_TIME:warmup_time'
input_side_packet: 'SLEEP_TIME:sleep_time'
input_side_packet: 'CLOCK:clock'
input_stream: 'PACKET:in_1_sampled'
output_stream: 'PACKET:out_1_sampled'
}
node {
calculator: 'DropCalculator'
input_side_packet: "DROP_TIMESTAMPS:drop_timesamps"
input_stream: 'PACKET:out_1_sampled'
output_stream: 'PACKET:out_1'
}
)"); )");
std::string allow_cb_name;
tool::AddVectorSink("in_sampled", &graph_config_, &in_sampled_packets_);
tool::AddCallbackCalculator("allow", &graph_config_, &allow_cb_name, true);
bool allow = true; auto limiter_options = ParseTextProtoOrDie<FlowLimiterCalculatorOptions>(R"(
auto allow_cb = [&allow](const Packet& packet) { max_in_flight: 1
allow = packet.Get<bool>(); max_in_queue: 1
in_flight_timeout: 100000 # 100 ms
)");
std::map<std::string, Packet> side_packets = {
{"limiter_options",
MakePacket<FlowLimiterCalculatorOptions>(limiter_options)},
{"warmup_time", MakePacket<int64>(22000)},
{"sleep_time", MakePacket<int64>(22000)},
{"drop_timesamps", MakePacket<bool>(true)},
{"clock", MakePacket<mediapipe::Clock*>(clock_)},
}; };
CalculatorGraph graph_; // Start the graph.
MP_EXPECT_OK(graph_.Initialize( MP_ASSERT_OK(graph_.Initialize(graph_config));
graph_config_, MP_EXPECT_OK(graph_.ObserveOutputStream("out_1", [this](Packet p) {
{ out_1_packets_.push_back(p);
{"max_in_flight", MakePacket<int>(1)}, return mediapipe::OkStatus();
{allow_cb_name,
MakePacket<std::function<void(const Packet&)>>(allow_cb)},
})); }));
std::vector<Packet> out_2_packets;
MP_EXPECT_OK(graph_.ObserveOutputStream("in_2_sampled", [&](Packet p) {
out_2_packets.push_back(p);
return mediapipe::OkStatus();
}));
MP_EXPECT_OK(graph_.ObserveOutputStream("allow", [this](Packet p) {
allow_packets_.push_back(p);
return mediapipe::OkStatus();
}));
simulation_clock_->ThreadStart();
MP_ASSERT_OK(graph_.StartRun(side_packets));
MP_EXPECT_OK(graph_.StartRun({})); // Add packets 0..9 to stream in_1, and packets 0..10 to stream in_2.
MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[0]));
clock_->Sleep(absl::Microseconds(10000));
for (int i = 1; i < 10; ++i) {
MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[i]));
MP_EXPECT_OK(graph_.AddPacketToInputStream("in_2", input_packets_[i - 1]));
clock_->Sleep(absl::Microseconds(10000));
}
auto send_packet = [&graph_](const std::string& input_name, int n) { // Add packets 10..20 to stream in_1, and packets 11..21 to stream in_2.
MP_EXPECT_OK(graph_.AddPacketToInputStream( for (int i = 10; i < 21; ++i) {
input_name, MakePacket<int>(n).At(Timestamp(n)))); MP_EXPECT_OK(graph_.AddPacketToInputStream("in_2", input_packets_[i + 1]));
}; MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[i]));
send_packet("in", 1); clock_->Sleep(absl::Microseconds(10000));
MP_EXPECT_OK(graph_.WaitUntilIdle()); }
EXPECT_EQ(allow, false);
EXPECT_EQ(TimestampValues(in_sampled_packets_), (std::vector<int64>{1}));
MP_EXPECT_OK(in_sampled_packets_[0].Consume<int>()); // Finish the graph run.
MP_EXPECT_OK(graph_.CloseAllPacketSources());
MP_EXPECT_OK(graph_.CloseAllInputStreams()); clock_->Sleep(absl::Microseconds(40000));
MP_EXPECT_OK(graph_.WaitUntilDone()); MP_EXPECT_OK(graph_.WaitUntilDone());
simulation_clock_->ThreadFinish();
// Validate the output.
// Packet input_packets_[4] is lost by the DropCalculator.
std::vector<Packet> expected_output = {
input_packets_[0], input_packets_[2], input_packets_[14],
input_packets_[17], input_packets_[19], input_packets_[20],
};
EXPECT_EQ(out_1_packets_, expected_output);
// Exactly the timestamps released by FlowLimiterCalculator for in_1_sampled.
std::vector<Packet> expected_output_2 = {
input_packets_[0], input_packets_[2], input_packets_[4],
input_packets_[14], input_packets_[17], input_packets_[19],
input_packets_[20],
};
EXPECT_EQ(out_2_packets, expected_output_2);
}
// Shows how FlowLimiterCalculator releases packets with max_in_queue 0.
// Shows how auxiliary input streams still work with max_in_queue 0.
// The processing time "sleep_time" is reduced from 22ms to 12ms to create
// the same frame rate as FlowLimiterCalculatorTest::TwoInputStreams.
TEST_F(FlowLimiterCalculatorTest, ZeroQueue) {
// Configure the test.
SetUpInputData();
SetUpSimulationClock();
CalculatorGraphConfig graph_config =
ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
input_stream: 'in_1'
input_stream: 'in_2'
node {
calculator: 'FlowLimiterCalculator'
input_side_packet: 'OPTIONS:limiter_options'
input_stream: 'in_1'
input_stream: 'in_2'
input_stream: 'FINISHED:out_1'
input_stream_info: { tag_index: 'FINISHED' back_edge: true }
output_stream: 'in_1_sampled'
output_stream: 'in_2_sampled'
output_stream: 'ALLOW:allow'
}
node {
calculator: 'SleepCalculator'
input_side_packet: 'WARMUP_TIME:warmup_time'
input_side_packet: 'SLEEP_TIME:sleep_time'
input_side_packet: 'CLOCK:clock'
input_stream: 'PACKET:in_1_sampled'
output_stream: 'PACKET:out_1_sampled'
}
node {
calculator: 'DropCalculator'
input_side_packet: "DROP_TIMESTAMPS:drop_timesamps"
input_stream: 'PACKET:out_1_sampled'
output_stream: 'PACKET:out_1'
}
)");
auto limiter_options = ParseTextProtoOrDie<FlowLimiterCalculatorOptions>(R"(
max_in_flight: 1
max_in_queue: 0
in_flight_timeout: 100000 # 100 ms
)");
std::map<std::string, Packet> side_packets = {
{"limiter_options",
MakePacket<FlowLimiterCalculatorOptions>(limiter_options)},
{"warmup_time", MakePacket<int64>(12000)},
{"sleep_time", MakePacket<int64>(12000)},
{"drop_timesamps", MakePacket<bool>(true)},
{"clock", MakePacket<mediapipe::Clock*>(clock_)},
};
// Start the graph.
MP_ASSERT_OK(graph_.Initialize(graph_config));
MP_EXPECT_OK(graph_.ObserveOutputStream("out_1", [this](Packet p) {
out_1_packets_.push_back(p);
return mediapipe::OkStatus();
}));
std::vector<Packet> out_2_packets;
MP_EXPECT_OK(graph_.ObserveOutputStream("in_2_sampled", [&](Packet p) {
out_2_packets.push_back(p);
return mediapipe::OkStatus();
}));
MP_EXPECT_OK(graph_.ObserveOutputStream("allow", [this](Packet p) {
allow_packets_.push_back(p);
return mediapipe::OkStatus();
}));
simulation_clock_->ThreadStart();
MP_ASSERT_OK(graph_.StartRun(side_packets));
// Add packets 0..9 to stream in_1, and packets 0..10 to stream in_2.
MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[0]));
clock_->Sleep(absl::Microseconds(10000));
for (int i = 1; i < 10; ++i) {
MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[i]));
MP_EXPECT_OK(graph_.AddPacketToInputStream("in_2", input_packets_[i - 1]));
clock_->Sleep(absl::Microseconds(10000));
}
// Add packets 10..20 to stream in_1, and packets 11..21 to stream in_2.
for (int i = 10; i < 21; ++i) {
MP_EXPECT_OK(graph_.AddPacketToInputStream("in_2", input_packets_[i + 1]));
MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[i]));
clock_->Sleep(absl::Microseconds(10000));
}
// Finish the graph run.
MP_EXPECT_OK(graph_.CloseAllPacketSources());
clock_->Sleep(absl::Microseconds(40000));
MP_EXPECT_OK(graph_.WaitUntilDone());
simulation_clock_->ThreadFinish();
// Validate the output.
// Packet input_packets_[4] is lost by the DropCalculator.
std::vector<Packet> expected_output = {
input_packets_[0], input_packets_[2], input_packets_[15],
input_packets_[17], input_packets_[19],
};
EXPECT_EQ(out_1_packets_, expected_output);
// Exactly the timestamps released by FlowLimiterCalculator for in_1_sampled.
std::vector<Packet> expected_output_2 = {
input_packets_[0], input_packets_[2], input_packets_[4],
input_packets_[15], input_packets_[17], input_packets_[19],
};
EXPECT_EQ(out_2_packets, expected_output_2);
} }
} // anonymous namespace } // anonymous namespace

View File

@ -82,7 +82,7 @@ class GateCalculator : public CalculatorBase {
public: public:
GateCalculator() {} GateCalculator() {}
static ::mediapipe::Status CheckAndInitAllowDisallowInputs( static mediapipe::Status CheckAndInitAllowDisallowInputs(
CalculatorContract* cc) { CalculatorContract* cc) {
bool input_via_side_packet = cc->InputSidePackets().HasTag("ALLOW") || bool input_via_side_packet = cc->InputSidePackets().HasTag("ALLOW") ||
cc->InputSidePackets().HasTag("DISALLOW"); cc->InputSidePackets().HasTag("DISALLOW");
@ -110,10 +110,10 @@ class GateCalculator : public CalculatorBase {
cc->Inputs().Tag("DISALLOW").Set<bool>(); cc->Inputs().Tag("DISALLOW").Set<bool>();
} }
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
RET_CHECK_OK(CheckAndInitAllowDisallowInputs(cc)); RET_CHECK_OK(CheckAndInitAllowDisallowInputs(cc));
const int num_data_streams = cc->Inputs().NumEntries(""); const int num_data_streams = cc->Inputs().NumEntries("");
@ -130,10 +130,10 @@ class GateCalculator : public CalculatorBase {
cc->Outputs().Tag("STATE_CHANGE").Set<bool>(); cc->Outputs().Tag("STATE_CHANGE").Set<bool>();
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) final { mediapipe::Status Open(CalculatorContext* cc) final {
use_side_packet_for_allow_disallow_ = false; use_side_packet_for_allow_disallow_ = false;
if (cc->InputSidePackets().HasTag("ALLOW")) { if (cc->InputSidePackets().HasTag("ALLOW")) {
use_side_packet_for_allow_disallow_ = true; use_side_packet_for_allow_disallow_ = true;
@ -153,10 +153,10 @@ class GateCalculator : public CalculatorBase {
const auto& options = cc->Options<::mediapipe::GateCalculatorOptions>(); const auto& options = cc->Options<::mediapipe::GateCalculatorOptions>();
empty_packets_as_allow_ = options.empty_packets_as_allow(); empty_packets_as_allow_ = options.empty_packets_as_allow();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) final { mediapipe::Status Process(CalculatorContext* cc) final {
bool allow = empty_packets_as_allow_; bool allow = empty_packets_as_allow_;
if (use_side_packet_for_allow_disallow_) { if (use_side_packet_for_allow_disallow_) {
allow = allow_by_side_packet_decision_; allow = allow_by_side_packet_decision_;
@ -195,7 +195,7 @@ class GateCalculator : public CalculatorBase {
cc->Outputs().Get("", i).Close(); cc->Outputs().Get("", i).Close();
} }
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
// Process data streams. // Process data streams.
@ -205,7 +205,7 @@ class GateCalculator : public CalculatorBase {
} }
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
private: private:

View File

@ -25,7 +25,7 @@ namespace {
class GateCalculatorTest : public ::testing::Test { class GateCalculatorTest : public ::testing::Test {
protected: protected:
// Helper to run a graph and return status. // Helper to run a graph and return status.
static ::mediapipe::Status RunGraph(const std::string& proto) { static mediapipe::Status RunGraph(const std::string& proto) {
auto runner = absl::make_unique<CalculatorRunner>( auto runner = absl::make_unique<CalculatorRunner>(
ParseTextProtoOrDie<CalculatorGraphConfig::Node>(proto)); ParseTextProtoOrDie<CalculatorGraphConfig::Node>(proto));
return runner->Run(); return runner->Run();

View File

@ -29,9 +29,7 @@ namespace mediapipe {
// received. // received.
// //
// This Calculator can be used with an ImmediateInputStreamHandler or with the // This Calculator can be used with an ImmediateInputStreamHandler or with the
// default ISH. Note that currently ImmediateInputStreamHandler seems to // default ISH.
// interfere with timestamp bound propagation, so it is better to use the
// default unless the immediate one is needed. (b/118387598)
// //
// This Calculator is designed to work with a Demux calculator such as // This Calculator is designed to work with a Demux calculator such as
// the RoundRobinDemuxCalculator. Therefore, packets from different // the RoundRobinDemuxCalculator. Therefore, packets from different
@ -45,17 +43,16 @@ class ImmediateMuxCalculator : public CalculatorBase {
public: public:
// This calculator combines any set of input streams into a single // This calculator combines any set of input streams into a single
// output stream. All input stream types must match the output stream type. // output stream. All input stream types must match the output stream type.
static ::mediapipe::Status GetContract(CalculatorContract* cc); static mediapipe::Status GetContract(CalculatorContract* cc);
// Passes any input packet to the output stream immediately, unless the // Passes any input packet to the output stream immediately, unless the
// packet timestamp is lower than a previously passed packet. // packet timestamp is lower than a previously passed packet.
::mediapipe::Status Process(CalculatorContext* cc) override; mediapipe::Status Process(CalculatorContext* cc) override;
::mediapipe::Status Open(CalculatorContext* cc) override; mediapipe::Status Open(CalculatorContext* cc) override;
}; };
REGISTER_CALCULATOR(ImmediateMuxCalculator); REGISTER_CALCULATOR(ImmediateMuxCalculator);
::mediapipe::Status ImmediateMuxCalculator::GetContract( mediapipe::Status ImmediateMuxCalculator::GetContract(CalculatorContract* cc) {
CalculatorContract* cc) {
RET_CHECK(cc->Outputs().NumEntries() >= 1 && cc->Outputs().NumEntries() <= 2) RET_CHECK(cc->Outputs().NumEntries() >= 1 && cc->Outputs().NumEntries() <= 2)
<< "This calculator produces only one or two output streams."; << "This calculator produces only one or two output streams.";
cc->Outputs().Index(0).SetAny(); cc->Outputs().Index(0).SetAny();
@ -65,15 +62,15 @@ REGISTER_CALCULATOR(ImmediateMuxCalculator);
for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { for (int i = 0; i < cc->Inputs().NumEntries(); ++i) {
cc->Inputs().Index(i).SetSameAs(&cc->Outputs().Index(0)); cc->Inputs().Index(i).SetSameAs(&cc->Outputs().Index(0));
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status ImmediateMuxCalculator::Open(CalculatorContext* cc) { mediapipe::Status ImmediateMuxCalculator::Open(CalculatorContext* cc) {
cc->SetOffset(TimestampDiff(0)); cc->SetOffset(TimestampDiff(0));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status ImmediateMuxCalculator::Process(CalculatorContext* cc) { mediapipe::Status ImmediateMuxCalculator::Process(CalculatorContext* cc) {
// Pass along the first packet, unless it has been superseded. // Pass along the first packet, unless it has been superseded.
for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { for (int i = 0; i < cc->Inputs().NumEntries(); ++i) {
const Packet& packet = cc->Inputs().Index(i).Value(); const Packet& packet = cc->Inputs().Index(i).Value();
@ -91,7 +88,7 @@ REGISTER_CALCULATOR(ImmediateMuxCalculator);
} }
} }
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
} // namespace mediapipe } // namespace mediapipe

View File

@ -289,19 +289,19 @@ TEST_F(ImmediateMuxCalculatorTest, SimultaneousTimestamps) {
} }
// A Calculator::Process callback function. // A Calculator::Process callback function.
typedef std::function<::mediapipe::Status(const InputStreamShardSet&, typedef std::function<mediapipe::Status(const InputStreamShardSet&,
OutputStreamShardSet*)> OutputStreamShardSet*)>
ProcessFunction; ProcessFunction;
// A testing callback function that passes through all packets. // A testing callback function that passes through all packets.
::mediapipe::Status PassThrough(const InputStreamShardSet& inputs, mediapipe::Status PassThrough(const InputStreamShardSet& inputs,
OutputStreamShardSet* outputs) { OutputStreamShardSet* outputs) {
for (int i = 0; i < inputs.NumEntries(); ++i) { for (int i = 0; i < inputs.NumEntries(); ++i) {
if (!inputs.Index(i).Value().IsEmpty()) { if (!inputs.Index(i).Value().IsEmpty()) {
outputs->Index(i).AddPacket(inputs.Index(i).Value()); outputs->Index(i).AddPacket(inputs.Index(i).Value());
} }
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
TEST_F(ImmediateMuxCalculatorTest, Demux) { TEST_F(ImmediateMuxCalculatorTest, Demux) {
@ -325,7 +325,7 @@ TEST_F(ImmediateMuxCalculatorTest, Demux) {
auto out_cb = [&](const Packet& p) { auto out_cb = [&](const Packet& p) {
absl::MutexLock lock(&out_mutex); absl::MutexLock lock(&out_mutex);
out_packets.push_back(p); out_packets.push_back(p);
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
}; };
auto wait_for = [&](std::function<bool()> cond) { auto wait_for = [&](std::function<bool()> cond) {
absl::MutexLock lock(&out_mutex); absl::MutexLock lock(&out_mutex);

View File

@ -35,24 +35,24 @@ class MakePairCalculator : public CalculatorBase {
MakePairCalculator() {} MakePairCalculator() {}
~MakePairCalculator() override {} ~MakePairCalculator() override {}
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
cc->Inputs().Index(0).SetAny(); cc->Inputs().Index(0).SetAny();
cc->Inputs().Index(1).SetAny(); cc->Inputs().Index(1).SetAny();
cc->Outputs().Index(0).Set<std::pair<Packet, Packet>>(); cc->Outputs().Index(0).Set<std::pair<Packet, Packet>>();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) override { mediapipe::Status Open(CalculatorContext* cc) override {
cc->SetOffset(TimestampDiff(0)); cc->SetOffset(TimestampDiff(0));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) override { mediapipe::Status Process(CalculatorContext* cc) override {
cc->Outputs().Index(0).Add( cc->Outputs().Index(0).Add(
new std::pair<Packet, Packet>(cc->Inputs().Index(0).Value(), new std::pair<Packet, Packet>(cc->Inputs().Index(0).Value(),
cc->Inputs().Index(1).Value()), cc->Inputs().Index(1).Value()),
cc->InputTimestamp()); cc->InputTimestamp());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
}; };
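A minimal usage sketch, assuming only the contract shown above (two untyped inputs, one std::pair<Packet, Packet> output); stream names and payload types are made up for illustration.

// Illustrative only: pair up two streams with CalculatorRunner.
CalculatorRunner runner(ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"(
  calculator: 'MakePairCalculator'
  input_stream: 'first'
  input_stream: 'second'
  output_stream: 'pair'
)"));
runner.MutableInputs()->Index(0).packets.push_back(
    MakePacket<int>(1).At(Timestamp(0)));
runner.MutableInputs()->Index(1).packets.push_back(
    MakePacket<std::string>("one").At(Timestamp(0)));
MP_ASSERT_OK(runner.Run());
// One output packet at the same timestamp, holding both input packets.
const auto& pair =
    runner.Outputs().Index(0).packets[0].Get<std::pair<Packet, Packet>>();
EXPECT_EQ(pair.first.Get<int>(), 1);
EXPECT_EQ(pair.second.Get<std::string>(), "one");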

View File

@ -33,34 +33,34 @@ class MatrixMultiplyCalculator : public CalculatorBase {
MatrixMultiplyCalculator() {} MatrixMultiplyCalculator() {}
~MatrixMultiplyCalculator() override {} ~MatrixMultiplyCalculator() override {}
static ::mediapipe::Status GetContract(CalculatorContract* cc); static mediapipe::Status GetContract(CalculatorContract* cc);
::mediapipe::Status Open(CalculatorContext* cc) override; mediapipe::Status Open(CalculatorContext* cc) override;
::mediapipe::Status Process(CalculatorContext* cc) override; mediapipe::Status Process(CalculatorContext* cc) override;
}; };
REGISTER_CALCULATOR(MatrixMultiplyCalculator); REGISTER_CALCULATOR(MatrixMultiplyCalculator);
// static // static
::mediapipe::Status MatrixMultiplyCalculator::GetContract( mediapipe::Status MatrixMultiplyCalculator::GetContract(
CalculatorContract* cc) { CalculatorContract* cc) {
cc->Inputs().Index(0).Set<Matrix>(); cc->Inputs().Index(0).Set<Matrix>();
cc->Outputs().Index(0).Set<Matrix>(); cc->Outputs().Index(0).Set<Matrix>();
cc->InputSidePackets().Index(0).Set<Matrix>(); cc->InputSidePackets().Index(0).Set<Matrix>();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status MatrixMultiplyCalculator::Open(CalculatorContext* cc) { mediapipe::Status MatrixMultiplyCalculator::Open(CalculatorContext* cc) {
// The output is at the same timestamp as the input. // The output is at the same timestamp as the input.
cc->SetOffset(TimestampDiff(0)); cc->SetOffset(TimestampDiff(0));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status MatrixMultiplyCalculator::Process(CalculatorContext* cc) { mediapipe::Status MatrixMultiplyCalculator::Process(CalculatorContext* cc) {
Matrix* multiplied = new Matrix(); Matrix* multiplied = new Matrix();
*multiplied = cc->InputSidePackets().Index(0).Get<Matrix>() * *multiplied = cc->InputSidePackets().Index(0).Get<Matrix>() *
cc->Inputs().Index(0).Get<Matrix>(); cc->Inputs().Index(0).Get<Matrix>();
cc->Outputs().Index(0).Add(multiplied, cc->InputTimestamp()); cc->Outputs().Index(0).Add(multiplied, cc->InputTimestamp());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
} // namespace mediapipe } // namespace mediapipe
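A minimal usage sketch under the contract above, assuming mediapipe::Matrix is the framework's Eigen float matrix; stream names are placeholders.

// Illustrative only: multiply each streamed Matrix by a constant side packet.
CalculatorRunner runner(ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"(
  calculator: 'MatrixMultiplyCalculator'
  input_stream: 'samples'
  input_side_packet: 'weights'
  output_stream: 'products'
)"));
Matrix weights = Matrix::Identity(2, 2) * 2.0f;  // constant left-hand factor
Matrix sample = Matrix::Ones(2, 3);
runner.MutableSidePackets()->Index(0) = MakePacket<Matrix>(weights);
runner.MutableInputs()->Index(0).packets.push_back(
    MakePacket<Matrix>(sample).At(Timestamp(0)));
MP_ASSERT_OK(runner.Run());
// The single output packet holds weights * sample (a 2x3 matrix of 2s) at
// the input timestamp.
EXPECT_EQ(runner.Outputs().Index(0).packets.size(), 1);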

View File

@ -46,10 +46,10 @@ class MatrixSubtractCalculator : public CalculatorBase {
MatrixSubtractCalculator() {} MatrixSubtractCalculator() {}
~MatrixSubtractCalculator() override {} ~MatrixSubtractCalculator() override {}
static ::mediapipe::Status GetContract(CalculatorContract* cc); static mediapipe::Status GetContract(CalculatorContract* cc);
::mediapipe::Status Open(CalculatorContext* cc) override; mediapipe::Status Open(CalculatorContext* cc) override;
::mediapipe::Status Process(CalculatorContext* cc) override; mediapipe::Status Process(CalculatorContext* cc) override;
private: private:
bool subtract_from_input_ = false; bool subtract_from_input_ = false;
@ -57,11 +57,11 @@ class MatrixSubtractCalculator : public CalculatorBase {
REGISTER_CALCULATOR(MatrixSubtractCalculator); REGISTER_CALCULATOR(MatrixSubtractCalculator);
// static // static
::mediapipe::Status MatrixSubtractCalculator::GetContract( mediapipe::Status MatrixSubtractCalculator::GetContract(
CalculatorContract* cc) { CalculatorContract* cc) {
if (cc->Inputs().NumEntries() != 1 || if (cc->Inputs().NumEntries() != 1 ||
cc->InputSidePackets().NumEntries() != 1) { cc->InputSidePackets().NumEntries() != 1) {
return ::mediapipe::InvalidArgumentError( return mediapipe::InvalidArgumentError(
"MatrixSubtractCalculator only accepts exactly one input stream and " "MatrixSubtractCalculator only accepts exactly one input stream and "
"one " "one "
"input side packet"); "input side packet");
@ -75,23 +75,23 @@ REGISTER_CALCULATOR(MatrixSubtractCalculator);
cc->Inputs().Tag("SUBTRAHEND").Set<Matrix>(); cc->Inputs().Tag("SUBTRAHEND").Set<Matrix>();
cc->InputSidePackets().Tag("MINUEND").Set<Matrix>(); cc->InputSidePackets().Tag("MINUEND").Set<Matrix>();
} else { } else {
return ::mediapipe::InvalidArgumentError( return mediapipe::InvalidArgumentError(
"Must specify exactly one minuend and one subtrahend."); "Must specify exactly one minuend and one subtrahend.");
} }
cc->Outputs().Index(0).Set<Matrix>(); cc->Outputs().Index(0).Set<Matrix>();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status MatrixSubtractCalculator::Open(CalculatorContext* cc) { mediapipe::Status MatrixSubtractCalculator::Open(CalculatorContext* cc) {
// The output is at the same timestamp as the input. // The output is at the same timestamp as the input.
cc->SetOffset(TimestampDiff(0)); cc->SetOffset(TimestampDiff(0));
if (cc->Inputs().HasTag("MINUEND")) { if (cc->Inputs().HasTag("MINUEND")) {
subtract_from_input_ = true; subtract_from_input_ = true;
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status MatrixSubtractCalculator::Process(CalculatorContext* cc) { mediapipe::Status MatrixSubtractCalculator::Process(CalculatorContext* cc) {
Matrix* subtracted = new Matrix(); Matrix* subtracted = new Matrix();
if (subtract_from_input_) { if (subtract_from_input_) {
const Matrix& input_matrix = cc->Inputs().Tag("MINUEND").Get<Matrix>(); const Matrix& input_matrix = cc->Inputs().Tag("MINUEND").Get<Matrix>();
@ -99,7 +99,7 @@ REGISTER_CALCULATOR(MatrixSubtractCalculator);
cc->InputSidePackets().Tag("SUBTRAHEND").Get<Matrix>(); cc->InputSidePackets().Tag("SUBTRAHEND").Get<Matrix>();
if (input_matrix.rows() != side_input_matrix.rows() || if (input_matrix.rows() != side_input_matrix.rows() ||
input_matrix.cols() != side_input_matrix.cols()) { input_matrix.cols() != side_input_matrix.cols()) {
return ::mediapipe::InvalidArgumentError( return mediapipe::InvalidArgumentError(
"Input matrix and the input side matrix must have the same " "Input matrix and the input side matrix must have the same "
"dimension."); "dimension.");
} }
@ -110,14 +110,14 @@ REGISTER_CALCULATOR(MatrixSubtractCalculator);
cc->InputSidePackets().Tag("MINUEND").Get<Matrix>(); cc->InputSidePackets().Tag("MINUEND").Get<Matrix>();
if (input_matrix.rows() != side_input_matrix.rows() || if (input_matrix.rows() != side_input_matrix.rows() ||
input_matrix.cols() != side_input_matrix.cols()) { input_matrix.cols() != side_input_matrix.cols()) {
return ::mediapipe::InvalidArgumentError( return mediapipe::InvalidArgumentError(
"Input matrix and the input side matrix must have the same " "Input matrix and the input side matrix must have the same "
"dimension."); "dimension.");
} }
*subtracted = side_input_matrix - input_matrix; *subtracted = side_input_matrix - input_matrix;
} }
cc->Outputs().Index(0).Add(subtracted, cc->InputTimestamp()); cc->Outputs().Index(0).Add(subtracted, cc->InputTimestamp());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
} // namespace mediapipe } // namespace mediapipe
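A minimal usage sketch of the MINUEND/SUBTRAHEND wiring above, with the streamed matrix as minuend and the side packet as subtrahend (again assuming mediapipe::Matrix is the Eigen float matrix; names are placeholders).

// Illustrative only: subtract a constant side-packet Matrix from each input.
CalculatorRunner runner(ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"(
  calculator: 'MatrixSubtractCalculator'
  input_stream: 'MINUEND:samples'
  input_side_packet: 'SUBTRAHEND:mean'
  output_stream: 'centered'
)"));
Matrix mean = Matrix::Ones(2, 2);
Matrix sample = Matrix::Ones(2, 2) * 3.0f;
runner.MutableSidePackets()->Tag("SUBTRAHEND") = MakePacket<Matrix>(mean);
runner.MutableInputs()->Tag("MINUEND").packets.push_back(
    MakePacket<Matrix>(sample).At(Timestamp(0)));
MP_ASSERT_OK(runner.Run());
// Output = sample - mean, i.e. a 2x2 matrix of 2s; mismatched dimensions
// would instead fail with InvalidArgumentError as checked above.
EXPECT_EQ(runner.Outputs().Index(0).packets.size(), 1);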

View File

@ -42,30 +42,30 @@ namespace mediapipe {
// } // }
class MatrixToVectorCalculator : public CalculatorBase { class MatrixToVectorCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
cc->Inputs().Index(0).Set<Matrix>( cc->Inputs().Index(0).Set<Matrix>(
// Input Packet containing a Matrix. // Input Packet containing a Matrix.
); );
cc->Outputs().Index(0).Set<std::vector<float>>( cc->Outputs().Index(0).Set<std::vector<float>>(
// Output Packet containing a vector, one for each input Packet. // Output Packet containing a vector, one for each input Packet.
); );
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) override; mediapipe::Status Open(CalculatorContext* cc) override;
// Outputs a packet containing a vector for each input packet. // Outputs a packet containing a vector for each input packet.
::mediapipe::Status Process(CalculatorContext* cc) override; mediapipe::Status Process(CalculatorContext* cc) override;
}; };
REGISTER_CALCULATOR(MatrixToVectorCalculator); REGISTER_CALCULATOR(MatrixToVectorCalculator);
::mediapipe::Status MatrixToVectorCalculator::Open(CalculatorContext* cc) { mediapipe::Status MatrixToVectorCalculator::Open(CalculatorContext* cc) {
// Inform the framework that we don't alter timestamps. // Inform the framework that we don't alter timestamps.
cc->SetOffset(mediapipe::TimestampDiff(0)); cc->SetOffset(mediapipe::TimestampDiff(0));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status MatrixToVectorCalculator::Process(CalculatorContext* cc) { mediapipe::Status MatrixToVectorCalculator::Process(CalculatorContext* cc) {
const Matrix& input = cc->Inputs().Index(0).Get<Matrix>(); const Matrix& input = cc->Inputs().Index(0).Get<Matrix>();
auto output = absl::make_unique<std::vector<float>>(); auto output = absl::make_unique<std::vector<float>>();
@ -77,7 +77,7 @@ REGISTER_CALCULATOR(MatrixToVectorCalculator);
output_as_matrix = input; output_as_matrix = input;
cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp()); cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
} // namespace mediapipe } // namespace mediapipe
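A minimal usage sketch of the contract above: one std::vector<float> per input Matrix packet, holding one float per matrix element (names are placeholders).

// Illustrative only: flatten a Matrix packet into a std::vector<float>.
CalculatorRunner runner(ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"(
  calculator: 'MatrixToVectorCalculator'
  input_stream: 'matrix'
  output_stream: 'vector'
)"));
runner.MutableInputs()->Index(0).packets.push_back(
    MakePacket<Matrix>(Matrix::Ones(2, 3)).At(Timestamp(0)));
MP_ASSERT_OK(runner.Run());
const auto& values =
    runner.Outputs().Index(0).packets[0].Get<std::vector<float>>();
EXPECT_EQ(values.size(), 6);  // rows * cols, one output packet per input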

View File

@ -43,7 +43,7 @@ namespace mediapipe {
// //
class MergeCalculator : public CalculatorBase { class MergeCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
RET_CHECK_GT(cc->Inputs().NumEntries(), 0) RET_CHECK_GT(cc->Inputs().NumEntries(), 0)
<< "Needs at least one input stream"; << "Needs at least one input stream";
RET_CHECK_EQ(cc->Outputs().NumEntries(), 1); RET_CHECK_EQ(cc->Outputs().NumEntries(), 1);
@ -60,29 +60,29 @@ class MergeCalculator : public CalculatorBase {
} }
cc->Outputs().Index(0).SetAny(); cc->Outputs().Index(0).SetAny();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) final { mediapipe::Status Open(CalculatorContext* cc) final {
cc->SetOffset(TimestampDiff(0)); cc->SetOffset(TimestampDiff(0));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) final { mediapipe::Status Process(CalculatorContext* cc) final {
// Output the packet from the first input stream with a packet ready at this // Output the packet from the first input stream with a packet ready at this
// timestamp. // timestamp.
for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { for (int i = 0; i < cc->Inputs().NumEntries(); ++i) {
if (!cc->Inputs().Index(i).IsEmpty()) { if (!cc->Inputs().Index(i).IsEmpty()) {
cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(i).Value()); cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(i).Value());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
} }
LOG(WARNING) << "Empty input packets at timestamp " LOG(WARNING) << "Empty input packets at timestamp "
<< cc->InputTimestamp().Value(); << cc->InputTimestamp().Value();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
}; };
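A minimal usage sketch of the behavior described above (the first input stream with a packet at the current timestamp wins); stream names and values are made up.

// Illustrative only: forward whichever input has data first at each timestamp.
CalculatorRunner runner(ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"(
  calculator: 'MergeCalculator'
  input_stream: 'primary'
  input_stream: 'fallback'
  output_stream: 'merged'
)"));
// At t=0 only 'fallback' has a packet, so it is forwarded.
runner.MutableInputs()->Index(1).packets.push_back(
    MakePacket<int>(7).At(Timestamp(0)));
// At t=1 both streams have packets; 'primary', the first input stream, wins.
runner.MutableInputs()->Index(0).packets.push_back(
    MakePacket<int>(1).At(Timestamp(1)));
runner.MutableInputs()->Index(1).packets.push_back(
    MakePacket<int>(2).At(Timestamp(1)));
MP_ASSERT_OK(runner.Run());
ASSERT_EQ(runner.Outputs().Index(0).packets.size(), 2);
EXPECT_EQ(runner.Outputs().Index(0).packets[0].Get<int>(), 7);
EXPECT_EQ(runner.Outputs().Index(0).packets[1].Get<int>(), 1);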

View File

@ -36,7 +36,7 @@ constexpr char kInputTag[] = "INPUT";
// with DefaultInputStreamHandler. // with DefaultInputStreamHandler.
class MuxCalculator : public CalculatorBase { class MuxCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status CheckAndInitAllowDisallowInputs( static mediapipe::Status CheckAndInitAllowDisallowInputs(
CalculatorContract* cc) { CalculatorContract* cc) {
RET_CHECK(cc->Inputs().HasTag(kSelectTag) ^ RET_CHECK(cc->Inputs().HasTag(kSelectTag) ^
cc->InputSidePackets().HasTag(kSelectTag)); cc->InputSidePackets().HasTag(kSelectTag));
@ -45,10 +45,10 @@ class MuxCalculator : public CalculatorBase {
} else { } else {
cc->InputSidePackets().Tag(kSelectTag).Set<int>(); cc->InputSidePackets().Tag(kSelectTag).Set<int>();
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
RET_CHECK_OK(CheckAndInitAllowDisallowInputs(cc)); RET_CHECK_OK(CheckAndInitAllowDisallowInputs(cc));
CollectionItemId data_input_id = cc->Inputs().BeginId(kInputTag); CollectionItemId data_input_id = cc->Inputs().BeginId(kInputTag);
PacketType* data_input0 = &cc->Inputs().Get(data_input_id); PacketType* data_input0 = &cc->Inputs().Get(data_input_id);
@ -64,10 +64,10 @@ class MuxCalculator : public CalculatorBase {
MediaPipeOptions options; MediaPipeOptions options;
cc->SetInputStreamHandlerOptions(options); cc->SetInputStreamHandlerOptions(options);
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) final { mediapipe::Status Open(CalculatorContext* cc) final {
use_side_packet_select_ = false; use_side_packet_select_ = false;
if (cc->InputSidePackets().HasTag(kSelectTag)) { if (cc->InputSidePackets().HasTag(kSelectTag)) {
use_side_packet_select_ = true; use_side_packet_select_ = true;
@ -79,10 +79,10 @@ class MuxCalculator : public CalculatorBase {
num_data_inputs_ = cc->Inputs().NumEntries(kInputTag); num_data_inputs_ = cc->Inputs().NumEntries(kInputTag);
output_ = cc->Outputs().GetId("OUTPUT", 0); output_ = cc->Outputs().GetId("OUTPUT", 0);
cc->SetOffset(TimestampDiff(0)); cc->SetOffset(TimestampDiff(0));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) final { mediapipe::Status Process(CalculatorContext* cc) final {
int select = use_side_packet_select_ int select = use_side_packet_select_
? selected_index_ ? selected_index_
: cc->Inputs().Get(select_input_).Get<int>(); : cc->Inputs().Get(select_input_).Get<int>();
@ -91,7 +91,7 @@ class MuxCalculator : public CalculatorBase {
cc->Outputs().Get(output_).AddPacket( cc->Outputs().Get(output_).AddPacket(
cc->Inputs().Get(data_input_base_ + select).Value()); cc->Inputs().Get(data_input_base_ + select).Value());
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
private: private:

@ -134,10 +134,9 @@ void RunGraph(const std::string& graph_config_proto,
const std::string& input_stream_name, int num_input_packets, const std::string& input_stream_name, int num_input_packets,
std::function<Packet(int)> input_fn, std::function<Packet(int)> input_fn,
const std::string& output_stream_name, const std::string& output_stream_name,
std::function<::mediapipe::Status(const Packet&)> output_fn) { std::function<mediapipe::Status(const Packet&)> output_fn) {
CalculatorGraphConfig config = CalculatorGraphConfig config =
::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>( mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(graph_config_proto);
graph_config_proto);
CalculatorGraph graph; CalculatorGraph graph;
MP_ASSERT_OK(graph.Initialize(config)); MP_ASSERT_OK(graph.Initialize(config));
MP_ASSERT_OK(graph.ObserveOutputStream(output_stream_name, output_fn)); MP_ASSERT_OK(graph.ObserveOutputStream(output_stream_name, output_fn));
@ -166,9 +165,9 @@ TEST(MuxCalculatorTest, InputStreamSelector_DefaultInputStreamHandler) {
// Output and handling. // Output and handling.
std::vector<int> output; std::vector<int> output;
// This function collects the output from the packet. // This function collects the output from the packet.
auto output_fn = [&output](const Packet& p) -> ::mediapipe::Status { auto output_fn = [&output](const Packet& p) -> mediapipe::Status {
output.push_back(p.Get<int>()); output.push_back(p.Get<int>());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
}; };
RunGraph(kTestGraphConfig1, {}, kInputName, input_packets.size(), input_fn, RunGraph(kTestGraphConfig1, {}, kInputName, input_packets.size(), input_fn,
@ -192,9 +191,9 @@ TEST(MuxCalculatorTest, InputSidePacketSelector_DefaultInputStreamHandler) {
// Output and handling. // Output and handling.
std::vector<int> output; std::vector<int> output;
// This function collects the output from the packet. // This function collects the output from the packet.
auto output_fn = [&output](const Packet& p) -> ::mediapipe::Status { auto output_fn = [&output](const Packet& p) -> mediapipe::Status {
output.push_back(p.Get<int>()); output.push_back(p.Get<int>());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
}; };
RunGraph(kTestGraphConfig2, {{kInputSelector, MakePacket<int>(0)}}, RunGraph(kTestGraphConfig2, {{kInputSelector, MakePacket<int>(0)}},
@ -226,9 +225,9 @@ TEST(MuxCalculatorTest, InputStreamSelector_MuxInputStreamHandler) {
// Output and handling. // Output and handling.
std::vector<int> output; std::vector<int> output;
// This function collects the output from the packet. // This function collects the output from the packet.
auto output_fn = [&output](const Packet& p) -> ::mediapipe::Status { auto output_fn = [&output](const Packet& p) -> mediapipe::Status {
output.push_back(p.Get<int>()); output.push_back(p.Get<int>());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
}; };
RunGraph(kTestGraphConfig3, {}, kInputName, input_packets.size(), input_fn, RunGraph(kTestGraphConfig3, {}, kInputName, input_packets.size(), input_fn,
@ -252,7 +251,7 @@ constexpr char kDualInputGraphConfig[] = R"proto(
TEST(MuxCalculatorTest, DiscardSkippedInputs_MuxInputStreamHandler) { TEST(MuxCalculatorTest, DiscardSkippedInputs_MuxInputStreamHandler) {
CalculatorGraphConfig config = CalculatorGraphConfig config =
::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>( mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(
kDualInputGraphConfig); kDualInputGraphConfig);
CalculatorGraph graph; CalculatorGraph graph;
MP_ASSERT_OK(graph.Initialize(config)); MP_ASSERT_OK(graph.Initialize(config));
@ -261,7 +260,7 @@ TEST(MuxCalculatorTest, DiscardSkippedInputs_MuxInputStreamHandler) {
MP_ASSERT_OK( MP_ASSERT_OK(
graph.ObserveOutputStream("test_output", [&output](const Packet& p) { graph.ObserveOutputStream("test_output", [&output](const Packet& p) {
output = p.Get<std::shared_ptr<int>>(); output = p.Get<std::shared_ptr<int>>();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
})); }));
MP_ASSERT_OK(graph.StartRun({})); MP_ASSERT_OK(graph.StartRun({}));

@ -45,17 +45,17 @@ namespace mediapipe {
// packet_inner_join_calculator.cc: Don't output unless all inputs are new. // packet_inner_join_calculator.cc: Don't output unless all inputs are new.
class PacketClonerCalculator : public CalculatorBase { class PacketClonerCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
const int tick_signal_index = cc->Inputs().NumEntries() - 1; const int tick_signal_index = cc->Inputs().NumEntries() - 1;
for (int i = 0; i < tick_signal_index; ++i) { for (int i = 0; i < tick_signal_index; ++i) {
cc->Inputs().Index(i).SetAny(); cc->Inputs().Index(i).SetAny();
cc->Outputs().Index(i).SetSameAs(&cc->Inputs().Index(i)); cc->Outputs().Index(i).SetSameAs(&cc->Inputs().Index(i));
} }
cc->Inputs().Index(tick_signal_index).SetAny(); cc->Inputs().Index(tick_signal_index).SetAny();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) final { mediapipe::Status Open(CalculatorContext* cc) final {
// Load options. // Load options.
const auto calculator_options = const auto calculator_options =
cc->Options<mediapipe::PacketClonerCalculatorOptions>(); cc->Options<mediapipe::PacketClonerCalculatorOptions>();
@ -71,10 +71,10 @@ class PacketClonerCalculator : public CalculatorBase {
cc->Outputs().Index(i).SetHeader(cc->Inputs().Index(i).Header()); cc->Outputs().Index(i).SetHeader(cc->Inputs().Index(i).Header());
} }
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) final { mediapipe::Status Process(CalculatorContext* cc) final {
// Store input signals. // Store input signals.
for (int i = 0; i < tick_signal_index_; ++i) { for (int i = 0; i < tick_signal_index_; ++i) {
if (!cc->Inputs().Index(i).Value().IsEmpty()) { if (!cc->Inputs().Index(i).Value().IsEmpty()) {
@ -88,7 +88,7 @@ class PacketClonerCalculator : public CalculatorBase {
// Return if one of the input is null. // Return if one of the input is null.
for (int i = 0; i < tick_signal_index_; ++i) { for (int i = 0; i < tick_signal_index_; ++i) {
if (current_[i].IsEmpty()) { if (current_[i].IsEmpty()) {
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
} }
} }
@ -103,7 +103,7 @@ class PacketClonerCalculator : public CalculatorBase {
} }
} }
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
private: private:

@ -34,10 +34,10 @@ namespace mediapipe {
// packet_cloner_calculator.cc: Repeats last-seen packets from empty inputs. // packet_cloner_calculator.cc: Repeats last-seen packets from empty inputs.
class PacketInnerJoinCalculator : public CalculatorBase { class PacketInnerJoinCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc); static mediapipe::Status GetContract(CalculatorContract* cc);
::mediapipe::Status Open(CalculatorContext* cc) override; mediapipe::Status Open(CalculatorContext* cc) override;
::mediapipe::Status Process(CalculatorContext* cc) override; mediapipe::Status Process(CalculatorContext* cc) override;
private: private:
int num_streams_; int num_streams_;
@ -45,7 +45,7 @@ class PacketInnerJoinCalculator : public CalculatorBase {
REGISTER_CALCULATOR(PacketInnerJoinCalculator); REGISTER_CALCULATOR(PacketInnerJoinCalculator);
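// Illustrative node config (a sketch; stream names are hypothetical):
// node {
//   calculator: "PacketInnerJoinCalculator"
//   input_stream: "detections"
//   input_stream: "tracking_state"
//   output_stream: "joined_detections"
//   output_stream: "joined_tracking_state"
// }
// Packets are forwarded only at timestamps where every input stream has a
// packet; otherwise nothing is emitted for that timestamp.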
::mediapipe::Status PacketInnerJoinCalculator::GetContract( mediapipe::Status PacketInnerJoinCalculator::GetContract(
CalculatorContract* cc) { CalculatorContract* cc) {
RET_CHECK(cc->Inputs().NumEntries() == cc->Outputs().NumEntries()) RET_CHECK(cc->Inputs().NumEntries() == cc->Outputs().NumEntries())
<< "The number of input and output streams must match."; << "The number of input and output streams must match.";
@ -54,25 +54,25 @@ REGISTER_CALCULATOR(PacketInnerJoinCalculator);
cc->Inputs().Index(i).SetAny(); cc->Inputs().Index(i).SetAny();
cc->Outputs().Index(i).SetSameAs(&cc->Inputs().Index(i)); cc->Outputs().Index(i).SetSameAs(&cc->Inputs().Index(i));
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status PacketInnerJoinCalculator::Open(CalculatorContext* cc) { mediapipe::Status PacketInnerJoinCalculator::Open(CalculatorContext* cc) {
num_streams_ = cc->Inputs().NumEntries(); num_streams_ = cc->Inputs().NumEntries();
cc->SetOffset(TimestampDiff(0)); cc->SetOffset(TimestampDiff(0));
return mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status PacketInnerJoinCalculator::Process(CalculatorContext* cc) { mediapipe::Status PacketInnerJoinCalculator::Process(CalculatorContext* cc) {
for (int i = 0; i < num_streams_; ++i) { for (int i = 0; i < num_streams_; ++i) {
if (cc->Inputs().Index(i).Value().IsEmpty()) { if (cc->Inputs().Index(i).Value().IsEmpty()) {
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
} }
for (int i = 0; i < num_streams_; ++i) { for (int i = 0; i < num_streams_; ++i) {
cc->Outputs().Index(i).AddPacket(cc->Inputs().Index(i).Value()); cc->Outputs().Index(i).AddPacket(cc->Inputs().Index(i).Value());
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
} // namespace mediapipe } // namespace mediapipe

@ -57,26 +57,26 @@ namespace mediapipe {
// } // }
class PacketPresenceCalculator : public CalculatorBase { class PacketPresenceCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
cc->Inputs().Tag("PACKET").SetAny(); cc->Inputs().Tag("PACKET").SetAny();
cc->Outputs().Tag("PRESENCE").Set<bool>(); cc->Outputs().Tag("PRESENCE").Set<bool>();
// Process() function is invoked in response to input stream timestamp // Process() function is invoked in response to input stream timestamp
// bound updates. // bound updates.
cc->SetProcessTimestampBounds(true); cc->SetProcessTimestampBounds(true);
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) override { mediapipe::Status Open(CalculatorContext* cc) override {
cc->SetOffset(TimestampDiff(0)); cc->SetOffset(TimestampDiff(0));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) final { mediapipe::Status Process(CalculatorContext* cc) final {
cc->Outputs() cc->Outputs()
.Tag("PRESENCE") .Tag("PRESENCE")
.AddPacket(MakePacket<bool>(!cc->Inputs().Tag("PACKET").IsEmpty()) .AddPacket(MakePacket<bool>(!cc->Inputs().Tag("PACKET").IsEmpty())
.At(cc->InputTimestamp())); .At(cc->InputTimestamp()));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
}; };
REGISTER_CALCULATOR(PacketPresenceCalculator); REGISTER_CALCULATOR(PacketPresenceCalculator);

@ -47,7 +47,7 @@ TimestampDiff TimestampDiffFromSeconds(double seconds) {
} }
} // namespace } // namespace
::mediapipe::Status PacketResamplerCalculator::GetContract( mediapipe::Status PacketResamplerCalculator::GetContract(
CalculatorContract* cc) { CalculatorContract* cc) {
const auto& resampler_options = const auto& resampler_options =
cc->Options<PacketResamplerCalculatorOptions>(); cc->Options<PacketResamplerCalculatorOptions>();
@ -78,10 +78,10 @@ TimestampDiff TimestampDiffFromSeconds(double seconds) {
RET_CHECK(cc->InputSidePackets().HasTag("SEED")); RET_CHECK(cc->InputSidePackets().HasTag("SEED"));
cc->InputSidePackets().Tag("SEED").Set<std::string>(); cc->InputSidePackets().Tag("SEED").Set<std::string>();
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status PacketResamplerCalculator::Open(CalculatorContext* cc) { mediapipe::Status PacketResamplerCalculator::Open(CalculatorContext* cc) {
const auto resampler_options = const auto resampler_options =
tool::RetrieveOptions(cc->Options<PacketResamplerCalculatorOptions>(), tool::RetrieveOptions(cc->Options<PacketResamplerCalculatorOptions>(),
cc->InputSidePackets(), "OPTIONS"); cc->InputSidePackets(), "OPTIONS");
@ -156,8 +156,8 @@ TimestampDiff TimestampDiffFromSeconds(double seconds) {
const auto& seed = cc->InputSidePackets().Tag("SEED").Get<std::string>(); const auto& seed = cc->InputSidePackets().Tag("SEED").Get<std::string>();
random_ = CreateSecureRandom(seed); random_ = CreateSecureRandom(seed);
if (random_ == nullptr) { if (random_ == nullptr) {
return ::mediapipe::Status( return mediapipe::Status(
::mediapipe::StatusCode::kInvalidArgument, mediapipe::StatusCode::kInvalidArgument,
"SecureRandom is not available. With \"jitter\" specified, " "SecureRandom is not available. With \"jitter\" specified, "
"PacketResamplerCalculator processing cannot proceed."); "PacketResamplerCalculator processing cannot proceed.");
} }
@ -165,17 +165,17 @@ TimestampDiff TimestampDiffFromSeconds(double seconds) {
} }
packet_reservoir_ = packet_reservoir_ =
std::make_unique<PacketReservoir>(packet_reservoir_random_.get()); std::make_unique<PacketReservoir>(packet_reservoir_random_.get());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status PacketResamplerCalculator::Process(CalculatorContext* cc) { mediapipe::Status PacketResamplerCalculator::Process(CalculatorContext* cc) {
if (cc->InputTimestamp() == Timestamp::PreStream() && if (cc->InputTimestamp() == Timestamp::PreStream() &&
cc->Inputs().UsesTags() && cc->Inputs().HasTag("VIDEO_HEADER") && cc->Inputs().UsesTags() && cc->Inputs().HasTag("VIDEO_HEADER") &&
!cc->Inputs().Tag("VIDEO_HEADER").IsEmpty()) { !cc->Inputs().Tag("VIDEO_HEADER").IsEmpty()) {
video_header_ = cc->Inputs().Tag("VIDEO_HEADER").Get<VideoHeader>(); video_header_ = cc->Inputs().Tag("VIDEO_HEADER").Get<VideoHeader>();
video_header_.frame_rate = frame_rate_; video_header_.frame_rate = frame_rate_;
if (cc->Inputs().Get(input_data_id_).IsEmpty()) { if (cc->Inputs().Get(input_data_id_).IsEmpty()) {
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
} }
if (jitter_ != 0.0 && random_ != nullptr) { if (jitter_ != 0.0 && random_ != nullptr) {
@ -192,7 +192,7 @@ TimestampDiff TimestampDiffFromSeconds(double seconds) {
MP_RETURN_IF_ERROR(ProcessWithoutJitter(cc)); MP_RETURN_IF_ERROR(ProcessWithoutJitter(cc));
} }
last_packet_ = cc->Inputs().Get(input_data_id_).Value(); last_packet_ = cc->Inputs().Get(input_data_id_).Value();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
void PacketResamplerCalculator::InitializeNextOutputTimestampWithJitter() { void PacketResamplerCalculator::InitializeNextOutputTimestampWithJitter() {
@ -229,7 +229,7 @@ void PacketResamplerCalculator::UpdateNextOutputTimestampWithJitter() {
((1.0 - jitter_) + 2.0 * jitter_ * random_->RandFloat()); ((1.0 - jitter_) + 2.0 * jitter_ * random_->RandFloat());
} }
::mediapipe::Status PacketResamplerCalculator::ProcessWithJitter( mediapipe::Status PacketResamplerCalculator::ProcessWithJitter(
CalculatorContext* cc) { CalculatorContext* cc) {
RET_CHECK_GT(cc->InputTimestamp(), Timestamp::PreStream()); RET_CHECK_GT(cc->InputTimestamp(), Timestamp::PreStream());
RET_CHECK_NE(jitter_, 0.0); RET_CHECK_NE(jitter_, 0.0);
@ -243,7 +243,7 @@ void PacketResamplerCalculator::UpdateNextOutputTimestampWithJitter() {
cc->Inputs().Get(input_data_id_).Value().At(next_output_timestamp_)); cc->Inputs().Get(input_data_id_).Value().At(next_output_timestamp_));
UpdateNextOutputTimestampWithJitter(); UpdateNextOutputTimestampWithJitter();
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
if (frame_time_usec_ < if (frame_time_usec_ <
@ -267,10 +267,10 @@ void PacketResamplerCalculator::UpdateNextOutputTimestampWithJitter() {
.At(next_output_timestamp_)); .At(next_output_timestamp_));
UpdateNextOutputTimestampWithJitter(); UpdateNextOutputTimestampWithJitter();
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status PacketResamplerCalculator::ProcessWithoutJitter( mediapipe::Status PacketResamplerCalculator::ProcessWithoutJitter(
CalculatorContext* cc) { CalculatorContext* cc) {
RET_CHECK_GT(cc->InputTimestamp(), Timestamp::PreStream()); RET_CHECK_GT(cc->InputTimestamp(), Timestamp::PreStream());
RET_CHECK_EQ(jitter_, 0.0); RET_CHECK_EQ(jitter_, 0.0);
@ -333,12 +333,12 @@ void PacketResamplerCalculator::UpdateNextOutputTimestampWithJitter() {
.Get(output_data_id_) .Get(output_data_id_)
.SetNextTimestampBound(PeriodIndexToTimestamp(period_count_)); .SetNextTimestampBound(PeriodIndexToTimestamp(period_count_));
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status PacketResamplerCalculator::Close(CalculatorContext* cc) { mediapipe::Status PacketResamplerCalculator::Close(CalculatorContext* cc) {
if (!cc->GraphStatus().ok()) { if (!cc->GraphStatus().ok()) {
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
// Emit the last packet received if we have at least one packet, but // Emit the last packet received if we have at least one packet, but
// haven't sent anything for its period. // haven't sent anything for its period.
@ -350,7 +350,7 @@ void PacketResamplerCalculator::UpdateNextOutputTimestampWithJitter() {
if (!packet_reservoir_->IsEmpty()) { if (!packet_reservoir_->IsEmpty()) {
OutputWithinLimits(cc, packet_reservoir_->GetSample()); OutputWithinLimits(cc, packet_reservoir_->GetSample());
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
Timestamp PacketResamplerCalculator::PeriodIndexToTimestamp(int64 index) const { Timestamp PacketResamplerCalculator::PeriodIndexToTimestamp(int64 index) const {

@ -99,11 +99,11 @@ class PacketReservoir {
// packet_downsampler_calculator.cc: skips packets regardless of timestamps. // packet_downsampler_calculator.cc: skips packets regardless of timestamps.
class PacketResamplerCalculator : public CalculatorBase { class PacketResamplerCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc); static mediapipe::Status GetContract(CalculatorContract* cc);
::mediapipe::Status Open(CalculatorContext* cc) override; mediapipe::Status Open(CalculatorContext* cc) override;
::mediapipe::Status Close(CalculatorContext* cc) override; mediapipe::Status Close(CalculatorContext* cc) override;
::mediapipe::Status Process(CalculatorContext* cc) override; mediapipe::Status Process(CalculatorContext* cc) override;
private: private:
// Calculates the first sampled timestamp that incorporates a jittering // Calculates the first sampled timestamp that incorporates a jittering
@ -113,10 +113,10 @@ class PacketResamplerCalculator : public CalculatorBase {
void UpdateNextOutputTimestampWithJitter(); void UpdateNextOutputTimestampWithJitter();
// Logic for Process() when jitter_ != 0.0. // Logic for Process() when jitter_ != 0.0.
::mediapipe::Status ProcessWithJitter(CalculatorContext* cc); mediapipe::Status ProcessWithJitter(CalculatorContext* cc);
// Logic for Process() when jitter_ == 0.0. // Logic for Process() when jitter_ == 0.0.
::mediapipe::Status ProcessWithoutJitter(CalculatorContext* cc); mediapipe::Status ProcessWithoutJitter(CalculatorContext* cc);
// Given the current count of periods that have passed, this returns // Given the current count of periods that have passed, this returns
// the next valid timestamp of the middle point of the next period: // the next valid timestamp of the middle point of the next period:

@ -90,7 +90,7 @@ class PacketThinnerCalculator : public CalculatorBase {
PacketThinnerCalculator() {} PacketThinnerCalculator() {}
~PacketThinnerCalculator() override {} ~PacketThinnerCalculator() override {}
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
if (cc->InputSidePackets().HasTag(kOptionsTag)) { if (cc->InputSidePackets().HasTag(kOptionsTag)) {
cc->InputSidePackets().Tag(kOptionsTag).Set<CalculatorOptions>(); cc->InputSidePackets().Tag(kOptionsTag).Set<CalculatorOptions>();
} }
@ -99,21 +99,21 @@ class PacketThinnerCalculator : public CalculatorBase {
if (cc->InputSidePackets().HasTag(kPeriodTag)) { if (cc->InputSidePackets().HasTag(kPeriodTag)) {
cc->InputSidePackets().Tag(kPeriodTag).Set<int64>(); cc->InputSidePackets().Tag(kPeriodTag).Set<int64>();
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) override; mediapipe::Status Open(CalculatorContext* cc) override;
::mediapipe::Status Close(CalculatorContext* cc) override; mediapipe::Status Close(CalculatorContext* cc) override;
::mediapipe::Status Process(CalculatorContext* cc) override { mediapipe::Status Process(CalculatorContext* cc) override {
if (cc->InputTimestamp() < start_time_) { if (cc->InputTimestamp() < start_time_) {
return ::mediapipe::OkStatus(); // Drop packets before start_time_. return mediapipe::OkStatus(); // Drop packets before start_time_.
} else if (cc->InputTimestamp() >= end_time_) { } else if (cc->InputTimestamp() >= end_time_) {
if (!cc->Outputs().Index(0).IsClosed()) { if (!cc->Outputs().Index(0).IsClosed()) {
cc->Outputs() cc->Outputs()
.Index(0) .Index(0)
.Close(); // No more Packets will be output after end_time_. .Close(); // No more Packets will be output after end_time_.
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} else { } else {
return thinner_type_ == PacketThinnerCalculatorOptions::ASYNC return thinner_type_ == PacketThinnerCalculatorOptions::ASYNC
? AsyncThinnerProcess(cc) ? AsyncThinnerProcess(cc)
@ -123,8 +123,8 @@ class PacketThinnerCalculator : public CalculatorBase {
private: private:
// Implementation of ASYNC and SYNC versions of thinner algorithm. // Implementation of ASYNC and SYNC versions of thinner algorithm.
::mediapipe::Status AsyncThinnerProcess(CalculatorContext* cc); mediapipe::Status AsyncThinnerProcess(CalculatorContext* cc);
::mediapipe::Status SyncThinnerProcess(CalculatorContext* cc); mediapipe::Status SyncThinnerProcess(CalculatorContext* cc);
// Cached option. // Cached option.
PacketThinnerCalculatorOptions::ThinnerType thinner_type_; PacketThinnerCalculatorOptions::ThinnerType thinner_type_;
@ -153,7 +153,7 @@ namespace {
TimestampDiff abs(TimestampDiff t) { return t < 0 ? -t : t; } TimestampDiff abs(TimestampDiff t) { return t < 0 ? -t : t; }
} // namespace } // namespace
::mediapipe::Status PacketThinnerCalculator::Open(CalculatorContext* cc) { mediapipe::Status PacketThinnerCalculator::Open(CalculatorContext* cc) {
PacketThinnerCalculatorOptions options = mediapipe::tool::RetrieveOptions( PacketThinnerCalculatorOptions options = mediapipe::tool::RetrieveOptions(
cc->Options<PacketThinnerCalculatorOptions>(), cc->InputSidePackets(), cc->Options<PacketThinnerCalculatorOptions>(), cc->InputSidePackets(),
kOptionsTag); kOptionsTag);
@ -224,10 +224,10 @@ TimestampDiff abs(TimestampDiff t) { return t < 0 ? -t : t; }
} }
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status PacketThinnerCalculator::Close(CalculatorContext* cc) { mediapipe::Status PacketThinnerCalculator::Close(CalculatorContext* cc) {
// Emit any saved packets before quitting. // Emit any saved packets before quitting.
if (!saved_packet_.IsEmpty()) { if (!saved_packet_.IsEmpty()) {
// Only sync thinner should have saved packets. // Only sync thinner should have saved packets.
@ -239,10 +239,10 @@ TimestampDiff abs(TimestampDiff t) { return t < 0 ? -t : t; }
cc->Outputs().Index(0).AddPacket(saved_packet_); cc->Outputs().Index(0).AddPacket(saved_packet_);
} }
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status PacketThinnerCalculator::AsyncThinnerProcess( mediapipe::Status PacketThinnerCalculator::AsyncThinnerProcess(
CalculatorContext* cc) { CalculatorContext* cc) {
if (cc->InputTimestamp() >= next_valid_timestamp_) { if (cc->InputTimestamp() >= next_valid_timestamp_) {
cc->Outputs().Index(0).AddPacket( cc->Outputs().Index(0).AddPacket(
@ -251,10 +251,10 @@ TimestampDiff abs(TimestampDiff t) { return t < 0 ? -t : t; }
// Guaranteed not to emit packets seen during refractory period. // Guaranteed not to emit packets seen during refractory period.
cc->Outputs().Index(0).SetNextTimestampBound(next_valid_timestamp_); cc->Outputs().Index(0).SetNextTimestampBound(next_valid_timestamp_);
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status PacketThinnerCalculator::SyncThinnerProcess( mediapipe::Status PacketThinnerCalculator::SyncThinnerProcess(
CalculatorContext* cc) { CalculatorContext* cc) {
if (saved_packet_.IsEmpty()) { if (saved_packet_.IsEmpty()) {
// If no packet has been saved, store the current packet. // If no packet has been saved, store the current packet.
@ -290,7 +290,7 @@ TimestampDiff abs(TimestampDiff t) { return t < 0 ? -t : t; }
saved_packet_ = cc->Inputs().Index(0).Value(); saved_packet_ = cc->Inputs().Index(0).Value();
} }
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
Timestamp PacketThinnerCalculator::NearestSyncTimestamp(Timestamp now) const { Timestamp PacketThinnerCalculator::NearestSyncTimestamp(Timestamp now) const {

@ -28,9 +28,9 @@ namespace mediapipe {
// ignored. // ignored.
class PassThroughCalculator : public CalculatorBase { class PassThroughCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
if (!cc->Inputs().TagMap()->SameAs(*cc->Outputs().TagMap())) { if (!cc->Inputs().TagMap()->SameAs(*cc->Outputs().TagMap())) {
return ::mediapipe::InvalidArgumentError( return mediapipe::InvalidArgumentError(
"Input and output streams to PassThroughCalculator must use " "Input and output streams to PassThroughCalculator must use "
"matching tags and indexes."); "matching tags and indexes.");
} }
@ -46,7 +46,7 @@ class PassThroughCalculator : public CalculatorBase {
if (cc->OutputSidePackets().NumEntries() != 0) { if (cc->OutputSidePackets().NumEntries() != 0) {
if (!cc->InputSidePackets().TagMap()->SameAs( if (!cc->InputSidePackets().TagMap()->SameAs(
*cc->OutputSidePackets().TagMap())) { *cc->OutputSidePackets().TagMap())) {
return ::mediapipe::InvalidArgumentError( return mediapipe::InvalidArgumentError(
"Input and output side packets to PassThroughCalculator must use " "Input and output side packets to PassThroughCalculator must use "
"matching tags and indexes."); "matching tags and indexes.");
} }
@ -56,10 +56,10 @@ class PassThroughCalculator : public CalculatorBase {
&cc->InputSidePackets().Get(id)); &cc->InputSidePackets().Get(id));
} }
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) final { mediapipe::Status Open(CalculatorContext* cc) final {
for (CollectionItemId id = cc->Inputs().BeginId(); for (CollectionItemId id = cc->Inputs().BeginId();
id < cc->Inputs().EndId(); ++id) { id < cc->Inputs().EndId(); ++id) {
if (!cc->Inputs().Get(id).Header().IsEmpty()) { if (!cc->Inputs().Get(id).Header().IsEmpty()) {
@ -73,10 +73,10 @@ class PassThroughCalculator : public CalculatorBase {
} }
} }
cc->SetOffset(TimestampDiff(0)); cc->SetOffset(TimestampDiff(0));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) final { mediapipe::Status Process(CalculatorContext* cc) final {
cc->GetCounter("PassThrough")->Increment(); cc->GetCounter("PassThrough")->Increment();
if (cc->Inputs().NumEntries() == 0) { if (cc->Inputs().NumEntries() == 0) {
return tool::StatusStop(); return tool::StatusStop();
@ -90,7 +90,7 @@ class PassThroughCalculator : public CalculatorBase {
cc->Outputs().Get(id).AddPacket(cc->Inputs().Get(id).Value()); cc->Outputs().Get(id).AddPacket(cc->Inputs().Get(id).Value());
} }
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
}; };
REGISTER_CALCULATOR(PassThroughCalculator); REGISTER_CALCULATOR(PassThroughCalculator);
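// Illustrative node config (a sketch; stream names are hypothetical):
// node {
//   calculator: "PassThroughCalculator"
//   input_stream: "VIDEO:video_frames"
//   output_stream: "VIDEO:passed_video_frames"
// }
// Every input stream and input side packet is forwarded unchanged to the
// output with the matching tag and index.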

@ -53,7 +53,7 @@ namespace mediapipe {
// } // }
class PreviousLoopbackCalculator : public CalculatorBase { class PreviousLoopbackCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
cc->Inputs().Get("MAIN", 0).SetAny(); cc->Inputs().Get("MAIN", 0).SetAny();
cc->Inputs().Get("LOOP", 0).SetAny(); cc->Inputs().Get("LOOP", 0).SetAny();
cc->Outputs().Get("PREV_LOOP", 0).SetSameAs(&(cc->Inputs().Get("LOOP", 0))); cc->Outputs().Get("PREV_LOOP", 0).SetSameAs(&(cc->Inputs().Get("LOOP", 0)));
@ -63,20 +63,20 @@ class PreviousLoopbackCalculator : public CalculatorBase {
// Process() function is invoked in response to MAIN/LOOP stream timestamp // Process() function is invoked in response to MAIN/LOOP stream timestamp
// bound updates. // bound updates.
cc->SetProcessTimestampBounds(true); cc->SetProcessTimestampBounds(true);
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) final { mediapipe::Status Open(CalculatorContext* cc) final {
main_id_ = cc->Inputs().GetId("MAIN", 0); main_id_ = cc->Inputs().GetId("MAIN", 0);
loop_id_ = cc->Inputs().GetId("LOOP", 0); loop_id_ = cc->Inputs().GetId("LOOP", 0);
prev_loop_id_ = cc->Outputs().GetId("PREV_LOOP", 0); prev_loop_id_ = cc->Outputs().GetId("PREV_LOOP", 0);
cc->Outputs() cc->Outputs()
.Get(prev_loop_id_) .Get(prev_loop_id_)
.SetHeader(cc->Inputs().Get(loop_id_).Header()); .SetHeader(cc->Inputs().Get(loop_id_).Header());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) final { mediapipe::Status Process(CalculatorContext* cc) final {
// Non-empty packets and empty packets indicating timestamp bound updates // Non-empty packets and empty packets indicating timestamp bound updates
// are guaranteed to have timestamps greater than timestamps of previous // are guaranteed to have timestamps greater than timestamps of previous
// packets within the same stream. Calculator tracks and operates on such // packets within the same stream. Calculator tracks and operates on such
@ -139,7 +139,7 @@ class PreviousLoopbackCalculator : public CalculatorBase {
} }
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
private: private:

@ -136,27 +136,27 @@ TEST(PreviousLoopbackCalculator, CorrectTimestamps) {
// A Calculator that outputs a summary packet in CalculatorBase::Close(). // A Calculator that outputs a summary packet in CalculatorBase::Close().
class PacketOnCloseCalculator : public CalculatorBase { class PacketOnCloseCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
cc->Inputs().Index(0).Set<int>(); cc->Inputs().Index(0).Set<int>();
cc->Outputs().Index(0).Set<int>(); cc->Outputs().Index(0).Set<int>();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) final { mediapipe::Status Open(CalculatorContext* cc) final {
cc->SetOffset(TimestampDiff(0)); cc->SetOffset(TimestampDiff(0));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) final { mediapipe::Status Process(CalculatorContext* cc) final {
sum_ += cc->Inputs().Index(0).Value().Get<int>(); sum_ += cc->Inputs().Index(0).Value().Get<int>();
cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value()); cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value());
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Close(CalculatorContext* cc) final { mediapipe::Status Close(CalculatorContext* cc) final {
cc->Outputs().Index(0).AddPacket( cc->Outputs().Index(0).AddPacket(
MakePacket<int>(sum_).At(Timestamp::Max())); MakePacket<int>(sum_).At(Timestamp::Max()));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
private: private:
@ -700,19 +700,19 @@ TEST_F(PreviousLoopbackCalculatorProcessingTimestampsTest,
// Similar to GateCalculator, but it doesn't propagate timestamp bound updates. // Similar to GateCalculator, but it doesn't propagate timestamp bound updates.
class DroppingGateCalculator : public CalculatorBase { class DroppingGateCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
cc->Inputs().Index(0).SetAny(); cc->Inputs().Index(0).SetAny();
cc->Inputs().Tag("DISALLOW").Set<bool>(); cc->Inputs().Tag("DISALLOW").Set<bool>();
cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0)); cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) final { mediapipe::Status Process(CalculatorContext* cc) final {
if (!cc->Inputs().Index(0).IsEmpty() && if (!cc->Inputs().Index(0).IsEmpty() &&
!cc->Inputs().Tag("DISALLOW").Get<bool>()) { !cc->Inputs().Tag("DISALLOW").Get<bool>()) {
cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value()); cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value());
} }
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
}; };
REGISTER_CALCULATOR(DroppingGateCalculator); REGISTER_CALCULATOR(DroppingGateCalculator);

@ -43,32 +43,32 @@ namespace mediapipe {
class QuantizeFloatVectorCalculator : public CalculatorBase { class QuantizeFloatVectorCalculator : public CalculatorBase {
public: public:
static ::mediapipe::Status GetContract(CalculatorContract* cc) { static mediapipe::Status GetContract(CalculatorContract* cc) {
cc->Inputs().Tag("FLOAT_VECTOR").Set<std::vector<float>>(); cc->Inputs().Tag("FLOAT_VECTOR").Set<std::vector<float>>();
cc->Outputs().Tag("ENCODED").Set<std::string>(); cc->Outputs().Tag("ENCODED").Set<std::string>();
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Open(CalculatorContext* cc) final { mediapipe::Status Open(CalculatorContext* cc) final {
const auto options = const auto options =
cc->Options<::mediapipe::QuantizeFloatVectorCalculatorOptions>(); cc->Options<::mediapipe::QuantizeFloatVectorCalculatorOptions>();
if (!options.has_max_quantized_value() || if (!options.has_max_quantized_value() ||
!options.has_min_quantized_value()) { !options.has_min_quantized_value()) {
return ::mediapipe::InvalidArgumentError( return mediapipe::InvalidArgumentError(
"Both max_quantized_value and min_quantized_value must be provided " "Both max_quantized_value and min_quantized_value must be provided "
"in QuantizeFloatVectorCalculatorOptions."); "in QuantizeFloatVectorCalculatorOptions.");
} }
max_quantized_value_ = options.max_quantized_value(); max_quantized_value_ = options.max_quantized_value();
min_quantized_value_ = options.min_quantized_value(); min_quantized_value_ = options.min_quantized_value();
if (max_quantized_value_ < min_quantized_value_ + FLT_EPSILON) { if (max_quantized_value_ < min_quantized_value_ + FLT_EPSILON) {
return ::mediapipe::InvalidArgumentError( return mediapipe::InvalidArgumentError(
"max_quantized_value must be greater than min_quantized_value."); "max_quantized_value must be greater than min_quantized_value.");
} }
range_ = max_quantized_value_ - min_quantized_value_; range_ = max_quantized_value_ - min_quantized_value_;
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
::mediapipe::Status Process(CalculatorContext* cc) final { mediapipe::Status Process(CalculatorContext* cc) final {
const std::vector<float>& float_vector = const std::vector<float>& float_vector =
cc->Inputs().Tag("FLOAT_VECTOR").Value().Get<std::vector<float>>(); cc->Inputs().Tag("FLOAT_VECTOR").Value().Get<std::vector<float>>();
int feature_size = float_vector.size(); int feature_size = float_vector.size();
@ -88,7 +88,7 @@ class QuantizeFloatVectorCalculator : public CalculatorBase {
} }
cc->Outputs().Tag("ENCODED").AddPacket( cc->Outputs().Tag("ENCODED").AddPacket(
MakePacket<std::string>(encoded_features).At(cc->InputTimestamp())); MakePacket<std::string>(encoded_features).At(cc->InputTimestamp()));
return ::mediapipe::OkStatus(); return mediapipe::OkStatus();
} }
private: private:

@ -0,0 +1,199 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <utility>
#include <vector>
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/port/ret_check.h"
#include "mediapipe/framework/port/status.h"
#include "mediapipe/util/header_util.h"
namespace mediapipe {
// RealTimeFlowLimiterCalculator is used to limit the number of pipelined
// processing operations in a section of the graph.
//
// Typical topology:
//
// in ->-[FLC]-[foo]-...-[bar]-+->- out
// ^_____________________|
// FINISHED
//
// By connecting the output of the graph section to this calculator's FINISHED
// input with a backwards edge, FLC can keep track of how many
// timestamps are currently being processed.
//
// The limit defaults to 1, and can be overridden with the MAX_IN_FLIGHT side
// packet.
//
// As long as the number of timestamps being processed ("in flight") is below
// the limit, FLC allows input to pass through. When the limit is reached,
// FLC starts dropping input packets, keeping only the most recent. When the
// processing count decreases again, as signaled by the receipt of a packet on
// FINISHED, FLC allows packets to flow again, releasing the most recently
// queued packet, if any.
//
// If there are multiple input streams, packet dropping is synchronized.
//
// IMPORTANT: for each timestamp where FLC forwards a packet (or a set of
// packets, if using multiple data streams), a packet must eventually arrive on
// the FINISHED stream. Dropping packets in the section between FLC and
// FINISHED will make the in-flight count incorrect.
//
// TODO: Remove this comment when graph-level ISH has been removed.
// NOTE: this calculator should always use the ImmediateInputStreamHandler and
// uses it by default. However, if the graph specifies a graph-level
// InputStreamHandler, the ImmediateInputStreamHandler must be specified
// explicitly on this node, as shown below, to override that setting.
//
// Example config:
// node {
// calculator: "RealTimeFlowLimiterCalculator"
// input_stream: "raw_frames"
// input_stream: "FINISHED:finished"
// input_stream_info: {
// tag_index: 'FINISHED'
// back_edge: true
// }
// input_stream_handler {
// input_stream_handler: 'ImmediateInputStreamHandler'
// }
// output_stream: "gated_frames"
// }
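// The default limit of 1 can be overridden with an input side packet, and the
// current pass/drop decision can be observed on an optional output stream; a
// sketch of the extra lines added to the node above (names are hypothetical):
//   input_side_packet: "MAX_IN_FLIGHT:max_in_flight"
//   output_stream: "ALLOW:allow"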
class RealTimeFlowLimiterCalculator : public CalculatorBase {
public:
static mediapipe::Status GetContract(CalculatorContract* cc) {
int num_data_streams = cc->Inputs().NumEntries("");
RET_CHECK_GE(num_data_streams, 1);
RET_CHECK_EQ(cc->Outputs().NumEntries(""), num_data_streams)
<< "Output streams must correspond to input streams, except for the "
"finish indicator input stream.";
for (int i = 0; i < num_data_streams; ++i) {
cc->Inputs().Get("", i).SetAny();
cc->Outputs().Get("", i).SetSameAs(&(cc->Inputs().Get("", i)));
}
cc->Inputs().Get("FINISHED", 0).SetAny();
if (cc->InputSidePackets().HasTag("MAX_IN_FLIGHT")) {
cc->InputSidePackets().Tag("MAX_IN_FLIGHT").Set<int>();
}
if (cc->Outputs().HasTag("ALLOW")) {
cc->Outputs().Tag("ALLOW").Set<bool>();
}
cc->SetInputStreamHandler("ImmediateInputStreamHandler");
return mediapipe::OkStatus();
}
mediapipe::Status Open(CalculatorContext* cc) final {
finished_id_ = cc->Inputs().GetId("FINISHED", 0);
max_in_flight_ = 1;
if (cc->InputSidePackets().HasTag("MAX_IN_FLIGHT")) {
max_in_flight_ = cc->InputSidePackets().Tag("MAX_IN_FLIGHT").Get<int>();
}
RET_CHECK_GE(max_in_flight_, 1);
num_in_flight_ = 0;
allowed_id_ = cc->Outputs().GetId("ALLOW", 0);
allow_ctr_ts_ = Timestamp(0);
num_data_streams_ = cc->Inputs().NumEntries("");
data_stream_bound_ts_.resize(num_data_streams_);
RET_CHECK_OK(CopyInputHeadersToOutputs(cc->Inputs(), &(cc->Outputs())));
return mediapipe::OkStatus();
}
bool Allow() { return num_in_flight_ < max_in_flight_; }
mediapipe::Status Process(CalculatorContext* cc) final {
bool old_allow = Allow();
Timestamp lowest_incomplete_ts = Timestamp::Done();
// Process FINISHED stream.
if (!cc->Inputs().Get(finished_id_).Value().IsEmpty()) {
RET_CHECK_GT(num_in_flight_, 0)
<< "Received a FINISHED packet, but we had none in flight.";
--num_in_flight_;
}
// Process data streams.
for (int i = 0; i < num_data_streams_; ++i) {
auto& stream = cc->Inputs().Get("", i);
auto& out = cc->Outputs().Get("", i);
Packet& packet = stream.Value();
auto ts = packet.Timestamp();
if (ts.IsRangeValue() && data_stream_bound_ts_[i] <= ts) {
data_stream_bound_ts_[i] = ts + 1;
// Note: it's ok to update the output bound here, before sending the
// packet, because updates are batched during the Process function.
out.SetNextTimestampBound(data_stream_bound_ts_[i]);
}
lowest_incomplete_ts =
std::min(lowest_incomplete_ts, data_stream_bound_ts_[i]);
if (packet.IsEmpty()) {
// If the input stream is closed, close the corresponding output.
if (stream.IsDone() && !out.IsClosed()) {
out.Close();
}
// TODO: if the packet is empty, the ts is unset, and we
// cannot read the timestamp bound, even though we'd like to propagate
// it.
} else if (mediapipe::ContainsKey(pending_ts_, ts)) {
// If we have already sent this timestamp (on another stream), send it
// on this stream too.
out.AddPacket(std::move(packet));
} else if (Allow() && (ts > last_dropped_ts_)) {
// If the in-flight is under the limit, and if we have not already
// dropped this or a later timestamp on another stream, then send
// the packet and add an in-flight timestamp.
out.AddPacket(std::move(packet));
pending_ts_.insert(ts);
++num_in_flight_;
} else {
// Otherwise, we'll drop the packet.
last_dropped_ts_ = std::max(last_dropped_ts_, ts);
}
}
// Remove old pending_ts_ entries.
auto it = std::lower_bound(pending_ts_.begin(), pending_ts_.end(),
lowest_incomplete_ts);
pending_ts_.erase(pending_ts_.begin(), it);
// Update ALLOW signal.
if ((old_allow != Allow()) && allowed_id_.IsValid()) {
cc->Outputs()
.Get(allowed_id_)
.AddPacket(MakePacket<bool>(Allow()).At(++allow_ctr_ts_));
}
return mediapipe::OkStatus();
}
private:
std::set<Timestamp> pending_ts_;
Timestamp last_dropped_ts_;
int num_data_streams_;
int num_in_flight_;
int max_in_flight_;
CollectionItemId finished_id_;
CollectionItemId allowed_id_;
Timestamp allow_ctr_ts_;
std::vector<Timestamp> data_stream_bound_ts_;
};
REGISTER_CALCULATOR(RealTimeFlowLimiterCalculator);
} // namespace mediapipe

@ -0,0 +1,496 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <algorithm>
#include <memory>
#include <string>
#include <vector>
#include "absl/time/clock.h"
#include "absl/time/time.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/calculator_runner.h"
#include "mediapipe/framework/formats/image_frame.h"
#include "mediapipe/framework/port/gmock.h"
#include "mediapipe/framework/port/gtest.h"
#include "mediapipe/framework/port/integral_types.h"
#include "mediapipe/framework/port/parse_text_proto.h"
#include "mediapipe/framework/port/status_matchers.h"
#include "mediapipe/framework/timestamp.h"
#include "mediapipe/framework/tool/sink.h"
namespace mediapipe {
namespace {
// A simple Semaphore for synchronizing test threads.
class AtomicSemaphore {
public:
AtomicSemaphore(int64_t supply) : supply_(supply) {}
void Acquire(int64_t amount) {
while (supply_.fetch_sub(amount) - amount < 0) {
Release(amount);
}
}
void Release(int64_t amount) { supply_.fetch_add(amount); }
private:
std::atomic<int64_t> supply_;
};
// Returns the timestamp values for a vector of Packets.
std::vector<int64> TimestampValues(const std::vector<Packet>& packets) {
std::vector<int64> result;
for (const Packet& packet : packets) {
result.push_back(packet.Timestamp().Value());
}
return result;
}
// Returns the packet values for a vector of Packets.
template <typename T>
std::vector<T> PacketValues(const std::vector<Packet>& packets) {
std::vector<T> result;
for (const Packet& packet : packets) {
result.push_back(packet.Get<T>());
}
return result;
}
constexpr int kNumImageFrames = 5;
constexpr int kNumFinished = 3;
CalculatorGraphConfig::Node GetDefaultNode() {
return ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"(
calculator: "RealTimeFlowLimiterCalculator"
input_stream: "raw_frames"
input_stream: "FINISHED:finished"
input_stream_info: { tag_index: "FINISHED" back_edge: true }
output_stream: "gated_frames"
)");
}
// Simple test to make sure that the RealTimeFlowLimiterCalculator outputs just
// one packet when MAX_IN_FLIGHT is 1.
TEST(RealTimeFlowLimiterCalculator, OneOutputTest) {
// Setup the calculator runner and add only ImageFrame packets.
CalculatorRunner runner(GetDefaultNode());
for (int i = 0; i < kNumImageFrames; ++i) {
Timestamp timestamp = Timestamp(i * Timestamp::kTimestampUnitsPerSecond);
runner.MutableInputs()->Index(0).packets.push_back(
MakePacket<ImageFrame>().At(timestamp));
}
// Run the calculator.
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& frame_output_packets =
runner.Outputs().Index(0).packets;
EXPECT_EQ(frame_output_packets.size(), 1);
}
// Simple test to make sure that the RealTimeFlowLimiterCalculator waits for all
// input streams to have at least one packet available before publishing.
TEST(RealTimeFlowLimiterCalculator, BasicTest) {
// Setup the calculator runner and add both ImageFrame and finish packets.
CalculatorRunner runner(GetDefaultNode());
for (int i = 0; i < kNumImageFrames; ++i) {
Timestamp timestamp = Timestamp(i * Timestamp::kTimestampUnitsPerSecond);
runner.MutableInputs()->Index(0).packets.push_back(
MakePacket<ImageFrame>().At(timestamp));
}
for (int i = 0; i < kNumFinished; ++i) {
Timestamp timestamp =
Timestamp((i + 1) * Timestamp::kTimestampUnitsPerSecond);
runner.MutableInputs()
->Tag("FINISHED")
.packets.push_back(MakePacket<bool>(true).At(timestamp));
}
// Run the calculator.
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& frame_output_packets =
runner.Outputs().Index(0).packets;
// Only outputs packets if both input streams are available.
int expected_num_packets = std::min(kNumImageFrames, kNumFinished + 1);
EXPECT_EQ(frame_output_packets.size(), expected_num_packets);
}
// A Calculator::Process callback function.
typedef std::function<mediapipe::Status(const InputStreamShardSet&,
OutputStreamShardSet*)>
ProcessFunction;
// A testing callback function that passes through all packets.
mediapipe::Status PassthroughFunction(const InputStreamShardSet& inputs,
OutputStreamShardSet* outputs) {
for (int i = 0; i < inputs.NumEntries(); ++i) {
if (!inputs.Index(i).Value().IsEmpty()) {
outputs->Index(i).AddPacket(inputs.Index(i).Value());
}
}
return mediapipe::OkStatus();
}
// A Calculator that runs a testing callback function in Close.
class CloseCallbackCalculator : public CalculatorBase {
public:
static mediapipe::Status GetContract(CalculatorContract* cc) {
for (CollectionItemId id = cc->Inputs().BeginId();
id < cc->Inputs().EndId(); ++id) {
cc->Inputs().Get(id).SetAny();
}
for (CollectionItemId id = cc->Outputs().BeginId();
id < cc->Outputs().EndId(); ++id) {
cc->Outputs().Get(id).SetAny();
}
cc->InputSidePackets().Index(0).Set<std::function<mediapipe::Status()>>();
return mediapipe::OkStatus();
}
mediapipe::Status Process(CalculatorContext* cc) override {
return PassthroughFunction(cc->Inputs(), &(cc->Outputs()));
}
mediapipe::Status Close(CalculatorContext* cc) override {
const auto& callback = cc->InputSidePackets()
.Index(0)
.Get<std::function<mediapipe::Status()>>();
return callback();
}
};
REGISTER_CALCULATOR(CloseCallbackCalculator);
// Tests demonstrating a RealTimeFlowLimiterCalculator operating in a cyclic
// graph.
// TODO: clean up these tests.
class RealTimeFlowLimiterCalculatorTest : public testing::Test {
public:
RealTimeFlowLimiterCalculatorTest()
: enter_semaphore_(0), exit_semaphore_(0) {}
void SetUp() override {
graph_config_ = InflightGraphConfig();
tool::AddVectorSink("out_1", &graph_config_, &out_1_packets_);
tool::AddVectorSink("out_2", &graph_config_, &out_2_packets_);
}
void InitializeGraph(int max_in_flight) {
ProcessFunction semaphore_0_func = [&](const InputStreamShardSet& inputs,
OutputStreamShardSet* outputs) {
enter_semaphore_.Release(1);
return PassthroughFunction(inputs, outputs);
};
ProcessFunction semaphore_1_func = [&](const InputStreamShardSet& inputs,
OutputStreamShardSet* outputs) {
exit_semaphore_.Acquire(1);
return PassthroughFunction(inputs, outputs);
};
std::function<mediapipe::Status()> close_func = [this]() {
close_count_++;
return mediapipe::OkStatus();
};
MP_ASSERT_OK(graph_.Initialize(
graph_config_, {
{"max_in_flight", MakePacket<int>(max_in_flight)},
{"callback_0", Adopt(new auto(semaphore_0_func))},
{"callback_1", Adopt(new auto(semaphore_1_func))},
{"callback_2", Adopt(new auto(close_func))},
}));
}
// Adds a packet to a graph input stream.
void AddPacket(const std::string& input_name, int value) {
MP_EXPECT_OK(graph_.AddPacketToInputStream(
input_name, MakePacket<int>(value).At(Timestamp(value))));
}
// A calculator graph starting with a RealTimeFlowLimiterCalculator and
// ending with an InFlightFinishCalculator.
// Back-edge "finished" limits processing to one frame in-flight.
// The two LambdaCalculators are used to keep certain packet sets in flight.
CalculatorGraphConfig InflightGraphConfig() {
return ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
input_stream: 'in_1'
input_stream: 'in_2'
node {
calculator: 'RealTimeFlowLimiterCalculator'
input_side_packet: 'MAX_IN_FLIGHT:max_in_flight'
input_stream: 'in_1'
input_stream: 'in_2'
input_stream: 'FINISHED:out_1'
input_stream_info: { tag_index: 'FINISHED' back_edge: true }
output_stream: 'in_1_sampled'
output_stream: 'in_2_sampled'
}
node {
calculator: 'LambdaCalculator'
input_side_packet: 'callback_0'
input_stream: 'in_1_sampled'
input_stream: 'in_2_sampled'
output_stream: 'queue_1'
output_stream: 'queue_2'
}
node {
calculator: 'LambdaCalculator'
input_side_packet: 'callback_1'
input_stream: 'queue_1'
input_stream: 'queue_2'
output_stream: 'close_1'
output_stream: 'close_2'
}
node {
calculator: 'CloseCallbackCalculator'
input_side_packet: 'callback_2'
input_stream: 'close_1'
input_stream: 'close_2'
output_stream: 'out_1'
output_stream: 'out_2'
}
)");
}
protected:
CalculatorGraphConfig graph_config_;
CalculatorGraph graph_;
AtomicSemaphore enter_semaphore_;
AtomicSemaphore exit_semaphore_;
std::vector<Packet> out_1_packets_;
std::vector<Packet> out_2_packets_;
int close_count_ = 0;
};
// A test demonstrating a RealTimeFlowLimiterCalculator operating in a cyclic
// graph. This test shows that:
//
// (1) Timestamps are passed through unaltered.
// (2) All output streams including the back_edge stream are closed when
// the first input stream is closed.
//
TEST_F(RealTimeFlowLimiterCalculatorTest, BackEdgeCloses) {
InitializeGraph(1);
MP_ASSERT_OK(graph_.StartRun({}));
auto send_packet = [this](const std::string& input_name, int64 n) {
MP_EXPECT_OK(graph_.AddPacketToInputStream(
input_name, MakePacket<int64>(n).At(Timestamp(n))));
};
for (int i = 0; i < 10; i++) {
send_packet("in_1", i * 10);
// This next input should be dropped.
send_packet("in_1", i * 10 + 5);
MP_EXPECT_OK(graph_.WaitUntilIdle());
send_packet("in_2", i * 10);
exit_semaphore_.Release(1);
MP_EXPECT_OK(graph_.WaitUntilIdle());
}
MP_EXPECT_OK(graph_.CloseInputStream("in_1"));
MP_EXPECT_OK(graph_.CloseInputStream("in_2"));
MP_EXPECT_OK(graph_.WaitUntilIdle());
// All output streams are closed and all output packets are delivered,
// with stream "in_1" and stream "in_2" closed.
EXPECT_EQ(10, out_1_packets_.size());
EXPECT_EQ(10, out_2_packets_.size());
// Timestamps have not been messed with.
EXPECT_EQ(PacketValues<int64>(out_1_packets_),
TimestampValues(out_1_packets_));
EXPECT_EQ(PacketValues<int64>(out_2_packets_),
TimestampValues(out_2_packets_));
// Extra inputs on in_1 have been dropped.
EXPECT_EQ(TimestampValues(out_1_packets_),
(std::vector<int64>{0, 10, 20, 30, 40, 50, 60, 70, 80, 90}));
EXPECT_EQ(TimestampValues(out_1_packets_), TimestampValues(out_2_packets_));
// The closing of the stream has been propagated.
EXPECT_EQ(1, close_count_);
}
// A test demonstrating that all output streams are closed when all
// input streams are closed after the last input packet has been processed.
TEST_F(RealTimeFlowLimiterCalculatorTest, AllStreamsClose) {
InitializeGraph(1);
MP_ASSERT_OK(graph_.StartRun({}));
exit_semaphore_.Release(10);
for (int i = 0; i < 10; i++) {
AddPacket("in_1", i);
MP_EXPECT_OK(graph_.WaitUntilIdle());
AddPacket("in_2", i);
MP_EXPECT_OK(graph_.WaitUntilIdle());
}
MP_EXPECT_OK(graph_.CloseAllInputStreams());
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(out_1_packets_), TimestampValues(out_2_packets_));
EXPECT_EQ(TimestampValues(out_1_packets_),
(std::vector<int64>{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}));
EXPECT_EQ(1, close_count_);
}
TEST(RealTimeFlowLimiterCalculator, TwoStreams) {
std::vector<Packet> a_passed;
std::vector<Packet> b_passed;
CalculatorGraphConfig graph_config_ =
ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
input_stream: 'in_a'
input_stream: 'in_b'
input_stream: 'finished'
node {
name: 'input_dropper'
calculator: 'RealTimeFlowLimiterCalculator'
input_side_packet: 'MAX_IN_FLIGHT:max_in_flight'
input_stream: 'in_a'
input_stream: 'in_b'
input_stream: 'FINISHED:finished'
input_stream_info: { tag_index: 'FINISHED' back_edge: true }
output_stream: 'in_a_sampled'
output_stream: 'in_b_sampled'
output_stream: 'ALLOW:allow'
}
)");
std::string allow_cb_name;
tool::AddVectorSink("in_a_sampled", &graph_config_, &a_passed);
tool::AddVectorSink("in_b_sampled", &graph_config_, &b_passed);
tool::AddCallbackCalculator("allow", &graph_config_, &allow_cb_name, true);
bool allow = true;
auto allow_cb = [&allow](const Packet& packet) {
allow = packet.Get<bool>();
};
CalculatorGraph graph_;
MP_EXPECT_OK(graph_.Initialize(
graph_config_,
{
{"max_in_flight", MakePacket<int>(1)},
{allow_cb_name,
MakePacket<std::function<void(const Packet&)>>(allow_cb)},
}));
MP_EXPECT_OK(graph_.StartRun({}));
auto send_packet = [&graph_](const std::string& input_name, int n) {
MP_EXPECT_OK(graph_.AddPacketToInputStream(
input_name, MakePacket<int>(n).At(Timestamp(n))));
};
send_packet("in_a", 1);
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(allow, false);
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{}));
send_packet("in_a", 2);
send_packet("in_b", 1);
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1}));
EXPECT_EQ(allow, false);
send_packet("finished", 1);
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1}));
EXPECT_EQ(allow, true);
send_packet("in_b", 2);
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1}));
EXPECT_EQ(allow, true);
send_packet("in_b", 3);
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1, 3}));
EXPECT_EQ(allow, false);
send_packet("in_b", 4);
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1, 3}));
EXPECT_EQ(allow, false);
send_packet("in_a", 3);
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1, 3}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1, 3}));
EXPECT_EQ(allow, false);
send_packet("finished", 3);
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1, 3}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1, 3}));
EXPECT_EQ(allow, true);
MP_EXPECT_OK(graph_.CloseAllInputStreams());
MP_EXPECT_OK(graph_.WaitUntilDone());
}

TEST(RealTimeFlowLimiterCalculator, CanConsume) {
std::vector<Packet> in_sampled_packets_;
CalculatorGraphConfig graph_config_ =
ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
input_stream: 'in'
input_stream: 'finished'
node {
name: 'input_dropper'
calculator: 'RealTimeFlowLimiterCalculator'
input_side_packet: 'MAX_IN_FLIGHT:max_in_flight'
input_stream: 'in'
input_stream: 'FINISHED:finished'
input_stream_info: { tag_index: 'FINISHED' back_edge: true }
output_stream: 'in_sampled'
output_stream: 'ALLOW:allow'
}
)");
std::string allow_cb_name;
tool::AddVectorSink("in_sampled", &graph_config_, &in_sampled_packets_);
tool::AddCallbackCalculator("allow", &graph_config_, &allow_cb_name, true);
bool allow = true;
auto allow_cb = [&allow](const Packet& packet) {
allow = packet.Get<bool>();
};
CalculatorGraph graph_;
MP_EXPECT_OK(graph_.Initialize(
graph_config_,
{
{"max_in_flight", MakePacket<int>(1)},
{allow_cb_name,
MakePacket<std::function<void(const Packet&)>>(allow_cb)},
}));
MP_EXPECT_OK(graph_.StartRun({}));
auto send_packet = [&graph_](const std::string& input_name, int n) {
MP_EXPECT_OK(graph_.AddPacketToInputStream(
input_name, MakePacket<int>(n).At(Timestamp(n))));
};
send_packet("in", 1);
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(allow, false);
EXPECT_EQ(TimestampValues(in_sampled_packets_), (std::vector<int64>{1}));
MP_EXPECT_OK(in_sampled_packets_[0].Consume<int>());
MP_EXPECT_OK(graph_.CloseAllInputStreams());
MP_EXPECT_OK(graph_.WaitUntilDone());
}
} // anonymous namespace
} // namespace mediapipe

View File

@@ -73,7 +73,7 @@ namespace mediapipe {
 // MuxCalculator/MuxInputStreamHandler.
 class RoundRobinDemuxCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     RET_CHECK_EQ(cc->Inputs().NumEntries(), 1);
     cc->Inputs().Index(0).SetAny();
     if (cc->Outputs().HasTag("SELECT")) {
@@ -83,18 +83,18 @@ class RoundRobinDemuxCalculator : public CalculatorBase {
          id < cc->Outputs().EndId("OUTPUT"); ++id) {
       cc->Outputs().Get(id).SetSameAs(&cc->Inputs().Index(0));
     }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

-  ::mediapipe::Status Open(CalculatorContext* cc) override {
+  mediapipe::Status Open(CalculatorContext* cc) override {
     select_output_ = cc->Outputs().GetId("SELECT", 0);
     output_data_stream_index_ = 0;
     output_data_stream_base_ = cc->Outputs().GetId("OUTPUT", 0);
     num_output_data_streams_ = cc->Outputs().NumEntries("OUTPUT");
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

-  ::mediapipe::Status Process(CalculatorContext* cc) override {
+  mediapipe::Status Process(CalculatorContext* cc) override {
     cc->Outputs()
         .Get(output_data_stream_base_ + output_data_stream_index_)
         .AddPacket(cc->Inputs().Index(0).Value());
@@ -105,7 +105,7 @@ class RoundRobinDemuxCalculator : public CalculatorBase {
     }
     output_data_stream_index_ =
         (output_data_stream_index_ + 1) % num_output_data_streams_;
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

  private:
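
The hunks above and below are part of a repository-wide cleanup that replaces the fully qualified ::mediapipe::Status spelling with mediapipe::Status. As a hedged illustration only (the calculator name below is invented and is not a file in this commit), a minimal calculator written in the post-change style looks like:

#include "mediapipe/framework/calculator_framework.h"

namespace mediapipe {

// Forwards every input packet unchanged; exists only to show the unqualified
// mediapipe::Status spelling adopted by this change.
class StatusStyleExampleCalculator : public CalculatorBase {
 public:
  static mediapipe::Status GetContract(CalculatorContract* cc) {
    cc->Inputs().Index(0).SetAny();
    cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0));
    return mediapipe::OkStatus();
  }

  mediapipe::Status Process(CalculatorContext* cc) override {
    cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value());
    return mediapipe::OkStatus();
  }
};
REGISTER_CALCULATOR(StatusStyleExampleCalculator);

}  // namespace mediapipe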

View File

@@ -30,18 +30,18 @@ namespace mediapipe {
 // second, and so on.
 class SequenceShiftCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     cc->Inputs().Index(0).SetAny();
     if (cc->InputSidePackets().HasTag(kPacketOffsetTag)) {
       cc->InputSidePackets().Tag(kPacketOffsetTag).Set<int>();
     }
     cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

   // Reads from options to set cache_size_ and packet_offset_.
-  ::mediapipe::Status Open(CalculatorContext* cc) override;
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;

  private:
   static constexpr const char* kPacketOffsetTag = "PACKET_OFFSET";
@@ -72,7 +72,7 @@ class SequenceShiftCalculator : public CalculatorBase {
 };
 REGISTER_CALCULATOR(SequenceShiftCalculator);

-::mediapipe::Status SequenceShiftCalculator::Open(CalculatorContext* cc) {
+mediapipe::Status SequenceShiftCalculator::Open(CalculatorContext* cc) {
   packet_offset_ =
       cc->Options<mediapipe::SequenceShiftCalculatorOptions>().packet_offset();
   if (cc->InputSidePackets().HasTag(kPacketOffsetTag)) {
@@ -83,10 +83,10 @@ REGISTER_CALCULATOR(SequenceShiftCalculator);
   if (packet_offset_ == 0) {
     cc->Outputs().Index(0).SetOffset(0);
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status SequenceShiftCalculator::Process(CalculatorContext* cc) {
+mediapipe::Status SequenceShiftCalculator::Process(CalculatorContext* cc) {
   if (packet_offset_ > 0) {
     ProcessPositiveOffset(cc);
   } else if (packet_offset_ < 0) {
@@ -94,7 +94,7 @@ REGISTER_CALCULATOR(SequenceShiftCalculator);
   } else {
     cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value());
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

 void SequenceShiftCalculator::ProcessPositiveOffset(CalculatorContext* cc) {
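
For context on the SequenceShiftCalculator hunks above: packet_offset comes from the node options and, per GetContract, can also be supplied through the PACKET_OFFSET input side packet (which Open reads last, so it appears to take precedence). A hedged sketch of a node config using both hooks, with invented stream and side-packet names:

CalculatorGraphConfig config = ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
  input_stream: 'frames'
  input_side_packet: 'packet_offset'
  node {
    calculator: 'SequenceShiftCalculator'
    input_stream: 'frames'
    # Optional; when present it replaces the packet_offset from the options.
    input_side_packet: 'PACKET_OFFSET:packet_offset'
    output_stream: 'shifted_frames'
    options {
      [mediapipe.SequenceShiftCalculatorOptions.ext] { packet_offset: 1 }
    }
  }
)");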

Some files were not shown because too many files have changed in this diff.