diff --git a/.bazelrc b/.bazelrc index 5a586f3ca..0ec819c0d 100644 --- a/.bazelrc +++ b/.bazelrc @@ -9,21 +9,21 @@ build --define='absl=1' build --enable_platform_specific_config # Linux -build:linux --cxxopt=-std=c++14 -build:linux --host_cxxopt=-std=c++14 +build:linux --cxxopt=-std=c++17 +build:linux --host_cxxopt=-std=c++17 build:linux --copt=-w # windows -build:windows --cxxopt=/std:c++14 -build:windows --host_cxxopt=/std:c++14 +build:windows --cxxopt=/std:c++17 +build:windows --host_cxxopt=/std:c++17 build:windows --copt=/w # For using M_* math constants on Windows with MSVC. build:windows --copt=/D_USE_MATH_DEFINES build:windows --host_copt=/D_USE_MATH_DEFINES # macOS -build:macos --cxxopt=-std=c++14 -build:macos --host_cxxopt=-std=c++14 +build:macos --cxxopt=-std=c++17 +build:macos --host_cxxopt=-std=c++17 build:macos --copt=-w # Sets the default Apple platform to macOS. @@ -83,3 +83,9 @@ build:ios_fat --watchos_cpus=armv7k build:darwin_x86_64 --apple_platform_type=macos build:darwin_x86_64 --macos_minimum_os=10.12 build:darwin_x86_64 --cpu=darwin_x86_64 + +# This bazelrc file is meant to be written by a setup script. +try-import %workspace%/.configure.bazelrc + +# This bazelrc file can be used for user-specific custom build settings. +try-import %workspace%/.user.bazelrc diff --git a/.gitignore b/.gitignore index aa1bde53e..b3a881711 100644 --- a/.gitignore +++ b/.gitignore @@ -2,3 +2,5 @@ bazel-* mediapipe/MediaPipe.xcodeproj mediapipe/MediaPipe.tulsiproj/*.tulsiconf-user mediapipe/provisioning_profile.mobileprovision +.configure.bazelrc +.user.bazelrc diff --git a/README.md b/README.md index 300563c47..ee8b3fddb 100644 --- a/README.md +++ b/README.md @@ -21,33 +21,34 @@ ML solutions for live and streaming media. ## ML solutions in MediaPipe -Face Detection | Face Mesh | Iris | Hands | Pose | Hair Segmentation -:----------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------: | :---------------: -[![face_detection](docs/images/mobile/face_detection_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/face_detection) | [![face_mesh](docs/images/mobile/face_mesh_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/face_mesh) | [![iris](docs/images/mobile/iris_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/iris) | [![hand](docs/images/mobile/hand_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/hands) | [![pose](docs/images/mobile/pose_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/pose) | [![hair_segmentation](docs/images/mobile/hair_segmentation_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/hair_segmentation) +Face Detection | Face Mesh | Iris | Hands | Pose | Holistic +:----------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------: | 
:-------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------: | :------: +[![face_detection](docs/images/mobile/face_detection_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/face_detection) | [![face_mesh](docs/images/mobile/face_mesh_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/face_mesh) | [![iris](docs/images/mobile/iris_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/iris) | [![hand](docs/images/mobile/hand_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/hands) | [![pose](docs/images/mobile/pose_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/pose) | [![holistic](docs/images/mobile/holistic_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/holistic) -Object Detection | Box Tracking | Instant Motion Tracking | Objectron | KNIFT -:----------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------: | :---: -[![object_detection](docs/images/mobile/object_detection_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/object_detection) | [![box_tracking](docs/images/mobile/object_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/box_tracking) | [![instant_motion_tracking](docs/images/mobile/instant_motion_tracking_android_small.gif)](https://google.github.io/mediapipe/solutions/instant_motion_tracking) | [![objectron](docs/images/mobile/objectron_chair_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/objectron) | [![knift](docs/images/mobile/template_matching_android_cpu_small.gif)](https://google.github.io/mediapipe/solutions/knift) +Hair Segmentation | Object Detection | Box Tracking | Instant Motion Tracking | Objectron | KNIFT +:-------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------: | :---: +[![hair_segmentation](docs/images/mobile/hair_segmentation_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/hair_segmentation) | [![object_detection](docs/images/mobile/object_detection_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/object_detection) | 
[![box_tracking](docs/images/mobile/object_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/box_tracking) | [![instant_motion_tracking](docs/images/mobile/instant_motion_tracking_android_small.gif)](https://google.github.io/mediapipe/solutions/instant_motion_tracking) | [![objectron](docs/images/mobile/objectron_chair_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/objectron) | [![knift](docs/images/mobile/template_matching_android_cpu_small.gif)](https://google.github.io/mediapipe/solutions/knift) -[]() | Android | iOS | Desktop | Python | Web | Coral -:---------------------------------------------------------------------------------------- | :-----: | :-: | :-----: | :----: | :-: | :---: -[Face Detection](https://google.github.io/mediapipe/solutions/face_detection) | ✅ | ✅ | ✅ | | ✅ | ✅ -[Face Mesh](https://google.github.io/mediapipe/solutions/face_mesh) | ✅ | ✅ | ✅ | ✅ | | -[Iris](https://google.github.io/mediapipe/solutions/iris) | ✅ | ✅ | ✅ | | ✅ | -[Hands](https://google.github.io/mediapipe/solutions/hands) | ✅ | ✅ | ✅ | ✅ | ✅ | -[Pose](https://google.github.io/mediapipe/solutions/pose) | ✅ | ✅ | ✅ | ✅ | ✅ | -[Hair Segmentation](https://google.github.io/mediapipe/solutions/hair_segmentation) | ✅ | | ✅ | | ✅ | -[Object Detection](https://google.github.io/mediapipe/solutions/object_detection) | ✅ | ✅ | ✅ | | | ✅ -[Box Tracking](https://google.github.io/mediapipe/solutions/box_tracking) | ✅ | ✅ | ✅ | | | -[Instant Motion Tracking](https://google.github.io/mediapipe/solutions/instant_motion_tracking) | ✅ | | | | | -[Objectron](https://google.github.io/mediapipe/solutions/objectron) | ✅ | | | | | -[KNIFT](https://google.github.io/mediapipe/solutions/knift) | ✅ | | | | | -[AutoFlip](https://google.github.io/mediapipe/solutions/autoflip) | | | ✅ | | | -[MediaSequence](https://google.github.io/mediapipe/solutions/media_sequence) | | | ✅ | | | -[YouTube 8M](https://google.github.io/mediapipe/solutions/youtube_8m) | | | ✅ | | | +[]() | [Android](https://google.github.io/mediapipe/getting_started/android) | [iOS](https://google.github.io/mediapipe/getting_started/ios) | [C++](https://google.github.io/mediapipe/getting_started/cpp) | [Python](https://google.github.io/mediapipe/getting_started/python) | [JS](https://google.github.io/mediapipe/getting_started/javascript) | [Coral](https://github.com/google/mediapipe/tree/master/mediapipe/examples/coral/README.md) +:---------------------------------------------------------------------------------------- | :-------------------------------------------------------------: | :-----------------------------------------------------: | :-----------------------------------------------------: | :-----------------------------------------------------------: | :-----------------------------------------------------------: | :--------------------------------------------------------------------: +[Face Detection](https://google.github.io/mediapipe/solutions/face_detection) | ✅ | ✅ | ✅ | | | ✅ +[Face Mesh](https://google.github.io/mediapipe/solutions/face_mesh) | ✅ | ✅ | ✅ | ✅ | ✅ | +[Iris](https://google.github.io/mediapipe/solutions/iris) | ✅ | ✅ | ✅ | | | +[Hands](https://google.github.io/mediapipe/solutions/hands) | ✅ | ✅ | ✅ | ✅ | ✅ | +[Pose](https://google.github.io/mediapipe/solutions/pose) | ✅ | ✅ | ✅ | ✅ | ✅ | +[Holistic](https://google.github.io/mediapipe/solutions/holistic) | ✅ | ✅ | ✅ | ✅ | ✅ | +[Hair Segmentation](https://google.github.io/mediapipe/solutions/hair_segmentation) | ✅ | | ✅ | | | +[Object 
Detection](https://google.github.io/mediapipe/solutions/object_detection) | ✅ | ✅ | ✅ | | | ✅ +[Box Tracking](https://google.github.io/mediapipe/solutions/box_tracking) | ✅ | ✅ | ✅ | | | +[Instant Motion Tracking](https://google.github.io/mediapipe/solutions/instant_motion_tracking) | ✅ | | | | | +[Objectron](https://google.github.io/mediapipe/solutions/objectron) | ✅ | | | | | +[KNIFT](https://google.github.io/mediapipe/solutions/knift) | ✅ | | | | | +[AutoFlip](https://google.github.io/mediapipe/solutions/autoflip) | | | ✅ | | | +[MediaSequence](https://google.github.io/mediapipe/solutions/media_sequence) | | | ✅ | | | +[YouTube 8M](https://google.github.io/mediapipe/solutions/youtube_8m) | | | ✅ | | | See also [MediaPipe Models and Model Cards](https://google.github.io/mediapipe/solutions/models) @@ -55,16 +56,12 @@ for ML models released in MediaPipe. ## MediaPipe in Python -MediaPipe Python package is available on -[PyPI](https://pypi.org/project/mediapipe/), and can be installed simply by `pip -install mediapipe` on Linux and macOS, as described in: - -* [MediaPipe Face Mesh](https://google.github.io/mediapipe/solutions/face_mesh#python) - ([colab](https://mediapipe.page.link/face_mesh_py_colab)) -* [MediaPipe Hands](https://google.github.io/mediapipe/solutions/hands#python) - ([colab](https://mediapipe.page.link/hands_py_colab)) -* [MediaPipe Pose](https://google.github.io/mediapipe/solutions/pose#python) - ([colab](https://mediapipe.page.link/pose_py_colab)) +MediaPipe offers customizable Python solutions as a prebuilt Python package on +[PyPI](https://pypi.org/project/mediapipe/), which can be installed simply with +`pip install mediapipe`. It also provides tools for users to build their own +solutions. Please see +[MediaPipe in Python](https://google.github.io/mediapipe/getting_started/python.md) +for more info. ## MediaPipe on the Web @@ -105,6 +102,8 @@ run code search using ## Publications +* [Background Features in Google Meet, Powered by Web ML](https://ai.googleblog.com/2020/10/background-features-in-google-meet.html) + in Google AI Blog * [MediaPipe 3D Face Transform](https://developers.googleblog.com/2020/09/mediapipe-3d-face-transform.html) in Google Developers Blog * [Instant Motion Tracking With MediaPipe](https://developers.googleblog.com/2020/08/instant-motion-tracking-with-mediapipe.html) diff --git a/WORKSPACE b/WORKSPACE index fe0c1da75..b52e605cc 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -13,11 +13,11 @@ load("@bazel_skylib//lib:versions.bzl", "versions") versions.check(minimum_bazel_version = "3.4.0") -# ABSL cpp library lts_2020_02_25 +# ABSL cpp library lts_2020_09_23 http_archive( name = "com_google_absl", urls = [ - "https://github.com/abseil/abseil-cpp/archive/20200225.tar.gz", + "https://github.com/abseil/abseil-cpp/archive/20200923.tar.gz", ], # Remove after https://github.com/abseil/abseil-cpp/issues/326 is solved. 
patches = [ @@ -26,8 +26,8 @@ http_archive( patch_args = [ "-p1", ], - strip_prefix = "abseil-cpp-20200225", - sha256 = "728a813291bdec2aa46eab8356ace9f75ac2ed9dfe2df5ab603c4e6c09f1c353" + strip_prefix = "abseil-cpp-20200923", + sha256 = "b3744a4f7a249d5eaf2309daad597631ce77ea62e0fc6abffbab4b4c3dc0fc08" ) http_archive( @@ -99,7 +99,7 @@ http_archive( "https://github.com/google/glog/archive/0a2e5931bd5ff22fd3bf8999eb8ce776f159cda6.zip", ], patches = [ - "@//third_party:com_github_glog_glog_9779e5ea6ef59562b030248947f787d1256132ae.diff" + "@//third_party:com_github_glog_glog_9779e5ea6ef59562b030248947f787d1256132ae.diff", ], patch_args = [ "-p1", @@ -170,15 +170,15 @@ http_archive( http_archive( name = "ceres_solver", - url = "https://github.com/ceres-solver/ceres-solver/archive/1.14.0.zip", + url = "https://github.com/ceres-solver/ceres-solver/archive/2.0.0.zip", patches = [ "@//third_party:ceres_solver_compatibility_fixes.diff" ], patch_args = [ "-p1", ], - strip_prefix = "ceres-solver-1.14.0", - sha256 = "5ba6d0db4e784621fda44a50c58bb23b0892684692f0c623e2063f9c19f192f1" + strip_prefix = "ceres-solver-2.0.0", + sha256 = "db12d37b4cebb26353ae5b7746c7985e00877baa8e7b12dc4d3a1512252fff3b" ) http_archive( @@ -364,9 +364,9 @@ http_archive( ) #Tensorflow repo should always go after the other external dependencies. -# 2020-10-30 -_TENSORFLOW_GIT_COMMIT = "84384703c0d8b502e33ff6fd7eefd219dca5ff8e" -_TENSORFLOW_SHA256= "23fb322fc15a20f7a7838d9a31f8b16f60700a494ea654311a0aa8621769df98" +# 2020-12-07 +_TENSORFLOW_GIT_COMMIT = "f556709f4df005ad57fd24d5eaa0d9380128d3ba" +_TENSORFLOW_SHA256= "9e157d4723921b48a974f645f70d07c8fd3c363569a0ac6ee85fec114d6459ea" http_archive( name = "org_tensorflow", urls = [ @@ -374,6 +374,7 @@ http_archive( ], patches = [ "@//third_party:org_tensorflow_compatibility_fixes.diff", + "@//third_party:org_tensorflow_objc_cxx17.diff", ], patch_args = [ "-p1", diff --git a/build_android_examples.sh b/build_android_examples.sh index 37a40ed50..75ec54199 100644 --- a/build_android_examples.sh +++ b/build_android_examples.sh @@ -89,33 +89,34 @@ for app in ${apps}; do fi target="${app}:${target_name}" bin="${bin_dir}/${app}/${target_name}.apk" - apk="${out_dir}/${target_name}.apk" echo "=== Target: ${target}" + if [[ $install_only == false ]]; then + bazel_flags=("${default_bazel_flags[@]}") + bazel_flags+=(${target}) + if [[ $strip == true ]]; then + bazel_flags+=(--linkopt=-s) + fi + fi + if [[ ${app_name} == "objectdetection3d" ]]; then categories=("shoe" "chair" "cup" "camera" "shoe_1stage" "chair_1stage") - for category in ${categories[@]}; do + for category in "${categories[@]}"; do apk="${out_dir}/${target_name}_${category}.apk" if [[ $install_only == false ]]; then bazel_flags_extended=("${bazel_flags[@]}") if [[ ${category} != "shoe" ]]; then bazel_flags_extended+=(--define ${category}=true) fi - echo "bazel ${bazel_flags_extended[@]}" bazel "${bazel_flags_extended[@]}" cp -f "${bin}" "${apk}" fi apks+=(${apk}) done else + apk="${out_dir}/${target_name}.apk" if [[ $install_only == false ]]; then - bazel_flags=("${default_bazel_flags[@]}") - bazel_flags+=(${target}) - if [[ $strip == true ]]; then - bazel_flags+=(--linkopt=-s) - fi - if [[ ${app_name} == "templatematchingcpu" ]]; then switch_to_opencv_4 fi diff --git a/build_desktop_examples.sh b/build_desktop_examples.sh index 36abeb340..a6b2b54f7 100644 --- a/build_desktop_examples.sh +++ b/build_desktop_examples.sh @@ -70,6 +70,7 @@ for app in ${apps}; do if [[ "${target_name}" == "autoflip" || "${target_name}" == 
"hello_world" || "${target_name}" == "media_sequence" || + "${target_name}" == "object_detection_3d" || "${target_name}" == "template_matching" || "${target_name}" == "youtube8m" ]]; then continue @@ -94,6 +95,7 @@ for app in ${apps}; do graph_name="${target_name}/${target_name}" fi if [[ ${target_name} == "iris_tracking" || + ${target_name} == "pose_tracking" || ${target_name} == "upper_body_pose_tracking" ]]; then graph_suffix="cpu" else diff --git a/docs/getting_started/android.md b/docs/getting_started/android.md new file mode 100644 index 000000000..855f5fa29 --- /dev/null +++ b/docs/getting_started/android.md @@ -0,0 +1,191 @@ +--- +layout: default +title: MediaPipe on Android +parent: Getting Started +has_children: true +has_toc: false +nav_order: 1 +--- + +# MediaPipe on Android +{: .no_toc } + +1. TOC +{:toc} +--- + +Please follow instructions below to build Android example apps in the supported +MediaPipe [solutions](../solutions/solutions.md). To learn more about these +example apps, start from [Hello World! on Android](./hello_world_android.md). To +incorporate MediaPipe into an existing Android Studio project, see these +[instructions](./android_archive_library.md) that use Android Archive (AAR) and +Gradle. + +## Building Android example apps + +### Prerequisite + +* Install MediaPipe following these [instructions](./install.md). +* Setup Java Runtime. +* Setup Android SDK release 28.0.3 and above. +* Setup Android NDK r18b and above. + +MediaPipe recommends setting up Android SDK and NDK via Android Studio (and see +below for Android Studio setup). However, if you prefer using MediaPipe without +Android Studio, please run +[`setup_android_sdk_and_ndk.sh`](https://github.com/google/mediapipe/blob/master/setup_android_sdk_and_ndk.sh) +to download and setup Android SDK and NDK before building any Android example +apps. + +If Android SDK and NDK are already installed (e.g., by Android Studio), set +$ANDROID_HOME and $ANDROID_NDK_HOME to point to the installed SDK and NDK. + +```bash +export ANDROID_HOME= +export ANDROID_NDK_HOME= +``` + +In order to use MediaPipe on earlier Android versions, MediaPipe needs to switch +to a lower Android API level. You can achieve this by specifying `api_level = +$YOUR_INTENDED_API_LEVEL` in android_ndk_repository() and/or +android_sdk_repository() in the +[`WORKSPACE`](https://github.com/google/mediapipe/blob/master/WORKSPACE) file. + +Please verify all the necessary packages are installed. + +* Android SDK Platform API Level 28 or 29 +* Android SDK Build-Tools 28 or 29 +* Android SDK Platform-Tools 28 or 29 +* Android SDK Tools 26.1.1 +* Android NDK 17c or above + +### Option 1: Build with Bazel in Command Line + +Tip: You can run this +[script](https://github.com/google/mediapipe/blob/master/build_android_examples.sh) +to build (and install) all MediaPipe Android example apps. + +1. To build an Android example app, build against the corresponding + `android_binary` build target. For instance, for + [MediaPipe Hands](../solutions/hands.md) the target is `handtrackinggpu` in + the + [BUILD](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu/BUILD) + file: + + Note: To reduce the binary size, consider appending `--linkopt="-s"` to the + command below to strip symbols. + + ```bash + bazel build -c opt --config=android_arm64 mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu:handtrackinggpu + ``` + +2. 
Install it on a device with: + + ```bash + adb install bazel-bin/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu/handtrackinggpu.apk + ``` + +### Option 2: Build with Bazel in Android Studio + +The MediaPipe project can be imported into Android Studio using the Bazel plugins. This allows the MediaPipe examples to be built and modified in Android Studio. + +To incorporate MediaPipe into an existing Android Studio project, see these [instructions](./android_archive_library.md) that use Android Archive (AAR) and Gradle. + +The steps below use Android Studio 3.5 to build and install a MediaPipe example app: + +1. Install and launch Android Studio 3.5. + +2. Select `Configure` -> `SDK Manager` -> `SDK Platforms`. + + * Verify that Android SDK Platform API Level 28 or 29 is installed. + * Take note of the Android SDK Location, e.g., `/usr/local/home/Android/Sdk`. + +3. Select `Configure` -> `SDK Manager` -> `SDK Tools`. + + * Verify that Android SDK Build-Tools 28 or 29 is installed. + * Verify that Android SDK Platform-Tools 28 or 29 is installed. + * Verify that Android SDK Tools 26.1.1 is installed. + * Verify that Android NDK 17c or above is installed. + * Take note of the Android NDK Location, e.g., `/usr/local/home/Android/Sdk/ndk-bundle` or `/usr/local/home/Android/Sdk/ndk/20.0.5594570`. + +4. Set environment variables `$ANDROID_HOME` and `$ANDROID_NDK_HOME` to point to the installed SDK and NDK. + + ```bash + export ANDROID_HOME=/usr/local/home/Android/Sdk + + # If the NDK libraries are installed by a previous version of Android Studio, do + export ANDROID_NDK_HOME=/usr/local/home/Android/Sdk/ndk-bundle + # If the NDK libraries are installed by Android Studio 3.5, do + export ANDROID_NDK_HOME=/usr/local/home/Android/Sdk/ndk/<version number> + ``` + +5. Select `Configure` -> `Plugins` to install `Bazel`. + +6. On Linux, select `File` -> `Settings` -> `Bazel settings`. On macOS, select `Android Studio` -> `Preferences` -> `Bazel settings`. Then, modify `Bazel binary location` to be the same as the output of `$ which bazel`. + +7. Select `Import Bazel Project`. + + * Select `Workspace`: `/path/to/mediapipe` and select `Next`. + * Select `Generate from BUILD file`: `/path/to/mediapipe/BUILD` and select `Next`. + * Modify `Project View` to be the following and select `Finish`. + + ``` + directories: + # read project settings, e.g., .bazelrc + . + -mediapipe/objc + -mediapipe/examples/ios + + targets: + //mediapipe/examples/android/...:all + //mediapipe/java/...:all + + android_sdk_platform: android-29 + + sync_flags: + --host_crosstool_top=@bazel_tools//tools/cpp:toolchain + ``` + +8. Select `Bazel` -> `Sync` -> `Sync project with Build files`. + + Note: Even after doing step 4, if you still see the error: `"no such package '@androidsdk//': Either the path attribute of android_sdk_repository or the ANDROID_HOME environment variable must be set."`, please modify the [`WORKSPACE`](https://github.com/google/mediapipe/blob/master/WORKSPACE) file to point to your SDK and NDK library locations, as below: + + ``` + android_sdk_repository( + name = "androidsdk", + path = "/path/to/android/sdk" + ) + + android_ndk_repository( + name = "androidndk", + path = "/path/to/android/ndk" + ) + ``` + +9. Connect an Android device to the workstation. + +10. Select `Run...` -> `Edit Configurations...`. + + * Select `Templates` -> `Bazel Command`. 
+ * Enter Target Expression: + `//mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu:handtrackinggpu` + * Enter Bazel command: `mobile-install`. + * Enter Bazel flags: `-c opt --config=android_arm64`. + * Press the `[+]` button to add the new configuration. + * Select `Run` to run the example app on the connected Android device. diff --git a/docs/getting_started/android_archive_library.md b/docs/getting_started/android_archive_library.md index fc9f90c39..bd6a1e1c1 100644 --- a/docs/getting_started/android_archive_library.md +++ b/docs/getting_started/android_archive_library.md @@ -1,8 +1,9 @@ --- layout: default title: MediaPipe Android Archive -parent: Getting Started -nav_order: 7 +parent: MediaPipe on Android +grand_parent: Getting Started +nav_order: 2 --- # MediaPipe Android Archive diff --git a/docs/getting_started/building_examples.md b/docs/getting_started/building_examples.md index 301a70117..2244b2736 100644 --- a/docs/getting_started/building_examples.md +++ b/docs/getting_started/building_examples.md @@ -2,7 +2,7 @@ layout: default title: Building MediaPipe Examples parent: Getting Started -nav_order: 2 +nav_exclude: true --- # Building MediaPipe Examples @@ -12,464 +12,22 @@ nav_order: 2 {:toc} --- -## Android +### Android -### Prerequisite +Please see these [instructions](./android.md). -* Java Runtime. -* Android SDK release 28.0.3 and above. -* Android NDK r18b and above. +### iOS -MediaPipe recommends setting up Android SDK and NDK via Android Studio (and see -below for Android Studio setup). However, if you prefer using MediaPipe without -Android Studio, please run -[`setup_android_sdk_and_ndk.sh`](https://github.com/google/mediapipe/blob/master/setup_android_sdk_and_ndk.sh) -to download and setup Android SDK and NDK before building any Android example -apps. +Please see these [instructions](./ios.md). -If Android SDK and NDK are already installed (e.g., by Android Studio), set -$ANDROID_HOME and $ANDROID_NDK_HOME to point to the installed SDK and NDK. +### Python -```bash -export ANDROID_HOME= -export ANDROID_NDK_HOME= -``` +Please see these [instructions](./python.md). -In order to use MediaPipe on earlier Android versions, MediaPipe needs to switch -to a lower Android API level. You can achieve this by specifying `api_level = -$YOUR_INTENDED_API_LEVEL` in android_ndk_repository() and/or -android_sdk_repository() in the -[`WORKSPACE`](https://github.com/google/mediapipe/blob/master/WORKSPACE) file. +### JavaScript -Please verify all the necessary packages are installed. +Please see these [instructions](./javascript.md). -* Android SDK Platform API Level 28 or 29 -* Android SDK Build-Tools 28 or 29 -* Android SDK Platform-Tools 28 or 29 -* Android SDK Tools 26.1.1 -* Android NDK 17c or above +### C++ -### Option 1: Build with Bazel in Command Line - -Tip: You can run this -[script](https://github.com/google/mediapipe/blob/master/build_android_examples.sh) -to build (and install) all MediaPipe Android example apps. - -1. To build an Android example app, build against the corresponding - `android_binary` build target. For instance, for - [MediaPipe Hands](../solutions/hands.md) the target is `handtrackinggpu` in - the - [BUILD](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu/BUILD) - file: - - Note: To reduce the binary size, consider appending `--linkopt="-s"` to the - command below to strip symbols. 
- - ```bash - bazel build -c opt --config=android_arm64 mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu:handtrackinggpu - ``` - -2. Install it on a device with: - - ```bash - adb install bazel-bin/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu/handtrackinggpu.apk - ``` - -### Option 2: Build with Bazel in Android Studio - -The MediaPipe project can be imported into Android Studio using the Bazel -plugins. This allows the MediaPipe examples to be built and modified in Android -Studio. - -To incorporate MediaPipe into an existing Android Studio project, see these -[instructions](./android_archive_library.md) that use Android Archive (AAR) and -Gradle. - -The steps below use Android Studio 3.5 to build and install a MediaPipe example -app: - -1. Install and launch Android Studio 3.5. - -2. Select `Configure` -> `SDK Manager` -> `SDK Platforms`. - - * Verify that Android SDK Platform API Level 28 or 29 is installed. - * Take note of the Android SDK Location, e.g., - `/usr/local/home/Android/Sdk`. - -3. Select `Configure` -> `SDK Manager` -> `SDK Tools`. - - * Verify that Android SDK Build-Tools 28 or 29 is installed. - * Verify that Android SDK Platform-Tools 28 or 29 is installed. - * Verify that Android SDK Tools 26.1.1 is installed. - * Verify that Android NDK 17c or above is installed. - * Take note of the Android NDK Location, e.g., - `/usr/local/home/Android/Sdk/ndk-bundle` or - `/usr/local/home/Android/Sdk/ndk/20.0.5594570`. - -4. Set environment variables `$ANDROID_HOME` and `$ANDROID_NDK_HOME` to point - to the installed SDK and NDK. - - ```bash - export ANDROID_HOME=/usr/local/home/Android/Sdk - - # If the NDK libraries are installed by a previous version of Android Studio, do - export ANDROID_NDK_HOME=/usr/local/home/Android/Sdk/ndk-bundle - # If the NDK libraries are installed by Android Studio 3.5, do - export ANDROID_NDK_HOME=/usr/local/home/Android/Sdk/ndk/ - ``` - -5. Select `Configure` -> `Plugins` to install `Bazel`. - -6. On Linux, select `File` -> `Settings` -> `Bazel settings`. On macos, select - `Android Studio` -> `Preferences` -> `Bazel settings`. Then, modify `Bazel - binary location` to be the same as the output of `$ which bazel`. - -7. Select `Import Bazel Project`. - - * Select `Workspace`: `/path/to/mediapipe` and select `Next`. - * Select `Generate from BUILD file`: `/path/to/mediapipe/BUILD` and select - `Next`. - * Modify `Project View` to be the following and select `Finish`. - - ``` - directories: - # read project settings, e.g., .bazelrc - . - -mediapipe/objc - -mediapipe/examples/ios - - targets: - //mediapipe/examples/android/...:all - //mediapipe/java/...:all - - android_sdk_platform: android-29 - - sync_flags: - --host_crosstool_top=@bazel_tools//tools/cpp:toolchain - ``` - -8. Select `Bazel` -> `Sync` -> `Sync project with Build files`. - - Note: Even after doing step 4, if you still see the error: `"no such package - '@androidsdk//': Either the path attribute of android_sdk_repository or the - ANDROID_HOME environment variable must be set."`, please modify the - [`WORKSPACE`](https://github.com/google/mediapipe/blob/master/WORKSPACE) - file to point to your SDK and NDK library locations, as below: - - ``` - android_sdk_repository( - name = "androidsdk", - path = "/path/to/android/sdk" - ) - - android_ndk_repository( - name = "androidndk", - path = "/path/to/android/ndk" - ) - ``` - -9. Connect an Android device to the workstation. - -10. Select `Run...` -> `Edit Configurations...`. 
- - * Select `Templates` -> `Bazel Command`. - * Enter Target Expression: - `//mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu:handtrackinggpu` - * Enter Bazel command: `mobile-install`. - * Enter Bazel flags: `-c opt --config=android_arm64`. - * Press the `[+]` button to add the new configuration. - * Select `Run` to run the example app on the connected Android device. - -## iOS - -### Prerequisite - -1. Install [Xcode](https://developer.apple.com/xcode/), then install the - Command Line Tools using: - - ```bash - xcode-select --install - ``` - -2. Install [Bazel](https://bazel.build/). - - We recommend using [Homebrew](https://brew.sh/) to get the latest version. - -3. Set Python 3.7 as the default Python version and install the Python "six" - library. This is needed for TensorFlow. - - ```bash - pip3 install --user six - ``` - -4. Clone the MediaPipe repository. - - ```bash - git clone https://github.com/google/mediapipe.git - ``` - -### Set up a bundle ID prefix - -All iOS apps must have a bundle ID, and you must have a provisioning profile -that lets you install an app with that ID onto your phone. To avoid clashes -between different MediaPipe users, you need to configure a unique prefix for the -bundle IDs of our iOS demo apps. - -If you have a custom provisioning profile, see -[Custom provisioning](#custom-provisioning) below. - -Otherwise, run this command to generate a unique prefix: - -```bash -python3 mediapipe/examples/ios/link_local_profiles.py -``` - -### Create an Xcode project - -This allows you to edit and debug one of the example apps in Xcode. It also -allows you to make use of automatic provisioning (see later section). - -1. We will use a tool called [Tulsi](https://tulsi.bazel.build/) for generating - Xcode projects from Bazel build configurations. - - ```bash - # cd out of the mediapipe directory, then: - git clone https://github.com/bazelbuild/tulsi.git - cd tulsi - # remove Xcode version from Tulsi's .bazelrc (see http://github.com/bazelbuild/tulsi#building-and-installing): - sed -i .orig '/xcode_version/d' .bazelrc - # build and run Tulsi: - sh build_and_run.sh - ``` - - This will install `Tulsi.app` inside the `Applications` directory in your - home directory. - -2. Open `mediapipe/Mediapipe.tulsiproj` using the Tulsi app. - - Tip: If Tulsi displays an error saying "Bazel could not be found", press the - "Bazel..." button in the Packages tab and select the `bazel` executable in - your homebrew `/bin/` directory. - -3. Select the MediaPipe config in the Configs tab, then press the Generate - button below. You will be asked for a location to save the Xcode project. - Once the project is generated, it will be opened in Xcode. - - If you get an error about bundle IDs, see the - [previous section](#set-up-a-bundle-id-prefix). - -### Set up provisioning - -To install applications on an iOS device, you need a provisioning profile. There -are two options: - -1. Automatic provisioning. This allows you to build and install an app to your - personal device. The provisining profile is managed by Xcode, and has to be - updated often (it is valid for about a week). - -2. Custom provisioning. This uses a provisioning profile associated with an - Apple developer account. These profiles have a longer validity period and - can target multiple devices, but you need a paid developer account with - Apple to obtain one. - -#### Automatic provisioning - -1. Create an Xcode project for MediaPipe, as discussed - [earlier](#create-an-xcode-project). - -2. 
In the project navigator in the left sidebar, select the "Mediapipe" - project. - -3. Select one of the application targets, e.g. HandTrackingGpuApp. - -4. Select the "Signing & Capabilities" tab. - -5. Check "Automatically manage signing", and confirm the dialog box. - -6. Select "_Your Name_ (Personal Team)" in the Team pop-up menu. - -7. This set-up needs to be done once for each application you want to install. - Repeat steps 3-6 as needed. - -This generates provisioning profiles for each app you have selected. Now we need -to tell Bazel to use them. We have provided a script to make this easier. - -1. In the terminal, to the `mediapipe` directory where you cloned the - repository. - -2. Run this command: - - ```bash - python3 mediapipe/examples/ios/link_local_profiles.py - ``` - -This will find and link the provisioning profile for all applications for which -you have enabled automatic provisioning in Xcode. - -Note: once a profile expires, Xcode will generate a new one; you must then run -this script again to link the updated profiles. - -#### Custom provisioning - -1. Obtain a provisioning profile from Apple. - -Tip: You can use this command to see the provisioning profiles you have -previously downloaded using Xcode: `open ~/Library/MobileDevice/"Provisioning -Profiles"`. If there are none, generate and download a profile on -[Apple's developer site](https://developer.apple.com/account/resources/). - -1. Symlink or copy your provisioning profile to - `mediapipe/mediapipe/provisioning_profile.mobileprovision`. - - ```bash - cd mediapipe - ln -s ~/Downloads/MyProvisioningProfile.mobileprovision mediapipe/provisioning_profile.mobileprovision - ``` - -Note: if you had previously set up automatic provisioning, you should remove the -`provisioning_profile.mobileprovision` symlink in each example's directory, -since it will take precedence over the common one. You can also overwrite it -with you own profile if you need a different profile for different apps. - -1. Open `mediapipe/examples/ios/bundle_id.bzl`, and change the - `BUNDLE_ID_PREFIX` to a prefix associated with your provisioning profile. - -### Build and run an app using Xcode - -1. Create the Xcode project, and make sure you have set up either automatic or - custom provisioning. - -2. You can now select any of the MediaPipe demos in the target menu, and build - and run them as normal. - -Note: When you ask Xcode to run an app, by default it will use the Debug -configuration. Some of our demos are computationally heavy; you may want to use -the Release configuration for better performance. - -Tip: To switch build configuration in Xcode, click on the target menu, choose -"Edit Scheme...", select the Run action, and switch the Build Configuration from -Debug to Release. Note that this is set independently for each target. - -Tip: On the device, in Settings > General > Device Management, make sure the -developer (yourself) is trusted. - -### Build an app using the command line - -1. Make sure you have set up either automatic or custom provisioning. - -2. Using [MediaPipe Hands](../solutions/hands.md) for example, run: - - ```bash - bazel build -c opt --config=ios_arm64 mediapipe/examples/ios/handtrackinggpu:HandTrackingGpuApp - ``` - - You may see a permission request from `codesign` in order to sign the app. - - Tip: If you are using custom provisioning, you can run this - [script](https://github.com/google/mediapipe/blob/master/build_ios_examples.sh) - to build all MediaPipe iOS example apps. - -3. 
In Xcode, open the `Devices and Simulators` window (command-shift-2). - -4. Make sure your device is connected. You will see a list of installed apps. - Press the "+" button under the list, and select the `.ipa` file built by - Bazel. - -5. You can now run the app on your device. - -Tip: On the device, in Settings > General > Device Management, make sure the -developer (yourself) is trusted. - -## Desktop - -### Option 1: Running on CPU - -1. To build, for example, [MediaPipe Hands](../solutions/hands.md), run: - - ```bash - bazel build -c opt --define MEDIAPIPE_DISABLE_GPU=1 mediapipe/examples/desktop/hand_tracking:hand_tracking_cpu - ``` - -2. To run the application: - - ```bash - GLOG_logtostderr=1 bazel-bin/mediapipe/examples/desktop/hand_tracking/hand_tracking_cpu \ - --calculator_graph_config_file=mediapipe/graphs/hand_tracking/hand_tracking_desktop_live.pbtxt - ``` - - This will open up your webcam as long as it is connected and on. Any errors - is likely due to your webcam being not accessible. - -### Option 2: Running on GPU - -Note: This currently works only on Linux, and please first follow -[OpenGL ES Setup on Linux Desktop](./gpu_support.md#opengl-es-setup-on-linux-desktop). - -1. To build, for example, [MediaPipe Hands](../solutions/hands.md), run: - - ```bash - bazel build -c opt --copt -DMESA_EGL_NO_X11_HEADERS --copt -DEGL_NO_X11 \ - mediapipe/examples/desktop/hand_tracking:hand_tracking_gpu - ``` - -2. To run the application: - - ```bash - GLOG_logtostderr=1 bazel-bin/mediapipe/examples/desktop/hand_tracking/hand_tracking_gpu \ - --calculator_graph_config_file=mediapipe/graphs/hand_tracking/hand_tracking_mobile.pbtxt - ``` - - This will open up your webcam as long as it is connected and on. Any errors - is likely due to your webcam being not accessible, or GPU drivers not setup - properly. - -## Python - -MediaPipe Python package is available on -[PyPI](https://pypi.org/project/mediapipe/), and can be installed simply by `pip -install mediapipe` on Linux and macOS, as described in, for instance, -[Python section in MediaPipe Pose](../solutions/pose.md#python) and in this -[colab](https://mediapipe.page.link/pose_py_colab). - -Follow the steps below only if you have local changes and need to build the -Python package from source. Otherwise, we strongly encourage our users to simply -run `pip install mediapipe`, more convenient and much faster. - -1. Make sure that Bazel and OpenCV are correctly installed and configured for - MediaPipe. Please see [Installation](./install.md) for how to setup Bazel - and OpenCV for MediaPipe on Linux and macOS. - -2. Install the following dependencies. - - ```bash - # Debian or Ubuntu - $ sudo apt install python3-dev - $ sudo apt install python3-venv - $ sudo apt install -y protobuf-compiler - ``` - - ```bash - # macOS - $ brew install protobuf - ``` - -3. Activate a Python virtual environment. - - ```bash - $ python3 -m venv mp_env && source mp_env/bin/activate - ``` - -4. In the virtual environment, go to the MediaPipe repo directory. - -5. Install the required Python packages. - - ```bash - (mp_env)mediapipe$ pip3 install -r requirements.txt - ``` - -6. Generate and install MediaPipe package. - - ```bash - (mp_env)mediapipe$ python3 setup.py gen_protos - (mp_env)mediapipe$ python3 setup.py install --link-opencv - ``` +Please see these [instructions](./cpp.md). 
diff --git a/docs/getting_started/cpp.md b/docs/getting_started/cpp.md new file mode 100644 index 000000000..8fc091fea --- /dev/null +++ b/docs/getting_started/cpp.md @@ -0,0 +1,62 @@ +--- +layout: default +title: MediaPipe in C++ +parent: Getting Started +has_children: true +has_toc: false +nav_order: 5 +--- + +# MediaPipe in C++ +{: .no_toc } + +1. TOC +{:toc} +--- + +Please follow the instructions below to build C++ command-line example apps in the supported MediaPipe [solutions](../solutions/solutions.md). To learn more about these example apps, start from [Hello World! in C++](./hello_world_cpp.md). + +## Building C++ command-line example apps + +### Option 1: Running on CPU + +1. To build, for example, [MediaPipe Hands](../solutions/hands.md), run: + + ```bash + bazel build -c opt --define MEDIAPIPE_DISABLE_GPU=1 mediapipe/examples/desktop/hand_tracking:hand_tracking_cpu + ``` + +2. To run the application: + + ```bash + GLOG_logtostderr=1 bazel-bin/mediapipe/examples/desktop/hand_tracking/hand_tracking_cpu \ + --calculator_graph_config_file=mediapipe/graphs/hand_tracking/hand_tracking_desktop_live.pbtxt + ``` + + This will open up your webcam as long as it is connected and on. Any errors are likely due to your webcam not being accessible. + +### Option 2: Running on GPU + +Note: This currently works only on Linux; please first follow [OpenGL ES Setup on Linux Desktop](./gpu_support.md#opengl-es-setup-on-linux-desktop). + +1. To build, for example, [MediaPipe Hands](../solutions/hands.md), run: + + ```bash + bazel build -c opt --copt -DMESA_EGL_NO_X11_HEADERS --copt -DEGL_NO_X11 \ + mediapipe/examples/desktop/hand_tracking:hand_tracking_gpu + ``` + +2. To run the application: + + ```bash + GLOG_logtostderr=1 bazel-bin/mediapipe/examples/desktop/hand_tracking/hand_tracking_gpu \ + --calculator_graph_config_file=mediapipe/graphs/hand_tracking/hand_tracking_mobile.pbtxt + ``` + + This will open up your webcam as long as it is connected and on. Any errors are likely due to your webcam not being accessible, or your GPU drivers not being set up properly. diff --git a/docs/getting_started/gpu_support.md b/docs/getting_started/gpu_support.md index 2aae63a2e..38bab9be3 100644 --- a/docs/getting_started/gpu_support.md +++ b/docs/getting_started/gpu_support.md @@ -2,7 +2,7 @@ layout: default title: GPU Support parent: Getting Started -nav_order: 6 +nav_order: 7 --- # GPU Support diff --git a/docs/getting_started/hello_world_android.md b/docs/getting_started/hello_world_android.md index 6bb98f671..c828ed226 100644 --- a/docs/getting_started/hello_world_android.md +++ b/docs/getting_started/hello_world_android.md @@ -1,8 +1,9 @@ --- layout: default title: Hello World! on Android -parent: Getting Started -nav_order: 3 +parent: MediaPipe on Android +grand_parent: Getting Started +nav_order: 1 --- # Hello World! on Android @@ -496,7 +497,7 @@ CameraHelper.CameraFacing cameraFacing = applicationInfo.metaData.getBoolean("cameraFacingFront", false) ? CameraHelper.CameraFacing.FRONT : CameraHelper.CameraFacing.BACK; -cameraHelper.startCamera(this, cameraFacing, /*surfaceTexture=*/ null); +cameraHelper.startCamera(this, cameraFacing, /*unusedSurfaceTexture=*/ null); ``` At this point, the application should build successfully. 
However, when you run diff --git a/docs/getting_started/hello_world_desktop.md b/docs/getting_started/hello_world_cpp.md similarity index 97% rename from docs/getting_started/hello_world_desktop.md rename to docs/getting_started/hello_world_cpp.md index 61e9b6471..f46e88698 100644 --- a/docs/getting_started/hello_world_desktop.md +++ b/docs/getting_started/hello_world_cpp.md @@ -1,11 +1,12 @@ --- layout: default -title: Hello World! on Desktop (C++) -parent: Getting Started -nav_order: 5 +title: Hello World! in C++ +parent: MediaPipe in C++ +grand_parent: Getting Started +nav_order: 1 --- -# Hello World! on Desktop (C++) +# Hello World! in C++ {: .no_toc } 1. TOC diff --git a/docs/getting_started/hello_world_ios.md b/docs/getting_started/hello_world_ios.md index 19de67d01..0441623e3 100644 --- a/docs/getting_started/hello_world_ios.md +++ b/docs/getting_started/hello_world_ios.md @@ -1,8 +1,9 @@ --- layout: default title: Hello World! on iOS -parent: Getting Started -nav_order: 4 +parent: MediaPipe on iOS +grand_parent: Getting Started +nav_order: 1 --- # Hello World! on iOS @@ -193,8 +194,7 @@ bazel build -c opt --config=ios_arm64 mediapipe/examples/ios/helloworld:HelloWor Then, go back to XCode, open Window > Devices and Simulators, select your device, and add the `.ipa` file generated by the command above to your device. -Here is the document on [setting up and compiling](./building_examples.md#ios) -iOS MediaPipe apps. +Here is the document on [setting up and compiling](./ios.md) iOS MediaPipe apps. Open the application on your device. Since it is empty, it should display a blank white screen. diff --git a/docs/getting_started/install.md b/docs/getting_started/install.md index b83f9a9ef..d8b3122c0 100644 --- a/docs/getting_started/install.md +++ b/docs/getting_started/install.md @@ -2,7 +2,7 @@ layout: default title: Installation parent: Getting Started -nav_order: 1 +nav_order: 6 --- # Installation @@ -23,32 +23,21 @@ Note: To make Mediapipe work with TensorFlow, please set Python 3.7 as the default Python version and install the Python "six" library by running `pip3 install --user six`. -Note: To build and run Android example apps, see these -[instructions](./building_examples.md#android). To build and run iOS example -apps, see these [instructions](./building_examples.md#ios). - ## Installing on Debian and Ubuntu -1. Checkout MediaPipe repository. - - ```bash - $ git clone https://github.com/google/mediapipe.git - - # Change directory into MediaPipe root directory - $ cd mediapipe - ``` - -2. Install Bazel. +1. Install Bazel. Follow the official [Bazel documentation](https://docs.bazel.build/versions/master/install-ubuntu.html) to install Bazel 3.4 or higher. - For Nvidia Jetson and Raspberry Pi devices with ARM Ubuntu, Bazel needs to - be built from source. + For Nvidia Jetson and Raspberry Pi devices with ARM Ubuntu only, Bazel needs + to be built from source: ```bash # For Bazel 3.4.0 + mkdir $HOME/bazel-3.4.0 + cd $HOME/bazel-3.4.0 wget https://github.com/bazelbuild/bazel/releases/download/3.4.0/bazel-3.4.0-dist.zip sudo apt-get install build-essential openjdk-8-jdk python zip unzip unzip bazel-3.4.0-dist.zip @@ -56,6 +45,16 @@ apps, see these [instructions](./building_examples.md#ios). sudo cp output/bazel /usr/local/bin/ ``` +2. Checkout MediaPipe repository. + + ```bash + $ cd $HOME + $ git clone https://github.com/google/mediapipe.git + + # Change directory into MediaPipe root directory + $ cd mediapipe + ``` + 3. Install OpenCV and FFmpeg. Option 1. 
Use package manager tool to install the pre-compiled OpenCV @@ -174,7 +173,7 @@ apps, see these [instructions](./building_examples.md#ios). # when building GPU examples. ``` -5. Run the [Hello World desktop example](./hello_world_desktop.md). +5. Run the [Hello World! in C++ example](./hello_world_cpp.md). ```bash $ export GLOG_logtostderr=1 @@ -208,7 +207,13 @@ build issues. **Disclaimer**: Running MediaPipe on CentOS is experimental. -1. Checkout MediaPipe repository. +1. Install Bazel. + + Follow the official + [Bazel documentation](https://docs.bazel.build/versions/master/install-redhat.html) + to install Bazel 3.4 or higher. + +2. Checkout MediaPipe repository. ```bash $ git clone https://github.com/google/mediapipe.git @@ -217,12 +222,6 @@ build issues. $ cd mediapipe ``` -2. Install Bazel. - - Follow the official - [Bazel documentation](https://docs.bazel.build/versions/master/install-redhat.html) - to install Bazel 3.4 or higher. - 3. Install OpenCV. Option 1. Use package manager tool to install the pre-compiled version. @@ -304,7 +303,7 @@ build issues. ) ``` -4. Run the [Hello World desktop example](./hello_world_desktop.md). +4. Run the [Hello World! in C++ example](./hello_world_cpp.md). ```bash $ export GLOG_logtostderr=1 @@ -337,15 +336,7 @@ build issues. * Install [Xcode](https://developer.apple.com/xcode/) and its Command Line Tools by `xcode-select --install`. -2. Checkout MediaPipe repository. - - ```bash - $ git clone https://github.com/google/mediapipe.git - - $ cd mediapipe - ``` - -3. Install Bazel. +2. Install Bazel. Option 1. Use package manager tool to install Bazel @@ -358,6 +349,14 @@ build issues. [Bazel documentation](https://docs.bazel.build/versions/master/install-os-x.html#install-with-installer-mac-os-x) to install Bazel 3.4 or higher. +3. Checkout MediaPipe repository. + + ```bash + $ git clone https://github.com/google/mediapipe.git + + $ cd mediapipe + ``` + 4. Install OpenCV and FFmpeg. Option 1. Use HomeBrew package manager tool to install the pre-compiled @@ -439,7 +438,7 @@ build issues. $ pip3 install --user six ``` -6. Run the [Hello World desktop example](./hello_world_desktop.md). +6. Run the [Hello World! in C++ example](./hello_world_cpp.md). ```bash $ export GLOG_logtostderr=1 @@ -540,7 +539,7 @@ next section. ) ``` -9. Run the [Hello World desktop example](./hello_world_desktop.md). +9. Run the [Hello World! in C++ example](./hello_world_cpp.md). Note: For building MediaPipe on Windows, please add `--action_env PYTHON_BIN_PATH="C://path//to//python.exe"` to the build command. @@ -673,7 +672,7 @@ cameras. Alternatively, you use a video file as input. ) ``` -8. Run the [Hello World desktop example](./hello_world_desktop.md). +8. Run the [Hello World! in C++ example](./hello_world_cpp.md). ```bash username@DESKTOP-TMVLBJ1:~/mediapipe$ export GLOG_logtostderr=1 @@ -729,7 +728,7 @@ This will use a Docker image that will isolate mediapipe's installation from the # Successfully tagged mediapipe:latest ``` -3. Run the [Hello World desktop example](./hello_world_desktop.md). +3. Run the [Hello World! in C++ example](./hello_world_cpp.md). ```bash $ docker run -it --name mediapipe mediapipe:latest diff --git a/docs/getting_started/ios.md b/docs/getting_started/ios.md new file mode 100644 index 000000000..cd11828af --- /dev/null +++ b/docs/getting_started/ios.md @@ -0,0 +1,222 @@ +--- +layout: default +title: MediaPipe on iOS +parent: Getting Started +has_children: true +has_toc: false +nav_order: 2 +--- + +# MediaPipe on iOS +{: .no_toc } + +1. 
TOC +{:toc} +--- + +Please follow the instructions below to build iOS example apps in the supported MediaPipe [solutions](../solutions/solutions.md). To learn more about these example apps, start from [Hello World! on iOS](./hello_world_ios.md). + +## Building iOS example apps + +### Prerequisite + +1. Install MediaPipe following these [instructions](./install.md). + +2. Install [Xcode](https://developer.apple.com/xcode/), then install the Command Line Tools using: + + ```bash + xcode-select --install + ``` + +3. Install [Bazel](https://bazel.build/). + + We recommend using [Homebrew](https://brew.sh/) to get the latest version. + +4. Set Python 3.7 as the default Python version and install the Python "six" library. This is needed for TensorFlow. + + ```bash + pip3 install --user six + ``` + +5. Clone the MediaPipe repository. + + ```bash + git clone https://github.com/google/mediapipe.git + ``` + +### Set up a bundle ID prefix + +All iOS apps must have a bundle ID, and you must have a provisioning profile that lets you install an app with that ID onto your phone. To avoid clashes between different MediaPipe users, you need to configure a unique prefix for the bundle IDs of our iOS demo apps. + +If you have a custom provisioning profile, see [Custom provisioning](#custom-provisioning) below. + +Otherwise, run this command to generate a unique prefix: + +```bash +python3 mediapipe/examples/ios/link_local_profiles.py +``` + +### Create an Xcode project + +This allows you to edit and debug one of the example apps in Xcode. It also allows you to make use of automatic provisioning (see later section). + +1. We will use a tool called [Tulsi](https://tulsi.bazel.build/) for generating Xcode projects from Bazel build configurations. + + ```bash + # cd out of the mediapipe directory, then: + git clone https://github.com/bazelbuild/tulsi.git + cd tulsi + # remove Xcode version from Tulsi's .bazelrc (see http://github.com/bazelbuild/tulsi#building-and-installing): + sed -i .orig '/xcode_version/d' .bazelrc + # build and run Tulsi: + sh build_and_run.sh + ``` + + This will install `Tulsi.app` inside the `Applications` directory in your home directory. + +2. Open `mediapipe/Mediapipe.tulsiproj` using the Tulsi app. + + Tip: If Tulsi displays an error saying "Bazel could not be found", press the "Bazel..." button in the Packages tab and select the `bazel` executable in your homebrew `/bin/` directory. + +3. Select the MediaPipe config in the Configs tab, then press the Generate button below. You will be asked for a location to save the Xcode project. Once the project is generated, it will be opened in Xcode. + + If you get an error about bundle IDs, see the [previous section](#set-up-a-bundle-id-prefix). + +### Set up provisioning + +To install applications on an iOS device, you need a provisioning profile. There are two options: + +1. Automatic provisioning. This allows you to build and install an app to your personal device. The provisioning profile is managed by Xcode, and has to be updated often (it is valid for about a week). + +2. Custom provisioning. This uses a provisioning profile associated with an Apple developer account. These profiles have a longer validity period and can target multiple devices, but you need a paid developer account with Apple to obtain one. + +#### Automatic provisioning + +1. Create an Xcode project for MediaPipe, as discussed [earlier](#create-an-xcode-project). + +2. 
In the project navigator in the left sidebar, select the "Mediapipe" project. + +3. Select one of the application targets, e.g. HandTrackingGpuApp. + +4. Select the "Signing & Capabilities" tab. + +5. Check "Automatically manage signing", and confirm the dialog box. + +6. Select "_Your Name_ (Personal Team)" in the Team pop-up menu. + +7. This set-up needs to be done once for each application you want to install. Repeat steps 3-6 as needed. + +This generates provisioning profiles for each app you have selected. Now we need to tell Bazel to use them. We have provided a script to make this easier. + +1. In the terminal, go to the `mediapipe` directory where you cloned the repository. + +2. Run this command: + + ```bash + python3 mediapipe/examples/ios/link_local_profiles.py + ``` + +This will find and link the provisioning profile for all applications for which you have enabled automatic provisioning in Xcode. + +Note: once a profile expires, Xcode will generate a new one; you must then run this script again to link the updated profiles. + +#### Custom provisioning + +1. Obtain a provisioning profile from Apple. + +Tip: You can use this command to see the provisioning profiles you have previously downloaded using Xcode: `open ~/Library/MobileDevice/"Provisioning Profiles"`. If there are none, generate and download a profile on [Apple's developer site](https://developer.apple.com/account/resources/). + +1. Symlink or copy your provisioning profile to `mediapipe/mediapipe/provisioning_profile.mobileprovision`. + + ```bash + cd mediapipe + ln -s ~/Downloads/MyProvisioningProfile.mobileprovision mediapipe/provisioning_profile.mobileprovision + ``` + +Note: if you had previously set up automatic provisioning, you should remove the `provisioning_profile.mobileprovision` symlink in each example's directory, since it will take precedence over the common one. You can also overwrite it with your own profile if you need a different profile for different apps. + +1. Open `mediapipe/examples/ios/bundle_id.bzl`, and change the `BUNDLE_ID_PREFIX` to a prefix associated with your provisioning profile. + +### Build and run an app using Xcode + +1. Create the Xcode project, and make sure you have set up either automatic or custom provisioning. + +2. You can now select any of the MediaPipe demos in the target menu, and build and run them as normal. + +Note: When you ask Xcode to run an app, by default it will use the Debug configuration. Some of our demos are computationally heavy; you may want to use the Release configuration for better performance. + +Tip: To switch build configuration in Xcode, click on the target menu, choose "Edit Scheme...", select the Run action, and switch the Build Configuration from Debug to Release. Note that this is set independently for each target. + +Tip: On the device, in Settings > General > Device Management, make sure the developer (yourself) is trusted. + +### Build an app using the command line + +1. Make sure you have set up either automatic or custom provisioning. + +2. Using [MediaPipe Hands](../solutions/hands.md) as an example, run: + + ```bash + bazel build -c opt --config=ios_arm64 mediapipe/examples/ios/handtrackinggpu:HandTrackingGpuApp + ``` + + You may see a permission request from `codesign` in order to sign the app. + + Tip: If you are using custom provisioning, you can run this [script](https://github.com/google/mediapipe/blob/master/build_ios_examples.sh) to build all MediaPipe iOS example apps. + +3. 
In Xcode, open the `Devices and Simulators` window (command-shift-2). + +4. Make sure your device is connected. You will see a list of installed apps. + Press the "+" button under the list, and select the `.ipa` file built by + Bazel. + +5. You can now run the app on your device. + +Tip: On the device, in Settings > General > Device Management, make sure the +developer (yourself) is trusted. diff --git a/docs/getting_started/javascript.md b/docs/getting_started/javascript.md new file mode 100644 index 000000000..95a1e2610 --- /dev/null +++ b/docs/getting_started/javascript.md @@ -0,0 +1,88 @@ +--- +layout: default +title: MediaPipe in JavaScript +parent: Getting Started +nav_order: 4 +--- + +# MediaPipe in JavaScript +{: .no_toc } + +1. TOC +{:toc} +--- + +## Ready-to-use JavaScript Solutions + +MediaPipe currently offers the following solutions: + +Solution | NPM Package | Example +----------------- | ----------------------------- | ------- +[Face Mesh][F-pg] | [@mediapipe/face_mesh][F-npm] | [mediapipe.dev/demo/face_mesh][F-demo] +[Hands][H-pg] | [@mediapipe/hands][H-npm] | [mediapipe.dev/demo/hands][H-demo] +[Pose][P-pg] | [@mediapipe/pose][P-npm] | [mediapipe.dev/demo/pose][P-demo] +[Holistic][Ho-pg] | [@mediapipe/holistic][Ho-npm] | [mediapipe.dev/demo/holistic][Ho-demo] + +Click on a solution link above for more information, including API and code +snippets. + +The quickest way to get acclimated is to look at the examples above. Each demo +has a link to a [CodePen][codepen] so that you can edit the code and try it +yourself. We have included a number of utility packages to help you get started: + +* [@mediapipe/drawing_utils][draw-npm] - Utilities to draw landmarks and + connectors. +* [@mediapipe/camera_utils][cam-npm] - Utilities to operate the camera. +* [@mediapipe/control_utils][ctrl-npm] - Utilities to show sliders and FPS + widgets. + +Note: See these demos and more at [MediaPipe on CodePen][codepen] + +All of these solutions are staged in [NPM][npm]. You can install any package +locally with `npm install`. Example: + +``` +npm install @mediapipe/holistic. +``` + +If you would rather not stage these locally, you can rely on a CDN (e.g., +[jsDelivr](https://www.jsdelivr.com/)). This will allow you to add scripts +directly to your HTML: + +``` + + + + +``` + +Note: You can specify version numbers to both NPM and jsdelivr. They are +structured as `..`. To prevent breaking changes from +affecting your work, restrict your request to a `` number. e.g., +`@mediapipe/holistic@0.1`. 
+
+[Ho-pg]: ../solutions/holistic#javascript-solution-api
+[F-pg]: ../solutions/face_mesh#javascript-solution-api
+[H-pg]: ../solutions/hands#javascript-solution-api
+[P-pg]: ../solutions/pose#javascript-solution-api
+[Ho-npm]: https://www.npmjs.com/package/@mediapipe/holistic
+[F-npm]: https://www.npmjs.com/package/@mediapipe/face_mesh
+[H-npm]: https://www.npmjs.com/package/@mediapipe/hands
+[P-npm]: https://www.npmjs.com/package/@mediapipe/pose
+[draw-npm]: https://www.npmjs.com/package/@mediapipe/drawing_utils
+[cam-npm]: https://www.npmjs.com/package/@mediapipe/camera_utils
+[ctrl-npm]: https://www.npmjs.com/package/@mediapipe/control_utils
+[Ho-jsd]: https://www.jsdelivr.com/package/npm/@mediapipe/holistic
+[F-jsd]: https://www.jsdelivr.com/package/npm/@mediapipe/face_mesh
+[H-jsd]: https://www.jsdelivr.com/package/npm/@mediapipe/hands
+[P-jsd]: https://www.jsdelivr.com/package/npm/@mediapipe/pose
+[Ho-pen]: https://code.mediapipe.dev/codepen/holistic
+[F-pen]: https://code.mediapipe.dev/codepen/face_mesh
+[H-pen]: https://code.mediapipe.dev/codepen/hands
+[P-pen]: https://code.mediapipe.dev/codepen/pose
+[Ho-demo]: https://mediapipe.dev/demo/holistic
+[F-demo]: https://mediapipe.dev/demo/face_mesh
+[H-demo]: https://mediapipe.dev/demo/hands
+[P-demo]: https://mediapipe.dev/demo/pose
+[npm]: https://www.npmjs.com/package/@mediapipe
+[codepen]: https://code.mediapipe.dev/codepen
diff --git a/docs/getting_started/python.md b/docs/getting_started/python.md
new file mode 100644
index 000000000..4fd332630
--- /dev/null
+++ b/docs/getting_started/python.md
@@ -0,0 +1,120 @@
+---
+layout: default
+title: MediaPipe in Python
+parent: Getting Started
+has_children: true
+has_toc: false
+nav_order: 3
+---
+
+# MediaPipe in Python
+{: .no_toc }
+
+1. TOC
+{:toc}
+---
+
+## Ready-to-use Python Solutions
+
+MediaPipe offers ready-to-use yet customizable Python solutions as a prebuilt
+Python package. The MediaPipe Python package is available on
+[PyPI](https://pypi.org/project/mediapipe/) for Linux, macOS and Windows.
+
+You can, for instance, activate a Python virtual environment:
+
+```bash
+$ python3 -m venv mp_env && source mp_env/bin/activate
+```
+
+Install the MediaPipe Python package and start the Python interpreter:
+
+```bash
+(mp_env)$ pip install mediapipe
+(mp_env)$ python3
+```
+
+In the Python interpreter, import the package and start using one of the
+solutions:
+
+```python
+import mediapipe as mp
+mp_face_mesh = mp.solutions.face_mesh
+```
+
+Tip: Use command `deactivate` to later exit the Python virtual environment.
+
+To learn more about configuration options and usage examples, please find
+details in each solution via the links below:
+
+* [MediaPipe Face Mesh](../solutions/face_mesh#python-solution-api)
+* [MediaPipe Hands](../solutions/hands#python-solution-api)
+* [MediaPipe Pose](../solutions/pose#python-solution-api)
+* [MediaPipe Holistic](../solutions/holistic#python-solution-api)
+
+## MediaPipe on Google Colab
+
+* [MediaPipe Face Mesh Colab](https://mediapipe.page.link/face_mesh_py_colab)
+* [MediaPipe Hands Colab](https://mediapipe.page.link/hands_py_colab)
+* [MediaPipe Pose Colab](https://mediapipe.page.link/pose_py_colab)
+* [MediaPipe Holistic Colab](https://mediapipe.page.link/holistic_py_colab)
+
+## MediaPipe Python Framework
+
+The ready-to-use solutions are built upon the MediaPipe Python framework, which
+can be used by advanced users to run their own MediaPipe graphs in Python.
+Please see [here](./python_framework.md) for more info.
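+
+Tying the pieces above together, here is a minimal, runnable sketch that runs
+Face Mesh on a single image. It mirrors the Face Mesh Python example elsewhere
+in this change; the image path is illustrative.
+
+```python
+import cv2
+import mediapipe as mp
+
+mp_face_mesh = mp.solutions.face_mesh
+
+# Run Face Mesh on one static image ('/tmp/face.png' is a placeholder path).
+face_mesh = mp_face_mesh.FaceMesh(static_image_mode=True, max_num_faces=1)
+image = cv2.imread('/tmp/face.png')
+# The solutions expect RGB input, while OpenCV loads images as BGR.
+results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
+print(results.multi_face_landmarks)
+face_mesh.close()
+```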
+
+## Building MediaPipe Python Package
+
+Follow the steps below only if you have local changes and need to build the
+Python package from source. Otherwise, we strongly encourage our users to
+simply run `pip install mediapipe` to use the ready-to-use solutions, which is
+more convenient and much faster.
+
+1. Make sure that Bazel and OpenCV are correctly installed and configured for
+   MediaPipe. Please see [Installation](./install.md) for how to set up Bazel
+   and OpenCV for MediaPipe on Linux and macOS.
+
+2. Install the following dependencies.
+
+   Debian or Ubuntu:
+
+   ```bash
+   $ sudo apt install python3-dev
+   $ sudo apt install python3-venv
+   $ sudo apt install -y protobuf-compiler
+   ```
+
+   macOS:
+
+   ```bash
+   $ brew install protobuf
+   ```
+
+   Windows:
+
+   Download the latest protoc win64 zip from
+   [the Protobuf GitHub repo](https://github.com/protocolbuffers/protobuf/releases),
+   unzip the file, and copy the protoc.exe executable to a preferred location.
+   Please ensure that location is added into the Path environment variable.
+
+3. Activate a Python virtual environment.
+
+   ```bash
+   $ python3 -m venv mp_env && source mp_env/bin/activate
+   ```
+
+4. In the virtual environment, go to the MediaPipe repo directory.
+
+5. Install the required Python packages.
+
+   ```bash
+   (mp_env)mediapipe$ pip3 install -r requirements.txt
+   ```
+
+6. Generate and install the MediaPipe package.
+
+   ```bash
+   (mp_env)mediapipe$ python3 setup.py gen_protos
+   (mp_env)mediapipe$ python3 setup.py install --link-opencv
+   ```
diff --git a/docs/getting_started/python_framework.md b/docs/getting_started/python_framework.md
new file mode 100644
index 000000000..ece14bc91
--- /dev/null
+++ b/docs/getting_started/python_framework.md
@@ -0,0 +1,268 @@
+---
+layout: default
+title: MediaPipe Python Framework
+parent: MediaPipe in Python
+grand_parent: Getting Started
+nav_order: 1
+---
+
+# MediaPipe Python Framework
+{: .no_toc }
+
+1. TOC
+{:toc}
+---
+
+The MediaPipe Python framework grants direct access to the core components of
+the MediaPipe C++ framework such as Timestamp, Packet, and CalculatorGraph,
+whereas the
+[ready-to-use Python solutions](./python.md#ready-to-use-python-solutions) hide
+the technical details of the framework and simply return the readable model
+inference results back to the callers.
+
+The MediaPipe framework sits on top of
+[the pybind11 library](https://pybind11.readthedocs.io/en/stable/index.html).
+The C++ core framework is exposed in Python via a C++/Python language binding.
+The content below assumes that the reader already has a basic understanding of
+the MediaPipe C++ framework. Otherwise, you can find useful information in
+[Framework Concepts](../framework_concepts/framework_concepts.md).
+
+### Packet
+
+The packet is the basic data flow unit in MediaPipe. A packet consists of a
+numeric timestamp and a shared pointer to an immutable payload. In Python, a
+MediaPipe packet can be created by calling one of the packet creator methods in
+the
+[`mp.packet_creator`](https://github.com/google/mediapipe/tree/master/mediapipe/python/pybind/packet_creator.cc)
+module. Correspondingly, the packet payload can be retrieved by using one of
+the packet getter methods in the
+[`mp.packet_getter`](https://github.com/google/mediapipe/tree/master/mediapipe/python/pybind/packet_getter.cc)
+module. Note that the packet payload becomes **immutable** after packet
+creation. Thus, modifying the retrieved packet content doesn't affect the
+actual payload in the packet.
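+
+For illustration, here is a minimal sketch of this create/get round trip,
+using the `mp.packet_creator` and `mp.packet_getter` modules described above:
+
+```python
+import mediapipe as mp
+
+# Create a packet; its payload is immutable from this point on.
+packet = mp.packet_creator.create_int(42)
+
+# The getter returns the payload value; there is no API to mutate the
+# payload stored inside the packet itself.
+value = mp.packet_getter.get_int(packet)  # value == 42
+```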
+The MediaPipe framework Python API supports the most commonly used data types
+of MediaPipe (e.g., ImageFrame, Matrix, Protocol Buffers, and the primitive
+data types) in the core binding. The comprehensive table below shows the type
+mappings between the Python and the C++ data types, along with the packet
+creator and the content getter method for each data type supported by the
+MediaPipe Python framework API.
+
+Python Data Type | C++ Data Type | Packet Creator | Content Getter
+------------------------------------ | ----------------------------------- | --------------------------------------------------------- | --------------
+bool | bool | create_bool(True) | get_bool(packet)
+int or np.intc | int_t | create_int(1) | get_int(packet)
+int or np.int8 | int8_t | create_int8(2**7-1) | get_int(packet)
+int or np.int16 | int16_t | create_int16(2**15-1) | get_int(packet)
+int or np.int32 | int32_t | create_int32(2**31-1) | get_int(packet)
+int or np.int64 | int64_t | create_int64(2**63-1) | get_int(packet)
+int or np.uint8 | uint8_t | create_uint8(2**8-1) | get_uint(packet)
+int or np.uint16 | uint16_t | create_uint16(2**16-1) | get_uint(packet)
+int or np.uint32 | uint32_t | create_uint32(2**32-1) | get_uint(packet)
+int or np.uint64 | uint64_t | create_uint64(2**64-1) | get_uint(packet)
+float or np.float32 | float | create_float(1.1) | get_float(packet)
+float or np.double | double | create_double(1.1) | get_float(packet)
+str (UTF-8) | std::string | create_string('abc') | get_str(packet)
+bytes | std::string | create_string(b'\xd0\xd0\xd0') | get_bytes(packet)
+mp.Packet | mp::Packet | create_packet(p) | get_packet(packet)
+List\[bool\] | std::vector\<bool\> | create_bool_vector(\[True, False\]) | get_bool_list(packet)
+List\[int\] or List\[np.intc\] | int\[\] | create_int_array(\[1, 2, 3\]) | get_int_list(packet, size=10)
+List\[int\] or List\[np.intc\] | std::vector\<int\> | create_int_vector(\[1, 2, 3\]) | get_int_list(packet)
+List\[float\] or List\[np.float\] | float\[\] | create_float_array(\[0.1, 0.2\]) | get_float_list(packet, size=10)
+List\[float\] or List\[np.float\] | std::vector\<float\> | create_float_vector(\[0.1, 0.2\]) | get_float_list(packet)
+List\[str\] | std::vector\<std::string\> | create_string_vector(\['a'\]) | get_str_list(packet)
+List\[mp.Packet\] | std::vector\<mp::Packet\> | create_packet_vector(\[packet1, packet2\]) | get_packet_list(p)
+Mapping\[str, Packet\] | std::map\<std::string, mp::Packet\> | create_string_to_packet_map({'a': packet1, 'b': packet2}) | get_str_to_packet_dict(packet)
+np.ndarray (cv.mat and PIL.Image) | mp::ImageFrame | create_image_frame(format=ImageFormat.SRGB, data=mat) | get_image_frame(packet)
+np.ndarray | mp::Matrix | create_matrix(data) | get_matrix(packet)
+Google Proto Message | Google Proto Message | create_proto(proto) | get_proto(packet)
+List\[Proto\] | std::vector\<Proto\> | create_proto_vector(proto_list) | get_proto_list(packet)
+
+It's not uncommon that users create custom C++ classes and send those into the
+graphs and calculators. To allow the custom classes to be used in Python with
+MediaPipe, you may extend the Packet API for a new data type in the following
+steps:
+
+1. Write the pybind11
+   [class binding code](https://pybind11.readthedocs.io/en/stable/advanced/classes.html)
+   or
+   [a custom type caster](https://pybind11.readthedocs.io/en/stable/advanced/cast/custom.html?highlight=custom%20type%20caster)
+   for the custom type in a cc file.
+
+   ```c++
+   #include "path/to/my_type/header/file.h"
+   #include "pybind11/pybind11.h"
+
+   namespace py = pybind11;
+
+   PYBIND11_MODULE(my_type_binding, m) {
+     // Write binding code or a custom type caster for MyType.
+     py::class_<MyType>(m, "MyType")
+         .def(py::init<>())
+         .def(...);
+   }
+   ```
+
+2. Create a new packet creator and getter method of the custom type in a
+   separate cc file.
+
+   ```c++
+   #include "path/to/my_type/header/file.h"
+   #include "mediapipe/framework/packet.h"
+   #include "pybind11/pybind11.h"
+
+   namespace mediapipe {
+   namespace py = pybind11;
+
+   PYBIND11_MODULE(my_packet_methods, m) {
+     m.def(
+         "create_my_type",
+         [](const MyType& my_type) { return MakePacket<MyType>(my_type); });
+
+     m.def(
+         "get_my_type",
+         [](const Packet& packet) {
+           if (!packet.ValidateAsType<MyType>().ok()) {
+             PyErr_SetString(PyExc_ValueError, "Packet data type mismatch.");
+             throw py::error_already_set();
+           }
+           return packet.Get<MyType>();
+         });
+   }
+
+   }  // namespace mediapipe
+   ```
+
+3. Add two bazel build rules for the custom type binding and the new packet
+   methods in the BUILD file.
+
+   ```
+   load("@pybind11_bazel//:build_defs.bzl", "pybind_extension")
+
+   pybind_extension(
+       name = "my_type_binding",
+       srcs = ["my_type_binding.cc"],
+       deps = [":my_type"],
+   )
+
+   pybind_extension(
+       name = "my_packet_methods",
+       srcs = ["my_packet_methods.cc"],
+       deps = [
+           ":my_type",
+           "//mediapipe/framework:packet"
+       ],
+   )
+   ```
+
+4. Build the pybind extension targets (with the suffix .so) with Bazel and
+   move the generated dynamic libraries into one of the $LD_LIBRARY_PATH
+   directories.
+
+5. Use the binding modules in Python.
+
+   ```python
+   import my_type_binding
+   import my_packet_methods
+
+   packet = my_packet_methods.create_my_type(my_type_binding.MyType())
+   my_type = my_packet_methods.get_my_type(packet)
+   ```
+
+### Timestamp
+
+Each packet contains a timestamp that is in units of microseconds. In Python,
+the Packet API provides a convenience method `packet.at()` to define the
+numeric timestamp of a packet. More generally, `packet.timestamp` is the packet
+class property for accessing the underlying timestamp. To convert a Unix epoch
+to a MediaPipe timestamp,
+[the Timestamp API](https://github.com/google/mediapipe/tree/master/mediapipe/python/pybind/timestamp.cc)
+offers a method `mp.Timestamp.from_seconds()`.
+
+### ImageFrame
+
+ImageFrame is the container for storing an image or a video frame. Formats
+supported by ImageFrame are listed in
+[the ImageFormat enum](https://github.com/google/mediapipe/tree/master/mediapipe/python/pybind/image_frame.cc#l=170).
+Pixels are encoded row-major with interleaved color components, and ImageFrame
+supports uint8, uint16, and float as its data types.
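+
+As a minimal sketch of the round trip described next (assuming the
+`mp.ImageFrame` constructor and the `numpy_view()` accessor exposed by the
+Python API referenced below):
+
+```python
+import numpy as np
+import mediapipe as mp
+
+# Create an ImageFrame from a contiguous numpy array.
+rgb = np.zeros((480, 640, 3), dtype=np.uint8)
+image_frame = mp.ImageFrame(image_format=mp.ImageFormat.SRGB, data=rgb)
+
+# numpy_view() returns an unwritable reference to the internal pixels;
+# copy it explicitly if the pixels need to be modified.
+pixels = np.copy(image_frame.numpy_view())
+```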
+MediaPipe provides
+[an ImageFrame Python API](https://github.com/google/mediapipe/tree/master/mediapipe/python/pybind/image_frame.cc)
+to access the ImageFrame C++ class. In Python, the easiest way to retrieve the
+pixel data is to call `image_frame.numpy_view()` to get a numpy ndarray. Note
+that the returned numpy ndarray, a reference to the internal pixel data, is
+unwritable. If the callers need to modify the numpy ndarray, they must
+explicitly call a copy operation to obtain a writable copy. When MediaPipe
+takes a numpy ndarray to make an ImageFrame, it assumes that the data is stored
+contiguously. Correspondingly, the pixel data of an ImageFrame will be
+realigned to be contiguous when it is returned to the Python side.
+
+### Graph
+
+In MediaPipe, all processing takes place within the context of a
+CalculatorGraph.
+[The CalculatorGraph Python API](https://github.com/google/mediapipe/tree/master/mediapipe/python/pybind/calculator_graph.cc)
+is a direct binding to the C++ CalculatorGraph class. The major difference is
+that the CalculatorGraph Python API raises a Python error instead of returning
+a non-OK Status when an error occurs. Therefore, as a Python user, you can
+handle the exceptions as you normally do. The life cycle of a CalculatorGraph
+contains three stages: initialization and setup, graph run, and graph shutdown.
+
+1. Initialize a CalculatorGraph with a CalculatorGraphConfig protobuf or binary
+   protobuf file, and provide callback method(s) to observe the output
+   stream(s).
+
+   Option 1. Initialize a CalculatorGraph with a CalculatorGraphConfig protobuf
+   or its text representation, and observe the output stream(s):
+
+   ```python
+   import mediapipe as mp
+
+   config_text = """
+     input_stream: 'in_stream'
+     output_stream: 'out_stream'
+     node {
+       calculator: 'PassThroughCalculator'
+       input_stream: 'in_stream'
+       output_stream: 'out_stream'
+     }
+   """
+   graph = mp.CalculatorGraph(graph_config=config_text)
+   output_packets = []
+   graph.observe_output_stream(
+       'out_stream',
+       lambda stream_name, packet:
+           output_packets.append(mp.packet_getter.get_str(packet)))
+   ```
+
+   Option 2. Initialize a CalculatorGraph with a binary protobuf file, and
+   observe the output stream(s).
+
+   ```python
+   import os
+
+   import mediapipe as mp
+   # resources dependency
+
+   graph = mp.CalculatorGraph(
+       binary_graph=os.path.join(
+           resources.GetRunfilesDir(), 'path/to/your/graph.binarypb'))
+   graph.observe_output_stream(
+       'out_stream',
+       lambda stream_name, packet: print(f'Get {packet} from {stream_name}'))
+   ```
+
+2. Start the graph run and feed packets into the graph.
+
+   ```python
+   import cv2
+
+   graph.start_run()
+
+   graph.add_packet_to_input_stream(
+       'in_stream', mp.packet_creator.create_str('abc').at(0))
+
+   rgb_img = cv2.cvtColor(cv2.imread('/path/to/your/image.png'), cv2.COLOR_BGR2RGB)
+   graph.add_packet_to_input_stream(
+       'in_stream',
+       mp.packet_creator.create_image_frame(format=mp.ImageFormat.SRGB,
+                                            data=rgb_img).at(1))
+   ```
+
+3. Close the graph when finished. You may restart the graph for another graph
+   run after the call to `close()`.
+
+   ```python
+   graph.close()
+   ```
+
+The Python script can be run with your local Python runtime.
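+
+Putting the three stages together, a minimal end-to-end run of the
+PassThroughCalculator graph from Option 1 above could look like the following
+sketch, composed only from the calls shown in this section:
+
+```python
+import mediapipe as mp
+
+config_text = """
+  input_stream: 'in_stream'
+  output_stream: 'out_stream'
+  node {
+    calculator: 'PassThroughCalculator'
+    input_stream: 'in_stream'
+    output_stream: 'out_stream'
+  }
+"""
+
+graph = mp.CalculatorGraph(graph_config=config_text)
+output_packets = []
+graph.observe_output_stream(
+    'out_stream',
+    lambda stream_name, packet:
+        output_packets.append(mp.packet_getter.get_str(packet)))
+
+graph.start_run()
+# Timestamps must be monotonically increasing within a single run.
+for ts, text in enumerate(['hello', 'world']):
+  graph.add_packet_to_input_stream(
+      'in_stream', mp.packet_creator.create_str(text).at(ts))
+graph.close()  # Finishes this run; the graph can be restarted afterwards.
+
+print(output_packets)  # Expected: ['hello', 'world']
+```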
diff --git a/docs/images/mobile/hand_landmarks.png b/docs/images/mobile/hand_landmarks.png new file mode 100644 index 000000000..f13746a86 Binary files /dev/null and b/docs/images/mobile/hand_landmarks.png differ diff --git a/docs/images/mobile/holistic_pipeline_example.jpg b/docs/images/mobile/holistic_pipeline_example.jpg new file mode 100644 index 000000000..a35b3784b Binary files /dev/null and b/docs/images/mobile/holistic_pipeline_example.jpg differ diff --git a/docs/images/mobile/holistic_sports_and_gestures_example.gif b/docs/images/mobile/holistic_sports_and_gestures_example.gif new file mode 100644 index 000000000..d579e77ab Binary files /dev/null and b/docs/images/mobile/holistic_sports_and_gestures_example.gif differ diff --git a/docs/images/mobile/holistic_tracking_android_gpu_small.gif b/docs/images/mobile/holistic_tracking_android_gpu_small.gif new file mode 100644 index 000000000..8cf0c226f Binary files /dev/null and b/docs/images/mobile/holistic_tracking_android_gpu_small.gif differ diff --git a/docs/images/mobile/pose_tracking_full_body_landmarks.png b/docs/images/mobile/pose_tracking_full_body_landmarks.png new file mode 100644 index 000000000..89530d9e4 Binary files /dev/null and b/docs/images/mobile/pose_tracking_full_body_landmarks.png differ diff --git a/docs/images/mobile/pose_tracking_upper_body_landmarks.png b/docs/images/mobile/pose_tracking_upper_body_landmarks.png index cb18ad567..e2e964ec1 100644 Binary files a/docs/images/mobile/pose_tracking_upper_body_landmarks.png and b/docs/images/mobile/pose_tracking_upper_body_landmarks.png differ diff --git a/docs/index.md b/docs/index.md index 528efa4b3..436010b7c 100644 --- a/docs/index.md +++ b/docs/index.md @@ -21,33 +21,34 @@ ML solutions for live and streaming media. ## ML solutions in MediaPipe -Face Detection | Face Mesh | Iris | Hands | Pose | Hair Segmentation -:----------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------: | :---------------: -[![face_detection](images/mobile/face_detection_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/face_detection) | [![face_mesh](images/mobile/face_mesh_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/face_mesh) | [![iris](images/mobile/iris_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/iris) | [![hand](images/mobile/hand_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/hands) | [![pose](images/mobile/pose_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/pose) | [![hair_segmentation](images/mobile/hair_segmentation_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/hair_segmentation) +Face Detection | Face Mesh | Iris | Hands | Pose | Holistic +:----------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------: | 
:-------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------: | :------: +[![face_detection](images/mobile/face_detection_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/face_detection) | [![face_mesh](images/mobile/face_mesh_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/face_mesh) | [![iris](images/mobile/iris_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/iris) | [![hand](images/mobile/hand_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/hands) | [![pose](images/mobile/pose_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/pose) | [![hair_segmentation](images/mobile/holistic_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/holistic) -Object Detection | Box Tracking | Instant Motion Tracking | Objectron | KNIFT -:----------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------: | :---: -[![object_detection](images/mobile/object_detection_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/object_detection) | [![box_tracking](images/mobile/object_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/box_tracking) | [![instant_motion_tracking](images/mobile/instant_motion_tracking_android_small.gif)](https://google.github.io/mediapipe/solutions/instant_motion_tracking) | [![objectron](images/mobile/objectron_chair_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/objectron) | [![knift](images/mobile/template_matching_android_cpu_small.gif)](https://google.github.io/mediapipe/solutions/knift) +Hair Segmentation | Object Detection | Box Tracking | Instant Motion Tracking | Objectron | KNIFT +:-------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------: | :---: +[![hair_segmentation](images/mobile/hair_segmentation_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/hair_segmentation) | [![object_detection](images/mobile/object_detection_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/object_detection) | [![box_tracking](images/mobile/object_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/box_tracking) 
| [![instant_motion_tracking](images/mobile/instant_motion_tracking_android_small.gif)](https://google.github.io/mediapipe/solutions/instant_motion_tracking) | [![objectron](images/mobile/objectron_chair_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/objectron) | [![knift](images/mobile/template_matching_android_cpu_small.gif)](https://google.github.io/mediapipe/solutions/knift) -[]() | Android | iOS | Desktop | Python | Web | Coral -:---------------------------------------------------------------------------------------- | :-----: | :-: | :-----: | :----: | :-: | :---: -[Face Detection](https://google.github.io/mediapipe/solutions/face_detection) | ✅ | ✅ | ✅ | | ✅ | ✅ -[Face Mesh](https://google.github.io/mediapipe/solutions/face_mesh) | ✅ | ✅ | ✅ | ✅ | | -[Iris](https://google.github.io/mediapipe/solutions/iris) | ✅ | ✅ | ✅ | | ✅ | -[Hands](https://google.github.io/mediapipe/solutions/hands) | ✅ | ✅ | ✅ | ✅ | ✅ | -[Pose](https://google.github.io/mediapipe/solutions/pose) | ✅ | ✅ | ✅ | ✅ | ✅ | -[Hair Segmentation](https://google.github.io/mediapipe/solutions/hair_segmentation) | ✅ | | ✅ | | ✅ | -[Object Detection](https://google.github.io/mediapipe/solutions/object_detection) | ✅ | ✅ | ✅ | | | ✅ -[Box Tracking](https://google.github.io/mediapipe/solutions/box_tracking) | ✅ | ✅ | ✅ | | | -[Instant Motion Tracking](https://google.github.io/mediapipe/solutions/instant_motion_tracking) | ✅ | | | | | -[Objectron](https://google.github.io/mediapipe/solutions/objectron) | ✅ | | | | | -[KNIFT](https://google.github.io/mediapipe/solutions/knift) | ✅ | | | | | -[AutoFlip](https://google.github.io/mediapipe/solutions/autoflip) | | | ✅ | | | -[MediaSequence](https://google.github.io/mediapipe/solutions/media_sequence) | | | ✅ | | | -[YouTube 8M](https://google.github.io/mediapipe/solutions/youtube_8m) | | | ✅ | | | +[]() | [Android](https://google.github.io/mediapipe/getting_started/android) | [iOS](https://google.github.io/mediapipe/getting_started/ios) | [C++](https://google.github.io/mediapipe/getting_started/cpp) | [Python](https://google.github.io/mediapipe/getting_started/python) | [JS](https://google.github.io/mediapipe/getting_started/javascript) | [Coral](https://github.com/google/mediapipe/tree/master/mediapipe/examples/coral/README.md) +:---------------------------------------------------------------------------------------- | :-------------------------------------------------------------: | :-----------------------------------------------------: | :-----------------------------------------------------: | :-----------------------------------------------------------: | :-----------------------------------------------------------: | :--------------------------------------------------------------------: +[Face Detection](https://google.github.io/mediapipe/solutions/face_detection) | ✅ | ✅ | ✅ | | | ✅ +[Face Mesh](https://google.github.io/mediapipe/solutions/face_mesh) | ✅ | ✅ | ✅ | ✅ | ✅ | +[Iris](https://google.github.io/mediapipe/solutions/iris) | ✅ | ✅ | ✅ | | | +[Hands](https://google.github.io/mediapipe/solutions/hands) | ✅ | ✅ | ✅ | ✅ | ✅ | +[Pose](https://google.github.io/mediapipe/solutions/pose) | ✅ | ✅ | ✅ | ✅ | ✅ | +[Holistic](https://google.github.io/mediapipe/solutions/holistic) | ✅ | ✅ | ✅ | ✅ | ✅ | +[Hair Segmentation](https://google.github.io/mediapipe/solutions/hair_segmentation) | ✅ | | ✅ | | | +[Object Detection](https://google.github.io/mediapipe/solutions/object_detection) | ✅ | ✅ | ✅ | | | ✅ +[Box 
Tracking](https://google.github.io/mediapipe/solutions/box_tracking) | ✅ | ✅ | ✅ | | |
+[Instant Motion Tracking](https://google.github.io/mediapipe/solutions/instant_motion_tracking) | ✅ | | | | |
+[Objectron](https://google.github.io/mediapipe/solutions/objectron) | ✅ | | | | |
+[KNIFT](https://google.github.io/mediapipe/solutions/knift) | ✅ | | | | |
+[AutoFlip](https://google.github.io/mediapipe/solutions/autoflip) | | | ✅ | | |
+[MediaSequence](https://google.github.io/mediapipe/solutions/media_sequence) | | | ✅ | | |
+[YouTube 8M](https://google.github.io/mediapipe/solutions/youtube_8m) | | | ✅ | | |

See also
[MediaPipe Models and Model Cards](https://google.github.io/mediapipe/solutions/models)
@@ -55,16 +56,12 @@ for ML models released in MediaPipe.

## MediaPipe in Python

-MediaPipe Python package is available on
-[PyPI](https://pypi.org/project/mediapipe/), and can be installed simply by `pip
-install mediapipe` on Linux and macOS, as described in:
-
-* [MediaPipe Face Mesh](https://google.github.io/mediapipe/solutions/face_mesh#python)
-  ([colab](https://mediapipe.page.link/face_mesh_py_colab))
-* [MediaPipe Hands](https://google.github.io/mediapipe/solutions/hands#python)
-  ([colab](https://mediapipe.page.link/hands_py_colab))
-* [MediaPipe Pose](https://google.github.io/mediapipe/solutions/pose#python)
-  ([colab](https://mediapipe.page.link/pose_py_colab))
+MediaPipe offers customizable Python solutions as a prebuilt Python package on
+[PyPI](https://pypi.org/project/mediapipe/), which can be installed simply with
+`pip install mediapipe`. It also provides tools for users to build their own
+solutions. Please see
+[MediaPipe in Python](https://google.github.io/mediapipe/getting_started/python)
+for more info.

## MediaPipe on the Web

@@ -105,6 +102,8 @@ run code search using

## Publications

+* [Background Features in Google Meet, Powered by Web ML](https://ai.googleblog.com/2020/10/background-features-in-google-meet.html)
+  in Google AI Blog
* [MediaPipe 3D Face Transform](https://developers.googleblog.com/2020/09/mediapipe-3d-face-transform.html)
  in Google Developers Blog
* [Instant Motion Tracking With MediaPipe](https://developers.googleblog.com/2020/08/instant-motion-tracking-with-mediapipe.html)
diff --git a/docs/solutions/autoflip.md b/docs/solutions/autoflip.md
index 3dec7719b..0e118cc55 100644
--- a/docs/solutions/autoflip.md
+++ b/docs/solutions/autoflip.md
@@ -2,14 +2,20 @@
layout: default
title: AutoFlip (Saliency-aware Video Cropping)
parent: Solutions
-nav_order: 12
+nav_order: 13
---

# AutoFlip: Saliency-aware Video Cropping
{: .no_toc }

+
+<details open markdown="block">
+  <summary>
+    Table of contents
+  </summary>
+  {: .text-delta }
1. TOC
{:toc}
+</details>
--- ## Overview diff --git a/docs/solutions/box_tracking.md b/docs/solutions/box_tracking.md index 34fed0277..0e7550e7f 100644 --- a/docs/solutions/box_tracking.md +++ b/docs/solutions/box_tracking.md @@ -2,14 +2,20 @@ layout: default title: Box Tracking parent: Solutions -nav_order: 8 +nav_order: 9 --- # MediaPipe Box Tracking {: .no_toc } +
+<details open markdown="block">
+  <summary>
+    Table of contents
+  </summary>
+  {: .text-delta }
1. TOC
{:toc}
+</details>
--- ## Overview @@ -105,9 +111,8 @@ new detections to remove obsolete or duplicated boxes. ## Example Apps Please first see general instructions for -[Android](../getting_started/building_examples.md#android), [iOS](../getting_started/building_examples.md#ios) -and [desktop](../getting_started/building_examples.md#desktop) on how to build MediaPipe -examples. +[Android](../getting_started/android.md), [iOS](../getting_started/ios.md) and +[desktop](../getting_started/cpp.md) on how to build MediaPipe examples. Note: To visualize a graph, copy the graph and paste it into [MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how diff --git a/docs/solutions/face_detection.md b/docs/solutions/face_detection.md index 62abf0dd5..3a74bee0a 100644 --- a/docs/solutions/face_detection.md +++ b/docs/solutions/face_detection.md @@ -8,8 +8,14 @@ nav_order: 1 # MediaPipe Face Detection {: .no_toc } +
+<details open markdown="block">
+  <summary>
+    Table of contents
+  </summary>
+  {: .text-delta }
1. TOC
{:toc}
+</details>
--- ## Overview @@ -36,9 +42,8 @@ section. ## Example Apps Please first see general instructions for -[Android](../getting_started/building_examples.md#android), [iOS](../getting_started/building_examples.md#ios) -and [desktop](../getting_started/building_examples.md#desktop) on how to build MediaPipe -examples. +[Android](../getting_started/android.md), [iOS](../getting_started/ios.md) and +[desktop](../getting_started/cpp.md) on how to build MediaPipe examples. Note: To visualize a graph, copy the graph and paste it into [MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how diff --git a/docs/solutions/face_mesh.md b/docs/solutions/face_mesh.md index dd9c0bc42..bea135105 100644 --- a/docs/solutions/face_mesh.md +++ b/docs/solutions/face_mesh.md @@ -8,8 +8,14 @@ nav_order: 2 # MediaPipe Face Mesh {: .no_toc } +
+<details open markdown="block">
+  <summary>
+    Table of contents
+  </summary>
+  {: .text-delta }
1. TOC
{:toc}
+</details>
--- ## Overview @@ -206,11 +212,222 @@ The effect renderer is implemented as a MediaPipe | :---------------------------------------------------------------------: | | *Fig 4. An example of face effects rendered by the Face Geometry Effect Renderer.* | +## Solution APIs + +### Configuration Options + +Naming style and availability may differ slightly across platforms/languages. + +#### static_image_mode + +If set to `false`, the solution treats the input images as a video stream. It +will try to detect faces in the first input images, and upon a successful +detection further localizes the face landmarks. In subsequent images, once all +[max_num_faces](#max_num_faces) faces are detected and the corresponding face +landmarks are localized, it simply tracks those landmarks without invoking +another detection until it loses track of any of the faces. This reduces latency +and is ideal for processing video frames. If set to `true`, face detection runs +on every input image, ideal for processing a batch of static, possibly +unrelated, images. Default to `false`. + +#### max_num_faces + +Maximum number of faces to detect. Default to `1`. + +#### min_detection_confidence + +Minimum confidence value (`[0.0, 1.0]`) from the face detection model for the +detection to be considered successful. Default to `0.5`. + +#### min_tracking_confidence + +Minimum confidence value (`[0.0, 1.0]`) from the landmark-tracking model for the +face landmarks to be considered tracked successfully, or otherwise face +detection will be invoked automatically on the next input image. Setting it to a +higher value can increase robustness of the solution, at the expense of a higher +latency. Ignored if [static_image_mode](#static_image_mode) is `true`, where +face detection simply runs on every image. Default to `0.5`. + +### Output + +Naming style may differ slightly across platforms/languages. + +#### multi_face_landmarks + +Collection of detected/tracked faces, where each face is represented as a list +of 468 face landmarks and each landmark is composed of `x`, `y` and `z`. `x` and +`y` are normalized to `[0.0, 1.0]` by the image width and height respectively. +`z` represents the landmark depth with the depth at center of the head being the +origin, and the smaller the value the closer the landmark is to the camera. The +magnitude of `z` uses roughly the same scale as `x`. + +### Python Solution API + +Please first follow general [instructions](../getting_started/python.md) to +install MediaPipe Python package, then learn more in the companion [Colab] and +the following usage example. + +Supported configuration options: + +* [static_image_mode](#static_image_mode) +* [max_num_faces](#max_num_faces) +* [min_detection_confidence](#min_detection_confidence) +* [min_tracking_confidence](#min_tracking_confidence) + +```python +import cv2 +import mediapipe as mp +mp_drawing = mp.solutions.drawing_utils +mp_face_mesh = mp.solutions.face_mesh + +# For static images: +face_mesh = mp_face_mesh.FaceMesh( + static_image_mode=True, + max_num_faces=1, + min_detection_confidence=0.5) +drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1) +for idx, file in enumerate(file_list): + image = cv2.imread(file) + # Convert the BGR image to RGB before processing. + results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) + + # Print and draw face mesh landmarks on the image. 
+ if not results.multi_face_landmarks: + continue + annotated_image = image.copy() + for face_landmarks in results.multi_face_landmarks: + print('face_landmarks:', face_landmarks) + mp_drawing.draw_landmarks( + image=annotated_image, + landmark_list=face_landmarks, + connections=mp_face_mesh.FACE_CONNECTIONS, + landmark_drawing_spec=drawing_spec, + connection_drawing_spec=drawing_spec) + cv2.imwrite('/tmp/annotated_image' + str(idx) + '.png', annotated_image) +face_mesh.close() + +# For webcam input: +face_mesh = mp_face_mesh.FaceMesh( + min_detection_confidence=0.5, min_tracking_confidence=0.5) +drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1) +cap = cv2.VideoCapture(0) +while cap.isOpened(): + success, image = cap.read() + if not success: + print("Ignoring empty camera frame.") + # If loading a video, use 'break' instead of 'continue'. + continue + + # Flip the image horizontally for a later selfie-view display, and convert + # the BGR image to RGB. + image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB) + # To improve performance, optionally mark the image as not writeable to + # pass by reference. + image.flags.writeable = False + results = face_mesh.process(image) + + # Draw the face mesh annotations on the image. + image.flags.writeable = True + image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) + if results.multi_face_landmarks: + for face_landmarks in results.multi_face_landmarks: + mp_drawing.draw_landmarks( + image=image, + landmark_list=face_landmarks, + connections=mp_face_mesh.FACE_CONNECTIONS, + landmark_drawing_spec=drawing_spec, + connection_drawing_spec=drawing_spec) + cv2.imshow('MediaPipe FaceMesh', image) + if cv2.waitKey(5) & 0xFF == 27: + break +face_mesh.close() +cap.release() +``` + +### JavaScript Solution API + +Please first see general [introduction](../getting_started/javascript.md) on +MediaPipe in JavaScript, then learn more in the companion [web demo] and the +following usage example. + +Supported configuration options: + +* [maxNumFaces](#max_num_faces) +* [minDetectionConfidence](#min_detection_confidence) +* [minTrackingConfidence](#min_tracking_confidence) + +```html + + + + + + + + + + + +
+<!DOCTYPE html>
+<html>
+<head>
+  <meta charset="utf-8">
+  <script src="https://cdn.jsdelivr.net/npm/@mediapipe/camera_utils/camera_utils.js" crossorigin="anonymous"></script>
+  <script src="https://cdn.jsdelivr.net/npm/@mediapipe/control_utils/control_utils.js" crossorigin="anonymous"></script>
+  <script src="https://cdn.jsdelivr.net/npm/@mediapipe/drawing_utils/drawing_utils.js" crossorigin="anonymous"></script>
+  <script src="https://cdn.jsdelivr.net/npm/@mediapipe/face_mesh/face_mesh.js" crossorigin="anonymous"></script>
+</head>
+
+<body>
+  <div class="container">
+    <video class="input_video"></video>
+    <canvas class="output_canvas" width="1280px" height="720px"></canvas>
+  </div>
+</body>
+</html>
+ + +``` + +```javascript + +``` + ## Example Apps Please first see general instructions for -[Android](../getting_started/building_examples.md#android), [iOS](../getting_started/building_examples.md#ios) and -[desktop](../getting_started/building_examples.md#desktop) on how to build MediaPipe examples. +[Android](../getting_started/android.md), [iOS](../getting_started/ios.md) and +[desktop](../getting_started/cpp.md) on how to build MediaPipe examples. Note: To visualize a graph, copy the graph and paste it into [MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how @@ -254,99 +471,6 @@ and for iOS modify `kNumFaces` in Tip: Maximum number of faces to detect/process is set to 1 by default. To change it, in the graph file modify the option of `ConstantSidePacketCalculator`. -#### Python - -MediaPipe Python package is available on -[PyPI](https://pypi.org/project/mediapipe/), and can be installed simply by `pip -install mediapipe` on Linux and macOS, as described below and in this -[colab](https://mediapipe.page.link/face_mesh_py_colab). If you do need to build -the Python package from source, see -[additional instructions](../getting_started/building_examples.md#python). - -Activate a Python virtual environment: - -```bash -$ python3 -m venv mp_env && source mp_env/bin/activate -``` - -Install MediaPipe Python package: - -```bash -(mp_env)$ pip install mediapipe -``` - -Run the following Python code: - - - -```python -import cv2 -import mediapipe as mp -mp_drawing = mp.solutions.drawing_utils -mp_face_mesh = mp.solutions.face_mesh - -# For static images: -face_mesh = mp_face_mesh.FaceMesh( - static_image_mode=True, - max_num_faces=1, - min_detection_confidence=0.5) -drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1) -for idx, file in enumerate(file_list): - image = cv2.imread(file) - # Convert the BGR image to RGB before processing. - results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) - - # Print and draw face mesh landmarks on the image. - if not results.multi_face_landmarks: - continue - annotated_image = image.copy() - for face_landmarks in results.multi_face_landmarks: - print('face_landmarks:', face_landmarks) - mp_drawing.draw_landmarks( - image=annotated_image, - landmark_list=face_landmarks, - connections=mp_face_mesh.FACE_CONNECTIONS, - landmark_drawing_spec=drawing_spec, - connection_drawing_spec=drawing_spec) - cv2.imwrite('/tmp/annotated_image' + str(idx) + '.png', image) -face_mesh.close() - -# For webcam input: -face_mesh = mp_face_mesh.FaceMesh( - min_detection_confidence=0.5, min_tracking_confidence=0.5) -drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1) -cap = cv2.VideoCapture(0) -while cap.isOpened(): - success, image = cap.read() - if not success: - break - - # Flip the image horizontally for a later selfie-view display, and convert - # the BGR image to RGB. - image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB) - # To improve performance, optionally mark the image as not writeable to - # pass by reference. - image.flags.writeable = False - results = face_mesh.process(image) - - # Draw the face mesh annotations on the image. 
- image.flags.writeable = True - image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) - if results.multi_face_landmarks: - for face_landmarks in results.multi_face_landmarks: - mp_drawing.draw_landmarks( - image=image, - landmark_list=face_landmarks, - connections=mp_face_mesh.FACE_CONNECTIONS, - landmark_drawing_spec=drawing_spec, - connection_drawing_spec=drawing_spec) - cv2.imshow('MediaPipe FaceMesh', image) - if cv2.waitKey(5) & 0xFF == 27: - break -face_mesh.close() -cap.release() -``` - ### Face Effect Example Face effect example showcases real-time mobile face effect application use case @@ -379,3 +503,7 @@ only works for a single face. For visual reference, please refer to *Fig. 4*. [OBJ](https://github.com/google/mediapipe/tree/master/mediapipe/modules/face_geometry/data/canonical_face_model.obj), [UV visualization](https://github.com/google/mediapipe/tree/master/mediapipe/modules/face_geometry/data/canonical_face_model_uv_visualization.png) * [Models and model cards](./models.md#face_mesh) + +[Colab]:https://mediapipe.page.link/face_mesh_py_colab + +[web demo]:https://code.mediapipe.dev/codepen/face_mesh diff --git a/docs/solutions/hair_segmentation.md b/docs/solutions/hair_segmentation.md index a7200c2d8..5e2e4a7c5 100644 --- a/docs/solutions/hair_segmentation.md +++ b/docs/solutions/hair_segmentation.md @@ -2,14 +2,20 @@ layout: default title: Hair Segmentation parent: Solutions -nav_order: 6 +nav_order: 7 --- # MediaPipe Hair Segmentation {: .no_toc } +
+<details open markdown="block">
+  <summary>
+    Table of contents
+  </summary>
+  {: .text-delta }
1. TOC
{:toc}
+</details>
--- ![hair_segmentation_android_gpu_gif](../images/mobile/hair_segmentation_android_gpu.gif) @@ -17,9 +23,8 @@ nav_order: 6 ## Example Apps Please first see general instructions for -[Android](../getting_started/building_examples.md#android), [iOS](../getting_started/building_examples.md#ios) -and [desktop](../getting_started/building_examples.md#desktop) on how to build MediaPipe -examples. +[Android](../getting_started/android.md), [iOS](../getting_started/ios.md) and +[desktop](../getting_started/cpp.md) on how to build MediaPipe examples. Note: To visualize a graph, copy the graph and paste it into [MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how diff --git a/docs/solutions/hands.md b/docs/solutions/hands.md index f6025fb1b..3d07411c2 100644 --- a/docs/solutions/hands.md +++ b/docs/solutions/hands.md @@ -8,8 +8,14 @@ nav_order: 4 # MediaPipe Hands {: .no_toc } +
+<details open markdown="block">
+  <summary>
+    Table of contents
+  </summary>
+  {: .text-delta }
1. TOC
{:toc}
+</details>
--- ## Overview @@ -126,16 +132,239 @@ and provide additional supervision on the nature of hand geometry, we also render a high-quality synthetic hand model over various backgrounds and map it to the corresponding 3D coordinates. +![hand_landmarks.png](../images/mobile/hand_landmarks.png) | +:--------------------------------------------------------: | +*Fig 2. 21 hand landmarks.* | + | ![hand_crops.png](../images/mobile/hand_crops.png) | | :-------------------------------------------------------------------------: | -| *Fig 2. Top: Aligned hand crops passed to the tracking network with ground truth annotation. Bottom: Rendered synthetic hand images with ground truth annotation.* | +| *Fig 3. Top: Aligned hand crops passed to the tracking network with ground | +: truth annotation. Bottom\: Rendered synthetic hand images with ground truth : +: annotation.* : + +## Solution APIs + +### Configuration Options + +Naming style and availability may differ slightly across platforms/languages. + +#### static_image_mode + +If set to `false`, the solution treats the input images as a video stream. It +will try to detect hands in the first input images, and upon a successful +detection further localizes the hand landmarks. In subsequent images, once all +[max_num_hands](#max_num_hands) hands are detected and the corresponding hand +landmarks are localized, it simply tracks those landmarks without invoking +another detection until it loses track of any of the hands. This reduces latency +and is ideal for processing video frames. If set to `true`, hand detection runs +on every input image, ideal for processing a batch of static, possibly +unrelated, images. Default to `false`. + +#### max_num_hands + +Maximum number of hands to detect. Default to `2`. + +#### min_detection_confidence + +Minimum confidence value (`[0.0, 1.0]`) from the hand detection model for the +detection to be considered successful. Default to `0.5`. + +#### min_tracking_confidence: + +Minimum confidence value (`[0.0, 1.0]`) from the landmark-tracking model for the +hand landmarks to be considered tracked successfully, or otherwise hand +detection will be invoked automatically on the next input image. Setting it to a +higher value can increase robustness of the solution, at the expense of a higher +latency. Ignored if [static_image_mode](#static_image_mode) is `true`, where +hand detection simply runs on every image. Default to `0.5`. + +### Output + +Naming style may differ slightly across platforms/languages. + +#### multi_hand_landmarks + +Collection of detected/tracked hands, where each hand is represented as a list +of 21 hand landmarks and each landmark is composed of `x`, `y` and `z`. `x` and +`y` are normalized to `[0.0, 1.0]` by the image width and height respectively. +`z` represents the landmark depth with the depth at the wrist being the origin, +and the smaller the value the closer the landmark is to the camera. The +magnitude of `z` uses roughly the same scale as `x`. + +#### multi_handedness + +Collection of handedness of the detected/tracked hands (i.e. is it a left or +right hand). Each hand is composed of `label` and `score`. `label` is a string +of value either `"Left"` or `"Right"`. `score` is the estimated probability of +the predicted handedness and is always greater than or equal to `0.5` (and the +opposite handedness has an estimated probability of `1 - score`). 
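+
+For example, reading the handedness of each detected hand could look like the
+following sketch ('/tmp/hand.png' is a placeholder path; the image is flipped
+for correct handedness, per the note below):
+
+```python
+import cv2
+import mediapipe as mp
+
+hands = mp.solutions.hands.Hands(static_image_mode=True, max_num_hands=2)
+image = cv2.flip(cv2.imread('/tmp/hand.png'), 1)
+results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
+
+if results.multi_handedness:
+  for handedness in results.multi_handedness:
+    top = handedness.classification[0]
+    # 'label' is 'Left' or 'Right'; 'score' is >= 0.5 as described above.
+    print(top.label, top.score)
+hands.close()
+```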
+
+Note that handedness is determined assuming the input image is mirrored, i.e.,
+taken with a front-facing/selfie camera with images flipped horizontally. If
+that is not the case, please swap the handedness output in the application.
+
+### Python Solution API
+
+Please first follow general [instructions](../getting_started/python.md) to
+install the MediaPipe Python package, then learn more in the companion [Colab]
+and the following usage example.
+
+Supported configuration options:
+
+* [static_image_mode](#static_image_mode)
+* [max_num_hands](#max_num_hands)
+* [min_detection_confidence](#min_detection_confidence)
+* [min_tracking_confidence](#min_tracking_confidence)
+
+```python
+import cv2
+import mediapipe as mp
+mp_drawing = mp.solutions.drawing_utils
+mp_hands = mp.solutions.hands
+
+# For static images:
+hands = mp_hands.Hands(
+    static_image_mode=True,
+    max_num_hands=2,
+    min_detection_confidence=0.5)
+for idx, file in enumerate(file_list):
+  # Read an image, flip it around y-axis for correct handedness output (see
+  # above).
+  image = cv2.flip(cv2.imread(file), 1)
+  # Convert the BGR image to RGB before processing.
+  results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
+
+  # Print handedness and draw hand landmarks on the image.
+  print('Handedness:', results.multi_handedness)
+  if not results.multi_hand_landmarks:
+    continue
+  image_height, image_width, _ = image.shape
+  annotated_image = image.copy()
+  for hand_landmarks in results.multi_hand_landmarks:
+    print('hand_landmarks:', hand_landmarks)
+    print(
+        f'Index finger tip coordinates: (',
+        f'{hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].x * image_width}, '
+        f'{hand_landmarks.landmark[mp_hands.HandLandmark.INDEX_FINGER_TIP].y * image_height})'
+    )
+    mp_drawing.draw_landmarks(
+        annotated_image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
+  cv2.imwrite(
+      '/tmp/annotated_image' + str(idx) + '.png', cv2.flip(annotated_image, 1))
+hands.close()
+
+# For webcam input:
+hands = mp_hands.Hands(
+    min_detection_confidence=0.5, min_tracking_confidence=0.5)
+cap = cv2.VideoCapture(0)
+while cap.isOpened():
+  success, image = cap.read()
+  if not success:
+    print("Ignoring empty camera frame.")
+    # If loading a video, use 'break' instead of 'continue'.
+    continue
+
+  # Flip the image horizontally for a later selfie-view display, and convert
+  # the BGR image to RGB.
+  image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
+  # To improve performance, optionally mark the image as not writeable to
+  # pass by reference.
+  image.flags.writeable = False
+  results = hands.process(image)
+
+  # Draw the hand annotations on the image.
+  image.flags.writeable = True
+  image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+  if results.multi_hand_landmarks:
+    for hand_landmarks in results.multi_hand_landmarks:
+      mp_drawing.draw_landmarks(
+          image, hand_landmarks, mp_hands.HAND_CONNECTIONS)
+  cv2.imshow('MediaPipe Hands', image)
+  if cv2.waitKey(5) & 0xFF == 27:
+    break
+hands.close()
+cap.release()
+```
+
+### JavaScript Solution API
+
+Please first see general [introduction](../getting_started/javascript.md) on
+MediaPipe in JavaScript, then learn more in the companion [web demo] and a
+[fun application], and the following usage example.
+
+Supported configuration options:
+
+* [maxNumHands](#max_num_hands)
+* [minDetectionConfidence](#min_detection_confidence)
+* [minTrackingConfidence](#min_tracking_confidence)
+
+```html
+<!DOCTYPE html>
+<html>
+<head>
+  <meta charset="utf-8">
+  <script src="https://cdn.jsdelivr.net/npm/@mediapipe/camera_utils/camera_utils.js" crossorigin="anonymous"></script>
+  <script src="https://cdn.jsdelivr.net/npm/@mediapipe/control_utils/control_utils.js" crossorigin="anonymous"></script>
+  <script src="https://cdn.jsdelivr.net/npm/@mediapipe/drawing_utils/drawing_utils.js" crossorigin="anonymous"></script>
+  <script src="https://cdn.jsdelivr.net/npm/@mediapipe/hands/hands.js" crossorigin="anonymous"></script>
+</head>
+
+<body>
+  <div class="container">
+    <video class="input_video"></video>
+    <canvas class="output_canvas" width="1280px" height="720px"></canvas>
+  </div>
+</body>
+</html>
+ + +``` + +```javascript + +``` ## Example Apps Please first see general instructions for -[Android](../getting_started/building_examples.md#android), [iOS](../getting_started/building_examples.md#ios) -and [desktop](../getting_started/building_examples.md#desktop) on how to build MediaPipe -examples. +[Android](../getting_started/android.md), [iOS](../getting_started/ios.md) and +[desktop](../getting_started/cpp.md) on how to build MediaPipe examples. Note: To visualize a graph, copy the graph and paste it into [MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how @@ -186,99 +415,6 @@ and for iOS modify `kNumHands` in Tip: Maximum number of hands to detect/process is set to 2 by default. To change it, in the graph file modify the option of `ConstantSidePacketCalculator`. -### Python - -MediaPipe Python package is available on -[PyPI](https://pypi.org/project/mediapipe/), and can be installed simply by `pip -install mediapipe` on Linux and macOS, as described below and in this -[colab](https://mediapipe.page.link/hands_py_colab). If you do need to build the -Python package from source, see -[additional instructions](../getting_started/building_examples.md#python). - -Activate a Python virtual environment: - -```bash -$ python3 -m venv mp_env && source mp_env/bin/activate -``` - -Install MediaPipe Python package: - -```bash -(mp_env)$ pip install mediapipe -``` - -Run the following Python code: - - - -```python -import cv2 -import mediapipe as mp -mp_drawing = mp.solutions.drawing_utils -mp_hands = mp.solutions.hands - -# For static images: -hands = mp_hands.Hands( - static_image_mode=True, - max_num_hands=2, - min_detection_confidence=0.7) -for idx, file in enumerate(file_list): - # Read an image, flip it around y-axis for correct handedness output (see - # above). - image = cv2.flip(cv2.imread(file), 1) - # Convert the BGR image to RGB before processing. - results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) - - # Print handedness and draw hand landmarks on the image. - print('handedness:', results.multi_handedness) - if not results.multi_hand_landmarks: - continue - annotated_image = image.copy() - for hand_landmarks in results.multi_hand_landmarks: - print('hand_landmarks:', hand_landmarks) - mp_drawing.draw_landmarks( - annotated_image, hand_landmarks, mp_hands.HAND_CONNECTIONS) - cv2.imwrite( - '/tmp/annotated_image' + str(idx) + '.png', cv2.flip(image, 1)) -hands.close() - -# For webcam input: -hands = mp_hands.Hands( - min_detection_confidence=0.7, min_tracking_confidence=0.5) -cap = cv2.VideoCapture(0) -while cap.isOpened(): - success, image = cap.read() - if not success: - break - - # Flip the image horizontally for a later selfie-view display, and convert - # the BGR image to RGB. - image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB) - # To improve performance, optionally mark the image as not writeable to - # pass by reference. - image.flags.writeable = False - results = hands.process(image) - - # Draw the hand annotations on the image. - image.flags.writeable = True - image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) - if results.multi_hand_landmarks: - for hand_landmarks in results.multi_hand_landmarks: - mp_drawing.draw_landmarks( - image, hand_landmarks, mp_hands.HAND_CONNECTIONS) - cv2.imshow('MediaPipe Hands', image) - if cv2.waitKey(5) & 0xFF == 27: - break -hands.close() -cap.release() -``` - -Tip: Use command `deactivate` to exit the Python virtual environment. 
- -### Web - -Please refer to [these instructions](../index.md#mediapipe-on-the-web). - ## Resources * Google AI Blog: @@ -289,3 +425,8 @@ Please refer to [these instructions](../index.md#mediapipe-on-the-web). [MediaPipe Hands: On-device Real-time Hand Tracking](https://arxiv.org/abs/2006.10214) ([presentation](https://www.youtube.com/watch?v=I-UOrvxxXEk)) * [Models and model cards](./models.md#hands) + +[Colab]:https://mediapipe.page.link/hands_py_colab + +[web demo]:https://code.mediapipe.dev/codepen/hands +[fun application]:https://code.mediapipe.dev/codepen/defrost diff --git a/docs/solutions/holistic.md b/docs/solutions/holistic.md new file mode 100644 index 000000000..e0d941e59 --- /dev/null +++ b/docs/solutions/holistic.md @@ -0,0 +1,413 @@ +--- +layout: default +title: Holistic +parent: Solutions +nav_order: 6 +--- + +# MediaPipe Holistic +{: .no_toc } + +
+<details close markdown="block">
+  <summary>
+    Table of contents
+  </summary>
+  {: .text-delta }
+1. TOC
+{:toc}
+</details>
+---
+
+## Overview
+
+Live perception of simultaneous [human pose](./pose.md),
+[face landmarks](./face_mesh.md), and [hand tracking](./hands.md) in real-time
+on mobile devices can enable various modern life applications: fitness and
+sport analysis, gesture control and sign language recognition, and augmented
+reality try-on and effects. MediaPipe already offers fast and accurate, yet
+separate, solutions for these tasks. Combining them all in real-time into a
+semantically consistent end-to-end solution is a uniquely difficult problem
+requiring simultaneous inference of multiple, dependent neural networks.
+
+![holistic_sports_and_gestures_example.gif](../images/mobile/holistic_sports_and_gestures_example.gif) |
+:----------------------------------------------------------------------------------------------------: |
+*Fig 1. Example of MediaPipe Holistic.* |
+
+## ML Pipeline
+
+The MediaPipe Holistic pipeline integrates separate models for
+[pose](./pose.md), [face](./face_mesh.md) and [hand](./hands.md) components,
+each of which is optimized for its particular domain. However, because of
+their different specializations, the input to one component is not well-suited
+for the others. The pose estimation model, for example, takes a downscaled,
+fixed-resolution video frame (256x256) as input. But if one were to crop the
+hand and face regions from that image to pass to their respective models, the
+image resolution would be too low for accurate articulation. Therefore, we
+designed MediaPipe Holistic as a multi-stage pipeline, which treats the
+different regions using a region-appropriate image resolution.
+
+First, we estimate the human pose (top of Fig 2) with [BlazePose](./pose.md)’s
+pose detector and subsequent landmark model. Then, using the inferred pose
+landmarks, we derive three regions of interest (ROI): one crop for each hand
+(2x) and one for the face, and employ a re-crop model to improve each ROI. We
+then crop the full-resolution input frame to these ROIs and apply task-specific
+face and hand models to estimate their corresponding landmarks. Finally, we
+merge all landmarks with those of the pose model to yield the full 540+
+landmarks.
+
+![holistic_pipeline_example.jpg](../images/mobile/holistic_pipeline_example.jpg) |
+:------------------------------------------------------------------------------: |
+*Fig 2. MediaPipe Holistic Pipeline Overview.* |
+
+To streamline the identification of ROIs for face and hands, we utilize a
+tracking approach similar to the one we use for the standalone
+[face](./face_mesh.md) and [hand](./hands.md) pipelines. It assumes that the
+object doesn't move significantly between frames and uses an estimate from the
+previous frame as a guide to the object region in the current one. However,
+during fast movements, the tracker can lose the target, which requires the
+detector to re-localize it in the image. MediaPipe Holistic uses
+[pose](./pose.md) prediction (on every frame) as an additional ROI prior to
+reduce the response time of the pipeline when reacting to fast movements. This
+also enables the model to retain semantic consistency across the body and its
+parts by preventing a mixup between the left and right hands, or between the
+body parts of one person in the frame and another.
+
+In addition, the resolution of the input frame to the pose model is low enough
+that the resulting ROIs for face and hands are still too inaccurate to guide
+the re-cropping of those regions, which require a precise input crop to remain
+lightweight. To close this accuracy gap we use lightweight face and hand
+re-crop models that play the role of
+[spatial transformers](https://arxiv.org/abs/1506.02025) and cost only ~10% of
+the corresponding model's inference time.
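+
+Conceptually, a frame flows through the pipeline as in the following
+simplified sketch (illustrative pseudocode only; the helper names below are
+not actual MediaPipe APIs):
+
+```python
+def holistic_landmarks(frame, prev_pose_landmarks=None):
+  # Stage 1: pose detector + landmark model on a low, fixed-resolution copy.
+  pose = blazepose(resize(frame, (256, 256)), prev_pose_landmarks)
+
+  # Stage 2: derive coarse ROIs from the pose landmarks, then refine each one
+  # with a lightweight re-crop model.
+  rois = {name: recrop(frame, roi_from_pose(pose, name))
+          for name in ('face', 'left_hand', 'right_hand')}
+
+  # Stage 3: run the task-specific models on full-resolution crops.
+  face = face_landmark_model(crop(frame, rois['face']))
+  left_hand = hand_landmark_model(crop(frame, rois['left_hand']))
+  right_hand = hand_landmark_model(crop(frame, rois['right_hand']))
+
+  # Stage 4: merge everything into the full 540+ landmark output.
+  return merge(pose, face, left_hand, right_hand)
+```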
+
+The pipeline is implemented as a MediaPipe
+[graph](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/holistic_tracking/holistic_tracking_gpu.pbtxt)
+that uses a
+[holistic landmark subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/modules/holistic_landmark/holistic_landmark_gpu.pbtxt)
+from the
+[holistic landmark module](https://github.com/google/mediapipe/tree/master/mediapipe/modules/holistic_landmark)
+and renders using a dedicated
+[holistic renderer subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/holistic_tracking/holistic_tracking_to_render_data.pbtxt).
+The
+[holistic landmark subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/modules/holistic_landmark/holistic_landmark_gpu.pbtxt)
+internally uses a
+[pose landmark module](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark),
+a
+[hand landmark module](https://github.com/google/mediapipe/tree/master/mediapipe/modules/hand_landmark)
+and a
+[face landmark module](https://github.com/google/mediapipe/tree/master/mediapipe/modules/face_landmark/).
+Please check them for implementation details.
+
+Note: To visualize a graph, copy the graph and paste it into
+[MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how
+to visualize its associated subgraphs, please see
+[visualizer documentation](../tools/visualizer.md).
+
+## Models
+
+### Landmark Models
+
+MediaPipe Holistic utilizes the pose, face and hand landmark models in
+[MediaPipe Pose](./pose.md), [MediaPipe Face Mesh](./face_mesh.md) and
+[MediaPipe Hands](./hands.md) respectively to generate a total of 543 landmarks
+(33 pose landmarks, 468 face landmarks, and 21 hand landmarks per hand:
+33 + 468 + 2 × 21 = 543).
+
+### Hand Recrop Model
+
+For cases when the accuracy of the pose model is low enough that the resulting
+ROIs for hands are still too inaccurate, we run an additional lightweight hand
+re-crop model that plays the role of a
+[spatial transformer](https://arxiv.org/abs/1506.02025) and costs only ~10% of
+the hand model's inference time.
+
+## Solution APIs
+
+### Cross-platform Configuration Options
+
+Naming style and availability may differ slightly across platforms/languages.
+
+#### static_image_mode
+
+If set to `false`, the solution treats the input images as a video stream. It
+will try to detect the most prominent person in the very first images, and upon
+a successful detection further localizes the pose and other landmarks. In
+subsequent images, it then simply tracks those landmarks without invoking
+another detection until it loses track, which reduces computation and latency.
+If set to `true`, person detection runs on every input image, which is ideal
+for processing a batch of static, possibly unrelated, images. Defaults to
+`false`.
+
+#### upper_body_only
+
+If set to `true`, the solution outputs only the 25 upper-body pose landmarks
+(535 landmarks in total) instead of the full set of 33 pose landmarks (543 in
+total). Note that upper-body-only prediction may be more accurate for use cases
+where the lower-body parts are mostly out of view. Defaults to `false`.
+
+#### smooth_landmarks
+
+If set to `true`, the solution filters pose landmarks across different input
+images to reduce jitter, but it is ignored if
+[static_image_mode](#static_image_mode) is also set to `true`. Defaults to
+`true`.
+
+#### min_detection_confidence
+
+Minimum confidence value (`[0.0, 1.0]`) from the person-detection model for the
+detection to be considered successful. Defaults to `0.5`.
+
+#### min_tracking_confidence
+
+Minimum confidence value (`[0.0, 1.0]`) from the landmark-tracking model for
+the pose landmarks to be considered tracked successfully; otherwise person
+detection will be invoked automatically on the next input image. Setting it to
+a higher value can increase robustness of the solution, at the expense of a
+higher latency. Ignored if [static_image_mode](#static_image_mode) is `true`,
+where person detection simply runs on every image. Defaults to `0.5`.
+
+### Output
+
+Naming style may differ slightly across platforms/languages.
+
+#### pose_landmarks
+
+A list of pose landmarks. Each landmark consists of the following:
+
+*   `x` and `y`: Landmark coordinates normalized to `[0.0, 1.0]` by the image
+    width and height respectively.
+*   `z`: Should be discarded as currently the model is not fully trained to
+    predict depth, but this is something on the roadmap.
+*   `visibility`: A value in `[0.0, 1.0]` indicating the likelihood of the
+    landmark being visible (present and not occluded) in the image.
+
+#### face_landmarks
+
+A list of 468 face landmarks. Each landmark consists of `x`, `y` and `z`. `x`
+and `y` are normalized to `[0.0, 1.0]` by the image width and height
+respectively. `z` represents the landmark depth with the depth at the center of
+the head being the origin, and the smaller the value the closer the landmark is
+to the camera. The magnitude of `z` uses roughly the same scale as `x`.
+
+#### left_hand_landmarks
+
+A list of 21 hand landmarks on the left hand. Each landmark consists of `x`,
+`y` and `z`. `x` and `y` are normalized to `[0.0, 1.0]` by the image width and
+height respectively. `z` represents the landmark depth with the depth at the
+wrist being the origin, and the smaller the value the closer the landmark is to
+the camera. The magnitude of `z` uses roughly the same scale as `x`.
+
+#### right_hand_landmarks
+
+A list of 21 hand landmarks on the right hand, in the same representation as
+[left_hand_landmarks](#left_hand_landmarks).
+
+### Python Solution API
+
+Please first follow general [instructions](../getting_started/python.md) to
+install the MediaPipe Python package, then learn more in the companion [Colab]
+and the following usage example.
+
+Supported configuration options:
+
+*   [static_image_mode](#static_image_mode)
+*   [upper_body_only](#upper_body_only)
+*   [smooth_landmarks](#smooth_landmarks)
+*   [min_detection_confidence](#min_detection_confidence)
+*   [min_tracking_confidence](#min_tracking_confidence)
+
+```python
+import cv2
+import mediapipe as mp
+mp_drawing = mp.solutions.drawing_utils
+mp_holistic = mp.solutions.holistic
+
+# For static images:
+holistic = mp_holistic.Holistic(static_image_mode=True)
+for idx, file in enumerate(file_list):
+  image = cv2.imread(file)
+  image_height, image_width, _ = image.shape
+  # Convert the BGR image to RGB before processing.
+  results = holistic.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
+
+  if results.pose_landmarks:
+    print(
+        f'Nose coordinates: ('
+        f'{results.pose_landmarks.landmark[mp_holistic.PoseLandmark.NOSE].x * image_width}, '
+        f'{results.pose_landmarks.landmark[mp_holistic.PoseLandmark.NOSE].y * image_height})'
+    )
+  # Draw pose, left and right hands, and face landmarks on the image.
+  annotated_image = image.copy()
+  mp_drawing.draw_landmarks(
+      annotated_image, results.face_landmarks, mp_holistic.FACE_CONNECTIONS)
+  mp_drawing.draw_landmarks(
+      annotated_image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS)
+  mp_drawing.draw_landmarks(
+      annotated_image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS)
+  mp_drawing.draw_landmarks(
+      annotated_image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS)
+  cv2.imwrite('/tmp/annotated_image' + str(idx) + '.png', annotated_image)
+holistic.close()
+
+# For webcam input:
+holistic = mp_holistic.Holistic(
+    min_detection_confidence=0.5, min_tracking_confidence=0.5)
+cap = cv2.VideoCapture(0)
+while cap.isOpened():
+  success, image = cap.read()
+  if not success:
+    print("Ignoring empty camera frame.")
+    # If loading a video, use 'break' instead of 'continue'.
+    continue
+
+  # Flip the image horizontally for a later selfie-view display, and convert
+  # the BGR image to RGB.
+  image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB)
+  # To improve performance, optionally mark the image as not writeable to
+  # pass by reference.
+  image.flags.writeable = False
+  results = holistic.process(image)
+
+  # Draw landmark annotation on the image.
+  image.flags.writeable = True
+  image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
+  mp_drawing.draw_landmarks(
+      image, results.face_landmarks, mp_holistic.FACE_CONNECTIONS)
+  mp_drawing.draw_landmarks(
+      image, results.left_hand_landmarks, mp_holistic.HAND_CONNECTIONS)
+  mp_drawing.draw_landmarks(
+      image, results.right_hand_landmarks, mp_holistic.HAND_CONNECTIONS)
+  mp_drawing.draw_landmarks(
+      image, results.pose_landmarks, mp_holistic.POSE_CONNECTIONS)
+  cv2.imshow('MediaPipe Holistic', image)
+  if cv2.waitKey(5) & 0xFF == 27:
+    break
+holistic.close()
+cap.release()
+```
+
+### JavaScript Solution API
+
+Please first see general [introduction](../getting_started/javascript.md) on
+MediaPipe in JavaScript, then learn more in the companion [web demo] and the
+following usage example.
+
+Supported configuration options:
+
+*   [upperBodyOnly](#upper_body_only)
+*   [smoothLandmarks](#smooth_landmarks)
+*   [minDetectionConfidence](#min_detection_confidence)
+*   [minTrackingConfidence](#min_tracking_confidence)
+
+```html
+<!DOCTYPE html>
+<html>
+<head>
+  <meta charset="utf-8">
+  <script src="https://cdn.jsdelivr.net/npm/@mediapipe/camera_utils/camera_utils.js" crossorigin="anonymous"></script>
+  <script src="https://cdn.jsdelivr.net/npm/@mediapipe/control_utils/control_utils.js" crossorigin="anonymous"></script>
+  <script src="https://cdn.jsdelivr.net/npm/@mediapipe/drawing_utils/drawing_utils.js" crossorigin="anonymous"></script>
+  <script src="https://cdn.jsdelivr.net/npm/@mediapipe/holistic/holistic.js" crossorigin="anonymous"></script>
+</head>
+
+<body>
+  <div class="container">
+    <video class="input_video"></video>
+    <canvas class="output_canvas" width="1280px" height="720px"></canvas>
+  </div>
+</body>
+</html>
+```
+
+```javascript
+// A minimal usage sketch. It assumes the HTML above has loaded the
+// @mediapipe/camera_utils, @mediapipe/drawing_utils and @mediapipe/holistic
+// scripts, which provide Camera, the drawing helpers, the Holistic class and
+// the connection constants used below.
+const videoElement = document.getElementsByClassName('input_video')[0];
+const canvasElement = document.getElementsByClassName('output_canvas')[0];
+const canvasCtx = canvasElement.getContext('2d');
+
+function onResults(results) {
+  canvasCtx.save();
+  canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
+  canvasCtx.drawImage(
+      results.image, 0, 0, canvasElement.width, canvasElement.height);
+  drawConnectors(canvasCtx, results.poseLandmarks, POSE_CONNECTIONS,
+                 {color: '#00FF00', lineWidth: 4});
+  drawLandmarks(canvasCtx, results.poseLandmarks,
+                {color: '#FF0000', lineWidth: 2});
+  drawConnectors(canvasCtx, results.faceLandmarks, FACEMESH_TESSELATION,
+                 {color: '#C0C0C070', lineWidth: 1});
+  drawConnectors(canvasCtx, results.leftHandLandmarks, HAND_CONNECTIONS,
+                 {color: '#CC0000', lineWidth: 5});
+  drawConnectors(canvasCtx, results.rightHandLandmarks, HAND_CONNECTIONS,
+                 {color: '#00CC00', lineWidth: 5});
+  canvasCtx.restore();
+}
+
+const holistic = new Holistic({locateFile: (file) => {
+  return `https://cdn.jsdelivr.net/npm/@mediapipe/holistic/${file}`;
+}});
+holistic.setOptions({
+  upperBodyOnly: false,
+  smoothLandmarks: true,
+  minDetectionConfidence: 0.5,
+  minTrackingConfidence: 0.5
+});
+holistic.onResults(onResults);
+
+const camera = new Camera(videoElement, {
+  onFrame: async () => {
+    await holistic.send({image: videoElement});
+  },
+  width: 1280,
+  height: 720
+});
+camera.start();
+```
+
+## Example Apps
+
+Please first see general instructions for
+[Android](../getting_started/android.md), [iOS](../getting_started/ios.md), and
+[desktop](../getting_started/cpp.md) on how to build MediaPipe examples.
+
+Note: To visualize a graph, copy the graph and paste it into
+[MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how
+to visualize its associated subgraphs, please see
+[visualizer documentation](../tools/visualizer.md).
+
+### Mobile
+
+*   Graph:
+    [`mediapipe/graphs/holistic_tracking/holistic_tracking_gpu.pbtxt`](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/holistic_tracking/holistic_tracking_gpu.pbtxt)
+*   Android target:
+    [(or download prebuilt ARM64 APK)](https://drive.google.com/file/d/1o-Trp2GIRitA0OvmZWUQjVMa476xpfgK/view?usp=sharing)
+    [`mediapipe/examples/android/src/java/com/google/mediapipe/apps/holistictrackinggpu:holistictrackinggpu`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/holistictrackinggpu/BUILD)
+*   iOS target:
+    [`mediapipe/examples/ios/holistictrackinggpu:HolisticTrackingGpuApp`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/holistictrackinggpu/BUILD)
+
+### Desktop
+
+Please first see general instructions for [desktop](../getting_started/cpp.md)
+on how to build MediaPipe examples.
+
+*   Running on CPU
+    *   Graph:
+        [`mediapipe/graphs/holistic_tracking/holistic_tracking_cpu.pbtxt`](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/holistic_tracking/holistic_tracking_cpu.pbtxt)
+    *   Target:
+        [`mediapipe/examples/desktop/holistic_tracking:holistic_tracking_cpu`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/desktop/holistic_tracking/BUILD)
+*   Running on GPU
+    *   Graph:
+        [`mediapipe/graphs/holistic_tracking/holistic_tracking_gpu.pbtxt`](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/holistic_tracking/holistic_tracking_gpu.pbtxt)
+    *   Target:
+        [`mediapipe/examples/desktop/holistic_tracking:holistic_tracking_gpu`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/desktop/holistic_tracking/BUILD)
+
+## Resources
+
+*   Google AI Blog:
+    [MediaPipe Holistic - Simultaneous Face, Hand and Pose Prediction on Device](https://ai.googleblog.com/2020/12/mediapipe-holistic-simultaneous-face.html)
+*   [Models and model cards](./models.md#holistic)
+
+[Colab]:https://mediapipe.page.link/holistic_py_colab
+
+[web demo]:https://code.mediapipe.dev/codepen/holistic
diff --git a/docs/solutions/instant_motion_tracking.md b/docs/solutions/instant_motion_tracking.md
index dfc4835a3..720fe80f6 100644
--- a/docs/solutions/instant_motion_tracking.md
+++ b/docs/solutions/instant_motion_tracking.md
@@ -2,14 +2,20 @@
 layout: default
 title: Instant Motion Tracking
 parent: Solutions
-nav_order: 9
+nav_order: 10
 ---
 
 # MediaPipe Instant Motion Tracking
 {: .no_toc }
 
+<details close markdown="block">
+  <summary>
+    Table of contents
+  </summary>
+  {: .text-delta }
 1. TOC
 {:toc}
+</details>
--- ## Overview @@ -104,8 +110,7 @@ and connected camera. ## Example Apps Please first see general instructions for -[Android](../getting_started/building_examples.md#android) on how to build -MediaPipe examples. +[Android](../getting_started/android.md) on how to build MediaPipe examples. * Graph: [mediapipe/graphs/instant_motion_tracking/instant_motion_tracking.pbtxt](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/instant_motion_tracking/instant_motion_tracking.pbtxt) diff --git a/docs/solutions/iris.md b/docs/solutions/iris.md index 706504196..61ca8049c 100644 --- a/docs/solutions/iris.md +++ b/docs/solutions/iris.md @@ -8,8 +8,14 @@ nav_order: 3 # MediaPipe Iris {: .no_toc } +
+<details close markdown="block">
+  <summary>
+    Table of contents
+  </summary>
+  {: .text-delta }
 1. TOC
 {:toc}
+</details>
--- ## Overview @@ -116,10 +122,8 @@ along with some simple geometric arguments. For more details please refer to our ## Example Apps Please first see general instructions for -[Android](../getting_started/building_examples.md#android), -[iOS](../getting_started/building_examples.md#ios) and -[desktop](../getting_started/building_examples.md#desktop) on how to build -MediaPipe examples. +[Android](../getting_started/android.md), [iOS](../getting_started/ios.md) and +[desktop](../getting_started/cpp.md) on how to build MediaPipe examples. Note: To visualize a graph, copy the graph and paste it into [MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how @@ -140,9 +144,8 @@ to visualize its associated subgraphs, please see #### Live Camera Input -Please first see general instructions for -[desktop](../getting_started/building_examples.md#desktop) on how to build -MediaPipe examples. +Please first see general instructions for [desktop](../getting_started/cpp.md) +on how to build MediaPipe examples. * Running on CPU * Graph: diff --git a/docs/solutions/knift.md b/docs/solutions/knift.md index e59875705..41691c418 100644 --- a/docs/solutions/knift.md +++ b/docs/solutions/knift.md @@ -2,14 +2,20 @@ layout: default title: KNIFT (Template-based Feature Matching) parent: Solutions -nav_order: 11 +nav_order: 12 --- # MediaPipe KNIFT {: .no_toc } +
+<details close markdown="block">
+  <summary>
+    Table of contents
+  </summary>
+  {: .text-delta }
 1. TOC
 {:toc}
+</details>
--- ## Overview @@ -67,7 +73,7 @@ you'd like to use your own template images, see ![template_matching_mobile_template.jpg](../images/mobile/template_matching_mobile_template.jpg) Please first see general instructions for -[Android](../getting_started/building_examples.md#android) on how to build MediaPipe examples. +[Android](../getting_started/android.md) on how to build MediaPipe examples. Note: To visualize a graph, copy the graph and paste it into [MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how diff --git a/docs/solutions/media_sequence.md b/docs/solutions/media_sequence.md index 16a2278cd..cd3b7ecef 100644 --- a/docs/solutions/media_sequence.md +++ b/docs/solutions/media_sequence.md @@ -2,14 +2,20 @@ layout: default title: Dataset Preparation with MediaSequence parent: Solutions -nav_order: 13 +nav_order: 14 --- # Dataset Preparation with MediaSequence {: .no_toc } +
+<details close markdown="block">
+  <summary>
+    Table of contents
+  </summary>
+  {: .text-delta }
 1. TOC
 {:toc}
+</details>
--- ## Overview diff --git a/docs/solutions/models.md b/docs/solutions/models.md index 9cc6e1fec..4bc3d849e 100644 --- a/docs/solutions/models.md +++ b/docs/solutions/models.md @@ -48,10 +48,17 @@ nav_order: 30 * Pose detection model: [TFLite model](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_detection/pose_detection.tflite) +* Full-body pose landmark model: + [TFLite model](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_full_body.tflite) * Upper-body pose landmark model: [TFLite model](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_upper_body.tflite) * [Model card](https://mediapipe.page.link/blazepose-mc) +### [Holistic](https://google.github.io/mediapipe/solutions/holistic) + +* Hand recrop model: + [TFLite model](https://github.com/google/mediapipe/tree/master/mediapipe/modules/holistic_landmark/hand_recrop.tflite) + ### [Hair Segmentation](https://google.github.io/mediapipe/solutions/hair_segmentation) * [TFLite model](https://github.com/google/mediapipe/tree/master/mediapipe/models/hair_segmentation.tflite) diff --git a/docs/solutions/object_detection.md b/docs/solutions/object_detection.md index e4081d8d9..044748537 100644 --- a/docs/solutions/object_detection.md +++ b/docs/solutions/object_detection.md @@ -2,14 +2,20 @@ layout: default title: Object Detection parent: Solutions -nav_order: 7 +nav_order: 8 --- # MediaPipe Object Detection {: .no_toc } +
+<details close markdown="block">
+  <summary>
+    Table of contents
+  </summary>
+  {: .text-delta }
 1. TOC
 {:toc}
+</details>
--- ![object_detection_android_gpu.gif](../images/mobile/object_detection_android_gpu.gif) @@ -24,8 +30,8 @@ to visualize its associated subgraphs, please see ### Mobile Please first see general instructions for -[Android](../getting_started/building_examples.md#android) and -[iOS](../getting_started/building_examples.md#ios) on how to build MediaPipe examples. +[Android](../getting_started/android.md) and [iOS](../getting_started/ios.md) on +how to build MediaPipe examples. #### GPU Pipeline @@ -56,8 +62,8 @@ same configuration as the GPU pipeline, runs entirely on CPU. #### Live Camera Input -Please first see general instructions for -[desktop](../getting_started/building_examples.md#desktop) on how to build MediaPipe examples. +Please first see general instructions for [desktop](../getting_started/cpp.md) +on how to build MediaPipe examples. * Graph: [`mediapipe/graphs/object_detection/object_detection_desktop_live.pbtxt`](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/object_detection/object_detection_desktop_live.pbtxt) diff --git a/docs/solutions/objectron.md b/docs/solutions/objectron.md index f01f2aefb..c65b72bae 100644 --- a/docs/solutions/objectron.md +++ b/docs/solutions/objectron.md @@ -2,21 +2,27 @@ layout: default title: Objectron (3D Object Detection) parent: Solutions -nav_order: 10 +nav_order: 11 --- # MediaPipe Objectron {: .no_toc } +
+<details close markdown="block">
+  <summary>
+    Table of contents
+  </summary>
+  {: .text-delta }
 1. TOC
 {:toc}
+</details>
--- ## Overview MediaPipe Objectron is a mobile real-time 3D object detection solution for everyday objects. It detects objects in 2D images, and estimates their poses -through a machine learning (ML) model, trained on a newly created 3D dataset. +through a machine learning (ML) model, trained on the [Objectron dataset](https://github.com/google-research-datasets/Objectron). ![objectron_shoe_android_gpu.gif](../images/mobile/objectron_shoe_android_gpu.gif) | ![objectron_chair_android_gpu.gif](../images/mobile/objectron_chair_android_gpu.gif) | ![objectron_camera_android_gpu.gif](../images/mobile/objectron_camera_android_gpu.gif) | ![objectron_cup_android_gpu.gif](../images/mobile/objectron_cup_android_gpu.gif) :--------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------: | :------------------------------------------------------------------------------------: | :------------------------------------------------------------------------------: @@ -106,7 +112,8 @@ detector does not need to run every frame. *Fig 5. Network architecture and post-processing for two-stage 3D object detection.* | We can use any 2D object detector for the first stage. In this solution, we use -[TensorFlow Object Detection](https://github.com/tensorflow/models/tree/master/research/object_detection). +[TensorFlow Object Detection](https://github.com/tensorflow/models/tree/master/research/object_detection) trained +with the [Open Images dataset](https://storage.googleapis.com/openimages/web/index.html). The second stage 3D bounding box predictor we released runs 83FPS on Adreno 650 mobile GPU. @@ -157,9 +164,9 @@ The Objectron 3D object detection and tracking pipeline is implemented as a MediaPipe [graph](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/object_detection_3d/object_occlusion_tracking_1stage.pbtxt), which internally uses a -[detection subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/object_detection_3d/subgraphs/objectron_detection_gpu.pbtxt) +[detection subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/modules/objectron/objectron_detection_1stage_gpu.pbtxt) and a -[tracking subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/object_detection_3d/subgraphs/objectron_tracking_gpu.pbtxt). +[tracking subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/modules/objectron/objectron_tracking_1stage_gpu.pbtxt). The detection subgraph performs ML inference only once every few frames to reduce computation load, and decodes the output tensor to a FrameAnnotation that contains nine keypoints: the 3D bounding box's center and its eight vertices. @@ -176,13 +183,14 @@ tracking results, based on the area of overlap. We also released our [Objectron dataset](http://objectron.dev), with which we trained our 3D object detection models. The technical details of the Objectron -dataset, including usage and tutorials, are available on the dataset website. +dataset, including usage and tutorials, are available on +the [dataset website](https://github.com/google-research-datasets/Objectron/). ## Example Apps Please first see general instructions for -[Android](../getting_started/building_examples.md#android) and -[iOS](../getting_started/building_examples.md#ios) on how to build MediaPipe examples. +[Android](../getting_started/android.md) and [iOS](../getting_started/ios.md) on +how to build MediaPipe examples. 
Note: To visualize a graph, copy the graph and paste it into [MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how @@ -254,7 +262,7 @@ to visualize its associated subgraphs, please see ## Resources * Google AI Blog: - [Announcing the Objectron Dataset](https://mediapipe.page.link/objectron_dataset_ai_blog) + [Announcing the Objectron Dataset](https://ai.googleblog.com/2020/11/announcing-objectron-dataset.html) * Google AI Blog: [Real-Time 3D Object Detection on Mobile Devices with MediaPipe](https://ai.googleblog.com/2020/03/real-time-3d-object-detection-on-mobile.html) * Paper: [MobilePose: Real-Time Pose Estimation for Unseen Objects with Weak diff --git a/docs/solutions/pose.md b/docs/solutions/pose.md index c9c7f7159..0130a5f46 100644 --- a/docs/solutions/pose.md +++ b/docs/solutions/pose.md @@ -8,8 +8,14 @@ nav_order: 5 # MediaPipe Pose {: .no_toc } +
+<details close markdown="block">
+  <summary>
+    Table of contents
+  </summary>
+  {: .text-delta }
 1. TOC
 {:toc}
+</details>
 ---
 
 ## Overview
 
@@ -20,16 +26,16 @@ gesture control. For example, it can form the basis for yoga, dance, and fitness
 applications. It can also enable the overlay of digital content and information
 on top of the physical world in augmented reality.
 
-MediaPipe Pose is a ML solution for high-fidelity upper-body pose tracking,
-inferring 25 2D upper-body landmarks from RGB video frames utilizing our
+MediaPipe Pose is an ML solution for high-fidelity body pose tracking, inferring
+33 2D landmarks on the whole body (or 25 upper-body landmarks) from RGB video
+frames utilizing our
 [BlazePose](https://ai.googleblog.com/2020/08/on-device-real-time-body-pose-tracking.html)
-research. Current state-of-the-art approaches rely primarily on powerful desktop
+research that also powers the
+[ML Kit Pose Detection API](https://developers.google.com/ml-kit/vision/pose-detection).
+Current state-of-the-art approaches rely primarily on powerful desktop
 environments for inference, whereas our method achieves real-time performance on
 most modern [mobile phones](#mobile), [desktops/laptops](#desktop), in
-[python](#python) and even on the [web](#web). A variant of MediaPipe Pose that
-performs full-body pose tracking on mobile phones will be included in an
-upcoming release of
-[ML Kit](https://developers.google.com/ml-kit/early-access/pose-detection).
+[python](#python) and even on the [web](#web).
 
 ![pose_tracking_upper_body_example.gif](../images/mobile/pose_tracking_upper_body_example.gif) |
 :--------------------------------------------------------------------------------------------: |
@@ -40,23 +46,24 @@ upcoming release of
 
 The solution utilizes a two-step detector-tracker ML pipeline, proven to be
 effective in our [MediaPipe Hands](./hands.md) and
 [MediaPipe Face Mesh](./face_mesh.md) solutions. Using a detector, the pipeline
-first locates the pose region-of-interest (ROI) within the frame. The tracker
-subsequently predicts the pose landmarks within the ROI using the ROI-cropped
-frame as input. Note that for video use cases the detector is invoked only as
-needed, i.e., for the very first frame and when the tracker could no longer
-identify body pose presence in the previous frame. For other frames the pipeline
-simply derives the ROI from the previous frame’s pose landmarks.
+first locates the person/pose region-of-interest (ROI) within the frame. The
+tracker subsequently predicts the pose landmarks within the ROI using the
+ROI-cropped frame as input. Note that for video use cases the detector is
+invoked only as needed, i.e., for the very first frame and when the tracker
+could no longer identify body pose presence in the previous frame. For other
+frames the pipeline simply derives the ROI from the previous frame’s pose
+landmarks.
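+
+In other words, the expensive detector runs only when tracking is lost, as in
+this simplified sketch (illustrative pseudocode, not an actual MediaPipe API):
+
+```python
+def track_pose(frame, prev_landmarks=None):
+  if prev_landmarks is None:
+    # Detector: very first frame, or tracking was lost on the previous frame.
+    roi = person_detector(frame)
+  else:
+    # Tracker: derive the ROI from the previous frame's landmarks.
+    roi = roi_from_landmarks(prev_landmarks)
+  landmarks = pose_landmark_model(crop(frame, roi))
+  # Report a lost track so that the detector is re-run on the next frame.
+  return landmarks if tracking_confidence(landmarks) > 0.5 else None
+```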
The pipeline is implemented as a MediaPipe -[graph](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/pose_tracking/upper_body_pose_tracking_gpu.pbtxt) +[graph](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/pose_tracking/pose_tracking_gpu.pbtxt) that uses a -[pose landmark subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_upper_body_gpu.pbtxt) +[pose landmark subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_gpu.pbtxt) from the [pose landmark module](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark) and renders using a dedicated -[upper-body pose renderer subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/pose_tracking/subgraphs/upper_body_pose_renderer_gpu.pbtxt). +[pose renderer subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/pose_tracking/subgraphs/pose_renderer_gpu.pbtxt). The -[pose landmark subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_upper_body_gpu.pbtxt) +[pose landmark subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_gpu.pbtxt) internally uses a [pose detection subgraph](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_detection/pose_detection_gpu.pbtxt) from the @@ -69,7 +76,7 @@ to visualize its associated subgraphs, please see ## Models -### Pose Detection Model (BlazePose Detector) +### Person/pose Detection Model (BlazePose Detector) The detector is inspired by our own lightweight [BlazeFace](https://arxiv.org/abs/1907.05047) model, used in @@ -87,84 +94,93 @@ hip midpoints. ### Pose Landmark Model (BlazePose Tracker) -The landmark model currently included in MediaPipe Pose predicts the location of -25 upper-body landmarks (see figure below), each with `(x, y, z, visibility)`. -Note that the `z` value should be discarded as the model is currently not fully -trained to predict depth, but this is something we have on the roadmap. The -model shares the same architecture as the full-body version that predicts 33 -landmarks, described in more detail in the -[BlazePose Google AI Blog](https://ai.googleblog.com/2020/08/on-device-real-time-body-pose-tracking.html) -and in this [paper](https://arxiv.org/abs/2006.10204). +The landmark model in MediaPipe Pose comes in two versions: a full-body model +that predicts the location of 33 pose landmarks (see figure below), and an +upper-body version that only predicts the first 25. The latter may be more +accurate than the former in scenarios where the lower-body parts are mostly out +of view. -![pose_tracking_upper_body_landmarks.png](../images/mobile/pose_tracking_upper_body_landmarks.png) | -:------------------------------------------------------------------------------------------------: | -*Fig 3. 25 upper-body pose landmarks.* | +Please find more detail in the +[BlazePose Google AI Blog](https://ai.googleblog.com/2020/08/on-device-real-time-body-pose-tracking.html), +this [paper](https://arxiv.org/abs/2006.10204) and +[the model card](./models.md#pose), and the attributes in each landmark +[below](#pose_landmarks). -## Example Apps +![pose_tracking_full_body_landmarks.png](../images/mobile/pose_tracking_full_body_landmarks.png) | +:----------------------------------------------------------------------------------------------: | +*Fig 3. 
33 pose landmarks.* |
 
-## Example Apps
+## Solution APIs
 
-Please first see general instructions for
-[Android](../getting_started/building_examples.md#android),
-[iOS](../getting_started/building_examples.md#ios),
-[desktop](../getting_started/building_examples.md#desktop) and
-[Python](../getting_started/building_examples.md#python) on how to build
-MediaPipe examples.
+### Cross-platform Configuration Options
 
-Note: To visualize a graph, copy the graph and paste it into
-[MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how
-to visualize its associated subgraphs, please see
-[visualizer documentation](../tools/visualizer.md).
+Naming style and availability may differ slightly across platforms/languages.
 
-### Mobile
+#### static_image_mode
 
-*   Graph:
-    [`mediapipe/graphs/pose_tracking/upper_body_pose_tracking_gpu.pbtxt`](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/pose_tracking/upper_body_pose_tracking_gpu.pbtxt)
-*   Android target:
-    [(or download prebuilt ARM64 APK)](https://drive.google.com/file/d/1uKc6T7KSuA0Mlq2URi5YookHu0U3yoh_/view?usp=sharing)
-    [`mediapipe/examples/android/src/java/com/google/mediapipe/apps/upperbodyposetrackinggpu:upperbodyposetrackinggpu`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/upperbodyposetrackinggpu/BUILD)
-*   iOS target:
-    [`mediapipe/examples/ios/upperbodyposetrackinggpu:UpperBodyPoseTrackingGpuApp`](http:/mediapipe/examples/ios/upperbodyposetrackinggpu/BUILD)
+If set to `false`, the solution treats the input images as a video stream. It
+will try to detect the most prominent person in the very first images, and upon
+a successful detection further localizes the pose landmarks. In subsequent
+images, it then simply tracks those landmarks without invoking another
+detection until it loses track, which reduces computation and latency. If set
+to `true`, person detection runs on every input image, which is ideal for
+processing a batch of static, possibly unrelated, images. Defaults to `false`.
 
-### Desktop
+#### upper_body_only
 
-Please first see general instructions for
-[desktop](../getting_started/building_examples.md#desktop) on how to build
-MediaPipe examples.
+If set to `true`, the solution outputs only the 25 upper-body pose landmarks.
+Otherwise, it outputs the full set of 33 pose landmarks. Note that
+upper-body-only prediction may be more accurate for use cases where the
+lower-body parts are mostly out of view. Defaults to `false`.
 
-### Python
+#### smooth_landmarks
 
-MediaPipe Python package is available on
-[PyPI](https://pypi.org/project/mediapipe/), and can be installed simply by `pip
-install mediapipe` on Linux and macOS, as described below and in this
-[colab](https://mediapipe.page.link/pose_py_colab). If you do need to build the
-Python package from source, see
-[additional instructions](../getting_started/building_examples.md#python).
+If set to `true`, the solution filters pose landmarks across different input
+images to reduce jitter, but it is ignored if
+[static_image_mode](#static_image_mode) is also set to `true`. Defaults to
+`true`.
 
-Activate a Python virtual environment:
+#### min_detection_confidence
 
-```bash
-$ python3 -m venv mp_env && source mp_env/bin/activate
-```
+Minimum confidence value (`[0.0, 1.0]`) from the person-detection model for the
+detection to be considered successful. Defaults to `0.5`.
 
-Install MediaPipe Python package:
+#### min_tracking_confidence
 
-```bash
-(mp_env)$ pip install mediapipe
-```
+Minimum confidence value (`[0.0, 1.0]`) from the landmark-tracking model for
+the pose landmarks to be considered tracked successfully; otherwise person
+detection will be invoked automatically on the next input image. Setting it to
+a higher value can increase robustness of the solution, at the expense of a
+higher latency. Ignored if [static_image_mode](#static_image_mode) is `true`,
+where person detection simply runs on every image. Defaults to `0.5`.
 
-Run the following Python code:
+### Output
 
-
+Naming style may differ slightly across platforms/languages.
+
+#### pose_landmarks
+
+A list of pose landmarks. Each landmark consists of the following:
+
+*   `x` and `y`: Landmark coordinates normalized to `[0.0, 1.0]` by the image
+    width and height respectively.
+*   `z`: Should be discarded as currently the model is not fully trained to
+    predict depth, but this is something on the roadmap.
+*   `visibility`: A value in `[0.0, 1.0]` indicating the likelihood of the
+    landmark being visible (present and not occluded) in the image.
+
+### Python Solution API
+
+Please first follow general [instructions](../getting_started/python.md) to
+install the MediaPipe Python package, then learn more in the companion [Colab]
+and the following usage example.
+
+Supported configuration options:
+
+*   [static_image_mode](#static_image_mode)
+*   [upper_body_only](#upper_body_only)
+*   [smooth_landmarks](#smooth_landmarks)
+*   [min_detection_confidence](#min_detection_confidence)
+*   [min_tracking_confidence](#min_tracking_confidence)
 
 ```python
 import cv2
@@ -177,17 +193,22 @@ pose = mp_pose.Pose(
     static_image_mode=True, min_detection_confidence=0.5)
 for idx, file in enumerate(file_list):
   image = cv2.imread(file)
+  image_height, image_width, _ = image.shape
   # Convert the BGR image to RGB before processing.
   results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
 
-  # Print and draw pose landmarks on the image.
+  if not results.pose_landmarks:
+    continue
   print(
-      'nose landmark:',
-      results.pose_landmarks.landmark[mp_pose.PoseLandmark.NOSE])
+      f'Nose coordinates: ('
+      f'{results.pose_landmarks.landmark[mp_pose.PoseLandmark.NOSE].x * image_width}, '
+      f'{results.pose_landmarks.landmark[mp_pose.PoseLandmark.NOSE].y * image_height})'
+  )
+  # Draw pose landmarks on the image.
   annotated_image = image.copy()
   mp_drawing.draw_landmarks(
       annotated_image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS)
-  cv2.imwrite('/tmp/annotated_image' + str(idx) + '.png', image)
+  cv2.imwrite('/tmp/annotated_image' + str(idx) + '.png', annotated_image)
 pose.close()
 
 # For webcam input:
@@ -197,7 +218,9 @@ cap = cv2.VideoCapture(0)
 while cap.isOpened():
   success, image = cap.read()
   if not success:
-    break
+    print("Ignoring empty camera frame.")
+    # If loading a video, use 'break' instead of 'continue'.
+    continue
 
   # Flip the image horizontally for a later selfie-view display, and convert
   # the BGR image to RGB.
@@ -219,11 +242,142 @@ pose.close()
 cap.release()
 ```
 
-Tip: Use command `deactivate` to exit the Python virtual environment.
+### JavaScript Solution API
 
-### Web
+Please first see general [introduction](../getting_started/javascript.md) on
+MediaPipe in JavaScript, then learn more in the companion [web demo] and the
+following usage example.
 
-Please refer to [these instructions](../index.md#mediapipe-on-the-web).
+Supported configuration options:
+
+*   [upperBodyOnly](#upper_body_only)
+*   [smoothLandmarks](#smooth_landmarks)
+*   [minDetectionConfidence](#min_detection_confidence)
+*   [minTrackingConfidence](#min_tracking_confidence)
+
+```html
+<!DOCTYPE html>
+<html>
+<head>
+  <meta charset="utf-8">
+  <script src="https://cdn.jsdelivr.net/npm/@mediapipe/camera_utils/camera_utils.js" crossorigin="anonymous"></script>
+  <script src="https://cdn.jsdelivr.net/npm/@mediapipe/control_utils/control_utils.js" crossorigin="anonymous"></script>
+  <script src="https://cdn.jsdelivr.net/npm/@mediapipe/drawing_utils/drawing_utils.js" crossorigin="anonymous"></script>
+  <script src="https://cdn.jsdelivr.net/npm/@mediapipe/pose/pose.js" crossorigin="anonymous"></script>
+</head>
+
+<body>
+  <div class="container">
+    <video class="input_video"></video>
+    <canvas class="output_canvas" width="1280px" height="720px"></canvas>
+  </div>
+</body>
+</html>
+```
+
+```javascript
+// A minimal usage sketch. It assumes the HTML above has loaded the
+// @mediapipe/camera_utils, @mediapipe/drawing_utils and @mediapipe/pose
+// scripts, which provide Camera, the drawing helpers, the Pose class and
+// POSE_CONNECTIONS.
+const videoElement = document.getElementsByClassName('input_video')[0];
+const canvasElement = document.getElementsByClassName('output_canvas')[0];
+const canvasCtx = canvasElement.getContext('2d');
+
+function onResults(results) {
+  canvasCtx.save();
+  canvasCtx.clearRect(0, 0, canvasElement.width, canvasElement.height);
+  canvasCtx.drawImage(
+      results.image, 0, 0, canvasElement.width, canvasElement.height);
+  drawConnectors(canvasCtx, results.poseLandmarks, POSE_CONNECTIONS,
+                 {color: '#00FF00', lineWidth: 4});
+  drawLandmarks(canvasCtx, results.poseLandmarks,
+                {color: '#FF0000', lineWidth: 2});
+  canvasCtx.restore();
+}
+
+const pose = new Pose({locateFile: (file) => {
+  return `https://cdn.jsdelivr.net/npm/@mediapipe/pose/${file}`;
+}});
+pose.setOptions({
+  upperBodyOnly: false,
+  smoothLandmarks: true,
+  minDetectionConfidence: 0.5,
+  minTrackingConfidence: 0.5
+});
+pose.onResults(onResults);
+
+const camera = new Camera(videoElement, {
+  onFrame: async () => {
+    await pose.send({image: videoElement});
+  },
+  width: 1280,
+  height: 720
+});
+camera.start();
+```
+
+## Example Apps
+
+Please first see general instructions for
+[Android](../getting_started/android.md), [iOS](../getting_started/ios.md), and
+[desktop](../getting_started/cpp.md) on how to build MediaPipe examples.
+
+Note: To visualize a graph, copy the graph and paste it into
+[MediaPipe Visualizer](https://viz.mediapipe.dev/). For more information on how
+to visualize its associated subgraphs, please see
+[visualizer documentation](../tools/visualizer.md).
+
+### Mobile
+
+#### Main Example
+
+*   Graph:
+    [`mediapipe/graphs/pose_tracking/pose_tracking_gpu.pbtxt`](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/pose_tracking/pose_tracking_gpu.pbtxt)
+*   Android target:
+    [(or download prebuilt ARM64 APK)](https://drive.google.com/file/d/17GFIrqEJS6W8UHKXlYevTtSCLxN9pWlY/view?usp=sharing)
+    [`mediapipe/examples/android/src/java/com/google/mediapipe/apps/posetrackinggpu:posetrackinggpu`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/posetrackinggpu/BUILD)
+*   iOS target:
+    [`mediapipe/examples/ios/posetrackinggpu:PoseTrackingGpuApp`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/posetrackinggpu/BUILD)
+
+#### Upper-body Only
+
+*   Graph:
+    [`mediapipe/graphs/pose_tracking/upper_body_pose_tracking_gpu.pbtxt`](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/pose_tracking/upper_body_pose_tracking_gpu.pbtxt)
+*   Android target:
+    [(or download prebuilt ARM64 APK)](https://drive.google.com/file/d/1uKc6T7KSuA0Mlq2URi5YookHu0U3yoh_/view?usp=sharing)
+    [`mediapipe/examples/android/src/java/com/google/mediapipe/apps/upperbodyposetrackinggpu:upperbodyposetrackinggpu`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/upperbodyposetrackinggpu/BUILD)
+*   iOS target:
+    [`mediapipe/examples/ios/upperbodyposetrackinggpu:UpperBodyPoseTrackingGpuApp`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/upperbodyposetrackinggpu/BUILD)
+
+### Desktop
+
+Please first see general instructions for [desktop](../getting_started/cpp.md)
+on how to build MediaPipe examples.
+ +#### Main Example + +* Running on CPU + * Graph: + [`mediapipe/graphs/pose_tracking/pose_tracking_cpu.pbtxt`](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/pose_tracking/pose_tracking_cpu.pbtxt) + * Target: + [`mediapipe/examples/desktop/pose_tracking:pose_tracking_cpu`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/desktop/pose_tracking/BUILD) +* Running on GPU + * Graph: + [`mediapipe/graphs/pose_tracking/pose_tracking_gpu.pbtxt`](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/pose_tracking/pose_tracking_gpu.pbtxt) + * Target: + [`mediapipe/examples/desktop/pose_tracking:pose_tracking_gpu`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/desktop/pose_tracking/BUILD) + +#### Upper-body Only + +* Running on CPU + * Graph: + [`mediapipe/graphs/pose_tracking/upper_body_pose_tracking_cpu.pbtxt`](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/pose_tracking/upper_body_pose_tracking_cpu.pbtxt) + * Target: + [`mediapipe/examples/desktop/upper_body_pose_tracking:upper_body_pose_tracking_cpu`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/desktop/upper_body_pose_tracking/BUILD) +* Running on GPU + * Graph: + [`mediapipe/graphs/pose_tracking/upper_body_pose_tracking_gpu.pbtxt`](https://github.com/google/mediapipe/tree/master/mediapipe/graphs/pose_tracking/upper_body_pose_tracking_gpu.pbtxt) + * Target: + [`mediapipe/examples/desktop/upper_body_pose_tracking:upper_body_pose_tracking_gpu`](https://github.com/google/mediapipe/tree/master/mediapipe/examples/desktop/upper_body_pose_tracking/BUILD) ## Resources @@ -233,3 +387,7 @@ Please refer to [these instructions](../index.md#mediapipe-on-the-web). [BlazePose: On-device Real-time Body Pose Tracking](https://arxiv.org/abs/2006.10204) ([presentation](https://youtu.be/YPpUOTRn5tA)) * [Models and model cards](./models.md#pose) + +[Colab]:https://mediapipe.page.link/pose_py_colab + +[web demo]:https://code.mediapipe.dev/codepen/pose diff --git a/docs/solutions/solutions.md b/docs/solutions/solutions.md index efb853cee..a0dce94a0 100644 --- a/docs/solutions/solutions.md +++ b/docs/solutions/solutions.md @@ -16,22 +16,23 @@ has_toc: false -[]() | Android | iOS | Desktop | Python | Web | Coral -:---------------------------------------------------------------------------------------- | :-----: | :-: | :-----: | :----: | :-: | :---: -[Face Detection](https://google.github.io/mediapipe/solutions/face_detection) | ✅ | ✅ | ✅ | | ✅ | ✅ -[Face Mesh](https://google.github.io/mediapipe/solutions/face_mesh) | ✅ | ✅ | ✅ | ✅ | | -[Iris](https://google.github.io/mediapipe/solutions/iris) | ✅ | ✅ | ✅ | | ✅ | -[Hands](https://google.github.io/mediapipe/solutions/hands) | ✅ | ✅ | ✅ | ✅ | ✅ | -[Pose](https://google.github.io/mediapipe/solutions/pose) | ✅ | ✅ | ✅ | ✅ | ✅ | -[Hair Segmentation](https://google.github.io/mediapipe/solutions/hair_segmentation) | ✅ | | ✅ | | ✅ | -[Object Detection](https://google.github.io/mediapipe/solutions/object_detection) | ✅ | ✅ | ✅ | | | ✅ -[Box Tracking](https://google.github.io/mediapipe/solutions/box_tracking) | ✅ | ✅ | ✅ | | | -[Instant Motion Tracking](https://google.github.io/mediapipe/solutions/instant_motion_tracking) | ✅ | | | | | -[Objectron](https://google.github.io/mediapipe/solutions/objectron) | ✅ | | | | | -[KNIFT](https://google.github.io/mediapipe/solutions/knift) | ✅ | | | | | -[AutoFlip](https://google.github.io/mediapipe/solutions/autoflip) | | | ✅ | | | 
-[MediaSequence](https://google.github.io/mediapipe/solutions/media_sequence) | | | ✅ | | | -[YouTube 8M](https://google.github.io/mediapipe/solutions/youtube_8m) | | | ✅ | | | +[]() | [Android](https://google.github.io/mediapipe/getting_started/android) | [iOS](https://google.github.io/mediapipe/getting_started/ios) | [C++](https://google.github.io/mediapipe/getting_started/cpp) | [Python](https://google.github.io/mediapipe/getting_started/python) | [JS](https://google.github.io/mediapipe/getting_started/javascript) | [Coral](https://github.com/google/mediapipe/tree/master/mediapipe/examples/coral/README.md) +:---------------------------------------------------------------------------------------- | :-------------------------------------------------------------: | :-----------------------------------------------------: | :-----------------------------------------------------: | :-----------------------------------------------------------: | :-----------------------------------------------------------: | :--------------------------------------------------------------------: +[Face Detection](https://google.github.io/mediapipe/solutions/face_detection) | ✅ | ✅ | ✅ | | | ✅ +[Face Mesh](https://google.github.io/mediapipe/solutions/face_mesh) | ✅ | ✅ | ✅ | ✅ | ✅ | +[Iris](https://google.github.io/mediapipe/solutions/iris) | ✅ | ✅ | ✅ | | | +[Hands](https://google.github.io/mediapipe/solutions/hands) | ✅ | ✅ | ✅ | ✅ | ✅ | +[Pose](https://google.github.io/mediapipe/solutions/pose) | ✅ | ✅ | ✅ | ✅ | ✅ | +[Holistic](https://google.github.io/mediapipe/solutions/holistic) | ✅ | ✅ | ✅ | ✅ | ✅ | +[Hair Segmentation](https://google.github.io/mediapipe/solutions/hair_segmentation) | ✅ | | ✅ | | | +[Object Detection](https://google.github.io/mediapipe/solutions/object_detection) | ✅ | ✅ | ✅ | | | ✅ +[Box Tracking](https://google.github.io/mediapipe/solutions/box_tracking) | ✅ | ✅ | ✅ | | | +[Instant Motion Tracking](https://google.github.io/mediapipe/solutions/instant_motion_tracking) | ✅ | | | | | +[Objectron](https://google.github.io/mediapipe/solutions/objectron) | ✅ | | | | | +[KNIFT](https://google.github.io/mediapipe/solutions/knift) | ✅ | | | | | +[AutoFlip](https://google.github.io/mediapipe/solutions/autoflip) | | | ✅ | | | +[MediaSequence](https://google.github.io/mediapipe/solutions/media_sequence) | | | ✅ | | | +[YouTube 8M](https://google.github.io/mediapipe/solutions/youtube_8m) | | | ✅ | | | See also [MediaPipe Models and Model Cards](https://google.github.io/mediapipe/solutions/models) diff --git a/docs/solutions/youtube_8m.md b/docs/solutions/youtube_8m.md index f6d05bbca..abef6f1b6 100644 --- a/docs/solutions/youtube_8m.md +++ b/docs/solutions/youtube_8m.md @@ -2,14 +2,20 @@ layout: default title: YouTube-8M Feature Extraction and Model Inference parent: Solutions -nav_order: 14 +nav_order: 15 --- # YouTube-8M Feature Extraction and Model Inference {: .no_toc } +
+<details close markdown="block">
+  <summary>
+    Table of contents
+  </summary>
+  {: .text-delta }
 1. TOC
 {:toc}
+</details>
--- MediaPipe is a useful and general framework for media processing that can assist diff --git a/mediapipe/MediaPipe.tulsiproj/Configs/MediaPipe.tulsigen b/mediapipe/MediaPipe.tulsiproj/Configs/MediaPipe.tulsigen index 95664a8ca..257bda2f5 100644 --- a/mediapipe/MediaPipe.tulsiproj/Configs/MediaPipe.tulsigen +++ b/mediapipe/MediaPipe.tulsiproj/Configs/MediaPipe.tulsigen @@ -14,9 +14,11 @@ "mediapipe/examples/ios/facemeshgpu/BUILD", "mediapipe/examples/ios/handdetectiongpu/BUILD", "mediapipe/examples/ios/handtrackinggpu/BUILD", + "mediapipe/examples/ios/holistictrackinggpu/BUILD", "mediapipe/examples/ios/iristrackinggpu/BUILD", "mediapipe/examples/ios/objectdetectioncpu/BUILD", "mediapipe/examples/ios/objectdetectiongpu/BUILD", + "mediapipe/examples/ios/posetrackinggpu/BUILD", "mediapipe/examples/ios/upperbodyposetrackinggpu/BUILD" ], "buildTargets" : [ @@ -27,9 +29,11 @@ "//mediapipe/examples/ios/facemeshgpu:FaceMeshGpuApp", "//mediapipe/examples/ios/handdetectiongpu:HandDetectionGpuApp", "//mediapipe/examples/ios/handtrackinggpu:HandTrackingGpuApp", + "//mediapipe/examples/ios/holistictrackinggpu:HolisticTrackingGpuApp", "//mediapipe/examples/ios/iristrackinggpu:IrisTrackingGpuApp", "//mediapipe/examples/ios/objectdetectioncpu:ObjectDetectionCpuApp", "//mediapipe/examples/ios/objectdetectiongpu:ObjectDetectionGpuApp", + "//mediapipe/examples/ios/posetrackinggpu:PoseTrackingGpuApp", "//mediapipe/examples/ios/upperbodyposetrackinggpu:UpperBodyPoseTrackingGpuApp", "//mediapipe/objc:mediapipe_framework_ios" ], @@ -94,9 +98,11 @@ "mediapipe/examples/ios/faceeffect/Base.lproj", "mediapipe/examples/ios/handdetectiongpu", "mediapipe/examples/ios/handtrackinggpu", + "mediapipe/examples/ios/holistictrackinggpu", "mediapipe/examples/ios/iristrackinggpu", "mediapipe/examples/ios/objectdetectioncpu", "mediapipe/examples/ios/objectdetectiongpu", + "mediapipe/examples/ios/posetrackinggpu", "mediapipe/examples/ios/upperbodyposetrackinggpu", "mediapipe/framework", "mediapipe/framework/deps", diff --git a/mediapipe/MediaPipe.tulsiproj/project.tulsiconf b/mediapipe/MediaPipe.tulsiproj/project.tulsiconf index 241b121ba..c829f2706 100644 --- a/mediapipe/MediaPipe.tulsiproj/project.tulsiconf +++ b/mediapipe/MediaPipe.tulsiproj/project.tulsiconf @@ -17,9 +17,11 @@ "mediapipe/examples/ios/facemeshgpu", "mediapipe/examples/ios/handdetectiongpu", "mediapipe/examples/ios/handtrackinggpu", + "mediapipe/examples/ios/holistictrackinggpu", "mediapipe/examples/ios/iristrackinggpu", "mediapipe/examples/ios/objectdetectioncpu", "mediapipe/examples/ios/objectdetectiongpu", + "mediapipe/examples/ios/posetrackinggpu", "mediapipe/examples/ios/upperbodyposetrackinggpu" ], "projectName" : "Mediapipe", diff --git a/mediapipe/calculators/audio/audio_decoder_calculator.cc b/mediapipe/calculators/audio/audio_decoder_calculator.cc index b80b64bae..1ff70eb23 100644 --- a/mediapipe/calculators/audio/audio_decoder_calculator.cc +++ b/mediapipe/calculators/audio/audio_decoder_calculator.cc @@ -48,18 +48,17 @@ namespace mediapipe { // TODO: support decoding multiple streams. 
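+// Example graph config (an illustrative sketch: the side packet and
+// "AUDIO_HEADER" tags match GetContract() below, while the "AUDIO" output
+// stream tag is assumed from the decoder's usage):
+//
+// node {
+//   calculator: "AudioDecoderCalculator"
+//   input_side_packet: "INPUT_FILE_PATH:input_file_path"
+//   output_stream: "AUDIO:audio"
+//   output_stream: "AUDIO_HEADER:audio_header"
+// }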
class AudioDecoderCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; private: std::unique_ptr decoder_; }; -::mediapipe::Status AudioDecoderCalculator::GetContract( - CalculatorContract* cc) { +mediapipe::Status AudioDecoderCalculator::GetContract(CalculatorContract* cc) { cc->InputSidePackets().Tag("INPUT_FILE_PATH").Set(); if (cc->InputSidePackets().HasTag("OPTIONS")) { cc->InputSidePackets().Tag("OPTIONS").Set(); @@ -68,10 +67,10 @@ class AudioDecoderCalculator : public CalculatorBase { if (cc->Outputs().HasTag("AUDIO_HEADER")) { cc->Outputs().Tag("AUDIO_HEADER").SetNone(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status AudioDecoderCalculator::Open(CalculatorContext* cc) { +mediapipe::Status AudioDecoderCalculator::Open(CalculatorContext* cc) { const std::string& input_file_path = cc->InputSidePackets().Tag("INPUT_FILE_PATH").Get(); const auto& decoder_options = @@ -88,10 +87,10 @@ class AudioDecoderCalculator : public CalculatorBase { cc->Outputs().Tag("AUDIO_HEADER").SetHeader(Adopt(header.release())); } cc->Outputs().Tag("AUDIO_HEADER").Close(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status AudioDecoderCalculator::Process(CalculatorContext* cc) { +mediapipe::Status AudioDecoderCalculator::Process(CalculatorContext* cc) { Packet data; int options_index = -1; auto status = decoder_->GetData(&options_index, &data); @@ -101,7 +100,7 @@ class AudioDecoderCalculator : public CalculatorBase { return status; } -::mediapipe::Status AudioDecoderCalculator::Close(CalculatorContext* cc) { +mediapipe::Status AudioDecoderCalculator::Close(CalculatorContext* cc) { return decoder_->Close(); } diff --git a/mediapipe/calculators/audio/basic_time_series_calculators.cc b/mediapipe/calculators/audio/basic_time_series_calculators.cc index 4d966f47f..f372e5a7c 100644 --- a/mediapipe/calculators/audio/basic_time_series_calculators.cc +++ b/mediapipe/calculators/audio/basic_time_series_calculators.cc @@ -38,7 +38,7 @@ static bool SafeMultiply(int x, int y, int* result) { } } // namespace -::mediapipe::Status BasicTimeSeriesCalculatorBase::GetContract( +mediapipe::Status BasicTimeSeriesCalculatorBase::GetContract( CalculatorContract* cc) { cc->Inputs().Index(0).Set( // Input stream with TimeSeriesHeader. @@ -46,10 +46,10 @@ static bool SafeMultiply(int x, int y, int* result) { cc->Outputs().Index(0).Set( // Output stream with TimeSeriesHeader. 
); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status BasicTimeSeriesCalculatorBase::Open(CalculatorContext* cc) { +mediapipe::Status BasicTimeSeriesCalculatorBase::Open(CalculatorContext* cc) { TimeSeriesHeader input_header; MP_RETURN_IF_ERROR(time_series_util::FillTimeSeriesHeaderIfValid( cc->Inputs().Index(0).Header(), &input_header)); @@ -57,10 +57,10 @@ static bool SafeMultiply(int x, int y, int* result) { auto output_header = new TimeSeriesHeader(input_header); MP_RETURN_IF_ERROR(MutateHeader(output_header)); cc->Outputs().Index(0).SetHeader(Adopt(output_header)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status BasicTimeSeriesCalculatorBase::Process( +mediapipe::Status BasicTimeSeriesCalculatorBase::Process( CalculatorContext* cc) { const Matrix& input = cc->Inputs().Index(0).Get(); MP_RETURN_IF_ERROR(time_series_util::IsMatrixShapeConsistentWithHeader( @@ -71,12 +71,12 @@ static bool SafeMultiply(int x, int y, int* result) { *output, cc->Outputs().Index(0).Header().Get())); cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status BasicTimeSeriesCalculatorBase::MutateHeader( +mediapipe::Status BasicTimeSeriesCalculatorBase::MutateHeader( TimeSeriesHeader* output_header) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Calculator to sum an input time series across channels. This is @@ -86,9 +86,9 @@ static bool SafeMultiply(int x, int y, int* result) { class SumTimeSeriesAcrossChannelsCalculator : public BasicTimeSeriesCalculatorBase { protected: - ::mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { + mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { output_header->set_num_channels(1); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } Matrix ProcessMatrix(const Matrix& input_matrix) final { @@ -104,9 +104,9 @@ REGISTER_CALCULATOR(SumTimeSeriesAcrossChannelsCalculator); class AverageTimeSeriesAcrossChannelsCalculator : public BasicTimeSeriesCalculatorBase { protected: - ::mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { + mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { output_header->set_num_channels(1); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } Matrix ProcessMatrix(const Matrix& input_matrix) final { @@ -122,7 +122,7 @@ REGISTER_CALCULATOR(AverageTimeSeriesAcrossChannelsCalculator); // Options proto: None. class SummarySaiToPitchogramCalculator : public BasicTimeSeriesCalculatorBase { protected: - ::mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { + mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { if (output_header->num_channels() != 1) { return tool::StatusInvalid( absl::StrCat("Expected single-channel input, got ", @@ -131,7 +131,7 @@ class SummarySaiToPitchogramCalculator : public BasicTimeSeriesCalculatorBase { output_header->set_num_channels(output_header->num_samples()); output_header->set_num_samples(1); output_header->set_sample_rate(output_header->packet_rate()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } Matrix ProcessMatrix(const Matrix& input_matrix) final { @@ -160,7 +160,7 @@ REGISTER_CALCULATOR(ReverseChannelOrderCalculator); // Options proto: None. 
class FlattenPacketCalculator : public BasicTimeSeriesCalculatorBase { protected: - ::mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { + mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { const int num_input_channels = output_header->num_channels(); const int num_input_samples = output_header->num_samples(); RET_CHECK(num_input_channels >= 0) @@ -174,7 +174,7 @@ class FlattenPacketCalculator : public BasicTimeSeriesCalculatorBase { output_header->set_num_channels(output_num_channels); output_header->set_num_samples(1); output_header->set_sample_rate(output_header->packet_rate()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } Matrix ProcessMatrix(const Matrix& input_matrix) final { @@ -253,10 +253,10 @@ REGISTER_CALCULATOR(DivideByMeanAcrossChannelsCalculator); // Options proto: None. class MeanCalculator : public BasicTimeSeriesCalculatorBase { protected: - ::mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { + mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { output_header->set_num_samples(1); output_header->set_sample_rate(output_header->packet_rate()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } Matrix ProcessMatrix(const Matrix& input_matrix) final { @@ -272,10 +272,10 @@ REGISTER_CALCULATOR(MeanCalculator); // Options proto: None. class StandardDeviationCalculator : public BasicTimeSeriesCalculatorBase { protected: - ::mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { + mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { output_header->set_num_samples(1); output_header->set_sample_rate(output_header->packet_rate()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } Matrix ProcessMatrix(const Matrix& input_matrix) final { @@ -293,9 +293,9 @@ REGISTER_CALCULATOR(StandardDeviationCalculator); // Options proto: None. class CovarianceCalculator : public BasicTimeSeriesCalculatorBase { protected: - ::mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { + mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { output_header->set_num_samples(output_header->num_channels()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } Matrix ProcessMatrix(const Matrix& input_matrix) final { @@ -313,9 +313,9 @@ REGISTER_CALCULATOR(CovarianceCalculator); // Options proto: None. class L2NormCalculator : public BasicTimeSeriesCalculatorBase { protected: - ::mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { + mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { output_header->set_num_channels(1); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } Matrix ProcessMatrix(const Matrix& input_matrix) final { @@ -385,12 +385,12 @@ REGISTER_CALCULATOR(ElementwiseSquareCalculator); // Options proto: None. 
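For a concrete sense of these MutateHeader() implementations: given an illustrative input TimeSeriesHeader with num_channels: 2, num_samples: 128, sample_rate: 16000 and packet_rate: 125, the FlattenPacketCalculator above would emit an output header with num_channels: 256 (2 * 128, computed via SafeMultiply), num_samples: 1, and sample_rate: 125 (the packet rate), since each output packet carries a single flattened frame. The same header-mutation rule applies to the FirstHalfSlicerCalculator that follows.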
class FirstHalfSlicerCalculator : public BasicTimeSeriesCalculatorBase { protected: - ::mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { + mediapipe::Status MutateHeader(TimeSeriesHeader* output_header) final { const int num_input_samples = output_header->num_samples(); RET_CHECK(num_input_samples >= 0) << "FirstHalfSlicerCalculator: num_input_samples < 0"; output_header->set_num_samples(num_input_samples / 2); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } Matrix ProcessMatrix(const Matrix& input_matrix) final { diff --git a/mediapipe/calculators/audio/basic_time_series_calculators.h b/mediapipe/calculators/audio/basic_time_series_calculators.h index 3727d66b0..f08939440 100644 --- a/mediapipe/calculators/audio/basic_time_series_calculators.h +++ b/mediapipe/calculators/audio/basic_time_series_calculators.h @@ -28,16 +28,16 @@ namespace mediapipe { class BasicTimeSeriesCalculatorBase : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + static mediapipe::Status GetContract(CalculatorContract* cc); + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; protected: // Open() calls this method to mutate the output stream header. The input // to this function will contain a copy of the input stream header, so // subclasses that do not need to mutate the header do not need to override // it. - virtual ::mediapipe::Status MutateHeader(TimeSeriesHeader* output_header); + virtual mediapipe::Status MutateHeader(TimeSeriesHeader* output_header); // Process() calls this method on each packet to compute the output matrix. virtual Matrix ProcessMatrix(const Matrix& input_matrix) = 0; diff --git a/mediapipe/calculators/audio/mfcc_mel_calculators.cc b/mediapipe/calculators/audio/mfcc_mel_calculators.cc index 93c44b1fb..d6d5cf56c 100644 --- a/mediapipe/calculators/audio/mfcc_mel_calculators.cc +++ b/mediapipe/calculators/audio/mfcc_mel_calculators.cc @@ -66,7 +66,7 @@ std::string PortableDebugString(const TimeSeriesHeader& header) { // rows corresponding to the new feature space). class FramewiseTransformCalculatorBase : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set( // Sequence of Matrices, each column describing a particular time frame, // each row a feature dimension, with TimeSeriesHeader. @@ -75,11 +75,11 @@ class FramewiseTransformCalculatorBase : public CalculatorBase { // Sequence of Matrices, each column describing a particular time frame, // each row a feature dimension, with TimeSeriesHeader. ); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; int num_output_channels(void) { return num_output_channels_; } @@ -90,8 +90,8 @@ class FramewiseTransformCalculatorBase : public CalculatorBase { private: // Takes header and options, and sets up state including calling // set_num_output_channels() on the base object. 
- virtual ::mediapipe::Status ConfigureTransform(const TimeSeriesHeader& header, - CalculatorContext* cc) = 0; + virtual mediapipe::Status ConfigureTransform(const TimeSeriesHeader& header, + CalculatorContext* cc) = 0; // Takes a vector corresponding to an input frame, and // perform the specific transformation to produce an output frame. @@ -102,13 +102,13 @@ class FramewiseTransformCalculatorBase : public CalculatorBase { int num_output_channels_; }; -::mediapipe::Status FramewiseTransformCalculatorBase::Open( +mediapipe::Status FramewiseTransformCalculatorBase::Open( CalculatorContext* cc) { TimeSeriesHeader input_header; MP_RETURN_IF_ERROR(time_series_util::FillTimeSeriesHeaderIfValid( cc->Inputs().Index(0).Header(), &input_header)); - ::mediapipe::Status status = ConfigureTransform(input_header, cc); + mediapipe::Status status = ConfigureTransform(input_header, cc); auto output_header = new TimeSeriesHeader(input_header); output_header->set_num_channels(num_output_channels_); @@ -117,7 +117,7 @@ class FramewiseTransformCalculatorBase : public CalculatorBase { return status; } -::mediapipe::Status FramewiseTransformCalculatorBase::Process( +mediapipe::Status FramewiseTransformCalculatorBase::Process( CalculatorContext* cc) { const Matrix& input = cc->Inputs().Index(0).Get(); const int num_frames = input.cols(); @@ -145,7 +145,7 @@ class FramewiseTransformCalculatorBase : public CalculatorBase { } cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Calculator wrapper around the dsp/mfcc/mfcc.cc routine. @@ -170,13 +170,13 @@ class FramewiseTransformCalculatorBase : public CalculatorBase { // } class MfccCalculator : public FramewiseTransformCalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { return FramewiseTransformCalculatorBase::GetContract(cc); } private: - ::mediapipe::Status ConfigureTransform(const TimeSeriesHeader& header, - CalculatorContext* cc) override { + mediapipe::Status ConfigureTransform(const TimeSeriesHeader& header, + CalculatorContext* cc) override { MfccCalculatorOptions mfcc_options = cc->Options(); mfcc_.reset(new audio_dsp::Mfcc()); int input_length = header.num_channels(); @@ -194,7 +194,7 @@ class MfccCalculator : public FramewiseTransformCalculatorBase { // audio_dsp::MelFilterBank needs to know this to // correctly interpret the spectrogram bins. if (!header.has_audio_sample_rate()) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( absl::StrCat("No audio_sample_rate in input TimeSeriesHeader ", PortableDebugString(header))); } @@ -203,10 +203,10 @@ class MfccCalculator : public FramewiseTransformCalculatorBase { mfcc_->Initialize(input_length, header.audio_sample_rate()); if (initialized) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } else { - return ::mediapipe::Status(mediapipe::StatusCode::kInternal, - "Mfcc::Initialize returned uninitialized"); + return mediapipe::Status(mediapipe::StatusCode::kInternal, + "Mfcc::Initialize returned uninitialized"); } } @@ -228,13 +228,13 @@ REGISTER_CALCULATOR(MfccCalculator); // if you ask for too many channels. 
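Note that MfccCalculator above (like the MelSpectrumCalculator below) can only configure its transform when the input TimeSeriesHeader carries audio_sample_rate; otherwise ConfigureTransform() returns the InvalidArgumentError shown in the hunk. A minimal sketch of a header that satisfies the check, with illustrative values (the setters follow the standard protobuf naming for the fields referenced above):

  TimeSeriesHeader header;
  header.set_num_channels(129);           // e.g. squared-magnitude spectrogram bins
  header.set_sample_rate(100.0);          // feature frames per second
  header.set_audio_sample_rate(16000.0);  // required by ConfigureTransform()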
class MelSpectrumCalculator : public FramewiseTransformCalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { return FramewiseTransformCalculatorBase::GetContract(cc); } private: - ::mediapipe::Status ConfigureTransform(const TimeSeriesHeader& header, - CalculatorContext* cc) override { + mediapipe::Status ConfigureTransform(const TimeSeriesHeader& header, + CalculatorContext* cc) override { MelSpectrumCalculatorOptions mel_spectrum_options = cc->Options(); mel_filterbank_.reset(new audio_dsp::MelFilterbank()); @@ -245,7 +245,7 @@ class MelSpectrumCalculator : public FramewiseTransformCalculatorBase { // audio_dsp::MelFilterBank needs to know this to // correctly interpret the spectrogram bins. if (!header.has_audio_sample_rate()) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( absl::StrCat("No audio_sample_rate in input TimeSeriesHeader ", PortableDebugString(header))); } @@ -255,10 +255,10 @@ class MelSpectrumCalculator : public FramewiseTransformCalculatorBase { mel_spectrum_options.max_frequency_hertz()); if (initialized) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } else { - return ::mediapipe::Status(mediapipe::StatusCode::kInternal, - "mfcc::Initialize returned uninitialized"); + return mediapipe::Status(mediapipe::StatusCode::kInternal, + "mfcc::Initialize returned uninitialized"); } } diff --git a/mediapipe/calculators/audio/mfcc_mel_calculators_test.cc b/mediapipe/calculators/audio/mfcc_mel_calculators_test.cc index b2ceacf00..e7e7ac3a0 100644 --- a/mediapipe/calculators/audio/mfcc_mel_calculators_test.cc +++ b/mediapipe/calculators/audio/mfcc_mel_calculators_test.cc @@ -84,7 +84,7 @@ class FramewiseTransformCalculatorTest num_samples_per_packet_ = GenerateRandomNonnegInputStream(kNumPackets); } - ::mediapipe::Status Run() { return this->RunGraph(); } + mediapipe::Status Run() { return this->RunGraph(); } void CheckResults(int expected_num_channels) { const auto& output_header = diff --git a/mediapipe/calculators/audio/rational_factor_resample_calculator.cc b/mediapipe/calculators/audio/rational_factor_resample_calculator.cc index 3ed67bd88..b335fbe40 100644 --- a/mediapipe/calculators/audio/rational_factor_resample_calculator.cc +++ b/mediapipe/calculators/audio/rational_factor_resample_calculator.cc @@ -23,15 +23,15 @@ using audio_dsp::RationalFactorResampler; using audio_dsp::Resampler; namespace mediapipe { -::mediapipe::Status RationalFactorResampleCalculator::Process( +mediapipe::Status RationalFactorResampleCalculator::Process( CalculatorContext* cc) { return ProcessInternal(cc->Inputs().Index(0).Get(), false, cc); } -::mediapipe::Status RationalFactorResampleCalculator::Close( +mediapipe::Status RationalFactorResampleCalculator::Close( CalculatorContext* cc) { if (initial_timestamp_ == Timestamp::Unstarted()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } Matrix empty_input_frame(num_channels_, 0); return ProcessInternal(empty_input_frame, true, cc); @@ -62,7 +62,7 @@ void CopyVectorToChannel(const std::vector& vec, Matrix* matrix, } // namespace -::mediapipe::Status RationalFactorResampleCalculator::Open( +mediapipe::Status RationalFactorResampleCalculator::Open( CalculatorContext* cc) { RationalFactorResampleCalculatorOptions resample_options = cc->Options(); @@ -88,7 +88,7 @@ void CopyVectorToChannel(const std::vector& vec, Matrix* matrix, resample_options); if (!r) { LOG(ERROR) << 
"Failed to initialize resampler."; - return ::mediapipe::UnknownError("Failed to initialize resampler."); + return mediapipe::UnknownError("Failed to initialize resampler."); } } } @@ -106,10 +106,10 @@ void CopyVectorToChannel(const std::vector& vec, Matrix* matrix, initial_timestamp_ = Timestamp::Unstarted(); check_inconsistent_timestamps_ = resample_options.check_inconsistent_timestamps(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status RationalFactorResampleCalculator::ProcessInternal( +mediapipe::Status RationalFactorResampleCalculator::ProcessInternal( const Matrix& input_frame, bool should_flush, CalculatorContext* cc) { if (initial_timestamp_ == Timestamp::Unstarted()) { initial_timestamp_ = cc->InputTimestamp(); @@ -131,7 +131,7 @@ void CopyVectorToChannel(const std::vector& vec, Matrix* matrix, *output_frame = input_frame; } else { if (!Resample(input_frame, output_frame.get(), should_flush)) { - return ::mediapipe::UnknownError("Resample() failed."); + return mediapipe::UnknownError("Resample() failed."); } } cumulative_output_samples_ += output_frame->cols(); @@ -139,7 +139,7 @@ void CopyVectorToChannel(const std::vector& vec, Matrix* matrix, if (output_frame->cols() > 0) { cc->Outputs().Index(0).Add(output_frame.release(), output_timestamp); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } bool RationalFactorResampleCalculator::Resample(const Matrix& input_frame, diff --git a/mediapipe/calculators/audio/rational_factor_resample_calculator.h b/mediapipe/calculators/audio/rational_factor_resample_calculator.h index 745ac8f0d..dc0719b39 100644 --- a/mediapipe/calculators/audio/rational_factor_resample_calculator.h +++ b/mediapipe/calculators/audio/rational_factor_resample_calculator.h @@ -40,24 +40,24 @@ class RationalFactorResampleCalculator : public CalculatorBase { public: struct TestAccess; - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set( // Single input stream with TimeSeriesHeader. ); cc->Outputs().Index(0).Set( // Resampled stream with TimeSeriesHeader. ); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Returns FAIL if the input stream header is invalid or if the // resampler cannot be initialized. - ::mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; // Resamples a packet of TimeSeries data. Returns FAIL if the // resampler state becomes inconsistent. - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; // Flushes any remaining state. Returns FAIL if the resampler state // becomes inconsistent. - ::mediapipe::Status Close(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; protected: typedef audio_dsp::Resampler ResamplerType; @@ -72,8 +72,8 @@ class RationalFactorResampleCalculator : public CalculatorBase { // Does Timestamp bookkeeping and resampling common to Process() and // Close(). Returns FAIL if the resampler state becomes // inconsistent. - ::mediapipe::Status ProcessInternal(const Matrix& input_frame, - bool should_flush, CalculatorContext* cc); + mediapipe::Status ProcessInternal(const Matrix& input_frame, + bool should_flush, CalculatorContext* cc); // Uses the internal resampler_ objects to actually resample each // row of the input TimeSeries. 
Returns false if the resampler diff --git a/mediapipe/calculators/audio/rational_factor_resample_calculator_test.cc b/mediapipe/calculators/audio/rational_factor_resample_calculator_test.cc index f21cff516..aefa21205 100644 --- a/mediapipe/calculators/audio/rational_factor_resample_calculator_test.cc +++ b/mediapipe/calculators/audio/rational_factor_resample_calculator_test.cc @@ -80,7 +80,7 @@ class RationalFactorResampleCalculatorTest } // Initializes and runs the test graph. - ::mediapipe::Status Run(double output_sample_rate) { + mediapipe::Status Run(double output_sample_rate) { options_.set_target_sample_rate(output_sample_rate); InitializeGraph(); diff --git a/mediapipe/calculators/audio/spectrogram_calculator.cc b/mediapipe/calculators/audio/spectrogram_calculator.cc index 7bac73ff7..dd2dae886 100644 --- a/mediapipe/calculators/audio/spectrogram_calculator.cc +++ b/mediapipe/calculators/audio/spectrogram_calculator.cc @@ -66,7 +66,7 @@ namespace mediapipe { // analysis frame will advance from its predecessor by the same time step. class SpectrogramCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set( // Input stream with TimeSeriesHeader. ); @@ -96,21 +96,21 @@ class SpectrogramCalculator : public CalculatorBase { ); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Returns FAIL if the input stream header is invalid. - ::mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; // Outputs at most one packet consisting of a single Matrix with one or // more columns containing the spectral values from as many input frames // as are completed by the input samples. Always returns OK. - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; // Performs zero-padding and processing of any remaining samples // if pad_final_packet is set. // Returns OK. - ::mediapipe::Status Close(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; private: Timestamp CurrentOutputTimestamp(CalculatorContext* cc) { @@ -138,12 +138,12 @@ class SpectrogramCalculator : public CalculatorBase { // Convert the output of the spectrogram object into a Matrix (or an // Eigen::MatrixXcf if complex-valued output is requested) and pass to // MediaPipe output. - ::mediapipe::Status ProcessVector(const Matrix& input_stream, - CalculatorContext* cc); + mediapipe::Status ProcessVector(const Matrix& input_stream, + CalculatorContext* cc); // Templated function to process either real- or complex-output spectrogram. template - ::mediapipe::Status ProcessVectorToOutput( + mediapipe::Status ProcessVectorToOutput( const Matrix& input_stream, const OutputMatrixType postprocess_output_fn(const OutputMatrixType&), CalculatorContext* cc); @@ -177,7 +177,7 @@ REGISTER_CALCULATOR(SpectrogramCalculator); // Factor to convert ln(magnitude_squared) to deciBels = 10.0/ln(10.0). 
const float SpectrogramCalculator::kLnPowerToDb = 4.342944819032518; -::mediapipe::Status SpectrogramCalculator::Open(CalculatorContext* cc) { +mediapipe::Status SpectrogramCalculator::Open(CalculatorContext* cc) { SpectrogramCalculatorOptions spectrogram_options = cc->Options(); @@ -272,10 +272,10 @@ const float SpectrogramCalculator::kLnPowerToDb = 4.342944819032518; } cumulative_completed_frames_ = 0; initial_input_timestamp_ = Timestamp::Unstarted(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status SpectrogramCalculator::Process(CalculatorContext* cc) { +mediapipe::Status SpectrogramCalculator::Process(CalculatorContext* cc) { if (initial_input_timestamp_ == Timestamp::Unstarted()) { initial_input_timestamp_ = cc->InputTimestamp(); } @@ -291,7 +291,7 @@ const float SpectrogramCalculator::kLnPowerToDb = 4.342944819032518; } template -::mediapipe::Status SpectrogramCalculator::ProcessVectorToOutput( +mediapipe::Status SpectrogramCalculator::ProcessVectorToOutput( const Matrix& input_stream, const OutputMatrixType postprocess_output_fn(const OutputMatrixType&), CalculatorContext* cc) { @@ -311,8 +311,8 @@ template if (!spectrogram_generators_[channel]->ComputeSpectrogram( input_vector, &output_vectors)) { - return ::mediapipe::Status(mediapipe::StatusCode::kInternal, - "Spectrogram returned failure"); + return mediapipe::Status(mediapipe::StatusCode::kInternal, + "Spectrogram returned failure"); } if (channel == 0) { // Record the number of time frames we expect from each channel. @@ -355,10 +355,10 @@ template } cumulative_completed_frames_ += output_vectors.size(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status SpectrogramCalculator::ProcessVector( +mediapipe::Status SpectrogramCalculator::ProcessVector( const Matrix& input_stream, CalculatorContext* cc) { switch (output_type_) { // These blocks deliberately ignore clang-format to preserve the @@ -394,13 +394,13 @@ template } // clang-format on default: { - return ::mediapipe::Status(mediapipe::StatusCode::kInvalidArgument, - "Unrecognized spectrogram output type."); + return mediapipe::Status(mediapipe::StatusCode::kInvalidArgument, + "Unrecognized spectrogram output type."); } } } -::mediapipe::Status SpectrogramCalculator::Close(CalculatorContext* cc) { +mediapipe::Status SpectrogramCalculator::Close(CalculatorContext* cc) { if (cumulative_input_samples_ > 0 && pad_final_packet_) { // We can flush any remaining samples by sending frame_step_samples - 1 // zeros to the Process method, and letting it do its thing, @@ -416,7 +416,7 @@ template Matrix::Zero(num_input_channels_, required_padding_samples), cc); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/audio/spectrogram_calculator_test.cc b/mediapipe/calculators/audio/spectrogram_calculator_test.cc index 200bdee11..c28ffb4d2 100644 --- a/mediapipe/calculators/audio/spectrogram_calculator_test.cc +++ b/mediapipe/calculators/audio/spectrogram_calculator_test.cc @@ -50,7 +50,7 @@ class SpectrogramCalculatorTest } // Initializes and runs the test graph. - ::mediapipe::Status Run() { + mediapipe::Status Run() { // Now that options are set, we can set up some internal constants. 
frame_duration_samples_ = round(options_.frame_duration_seconds() * input_sample_rate_); diff --git a/mediapipe/calculators/audio/stabilized_log_calculator.cc b/mediapipe/calculators/audio/stabilized_log_calculator.cc index b5623ee0f..20d062bfb 100644 --- a/mediapipe/calculators/audio/stabilized_log_calculator.cc +++ b/mediapipe/calculators/audio/stabilized_log_calculator.cc @@ -41,17 +41,17 @@ namespace mediapipe { // } class StabilizedLogCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set( // Input stream with TimeSeriesHeader. ); cc->Outputs().Index(0).Set( // Output stabilized log stream with TimeSeriesHeader. ); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { StabilizedLogCalculatorOptions stabilized_log_calculator_options = cc->Options(); @@ -70,23 +70,23 @@ class StabilizedLogCalculator : public CalculatorBase { cc->Outputs().Index(0).SetHeader( Adopt(new TimeSeriesHeader(input_header))); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { auto input_matrix = cc->Inputs().Index(0).Get(); if (input_matrix.array().isNaN().any()) { - return ::mediapipe::InvalidArgumentError("NaN input to log operation."); + return mediapipe::InvalidArgumentError("NaN input to log operation."); } if (check_nonnegativity_) { if (input_matrix.minCoeff() < 0.0) { - return ::mediapipe::OutOfRangeError("Negative input to log operation."); + return mediapipe::OutOfRangeError("Negative input to log operation."); } } std::unique_ptr output_frame(new Matrix( output_scale_ * (input_matrix.array() + stabilizer_).log().matrix())); cc->Outputs().Index(0).Add(output_frame.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: diff --git a/mediapipe/calculators/audio/time_series_framer_calculator.cc b/mediapipe/calculators/audio/time_series_framer_calculator.cc index 04f593bca..bffda6723 100644 --- a/mediapipe/calculators/audio/time_series_framer_calculator.cc +++ b/mediapipe/calculators/audio/time_series_framer_calculator.cc @@ -66,26 +66,26 @@ namespace mediapipe { // cumulative_completed_samples / sample_rate_. class TimeSeriesFramerCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set( // Input stream with TimeSeriesHeader. ); cc->Outputs().Index(0).Set( // Fixed length time series Packets with TimeSeriesHeader. ); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Returns FAIL if the input stream header is invalid. - ::mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; // Outputs as many framed packets as possible given the accumulated // input. Always returns OK. - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; // Flushes any remaining samples in a zero-padded packet. Always // returns OK. 
- ::mediapipe::Status Close(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; private: // Adds input data to the internal buffer. @@ -205,7 +205,7 @@ void TimeSeriesFramerCalculator::FrameOutput(CalculatorContext* cc) { } } -::mediapipe::Status TimeSeriesFramerCalculator::Process(CalculatorContext* cc) { +mediapipe::Status TimeSeriesFramerCalculator::Process(CalculatorContext* cc) { if (initial_input_timestamp_ == Timestamp::Unstarted()) { initial_input_timestamp_ = cc->InputTimestamp(); current_timestamp_ = initial_input_timestamp_; @@ -214,10 +214,10 @@ void TimeSeriesFramerCalculator::FrameOutput(CalculatorContext* cc) { EnqueueInput(cc); FrameOutput(cc); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TimeSeriesFramerCalculator::Close(CalculatorContext* cc) { +mediapipe::Status TimeSeriesFramerCalculator::Close(CalculatorContext* cc) { while (samples_still_to_drop_ > 0 && !sample_buffer_.empty()) { sample_buffer_.pop_front(); --samples_still_to_drop_; @@ -234,10 +234,10 @@ void TimeSeriesFramerCalculator::FrameOutput(CalculatorContext* cc) { CurrentOutputTimestamp()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TimeSeriesFramerCalculator::Open(CalculatorContext* cc) { +mediapipe::Status TimeSeriesFramerCalculator::Open(CalculatorContext* cc) { TimeSeriesFramerCalculatorOptions framer_options = cc->Options(); @@ -317,7 +317,7 @@ void TimeSeriesFramerCalculator::FrameOutput(CalculatorContext* cc) { } use_local_timestamp_ = framer_options.use_local_timestamp(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/audio/time_series_framer_calculator_test.cc b/mediapipe/calculators/audio/time_series_framer_calculator_test.cc index cd0c38e13..c4978a00e 100644 --- a/mediapipe/calculators/audio/time_series_framer_calculator_test.cc +++ b/mediapipe/calculators/audio/time_series_framer_calculator_test.cc @@ -69,7 +69,7 @@ class TimeSeriesFramerCalculatorTest } // Initializes and runs the test graph. 
- ::mediapipe::Status Run() { + mediapipe::Status Run() { InitializeGraph(); FillInputHeader(); @@ -441,7 +441,7 @@ class TimeSeriesFramerCalculatorTimestampingTest } } - ::mediapipe::Status RunTimestampTest() { + mediapipe::Status RunTimestampTest() { InitializeGraph(); InitializeInputForTimeStampingTest(); FillInputHeader(); diff --git a/mediapipe/calculators/core/BUILD b/mediapipe/calculators/core/BUILD index bda9700b9..2def194ea 100644 --- a/mediapipe/calculators/core/BUILD +++ b/mediapipe/calculators/core/BUILD @@ -130,6 +130,16 @@ mediapipe_proto_library( ], ) +mediapipe_proto_library( + name = "flow_limiter_calculator_proto", + srcs = ["flow_limiter_calculator.proto"], + visibility = ["//visibility:public"], + deps = [ + "//mediapipe/framework:calculator_options_proto", + "//mediapipe/framework:calculator_proto", + ], +) + cc_library( name = "add_header_calculator", srcs = ["add_header_calculator.cc"], @@ -238,13 +248,14 @@ cc_library( visibility = ["//visibility:public"], deps = [ ":concatenate_vector_calculator_cc_proto", - "//mediapipe/framework:calculator_framework", "//mediapipe/framework/formats:classification_cc_proto", "//mediapipe/framework/formats:landmark_cc_proto", "//mediapipe/framework/formats:tensor", "//mediapipe/framework/port:integral_types", "//mediapipe/framework/port:ret_check", "//mediapipe/framework/port:status", + "//mediapipe/framework:calculator_framework", + "//mediapipe/util:render_data_cc_proto", "@org_tensorflow//tensorflow/lite:framework", ] + select({ "//mediapipe/gpu:disable_gpu": [], @@ -607,6 +618,7 @@ cc_library( srcs = ["flow_limiter_calculator.cc"], visibility = ["//visibility:public"], deps = [ + ":flow_limiter_calculator_cc_proto", "//mediapipe/framework:calculator_framework", "//mediapipe/framework:packet", "//mediapipe/framework:timestamp", @@ -782,6 +794,7 @@ cc_test( srcs = ["flow_limiter_calculator_test.cc"], deps = [ ":flow_limiter_calculator", + ":flow_limiter_calculator_cc_proto", "//mediapipe/calculators/core:counting_source_calculator", "//mediapipe/calculators/core:pass_through_calculator", "//mediapipe/framework:calculator_framework", @@ -793,6 +806,8 @@ cc_test( "//mediapipe/framework/port:integral_types", "//mediapipe/framework/port:parse_text_proto", "//mediapipe/framework/stream_handler:immediate_input_stream_handler", + "//mediapipe/framework/tool:simulation_clock", + "//mediapipe/framework/tool:simulation_clock_executor", "//mediapipe/framework/tool:sink", "@com_google_absl//absl/time", ], diff --git a/mediapipe/calculators/core/add_header_calculator.cc b/mediapipe/calculators/core/add_header_calculator.cc index 393c12225..918729ec0 100644 --- a/mediapipe/calculators/core/add_header_calculator.cc +++ b/mediapipe/calculators/core/add_header_calculator.cc @@ -44,7 +44,7 @@ namespace mediapipe { // class AddHeaderCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { bool has_side_input = false; bool has_header_stream = false; if (cc->InputSidePackets().HasTag("HEADER")) { @@ -62,10 +62,10 @@ class AddHeaderCalculator : public CalculatorBase { } cc->Inputs().Tag("DATA").SetAny(); cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Tag("DATA")); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { Packet header; if (cc->InputSidePackets().HasTag("HEADER")) { header = 
cc->InputSidePackets().Tag("HEADER"); @@ -77,12 +77,12 @@ class AddHeaderCalculator : public CalculatorBase { cc->Outputs().Index(0).SetHeader(header); } cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { cc->Outputs().Index(0).AddPacket(cc->Inputs().Tag("DATA").Value()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; diff --git a/mediapipe/calculators/core/add_header_calculator_test.cc b/mediapipe/calculators/core/add_header_calculator_test.cc index 01ea986f1..8aa5d3424 100644 --- a/mediapipe/calculators/core/add_header_calculator_test.cc +++ b/mediapipe/calculators/core/add_header_calculator_test.cc @@ -153,7 +153,7 @@ TEST_F(AddHeaderCalculatorTest, UsingBothSideInputAndStream) { } // Run should fail because header can only be provided one way. - EXPECT_EQ(runner.Run().code(), ::mediapipe::InvalidArgumentError("").code()); + EXPECT_EQ(runner.Run().code(), mediapipe::InvalidArgumentError("").code()); } } // namespace mediapipe diff --git a/mediapipe/calculators/core/begin_end_loop_calculator_graph_test.cc b/mediapipe/calculators/core/begin_end_loop_calculator_graph_test.cc index 716151b69..85834491a 100644 --- a/mediapipe/calculators/core/begin_end_loop_calculator_graph_test.cc +++ b/mediapipe/calculators/core/begin_end_loop_calculator_graph_test.cc @@ -42,22 +42,22 @@ REGISTER_CALCULATOR(BeginLoopIntegerCalculator); class IncrementCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set(); cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { const int& input_int = cc->Inputs().Index(0).Get(); auto output_int = absl::make_unique(input_int + 1); cc->Outputs().Index(0).Add(output_int.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; @@ -166,19 +166,19 @@ TEST_F(BeginEndLoopCalculatorGraphTest, MultipleVectors) { // bound update. 
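As the UsingBothSideInputAndStream test above confirms, AddHeaderCalculator accepts its header either as the HEADER side packet or as the HEADER stream, never both. A typical node using the side-packet form might look like this sketch (stream and packet names are illustrative):

  node {
    calculator: "AddHeaderCalculator"
    input_side_packet: "HEADER:audio_header"
    input_stream: "DATA:audio_samples"
    output_stream: "audio_with_header"
  }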
class PassThroughOrEmptyVectorCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->SetProcessTimestampBounds(true); cc->Inputs().Index(0).Set>(); cc->Outputs().Index(0).Set>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { if (!cc->Inputs().Index(0).IsEmpty()) { cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value()); } else { @@ -186,7 +186,7 @@ class PassThroughOrEmptyVectorCalculator : public CalculatorBase { MakePacket>(std::vector()) .At(cc->InputTimestamp())); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; @@ -311,24 +311,24 @@ TEST_F(BeginEndLoopCalculatorGraphProcessingEmptyPacketsTest, MultipleVectors) { class MultiplierCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set(); cc->Inputs().Index(1).Set(); cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { const int& input_int = cc->Inputs().Index(0).Get(); const int& multiplier_int = cc->Inputs().Index(1).Get(); auto output_int = absl::make_unique(input_int * multiplier_int); cc->Outputs().Index(0).Add(output_int.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; diff --git a/mediapipe/calculators/core/begin_loop_calculator.h b/mediapipe/calculators/core/begin_loop_calculator.h index ec59e1012..a655d1871 100644 --- a/mediapipe/calculators/core/begin_loop_calculator.h +++ b/mediapipe/calculators/core/begin_loop_calculator.h @@ -61,7 +61,7 @@ class BeginLoopCalculator : public CalculatorBase { using ItemT = typename IterableT::value_type; public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { // The below enables processing of timestamp bound updates, and that enables // correct timestamp propagation by the companion EndLoopCalculator. 
// @@ -106,10 +106,10 @@ class BeginLoopCalculator : public CalculatorBase { } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { Timestamp last_timestamp = loop_internal_timestamp_; if (!cc->Inputs().Tag("ITERABLE").IsEmpty()) { const IterableT& collection = @@ -139,7 +139,7 @@ class BeginLoopCalculator : public CalculatorBase { .AddPacket(MakePacket(cc->InputTimestamp()) .At(Timestamp(loop_internal_timestamp_ - 1))); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: diff --git a/mediapipe/calculators/core/clip_vector_size_calculator.h b/mediapipe/calculators/core/clip_vector_size_calculator.h index 89a4945d4..c16fd6dcc 100644 --- a/mediapipe/calculators/core/clip_vector_size_calculator.h +++ b/mediapipe/calculators/core/clip_vector_size_calculator.h @@ -33,7 +33,7 @@ namespace mediapipe { // input_stream: "input_vector" // output_stream: "output_vector" // options { -// [mediapipe.ClipIntVectorSizeCalculatorOptions.ext] { +// [mediapipe.ClipVectorSizeCalculatorOptions.ext] { // max_vec_size: 5 // } // } @@ -43,13 +43,13 @@ namespace mediapipe { template class ClipVectorSizeCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { RET_CHECK(cc->Inputs().NumEntries() == 1); RET_CHECK(cc->Outputs().NumEntries() == 1); if (cc->Options<::mediapipe::ClipVectorSizeCalculatorOptions>() .max_vec_size() < 1) { - return ::mediapipe::InternalError( + return mediapipe::InternalError( "max_vec_size should be greater than or equal to 1."); } @@ -60,10 +60,10 @@ class ClipVectorSizeCalculator : public CalculatorBase { cc->InputSidePackets().Index(0).Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->SetOffset(TimestampDiff(0)); max_vec_size_ = cc->Options<::mediapipe::ClipVectorSizeCalculatorOptions>() .max_vec_size(); @@ -72,23 +72,23 @@ class ClipVectorSizeCalculator : public CalculatorBase { !cc->InputSidePackets().Index(0).IsEmpty()) { max_vec_size_ = cc->InputSidePackets().Index(0).Get(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { if (max_vec_size_ < 1) { - return ::mediapipe::InternalError( + return mediapipe::InternalError( "max_vec_size should be greater than or equal to 1."); } if (cc->Inputs().Index(0).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } return ClipVectorSize(std::is_copy_constructible(), cc); } template - ::mediapipe::Status ClipVectorSize(std::true_type, CalculatorContext* cc) { + mediapipe::Status ClipVectorSize(std::true_type, CalculatorContext* cc) { auto output = absl::make_unique>(); const std::vector& input_vector = cc->Inputs().Index(0).Get>(); @@ -100,19 +100,19 @@ class ClipVectorSizeCalculator : public CalculatorBase { } } cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } template - ::mediapipe::Status ClipVectorSize(std::false_type, CalculatorContext* cc) { + mediapipe::Status ClipVectorSize(std::false_type, CalculatorContext* cc) { return 
ConsumeAndClipVectorSize(std::is_move_constructible(), cc); } template - ::mediapipe::Status ConsumeAndClipVectorSize(std::true_type, - CalculatorContext* cc) { + mediapipe::Status ConsumeAndClipVectorSize(std::true_type, + CalculatorContext* cc) { auto output = absl::make_unique>(); - ::mediapipe::StatusOr>> input_status = + mediapipe::StatusOr>> input_status = cc->Inputs().Index(0).Value().Consume>(); if (input_status.ok()) { @@ -129,13 +129,13 @@ class ClipVectorSizeCalculator : public CalculatorBase { return input_status.status(); } cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } template - ::mediapipe::Status ConsumeAndClipVectorSize(std::false_type, - CalculatorContext* cc) { - return ::mediapipe::InternalError( + mediapipe::Status ConsumeAndClipVectorSize(std::false_type, + CalculatorContext* cc) { + return mediapipe::InternalError( "Cannot copy or move input vectors and clip their size."); } diff --git a/mediapipe/calculators/core/concatenate_normalized_landmark_list_calculator.cc b/mediapipe/calculators/core/concatenate_normalized_landmark_list_calculator.cc index 54c3e05b9..e94ab5ae8 100644 --- a/mediapipe/calculators/core/concatenate_normalized_landmark_list_calculator.cc +++ b/mediapipe/calculators/core/concatenate_normalized_landmark_list_calculator.cc @@ -29,7 +29,7 @@ namespace mediapipe { // NormalizedLandmarkList proto object. class ConcatenateNormalizedLandmarkListCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { RET_CHECK(cc->Inputs().NumEntries() != 0); RET_CHECK(cc->Outputs().NumEntries() == 1); @@ -39,21 +39,21 @@ class ConcatenateNormalizedLandmarkListCalculator : public CalculatorBase { cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->SetOffset(TimestampDiff(0)); only_emit_if_all_present_ = cc->Options<::mediapipe::ConcatenateVectorCalculatorOptions>() .only_emit_if_all_present(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { if (only_emit_if_all_present_) { for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { - if (cc->Inputs().Index(i).IsEmpty()) return ::mediapipe::OkStatus(); + if (cc->Inputs().Index(i).IsEmpty()) return mediapipe::OkStatus(); } } @@ -69,7 +69,7 @@ class ConcatenateNormalizedLandmarkListCalculator : public CalculatorBase { } cc->Outputs().Index(0).AddPacket( MakePacket(output).At(cc->InputTimestamp())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: diff --git a/mediapipe/calculators/core/concatenate_vector_calculator.cc b/mediapipe/calculators/core/concatenate_vector_calculator.cc index 309425080..39be14f46 100644 --- a/mediapipe/calculators/core/concatenate_vector_calculator.cc +++ b/mediapipe/calculators/core/concatenate_vector_calculator.cc @@ -20,6 +20,7 @@ #include "mediapipe/framework/formats/landmark.pb.h" #include "mediapipe/framework/formats/tensor.h" #include "mediapipe/framework/port/integral_types.h" +#include "mediapipe/util/render_data.pb.h" #include "tensorflow/lite/interpreter.h" #if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE) @@ -86,4 +87,8 @@ typedef 
ConcatenateVectorCalculator<::tflite::gpu::gl::GlBuffer> REGISTER_CALCULATOR(ConcatenateGlBufferVectorCalculator); #endif +typedef ConcatenateVectorCalculator + ConcatenateRenderDataVectorCalculator; +REGISTER_CALCULATOR(ConcatenateRenderDataVectorCalculator); + } // namespace mediapipe diff --git a/mediapipe/calculators/core/concatenate_vector_calculator.h b/mediapipe/calculators/core/concatenate_vector_calculator.h index ef72cb0dc..01b729ed9 100644 --- a/mediapipe/calculators/core/concatenate_vector_calculator.h +++ b/mediapipe/calculators/core/concatenate_vector_calculator.h @@ -34,7 +34,7 @@ namespace mediapipe { template class ConcatenateVectorCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { RET_CHECK(cc->Inputs().NumEntries() != 0); RET_CHECK(cc->Outputs().NumEntries() == 1); @@ -45,21 +45,21 @@ class ConcatenateVectorCalculator : public CalculatorBase { cc->Outputs().Index(0).Set>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->SetOffset(TimestampDiff(0)); only_emit_if_all_present_ = cc->Options<::mediapipe::ConcatenateVectorCalculatorOptions>() .only_emit_if_all_present(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { if (only_emit_if_all_present_) { for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { - if (cc->Inputs().Index(i).IsEmpty()) return ::mediapipe::OkStatus(); + if (cc->Inputs().Index(i).IsEmpty()) return mediapipe::OkStatus(); } } @@ -67,8 +67,7 @@ class ConcatenateVectorCalculator : public CalculatorBase { } template - ::mediapipe::Status ConcatenateVectors(std::true_type, - CalculatorContext* cc) { + mediapipe::Status ConcatenateVectors(std::true_type, CalculatorContext* cc) { auto output = absl::make_unique>(); for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { auto& input = cc->Inputs().Index(i); @@ -82,22 +81,21 @@ class ConcatenateVectorCalculator : public CalculatorBase { const std::vector& value = input.Get>(); output->insert(output->end(), value.begin(), value.end()); } else { - return ::mediapipe::InvalidArgumentError("Invalid input stream type."); + return mediapipe::InvalidArgumentError("Invalid input stream type."); } } cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } template - ::mediapipe::Status ConcatenateVectors(std::false_type, - CalculatorContext* cc) { + mediapipe::Status ConcatenateVectors(std::false_type, CalculatorContext* cc) { return ConsumeAndConcatenateVectors(std::is_move_constructible(), cc); } template - ::mediapipe::Status ConsumeAndConcatenateVectors(std::true_type, - CalculatorContext* cc) { + mediapipe::Status ConsumeAndConcatenateVectors(std::true_type, + CalculatorContext* cc) { auto output = absl::make_unique>(); for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { auto& input = cc->Inputs().Index(i); @@ -105,7 +103,7 @@ class ConcatenateVectorCalculator : public CalculatorBase { if (input.IsEmpty()) continue; if (input.Value().ValidateAsType().ok()) { - ::mediapipe::StatusOr> value_status = + mediapipe::StatusOr> value_status = input.Value().Consume(); if (value_status.ok()) { std::unique_ptr value = 
std::move(value_status).ValueOrDie(); @@ -114,7 +112,7 @@ class ConcatenateVectorCalculator : public CalculatorBase { return value_status.status(); } } else if (input.Value().ValidateAsType>().ok()) { - ::mediapipe::StatusOr>> value_status = + mediapipe::StatusOr>> value_status = input.Value().Consume>(); if (value_status.ok()) { std::unique_ptr> value = @@ -125,17 +123,17 @@ class ConcatenateVectorCalculator : public CalculatorBase { return value_status.status(); } } else { - return ::mediapipe::InvalidArgumentError("Invalid input stream type."); + return mediapipe::InvalidArgumentError("Invalid input stream type."); } } cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } template - ::mediapipe::Status ConsumeAndConcatenateVectors(std::false_type, - CalculatorContext* cc) { - return ::mediapipe::InternalError( + mediapipe::Status ConsumeAndConcatenateVectors(std::false_type, + CalculatorContext* cc) { + return mediapipe::InternalError( "Cannot copy or move inputs to concatenate them"); } diff --git a/mediapipe/calculators/core/constant_side_packet_calculator.cc b/mediapipe/calculators/core/constant_side_packet_calculator.cc index 213264dc1..4b6952deb 100644 --- a/mediapipe/calculators/core/constant_side_packet_calculator.cc +++ b/mediapipe/calculators/core/constant_side_packet_calculator.cc @@ -54,7 +54,7 @@ namespace {} // namespace // } class ConstantSidePacketCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { const auto& options = cc->Options<::mediapipe::ConstantSidePacketCalculatorOptions>(); RET_CHECK_EQ(cc->OutputSidePackets().NumEntries(kPacketTag), @@ -80,14 +80,14 @@ class ConstantSidePacketCalculator : public CalculatorBase { } else if (packet_options.has_classification_list_value()) { packet.Set(); } else { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "None of supported values were specified in options."); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { const auto& options = cc->Options<::mediapipe::ConstantSidePacketCalculatorOptions>(); int index = 0; @@ -109,15 +109,15 @@ class ConstantSidePacketCalculator : public CalculatorBase { packet.Set(MakePacket( packet_options.classification_list_value())); } else { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "None of supported values were specified in options."); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { - return ::mediapipe::OkStatus(); + mediapipe::Status Process(CalculatorContext* cc) override { + return mediapipe::OkStatus(); } private: diff --git a/mediapipe/calculators/core/constant_side_packet_calculator_test.cc b/mediapipe/calculators/core/constant_side_packet_calculator_test.cc index dee0a219e..497dc5e55 100644 --- a/mediapipe/calculators/core/constant_side_packet_calculator_test.cc +++ b/mediapipe/calculators/core/constant_side_packet_calculator_test.cc @@ -40,7 +40,7 @@ void DoTestSingleSidePacket(absl::string_view packet_spec, } )"; CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( absl::Substitute(graph_config_template, packet_spec)); 
CalculatorGraph graph; MP_ASSERT_OK(graph.Initialize(graph_config)); @@ -62,7 +62,7 @@ TEST(ConstantSidePacketCalculatorTest, EveryPossibleType) { TEST(ConstantSidePacketCalculatorTest, MultiplePackets) { CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( node { calculator: "ConstantSidePacketCalculator" output_side_packet: "PACKET:0:int_packet" @@ -115,7 +115,7 @@ TEST(ConstantSidePacketCalculatorTest, MultiplePackets) { TEST(ConstantSidePacketCalculatorTest, ProcessingPacketsWithCorrectTagOnly) { CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( node { calculator: "ConstantSidePacketCalculator" output_side_packet: "PACKET:0:int_packet" @@ -159,7 +159,7 @@ TEST(ConstantSidePacketCalculatorTest, ProcessingPacketsWithCorrectTagOnly) { TEST(ConstantSidePacketCalculatorTest, IncorrectConfig_MoreOptionsThanPackets) { CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( node { calculator: "ConstantSidePacketCalculator" output_side_packet: "PACKET:int_packet" @@ -177,7 +177,7 @@ TEST(ConstantSidePacketCalculatorTest, IncorrectConfig_MoreOptionsThanPackets) { TEST(ConstantSidePacketCalculatorTest, IncorrectConfig_MorePacketsThanOptions) { CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( node { calculator: "ConstantSidePacketCalculator" output_side_packet: "PACKET:0:int_packet" diff --git a/mediapipe/calculators/core/counting_source_calculator.cc b/mediapipe/calculators/core/counting_source_calculator.cc index 7b2f79a0c..efd8148e9 100644 --- a/mediapipe/calculators/core/counting_source_calculator.cc +++ b/mediapipe/calculators/core/counting_source_calculator.cc @@ -30,7 +30,7 @@ namespace mediapipe { // provided, then batches are of size 1. 
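Outside of tests, a typical ConstantSidePacketCalculator node follows the same shape as the test configs above; a sketch with an illustrative packet name and value, assuming the options proto's repeated packet field suggested by the packet_options loop above:

  node {
    calculator: "ConstantSidePacketCalculator"
    output_side_packet: "PACKET:num_hands"
    options: {
      [mediapipe.ConstantSidePacketCalculatorOptions.ext]: {
        packet { int_value: 2 }
      }
    }
  }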
class CountingSourceCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Outputs().Index(0).Set(); if (cc->InputSidePackets().HasTag("ERROR_ON_OPEN")) { @@ -55,13 +55,13 @@ class CountingSourceCalculator : public CalculatorBase { if (cc->InputSidePackets().HasTag("INCREMENT")) { cc->InputSidePackets().Tag("INCREMENT").Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { if (cc->InputSidePackets().HasTag("ERROR_ON_OPEN") && cc->InputSidePackets().Tag("ERROR_ON_OPEN").Get()) { - return ::mediapipe::NotFoundError("expected error"); + return mediapipe::NotFoundError("expected error"); } if (cc->InputSidePackets().HasTag("ERROR_COUNT")) { error_count_ = cc->InputSidePackets().Tag("ERROR_COUNT").Get(); @@ -83,12 +83,12 @@ class CountingSourceCalculator : public CalculatorBase { RET_CHECK_LT(0, increment_); } RET_CHECK(error_count_ >= 0 || max_count_ >= 0); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { if (error_count_ >= 0 && batch_counter_ >= error_count_) { - return ::mediapipe::InternalError("expected error"); + return mediapipe::InternalError("expected error"); } if (max_count_ >= 0 && batch_counter_ >= max_count_) { return tool::StatusStop(); @@ -98,7 +98,7 @@ class CountingSourceCalculator : public CalculatorBase { counter_ += increment_; } ++batch_counter_; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: diff --git a/mediapipe/calculators/core/dequantize_byte_array_calculator.cc b/mediapipe/calculators/core/dequantize_byte_array_calculator.cc index 4f1a3ed86..373c15461 100644 --- a/mediapipe/calculators/core/dequantize_byte_array_calculator.cc +++ b/mediapipe/calculators/core/dequantize_byte_array_calculator.cc @@ -37,34 +37,34 @@ namespace mediapipe { class DequantizeByteArrayCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Tag("ENCODED").Set(); cc->Outputs().Tag("FLOAT_VECTOR").Set>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { const auto options = cc->Options<::mediapipe::DequantizeByteArrayCalculatorOptions>(); if (!options.has_max_quantized_value() || !options.has_min_quantized_value()) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "Both max_quantized_value and min_quantized_value must be provided " "in DequantizeByteArrayCalculatorOptions."); } float max_quantized_value = options.max_quantized_value(); float min_quantized_value = options.min_quantized_value(); if (max_quantized_value < min_quantized_value + FLT_EPSILON) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "max_quantized_value must be greater than min_quantized_value."); } float range = max_quantized_value - min_quantized_value; scalar_ = range / 255.0; bias_ = (range / 512.0) + min_quantized_value; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { 
+  mediapipe::Status Process(CalculatorContext* cc) final {
     const std::string& encoded =
         cc->Inputs().Tag("ENCODED").Value().Get<std::string>();
     std::vector<float> float_vector;
@@ -77,7 +77,7 @@ class DequantizeByteArrayCalculator : public CalculatorBase {
         .Tag("FLOAT_VECTOR")
         .AddPacket(MakePacket<std::vector<float>>(float_vector)
                        .At(cc->InputTimestamp()));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
  private:
diff --git a/mediapipe/calculators/core/end_loop_calculator.h b/mediapipe/calculators/core/end_loop_calculator.h
index 869cc29a2..969ed003a 100644
--- a/mediapipe/calculators/core/end_loop_calculator.h
+++ b/mediapipe/calculators/core/end_loop_calculator.h
@@ -57,7 +57,7 @@ class EndLoopCalculator : public CalculatorBase {
   using ItemT = typename IterableT::value_type;
 
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     RET_CHECK(cc->Inputs().HasTag("BATCH_END"))
         << "Missing BATCH_END tagged input_stream.";
     cc->Inputs().Tag("BATCH_END").Set<Timestamp>();
@@ -67,10 +67,10 @@ class EndLoopCalculator : public CalculatorBase {
     RET_CHECK(cc->Outputs().HasTag("ITERABLE"));
     cc->Outputs().Tag("ITERABLE").Set<IterableT>();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
-  ::mediapipe::Status Process(CalculatorContext* cc) override {
+  mediapipe::Status Process(CalculatorContext* cc) override {
     if (!cc->Inputs().Tag("ITEM").IsEmpty()) {
       if (!input_stream_collection_) {
         input_stream_collection_.reset(new IterableT);
@@ -94,7 +94,7 @@ class EndLoopCalculator : public CalculatorBase {
           .SetNextTimestampBound(Timestamp(loop_control_ts.Value() + 1));
     }
   }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
  private:
diff --git a/mediapipe/calculators/core/flow_limiter_calculator.cc b/mediapipe/calculators/core/flow_limiter_calculator.cc
index 6d595e6cd..ffee4b8ed 100644
--- a/mediapipe/calculators/core/flow_limiter_calculator.cc
+++ b/mediapipe/calculators/core/flow_limiter_calculator.cc
@@ -16,6 +16,7 @@
 #include
 #include
+#include "mediapipe/calculators/core/flow_limiter_calculator.pb.h"
 #include "mediapipe/framework/calculator_framework.h"
 #include "mediapipe/framework/port/ret_check.h"
 #include "mediapipe/framework/port/status.h"
@@ -23,41 +24,23 @@
 namespace mediapipe {
 
-// FlowLimiterCalculator is used to limit the number of pipelined processing
-// operations in a section of the graph.
+// FlowLimiterCalculator is used to limit the number of frames in flight
+// by dropping input frames when necessary.
 //
-// Typical topology:
+// The input stream "FINISHED" is used to signal the FlowLimiterCalculator
+// when a frame is finished processing. Either a non-empty "FINISHED" packet
+// or a timestamp bound should be received for each processed frame.
 //
-// in ->-[FLC]-[foo]-...-[bar]-+->- out
-//        ^_____________________|
-//                  FINISHED
+// The combination of `max_in_flight: 1` and `max_in_queue: 1` generally gives
+// the best throughput/latency balance. Throughput is nearly optimal because
+// the graph is never idle: there is always something in the queue. Latency is
+// nearly optimal because the queue always stores the latest available frame.
 //
-// By connecting the output of the graph section to this calculator's FINISHED
-// input with a backwards edge, this allows FLC to keep track of how many
-// timestamps are currently being processed.
-//
-// The limit defaults to 1, and can be overridden with the MAX_IN_FLIGHT side
-// packet.
-//
-// As long as the number of timestamps being processed ("in flight") is below
-// the limit, FLC allows input to pass through. When the limit is reached,
-// FLC starts dropping input packets, keeping only the most recent. When the
-// processing count decreases again, as signaled by the receipt of a packet on
-// FINISHED, FLC allows packets to flow again, releasing the most recently
-// queued packet, if any.
-//
-// If there are multiple input streams, packet dropping is synchronized.
-//
-// IMPORTANT: for each timestamp where FLC forwards a packet (or a set of
-// packets, if using multiple data streams), a packet must eventually arrive on
-// the FINISHED stream. Dropping packets in the section between FLC and
-// FINISHED will make the in-flight count incorrect.
-//
-// TODO: Remove this comment when graph-level ISH has been removed.
-// NOTE: this calculator should always use the ImmediateInputStreamHandler and
-// uses it by default. However, if the graph specifies a graph-level
-// InputStreamHandler, to override that setting, the InputStreamHandler must
-// be explicitly specified as shown below.
+// Increasing `max_in_flight` to 2 or more can yield better throughput
+// when the graph exhibits a high degree of pipeline parallelism. Decreasing
+// `max_in_queue` to 0 can yield a better average latency, but at the cost of
+// lower throughput (lower framerate) due to the time during which the graph
+// is idle awaiting the next input frame.
 //
 // Example config:
 // node {
@@ -68,131 +51,178 @@ namespace mediapipe {
//     tag_index: 'FINISHED'
//     back_edge: true
//   }
-//   input_stream_handler {
-//     input_stream_handler: 'ImmediateInputStreamHandler'
-//   }
-//   output_stream: "gated_frames"
+//   output_stream: "sampled_frames"
+//   output_stream: "ALLOW:allowed_timestamps"
 // }
+//
+// The "ALLOW" stream indicates the transition between accepting frames and
+// dropping frames. "ALLOW = true" indicates the start of accepting frames
+// including the current timestamp, and "ALLOW = false" indicates the start of
+// dropping frames including the current timestamp.
+//
+// FlowLimiterCalculator provides limited support for multiple input streams.
+// The first input stream is treated as the main input stream and successive
+// input streams are treated as auxiliary input streams. The auxiliary input
+// streams are limited to timestamps passed on the main input stream.
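To make the `max_in_flight` / `max_in_queue` semantics above concrete, here is a small standalone walkthrough of the release/drop bookkeeping. This is a toy sketch for this note only, in plain C++ with no MediaPipe types; the `drain` helper and the printed labels are illustrative:

```cpp
// Toy model of the limiter bookkeeping described above, with
// max_in_flight: 1 and max_in_queue: 1. "release" corresponds to
// ALLOW = true and "drop" to ALLOW = false at the given timestamp.
#include <cstddef>
#include <deque>
#include <iostream>

int main() {
  const std::size_t kMaxInFlight = 1;  // max_in_flight: 1
  const std::size_t kMaxInQueue = 1;   // max_in_queue: 1
  std::deque<int> queue, in_flight;

  auto drain = [&] {
    // Release queued timestamps while in-flight capacity allows.
    while (in_flight.size() < kMaxInFlight && !queue.empty()) {
      std::cout << "release " << queue.front() << "\n";  // ALLOW = true
      in_flight.push_back(queue.front());
      queue.pop_front();
    }
    // Evict the oldest queued timestamps beyond max_in_queue.
    while (queue.size() > kMaxInQueue) {
      std::cout << "drop " << queue.front() << "\n";  // ALLOW = false
      queue.pop_front();
    }
  };

  for (int ts : {0, 1, 2}) {  // three frames arrive back to back
    queue.push_back(ts);
    drain();
  }
  in_flight.pop_front();  // FINISHED arrives for timestamp 0
  drain();
  // Prints: release 0, drop 1, release 2.
}
```

In the real calculator the drop happens in a later Process() call than the release, because frame packets and FINISHED packets never arrive together, but the resulting release/drop sequence is the same one the FinishedTimestamps test below verifies through the "ALLOW" stream.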
+//
 class FlowLimiterCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
-    int num_data_streams = cc->Inputs().NumEntries("");
-    RET_CHECK_GE(num_data_streams, 1);
-    RET_CHECK_EQ(cc->Outputs().NumEntries(""), num_data_streams)
-        << "Output streams must correspond input streams except for the "
-           "finish indicator input stream.";
-    for (int i = 0; i < num_data_streams; ++i) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
+    auto& side_inputs = cc->InputSidePackets();
+    side_inputs.Tag("OPTIONS").Set<FlowLimiterCalculatorOptions>().Optional();
+    cc->Inputs().Tag("OPTIONS").Set<FlowLimiterCalculatorOptions>().Optional();
+    RET_CHECK_GE(cc->Inputs().NumEntries(""), 1);
+    for (int i = 0; i < cc->Inputs().NumEntries(""); ++i) {
       cc->Inputs().Get("", i).SetAny();
       cc->Outputs().Get("", i).SetSameAs(&(cc->Inputs().Get("", i)));
     }
     cc->Inputs().Get("FINISHED", 0).SetAny();
-    if (cc->InputSidePackets().HasTag("MAX_IN_FLIGHT")) {
-      cc->InputSidePackets().Tag("MAX_IN_FLIGHT").Set<int>();
-    }
-    if (cc->Outputs().HasTag("ALLOW")) {
-      cc->Outputs().Tag("ALLOW").Set<bool>();
-    }
-
+    cc->InputSidePackets().Tag("MAX_IN_FLIGHT").Set<int>().Optional();
+    cc->Outputs().Tag("ALLOW").Set<bool>().Optional();
     cc->SetInputStreamHandler("ImmediateInputStreamHandler");
-
-    return ::mediapipe::OkStatus();
+    cc->SetProcessTimestampBounds(true);
+    return mediapipe::OkStatus();
   }
 
-  ::mediapipe::Status Open(CalculatorContext* cc) final {
-    finished_id_ = cc->Inputs().GetId("FINISHED", 0);
-    max_in_flight_ = 1;
+  mediapipe::Status Open(CalculatorContext* cc) final {
+    options_ = cc->Options<FlowLimiterCalculatorOptions>();
+    options_ = tool::RetrieveOptions(options_, cc->InputSidePackets());
     if (cc->InputSidePackets().HasTag("MAX_IN_FLIGHT")) {
-      max_in_flight_ = cc->InputSidePackets().Tag("MAX_IN_FLIGHT").Get<int>();
+      options_.set_max_in_flight(
+          cc->InputSidePackets().Tag("MAX_IN_FLIGHT").Get<int>());
     }
-    RET_CHECK_GE(max_in_flight_, 1);
-    num_in_flight_ = 0;
-
-    allowed_id_ = cc->Outputs().GetId("ALLOW", 0);
-    allow_ctr_ts_ = Timestamp(0);
-
-    num_data_streams_ = cc->Inputs().NumEntries("");
-    data_stream_bound_ts_.resize(num_data_streams_);
+    input_queues_.resize(cc->Inputs().NumEntries(""));
     RET_CHECK_OK(CopyInputHeadersToOutputs(cc->Inputs(), &(cc->Outputs())));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
-  bool Allow() { return num_in_flight_ < max_in_flight_; }
+  // Returns true if an additional frame can be released for processing.
+  // The "ALLOW" output stream indicates this condition at each input frame.
+  bool ProcessingAllowed() {
+    return frames_in_flight_.size() < options_.max_in_flight();
+  }
 
-  ::mediapipe::Status Process(CalculatorContext* cc) final {
-    bool old_allow = Allow();
-    Timestamp lowest_incomplete_ts = Timestamp::Done();
-
-    // Process FINISHED stream.
-    if (!cc->Inputs().Get(finished_id_).Value().IsEmpty()) {
-      RET_CHECK_GT(num_in_flight_, 0)
-          << "Received a FINISHED packet, but we had none in flight.";
-      --num_in_flight_;
+  // Outputs a packet indicating whether a frame was sent or dropped.
+  void SendAllow(bool allow, Timestamp ts, CalculatorContext* cc) {
+    if (cc->Outputs().HasTag("ALLOW")) {
+      cc->Outputs().Tag("ALLOW").AddPacket(MakePacket<bool>(allow).At(ts));
     }
+  }
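The contract above accepts the options through calculator options, the optional "OPTIONS" side packet, or the optional "OPTIONS" input stream. A hedged sketch of the side-packet route follows, mirroring the test pattern later in this change; the function name, the `limiter_options` side-packet name, and the `allowed_timestamps` stream name are assumptions that the supplied graph config would have to declare (`input_side_packet: "OPTIONS:limiter_options"`, `output_stream: "ALLOW:allowed_timestamps"`):

```cpp
// Sketch only -- not part of this change.
#include "mediapipe/calculators/core/flow_limiter_calculator.pb.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/port/logging.h"
#include "mediapipe/framework/port/status.h"

mediapipe::Status RunLimitedGraph(
    const mediapipe::CalculatorGraphConfig& config) {
  mediapipe::FlowLimiterCalculatorOptions options;
  options.set_max_in_flight(1);           // one frame processing at a time
  options.set_max_in_queue(1);            // keep only the latest waiting frame
  options.set_in_flight_timeout(100000);  // give up on a frame after 100 ms

  mediapipe::CalculatorGraph graph;
  MP_RETURN_IF_ERROR(graph.Initialize(
      config, {{"limiter_options",
                mediapipe::MakePacket<mediapipe::FlowLimiterCalculatorOptions>(
                    options)}}));

  // Same poller pattern as the tests below: watch the ALLOW transitions.
  mediapipe::OutputStreamPoller poller(
      graph.AddOutputStreamPoller("allowed_timestamps").ValueOrDie());
  MP_RETURN_IF_ERROR(graph.StartRun({}));

  // A real client would feed input frames from another thread here; closing
  // the sources immediately just keeps this sketch terminating.
  MP_RETURN_IF_ERROR(graph.CloseAllPacketSources());
  mediapipe::Packet packet;
  while (poller.Next(&packet)) {
    LOG(INFO) << packet.Timestamp() << " allow=" << packet.Get<bool>();
  }
  return graph.WaitUntilDone();
}
```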
- for (int i = 0; i < num_data_streams_; ++i) { - auto& stream = cc->Inputs().Get("", i); - auto& out = cc->Outputs().Get("", i); - Packet& packet = stream.Value(); - auto ts = packet.Timestamp(); - if (ts.IsRangeValue() && data_stream_bound_ts_[i] <= ts) { - data_stream_bound_ts_[i] = ts + 1; - // Note: it's ok to update the output bound here, before sending the - // packet, because updates are batched during the Process function. - out.SetNextTimestampBound(data_stream_bound_ts_[i]); - } - lowest_incomplete_ts = - std::min(lowest_incomplete_ts, data_stream_bound_ts_[i]); + // Sets the timestamp bound or closes an output stream. + void SetNextTimestampBound(Timestamp bound, OutputStream* stream) { + if (bound > Timestamp::Max()) { + stream->Close(); + } else { + stream->SetNextTimestampBound(bound); + } + } - if (packet.IsEmpty()) { - // If the input stream is closed, close the corresponding output. - if (stream.IsDone() && !out.IsClosed()) { - out.Close(); + // Returns true if a certain timestamp is being processed. + bool IsInFlight(Timestamp timestamp) { + return std::find(frames_in_flight_.begin(), frames_in_flight_.end(), + timestamp) != frames_in_flight_.end(); + } + + // Releases input packets up to the latest settled input timestamp. + void ProcessAuxiliaryInputs(CalculatorContext* cc) { + Timestamp settled_bound = cc->Outputs().Get("", 0).NextTimestampBound(); + for (int i = 1; i < cc->Inputs().NumEntries(""); ++i) { + // Release settled frames from each input queue. + while (!input_queues_[i].empty() && + input_queues_[i].front().Timestamp() < settled_bound) { + Packet packet = input_queues_[i].front(); + input_queues_[i].pop_front(); + if (IsInFlight(packet.Timestamp())) { + cc->Outputs().Get("", i).AddPacket(packet); } - // TODO: if the packet is empty, the ts is unset, and we - // cannot read the timestamp bound, even though we'd like to propagate - // it. - } else if (mediapipe::ContainsKey(pending_ts_, ts)) { - // If we have already sent this timestamp (on another stream), send it - // on this stream too. - out.AddPacket(std::move(packet)); - } else if (Allow() && (ts > last_dropped_ts_)) { - // If the in-flight is under the limit, and if we have not already - // dropped this or a later timestamp on another stream, then send - // the packet and add an in-flight timestamp. - out.AddPacket(std::move(packet)); - pending_ts_.insert(ts); - ++num_in_flight_; + } + + // Propagate each input timestamp bound. + if (!input_queues_[i].empty()) { + Timestamp bound = input_queues_[i].front().Timestamp(); + SetNextTimestampBound(bound, &cc->Outputs().Get("", i)); } else { - // Otherwise, we'll drop the packet. - last_dropped_ts_ = std::max(last_dropped_ts_, ts); + Timestamp bound = + cc->Inputs().Get("", i).Value().Timestamp().NextAllowedInStream(); + SetNextTimestampBound(bound, &cc->Outputs().Get("", i)); + } + } + } + + // Releases input packets allowed by the max_in_flight constraint. + mediapipe::Status Process(CalculatorContext* cc) final { + options_ = tool::RetrieveOptions(options_, cc->Inputs()); + + // Process the FINISHED input stream. + Packet finished_packet = cc->Inputs().Tag("FINISHED").Value(); + if (finished_packet.Timestamp() == cc->InputTimestamp()) { + while (!frames_in_flight_.empty() && + frames_in_flight_.front() <= finished_packet.Timestamp()) { + frames_in_flight_.pop_front(); } } - // Remove old pending_ts_ entries. 
-    auto it = std::lower_bound(pending_ts_.begin(), pending_ts_.end(),
-                               lowest_incomplete_ts);
-    pending_ts_.erase(pending_ts_.begin(), it);
-
-    // Update ALLOW signal.
-    if ((old_allow != Allow()) && allowed_id_.IsValid()) {
-      cc->Outputs()
-          .Get(allowed_id_)
-          .AddPacket(MakePacket<bool>(Allow()).At(++allow_ctr_ts_));
+    // Process the frame input streams.
+    for (int i = 0; i < cc->Inputs().NumEntries(""); ++i) {
+      Packet packet = cc->Inputs().Get("", i).Value();
+      if (!packet.IsEmpty()) {
+        input_queues_[i].push_back(packet);
+      }
     }
-    return ::mediapipe::OkStatus();
+
+    // Abandon expired frames in flight. Note that old frames are abandoned
+    // when much newer frame timestamps arrive, regardless of elapsed time.
+    TimestampDiff timeout = options_.in_flight_timeout();
+    Timestamp latest_ts = cc->Inputs().Get("", 0).Value().Timestamp();
+    if (timeout > 0 && latest_ts == cc->InputTimestamp() &&
+        latest_ts < Timestamp::Max()) {
+      while (!frames_in_flight_.empty() &&
+             (latest_ts - frames_in_flight_.front()) > timeout) {
+        frames_in_flight_.pop_front();
+      }
+    }
+
+    // Release allowed frames from the main input queue.
+    auto& input_queue = input_queues_[0];
+    while (ProcessingAllowed() && !input_queue.empty()) {
+      Packet packet = input_queue.front();
+      input_queue.pop_front();
+      cc->Outputs().Get("", 0).AddPacket(packet);
+      SendAllow(true, packet.Timestamp(), cc);
+      frames_in_flight_.push_back(packet.Timestamp());
+    }
+
+    // Limit the number of queued frames.
+    // Note that frames can be dropped after frames are released, because
+    // frame packets and FINISHED packets never arrive in the same Process
+    // call.
+    while (input_queue.size() > options_.max_in_queue()) {
+      Packet packet = input_queue.front();
+      input_queue.pop_front();
+      SendAllow(false, packet.Timestamp(), cc);
+    }
+
+    // Propagate the input timestamp bound.
+    if (!input_queue.empty()) {
+      Timestamp bound = input_queue.front().Timestamp();
+      SetNextTimestampBound(bound, &cc->Outputs().Get("", 0));
+    } else {
+      Timestamp bound =
+          cc->Inputs().Get("", 0).Value().Timestamp().NextAllowedInStream();
+      SetNextTimestampBound(bound, &cc->Outputs().Get("", 0));
+      if (cc->Outputs().HasTag("ALLOW")) {
+        SetNextTimestampBound(bound, &cc->Outputs().Tag("ALLOW"));
+      }
+    }
+
+    ProcessAuxiliaryInputs(cc);
+    return mediapipe::OkStatus();
   }
 
  private:
-  std::set<Timestamp> pending_ts_;
-  Timestamp last_dropped_ts_;
-  int num_data_streams_;
-  int num_in_flight_;
-  int max_in_flight_;
-  CollectionItemId finished_id_;
-  CollectionItemId allowed_id_;
-  Timestamp allow_ctr_ts_;
-  std::vector<Timestamp> data_stream_bound_ts_;
+  FlowLimiterCalculatorOptions options_;
+  std::vector<std::deque<Packet>> input_queues_;
+  std::deque<Timestamp> frames_in_flight_;
 };
 REGISTER_CALCULATOR(FlowLimiterCalculator);
diff --git a/mediapipe/calculators/core/flow_limiter_calculator.proto b/mediapipe/calculators/core/flow_limiter_calculator.proto
new file mode 100644
index 000000000..0f7c925ae
--- /dev/null
+++ b/mediapipe/calculators/core/flow_limiter_calculator.proto
@@ -0,0 +1,40 @@
+// Copyright 2019 The MediaPipe Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto2";
+
+package mediapipe;
+
+import "mediapipe/framework/calculator.proto";
+
+option objc_class_prefix = "MediaPipe";
+
+message FlowLimiterCalculatorOptions {
+  extend mediapipe.CalculatorOptions {
+    optional FlowLimiterCalculatorOptions ext = 326963320;
+  }
+
+  // The maximum number of frames released for processing at one time.
+  // The default value limits to 1 frame processing at a time.
+  optional int32 max_in_flight = 1 [default = 1];
+
+  // The maximum number of frames queued waiting for processing.
+  // The default value of 0 queues no frames awaiting processing.
+  optional int32 max_in_queue = 2 [default = 0];
+
+  // The maximum time in microseconds to wait for a frame to finish processing.
+  // The default value stops waiting after 1 sec.
+  // The value 0 specifies no timeout.
+  optional int64 in_flight_timeout = 3 [default = 1000000];
+}
diff --git a/mediapipe/calculators/core/flow_limiter_calculator_test.cc b/mediapipe/calculators/core/flow_limiter_calculator_test.cc
index 895c88e6d..12cacfc72 100644
--- a/mediapipe/calculators/core/flow_limiter_calculator_test.cc
+++ b/mediapipe/calculators/core/flow_limiter_calculator_test.cc
@@ -19,6 +19,7 @@
 
 #include "absl/time/clock.h"
 #include "absl/time/time.h"
+#include "mediapipe/calculators/core/flow_limiter_calculator.pb.h"
 #include "mediapipe/framework/calculator_framework.h"
 #include "mediapipe/framework/calculator_runner.h"
 #include "mediapipe/framework/formats/image_frame.h"
@@ -28,6 +29,8 @@
 #include "mediapipe/framework/port/parse_text_proto.h"
 #include "mediapipe/framework/port/status_matchers.h"
 #include "mediapipe/framework/timestamp.h"
+#include "mediapipe/framework/tool/simulation_clock.h"
+#include "mediapipe/framework/tool/simulation_clock_executor.h"
 #include "mediapipe/framework/tool/sink.h"
 
 namespace mediapipe {
@@ -67,144 +70,49 @@ std::vector<T> PacketValues(const std::vector<Packet>& packets) {
   return result;
 }
 
-constexpr int kNumImageFrames = 5;
-constexpr int kNumFinished = 3;
-CalculatorGraphConfig::Node GetDefaultNode() {
-  return ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"(
-    calculator: "FlowLimiterCalculator"
-    input_stream: "raw_frames"
-    input_stream: "FINISHED:finished"
-    input_stream_info: { tag_index: "FINISHED" back_edge: true }
-    output_stream: "gated_frames"
-  )");
-}
-
-// Simple test to make sure that the FlowLimiterCalculator outputs just one
-// packet when MAX_IN_FLIGHT is 1.
-TEST(FlowLimiterCalculator, OneOutputTest) {
-  // Setup the calculator runner and add only ImageFrame packets.
-  CalculatorRunner runner(GetDefaultNode());
-  for (int i = 0; i < kNumImageFrames; ++i) {
-    Timestamp timestamp = Timestamp(i * Timestamp::kTimestampUnitsPerSecond);
-    runner.MutableInputs()->Index(0).packets.push_back(
-        MakePacket<ImageFrame>().At(timestamp));
-  }
-
-  // Run the calculator.
-  MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
-  const std::vector<Packet>& frame_output_packets =
-      runner.Outputs().Index(0).packets;
-
-  EXPECT_EQ(frame_output_packets.size(), 1);
-}
-
-// Simple test to make sure that the FlowLimiterCalculator waits for all
-// input streams to have at least one packet available before publishing.
-TEST(FlowLimiterCalculator, BasicTest) {
-  // Setup the calculator runner and add both ImageFrame and finish packets.
-  CalculatorRunner runner(GetDefaultNode());
-  for (int i = 0; i < kNumImageFrames; ++i) {
-    Timestamp timestamp = Timestamp(i * Timestamp::kTimestampUnitsPerSecond);
-    runner.MutableInputs()->Index(0).packets.push_back(
-        MakePacket<ImageFrame>().At(timestamp));
-  }
-  for (int i = 0; i < kNumFinished; ++i) {
-    Timestamp timestamp =
-        Timestamp((i + 1) * Timestamp::kTimestampUnitsPerSecond);
-    runner.MutableInputs()
-        ->Tag("FINISHED")
-        .packets.push_back(MakePacket<bool>(true).At(timestamp));
-  }
-
-  // Run the calculator.
-  MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
-  const std::vector<Packet>& frame_output_packets =
-      runner.Outputs().Index(0).packets;
-
-  // Only outputs packets if both input streams are available.
-  int expected_num_packets = std::min(kNumImageFrames, kNumFinished + 1);
-  EXPECT_EQ(frame_output_packets.size(), expected_num_packets);
-}
-
 // A Calculator::Process callback function.
-typedef std::function<::mediapipe::Status(const InputStreamShardSet&,
-                                          OutputStreamShardSet*)>
+typedef std::function<mediapipe::Status(const InputStreamShardSet&,
+                                        OutputStreamShardSet*)>
     ProcessFunction;
 
 // A testing callback function that passes through all packets.
-::mediapipe::Status PassthroughFunction(const InputStreamShardSet& inputs,
-                                        OutputStreamShardSet* outputs) {
+mediapipe::Status PassthroughFunction(const InputStreamShardSet& inputs,
+                                      OutputStreamShardSet* outputs) {
   for (int i = 0; i < inputs.NumEntries(); ++i) {
     if (!inputs.Index(i).Value().IsEmpty()) {
       outputs->Index(i).AddPacket(inputs.Index(i).Value());
     }
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
-// A Calculator that runs a testing callback function in Close.
-class CloseCallbackCalculator : public CalculatorBase {
+// Tests demonstrating a FlowLimiterCalculator operating in a cyclic graph.
+class FlowLimiterCalculatorSemaphoreTest : public testing::Test {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
-    for (CollectionItemId id = cc->Inputs().BeginId();
-         id < cc->Inputs().EndId(); ++id) {
-      cc->Inputs().Get(id).SetAny();
-    }
-    for (CollectionItemId id = cc->Outputs().BeginId();
-         id < cc->Outputs().EndId(); ++id) {
-      cc->Outputs().Get(id).SetAny();
-    }
-    cc->InputSidePackets().Index(0).Set<std::function<::mediapipe::Status()>>();
-    return ::mediapipe::OkStatus();
-  }
-
-  ::mediapipe::Status Process(CalculatorContext* cc) override {
-    return PassthroughFunction(cc->Inputs(), &(cc->Outputs()));
-  }
-
-  ::mediapipe::Status Close(CalculatorContext* cc) override {
-    const auto& callback = cc->InputSidePackets()
-                               .Index(0)
-                               .Get<std::function<::mediapipe::Status()>>();
-    return callback();
-  }
-};
-REGISTER_CALCULATOR(CloseCallbackCalculator);
-
-// Tests demostrating an FlowLimiterCalculator operating in a cyclic graph.
-// TODO: clean up these tests.
-class FlowLimiterCalculatorTest : public testing::Test {
- public:
-  FlowLimiterCalculatorTest() : enter_semaphore_(0), exit_semaphore_(0) {}
+  FlowLimiterCalculatorSemaphoreTest() : exit_semaphore_(0) {}
 
   void SetUp() override {
     graph_config_ = InflightGraphConfig();
     tool::AddVectorSink("out_1", &graph_config_, &out_1_packets_);
-    tool::AddVectorSink("out_2", &graph_config_, &out_2_packets_);
   }
 
   void InitializeGraph(int max_in_flight) {
-    ProcessFunction semaphore_0_func = [&](const InputStreamShardSet& inputs,
-                                           OutputStreamShardSet* outputs) {
-      enter_semaphore_.Release(1);
-      return PassthroughFunction(inputs, outputs);
-    };
     ProcessFunction semaphore_1_func = [&](const InputStreamShardSet& inputs,
                                            OutputStreamShardSet* outputs) {
       exit_semaphore_.Acquire(1);
       return PassthroughFunction(inputs, outputs);
     };
-    std::function<::mediapipe::Status()> close_func = [this]() {
-      close_count_++;
-      return ::mediapipe::OkStatus();
-    };
+    FlowLimiterCalculatorOptions options;
+    options.set_max_in_flight(max_in_flight);
+    options.set_max_in_queue(1);
     MP_ASSERT_OK(graph_.Initialize(
         graph_config_,
         {
-            {"max_in_flight", MakePacket<int>(max_in_flight)},
-            {"callback_0", Adopt(new auto(semaphore_0_func))},
+            {"limiter_options", Adopt(new auto(options))},
             {"callback_1", Adopt(new auto(semaphore_1_func))},
-            {"callback_2", Adopt(new auto(close_func))},
         }));
+
+    allow_poller_.reset(new OutputStreamPoller(
+        graph_.AddOutputStreamPoller("allow").ValueOrDie()));
   }
 
   // Adds a packet to a graph input stream.
@@ -216,44 +124,24 @@ class FlowLimiterCalculatorTest : public testing::Test {
   // A calculator graph starting with an FlowLimiterCalculator and
   // ending with a InFlightFinishCalculator.
   // Back-edge "finished" limits processing to one frame in-flight.
-  // The two LambdaCalculators are used to keep certain packet sets in flight.
+  // The LambdaCalculator is used to keep certain frames in flight.
   CalculatorGraphConfig InflightGraphConfig() {
     return ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
       input_stream: 'in_1'
-      input_stream: 'in_2'
       node {
        calculator: 'FlowLimiterCalculator'
-        input_side_packet: 'MAX_IN_FLIGHT:max_in_flight'
+        input_side_packet: 'OPTIONS:limiter_options'
        input_stream: 'in_1'
-        input_stream: 'in_2'
        input_stream: 'FINISHED:out_1'
        input_stream_info: { tag_index: 'FINISHED' back_edge: true }
        output_stream: 'in_1_sampled'
-        output_stream: 'in_2_sampled'
-      }
-      node {
-        calculator: 'LambdaCalculator'
-        input_side_packet: 'callback_0'
-        input_stream: 'in_1_sampled'
-        input_stream: 'in_2_sampled'
-        output_stream: 'queue_1'
-        output_stream: 'queue_2'
+        output_stream: 'ALLOW:allow'
      }
      node {
        calculator: 'LambdaCalculator'
        input_side_packet: 'callback_1'
-        input_stream: 'queue_1'
-        input_stream: 'queue_2'
-        output_stream: 'close_1'
-        output_stream: 'close_2'
-      }
-      node {
-        calculator: 'CloseCallbackCalculator'
-        input_side_packet: 'callback_2'
-        input_stream: 'close_1'
-        input_stream: 'close_2'
+        input_stream: 'in_1_sampled'
        output_stream: 'out_1'
-        output_stream: 'out_2'
      }
    )");
  }
@@ -261,21 +149,19 @@ class FlowLimiterCalculatorTest : public testing::Test {
 protected:
  CalculatorGraphConfig graph_config_;
  CalculatorGraph graph_;
-  AtomicSemaphore enter_semaphore_;
  AtomicSemaphore exit_semaphore_;
  std::vector<Packet> out_1_packets_;
-  std::vector<Packet> out_2_packets_;
-  int close_count_ = 0;
+  std::unique_ptr<OutputStreamPoller> allow_poller_;
};

// A test demonstrating an FlowLimiterCalculator operating in a cyclic
// graph. This test shows that:
//
-// (1) Timestamps are passed through unaltered.
-// (2) All output streams including the back_edge stream are closed when
-//     the first input stream is closed.
+// (1) Frames exceeding the queue size are dropped.
+// (2) The "ALLOW" signal is produced.
+// (3) Timestamps are passed through unaltered.
 //
-TEST_F(FlowLimiterCalculatorTest, BackEdgeCloses) {
+TEST_F(FlowLimiterCalculatorSemaphoreTest, FramesDropped) {
   InitializeGraph(1);
   MP_ASSERT_OK(graph_.StartRun({}));
 
@@ -284,210 +170,590 @@
        input_name, MakePacket<int>(n).At(Timestamp(n))));
  };
 
-  for (int i = 0; i < 10; i++) {
-    send_packet("in_1", i * 10);
-    // This next input should be dropped.
+  Packet allow_packet;
+  send_packet("in_1", 0);
+  for (int i = 0; i < 9; i++) {
+    EXPECT_TRUE(allow_poller_->Next(&allow_packet));
+    EXPECT_TRUE(allow_packet.Get<bool>());
+    // This input should wait in the limiter input queue.
     send_packet("in_1", i * 10 + 5);
-    MP_EXPECT_OK(graph_.WaitUntilIdle());
-    send_packet("in_2", i * 10);
+    // This input should drop the previous input.
+    send_packet("in_1", i * 10 + 10);
+    EXPECT_TRUE(allow_poller_->Next(&allow_packet));
+    EXPECT_FALSE(allow_packet.Get<bool>());
     exit_semaphore_.Release(1);
-    MP_EXPECT_OK(graph_.WaitUntilIdle());
   }
+  exit_semaphore_.Release(1);
   MP_EXPECT_OK(graph_.CloseInputStream("in_1"));
-  MP_EXPECT_OK(graph_.CloseInputStream("in_2"));
   MP_EXPECT_OK(graph_.WaitUntilIdle());
 
   // All output streams are closed and all output packets are delivered,
-  // with stream "in_1" and stream "in_2" closed.
+  // with stream "in_1" closed.
   EXPECT_EQ(10, out_1_packets_.size());
-  EXPECT_EQ(10, out_2_packets_.size());
 
-  // Timestamps have not been messed with.
+  // Timestamps have not been altered.
   EXPECT_EQ(PacketValues<int64>(out_1_packets_),
             TimestampValues(out_1_packets_));
-  EXPECT_EQ(PacketValues<int64>(out_2_packets_),
-            TimestampValues(out_2_packets_));
 
-  // Extra inputs on in_1 have been dropped
+  // Extra inputs on in_1 have been dropped.
   EXPECT_EQ(TimestampValues(out_1_packets_),
            (std::vector<int64>{0, 10, 20, 30, 40, 50, 60, 70, 80, 90}));
-  EXPECT_EQ(TimestampValues(out_1_packets_), TimestampValues(out_2_packets_));
-
-  // The closing of the stream has been propagated.
-  EXPECT_EQ(1, close_count_);
 }
 
-// A test demonstrating that all output streams are closed when all
-// input streams are closed after the last input packet has been processed.
-TEST_F(FlowLimiterCalculatorTest, AllStreamsClose) {
-  InitializeGraph(1);
-  MP_ASSERT_OK(graph_.StartRun({}));
-
-  exit_semaphore_.Release(10);
-  for (int i = 0; i < 10; i++) {
-    AddPacket("in_1", i);
-    MP_EXPECT_OK(graph_.WaitUntilIdle());
-    AddPacket("in_2", i);
-    MP_EXPECT_OK(graph_.WaitUntilIdle());
-  }
-  MP_EXPECT_OK(graph_.CloseAllInputStreams());
-  MP_EXPECT_OK(graph_.WaitUntilIdle());
-
-  EXPECT_EQ(TimestampValues(out_1_packets_), TimestampValues(out_2_packets_));
-  EXPECT_EQ(TimestampValues(out_1_packets_),
-            (std::vector<int64>{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}));
-  EXPECT_EQ(1, close_count_);
-}
 
+// A calculator that sleeps during Process.
+class SleepCalculator : public CalculatorBase {
+ public:
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
+    cc->Inputs().Tag("PACKET").SetAny();
+    cc->Outputs().Tag("PACKET").SetSameAs(&cc->Inputs().Tag("PACKET"));
+    cc->InputSidePackets().Tag("SLEEP_TIME").Set<int64>();
+    cc->InputSidePackets().Tag("WARMUP_TIME").Set<int64>();
+    cc->InputSidePackets().Tag("CLOCK").Set<mediapipe::Clock*>();
+    cc->SetTimestampOffset(0);
+    return mediapipe::OkStatus();
+  }
+
+  mediapipe::Status Open(CalculatorContext* cc) final {
+    clock_ = cc->InputSidePackets().Tag("CLOCK").Get<mediapipe::Clock*>();
+    return mediapipe::OkStatus();
+  }
+
+  mediapipe::Status Process(CalculatorContext* cc) final {
+    ++packet_count;
+    absl::Duration sleep_time = absl::Microseconds(
+        packet_count == 1
+            ? cc->InputSidePackets().Tag("WARMUP_TIME").Get<int64>()
+            : cc->InputSidePackets().Tag("SLEEP_TIME").Get<int64>());
+    clock_->Sleep(sleep_time);
+    cc->Outputs().Tag("PACKET").AddPacket(cc->Inputs().Tag("PACKET").Value());
+    return mediapipe::OkStatus();
+  }
+
+ private:
+  ::mediapipe::Clock* clock_ = nullptr;
+  int packet_count = 0;
+};
+REGISTER_CALCULATOR(SleepCalculator);
+
+// A calculator that drops a packet occasionally.
+// Drops the 3rd packet, and optionally the corresponding timestamp bound.
+class DropCalculator : public CalculatorBase {
+ public:
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
+    cc->Inputs().Tag("PACKET").SetAny();
+    cc->Outputs().Tag("PACKET").SetSameAs(&cc->Inputs().Tag("PACKET"));
+    cc->InputSidePackets().Tag("DROP_TIMESTAMPS").Set<bool>();
+    cc->SetProcessTimestampBounds(true);
+    return mediapipe::OkStatus();
+  }
+
+  mediapipe::Status Process(CalculatorContext* cc) final {
+    if (!cc->Inputs().Tag("PACKET").Value().IsEmpty()) {
+      ++packet_count;
+    }
+    bool drop = (packet_count == 3);
+    if (!drop && !cc->Inputs().Tag("PACKET").Value().IsEmpty()) {
+      cc->Outputs().Tag("PACKET").AddPacket(cc->Inputs().Tag("PACKET").Value());
+    }
+    if (!drop || !cc->InputSidePackets().Tag("DROP_TIMESTAMPS").Get<bool>()) {
+      cc->Outputs().Tag("PACKET").SetNextTimestampBound(
+          cc->InputTimestamp().NextAllowedInStream());
+    }
+    return mediapipe::OkStatus();
+  }
+
+ private:
+  int packet_count = 0;
+};
+REGISTER_CALCULATOR(DropCalculator);
+
+// Tests demonstrating a FlowLimiterCalculator processing FINISHED timestamps.
+class FlowLimiterCalculatorTest : public testing::Test {
+ protected:
+  CalculatorGraphConfig InflightGraphConfig() {
+    return ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      input_stream: 'in_1'
+      node {
+        calculator: 'FlowLimiterCalculator'
+        input_side_packet: 'OPTIONS:limiter_options'
+        input_stream: 'in_1'
+        input_stream: 'FINISHED:out_1'
+        input_stream_info: { tag_index: 'FINISHED' back_edge: true }
+        output_stream: 'in_1_sampled'
+        output_stream: 'ALLOW:allow'
+      }
+      node {
+        calculator: 'SleepCalculator'
+        input_side_packet: 'WARMUP_TIME:warmup_time'
+        input_side_packet: 'SLEEP_TIME:sleep_time'
+        input_side_packet: 'CLOCK:clock'
+        input_stream: 'PACKET:in_1_sampled'
+        output_stream: 'PACKET:out_1_sampled'
+      }
+      node {
+        calculator: 'DropCalculator'
+        input_side_packet: "DROP_TIMESTAMPS:drop_timesamps"
+        input_stream: 'PACKET:out_1_sampled'
+        output_stream: 'PACKET:out_1'
+      }
+    )");
+  }
+
+  // Parse an absl::Time from RFC3339 format.
+  absl::Time ParseTime(const std::string& date_time_str) {
+    absl::Time result;
+    absl::ParseTime(absl::RFC3339_sec, date_time_str, &result, nullptr);
+    return result;
+  }
+
+  // The point in simulated time when the test starts.
+  absl::Time StartTime() { return ParseTime("2020-11-03T20:00:00Z"); }
+
+  // Initialize the test clock to follow simulated time.
+  void SetUpSimulationClock() {
+    auto executor = std::make_shared<SimulationClockExecutor>(8);
+    simulation_clock_ = executor->GetClock();
+    clock_ = simulation_clock_.get();
+    simulation_clock_->ThreadStart();
+    clock_->SleepUntil(StartTime());
+    simulation_clock_->ThreadFinish();
+    MP_ASSERT_OK(graph_.SetExecutor("", executor));
+  }
+
+  // Initialize the test clock to follow wall time.
+  void SetUpRealClock() { clock_ = mediapipe::Clock::RealClock(); }
+
+  // Create a few mediapipe input Packets holding ints.
+  void SetUpInputData() {
+    for (int i = 0; i < 100; ++i) {
+      input_packets_.push_back(MakePacket<int>(i).At(Timestamp(i * 10000)));
+    }
+  }
+
+ protected:
+  CalculatorGraph graph_;
+  mediapipe::Clock* clock_;
+  std::shared_ptr<SimulationClock> simulation_clock_;
+  std::vector<Packet> input_packets_;
+  std::vector<Packet> out_1_packets_;
+  std::vector<Packet> allow_packets_;
+};
+
+// Shows that "FINISHED" can be indicated with either a packet or a timestamp
+// bound. DropCalculator periodically drops one packet but always propagates
+// the timestamp bound. Input packets are released or dropped promptly after
+// each "FINISHED" packet or timestamp bound arrives.
+TEST_F(FlowLimiterCalculatorTest, FinishedTimestamps) {
+  // Configure the test.
+  SetUpInputData();
+  SetUpSimulationClock();
+  CalculatorGraphConfig graph_config = InflightGraphConfig();
+  auto limiter_options = ParseTextProtoOrDie<FlowLimiterCalculatorOptions>(R"(
+    max_in_flight: 1
+    max_in_queue: 1
+  )");
+  std::map<std::string, Packet> side_packets = {
+      {"limiter_options",
+       MakePacket<FlowLimiterCalculatorOptions>(limiter_options)},
+      {"warmup_time", MakePacket<int64>(22000)},
+      {"sleep_time", MakePacket<int64>(22000)},
+      {"drop_timesamps", MakePacket<bool>(false)},
+      {"clock", MakePacket<mediapipe::Clock*>(clock_)},
+  };
+
+  // Start the graph.
+  MP_ASSERT_OK(graph_.Initialize(graph_config));
+  MP_EXPECT_OK(graph_.ObserveOutputStream("out_1", [this](Packet p) {
+    out_1_packets_.push_back(p);
+    return mediapipe::OkStatus();
+  }));
+  MP_EXPECT_OK(graph_.ObserveOutputStream("allow", [this](Packet p) {
+    allow_packets_.push_back(p);
+    return mediapipe::OkStatus();
+  }));
+  simulation_clock_->ThreadStart();
+  MP_ASSERT_OK(graph_.StartRun(side_packets));
+
+  // Add 9 input packets.
+  // 1. packet-0 is released,
+  // 2. packet-1 is queued,
+  // 3. packet-2 is queued and packet-1 is dropped,
+  // 4. packet-2 is released, and so forth.
+  MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[0]));
+  clock_->Sleep(absl::Microseconds(1));
+  EXPECT_EQ(allow_packets_.size(), 1);
+  EXPECT_EQ(allow_packets_.back().Get<bool>(), true);
+  clock_->Sleep(absl::Microseconds(10000));
+  for (int i = 1; i < 8; i += 2) {
+    MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[i]));
+    clock_->Sleep(absl::Microseconds(10000));
+    EXPECT_EQ(allow_packets_.size(), i);
+    MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[i + 1]));
+    clock_->Sleep(absl::Microseconds(1));
+    EXPECT_EQ(allow_packets_.size(), i + 1);
+    EXPECT_EQ(allow_packets_.back().Get<bool>(), false);
+    clock_->Sleep(absl::Microseconds(10000));
+    EXPECT_EQ(allow_packets_.size(), i + 2);
+    EXPECT_EQ(allow_packets_.back().Get<bool>(), true);
+  }
+
+  // Finish the graph.
+  MP_EXPECT_OK(graph_.CloseAllPacketSources());
+  clock_->Sleep(absl::Microseconds(40000));
+  MP_EXPECT_OK(graph_.WaitUntilDone());
+  simulation_clock_->ThreadFinish();
+
+  // Validate the output.
+  // input_packets_[4] is dropped by the DropCalculator.
+  std::vector<Packet> expected_output = {input_packets_[0], input_packets_[2],
+                                         input_packets_[6], input_packets_[8]};
+  EXPECT_EQ(out_1_packets_, expected_output);
}
 
-TEST(FlowLimiterCalculator, TwoStreams) {
-  std::vector<Packet> a_passed;
-  std::vector<Packet> b_passed;
-  CalculatorGraphConfig graph_config_ =
-      ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
-        input_stream: 'in_a'
-        input_stream: 'in_b'
-        input_stream: 'finished'
-        node {
-          name: 'input_dropper'
-          calculator: 'FlowLimiterCalculator'
-          input_side_packet: 'MAX_IN_FLIGHT:max_in_flight'
-          input_stream: 'in_a'
-          input_stream: 'in_b'
-          input_stream: 'FINISHED:finished'
-          input_stream_info: { tag_index: 'FINISHED' back_edge: true }
-          output_stream: 'in_a_sampled'
-          output_stream: 'in_b_sampled'
-          output_stream: 'ALLOW:allow'
-        }
-      )");
-  std::string allow_cb_name;
-  tool::AddVectorSink("in_a_sampled", &graph_config_, &a_passed);
-  tool::AddVectorSink("in_b_sampled", &graph_config_, &b_passed);
-  tool::AddCallbackCalculator("allow", &graph_config_, &allow_cb_name, true);
-
-  bool allow = true;
-  auto allow_cb = [&allow](const Packet& packet) {
-    allow = packet.Get<bool>();
+// Shows that an output packet can be lost completely, and the
+// FlowLimiterCalculator will stop waiting for it after in_flight_timeout.
+// DropCalculator completely loses one packet including its timestamp bound.
+// FlowLimiterCalculator waits 100 ms, and then starts releasing packets again.
+TEST_F(FlowLimiterCalculatorTest, FinishedLost) {
+  // Configure the test.
+  SetUpInputData();
+  SetUpSimulationClock();
+  CalculatorGraphConfig graph_config = InflightGraphConfig();
+  auto limiter_options = ParseTextProtoOrDie<FlowLimiterCalculatorOptions>(R"(
+    max_in_flight: 1
+    max_in_queue: 1
+    in_flight_timeout: 100000  # 100 ms
+  )");
+  std::map<std::string, Packet> side_packets = {
+      {"limiter_options",
+       MakePacket<FlowLimiterCalculatorOptions>(limiter_options)},
+      {"warmup_time", MakePacket<int64>(22000)},
+      {"sleep_time", MakePacket<int64>(22000)},
+      {"drop_timesamps", MakePacket<bool>(true)},
+      {"clock", MakePacket<mediapipe::Clock*>(clock_)},
  };
-  CalculatorGraph graph_;
-  MP_EXPECT_OK(graph_.Initialize(
-      graph_config_,
-      {
-          {"max_in_flight", MakePacket<int>(1)},
-          {allow_cb_name,
-           MakePacket<std::function<void(const Packet&)>>(allow_cb)},
-      }));
 
+  // Start the graph.
+  MP_ASSERT_OK(graph_.Initialize(graph_config));
+  MP_EXPECT_OK(graph_.ObserveOutputStream("out_1", [this](Packet p) {
+    out_1_packets_.push_back(p);
+    return mediapipe::OkStatus();
+  }));
+  MP_EXPECT_OK(graph_.ObserveOutputStream("allow", [this](Packet p) {
+    allow_packets_.push_back(p);
+    return mediapipe::OkStatus();
+  }));
+  simulation_clock_->ThreadStart();
+  MP_ASSERT_OK(graph_.StartRun(side_packets));
-  MP_EXPECT_OK(graph_.StartRun({}));
 
+  // Add 21 input packets.
+  // 1. packet-0 is released, packet-1 is queued and dropped, and so forth.
+  // 2. packet-4 is lost by DropCalculator.
+  // 3. packet-5 through 13 are dropped while waiting for packet-4.
+  // 4. packet-4 expires and queued packet-14 is released.
+  // 5. packet-17, 19, and 20 are released on time.
+  MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[0]));
+  clock_->Sleep(absl::Microseconds(10000));
+  for (int i = 1; i < 21; ++i) {
+    MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[i]));
+    clock_->Sleep(absl::Microseconds(10000));
+  }
 
-  auto send_packet = [&graph_](const std::string& input_name, int n) {
-    MP_EXPECT_OK(graph_.AddPacketToInputStream(
-        input_name, MakePacket<int>(n).At(Timestamp(n))));
-  };
-  send_packet("in_a", 1);
-  MP_EXPECT_OK(graph_.WaitUntilIdle());
-  EXPECT_EQ(allow, false);
-  EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
-  EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{}));
-
-  send_packet("in_a", 2);
-  send_packet("in_b", 1);
-  MP_EXPECT_OK(graph_.WaitUntilIdle());
-  EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
-  EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1}));
-  EXPECT_EQ(allow, false);
-
-  send_packet("finished", 1);
-  MP_EXPECT_OK(graph_.WaitUntilIdle());
-  EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
-  EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1}));
-  EXPECT_EQ(allow, true);
-
-  send_packet("in_b", 2);
-  MP_EXPECT_OK(graph_.WaitUntilIdle());
-  EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
-  EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1}));
-  EXPECT_EQ(allow, true);
-
-  send_packet("in_b", 3);
-  MP_EXPECT_OK(graph_.WaitUntilIdle());
-  EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
-  EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1, 3}));
-  EXPECT_EQ(allow, false);
-
-  send_packet("in_b", 4);
-  MP_EXPECT_OK(graph_.WaitUntilIdle());
-  EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
-  EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1, 3}));
-  EXPECT_EQ(allow, false);
-
-  send_packet("in_a", 3);
-  MP_EXPECT_OK(graph_.WaitUntilIdle());
-  EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1, 3}));
-  EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1, 3}));
-  EXPECT_EQ(allow, false);
-
-  send_packet("finished", 3);
-  MP_EXPECT_OK(graph_.WaitUntilIdle());
-  EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1, 3}));
-  EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1, 3}));
-  EXPECT_EQ(allow, true);
-
-  MP_EXPECT_OK(graph_.CloseAllInputStreams());
+  // Finish the graph.
+  MP_EXPECT_OK(graph_.CloseAllPacketSources());
+  clock_->Sleep(absl::Microseconds(40000));
   MP_EXPECT_OK(graph_.WaitUntilDone());
+  simulation_clock_->ThreadFinish();
+
+  // Validate the output.
+  // input_packets_[4] is lost by the DropCalculator.
+  std::vector<Packet> expected_output = {
+      input_packets_[0],  input_packets_[2],  input_packets_[14],
+      input_packets_[17], input_packets_[19], input_packets_[20],
+  };
+  EXPECT_EQ(out_1_packets_, expected_output);
 }
 
-TEST(FlowLimiterCalculator, CanConsume) {
-  std::vector<Packet> in_sampled_packets_;
-  CalculatorGraphConfig graph_config_ =
+// Shows what happens when a finish packet is delayed beyond in_flight_timeout.
+// After in_flight_timeout, FlowLimiterCalculator continues releasing packets.
+// Temporarily, more than max_in_flight frames are in flight.
+// Eventually, the number of frames in flight returns to max_in_flight.
+TEST_F(FlowLimiterCalculatorTest, FinishedDelayed) {
+  // Configure the test.
+  SetUpInputData();
+  SetUpSimulationClock();
+  CalculatorGraphConfig graph_config = InflightGraphConfig();
+  auto limiter_options = ParseTextProtoOrDie<FlowLimiterCalculatorOptions>(R"(
+    max_in_flight: 1
+    max_in_queue: 1
+    in_flight_timeout: 100000  # 100 ms
+  )");
+  std::map<std::string, Packet> side_packets = {
+      {"limiter_options",
+       MakePacket<FlowLimiterCalculatorOptions>(limiter_options)},
+      {"warmup_time", MakePacket<int64>(500000)},
+      {"sleep_time", MakePacket<int64>(22000)},
+      {"drop_timesamps", MakePacket<bool>(false)},
+      {"clock", MakePacket<mediapipe::Clock*>(clock_)},
+  };
+
+  // Start the graph.
+  MP_ASSERT_OK(graph_.Initialize(graph_config));
+  MP_EXPECT_OK(graph_.ObserveOutputStream("out_1", [this](Packet p) {
+    out_1_packets_.push_back(p);
+    return mediapipe::OkStatus();
+  }));
+  MP_EXPECT_OK(graph_.ObserveOutputStream("allow", [this](Packet p) {
+    allow_packets_.push_back(p);
+    return mediapipe::OkStatus();
+  }));
+  simulation_clock_->ThreadStart();
+  MP_ASSERT_OK(graph_.StartRun(side_packets));
+
+  // Add 71 input packets.
+  // 1. During the 500 ms WARMUP_TIME, the in_flight_timeout releases
+  //    packets 0, 10, 20, 30, 40, 50, which are queued at the SleepCalculator.
+  // 2. During the next 120 ms, these 6 packets are processed.
+  // 3. After the graph is finally finished with warmup and the backlog
+  //    packets, packets 60 through 70 are released and processed on time.
+  MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[0]));
+  clock_->Sleep(absl::Microseconds(10000));
+  for (int i = 1; i < 71; ++i) {
+    MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[i]));
+    clock_->Sleep(absl::Microseconds(10000));
+  }
+
+  // Finish the graph.
+  MP_EXPECT_OK(graph_.CloseAllPacketSources());
+  clock_->Sleep(absl::Microseconds(40000));
+  MP_EXPECT_OK(graph_.WaitUntilDone());
+  simulation_clock_->ThreadFinish();
+
+  // Validate the output.
+  // The graph is warming up or backlogged until packet 60.
+  std::vector<Packet> expected_output = {
+      input_packets_[0],  input_packets_[10], input_packets_[30],
+      input_packets_[40], input_packets_[50], input_packets_[60],
+      input_packets_[63], input_packets_[65], input_packets_[67],
+      input_packets_[69], input_packets_[70],
+  };
+  EXPECT_EQ(out_1_packets_, expected_output);
+}
+
+// Shows that packets on auxiliary input streams are released for the same
+// timestamps as the main input stream, whether the auxiliary packets arrive
+// early or late.
+TEST_F(FlowLimiterCalculatorTest, TwoInputStreams) {
+  // Configure the test.
+  SetUpInputData();
+  SetUpSimulationClock();
+  CalculatorGraphConfig graph_config =
+      ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+        input_stream: 'in_1'
+        input_stream: 'in_2'
+        node {
+          calculator: 'FlowLimiterCalculator'
+          input_side_packet: 'OPTIONS:limiter_options'
+          input_stream: 'in_1'
+          input_stream: 'in_2'
+          input_stream: 'FINISHED:out_1'
+          input_stream_info: { tag_index: 'FINISHED' back_edge: true }
+          output_stream: 'in_1_sampled'
+          output_stream: 'in_2_sampled'
+          output_stream: 'ALLOW:allow'
+        }
+        node {
+          calculator: 'SleepCalculator'
+          input_side_packet: 'WARMUP_TIME:warmup_time'
+          input_side_packet: 'SLEEP_TIME:sleep_time'
+          input_side_packet: 'CLOCK:clock'
+          input_stream: 'PACKET:in_1_sampled'
+          output_stream: 'PACKET:out_1_sampled'
+        }
+        node {
+          calculator: 'DropCalculator'
+          input_side_packet: "DROP_TIMESTAMPS:drop_timesamps"
+          input_stream: 'PACKET:out_1_sampled'
+          output_stream: 'PACKET:out_1'
+        }
+      )");
-  std::string allow_cb_name;
-  tool::AddVectorSink("in_sampled", &graph_config_, &in_sampled_packets_);
-  tool::AddCallbackCalculator("allow", &graph_config_, &allow_cb_name, true);
-  bool allow = true;
-  auto allow_cb = [&allow](const Packet& packet) {
-    allow = packet.Get<bool>();
+  auto limiter_options = ParseTextProtoOrDie<FlowLimiterCalculatorOptions>(R"(
+    max_in_flight: 1
+    max_in_queue: 1
+    in_flight_timeout: 100000  # 100 ms
+  )");
+  std::map<std::string, Packet> side_packets = {
+      {"limiter_options",
+       MakePacket<FlowLimiterCalculatorOptions>(limiter_options)},
+      {"warmup_time", MakePacket<int64>(22000)},
+      {"sleep_time", MakePacket<int64>(22000)},
+      {"drop_timesamps", MakePacket<bool>(true)},
+      {"clock", MakePacket<mediapipe::Clock*>(clock_)},
+  };
-  CalculatorGraph graph_;
-  MP_EXPECT_OK(graph_.Initialize(
-      graph_config_,
-      {
-          {"max_in_flight", MakePacket<int>(1)},
-          {allow_cb_name,
-           MakePacket<std::function<void(const Packet&)>>(allow_cb)},
-      }));
+
+  // Start the graph.
+  MP_ASSERT_OK(graph_.Initialize(graph_config));
+  MP_EXPECT_OK(graph_.ObserveOutputStream("out_1", [this](Packet p) {
+    out_1_packets_.push_back(p);
+    return mediapipe::OkStatus();
+  }));
+  std::vector<Packet> out_2_packets;
+  MP_EXPECT_OK(graph_.ObserveOutputStream("in_2_sampled", [&](Packet p) {
+    out_2_packets.push_back(p);
+    return mediapipe::OkStatus();
+  }));
+  MP_EXPECT_OK(graph_.ObserveOutputStream("allow", [this](Packet p) {
+    allow_packets_.push_back(p);
+    return mediapipe::OkStatus();
+  }));
+  simulation_clock_->ThreadStart();
+  MP_ASSERT_OK(graph_.StartRun(side_packets));
-  MP_EXPECT_OK(graph_.StartRun({}));
 
+  // Add packets 0..9 to stream in_1, and packets 0..10 to stream in_2.
-  auto send_packet = [&graph_](const std::string& input_name, int n) {
-    MP_EXPECT_OK(graph_.AddPacketToInputStream(
-        input_name, MakePacket<int>(n).At(Timestamp(n))));
-  };
-  send_packet("in", 1);
-  MP_EXPECT_OK(graph_.WaitUntilIdle());
-  EXPECT_EQ(allow, false);
-  EXPECT_EQ(TimestampValues(in_sampled_packets_), (std::vector<int64>{1}));
+  MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[0]));
+  clock_->Sleep(absl::Microseconds(10000));
+  for (int i = 1; i < 10; ++i) {
+    MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[i]));
+    MP_EXPECT_OK(graph_.AddPacketToInputStream("in_2", input_packets_[i - 1]));
+    clock_->Sleep(absl::Microseconds(10000));
+  }
-  MP_EXPECT_OK(in_sampled_packets_[0].Consume());
-
-  MP_EXPECT_OK(graph_.CloseAllInputStreams());
 
+  // Add packets 10..20 to stream in_1, and packets 11..21 to stream in_2.
+  for (int i = 10; i < 21; ++i) {
+    MP_EXPECT_OK(graph_.AddPacketToInputStream("in_2", input_packets_[i + 1]));
+    MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[i]));
+    clock_->Sleep(absl::Microseconds(10000));
+  }
-
+  // Finish the graph run.
+  MP_EXPECT_OK(graph_.CloseAllPacketSources());
+  clock_->Sleep(absl::Microseconds(40000));
   MP_EXPECT_OK(graph_.WaitUntilDone());
+  simulation_clock_->ThreadFinish();
+
+  // Validate the output.
+  // Packet input_packets_[4] is lost by the DropCalculator.
+  std::vector<Packet> expected_output = {
+      input_packets_[0],  input_packets_[2],  input_packets_[14],
+      input_packets_[17], input_packets_[19], input_packets_[20],
+  };
+  EXPECT_EQ(out_1_packets_, expected_output);
+  // Exactly the timestamps released by FlowLimiterCalculator for in_1_sampled.
+  std::vector<Packet> expected_output_2 = {
+      input_packets_[0],  input_packets_[2],  input_packets_[4],
+      input_packets_[14], input_packets_[17], input_packets_[19],
+      input_packets_[20],
+  };
+  EXPECT_EQ(out_2_packets, expected_output_2);
+}
+
+// Shows how FlowLimiterCalculator releases packets with max_in_queue 0.
+// Shows how auxiliary input streams still work with max_in_queue 0.
+// The processing time "sleep_time" is reduced from 22 ms to 12 ms to create
+// the same frame rate as FlowLimiterCalculatorTest::TwoInputStreams.
+TEST_F(FlowLimiterCalculatorTest, ZeroQueue) {
+  // Configure the test.
+  SetUpInputData();
+  SetUpSimulationClock();
+  CalculatorGraphConfig graph_config =
+      ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+        input_stream: 'in_1'
+        input_stream: 'in_2'
+        node {
+          calculator: 'FlowLimiterCalculator'
+          input_side_packet: 'OPTIONS:limiter_options'
+          input_stream: 'in_1'
+          input_stream: 'in_2'
+          input_stream: 'FINISHED:out_1'
+          input_stream_info: { tag_index: 'FINISHED' back_edge: true }
+          output_stream: 'in_1_sampled'
+          output_stream: 'in_2_sampled'
+          output_stream: 'ALLOW:allow'
+        }
+        node {
+          calculator: 'SleepCalculator'
+          input_side_packet: 'WARMUP_TIME:warmup_time'
+          input_side_packet: 'SLEEP_TIME:sleep_time'
+          input_side_packet: 'CLOCK:clock'
+          input_stream: 'PACKET:in_1_sampled'
+          output_stream: 'PACKET:out_1_sampled'
+        }
+        node {
+          calculator: 'DropCalculator'
+          input_side_packet: "DROP_TIMESTAMPS:drop_timesamps"
+          input_stream: 'PACKET:out_1_sampled'
+          output_stream: 'PACKET:out_1'
+        }
+      )");
+
+  auto limiter_options = ParseTextProtoOrDie<FlowLimiterCalculatorOptions>(R"(
+    max_in_flight: 1
+    max_in_queue: 0
+    in_flight_timeout: 100000  # 100 ms
+  )");
+  std::map<std::string, Packet> side_packets = {
+      {"limiter_options",
+       MakePacket<FlowLimiterCalculatorOptions>(limiter_options)},
+      {"warmup_time", MakePacket<int64>(12000)},
+      {"sleep_time", MakePacket<int64>(12000)},
+      {"drop_timesamps", MakePacket<bool>(true)},
+      {"clock", MakePacket<mediapipe::Clock*>(clock_)},
+  };
+
+  // Start the graph.
+  MP_ASSERT_OK(graph_.Initialize(graph_config));
+  MP_EXPECT_OK(graph_.ObserveOutputStream("out_1", [this](Packet p) {
+    out_1_packets_.push_back(p);
+    return mediapipe::OkStatus();
+  }));
+  std::vector<Packet> out_2_packets;
+  MP_EXPECT_OK(graph_.ObserveOutputStream("in_2_sampled", [&](Packet p) {
+    out_2_packets.push_back(p);
+    return mediapipe::OkStatus();
+  }));
+  MP_EXPECT_OK(graph_.ObserveOutputStream("allow", [this](Packet p) {
+    allow_packets_.push_back(p);
+    return mediapipe::OkStatus();
+  }));
+  simulation_clock_->ThreadStart();
+  MP_ASSERT_OK(graph_.StartRun(side_packets));
+
+  // Add packets 0..9 to stream in_1, and packets 0..10 to stream in_2.
+  MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[0]));
+  clock_->Sleep(absl::Microseconds(10000));
+  for (int i = 1; i < 10; ++i) {
+    MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[i]));
+    MP_EXPECT_OK(graph_.AddPacketToInputStream("in_2", input_packets_[i - 1]));
+    clock_->Sleep(absl::Microseconds(10000));
+  }
+
+  // Add packets 10..20 to stream in_1, and packets 11..21 to stream in_2.
+  for (int i = 10; i < 21; ++i) {
+    MP_EXPECT_OK(graph_.AddPacketToInputStream("in_2", input_packets_[i + 1]));
+    MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[i]));
+    clock_->Sleep(absl::Microseconds(10000));
+  }
+
+  // Finish the graph run.
+  MP_EXPECT_OK(graph_.CloseAllPacketSources());
+  clock_->Sleep(absl::Microseconds(40000));
+  MP_EXPECT_OK(graph_.WaitUntilDone());
+  simulation_clock_->ThreadFinish();
+
+  // Validate the output.
+  // Packet input_packets_[4] is lost by the DropCalculator.
+  std::vector<Packet> expected_output = {
+      input_packets_[0],  input_packets_[2],  input_packets_[15],
+      input_packets_[17], input_packets_[19],
+  };
+  EXPECT_EQ(out_1_packets_, expected_output);
+  // Exactly the timestamps released by FlowLimiterCalculator for in_1_sampled.
+  std::vector<Packet> expected_output_2 = {
+      input_packets_[0],  input_packets_[2],  input_packets_[4],
+      input_packets_[15], input_packets_[17], input_packets_[19],
+  };
+  EXPECT_EQ(out_2_packets, expected_output_2);
+}
 
 }  // anonymous namespace
diff --git a/mediapipe/calculators/core/gate_calculator.cc b/mediapipe/calculators/core/gate_calculator.cc
index 8b8e1424d..95ae9b03f 100644
--- a/mediapipe/calculators/core/gate_calculator.cc
+++ b/mediapipe/calculators/core/gate_calculator.cc
@@ -82,7 +82,7 @@ class GateCalculator : public CalculatorBase {
  public:
   GateCalculator() {}
 
-  static ::mediapipe::Status CheckAndInitAllowDisallowInputs(
+  static mediapipe::Status CheckAndInitAllowDisallowInputs(
       CalculatorContract* cc) {
     bool input_via_side_packet = cc->InputSidePackets().HasTag("ALLOW") ||
                                  cc->InputSidePackets().HasTag("DISALLOW");
@@ -110,10 +110,10 @@ class GateCalculator : public CalculatorBase {
       cc->Inputs().Tag("DISALLOW").Set<bool>();
     }
   }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
  }
 
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     RET_CHECK_OK(CheckAndInitAllowDisallowInputs(cc));
 
     const int num_data_streams = cc->Inputs().NumEntries("");
@@ -130,10 +130,10 @@ class GateCalculator : public CalculatorBase {
       cc->Outputs().Tag("STATE_CHANGE").Set<bool>();
     }
 
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
-  ::mediapipe::Status Open(CalculatorContext* cc) final {
+  mediapipe::Status Open(CalculatorContext* cc) final {
     use_side_packet_for_allow_disallow_ = false;
     if (cc->InputSidePackets().HasTag("ALLOW")) {
       use_side_packet_for_allow_disallow_ = true;
@@ -153,10 +153,10 @@ class GateCalculator : public CalculatorBase {
     const auto& options = cc->Options<::mediapipe::GateCalculatorOptions>();
     empty_packets_as_allow_ = options.empty_packets_as_allow();
 
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
-  ::mediapipe::Status Process(CalculatorContext* cc) final {
+  mediapipe::Status Process(CalculatorContext* cc) final {
     bool allow = empty_packets_as_allow_;
     if (use_side_packet_for_allow_disallow_) {
       allow = allow_by_side_packet_decision_;
@@ -195,7 +195,7 @@ class GateCalculator : public CalculatorBase {
           cc->Outputs().Get("", i).Close();
         }
       }
-      return ::mediapipe::OkStatus();
+      return mediapipe::OkStatus();
     }
 
     // Process data streams.
@@ -205,7 +205,7 @@ class GateCalculator : public CalculatorBase {
       }
     }
 
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
  private:
diff --git a/mediapipe/calculators/core/gate_calculator_test.cc b/mediapipe/calculators/core/gate_calculator_test.cc
index ec838f6b6..d1dcae09d 100644
--- a/mediapipe/calculators/core/gate_calculator_test.cc
+++ b/mediapipe/calculators/core/gate_calculator_test.cc
@@ -25,7 +25,7 @@ namespace {
 class GateCalculatorTest : public ::testing::Test {
  protected:
   // Helper to run a graph and return status.
-  static ::mediapipe::Status RunGraph(const std::string& proto) {
+  static mediapipe::Status RunGraph(const std::string& proto) {
     auto runner = absl::make_unique<CalculatorRunner>(
         ParseTextProtoOrDie<CalculatorGraphConfig::Node>(proto));
     return runner->Run();
diff --git a/mediapipe/calculators/core/immediate_mux_calculator.cc b/mediapipe/calculators/core/immediate_mux_calculator.cc
index 007fbf73e..e0e129f4b 100644
--- a/mediapipe/calculators/core/immediate_mux_calculator.cc
+++ b/mediapipe/calculators/core/immediate_mux_calculator.cc
@@ -29,9 +29,7 @@ namespace mediapipe {
 // received.
 //
 // This Calculator can be used with an ImmediateInputStreamHandler or with the
-// default ISH. Note that currently ImmediateInputStreamHandler seems to
-// interfere with timestamp bound propagation, so it is better to use the
-// default unless the immediate one is needed. (b/118387598)
+// default ISH.
 //
 // This Calculator is designed to work with a Demux calculator such as
 // the RoundRobinDemuxCalculator. Therefore, packets from different
@@ -45,17 +43,16 @@ class ImmediateMuxCalculator : public CalculatorBase {
  public:
   // This calculator combines any set of input streams into a single
   // output stream. All input stream types must match the output stream type.
-  static ::mediapipe::Status GetContract(CalculatorContract* cc);
+  static mediapipe::Status GetContract(CalculatorContract* cc);
 
   // Passes any input packet to the output stream immediately, unless the
   // packet timestamp is lower than a previously passed packet.
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
-  ::mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Open(CalculatorContext* cc) override;
 };
 REGISTER_CALCULATOR(ImmediateMuxCalculator);
 
-::mediapipe::Status ImmediateMuxCalculator::GetContract(
-    CalculatorContract* cc) {
+mediapipe::Status ImmediateMuxCalculator::GetContract(CalculatorContract* cc) {
   RET_CHECK(cc->Outputs().NumEntries() >= 1 && cc->Outputs().NumEntries() <= 2)
       << "This calculator produces only one or two output streams.";
   cc->Outputs().Index(0).SetAny();
@@ -65,15 +62,15 @@ REGISTER_CALCULATOR(ImmediateMuxCalculator);
   for (int i = 0; i < cc->Inputs().NumEntries(); ++i) {
     cc->Inputs().Index(i).SetSameAs(&cc->Outputs().Index(0));
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
-::mediapipe::Status ImmediateMuxCalculator::Open(CalculatorContext* cc) {
+mediapipe::Status ImmediateMuxCalculator::Open(CalculatorContext* cc) {
   cc->SetOffset(TimestampDiff(0));
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
-::mediapipe::Status ImmediateMuxCalculator::Process(CalculatorContext* cc) {
+mediapipe::Status ImmediateMuxCalculator::Process(CalculatorContext* cc) {
   // Pass along the first packet, unless it has been superseded.
for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { const Packet& packet = cc->Inputs().Index(i).Value(); @@ -91,7 +88,7 @@ REGISTER_CALCULATOR(ImmediateMuxCalculator); } } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/core/immediate_mux_calculator_test.cc b/mediapipe/calculators/core/immediate_mux_calculator_test.cc index 4afe358f2..d691e0f73 100644 --- a/mediapipe/calculators/core/immediate_mux_calculator_test.cc +++ b/mediapipe/calculators/core/immediate_mux_calculator_test.cc @@ -289,19 +289,19 @@ TEST_F(ImmediateMuxCalculatorTest, SimultaneousTimestamps) { } // A Calculator::Process callback function. -typedef std::function<::mediapipe::Status(const InputStreamShardSet&, - OutputStreamShardSet*)> +typedef std::function ProcessFunction; // A testing callback function that passes through all packets. -::mediapipe::Status PassThrough(const InputStreamShardSet& inputs, - OutputStreamShardSet* outputs) { +mediapipe::Status PassThrough(const InputStreamShardSet& inputs, + OutputStreamShardSet* outputs) { for (int i = 0; i < inputs.NumEntries(); ++i) { if (!inputs.Index(i).Value().IsEmpty()) { outputs->Index(i).AddPacket(inputs.Index(i).Value()); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } TEST_F(ImmediateMuxCalculatorTest, Demux) { @@ -325,7 +325,7 @@ TEST_F(ImmediateMuxCalculatorTest, Demux) { auto out_cb = [&](const Packet& p) { absl::MutexLock lock(&out_mutex); out_packets.push_back(p); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); }; auto wait_for = [&](std::function cond) { absl::MutexLock lock(&out_mutex); diff --git a/mediapipe/calculators/core/make_pair_calculator.cc b/mediapipe/calculators/core/make_pair_calculator.cc index 8eb4cb67b..58029ea6b 100644 --- a/mediapipe/calculators/core/make_pair_calculator.cc +++ b/mediapipe/calculators/core/make_pair_calculator.cc @@ -35,24 +35,24 @@ class MakePairCalculator : public CalculatorBase { MakePairCalculator() {} ~MakePairCalculator() override {} - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).SetAny(); cc->Inputs().Index(1).SetAny(); cc->Outputs().Index(0).Set>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { cc->Outputs().Index(0).Add( new std::pair(cc->Inputs().Index(0).Value(), cc->Inputs().Index(1).Value()), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; diff --git a/mediapipe/calculators/core/matrix_multiply_calculator.cc b/mediapipe/calculators/core/matrix_multiply_calculator.cc index 8dc60b763..e5f479511 100644 --- a/mediapipe/calculators/core/matrix_multiply_calculator.cc +++ b/mediapipe/calculators/core/matrix_multiply_calculator.cc @@ -33,34 +33,34 @@ class MatrixMultiplyCalculator : public CalculatorBase { MatrixMultiplyCalculator() {} ~MatrixMultiplyCalculator() override {} - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status 
Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; }; REGISTER_CALCULATOR(MatrixMultiplyCalculator); // static -::mediapipe::Status MatrixMultiplyCalculator::GetContract( +mediapipe::Status MatrixMultiplyCalculator::GetContract( CalculatorContract* cc) { cc->Inputs().Index(0).Set(); cc->Outputs().Index(0).Set(); cc->InputSidePackets().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status MatrixMultiplyCalculator::Open(CalculatorContext* cc) { +mediapipe::Status MatrixMultiplyCalculator::Open(CalculatorContext* cc) { // The output is at the same timestamp as the input. cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status MatrixMultiplyCalculator::Process(CalculatorContext* cc) { +mediapipe::Status MatrixMultiplyCalculator::Process(CalculatorContext* cc) { Matrix* multiplied = new Matrix(); *multiplied = cc->InputSidePackets().Index(0).Get() * cc->Inputs().Index(0).Get(); cc->Outputs().Index(0).Add(multiplied, cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/core/matrix_subtract_calculator.cc b/mediapipe/calculators/core/matrix_subtract_calculator.cc index af13a0d38..4a9b18bbd 100644 --- a/mediapipe/calculators/core/matrix_subtract_calculator.cc +++ b/mediapipe/calculators/core/matrix_subtract_calculator.cc @@ -46,10 +46,10 @@ class MatrixSubtractCalculator : public CalculatorBase { MatrixSubtractCalculator() {} ~MatrixSubtractCalculator() override {} - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: bool subtract_from_input_ = false; @@ -57,11 +57,11 @@ class MatrixSubtractCalculator : public CalculatorBase { REGISTER_CALCULATOR(MatrixSubtractCalculator); // static -::mediapipe::Status MatrixSubtractCalculator::GetContract( +mediapipe::Status MatrixSubtractCalculator::GetContract( CalculatorContract* cc) { if (cc->Inputs().NumEntries() != 1 || cc->InputSidePackets().NumEntries() != 1) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "MatrixSubtractCalculator only accepts exactly one input stream and " "one " "input side packet"); @@ -75,23 +75,23 @@ REGISTER_CALCULATOR(MatrixSubtractCalculator); cc->Inputs().Tag("SUBTRAHEND").Set(); cc->InputSidePackets().Tag("MINUEND").Set(); } else { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "Must specify exactly one minuend and one subtrahend."); } cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status MatrixSubtractCalculator::Open(CalculatorContext* cc) { +mediapipe::Status MatrixSubtractCalculator::Open(CalculatorContext* cc) { // The output is at the same timestamp as the input. 
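// Declaring a zero offset also lets the framework propagate timestamp
// bounds to downstream calculators without waiting for this calculator to
// produce a packet.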
cc->SetOffset(TimestampDiff(0)); if (cc->Inputs().HasTag("MINUEND")) { subtract_from_input_ = true; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status MatrixSubtractCalculator::Process(CalculatorContext* cc) { +mediapipe::Status MatrixSubtractCalculator::Process(CalculatorContext* cc) { Matrix* subtracted = new Matrix(); if (subtract_from_input_) { const Matrix& input_matrix = cc->Inputs().Tag("MINUEND").Get(); @@ -99,7 +99,7 @@ REGISTER_CALCULATOR(MatrixSubtractCalculator); cc->InputSidePackets().Tag("SUBTRAHEND").Get(); if (input_matrix.rows() != side_input_matrix.rows() || input_matrix.cols() != side_input_matrix.cols()) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "Input matrix and the input side matrix must have the same " "dimension."); } @@ -110,14 +110,14 @@ REGISTER_CALCULATOR(MatrixSubtractCalculator); cc->InputSidePackets().Tag("MINUEND").Get(); if (input_matrix.rows() != side_input_matrix.rows() || input_matrix.cols() != side_input_matrix.cols()) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "Input matrix and the input side matrix must have the same " "dimension."); } *subtracted = side_input_matrix - input_matrix; } cc->Outputs().Index(0).Add(subtracted, cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/core/matrix_to_vector_calculator.cc b/mediapipe/calculators/core/matrix_to_vector_calculator.cc index b02fda77c..889ab22fa 100644 --- a/mediapipe/calculators/core/matrix_to_vector_calculator.cc +++ b/mediapipe/calculators/core/matrix_to_vector_calculator.cc @@ -42,30 +42,30 @@ namespace mediapipe { // } class MatrixToVectorCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set( // Input Packet containing a Matrix. ); cc->Outputs().Index(0).Set>( // Output Packet containing a vector, one for each input Packet. ); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; // Outputs a packet containing a vector for each input packet. - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; }; REGISTER_CALCULATOR(MatrixToVectorCalculator); -::mediapipe::Status MatrixToVectorCalculator::Open(CalculatorContext* cc) { +mediapipe::Status MatrixToVectorCalculator::Open(CalculatorContext* cc) { // Inform the framework that we don't alter timestamps. 
cc->SetOffset(mediapipe::TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status MatrixToVectorCalculator::Process(CalculatorContext* cc) { +mediapipe::Status MatrixToVectorCalculator::Process(CalculatorContext* cc) { const Matrix& input = cc->Inputs().Index(0).Get(); auto output = absl::make_unique>(); @@ -77,7 +77,7 @@ REGISTER_CALCULATOR(MatrixToVectorCalculator); output_as_matrix = input; cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/core/merge_calculator.cc b/mediapipe/calculators/core/merge_calculator.cc index e85ae0c12..9d67f9068 100644 --- a/mediapipe/calculators/core/merge_calculator.cc +++ b/mediapipe/calculators/core/merge_calculator.cc @@ -43,7 +43,7 @@ namespace mediapipe { // class MergeCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { RET_CHECK_GT(cc->Inputs().NumEntries(), 0) << "Needs at least one input stream"; RET_CHECK_EQ(cc->Outputs().NumEntries(), 1); @@ -60,29 +60,29 @@ class MergeCalculator : public CalculatorBase { } cc->Outputs().Index(0).SetAny(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { // Output the packet from the first input stream with a packet ready at this // timestamp. for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { if (!cc->Inputs().Index(i).IsEmpty()) { cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(i).Value()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } LOG(WARNING) << "Empty input packets at timestamp " << cc->InputTimestamp().Value(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; diff --git a/mediapipe/calculators/core/mux_calculator.cc b/mediapipe/calculators/core/mux_calculator.cc index 8ca25bdd0..0100d4ce8 100644 --- a/mediapipe/calculators/core/mux_calculator.cc +++ b/mediapipe/calculators/core/mux_calculator.cc @@ -36,7 +36,7 @@ constexpr char kInputTag[] = "INPUT"; // with DefaultInputStreamHandler. 
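//
// Example config (an illustrative sketch; stream names are hypothetical):
// node {
//   calculator: "MuxCalculator"
//   input_stream: "SELECT:select"
//   input_stream: "INPUT:0:input_0"
//   input_stream: "INPUT:1:input_1"
//   output_stream: "OUTPUT:selected"
// }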
class MuxCalculator : public CalculatorBase { public: - static ::mediapipe::Status CheckAndInitAllowDisallowInputs( + static mediapipe::Status CheckAndInitAllowDisallowInputs( CalculatorContract* cc) { RET_CHECK(cc->Inputs().HasTag(kSelectTag) ^ cc->InputSidePackets().HasTag(kSelectTag)); @@ -45,10 +45,10 @@ class MuxCalculator : public CalculatorBase { } else { cc->InputSidePackets().Tag(kSelectTag).Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { RET_CHECK_OK(CheckAndInitAllowDisallowInputs(cc)); CollectionItemId data_input_id = cc->Inputs().BeginId(kInputTag); PacketType* data_input0 = &cc->Inputs().Get(data_input_id); @@ -64,10 +64,10 @@ class MuxCalculator : public CalculatorBase { MediaPipeOptions options; cc->SetInputStreamHandlerOptions(options); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { use_side_packet_select_ = false; if (cc->InputSidePackets().HasTag(kSelectTag)) { use_side_packet_select_ = true; @@ -79,10 +79,10 @@ class MuxCalculator : public CalculatorBase { num_data_inputs_ = cc->Inputs().NumEntries(kInputTag); output_ = cc->Outputs().GetId("OUTPUT", 0); cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { int select = use_side_packet_select_ ? selected_index_ : cc->Inputs().Get(select_input_).Get(); @@ -91,7 +91,7 @@ class MuxCalculator : public CalculatorBase { cc->Outputs().Get(output_).AddPacket( cc->Inputs().Get(data_input_base_ + select).Value()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: diff --git a/mediapipe/calculators/core/mux_calculator_test.cc b/mediapipe/calculators/core/mux_calculator_test.cc index 78fbb7999..c99919a38 100644 --- a/mediapipe/calculators/core/mux_calculator_test.cc +++ b/mediapipe/calculators/core/mux_calculator_test.cc @@ -134,10 +134,9 @@ void RunGraph(const std::string& graph_config_proto, const std::string& input_stream_name, int num_input_packets, std::function input_fn, const std::string& output_stream_name, - std::function<::mediapipe::Status(const Packet&)> output_fn) { + std::function output_fn) { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie( - graph_config_proto); + mediapipe::ParseTextProtoOrDie(graph_config_proto); CalculatorGraph graph; MP_ASSERT_OK(graph.Initialize(config)); MP_ASSERT_OK(graph.ObserveOutputStream(output_stream_name, output_fn)); @@ -166,9 +165,9 @@ TEST(MuxCalculatorTest, InputStreamSelector_DefaultInputStreamHandler) { // Output and handling. std::vector output; // This function collects the output from the packet. - auto output_fn = [&output](const Packet& p) -> ::mediapipe::Status { + auto output_fn = [&output](const Packet& p) -> mediapipe::Status { output.push_back(p.Get()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); }; RunGraph(kTestGraphConfig1, {}, kInputName, input_packets.size(), input_fn, @@ -192,9 +191,9 @@ TEST(MuxCalculatorTest, InputSidePacketSelector_DefaultInputStreamHandler) { // Output and handling. std::vector output; // This function collects the output from the packet. 
- auto output_fn = [&output](const Packet& p) -> ::mediapipe::Status { + auto output_fn = [&output](const Packet& p) -> mediapipe::Status { output.push_back(p.Get()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); }; RunGraph(kTestGraphConfig2, {{kInputSelector, MakePacket(0)}}, @@ -226,9 +225,9 @@ TEST(MuxCalculatorTest, InputStreamSelector_MuxInputStreamHandler) { // Output and handling. std::vector output; // This function collects the output from the packet. - auto output_fn = [&output](const Packet& p) -> ::mediapipe::Status { + auto output_fn = [&output](const Packet& p) -> mediapipe::Status { output.push_back(p.Get()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); }; RunGraph(kTestGraphConfig3, {}, kInputName, input_packets.size(), input_fn, @@ -252,7 +251,7 @@ constexpr char kDualInputGraphConfig[] = R"proto( TEST(MuxCalculatorTest, DiscardSkippedInputs_MuxInputStreamHandler) { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( kDualInputGraphConfig); CalculatorGraph graph; MP_ASSERT_OK(graph.Initialize(config)); @@ -261,7 +260,7 @@ TEST(MuxCalculatorTest, DiscardSkippedInputs_MuxInputStreamHandler) { MP_ASSERT_OK( graph.ObserveOutputStream("test_output", [&output](const Packet& p) { output = p.Get>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); MP_ASSERT_OK(graph.StartRun({})); diff --git a/mediapipe/calculators/core/packet_cloner_calculator.cc b/mediapipe/calculators/core/packet_cloner_calculator.cc index 26044fc2c..c2a2979c7 100644 --- a/mediapipe/calculators/core/packet_cloner_calculator.cc +++ b/mediapipe/calculators/core/packet_cloner_calculator.cc @@ -45,17 +45,17 @@ namespace mediapipe { // packet_inner_join_calculator.cc: Don't output unless all inputs are new. class PacketClonerCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { const int tick_signal_index = cc->Inputs().NumEntries() - 1; for (int i = 0; i < tick_signal_index; ++i) { cc->Inputs().Index(i).SetAny(); cc->Outputs().Index(i).SetSameAs(&cc->Inputs().Index(i)); } cc->Inputs().Index(tick_signal_index).SetAny(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { // Load options. const auto calculator_options = cc->Options(); @@ -71,10 +71,10 @@ class PacketClonerCalculator : public CalculatorBase { cc->Outputs().Index(i).SetHeader(cc->Inputs().Index(i).Header()); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { // Store input signals. for (int i = 0; i < tick_signal_index_; ++i) { if (!cc->Inputs().Index(i).Value().IsEmpty()) { @@ -88,7 +88,7 @@ class PacketClonerCalculator : public CalculatorBase { // Return if one of the input is null. 
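// (This branch is only taken when output_only_when_all_inputs_received_ is
// set, which makes the output synchronized: nothing is emitted until every
// data stream has delivered at least one packet.)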
for (int i = 0; i < tick_signal_index_; ++i) { if (current_[i].IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } } @@ -103,7 +103,7 @@ class PacketClonerCalculator : public CalculatorBase { } } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: diff --git a/mediapipe/calculators/core/packet_inner_join_calculator.cc b/mediapipe/calculators/core/packet_inner_join_calculator.cc index 2b93df3cf..1f77d4149 100644 --- a/mediapipe/calculators/core/packet_inner_join_calculator.cc +++ b/mediapipe/calculators/core/packet_inner_join_calculator.cc @@ -34,10 +34,10 @@ namespace mediapipe { // packet_cloner_calculator.cc: Repeats last-seen packets from empty inputs. class PacketInnerJoinCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: int num_streams_; @@ -45,7 +45,7 @@ class PacketInnerJoinCalculator : public CalculatorBase { REGISTER_CALCULATOR(PacketInnerJoinCalculator); -::mediapipe::Status PacketInnerJoinCalculator::GetContract( +mediapipe::Status PacketInnerJoinCalculator::GetContract( CalculatorContract* cc) { RET_CHECK(cc->Inputs().NumEntries() == cc->Outputs().NumEntries()) << "The number of input and output streams must match."; @@ -54,25 +54,25 @@ REGISTER_CALCULATOR(PacketInnerJoinCalculator); cc->Inputs().Index(i).SetAny(); cc->Outputs().Index(i).SetSameAs(&cc->Inputs().Index(i)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status PacketInnerJoinCalculator::Open(CalculatorContext* cc) { +mediapipe::Status PacketInnerJoinCalculator::Open(CalculatorContext* cc) { num_streams_ = cc->Inputs().NumEntries(); cc->SetOffset(TimestampDiff(0)); return mediapipe::OkStatus(); } -::mediapipe::Status PacketInnerJoinCalculator::Process(CalculatorContext* cc) { +mediapipe::Status PacketInnerJoinCalculator::Process(CalculatorContext* cc) { for (int i = 0; i < num_streams_; ++i) { if (cc->Inputs().Index(i).Value().IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } for (int i = 0; i < num_streams_; ++i) { cc->Outputs().Index(i).AddPacket(cc->Inputs().Index(i).Value()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/core/packet_presence_calculator.cc b/mediapipe/calculators/core/packet_presence_calculator.cc index 468d31718..7f823f27e 100644 --- a/mediapipe/calculators/core/packet_presence_calculator.cc +++ b/mediapipe/calculators/core/packet_presence_calculator.cc @@ -57,26 +57,26 @@ namespace mediapipe { // } class PacketPresenceCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Tag("PACKET").SetAny(); cc->Outputs().Tag("PRESENCE").Set(); // Process() function is invoked in response to input stream timestamp // bound updates. 
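// This is what allows PRESENCE to report 'false' at timestamps where no
// PACKET packet arrives.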
cc->SetProcessTimestampBounds(true); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { cc->Outputs() .Tag("PRESENCE") .AddPacket(MakePacket(!cc->Inputs().Tag("PACKET").IsEmpty()) .At(cc->InputTimestamp())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(PacketPresenceCalculator); diff --git a/mediapipe/calculators/core/packet_resampler_calculator.cc b/mediapipe/calculators/core/packet_resampler_calculator.cc index da9c26c15..4a08c1f1c 100644 --- a/mediapipe/calculators/core/packet_resampler_calculator.cc +++ b/mediapipe/calculators/core/packet_resampler_calculator.cc @@ -47,7 +47,7 @@ TimestampDiff TimestampDiffFromSeconds(double seconds) { } } // namespace -::mediapipe::Status PacketResamplerCalculator::GetContract( +mediapipe::Status PacketResamplerCalculator::GetContract( CalculatorContract* cc) { const auto& resampler_options = cc->Options(); @@ -78,10 +78,10 @@ TimestampDiff TimestampDiffFromSeconds(double seconds) { RET_CHECK(cc->InputSidePackets().HasTag("SEED")); cc->InputSidePackets().Tag("SEED").Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status PacketResamplerCalculator::Open(CalculatorContext* cc) { +mediapipe::Status PacketResamplerCalculator::Open(CalculatorContext* cc) { const auto resampler_options = tool::RetrieveOptions(cc->Options(), cc->InputSidePackets(), "OPTIONS"); @@ -156,8 +156,8 @@ TimestampDiff TimestampDiffFromSeconds(double seconds) { const auto& seed = cc->InputSidePackets().Tag("SEED").Get(); random_ = CreateSecureRandom(seed); if (random_ == nullptr) { - return ::mediapipe::Status( - ::mediapipe::StatusCode::kInvalidArgument, + return mediapipe::Status( + mediapipe::StatusCode::kInvalidArgument, "SecureRandom is not available. 
With \"jitter\" specified, " "PacketResamplerCalculator processing cannot proceed."); } @@ -165,17 +165,17 @@ TimestampDiff TimestampDiffFromSeconds(double seconds) { } packet_reservoir_ = std::make_unique(packet_reservoir_random_.get()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status PacketResamplerCalculator::Process(CalculatorContext* cc) { +mediapipe::Status PacketResamplerCalculator::Process(CalculatorContext* cc) { if (cc->InputTimestamp() == Timestamp::PreStream() && cc->Inputs().UsesTags() && cc->Inputs().HasTag("VIDEO_HEADER") && !cc->Inputs().Tag("VIDEO_HEADER").IsEmpty()) { video_header_ = cc->Inputs().Tag("VIDEO_HEADER").Get(); video_header_.frame_rate = frame_rate_; if (cc->Inputs().Get(input_data_id_).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } if (jitter_ != 0.0 && random_ != nullptr) { @@ -192,7 +192,7 @@ TimestampDiff TimestampDiffFromSeconds(double seconds) { MP_RETURN_IF_ERROR(ProcessWithoutJitter(cc)); } last_packet_ = cc->Inputs().Get(input_data_id_).Value(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void PacketResamplerCalculator::InitializeNextOutputTimestampWithJitter() { @@ -229,7 +229,7 @@ void PacketResamplerCalculator::UpdateNextOutputTimestampWithJitter() { ((1.0 - jitter_) + 2.0 * jitter_ * random_->RandFloat()); } -::mediapipe::Status PacketResamplerCalculator::ProcessWithJitter( +mediapipe::Status PacketResamplerCalculator::ProcessWithJitter( CalculatorContext* cc) { RET_CHECK_GT(cc->InputTimestamp(), Timestamp::PreStream()); RET_CHECK_NE(jitter_, 0.0); @@ -243,7 +243,7 @@ void PacketResamplerCalculator::UpdateNextOutputTimestampWithJitter() { cc->Inputs().Get(input_data_id_).Value().At(next_output_timestamp_)); UpdateNextOutputTimestampWithJitter(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } if (frame_time_usec_ < @@ -267,10 +267,10 @@ void PacketResamplerCalculator::UpdateNextOutputTimestampWithJitter() { .At(next_output_timestamp_)); UpdateNextOutputTimestampWithJitter(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status PacketResamplerCalculator::ProcessWithoutJitter( +mediapipe::Status PacketResamplerCalculator::ProcessWithoutJitter( CalculatorContext* cc) { RET_CHECK_GT(cc->InputTimestamp(), Timestamp::PreStream()); RET_CHECK_EQ(jitter_, 0.0); @@ -333,12 +333,12 @@ void PacketResamplerCalculator::UpdateNextOutputTimestampWithJitter() { .Get(output_data_id_) .SetNextTimestampBound(PeriodIndexToTimestamp(period_count_)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status PacketResamplerCalculator::Close(CalculatorContext* cc) { +mediapipe::Status PacketResamplerCalculator::Close(CalculatorContext* cc) { if (!cc->GraphStatus().ok()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Emit the last packet received if we have at least one packet, but // haven't sent anything for its period. 
@@ -350,7 +350,7 @@ void PacketResamplerCalculator::UpdateNextOutputTimestampWithJitter() { if (!packet_reservoir_->IsEmpty()) { OutputWithinLimits(cc, packet_reservoir_->GetSample()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } Timestamp PacketResamplerCalculator::PeriodIndexToTimestamp(int64 index) const { diff --git a/mediapipe/calculators/core/packet_resampler_calculator.h b/mediapipe/calculators/core/packet_resampler_calculator.h index 95ef24cc2..c07eb1c24 100644 --- a/mediapipe/calculators/core/packet_resampler_calculator.h +++ b/mediapipe/calculators/core/packet_resampler_calculator.h @@ -99,11 +99,11 @@ class PacketReservoir { // packet_downsampler_calculator.cc: skips packets regardless of timestamps. class PacketResamplerCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: // Calculates the first sampled timestamp that incorporates a jittering @@ -113,10 +113,10 @@ class PacketResamplerCalculator : public CalculatorBase { void UpdateNextOutputTimestampWithJitter(); // Logic for Process() when jitter_ != 0.0. - ::mediapipe::Status ProcessWithJitter(CalculatorContext* cc); + mediapipe::Status ProcessWithJitter(CalculatorContext* cc); // Logic for Process() when jitter_ == 0.0. - ::mediapipe::Status ProcessWithoutJitter(CalculatorContext* cc); + mediapipe::Status ProcessWithoutJitter(CalculatorContext* cc); // Given the current count of periods that have passed, this returns // the next valid timestamp of the middle point of the next period: diff --git a/mediapipe/calculators/core/packet_thinner_calculator.cc b/mediapipe/calculators/core/packet_thinner_calculator.cc index 417fafa31..4795ad5e4 100644 --- a/mediapipe/calculators/core/packet_thinner_calculator.cc +++ b/mediapipe/calculators/core/packet_thinner_calculator.cc @@ -90,7 +90,7 @@ class PacketThinnerCalculator : public CalculatorBase { PacketThinnerCalculator() {} ~PacketThinnerCalculator() override {} - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { if (cc->InputSidePackets().HasTag(kOptionsTag)) { cc->InputSidePackets().Tag(kOptionsTag).Set(); } @@ -99,21 +99,21 @@ class PacketThinnerCalculator : public CalculatorBase { if (cc->InputSidePackets().HasTag(kPeriodTag)) { cc->InputSidePackets().Tag(kPeriodTag).Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override { if (cc->InputTimestamp() < start_time_) { - return ::mediapipe::OkStatus(); // Drop packets before start_time_. + return mediapipe::OkStatus(); // Drop packets before start_time_. 
} else if (cc->InputTimestamp() >= end_time_) { if (!cc->Outputs().Index(0).IsClosed()) { cc->Outputs() .Index(0) .Close(); // No more Packets will be output after end_time_. } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } else { return thinner_type_ == PacketThinnerCalculatorOptions::ASYNC ? AsyncThinnerProcess(cc) @@ -123,8 +123,8 @@ class PacketThinnerCalculator : public CalculatorBase { private: // Implementation of ASYNC and SYNC versions of thinner algorithm. - ::mediapipe::Status AsyncThinnerProcess(CalculatorContext* cc); - ::mediapipe::Status SyncThinnerProcess(CalculatorContext* cc); + mediapipe::Status AsyncThinnerProcess(CalculatorContext* cc); + mediapipe::Status SyncThinnerProcess(CalculatorContext* cc); // Cached option. PacketThinnerCalculatorOptions::ThinnerType thinner_type_; @@ -153,7 +153,7 @@ namespace { TimestampDiff abs(TimestampDiff t) { return t < 0 ? -t : t; } } // namespace -::mediapipe::Status PacketThinnerCalculator::Open(CalculatorContext* cc) { +mediapipe::Status PacketThinnerCalculator::Open(CalculatorContext* cc) { PacketThinnerCalculatorOptions options = mediapipe::tool::RetrieveOptions( cc->Options(), cc->InputSidePackets(), kOptionsTag); @@ -224,10 +224,10 @@ TimestampDiff abs(TimestampDiff t) { return t < 0 ? -t : t; } } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status PacketThinnerCalculator::Close(CalculatorContext* cc) { +mediapipe::Status PacketThinnerCalculator::Close(CalculatorContext* cc) { // Emit any saved packets before quitting. if (!saved_packet_.IsEmpty()) { // Only sync thinner should have saved packets. @@ -239,10 +239,10 @@ TimestampDiff abs(TimestampDiff t) { return t < 0 ? -t : t; } cc->Outputs().Index(0).AddPacket(saved_packet_); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status PacketThinnerCalculator::AsyncThinnerProcess( +mediapipe::Status PacketThinnerCalculator::AsyncThinnerProcess( CalculatorContext* cc) { if (cc->InputTimestamp() >= next_valid_timestamp_) { cc->Outputs().Index(0).AddPacket( @@ -251,10 +251,10 @@ TimestampDiff abs(TimestampDiff t) { return t < 0 ? -t : t; } // Guaranteed not to emit packets seen during refractory period. cc->Outputs().Index(0).SetNextTimestampBound(next_valid_timestamp_); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status PacketThinnerCalculator::SyncThinnerProcess( +mediapipe::Status PacketThinnerCalculator::SyncThinnerProcess( CalculatorContext* cc) { if (saved_packet_.IsEmpty()) { // If no packet has been saved, store the current packet. @@ -290,7 +290,7 @@ TimestampDiff abs(TimestampDiff t) { return t < 0 ? -t : t; } saved_packet_ = cc->Inputs().Index(0).Value(); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } Timestamp PacketThinnerCalculator::NearestSyncTimestamp(Timestamp now) const { diff --git a/mediapipe/calculators/core/pass_through_calculator.cc b/mediapipe/calculators/core/pass_through_calculator.cc index d4e648037..d07104733 100644 --- a/mediapipe/calculators/core/pass_through_calculator.cc +++ b/mediapipe/calculators/core/pass_through_calculator.cc @@ -28,9 +28,9 @@ namespace mediapipe { // ignored. 
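//
// Example config (an illustrative sketch; stream names are hypothetical):
// node {
//   calculator: "PassThroughCalculator"
//   input_stream: "in"
//   output_stream: "out"
// }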
class PassThroughCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { if (!cc->Inputs().TagMap()->SameAs(*cc->Outputs().TagMap())) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "Input and output streams to PassThroughCalculator must use " "matching tags and indexes."); } @@ -46,7 +46,7 @@ class PassThroughCalculator : public CalculatorBase { if (cc->OutputSidePackets().NumEntries() != 0) { if (!cc->InputSidePackets().TagMap()->SameAs( *cc->OutputSidePackets().TagMap())) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "Input and output side packets to PassThroughCalculator must use " "matching tags and indexes."); } @@ -56,10 +56,10 @@ class PassThroughCalculator : public CalculatorBase { &cc->InputSidePackets().Get(id)); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { for (CollectionItemId id = cc->Inputs().BeginId(); id < cc->Inputs().EndId(); ++id) { if (!cc->Inputs().Get(id).Header().IsEmpty()) { @@ -73,10 +73,10 @@ class PassThroughCalculator : public CalculatorBase { } } cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { cc->GetCounter("PassThrough")->Increment(); if (cc->Inputs().NumEntries() == 0) { return tool::StatusStop(); @@ -90,7 +90,7 @@ class PassThroughCalculator : public CalculatorBase { cc->Outputs().Get(id).AddPacket(cc->Inputs().Get(id).Value()); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(PassThroughCalculator); diff --git a/mediapipe/calculators/core/previous_loopback_calculator.cc b/mediapipe/calculators/core/previous_loopback_calculator.cc index 8cbf04410..46102d3ea 100644 --- a/mediapipe/calculators/core/previous_loopback_calculator.cc +++ b/mediapipe/calculators/core/previous_loopback_calculator.cc @@ -53,7 +53,7 @@ namespace mediapipe { // } class PreviousLoopbackCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Get("MAIN", 0).SetAny(); cc->Inputs().Get("LOOP", 0).SetAny(); cc->Outputs().Get("PREV_LOOP", 0).SetSameAs(&(cc->Inputs().Get("LOOP", 0))); @@ -63,20 +63,20 @@ class PreviousLoopbackCalculator : public CalculatorBase { // Process() function is invoked in response to MAIN/LOOP stream timestamp // bound updates. 
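// Without bound updates, PREV_LOOP could not advance at timestamps where
// the LOOP stream delivers no packet.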
cc->SetProcessTimestampBounds(true); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { main_id_ = cc->Inputs().GetId("MAIN", 0); loop_id_ = cc->Inputs().GetId("LOOP", 0); prev_loop_id_ = cc->Outputs().GetId("PREV_LOOP", 0); cc->Outputs() .Get(prev_loop_id_) .SetHeader(cc->Inputs().Get(loop_id_).Header()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { // Non-empty packets and empty packets indicating timestamp bound updates // are guaranteed to have timestamps greater than timestamps of previous // packets within the same stream. Calculator tracks and operates on such @@ -139,7 +139,7 @@ class PreviousLoopbackCalculator : public CalculatorBase { } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: diff --git a/mediapipe/calculators/core/previous_loopback_calculator_test.cc b/mediapipe/calculators/core/previous_loopback_calculator_test.cc index ef469b43a..4c4d9b6e8 100644 --- a/mediapipe/calculators/core/previous_loopback_calculator_test.cc +++ b/mediapipe/calculators/core/previous_loopback_calculator_test.cc @@ -136,27 +136,27 @@ TEST(PreviousLoopbackCalculator, CorrectTimestamps) { // A Calculator that outputs a summary packet in CalculatorBase::Close(). class PacketOnCloseCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set(); cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { sum_ += cc->Inputs().Index(0).Value().Get(); cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Close(CalculatorContext* cc) final { + mediapipe::Status Close(CalculatorContext* cc) final { cc->Outputs().Index(0).AddPacket( MakePacket(sum_).At(Timestamp::Max())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: @@ -700,19 +700,19 @@ TEST_F(PreviousLoopbackCalculatorProcessingTimestampsTest, // Similar to GateCalculator, but it doesn't propagate timestamp bound updates. 
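// It is used below to exercise PreviousLoopbackCalculator against a LOOP
// stream whose timestamp bound stalls when packets are dropped.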
class DroppingGateCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).SetAny(); cc->Inputs().Tag("DISALLOW").Set(); cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { if (!cc->Inputs().Index(0).IsEmpty() && !cc->Inputs().Tag("DISALLOW").Get()) { cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(DroppingGateCalculator); diff --git a/mediapipe/calculators/core/quantize_float_vector_calculator.cc b/mediapipe/calculators/core/quantize_float_vector_calculator.cc index 76e635e5b..514159145 100644 --- a/mediapipe/calculators/core/quantize_float_vector_calculator.cc +++ b/mediapipe/calculators/core/quantize_float_vector_calculator.cc @@ -43,32 +43,32 @@ namespace mediapipe { class QuantizeFloatVectorCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Tag("FLOAT_VECTOR").Set>(); cc->Outputs().Tag("ENCODED").Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { const auto options = cc->Options<::mediapipe::QuantizeFloatVectorCalculatorOptions>(); if (!options.has_max_quantized_value() || !options.has_min_quantized_value()) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "Both max_quantized_value and min_quantized_value must be provided " "in QuantizeFloatVectorCalculatorOptions."); } max_quantized_value_ = options.max_quantized_value(); min_quantized_value_ = options.min_quantized_value(); if (max_quantized_value_ < min_quantized_value_ + FLT_EPSILON) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "max_quantized_value must be greater than min_quantized_value."); } range_ = max_quantized_value_ - min_quantized_value_; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { const std::vector& float_vector = cc->Inputs().Tag("FLOAT_VECTOR").Value().Get>(); int feature_size = float_vector.size(); @@ -88,7 +88,7 @@ class QuantizeFloatVectorCalculator : public CalculatorBase { } cc->Outputs().Tag("ENCODED").AddPacket( MakePacket(encoded_features).At(cc->InputTimestamp())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: diff --git a/mediapipe/calculators/core/real_time_flow_limiter_calculator.cc b/mediapipe/calculators/core/real_time_flow_limiter_calculator.cc new file mode 100644 index 000000000..0f7cde49a --- /dev/null +++ b/mediapipe/calculators/core/real_time_flow_limiter_calculator.cc @@ -0,0 +1,199 @@ +// Copyright 2019 The MediaPipe Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <algorithm>
+#include <set>
+#include <vector>
+
+#include "mediapipe/framework/calculator_framework.h"
+#include "mediapipe/framework/port/ret_check.h"
+#include "mediapipe/framework/port/status.h"
+#include "mediapipe/util/header_util.h"
+
+namespace mediapipe {
+
+// RealTimeFlowLimiterCalculator is used to limit the number of pipelined
+// processing operations in a section of the graph.
+//
+// Typical topology:
+//
+// in ->-[FLC]-[foo]-...-[bar]-+->- out
+//         ^_____________________|
+//                  FINISHED
+//
+// Connecting the output of the graph section to this calculator's FINISHED
+// input with a backwards edge lets FLC keep track of how many timestamps
+// are currently being processed.
+//
+// The limit defaults to 1, and can be overridden with the MAX_IN_FLIGHT
+// side packet.
+//
+// As long as the number of timestamps being processed ("in flight") is
+// below the limit, FLC allows input to pass through. When the limit is
+// reached, FLC starts dropping input packets, keeping only the most recent.
+// When the processing count decreases again, as signaled by the receipt of
+// a packet on FINISHED, FLC allows packets to flow again, releasing the
+// most recently queued packet, if any.
+//
+// If there are multiple input streams, packet dropping is synchronized.
+//
+// IMPORTANT: for each timestamp where FLC forwards a packet (or a set of
+// packets, if using multiple data streams), a packet must eventually arrive
+// on the FINISHED stream. Dropping packets in the section between FLC and
+// FINISHED will make the in-flight count incorrect.
+//
+// TODO: Remove this comment when graph-level ISH has been removed.
+// NOTE: this calculator should always use the ImmediateInputStreamHandler
+// and uses it by default. However, if the graph specifies a graph-level
+// InputStreamHandler, to override that setting, the InputStreamHandler must
+// be explicitly specified as shown below.
+//
+// Example config:
+// node {
+//   calculator: "RealTimeFlowLimiterCalculator"
+//   input_stream: "raw_frames"
+//   input_stream: "FINISHED:finished"
+//   input_stream_info: {
+//     tag_index: 'FINISHED'
+//     back_edge: true
+//   }
+//   input_stream_handler {
+//     input_stream_handler: 'ImmediateInputStreamHandler'
+//   }
+//   output_stream: "gated_frames"
+// }
+class RealTimeFlowLimiterCalculator : public CalculatorBase {
+ public:
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
+    int num_data_streams = cc->Inputs().NumEntries("");
+    RET_CHECK_GE(num_data_streams, 1);
+    RET_CHECK_EQ(cc->Outputs().NumEntries(""), num_data_streams)
+        << "Output streams must correspond to input streams except for the "
+           "finish indicator input stream.";
+    for (int i = 0; i < num_data_streams; ++i) {
+      cc->Inputs().Get("", i).SetAny();
+      cc->Outputs().Get("", i).SetSameAs(&(cc->Inputs().Get("", i)));
+    }
+    cc->Inputs().Get("FINISHED", 0).SetAny();
+    if (cc->InputSidePackets().HasTag("MAX_IN_FLIGHT")) {
+      cc->InputSidePackets().Tag("MAX_IN_FLIGHT").Set<int>();
+    }
+    if (cc->Outputs().HasTag("ALLOW")) {
+      cc->Outputs().Tag("ALLOW").Set<bool>();
+    }
+
+    cc->SetInputStreamHandler("ImmediateInputStreamHandler");
+
+    return mediapipe::OkStatus();
+  }
+
+  mediapipe::Status Open(CalculatorContext* cc) final {
+    finished_id_ = cc->Inputs().GetId("FINISHED", 0);
+    max_in_flight_ = 1;
+    if (cc->InputSidePackets().HasTag("MAX_IN_FLIGHT")) {
+      max_in_flight_ = cc->InputSidePackets().Tag("MAX_IN_FLIGHT").Get<int>();
+    }
+    RET_CHECK_GE(max_in_flight_, 1);
+    num_in_flight_ = 0;
+
+    allowed_id_ = cc->Outputs().GetId("ALLOW", 0);
+    allow_ctr_ts_ = Timestamp(0);
+
+    num_data_streams_ = cc->Inputs().NumEntries("");
+    data_stream_bound_ts_.resize(num_data_streams_);
+    RET_CHECK_OK(CopyInputHeadersToOutputs(cc->Inputs(), &(cc->Outputs())));
+    return mediapipe::OkStatus();
+  }
+
+  bool Allow() { return num_in_flight_ < max_in_flight_; }
+
+  mediapipe::Status Process(CalculatorContext* cc) final {
+    bool old_allow = Allow();
+    Timestamp lowest_incomplete_ts = Timestamp::Done();
+
+    // Process FINISHED stream.
+    if (!cc->Inputs().Get(finished_id_).Value().IsEmpty()) {
+      RET_CHECK_GT(num_in_flight_, 0)
+          << "Received a FINISHED packet, but we had none in flight.";
+      --num_in_flight_;
+    }
+
+    // Process data streams.
+    for (int i = 0; i < num_data_streams_; ++i) {
+      auto& stream = cc->Inputs().Get("", i);
+      auto& out = cc->Outputs().Get("", i);
+      Packet& packet = stream.Value();
+      auto ts = packet.Timestamp();
+      if (ts.IsRangeValue() && data_stream_bound_ts_[i] <= ts) {
+        data_stream_bound_ts_[i] = ts + 1;
+        // Note: it's ok to update the output bound here, before sending the
+        // packet, because updates are batched during the Process function.
+        out.SetNextTimestampBound(data_stream_bound_ts_[i]);
+      }
+      lowest_incomplete_ts =
+          std::min(lowest_incomplete_ts, data_stream_bound_ts_[i]);
+
+      if (packet.IsEmpty()) {
+        // If the input stream is closed, close the corresponding output.
+        if (stream.IsDone() && !out.IsClosed()) {
+          out.Close();
+        }
+        // TODO: if the packet is empty, the ts is unset, and we
+        // cannot read the timestamp bound, even though we'd like to propagate
+        // it.
+      } else if (mediapipe::ContainsKey(pending_ts_, ts)) {
+        // If we have already sent this timestamp (on another stream), send it
+        // on this stream too.
+        out.AddPacket(std::move(packet));
+      } else if (Allow() && (ts > last_dropped_ts_)) {
+        // If the in-flight is under the limit, and if we have not already
+        // dropped this or a later timestamp on another stream, then send
+        // the packet and add an in-flight timestamp.
+        out.AddPacket(std::move(packet));
+        pending_ts_.insert(ts);
+        ++num_in_flight_;
+      } else {
+        // Otherwise, we'll drop the packet.
+        last_dropped_ts_ = std::max(last_dropped_ts_, ts);
+      }
+    }
+
+    // Remove old pending_ts_ entries.
+    auto it = std::lower_bound(pending_ts_.begin(), pending_ts_.end(),
+                               lowest_incomplete_ts);
+    pending_ts_.erase(pending_ts_.begin(), it);
+
+    // Update ALLOW signal.
+    if ((old_allow != Allow()) && allowed_id_.IsValid()) {
+      cc->Outputs()
+          .Get(allowed_id_)
+          .AddPacket(MakePacket<bool>(Allow()).At(++allow_ctr_ts_));
+    }
+    return mediapipe::OkStatus();
+  }
+
+ private:
+  std::set<Timestamp> pending_ts_;
+  Timestamp last_dropped_ts_;
+  int num_data_streams_;
+  int num_in_flight_;
+  int max_in_flight_;
+  CollectionItemId finished_id_;
+  CollectionItemId allowed_id_;
+  Timestamp allow_ctr_ts_;
+  std::vector<Timestamp> data_stream_bound_ts_;
+};
+REGISTER_CALCULATOR(RealTimeFlowLimiterCalculator);
+
+}  // namespace mediapipe
diff --git a/mediapipe/calculators/core/real_time_flow_limiter_calculator_test.cc b/mediapipe/calculators/core/real_time_flow_limiter_calculator_test.cc
new file mode 100644
index 000000000..7f4ce1db1
--- /dev/null
+++ b/mediapipe/calculators/core/real_time_flow_limiter_calculator_test.cc
@@ -0,0 +1,496 @@
+// Copyright 2019 The MediaPipe Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <atomic>
+#include <functional>
+#include <string>
+#include <vector>
+
+#include "absl/time/clock.h"
+#include "absl/time/time.h"
+#include "mediapipe/framework/calculator_framework.h"
+#include "mediapipe/framework/calculator_runner.h"
+#include "mediapipe/framework/formats/image_frame.h"
+#include "mediapipe/framework/port/gmock.h"
+#include "mediapipe/framework/port/gtest.h"
+#include "mediapipe/framework/port/integral_types.h"
+#include "mediapipe/framework/port/parse_text_proto.h"
+#include "mediapipe/framework/port/status_matchers.h"
+#include "mediapipe/framework/timestamp.h"
+#include "mediapipe/framework/tool/sink.h"
+
+namespace mediapipe {
+
+namespace {
+// A simple Semaphore for synchronizing test threads.
+class AtomicSemaphore {
+ public:
+  AtomicSemaphore(int64_t supply) : supply_(supply) {}
+  void Acquire(int64_t amount) {
+    while (supply_.fetch_sub(amount) - amount < 0) {
+      Release(amount);
+    }
+  }
+  void Release(int64_t amount) { supply_.fetch_add(amount); }
+
+ private:
+  std::atomic<int64_t> supply_;
+};
+
+// Returns the timestamp values for a vector of Packets.
+std::vector<int64> TimestampValues(const std::vector<Packet>& packets) {
+  std::vector<int64> result;
+  for (const Packet& packet : packets) {
+    result.push_back(packet.Timestamp().Value());
+  }
+  return result;
+}
+
+// Returns the packet values for a vector of Packets.
+template <typename T>
+std::vector<T> PacketValues(const std::vector<Packet>& packets) {
+  std::vector<T> result;
+  for (const Packet& packet : packets) {
+    result.push_back(packet.Get<T>());
+  }
+  return result;
+}
+
+constexpr int kNumImageFrames = 5;
+constexpr int kNumFinished = 3;
+CalculatorGraphConfig::Node GetDefaultNode() {
+  return ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"(
+    calculator: "RealTimeFlowLimiterCalculator"
+    input_stream: "raw_frames"
+    input_stream: "FINISHED:finished"
+    input_stream_info: { tag_index: "FINISHED" back_edge: true }
+    output_stream: "gated_frames"
+  )");
+}
+
+// Simple test to make sure that the RealTimeFlowLimiterCalculator outputs just
+// one packet when MAX_IN_FLIGHT is 1.
+TEST(RealTimeFlowLimiterCalculator, OneOutputTest) {
+  // Setup the calculator runner and add only ImageFrame packets.
+  CalculatorRunner runner(GetDefaultNode());
+  for (int i = 0; i < kNumImageFrames; ++i) {
+    Timestamp timestamp = Timestamp(i * Timestamp::kTimestampUnitsPerSecond);
+    runner.MutableInputs()->Index(0).packets.push_back(
+        MakePacket<ImageFrame>().At(timestamp));
+  }
+
+  // Run the calculator.
+  MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
+  const std::vector<Packet>& frame_output_packets =
+      runner.Outputs().Index(0).packets;
+
+  EXPECT_EQ(frame_output_packets.size(), 1);
+}
+
+// Simple test to make sure that the RealTimeFlowLimiterCalculator waits for
+// all input streams to have at least one packet available before publishing.
+TEST(RealTimeFlowLimiterCalculator, BasicTest) {
+  // Setup the calculator runner and add both ImageFrame and finish packets.
+  CalculatorRunner runner(GetDefaultNode());
+  for (int i = 0; i < kNumImageFrames; ++i) {
+    Timestamp timestamp = Timestamp(i * Timestamp::kTimestampUnitsPerSecond);
+    runner.MutableInputs()->Index(0).packets.push_back(
+        MakePacket<ImageFrame>().At(timestamp));
+  }
+  for (int i = 0; i < kNumFinished; ++i) {
+    Timestamp timestamp =
+        Timestamp((i + 1) * Timestamp::kTimestampUnitsPerSecond);
+    runner.MutableInputs()
+        ->Tag("FINISHED")
+        .packets.push_back(MakePacket<bool>(true).At(timestamp));
+  }
+
+  // Run the calculator.
+  MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
+  const std::vector<Packet>& frame_output_packets =
+      runner.Outputs().Index(0).packets;
+
+  // Only outputs packets if both input streams are available.
+  int expected_num_packets = std::min(kNumImageFrames, kNumFinished + 1);
+  EXPECT_EQ(frame_output_packets.size(), expected_num_packets);
+}
+
+// A Calculator::Process callback function.
+typedef std::function<mediapipe::Status(const InputStreamShardSet&,
+                                        OutputStreamShardSet*)>
+    ProcessFunction;
+
+// A testing callback function that passes through all packets.
+mediapipe::Status PassthroughFunction(const InputStreamShardSet& inputs,
+                                      OutputStreamShardSet* outputs) {
+  for (int i = 0; i < inputs.NumEntries(); ++i) {
+    if (!inputs.Index(i).Value().IsEmpty()) {
+      outputs->Index(i).AddPacket(inputs.Index(i).Value());
+    }
+  }
+  return mediapipe::OkStatus();
+}
+
+// A Calculator that runs a testing callback function in Close.
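+// (Close() runs after all of the calculator's input streams are done, so the
+// callback observes the end of the graph run exactly once.)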
+class CloseCallbackCalculator : public CalculatorBase {
+ public:
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
+    for (CollectionItemId id = cc->Inputs().BeginId();
+         id < cc->Inputs().EndId(); ++id) {
+      cc->Inputs().Get(id).SetAny();
+    }
+    for (CollectionItemId id = cc->Outputs().BeginId();
+         id < cc->Outputs().EndId(); ++id) {
+      cc->Outputs().Get(id).SetAny();
+    }
+    cc->InputSidePackets().Index(0).Set<std::function<mediapipe::Status()>>();
+    return mediapipe::OkStatus();
+  }
+
+  mediapipe::Status Process(CalculatorContext* cc) override {
+    return PassthroughFunction(cc->Inputs(), &(cc->Outputs()));
+  }
+
+  mediapipe::Status Close(CalculatorContext* cc) override {
+    const auto& callback = cc->InputSidePackets()
+                               .Index(0)
+                               .Get<std::function<mediapipe::Status()>>();
+    return callback();
+  }
+};
+REGISTER_CALCULATOR(CloseCallbackCalculator);
+
+// Tests demonstrating a RealTimeFlowLimiterCalculator operating in a cyclic
+// graph.
+// TODO: clean up these tests.
+class RealTimeFlowLimiterCalculatorTest : public testing::Test {
+ public:
+  RealTimeFlowLimiterCalculatorTest()
+      : enter_semaphore_(0), exit_semaphore_(0) {}
+
+  void SetUp() override {
+    graph_config_ = InflightGraphConfig();
+    tool::AddVectorSink("out_1", &graph_config_, &out_1_packets_);
+    tool::AddVectorSink("out_2", &graph_config_, &out_2_packets_);
+  }
+
+  void InitializeGraph(int max_in_flight) {
+    ProcessFunction semaphore_0_func = [&](const InputStreamShardSet& inputs,
+                                           OutputStreamShardSet* outputs) {
+      enter_semaphore_.Release(1);
+      return PassthroughFunction(inputs, outputs);
+    };
+    ProcessFunction semaphore_1_func = [&](const InputStreamShardSet& inputs,
+                                           OutputStreamShardSet* outputs) {
+      exit_semaphore_.Acquire(1);
+      return PassthroughFunction(inputs, outputs);
+    };
+    std::function<mediapipe::Status()> close_func = [this]() {
+      close_count_++;
+      return mediapipe::OkStatus();
+    };
+    MP_ASSERT_OK(graph_.Initialize(
+        graph_config_, {
+                           {"max_in_flight", MakePacket<int>(max_in_flight)},
+                           {"callback_0", Adopt(new auto(semaphore_0_func))},
+                           {"callback_1", Adopt(new auto(semaphore_1_func))},
+                           {"callback_2", Adopt(new auto(close_func))},
+                       }));
+  }
+
+  // Adds a packet to a graph input stream.
+  void AddPacket(const std::string& input_name, int value) {
+    MP_EXPECT_OK(graph_.AddPacketToInputStream(
+        input_name, MakePacket<int>(value).At(Timestamp(value))));
+  }
+
+  // A calculator graph starting with a RealTimeFlowLimiterCalculator and
+  // ending with an InFlightFinishCalculator.
+  // Back-edge "finished" limits processing to one frame in-flight.
+  // The two LambdaCalculators are used to keep certain packet sets in flight.
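+  // callback_0 (semaphore_0_func) releases enter_semaphore_ as each packet
+  // set enters the section, while callback_1 (semaphore_1_func) blocks on
+  // exit_semaphore_, so each test controls exactly when a timestamp
+  // "finishes".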
+  CalculatorGraphConfig InflightGraphConfig() {
+    return ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      input_stream: 'in_1'
+      input_stream: 'in_2'
+      node {
+        calculator: 'RealTimeFlowLimiterCalculator'
+        input_side_packet: 'MAX_IN_FLIGHT:max_in_flight'
+        input_stream: 'in_1'
+        input_stream: 'in_2'
+        input_stream: 'FINISHED:out_1'
+        input_stream_info: { tag_index: 'FINISHED' back_edge: true }
+        output_stream: 'in_1_sampled'
+        output_stream: 'in_2_sampled'
+      }
+      node {
+        calculator: 'LambdaCalculator'
+        input_side_packet: 'callback_0'
+        input_stream: 'in_1_sampled'
+        input_stream: 'in_2_sampled'
+        output_stream: 'queue_1'
+        output_stream: 'queue_2'
+      }
+      node {
+        calculator: 'LambdaCalculator'
+        input_side_packet: 'callback_1'
+        input_stream: 'queue_1'
+        input_stream: 'queue_2'
+        output_stream: 'close_1'
+        output_stream: 'close_2'
+      }
+      node {
+        calculator: 'CloseCallbackCalculator'
+        input_side_packet: 'callback_2'
+        input_stream: 'close_1'
+        input_stream: 'close_2'
+        output_stream: 'out_1'
+        output_stream: 'out_2'
+      }
+    )");
+  }
+
+ protected:
+  CalculatorGraphConfig graph_config_;
+  CalculatorGraph graph_;
+  AtomicSemaphore enter_semaphore_;
+  AtomicSemaphore exit_semaphore_;
+  std::vector<Packet> out_1_packets_;
+  std::vector<Packet> out_2_packets_;
+  int close_count_ = 0;
+};
+
+// A test demonstrating a RealTimeFlowLimiterCalculator operating in a cyclic
+// graph. This test shows that:
+//
+// (1) Timestamps are passed through unaltered.
+// (2) All output streams, including the back_edge stream, are closed when
+//     the first input stream is closed.
+//
+TEST_F(RealTimeFlowLimiterCalculatorTest, BackEdgeCloses) {
+  InitializeGraph(1);
+  MP_ASSERT_OK(graph_.StartRun({}));
+
+  auto send_packet = [this](const std::string& input_name, int64 n) {
+    MP_EXPECT_OK(graph_.AddPacketToInputStream(
+        input_name, MakePacket<int64>(n).At(Timestamp(n))));
+  };
+
+  for (int i = 0; i < 10; i++) {
+    send_packet("in_1", i * 10);
+    // This next input should be dropped.
+    send_packet("in_1", i * 10 + 5);
+    MP_EXPECT_OK(graph_.WaitUntilIdle());
+    send_packet("in_2", i * 10);
+    exit_semaphore_.Release(1);
+    MP_EXPECT_OK(graph_.WaitUntilIdle());
+  }
+  MP_EXPECT_OK(graph_.CloseInputStream("in_1"));
+  MP_EXPECT_OK(graph_.CloseInputStream("in_2"));
+  MP_EXPECT_OK(graph_.WaitUntilIdle());
+
+  // All output streams are closed and all output packets are delivered
+  // once streams "in_1" and "in_2" are closed.
+  EXPECT_EQ(10, out_1_packets_.size());
+  EXPECT_EQ(10, out_2_packets_.size());
+
+  // Timestamps have been passed through unaltered.
+  EXPECT_EQ(PacketValues<int64>(out_1_packets_),
+            TimestampValues(out_1_packets_));
+  EXPECT_EQ(PacketValues<int64>(out_2_packets_),
+            TimestampValues(out_2_packets_));
+
+  // Extra inputs on in_1 have been dropped.
+  EXPECT_EQ(TimestampValues(out_1_packets_),
+            (std::vector<int64>{0, 10, 20, 30, 40, 50, 60, 70, 80, 90}));
+  EXPECT_EQ(TimestampValues(out_1_packets_), TimestampValues(out_2_packets_));
+
+  // The closing of the stream has been propagated.
+  EXPECT_EQ(1, close_count_);
+}
+
+// A test demonstrating that all output streams are closed when all
+// input streams are closed after the last input packet has been processed.
+TEST_F(RealTimeFlowLimiterCalculatorTest, AllStreamsClose) {
+  InitializeGraph(1);
+  MP_ASSERT_OK(graph_.StartRun({}));
+
+  exit_semaphore_.Release(10);
+  for (int i = 0; i < 10; i++) {
+    AddPacket("in_1", i);
+    MP_EXPECT_OK(graph_.WaitUntilIdle());
+    AddPacket("in_2", i);
+    MP_EXPECT_OK(graph_.WaitUntilIdle());
+  }
+  MP_EXPECT_OK(graph_.CloseAllInputStreams());
+  MP_EXPECT_OK(graph_.WaitUntilIdle());
+
+  EXPECT_EQ(TimestampValues(out_1_packets_), TimestampValues(out_2_packets_));
+  EXPECT_EQ(TimestampValues(out_1_packets_),
+            (std::vector<int64>{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}));
+  EXPECT_EQ(1, close_count_);
+}
+
+TEST(RealTimeFlowLimiterCalculator, TwoStreams) {
+  std::vector<Packet> a_passed;
+  std::vector<Packet> b_passed;
+  CalculatorGraphConfig graph_config_ =
+      ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+        input_stream: 'in_a'
+        input_stream: 'in_b'
+        input_stream: 'finished'
+        node {
+          name: 'input_dropper'
+          calculator: 'RealTimeFlowLimiterCalculator'
+          input_side_packet: 'MAX_IN_FLIGHT:max_in_flight'
+          input_stream: 'in_a'
+          input_stream: 'in_b'
+          input_stream: 'FINISHED:finished'
+          input_stream_info: { tag_index: 'FINISHED' back_edge: true }
+          output_stream: 'in_a_sampled'
+          output_stream: 'in_b_sampled'
+          output_stream: 'ALLOW:allow'
+        }
+      )");
+  std::string allow_cb_name;
+  tool::AddVectorSink("in_a_sampled", &graph_config_, &a_passed);
+  tool::AddVectorSink("in_b_sampled", &graph_config_, &b_passed);
+  tool::AddCallbackCalculator("allow", &graph_config_, &allow_cb_name, true);
+
+  bool allow = true;
+  auto allow_cb = [&allow](const Packet& packet) {
+    allow = packet.Get<bool>();
+  };
+
+  CalculatorGraph graph_;
+  MP_EXPECT_OK(graph_.Initialize(
+      graph_config_,
+      {
+          {"max_in_flight", MakePacket<int>(1)},
+          {allow_cb_name,
+           MakePacket<std::function<void(const Packet&)>>(allow_cb)},
+      }));
+
+  MP_EXPECT_OK(graph_.StartRun({}));
+
+  auto send_packet = [&graph_](const std::string& input_name, int n) {
+    MP_EXPECT_OK(graph_.AddPacketToInputStream(
+        input_name, MakePacket<int>(n).At(Timestamp(n))));
+  };
+  send_packet("in_a", 1);
+  MP_EXPECT_OK(graph_.WaitUntilIdle());
+  EXPECT_EQ(allow, false);
+  EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
+  EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{}));
+
+  send_packet("in_a", 2);
+  send_packet("in_b", 1);
+  MP_EXPECT_OK(graph_.WaitUntilIdle());
+  EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
+  EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1}));
+  EXPECT_EQ(allow, false);
+
+  send_packet("finished", 1);
+  MP_EXPECT_OK(graph_.WaitUntilIdle());
+  EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
+  EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1}));
+  EXPECT_EQ(allow, true);
+
+  send_packet("in_b", 2);
+  MP_EXPECT_OK(graph_.WaitUntilIdle());
+  EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
+  EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1}));
+  EXPECT_EQ(allow, true);
+
+  send_packet("in_b", 3);
+  MP_EXPECT_OK(graph_.WaitUntilIdle());
+  EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
+  EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1, 3}));
+  EXPECT_EQ(allow, false);
+
+  send_packet("in_b", 4);
+  MP_EXPECT_OK(graph_.WaitUntilIdle());
+  EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
+  EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1, 3}));
+  EXPECT_EQ(allow, false);
+
+  send_packet("in_a", 3);
+  MP_EXPECT_OK(graph_.WaitUntilIdle());
+  EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1, 3}));
+  EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1, 3}));
+  EXPECT_EQ(allow, false);
+
+  send_packet("finished", 3);
+  MP_EXPECT_OK(graph_.WaitUntilIdle());
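+  // The FINISHED packet at timestamp 3 retires the frame in flight, so the
+  // limiter should re-open and ALLOW should flip back to true.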
+  EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1, 3}));
+  EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1, 3}));
+  EXPECT_EQ(allow, true);
+
+  MP_EXPECT_OK(graph_.CloseAllInputStreams());
+  MP_EXPECT_OK(graph_.WaitUntilDone());
+}
+
+TEST(RealTimeFlowLimiterCalculator, CanConsume) {
+  std::vector<Packet> in_sampled_packets_;
+  CalculatorGraphConfig graph_config_ =
+      ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+        input_stream: 'in'
+        input_stream: 'finished'
+        node {
+          name: 'input_dropper'
+          calculator: 'RealTimeFlowLimiterCalculator'
+          input_side_packet: 'MAX_IN_FLIGHT:max_in_flight'
+          input_stream: 'in'
+          input_stream: 'FINISHED:finished'
+          input_stream_info: { tag_index: 'FINISHED' back_edge: true }
+          output_stream: 'in_sampled'
+          output_stream: 'ALLOW:allow'
+        }
+      )");
+  std::string allow_cb_name;
+  tool::AddVectorSink("in_sampled", &graph_config_, &in_sampled_packets_);
+  tool::AddCallbackCalculator("allow", &graph_config_, &allow_cb_name, true);
+
+  bool allow = true;
+  auto allow_cb = [&allow](const Packet& packet) {
+    allow = packet.Get<bool>();
+  };
+
+  CalculatorGraph graph_;
+  MP_EXPECT_OK(graph_.Initialize(
+      graph_config_,
+      {
+          {"max_in_flight", MakePacket<int>(1)},
+          {allow_cb_name,
+           MakePacket<std::function<void(const Packet&)>>(allow_cb)},
+      }));
+
+  MP_EXPECT_OK(graph_.StartRun({}));
+
+  auto send_packet = [&graph_](const std::string& input_name, int n) {
+    MP_EXPECT_OK(graph_.AddPacketToInputStream(
+        input_name, MakePacket<int>(n).At(Timestamp(n))));
+  };
+  send_packet("in", 1);
+  MP_EXPECT_OK(graph_.WaitUntilIdle());
+  EXPECT_EQ(allow, false);
+  EXPECT_EQ(TimestampValues(in_sampled_packets_), (std::vector<int64>{1}));
+
+  MP_EXPECT_OK(in_sampled_packets_[0].Consume<int>());
+
+  MP_EXPECT_OK(graph_.CloseAllInputStreams());
+  MP_EXPECT_OK(graph_.WaitUntilDone());
+}
+
+}  // anonymous namespace
+}  // namespace mediapipe
diff --git a/mediapipe/calculators/core/round_robin_demux_calculator.cc b/mediapipe/calculators/core/round_robin_demux_calculator.cc
index c84e08884..8fe2c2b9c 100644
--- a/mediapipe/calculators/core/round_robin_demux_calculator.cc
+++ b/mediapipe/calculators/core/round_robin_demux_calculator.cc
@@ -73,7 +73,7 @@ namespace mediapipe {
 // MuxCalculator/MuxInputStreamHandler.
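 //
 // An illustrative node config (a sketch; the stream names here are
 // hypothetical):
 //
 //   node {
 //     calculator: "RoundRobinDemuxCalculator"
 //     input_stream: "frames"
 //     output_stream: "OUTPUT:0:frames_0"
 //     output_stream: "OUTPUT:1:frames_1"
 //     output_stream: "SELECT:select"
 //   }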
 class RoundRobinDemuxCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     RET_CHECK_EQ(cc->Inputs().NumEntries(), 1);
     cc->Inputs().Index(0).SetAny();
     if (cc->Outputs().HasTag("SELECT")) {
@@ -83,18 +83,18 @@ class RoundRobinDemuxCalculator : public CalculatorBase {
          id < cc->Outputs().EndId("OUTPUT"); ++id) {
       cc->Outputs().Get(id).SetSameAs(&cc->Inputs().Index(0));
     }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
-  ::mediapipe::Status Open(CalculatorContext* cc) override {
+  mediapipe::Status Open(CalculatorContext* cc) override {
     select_output_ = cc->Outputs().GetId("SELECT", 0);
     output_data_stream_index_ = 0;
     output_data_stream_base_ = cc->Outputs().GetId("OUTPUT", 0);
     num_output_data_streams_ = cc->Outputs().NumEntries("OUTPUT");
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
-  ::mediapipe::Status Process(CalculatorContext* cc) override {
+  mediapipe::Status Process(CalculatorContext* cc) override {
     cc->Outputs()
         .Get(output_data_stream_base_ + output_data_stream_index_)
         .AddPacket(cc->Inputs().Index(0).Value());
@@ -105,7 +105,7 @@ class RoundRobinDemuxCalculator : public CalculatorBase {
     }
     output_data_stream_index_ =
         (output_data_stream_index_ + 1) % num_output_data_streams_;
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
  private:
diff --git a/mediapipe/calculators/core/sequence_shift_calculator.cc b/mediapipe/calculators/core/sequence_shift_calculator.cc
index 425795fa2..e288128e1 100644
--- a/mediapipe/calculators/core/sequence_shift_calculator.cc
+++ b/mediapipe/calculators/core/sequence_shift_calculator.cc
@@ -30,18 +30,18 @@ namespace mediapipe {
 // second, and so on.
 class SequenceShiftCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     cc->Inputs().Index(0).SetAny();
     if (cc->InputSidePackets().HasTag(kPacketOffsetTag)) {
       cc->InputSidePackets().Tag(kPacketOffsetTag).Set<int>();
     }
     cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
   // Reads from options to set cache_size_ and packet_offset_.
-  ::mediapipe::Status Open(CalculatorContext* cc) override;
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
 
  private:
   static constexpr const char* kPacketOffsetTag = "PACKET_OFFSET";
@@ -72,7 +72,7 @@ class SequenceShiftCalculator : public CalculatorBase {
 };
 REGISTER_CALCULATOR(SequenceShiftCalculator);
 
-::mediapipe::Status SequenceShiftCalculator::Open(CalculatorContext* cc) {
+mediapipe::Status SequenceShiftCalculator::Open(CalculatorContext* cc) {
   packet_offset_ =
       cc->Options<mediapipe::SequenceShiftCalculatorOptions>().packet_offset();
   if (cc->InputSidePackets().HasTag(kPacketOffsetTag)) {
@@ -83,10 +83,10 @@ REGISTER_CALCULATOR(SequenceShiftCalculator);
   if (packet_offset_ == 0) {
     cc->Outputs().Index(0).SetOffset(0);
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
-::mediapipe::Status SequenceShiftCalculator::Process(CalculatorContext* cc) {
+mediapipe::Status SequenceShiftCalculator::Process(CalculatorContext* cc) {
   if (packet_offset_ > 0) {
     ProcessPositiveOffset(cc);
   } else if (packet_offset_ < 0) {
@@ -94,7 +94,7 @@ REGISTER_CALCULATOR(SequenceShiftCalculator);
   } else {
     cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value());
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
 void SequenceShiftCalculator::ProcessPositiveOffset(CalculatorContext* cc) {
diff --git a/mediapipe/calculators/core/side_packet_to_stream_calculator.cc b/mediapipe/calculators/core/side_packet_to_stream_calculator.cc
index 47c3f624b..4ad359bbe 100644
--- a/mediapipe/calculators/core/side_packet_to_stream_calculator.cc
+++ b/mediapipe/calculators/core/side_packet_to_stream_calculator.cc
@@ -89,10 +89,10 @@ class SidePacketToStreamCalculator : public CalculatorBase {
   SidePacketToStreamCalculator() = default;
   ~SidePacketToStreamCalculator() override = default;
 
-  static ::mediapipe::Status GetContract(CalculatorContract* cc);
-  ::mediapipe::Status Open(CalculatorContext* cc) override;
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
-  ::mediapipe::Status Close(CalculatorContext* cc) override;
+  static mediapipe::Status GetContract(CalculatorContract* cc);
+  mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Close(CalculatorContext* cc) override;
 
  private:
   bool is_tick_processing_ = false;
@@ -100,7 +100,7 @@ class SidePacketToStreamCalculator : public CalculatorBase {
 };
 REGISTER_CALCULATOR(SidePacketToStreamCalculator);
 
-::mediapipe::Status SidePacketToStreamCalculator::GetContract(
+mediapipe::Status SidePacketToStreamCalculator::GetContract(
     CalculatorContract* cc) {
   const auto& tags = cc->Outputs().GetTags();
   RET_CHECK(tags.size() == 1 && kTimestampMap->count(*tags.begin()) == 1)
@@ -138,10 +138,10 @@ REGISTER_CALCULATOR(SidePacketToStreamCalculator);
     cc->Inputs().Tag(kTagTick).SetAny();
   }
 
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
-::mediapipe::Status SidePacketToStreamCalculator::Open(CalculatorContext* cc) {
+mediapipe::Status SidePacketToStreamCalculator::Open(CalculatorContext* cc) {
   output_tag_ = GetOutputTag(*cc);
   if (cc->Inputs().HasTag(kTagTick)) {
     is_tick_processing_ = true;
@@ -149,11 +149,10 @@ REGISTER_CALCULATOR(SidePacketToStreamCalculator);
     // timestamp bound update.
cc->SetOffset(TimestampDiff(0)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status SidePacketToStreamCalculator::Process( - CalculatorContext* cc) { +mediapipe::Status SidePacketToStreamCalculator::Process(CalculatorContext* cc) { if (is_tick_processing_) { // TICK input is guaranteed to be non-empty, as it's the only input stream // for this calculator. @@ -164,13 +163,13 @@ REGISTER_CALCULATOR(SidePacketToStreamCalculator); .AddPacket(cc->InputSidePackets().Index(i).At(timestamp)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - return ::mediapipe::tool::StatusStop(); + return mediapipe::tool::StatusStop(); } -::mediapipe::Status SidePacketToStreamCalculator::Close(CalculatorContext* cc) { +mediapipe::Status SidePacketToStreamCalculator::Close(CalculatorContext* cc) { if (!cc->Outputs().HasTag(kTagAtTick) && !cc->Outputs().HasTag(kTagAtTimestamp)) { const auto& timestamp = kTimestampMap->at(output_tag_); @@ -188,7 +187,7 @@ REGISTER_CALCULATOR(SidePacketToStreamCalculator); .AddPacket(cc->InputSidePackets().Index(i).At(Timestamp(timestamp))); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/core/side_packet_to_stream_calculator_test.cc b/mediapipe/calculators/core/side_packet_to_stream_calculator_test.cc index e7195e03b..706825f19 100644 --- a/mediapipe/calculators/core/side_packet_to_stream_calculator_test.cc +++ b/mediapipe/calculators/core/side_packet_to_stream_calculator_test.cc @@ -20,6 +20,7 @@ #include "absl/strings/str_replace.h" #include "absl/strings/string_view.h" #include "mediapipe/framework/calculator_framework.h" +#include "mediapipe/framework/port/gmock.h" #include "mediapipe/framework/port/gtest.h" #include "mediapipe/framework/port/integral_types.h" #include "mediapipe/framework/port/parse_text_proto.h" @@ -30,6 +31,8 @@ namespace mediapipe { namespace { +using testing::HasSubstr; + TEST(SidePacketToStreamCalculator, WrongConfig_MissingTick) { CalculatorGraphConfig graph_config = ParseTextProtoOrDie( @@ -46,9 +49,10 @@ TEST(SidePacketToStreamCalculator, WrongConfig_MissingTick) { CalculatorGraph graph; auto status = graph.Initialize(graph_config); EXPECT_FALSE(status.ok()); - EXPECT_PRED2( - absl::StrContains, status.message(), - "Either both of TICK and AT_TICK should be used or none of them."); + EXPECT_THAT( + status.message(), + HasSubstr( + "Either both of TICK and AT_TICK should be used or none of them.")); } TEST(SidePacketToStreamCalculator, WrongConfig_MissingTimestampSideInput) { @@ -67,9 +71,9 @@ TEST(SidePacketToStreamCalculator, WrongConfig_MissingTimestampSideInput) { CalculatorGraph graph; auto status = graph.Initialize(graph_config); EXPECT_FALSE(status.ok()); - EXPECT_PRED2( - absl::StrContains, status.message(), - "Either both TIMESTAMP and AT_TIMESTAMP should be used or none of them."); + EXPECT_THAT(status.message(), + HasSubstr("Either both TIMESTAMP and AT_TIMESTAMP should be used " + "or none of them.")); } TEST(SidePacketToStreamCalculator, WrongConfig_NonExistentTag) { @@ -88,10 +92,11 @@ TEST(SidePacketToStreamCalculator, WrongConfig_NonExistentTag) { CalculatorGraph graph; auto status = graph.Initialize(graph_config); EXPECT_FALSE(status.ok()); - EXPECT_PRED2(absl::StrContains, status.message(), - "Only one of AT_PRESTREAM, AT_POSTSTREAM, AT_ZERO, AT_TICK and " - "AT_TIMESTAMP tags is allowed and required to specify output " - "stream(s)."); + EXPECT_THAT( + status.message(), + HasSubstr("Only one 
of AT_PRESTREAM, AT_POSTSTREAM, AT_ZERO, AT_TICK and " + "AT_TIMESTAMP tags is allowed and required to specify output " + "stream(s).")); } TEST(SidePacketToStreamCalculator, WrongConfig_MixedTags) { @@ -112,10 +117,11 @@ TEST(SidePacketToStreamCalculator, WrongConfig_MixedTags) { CalculatorGraph graph; auto status = graph.Initialize(graph_config); EXPECT_FALSE(status.ok()); - EXPECT_PRED2(absl::StrContains, status.message(), - "Only one of AT_PRESTREAM, AT_POSTSTREAM, AT_ZERO, AT_TICK and " - "AT_TIMESTAMP tags is allowed and required to specify output " - "stream(s)."); + EXPECT_THAT( + status.message(), + HasSubstr("Only one of AT_PRESTREAM, AT_POSTSTREAM, AT_ZERO, AT_TICK and " + "AT_TIMESTAMP tags is allowed and required to specify output " + "stream(s).")); } TEST(SidePacketToStreamCalculator, WrongConfig_NotEnoughSidePackets) { @@ -134,9 +140,10 @@ TEST(SidePacketToStreamCalculator, WrongConfig_NotEnoughSidePackets) { CalculatorGraph graph; auto status = graph.Initialize(graph_config); EXPECT_FALSE(status.ok()); - EXPECT_PRED2( - absl::StrContains, status.message(), - "Same number of input side packets and output streams is required."); + EXPECT_THAT( + status.message(), + HasSubstr( + "Same number of input side packets and output streams is required.")); } TEST(SidePacketToStreamCalculator, WrongConfig_NotEnoughOutputStreams) { @@ -155,9 +162,10 @@ TEST(SidePacketToStreamCalculator, WrongConfig_NotEnoughOutputStreams) { CalculatorGraph graph; auto status = graph.Initialize(graph_config); EXPECT_FALSE(status.ok()); - EXPECT_PRED2( - absl::StrContains, status.message(), - "Same number of input side packets and output streams is required."); + EXPECT_THAT( + status.message(), + HasSubstr( + "Same number of input side packets and output streams is required.")); } void DoTestNonAtTickOutputTag(absl::string_view tag, @@ -181,7 +189,7 @@ void DoTestNonAtTickOutputTag(absl::string_view tag, MP_ASSERT_OK(graph.ObserveOutputStream( "packet", [&output_packets](const Packet& packet) { output_packets.push_back(packet); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); MP_ASSERT_OK( graph.StartRun({{"side_packet", MakePacket(expected_value)}})); diff --git a/mediapipe/calculators/core/split_normalized_landmark_list_calculator.cc b/mediapipe/calculators/core/split_normalized_landmark_list_calculator.cc index 85bac0e9b..c51a0a42f 100644 --- a/mediapipe/calculators/core/split_normalized_landmark_list_calculator.cc +++ b/mediapipe/calculators/core/split_normalized_landmark_list_calculator.cc @@ -35,7 +35,7 @@ namespace mediapipe { // NormalizedLandmarkList. 
class SplitNormalizedLandmarkListCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { RET_CHECK(cc->Inputs().NumEntries() == 1); RET_CHECK(cc->Outputs().NumEntries() != 0); @@ -55,7 +55,7 @@ class SplitNormalizedLandmarkListCalculator : public CalculatorBase { range_0.begin() < range_1.end()) || (range_1.begin() >= range_0.begin() && range_1.begin() < range_0.end())) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "Ranges must be non-overlapping when using combine_outputs " "option."); } @@ -63,7 +63,7 @@ class SplitNormalizedLandmarkListCalculator : public CalculatorBase { } } else { if (cc->Outputs().NumEntries() != options.ranges_size()) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "The number of output streams should match the number of ranges " "specified in the CalculatorOptions."); } @@ -72,13 +72,13 @@ class SplitNormalizedLandmarkListCalculator : public CalculatorBase { for (int i = 0; i < cc->Outputs().NumEntries(); ++i) { if (options.ranges(i).begin() < 0 || options.ranges(i).end() < 0 || options.ranges(i).begin() >= options.ranges(i).end()) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "Indices should be non-negative and begin index should be less " "than the end index."); } if (options.element_only()) { if (options.ranges(i).end() - options.ranges(i).begin() != 1) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "Since element_only is true, all ranges should be of size 1."); } cc->Outputs().Index(i).Set(); @@ -88,10 +88,10 @@ class SplitNormalizedLandmarkListCalculator : public CalculatorBase { } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->SetOffset(TimestampDiff(0)); const auto& options = @@ -106,10 +106,10 @@ class SplitNormalizedLandmarkListCalculator : public CalculatorBase { total_elements_ += range.end() - range.begin(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { const NormalizedLandmarkList& input = cc->Inputs().Index(0).Get(); RET_CHECK_GE(input.landmark_size(), max_range_end_) @@ -148,7 +148,7 @@ class SplitNormalizedLandmarkListCalculator : public CalculatorBase { } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: diff --git a/mediapipe/calculators/core/split_normalized_landmark_list_calculator_test.cc b/mediapipe/calculators/core/split_normalized_landmark_list_calculator_test.cc index ce02dcd8a..202287208 100644 --- a/mediapipe/calculators/core/split_normalized_landmark_list_calculator_test.cc +++ b/mediapipe/calculators/core/split_normalized_landmark_list_calculator_test.cc @@ -121,7 +121,7 @@ TEST_F(SplitNormalizedLandmarkListCalculatorTest, SmokeTest) { // Prepare a graph to use the SplitNormalizedLandmarkListCalculator. 
CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( R"( input_stream: "landmarks_in" node { @@ -170,7 +170,7 @@ TEST_F(SplitNormalizedLandmarkListCalculatorTest, SmokeTest) { TEST_F(SplitNormalizedLandmarkListCalculatorTest, InvalidRangeTest) { // Prepare a graph to use the SplitNormalizedLandmarkListCalculator. CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( R"( input_stream: "landmarks_in" node { @@ -195,7 +195,7 @@ TEST_F(SplitNormalizedLandmarkListCalculatorTest, InvalidOutputStreamCountTest) { // Prepare a graph to use the SplitNormalizedLandmarkListCalculator. CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( R"( input_stream: "landmarks_in" node { @@ -222,7 +222,7 @@ TEST_F(SplitNormalizedLandmarkListCalculatorTest, InvalidCombineOutputsMultipleOutputsTest) { // Prepare a graph to use the SplitNormalizedLandmarkListCalculator. CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( R"( input_stream: "landmarks_in" node { @@ -251,7 +251,7 @@ TEST_F(SplitNormalizedLandmarkListCalculatorTest, InvalidOverlappingRangesTest) { // Prepare a graph to use the SplitNormalizedLandmarkListCalculator. CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( R"( input_stream: "landmarks_in" node { @@ -280,7 +280,7 @@ TEST_F(SplitNormalizedLandmarkListCalculatorTest, SmokeTestElementOnly) { // Prepare a graph to use the SplitNormalizedLandmarkListCalculator. CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( R"( input_stream: "landmarks_in" node { @@ -333,7 +333,7 @@ TEST_F(SplitNormalizedLandmarkListCalculatorTest, SmokeTestCombiningOutputs) { // Prepare a graph to use the SplitNormalizedLandmarkListCalculator. CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( R"( input_stream: "landmarks_in" node { @@ -376,7 +376,7 @@ TEST_F(SplitNormalizedLandmarkListCalculatorTest, ElementOnlyDisablesVectorOutputs) { // Prepare a graph to use the SplitNormalizedLandmarkListCalculator. 
CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( R"( input_stream: "landmarks_in" node { diff --git a/mediapipe/calculators/core/split_vector_calculator.h b/mediapipe/calculators/core/split_vector_calculator.h index 4e257e3df..6fb863377 100644 --- a/mediapipe/calculators/core/split_vector_calculator.h +++ b/mediapipe/calculators/core/split_vector_calculator.h @@ -58,7 +58,7 @@ using IsNotMovable = template class SplitVectorCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { RET_CHECK(cc->Inputs().NumEntries() == 1); RET_CHECK(cc->Outputs().NumEntries() != 0); @@ -79,7 +79,7 @@ class SplitVectorCalculator : public CalculatorBase { RET_CHECK_OK(checkRangesDontOverlap(options)); } else { if (cc->Outputs().NumEntries() != options.ranges_size()) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "The number of output streams should match the number of ranges " "specified in the CalculatorOptions."); } @@ -88,13 +88,13 @@ class SplitVectorCalculator : public CalculatorBase { for (int i = 0; i < cc->Outputs().NumEntries(); ++i) { if (options.ranges(i).begin() < 0 || options.ranges(i).end() < 0 || options.ranges(i).begin() >= options.ranges(i).end()) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "Indices should be non-negative and begin index should be less " "than the end index."); } if (options.element_only()) { if (options.ranges(i).end() - options.ranges(i).begin() != 1) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "Since element_only is true, all ranges should be of size 1."); } cc->Outputs().Index(i).Set(); @@ -104,10 +104,10 @@ class SplitVectorCalculator : public CalculatorBase { } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->SetOffset(TimestampDiff(0)); const auto& options = @@ -122,11 +122,11 @@ class SplitVectorCalculator : public CalculatorBase { total_elements_ += range.end() - range.begin(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { - if (cc->Inputs().Index(0).IsEmpty()) return ::mediapipe::OkStatus(); + mediapipe::Status Process(CalculatorContext* cc) override { + if (cc->Inputs().Index(0).IsEmpty()) return mediapipe::OkStatus(); if (move_elements) { return ProcessMovableElements(cc); @@ -136,7 +136,7 @@ class SplitVectorCalculator : public CalculatorBase { } template = true> - ::mediapipe::Status ProcessCopyableElements(CalculatorContext* cc) { + mediapipe::Status ProcessCopyableElements(CalculatorContext* cc) { // static_assert(std::is_copy_constructible::value, // "Cannot copy non-copyable elements"); const auto& input = cc->Inputs().Index(0).Get>(); @@ -167,17 +167,17 @@ class SplitVectorCalculator : public CalculatorBase { } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } template = true> - ::mediapipe::Status ProcessCopyableElements(CalculatorContext* cc) { - return ::mediapipe::InternalError("Cannot copy non-copyable elements."); + mediapipe::Status ProcessCopyableElements(CalculatorContext* cc) { + return mediapipe::InternalError("Cannot copy non-copyable elements."); } template = true> - ::mediapipe::Status 
ProcessMovableElements(CalculatorContext* cc) { - ::mediapipe::StatusOr>> input_status = + mediapipe::Status ProcessMovableElements(CalculatorContext* cc) { + mediapipe::StatusOr>> input_status = cc->Inputs().Index(0).Value().Consume>(); if (!input_status.ok()) return input_status.status(); std::unique_ptr> input_vector = @@ -214,16 +214,16 @@ class SplitVectorCalculator : public CalculatorBase { } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } template = true> - ::mediapipe::Status ProcessMovableElements(CalculatorContext* cc) { - return ::mediapipe::InternalError("Cannot move non-movable elements."); + mediapipe::Status ProcessMovableElements(CalculatorContext* cc) { + return mediapipe::InternalError("Cannot move non-movable elements."); } private: - static ::mediapipe::Status checkRangesDontOverlap( + static mediapipe::Status checkRangesDontOverlap( const ::mediapipe::SplitVectorCalculatorOptions& options) { for (int i = 0; i < options.ranges_size() - 1; ++i) { for (int j = i + 1; j < options.ranges_size(); ++j) { @@ -233,13 +233,13 @@ class SplitVectorCalculator : public CalculatorBase { range_0.begin() < range_1.end()) || (range_1.begin() >= range_0.begin() && range_1.begin() < range_0.end())) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "Ranges must be non-overlapping when using combine_outputs " "option."); } } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } std::vector> ranges_; diff --git a/mediapipe/calculators/core/split_vector_calculator_test.cc b/mediapipe/calculators/core/split_vector_calculator_test.cc index 5d1ea2a04..0b98940fe 100644 --- a/mediapipe/calculators/core/split_vector_calculator_test.cc +++ b/mediapipe/calculators/core/split_vector_calculator_test.cc @@ -162,7 +162,7 @@ TEST_F(SplitTfLiteTensorVectorCalculatorTest, SmokeTest) { // Prepare a graph to use the SplitTfLiteTensorVectorCalculator. CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( R"( input_stream: "tensor_in" node { @@ -213,7 +213,7 @@ TEST_F(SplitTfLiteTensorVectorCalculatorTest, InvalidRangeTest) { // Prepare a graph to use the SplitTfLiteTensorVectorCalculator. CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( R"( input_stream: "tensor_in" node { @@ -239,7 +239,7 @@ TEST_F(SplitTfLiteTensorVectorCalculatorTest, InvalidOutputStreamCountTest) { // Prepare a graph to use the SplitTfLiteTensorVectorCalculator. CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( R"( input_stream: "tensor_in" node { @@ -268,7 +268,7 @@ TEST_F(SplitTfLiteTensorVectorCalculatorTest, // Prepare a graph to use the SplitTfLiteTensorVectorCalculator. CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( R"( input_stream: "tensor_in" node { @@ -298,7 +298,7 @@ TEST_F(SplitTfLiteTensorVectorCalculatorTest, InvalidOverlappingRangesTest) { // Prepare a graph to use the SplitTfLiteTensorVectorCalculator. CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( R"( input_stream: "tensor_in" node { @@ -329,7 +329,7 @@ TEST_F(SplitTfLiteTensorVectorCalculatorTest, SmokeTestElementOnly) { // Prepare a graph to use the SplitTfLiteTensorVectorCalculator. 
CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( R"( input_stream: "tensor_in" node { @@ -384,7 +384,7 @@ TEST_F(SplitTfLiteTensorVectorCalculatorTest, SmokeTestCombiningOutputs) { // Prepare a graph to use the SplitTfLiteTensorVectorCalculator. CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( R"( input_stream: "tensor_in" node { @@ -427,7 +427,7 @@ TEST_F(SplitTfLiteTensorVectorCalculatorTest, ElementOnlyDisablesVectorOutputs) { // Prepare a graph to use the SplitTfLiteTensorVectorCalculator. CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( R"( input_stream: "tensor_in" node { @@ -510,7 +510,7 @@ class MovableSplitUniqueIntPtrCalculatorTest : public ::testing::Test { TEST_F(MovableSplitUniqueIntPtrCalculatorTest, InvalidOverlappingRangesTest) { // Prepare a graph to use the TestMovableSplitUniqueIntPtrVectorCalculator. CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( R"( input_stream: "input_vector" node { @@ -535,7 +535,7 @@ TEST_F(MovableSplitUniqueIntPtrCalculatorTest, InvalidOverlappingRangesTest) { TEST_F(MovableSplitUniqueIntPtrCalculatorTest, SmokeTest) { // Prepare a graph to use the TestMovableSplitUniqueIntPtrVectorCalculator. CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( R"( input_stream: "input_vector" node { @@ -591,7 +591,7 @@ TEST_F(MovableSplitUniqueIntPtrCalculatorTest, SmokeTest) { TEST_F(MovableSplitUniqueIntPtrCalculatorTest, SmokeTestElementOnly) { // Prepare a graph to use the TestMovableSplitUniqueIntPtrVectorCalculator. CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( R"( input_stream: "input_vector" node { @@ -645,7 +645,7 @@ TEST_F(MovableSplitUniqueIntPtrCalculatorTest, SmokeTestElementOnly) { TEST_F(MovableSplitUniqueIntPtrCalculatorTest, SmokeTestCombiningOutputs) { // Prepare a graph to use the TestMovableSplitUniqueIntPtrVectorCalculator. 
     CalculatorGraphConfig graph_config =
-        ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(
+        mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(
             R"(
               input_stream: "input_vector"
               node {
diff --git a/mediapipe/calculators/core/string_to_int_calculator.cc b/mediapipe/calculators/core/string_to_int_calculator.cc
index 64600cde3..7dc558160 100644
--- a/mediapipe/calculators/core/string_to_int_calculator.cc
+++ b/mediapipe/calculators/core/string_to_int_calculator.cc
@@ -36,25 +36,25 @@ namespace mediapipe {
 template <typename IntType>
 class StringToIntCalculatorTemplate : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     cc->InputSidePackets().Index(0).Set<std::string>();
     cc->OutputSidePackets().Index(0).Set<IntType>();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
-  ::mediapipe::Status Open(CalculatorContext* cc) override {
+  mediapipe::Status Open(CalculatorContext* cc) override {
     IntType number;
     if (!absl::SimpleAtoi(cc->InputSidePackets().Index(0).Get<std::string>(),
                           &number)) {
-      return ::mediapipe::InvalidArgumentError(
+      return mediapipe::InvalidArgumentError(
           "The std::string could not be parsed as an integer.");
     }
     cc->OutputSidePackets().Index(0).Set(MakePacket<IntType>(number));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
-  ::mediapipe::Status Process(CalculatorContext* cc) override {
-    return ::mediapipe::OkStatus();
+  mediapipe::Status Process(CalculatorContext* cc) override {
+    return mediapipe::OkStatus();
   }
 };
diff --git a/mediapipe/calculators/image/bilateral_filter_calculator.cc b/mediapipe/calculators/image/bilateral_filter_calculator.cc
index 8d3d26f2d..ae89c4f4a 100644
--- a/mediapipe/calculators/image/bilateral_filter_calculator.cc
+++ b/mediapipe/calculators/image/bilateral_filter_calculator.cc
@@ -82,18 +82,18 @@ class BilateralFilterCalculator : public CalculatorBase {
   BilateralFilterCalculator() = default;
   ~BilateralFilterCalculator() override = default;
 
-  static ::mediapipe::Status GetContract(CalculatorContract* cc);
+  static mediapipe::Status GetContract(CalculatorContract* cc);
 
   // From Calculator.
- ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; private: - ::mediapipe::Status RenderGpu(CalculatorContext* cc); - ::mediapipe::Status RenderCpu(CalculatorContext* cc); + mediapipe::Status RenderGpu(CalculatorContext* cc); + mediapipe::Status RenderCpu(CalculatorContext* cc); - ::mediapipe::Status GlSetup(CalculatorContext* cc); + mediapipe::Status GlSetup(CalculatorContext* cc); void GlRender(CalculatorContext* cc); mediapipe::BilateralFilterCalculatorOptions options_; @@ -111,17 +111,17 @@ class BilateralFilterCalculator : public CalculatorBase { }; REGISTER_CALCULATOR(BilateralFilterCalculator); -::mediapipe::Status BilateralFilterCalculator::GetContract( +mediapipe::Status BilateralFilterCalculator::GetContract( CalculatorContract* cc) { CHECK_GE(cc->Inputs().NumEntries(), 1); if (cc->Inputs().HasTag(kInputFrameTag) && cc->Inputs().HasTag(kInputFrameTagGpu)) { - return ::mediapipe::InternalError("Cannot have multiple input images."); + return mediapipe::InternalError("Cannot have multiple input images."); } if (cc->Inputs().HasTag(kInputFrameTagGpu) != cc->Outputs().HasTag(kOutputFrameTagGpu)) { - return ::mediapipe::InternalError("GPU output must have GPU input."); + return mediapipe::InternalError("GPU output must have GPU input."); } bool use_gpu = false; @@ -165,10 +165,10 @@ REGISTER_CALCULATOR(BilateralFilterCalculator); #endif // !MEDIAPIPE_DISABLE_GPU } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status BilateralFilterCalculator::Open(CalculatorContext* cc) { +mediapipe::Status BilateralFilterCalculator::Open(CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); options_ = cc->Options(); @@ -194,30 +194,30 @@ REGISTER_CALCULATOR(BilateralFilterCalculator); #endif // !MEDIAPIPE_DISABLE_GPU } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status BilateralFilterCalculator::Process(CalculatorContext* cc) { +mediapipe::Status BilateralFilterCalculator::Process(CalculatorContext* cc) { if (use_gpu_) { #if !defined(MEDIAPIPE_DISABLE_GPU) MP_RETURN_IF_ERROR( - gpu_helper_.RunInGlContext([this, cc]() -> ::mediapipe::Status { + gpu_helper_.RunInGlContext([this, cc]() -> mediapipe::Status { if (!gpu_initialized_) { MP_RETURN_IF_ERROR(GlSetup(cc)); gpu_initialized_ = true; } MP_RETURN_IF_ERROR(RenderGpu(cc)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); #endif // !MEDIAPIPE_DISABLE_GPU } else { MP_RETURN_IF_ERROR(RenderCpu(cc)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status BilateralFilterCalculator::Close(CalculatorContext* cc) { +mediapipe::Status BilateralFilterCalculator::Close(CalculatorContext* cc) { #if !defined(MEDIAPIPE_DISABLE_GPU) gpu_helper_.RunInGlContext([this] { if (program_) glDeleteProgram(program_); @@ -230,13 +230,12 @@ REGISTER_CALCULATOR(BilateralFilterCalculator); }); #endif // !MEDIAPIPE_DISABLE_GPU - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status BilateralFilterCalculator::RenderCpu( - CalculatorContext* cc) { +mediapipe::Status BilateralFilterCalculator::RenderCpu(CalculatorContext* cc) { if (cc->Inputs().Tag(kInputFrameTag).IsEmpty()) { - return ::mediapipe::OkStatus(); + 
return mediapipe::OkStatus(); } const auto& input_frame = cc->Inputs().Tag(kInputFrameTag).Get(); @@ -244,7 +243,7 @@ REGISTER_CALCULATOR(BilateralFilterCalculator); // Only 1 or 3 channel images supported by OpenCV. if ((input_mat.channels() == 1 || input_mat.channels() == 3)) { - return ::mediapipe::InternalError( + return mediapipe::InternalError( "CPU filtering supports only 1 or 3 channel input images."); } @@ -255,7 +254,7 @@ REGISTER_CALCULATOR(BilateralFilterCalculator); if (has_guide_image) { // cv::jointBilateralFilter() is in contrib module 'ximgproc'. - return ::mediapipe::UnimplementedError( + return mediapipe::UnimplementedError( "CPU joint filtering support is not implemented yet."); } else { auto output_mat = mediapipe::formats::MatView(output_frame.get()); @@ -267,13 +266,12 @@ REGISTER_CALCULATOR(BilateralFilterCalculator); cc->Outputs() .Tag(kOutputFrameTag) .Add(output_frame.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status BilateralFilterCalculator::RenderGpu( - CalculatorContext* cc) { +mediapipe::Status BilateralFilterCalculator::RenderGpu(CalculatorContext* cc) { if (cc->Inputs().Tag(kInputFrameTagGpu).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } #if !defined(MEDIAPIPE_DISABLE_GPU) const auto& input_frame = @@ -334,7 +332,7 @@ REGISTER_CALCULATOR(BilateralFilterCalculator); output_texture.Release(); #endif // !MEDIAPIPE_DISABLE_GPU - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void BilateralFilterCalculator::GlRender(CalculatorContext* cc) { @@ -350,7 +348,7 @@ void BilateralFilterCalculator::GlRender(CalculatorContext* cc) { #endif // !MEDIAPIPE_DISABLE_GPU } -::mediapipe::Status BilateralFilterCalculator::GlSetup(CalculatorContext* cc) { +mediapipe::Status BilateralFilterCalculator::GlSetup(CalculatorContext* cc) { #if !defined(MEDIAPIPE_DISABLE_GPU) const GLint attr_location[NUM_ATTRIBUTES] = { ATTRIB_VERTEX, @@ -517,7 +515,7 @@ void BilateralFilterCalculator::GlRender(CalculatorContext* cc) { #endif // !MEDIAPIPE_DISABLE_GPU - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/image/color_convert_calculator.cc b/mediapipe/calculators/image/color_convert_calculator.cc index f31586d9d..b0f72d29b 100644 --- a/mediapipe/calculators/image/color_convert_calculator.cc +++ b/mediapipe/calculators/image/color_convert_calculator.cc @@ -78,12 +78,12 @@ constexpr char kGrayOutTag[] = "GRAY_OUT"; class ColorConvertCalculator : public CalculatorBase { public: ~ColorConvertCalculator() override = default; - static ::mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Process(CalculatorContext* cc) override; + static mediapipe::Status GetContract(CalculatorContract* cc); + mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: @@ -91,17 +91,16 @@ class ColorConvertCalculator : public CalculatorBase { // conversion. The ImageFrame on input_tag is converted using the // open_cv_convert_code provided and then output on the output_tag stream. // Note that the output_format must match the destination conversion code. 
- ::mediapipe::Status ConvertAndOutput(const std::string& input_tag, - const std::string& output_tag, - ImageFormat::Format output_format, - int open_cv_convert_code, - CalculatorContext* cc); + mediapipe::Status ConvertAndOutput(const std::string& input_tag, + const std::string& output_tag, + ImageFormat::Format output_format, + int open_cv_convert_code, + CalculatorContext* cc); }; REGISTER_CALCULATOR(ColorConvertCalculator); -::mediapipe::Status ColorConvertCalculator::GetContract( - CalculatorContract* cc) { +mediapipe::Status ColorConvertCalculator::GetContract(CalculatorContract* cc) { RET_CHECK_EQ(cc->Inputs().NumEntries(), 1) << "Only one input stream is allowed."; RET_CHECK_EQ(cc->Outputs().NumEntries(), 1) @@ -139,10 +138,10 @@ REGISTER_CALCULATOR(ColorConvertCalculator); cc->Outputs().Tag(kBgraOutTag).Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ColorConvertCalculator::ConvertAndOutput( +mediapipe::Status ColorConvertCalculator::ConvertAndOutput( const std::string& input_tag, const std::string& output_tag, ImageFormat::Format output_format, int open_cv_convert_code, CalculatorContext* cc) { @@ -161,10 +160,10 @@ REGISTER_CALCULATOR(ColorConvertCalculator); cc->Outputs() .Tag(output_tag) .Add(output_frame.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ColorConvertCalculator::Process(CalculatorContext* cc) { +mediapipe::Status ColorConvertCalculator::Process(CalculatorContext* cc) { // RGBA -> RGB if (cc->Inputs().HasTag(kRgbaInTag) && cc->Outputs().HasTag(kRgbOutTag)) { return ConvertAndOutput(kRgbaInTag, kRgbOutTag, ImageFormat::SRGB, @@ -196,7 +195,7 @@ REGISTER_CALCULATOR(ColorConvertCalculator); cv::COLOR_RGBA2BGRA, cc); } - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Unsupported image format conversion."; } diff --git a/mediapipe/calculators/image/feature_detector_calculator.cc b/mediapipe/calculators/image/feature_detector_calculator.cc index 9f873740e..d3b774bca 100644 --- a/mediapipe/calculators/image/feature_detector_calculator.cc +++ b/mediapipe/calculators/image/feature_detector_calculator.cc @@ -50,15 +50,15 @@ class FeatureDetectorCalculator : public CalculatorBase { public: ~FeatureDetectorCalculator() override = default; - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: FeatureDetectorCalculatorOptions options_; cv::Ptr feature_detector_; - std::unique_ptr<::mediapipe::ThreadPool> pool_; + std::unique_ptr pool_; // Create image pyramid based on input image. 
void ComputeImagePyramid(const cv::Mat& input_image, @@ -71,7 +71,7 @@ class FeatureDetectorCalculator : public CalculatorBase { REGISTER_CALCULATOR(FeatureDetectorCalculator); -::mediapipe::Status FeatureDetectorCalculator::GetContract( +mediapipe::Status FeatureDetectorCalculator::GetContract( CalculatorContract* cc) { if (cc->Inputs().HasTag("IMAGE")) { cc->Inputs().Tag("IMAGE").Set(); @@ -85,26 +85,26 @@ REGISTER_CALCULATOR(FeatureDetectorCalculator); if (cc->Outputs().HasTag("PATCHES")) { cc->Outputs().Tag("PATCHES").Set>(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status FeatureDetectorCalculator::Open(CalculatorContext* cc) { +mediapipe::Status FeatureDetectorCalculator::Open(CalculatorContext* cc) { options_ = tool::RetrieveOptions(cc->Options(), cc->InputSidePackets(), kOptionsTag) .GetExtension(FeatureDetectorCalculatorOptions::ext); feature_detector_ = cv::ORB::create( options_.max_features(), options_.scale_factor(), options_.pyramid_level(), kPatchSize - 1, 0, 2, cv::ORB::FAST_SCORE); - pool_ = absl::make_unique<::mediapipe::ThreadPool>("ThreadPool", kNumThreads); + pool_ = absl::make_unique("ThreadPool", kNumThreads); pool_->StartWorkers(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status FeatureDetectorCalculator::Process(CalculatorContext* cc) { +mediapipe::Status FeatureDetectorCalculator::Process(CalculatorContext* cc) { const Timestamp& timestamp = cc->InputTimestamp(); if (timestamp == Timestamp::PreStream()) { // Indicator packet. - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } InputStream* input_frame = &(cc->Inputs().Tag("IMAGE")); cv::Mat input_view = formats::MatView(&input_frame->Get()); @@ -176,7 +176,7 @@ REGISTER_CALCULATOR(FeatureDetectorCalculator); cc->Outputs().Tag("PATCHES").Add(patches.release(), timestamp); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void FeatureDetectorCalculator::ComputeImagePyramid( diff --git a/mediapipe/calculators/image/image_cropping_calculator.cc b/mediapipe/calculators/image/image_cropping_calculator.cc index b008a9e1e..591fcb47b 100644 --- a/mediapipe/calculators/image/image_cropping_calculator.cc +++ b/mediapipe/calculators/image/image_cropping_calculator.cc @@ -53,8 +53,7 @@ constexpr char kWidthTag[] = "WIDTH"; REGISTER_CALCULATOR(ImageCroppingCalculator); -::mediapipe::Status ImageCroppingCalculator::GetContract( - CalculatorContract* cc) { +mediapipe::Status ImageCroppingCalculator::GetContract(CalculatorContract* cc) { RET_CHECK(cc->Inputs().HasTag(kImageTag) ^ cc->Inputs().HasTag(kImageGpuTag)); RET_CHECK(cc->Outputs().HasTag(kImageTag) ^ cc->Outputs().HasTag(kImageGpuTag)); @@ -116,10 +115,10 @@ REGISTER_CALCULATOR(ImageCroppingCalculator); #endif // !MEDIAPIPE_DISABLE_GPU } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ImageCroppingCalculator::Open(CalculatorContext* cc) { +mediapipe::Status ImageCroppingCalculator::Open(CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); if (cc->Inputs().HasTag(kImageGpuTag)) { @@ -147,38 +146,38 @@ REGISTER_CALCULATOR(ImageCroppingCalculator); MP_RETURN_IF_ERROR(ValidateBorderModeForCPU(cc)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ImageCroppingCalculator::Process(CalculatorContext* cc) { +mediapipe::Status ImageCroppingCalculator::Process(CalculatorContext* cc) { if (cc->Inputs().HasTag(kRectTag) && cc->Inputs().Tag(kRectTag).IsEmpty()) { VLOG(1) << "RECT is 
empty for timestamp: " << cc->InputTimestamp(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } if (cc->Inputs().HasTag(kNormRectTag) && cc->Inputs().Tag(kNormRectTag).IsEmpty()) { VLOG(1) << "NORM_RECT is empty for timestamp: " << cc->InputTimestamp(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } if (use_gpu_) { #if !defined(MEDIAPIPE_DISABLE_GPU) MP_RETURN_IF_ERROR( - gpu_helper_.RunInGlContext([this, cc]() -> ::mediapipe::Status { + gpu_helper_.RunInGlContext([this, cc]() -> mediapipe::Status { if (!gpu_initialized_) { MP_RETURN_IF_ERROR(InitGpu(cc)); gpu_initialized_ = true; } MP_RETURN_IF_ERROR(RenderGpu(cc)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); #endif // !MEDIAPIPE_DISABLE_GPU } else { MP_RETURN_IF_ERROR(RenderCpu(cc)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ImageCroppingCalculator::Close(CalculatorContext* cc) { +mediapipe::Status ImageCroppingCalculator::Close(CalculatorContext* cc) { #if !defined(MEDIAPIPE_DISABLE_GPU) gpu_helper_.RunInGlContext([this] { if (program_) glDeleteProgram(program_); @@ -187,16 +186,16 @@ REGISTER_CALCULATOR(ImageCroppingCalculator); gpu_initialized_ = false; #endif // !MEDIAPIPE_DISABLE_GPU - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ImageCroppingCalculator::ValidateBorderModeForCPU( +mediapipe::Status ImageCroppingCalculator::ValidateBorderModeForCPU( CalculatorContext* cc) { int border_mode; return GetBorderModeForOpenCV(cc, &border_mode); } -::mediapipe::Status ImageCroppingCalculator::ValidateBorderModeForGPU( +mediapipe::Status ImageCroppingCalculator::ValidateBorderModeForGPU( CalculatorContext* cc) { mediapipe::ImageCroppingCalculatorOptions options = cc->Options(); @@ -213,12 +212,12 @@ REGISTER_CALCULATOR(ImageCroppingCalculator); << options.border_mode(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ImageCroppingCalculator::RenderCpu(CalculatorContext* cc) { +mediapipe::Status ImageCroppingCalculator::RenderCpu(CalculatorContext* cc) { if (cc->Inputs().Tag(kImageTag).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } const auto& input_img = cc->Inputs().Tag(kImageTag).Get(); cv::Mat input_mat = formats::MatView(&input_img); @@ -268,12 +267,12 @@ REGISTER_CALCULATOR(ImageCroppingCalculator); cropped_image.copyTo(output_mat); cc->Outputs().Tag(kImageTag).Add(output_frame.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ImageCroppingCalculator::RenderGpu(CalculatorContext* cc) { +mediapipe::Status ImageCroppingCalculator::RenderGpu(CalculatorContext* cc) { if (cc->Inputs().Tag(kImageGpuTag).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } #if !defined(MEDIAPIPE_DISABLE_GPU) const Packet& input_packet = cc->Inputs().Tag(kImageGpuTag).Value(); @@ -308,7 +307,7 @@ REGISTER_CALCULATOR(ImageCroppingCalculator); dst_tex.Release(); #endif // !MEDIAPIPE_DISABLE_GPU - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void ImageCroppingCalculator::GlRender() { @@ -359,7 +358,7 @@ void ImageCroppingCalculator::GlRender() { #endif // !MEDIAPIPE_DISABLE_GPU } -::mediapipe::Status ImageCroppingCalculator::InitGpu(CalculatorContext* cc) { +mediapipe::Status ImageCroppingCalculator::InitGpu(CalculatorContext* cc) { #if !defined(MEDIAPIPE_DISABLE_GPU) const GLint attr_location[NUM_ATTRIBUTES] = { 
ATTRIB_VERTEX, @@ -408,7 +407,7 @@ void ImageCroppingCalculator::GlRender() { glUniform1i(glGetUniformLocation(program_, "input_frame"), 1); #endif // !MEDIAPIPE_DISABLE_GPU - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // For GPU only. @@ -534,7 +533,7 @@ RectSpec ImageCroppingCalculator::GetCropSpecs(const CalculatorContext* cc, return {crop_width, crop_height, x_center, y_center, rotation}; } -::mediapipe::Status ImageCroppingCalculator::GetBorderModeForOpenCV( +mediapipe::Status ImageCroppingCalculator::GetBorderModeForOpenCV( CalculatorContext* cc, int* border_mode) { mediapipe::ImageCroppingCalculatorOptions options = cc->Options(); @@ -551,7 +550,7 @@ RectSpec ImageCroppingCalculator::GetCropSpecs(const CalculatorContext* cc, << options.border_mode(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/image/image_cropping_calculator.h b/mediapipe/calculators/image/image_cropping_calculator.h index 5d50b6647..2f0324879 100644 --- a/mediapipe/calculators/image/image_cropping_calculator.h +++ b/mediapipe/calculators/image/image_cropping_calculator.h @@ -58,24 +58,24 @@ class ImageCroppingCalculator : public CalculatorBase { ImageCroppingCalculator() = default; ~ImageCroppingCalculator() override = default; - static ::mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; + static mediapipe::Status GetContract(CalculatorContract* cc); + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; static RectSpec GetCropSpecs(const CalculatorContext* cc, int src_width, int src_height); private: - ::mediapipe::Status ValidateBorderModeForCPU(CalculatorContext* cc); - ::mediapipe::Status ValidateBorderModeForGPU(CalculatorContext* cc); - ::mediapipe::Status RenderCpu(CalculatorContext* cc); - ::mediapipe::Status RenderGpu(CalculatorContext* cc); - ::mediapipe::Status InitGpu(CalculatorContext* cc); + mediapipe::Status ValidateBorderModeForCPU(CalculatorContext* cc); + mediapipe::Status ValidateBorderModeForGPU(CalculatorContext* cc); + mediapipe::Status RenderCpu(CalculatorContext* cc); + mediapipe::Status RenderGpu(CalculatorContext* cc); + mediapipe::Status InitGpu(CalculatorContext* cc); void GlRender(); void GetOutputDimensions(CalculatorContext* cc, int src_width, int src_height, int* dst_width, int* dst_height); - ::mediapipe::Status GetBorderModeForOpenCV(CalculatorContext* cc, - int* border_mode); + mediapipe::Status GetBorderModeForOpenCV(CalculatorContext* cc, + int* border_mode); mediapipe::ImageCroppingCalculatorOptions options_; diff --git a/mediapipe/calculators/image/image_file_properties_calculator.cc b/mediapipe/calculators/image/image_file_properties_calculator.cc index 82af9ef8a..a0636acbe 100644 --- a/mediapipe/calculators/image/image_file_properties_calculator.cc +++ b/mediapipe/calculators/image/image_file_properties_calculator.cc @@ -28,23 +28,24 @@ namespace { // sqrt(36^2 + 24^2). 
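 // That is, 36^2 + 24^2 = 1296 + 576 = 1872, so the diagonal of a full-frame
 // 36x24 mm sensor is sqrt(1872) ~= 43.27 mm.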
static const double SENSOR_DIAGONAL_35MM = std::sqrt(1872.0); -::mediapipe::StatusOr<double> ComputeFocalLengthInPixels( - int image_width, int image_height, double focal_length_35mm, - double focal_length_mm) { +mediapipe::StatusOr<double> ComputeFocalLengthInPixels(int image_width, + int image_height, + double focal_length_35mm, + double focal_length_mm) { // TODO: Allow returning image file properties even when focal length // computation is not possible. if (image_width == 0 || image_height == 0) { - return ::mediapipe::InternalError( + return mediapipe::InternalError( "Image dimensions should be non-zero to compute focal length in " "pixels."); } if (focal_length_mm == 0) { - return ::mediapipe::InternalError( + return mediapipe::InternalError( "Focal length in mm should be non-zero to compute focal length in " "pixels."); } if (focal_length_35mm == 0) { - return ::mediapipe::InternalError( + return mediapipe::InternalError( "Focal length in 35 mm should be non-zero to compute focal length in " "pixels."); } @@ -76,13 +77,13 @@ static const double SENSOR_DIAGONAL_35MM = std::sqrt(1872.0); return focal_length_pixels; } -::mediapipe::StatusOr<ImageFileProperties> GetImageFileProperites( +mediapipe::StatusOr<ImageFileProperties> GetImageFileProperites( const std::string& image_bytes) { easyexif::EXIFInfo result; int code = result.parseFrom(image_bytes); if (code) { - return ::mediapipe::InternalError("Error parsing EXIF, code: " + - std::to_string(code)); + return mediapipe::InternalError("Error parsing EXIF, code: " + + std::to_string(code)); } ImageFileProperties properties; @@ -125,7 +126,7 @@ static const double SENSOR_DIAGONAL_35MM = std::sqrt(1872.0); // } class ImageFilePropertiesCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { if (cc->Inputs().NumEntries() != 0) { RET_CHECK(cc->Inputs().NumEntries() == 1); cc->Inputs().Index(0).Set<std::string>(); @@ -141,10 +142,10 @@ class ImageFilePropertiesCalculator : public CalculatorBase { cc->OutputSidePackets().Index(0).Set<::mediapipe::ImageFileProperties>(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->SetOffset(TimestampDiff(0)); if (cc->InputSidePackets().NumEntries() == 1) { @@ -159,13 +160,13 @@ class ImageFilePropertiesCalculator : public CalculatorBase { MakePacket<ImageFileProperties>(properties_)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { if (cc->Inputs().NumEntries() == 1) { if (cc->Inputs().Index(0).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } const std::string& image_bytes = cc->Inputs().Index(0).Get<std::string>(); ASSIGN_OR_RETURN(properties_, GetImageFileProperites(image_bytes)); @@ -179,11 +180,11 @@ class ImageFilePropertiesCalculator : public CalculatorBase { } else { cc->OutputSidePackets().Index(0).Set( MakePacket<ImageFileProperties>(properties_) - .At(::mediapipe::Timestamp::Unset())); + .At(mediapipe::Timestamp::Unset())); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: diff --git a/mediapipe/calculators/image/image_properties_calculator.cc b/mediapipe/calculators/image/image_properties_calculator.cc index be0a65e0d..84e67c0cb 100644 --- a/mediapipe/calculators/image/image_properties_calculator.cc +++ 
b/mediapipe/calculators/image/image_properties_calculator.cc @@ -44,7 +44,7 @@ namespace mediapipe { // } class ImagePropertiesCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { RET_CHECK(cc->Inputs().HasTag(kImageFrameTag) ^ cc->Inputs().HasTag(kGpuBufferTag)); if (cc->Inputs().HasTag(kImageFrameTag)) { @@ -60,15 +60,15 @@ class ImagePropertiesCalculator : public CalculatorBase { cc->Outputs().Tag("SIZE").Set<std::pair<int, int>>(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { int width; int height; @@ -92,7 +92,7 @@ class ImagePropertiesCalculator : public CalculatorBase { MakePacket<std::pair<int, int>>(width, height) .At(cc->InputTimestamp())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(ImagePropertiesCalculator); diff --git a/mediapipe/calculators/image/image_transformation_calculator.cc b/mediapipe/calculators/image/image_transformation_calculator.cc index 3fb4e6807..0c5ff8bdd 100644 --- a/mediapipe/calculators/image/image_transformation_calculator.cc +++ b/mediapipe/calculators/image/image_transformation_calculator.cc @@ -163,16 +163,16 @@ class ImageTransformationCalculator : public CalculatorBase { ImageTransformationCalculator() = default; ~ImageTransformationCalculator() override = default; - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; private: - ::mediapipe::Status RenderCpu(CalculatorContext* cc); - ::mediapipe::Status RenderGpu(CalculatorContext* cc); - ::mediapipe::Status GlSetup(); + mediapipe::Status RenderCpu(CalculatorContext* cc); + mediapipe::Status RenderGpu(CalculatorContext* cc); + mediapipe::Status GlSetup(); void ComputeOutputDimensions(int input_width, int input_height, int* output_width, int* output_height); @@ -199,7 +199,7 @@ class ImageTransformationCalculator : public CalculatorBase { REGISTER_CALCULATOR(ImageTransformationCalculator); // static -::mediapipe::Status ImageTransformationCalculator::GetContract( +mediapipe::Status ImageTransformationCalculator::GetContract( CalculatorContract* cc) { // Only one input can be set, and the output type must match. RET_CHECK(cc->Inputs().HasTag(kImageFrameTag) ^ @@ -254,10 +254,10 @@ REGISTER_CALCULATOR(ImageTransformationCalculator); #endif // !MEDIAPIPE_DISABLE_GPU } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ImageTransformationCalculator::Open(CalculatorContext* cc) { +mediapipe::Status ImageTransformationCalculator::Open(CalculatorContext* cc) { // Inform the framework that we always output at the same timestamp // as we receive a packet at.
cc->SetOffset(TimestampDiff(0)); @@ -311,10 +311,10 @@ REGISTER_CALCULATOR(ImageTransformationCalculator); #endif // !MEDIAPIPE_DISABLE_GPU } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ImageTransformationCalculator::Process( +mediapipe::Status ImageTransformationCalculator::Process( CalculatorContext* cc) { // Override values if specified so. if (cc->Inputs().HasTag("ROTATION_DEGREES") && @@ -334,22 +334,21 @@ REGISTER_CALCULATOR(ImageTransformationCalculator); if (use_gpu_) { #if !defined(MEDIAPIPE_DISABLE_GPU) if (cc->Inputs().Tag(kGpuBufferTag).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } return gpu_helper_.RunInGlContext( - [this, cc]() -> ::mediapipe::Status { return RenderGpu(cc); }); + [this, cc]() -> mediapipe::Status { return RenderGpu(cc); }); #endif // !MEDIAPIPE_DISABLE_GPU } else { if (cc->Inputs().Tag(kImageFrameTag).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } return RenderCpu(cc); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ImageTransformationCalculator::Close( - CalculatorContext* cc) { +mediapipe::Status ImageTransformationCalculator::Close(CalculatorContext* cc) { if (use_gpu_) { #if !defined(MEDIAPIPE_DISABLE_GPU) QuadRenderer* rgb_renderer = rgb_renderer_.release(); @@ -372,10 +371,10 @@ REGISTER_CALCULATOR(ImageTransformationCalculator); #endif // !MEDIAPIPE_DISABLE_GPU } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ImageTransformationCalculator::RenderCpu( +mediapipe::Status ImageTransformationCalculator::RenderCpu( CalculatorContext* cc) { cv::Mat input_mat; mediapipe::ImageFormat::Format format; @@ -480,10 +479,10 @@ REGISTER_CALCULATOR(ImageTransformationCalculator); .Tag(kImageFrameTag) .Add(output_frame.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ImageTransformationCalculator::RenderGpu( +mediapipe::Status ImageTransformationCalculator::RenderGpu( CalculatorContext* cc) { #if !defined(MEDIAPIPE_DISABLE_GPU) const auto& input = cc->Inputs().Tag(kGpuBufferTag).Get(); @@ -570,7 +569,7 @@ REGISTER_CALCULATOR(ImageTransformationCalculator); #endif // !MEDIAPIPE_DISABLE_GPU - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void ImageTransformationCalculator::ComputeOutputDimensions( diff --git a/mediapipe/calculators/image/luminance_calculator.cc b/mediapipe/calculators/image/luminance_calculator.cc index 325745d99..503d2b22e 100644 --- a/mediapipe/calculators/image/luminance_calculator.cc +++ b/mediapipe/calculators/image/luminance_calculator.cc @@ -26,10 +26,10 @@ namespace mediapipe { // See GlSimpleCalculatorBase for inputs, outputs and input side packets. 
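Both GL calculators in this patch (LuminanceCalculator here and SobelEdgesCalculator further down) follow the same pattern: the GlSimpleCalculator base owns the contract and per-frame plumbing, and a subclass overrides only the three GL hooks whose return types this diff rewrites. A minimal sketch of such a subclass; the class name and no-op bodies are hypothetical, while the hook signatures match the ones below.

#include "mediapipe/framework/port/status.h"
#include "mediapipe/gpu/gl_simple_calculator.h"

namespace mediapipe {

class NoOpGlCalculator : public GlSimpleCalculator {
 public:
  mediapipe::Status GlSetup() override {
    // Compile shaders and cache uniform locations once per graph run.
    return mediapipe::OkStatus();
  }
  mediapipe::Status GlRender(const GlTexture& src,
                             const GlTexture& dst) override {
    // Bind src and draw a full-screen quad into dst; omitted in this sketch.
    return mediapipe::OkStatus();
  }
  mediapipe::Status GlTeardown() override {
    // Release any GL programs or buffers created in GlSetup.
    return mediapipe::OkStatus();
  }
};
REGISTER_CALCULATOR(NoOpGlCalculator);

}  // namespace mediapipe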
class LuminanceCalculator : public GlSimpleCalculator { public: - ::mediapipe::Status GlSetup() override; - ::mediapipe::Status GlRender(const GlTexture& src, - const GlTexture& dst) override; - ::mediapipe::Status GlTeardown() override; + mediapipe::Status GlSetup() override; + mediapipe::Status GlRender(const GlTexture& src, + const GlTexture& dst) override; + mediapipe::Status GlTeardown() override; private: GLuint program_ = 0; @@ -37,7 +37,7 @@ class LuminanceCalculator : public GlSimpleCalculator { }; REGISTER_CALCULATOR(LuminanceCalculator); -::mediapipe::Status LuminanceCalculator::GlSetup() { +mediapipe::Status LuminanceCalculator::GlSetup() { // Load vertex and fragment shaders const GLint attr_location[NUM_ATTRIBUTES] = { ATTRIB_VERTEX, @@ -83,11 +83,11 @@ REGISTER_CALCULATOR(LuminanceCalculator); (const GLchar**)&attr_name[0], attr_location, &program_); RET_CHECK(program_) << "Problem initializing the program."; frame_ = glGetUniformLocation(program_, "video_frame"); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status LuminanceCalculator::GlRender(const GlTexture& src, - const GlTexture& dst) { +mediapipe::Status LuminanceCalculator::GlRender(const GlTexture& src, + const GlTexture& dst) { static const GLfloat square_vertices[] = { -1.0f, -1.0f, // bottom left 1.0f, -1.0f, // bottom right @@ -137,15 +137,15 @@ REGISTER_CALCULATOR(LuminanceCalculator); glDeleteVertexArrays(1, &vao); glDeleteBuffers(2, vbo); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status LuminanceCalculator::GlTeardown() { +mediapipe::Status LuminanceCalculator::GlTeardown() { if (program_) { glDeleteProgram(program_); program_ = 0; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/image/mask_overlay_calculator.cc b/mediapipe/calculators/image/mask_overlay_calculator.cc index eda3f4f91..9844f2ed5 100644 --- a/mediapipe/calculators/image/mask_overlay_calculator.cc +++ b/mediapipe/calculators/image/mask_overlay_calculator.cc @@ -52,14 +52,14 @@ class MaskOverlayCalculator : public CalculatorBase { MaskOverlayCalculator() {} ~MaskOverlayCalculator(); - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status GlSetup( + mediapipe::Status GlSetup( const MaskOverlayCalculatorOptions::MaskChannel mask_channel); - ::mediapipe::Status GlRender(const float mask_const); + mediapipe::Status GlRender(const float mask_const); private: GlCalculatorHelper helper_; @@ -73,7 +73,7 @@ class MaskOverlayCalculator : public CalculatorBase { REGISTER_CALCULATOR(MaskOverlayCalculator); // static -::mediapipe::Status MaskOverlayCalculator::GetContract(CalculatorContract* cc) { +mediapipe::Status MaskOverlayCalculator::GetContract(CalculatorContract* cc) { MP_RETURN_IF_ERROR(GlCalculatorHelper::UpdateContract(cc)); cc->Inputs().Get("VIDEO", 0).Set<GpuBuffer>(); cc->Inputs().Get("VIDEO", 1).Set<GpuBuffer>(); @@ -82,14 +82,13 @@ REGISTER_CALCULATOR(MaskOverlayCalculator); else if (cc->Inputs().HasTag("CONST_MASK")) cc->Inputs().Tag("CONST_MASK").Set<float>(); else - return ::mediapipe::Status( - ::mediapipe::StatusCode::kNotFound, - "At least one mask input stream must be present.");
+ return mediapipe::Status(mediapipe::StatusCode::kNotFound, + "At least one mask input stream must be present."); cc->Outputs().Tag("OUTPUT").Set<GpuBuffer>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status MaskOverlayCalculator::Open(CalculatorContext* cc) { +mediapipe::Status MaskOverlayCalculator::Open(CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); if (cc->Inputs().HasTag("MASK")) { use_mask_tex_ = true; @@ -97,8 +96,8 @@ REGISTER_CALCULATOR(MaskOverlayCalculator); return helper_.Open(cc); } -::mediapipe::Status MaskOverlayCalculator::Process(CalculatorContext* cc) { - return helper_.RunInGlContext([this, &cc]() -> ::mediapipe::Status { +mediapipe::Status MaskOverlayCalculator::Process(CalculatorContext* cc) { + return helper_.RunInGlContext([this, &cc]() -> mediapipe::Status { if (!initialized_) { const auto& options = cc->Options<MaskOverlayCalculatorOptions>(); const auto mask_channel = options.mask_channel(); @@ -116,7 +115,7 @@ REGISTER_CALCULATOR(MaskOverlayCalculator); if (mask_packet.IsEmpty()) { cc->Outputs().Tag("OUTPUT").AddPacket(input1_packet); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } const auto& input0_buffer = cc->Inputs().Get("VIDEO", 0).Get<GpuBuffer>(); @@ -173,11 +172,11 @@ REGISTER_CALCULATOR(MaskOverlayCalculator); dst.Release(); cc->Outputs().Tag("OUTPUT").Add(output.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); }); } -::mediapipe::Status MaskOverlayCalculator::GlSetup( +mediapipe::Status MaskOverlayCalculator::GlSetup( const MaskOverlayCalculatorOptions::MaskChannel mask_channel) { // Load vertex and fragment shaders const GLint attr_location[NUM_ATTRIBUTES] = { @@ -248,10 +247,10 @@ REGISTER_CALCULATOR(MaskOverlayCalculator); unif_frame1_ = glGetUniformLocation(program_, "frame1"); unif_frame2_ = glGetUniformLocation(program_, "frame2"); unif_mask_ = glGetUniformLocation(program_, "mask"); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status MaskOverlayCalculator::GlRender(const float mask_const) { +mediapipe::Status MaskOverlayCalculator::GlRender(const float mask_const) { glUseProgram(program_); glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, 0, 0, kBasicSquareVertices); glEnableVertexAttribArray(ATTRIB_VERTEX); @@ -267,7 +266,7 @@ REGISTER_CALCULATOR(MaskOverlayCalculator); glUniform1f(unif_mask_, mask_const); glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } MaskOverlayCalculator::~MaskOverlayCalculator() { diff --git a/mediapipe/calculators/image/opencv_encoded_image_to_image_frame_calculator.cc b/mediapipe/calculators/image/opencv_encoded_image_to_image_frame_calculator.cc index ae87d7511..6d30bc290 100644 --- a/mediapipe/calculators/image/opencv_encoded_image_to_image_frame_calculator.cc +++ b/mediapipe/calculators/image/opencv_encoded_image_to_image_frame_calculator.cc @@ -34,29 +34,29 @@ namespace mediapipe { // } class OpenCvEncodedImageToImageFrameCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + static mediapipe::Status GetContract(CalculatorContract* cc); + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: mediapipe::OpenCvEncodedImageToImageFrameCalculatorOptions options_; }; -::mediapipe::Status
OpenCvEncodedImageToImageFrameCalculator::GetContract( +mediapipe::Status OpenCvEncodedImageToImageFrameCalculator::GetContract( CalculatorContract* cc) { cc->Inputs().Index(0).Set<std::string>(); cc->Outputs().Index(0).Set<ImageFrame>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status OpenCvEncodedImageToImageFrameCalculator::Open( +mediapipe::Status OpenCvEncodedImageToImageFrameCalculator::Open( CalculatorContext* cc) { options_ = cc->Options<mediapipe::OpenCvEncodedImageToImageFrameCalculatorOptions>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status OpenCvEncodedImageToImageFrameCalculator::Process( +mediapipe::Status OpenCvEncodedImageToImageFrameCalculator::Process( CalculatorContext* cc) { const std::string& contents = cc->Inputs().Index(0).Get<std::string>(); const std::vector<char> contents_vector(contents.begin(), contents.end()); @@ -84,10 +84,10 @@ class OpenCvEncodedImageToImageFrameCalculator : public CalculatorBase { cv::cvtColor(decoded_mat, output_mat, cv::COLOR_BGR2RGB); break; case 4: - return ::mediapipe::UnimplementedErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnimplementedErrorBuilder(MEDIAPIPE_LOC) << "4-channel image isn't supported yet"; default: - return ::mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) << "Unsupported number of channels: " << decoded_mat.channels(); } std::unique_ptr<ImageFrame> output_frame = absl::make_unique<ImageFrame>( @@ -95,7 +95,7 @@ class OpenCvEncodedImageToImageFrameCalculator : public CalculatorBase { ImageFrame::kGlDefaultAlignmentBoundary); output_mat.copyTo(formats::MatView(output_frame.get())); cc->Outputs().Index(0).Add(output_frame.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } REGISTER_CALCULATOR(OpenCvEncodedImageToImageFrameCalculator); diff --git a/mediapipe/calculators/image/opencv_image_encoder_calculator.cc b/mediapipe/calculators/image/opencv_image_encoder_calculator.cc index efe79d99c..6f72346da 100644 --- a/mediapipe/calculators/image/opencv_image_encoder_calculator.cc +++ b/mediapipe/calculators/image/opencv_image_encoder_calculator.cc @@ -38,30 +38,29 @@ namespace mediapipe { // } class OpenCvImageEncoderCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; + static mediapipe::Status GetContract(CalculatorContract* cc); + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; private: int encoding_quality_; }; -::mediapipe::Status OpenCvImageEncoderCalculator::GetContract( +mediapipe::Status OpenCvImageEncoderCalculator::GetContract( CalculatorContract* cc) { cc->Inputs().Index(0).Set<ImageFrame>(); cc->Outputs().Index(0).Set<OpenCvImageEncoderCalculatorResults>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status OpenCvImageEncoderCalculator::Open(CalculatorContext* cc) { +mediapipe::Status OpenCvImageEncoderCalculator::Open(CalculatorContext* cc) { auto options = cc->Options<mediapipe::OpenCvImageEncoderCalculatorOptions>(); encoding_quality_ = options.quality(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status OpenCvImageEncoderCalculator::Process( - CalculatorContext* cc) { +mediapipe::Status OpenCvImageEncoderCalculator::Process(CalculatorContext* cc) { const ImageFrame& image_frame =
cc->Inputs().Index(0).Get<ImageFrame>(); CHECK_EQ(1, image_frame.ByteDepth()); @@ -85,10 +84,10 @@ class OpenCvImageEncoderCalculator : public CalculatorBase { encoded_result->set_colorspace(OpenCvImageEncoderCalculatorResults::RGB); break; case 4: - return ::mediapipe::UnimplementedErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnimplementedErrorBuilder(MEDIAPIPE_LOC) << "4-channel image isn't supported yet"; default: - return ::mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) << "Unsupported number of channels: " << original_mat.channels(); } @@ -101,7 +100,7 @@ class OpenCvImageEncoderCalculator : public CalculatorBase { // Check its JpegEncoder::write() in "imgcodecs/src/grfmt_jpeg.cpp" for more // info. if (!cv::imencode(".jpg", input_mat, encode_buffer, parameters)) { - return ::mediapipe::InternalErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InternalErrorBuilder(MEDIAPIPE_LOC) << "Fail to encode the image to be jpeg format."; } @@ -109,11 +108,11 @@ class OpenCvImageEncoderCalculator : public CalculatorBase { reinterpret_cast<const char*>(&encode_buffer[0]), encode_buffer.size()))); cc->Outputs().Index(0).Add(encoded_result.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status OpenCvImageEncoderCalculator::Close(CalculatorContext* cc) { - return ::mediapipe::OkStatus(); +mediapipe::Status OpenCvImageEncoderCalculator::Close(CalculatorContext* cc) { + return mediapipe::OkStatus(); } REGISTER_CALCULATOR(OpenCvImageEncoderCalculator); diff --git a/mediapipe/calculators/image/opencv_put_text_calculator.cc b/mediapipe/calculators/image/opencv_put_text_calculator.cc index 07f6f0dbf..e7769486f 100644 --- a/mediapipe/calculators/image/opencv_put_text_calculator.cc +++ b/mediapipe/calculators/image/opencv_put_text_calculator.cc @@ -32,18 +32,17 @@ namespace mediapipe { // TODO: Generalize the calculator for other text use cases.
class OpenCvPutTextCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Process(CalculatorContext* cc) override; + static mediapipe::Status GetContract(CalculatorContract* cc); + mediapipe::Status Process(CalculatorContext* cc) override; }; -::mediapipe::Status OpenCvPutTextCalculator::GetContract( - CalculatorContract* cc) { +mediapipe::Status OpenCvPutTextCalculator::GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set<std::string>(); cc->Outputs().Index(0).Set<ImageFrame>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status OpenCvPutTextCalculator::Process(CalculatorContext* cc) { +mediapipe::Status OpenCvPutTextCalculator::Process(CalculatorContext* cc) { const std::string& text_content = cc->Inputs().Index(0).Get<std::string>(); cv::Mat mat = cv::Mat::zeros(640, 640, CV_8UC4); cv::putText(mat, text_content, cv::Point(15, 70), cv::FONT_HERSHEY_PLAIN, 3, @@ -52,7 +51,7 @@ class OpenCvPutTextCalculator : public CalculatorBase { ImageFormat::SRGBA, mat.size().width, mat.size().height); mat.copyTo(formats::MatView(output_frame.get())); cc->Outputs().Index(0).Add(output_frame.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } REGISTER_CALCULATOR(OpenCvPutTextCalculator); diff --git a/mediapipe/calculators/image/recolor_calculator.cc b/mediapipe/calculators/image/recolor_calculator.cc index c8d3d1725..db0a46c7f 100644 --- a/mediapipe/calculators/image/recolor_calculator.cc +++ b/mediapipe/calculators/image/recolor_calculator.cc @@ -84,17 +84,17 @@ class RecolorCalculator : public CalculatorBase { RecolorCalculator() = default; ~RecolorCalculator() override = default; - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; private: - ::mediapipe::Status LoadOptions(CalculatorContext* cc); - ::mediapipe::Status InitGpu(CalculatorContext* cc); - ::mediapipe::Status RenderGpu(CalculatorContext* cc); - ::mediapipe::Status RenderCpu(CalculatorContext* cc); + mediapipe::Status LoadOptions(CalculatorContext* cc); + mediapipe::Status InitGpu(CalculatorContext* cc); + mediapipe::Status RenderGpu(CalculatorContext* cc); + mediapipe::Status RenderCpu(CalculatorContext* cc); void GlRender(); bool initialized_ = false; @@ -110,7 +110,7 @@ class RecolorCalculator : public CalculatorBase { REGISTER_CALCULATOR(RecolorCalculator); // static -::mediapipe::Status RecolorCalculator::GetContract(CalculatorContract* cc) { +mediapipe::Status RecolorCalculator::GetContract(CalculatorContract* cc) { RET_CHECK(!cc->Inputs().GetTags().empty()); RET_CHECK(!cc->Outputs().GetTags().empty()); @@ -159,10 +159,10 @@ REGISTER_CALCULATOR(RecolorCalculator); #endif // !MEDIAPIPE_DISABLE_GPU } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status RecolorCalculator::Open(CalculatorContext* cc) { +mediapipe::Status RecolorCalculator::Open(CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); if (cc->Inputs().HasTag(kGpuBufferTag)) { @@ -174,29 +174,29 @@ REGISTER_CALCULATOR(RecolorCalculator);
MP_RETURN_IF_ERROR(LoadOptions(cc)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status RecolorCalculator::Process(CalculatorContext* cc) { +mediapipe::Status RecolorCalculator::Process(CalculatorContext* cc) { if (use_gpu_) { #if !defined(MEDIAPIPE_DISABLE_GPU) MP_RETURN_IF_ERROR( - gpu_helper_.RunInGlContext([this, &cc]() -> ::mediapipe::Status { + gpu_helper_.RunInGlContext([this, &cc]() -> mediapipe::Status { if (!initialized_) { MP_RETURN_IF_ERROR(InitGpu(cc)); initialized_ = true; } MP_RETURN_IF_ERROR(RenderGpu(cc)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); #endif // !MEDIAPIPE_DISABLE_GPU } else { MP_RETURN_IF_ERROR(RenderCpu(cc)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status RecolorCalculator::Close(CalculatorContext* cc) { +mediapipe::Status RecolorCalculator::Close(CalculatorContext* cc) { #if !defined(MEDIAPIPE_DISABLE_GPU) gpu_helper_.RunInGlContext([this] { if (program_) glDeleteProgram(program_); @@ -204,12 +204,12 @@ REGISTER_CALCULATOR(RecolorCalculator); }); #endif // !MEDIAPIPE_DISABLE_GPU - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status RecolorCalculator::RenderCpu(CalculatorContext* cc) { +mediapipe::Status RecolorCalculator::RenderCpu(CalculatorContext* cc) { if (cc->Inputs().Tag(kMaskCpuTag).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Get inputs and setup output. const auto& input_img = cc->Inputs().Tag(kImageFrameTag).Get(); @@ -265,12 +265,12 @@ REGISTER_CALCULATOR(RecolorCalculator); .Tag(kImageFrameTag) .Add(output_img.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status RecolorCalculator::RenderGpu(CalculatorContext* cc) { +mediapipe::Status RecolorCalculator::RenderGpu(CalculatorContext* cc) { if (cc->Inputs().Tag(kMaskGpuTag).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } #if !defined(MEDIAPIPE_DISABLE_GPU) // Get inputs and setup output. 
@@ -313,7 +313,7 @@ REGISTER_CALCULATOR(RecolorCalculator); dst_tex.Release(); #endif // !MEDIAPIPE_DISABLE_GPU - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void RecolorCalculator::GlRender() { @@ -368,7 +368,7 @@ void RecolorCalculator::GlRender() { #endif // !MEDIAPIPE_DISABLE_GPU } -::mediapipe::Status RecolorCalculator::LoadOptions(CalculatorContext* cc) { +mediapipe::Status RecolorCalculator::LoadOptions(CalculatorContext* cc) { const auto& options = cc->Options(); mask_channel_ = options.mask_channel(); @@ -379,10 +379,10 @@ void RecolorCalculator::GlRender() { color_.push_back(options.color().g()); color_.push_back(options.color().b()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status RecolorCalculator::InitGpu(CalculatorContext* cc) { +mediapipe::Status RecolorCalculator::InitGpu(CalculatorContext* cc) { #if !defined(MEDIAPIPE_DISABLE_GPU) const GLint attr_location[NUM_ATTRIBUTES] = { ATTRIB_VERTEX, @@ -454,7 +454,7 @@ void RecolorCalculator::GlRender() { color_[1] / 255.0, color_[2] / 255.0); #endif // !MEDIAPIPE_DISABLE_GPU - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/image/scale_image_calculator.cc b/mediapipe/calculators/image/scale_image_calculator.cc index ac441689e..6d321b474 100644 --- a/mediapipe/calculators/image/scale_image_calculator.cc +++ b/mediapipe/calculators/image/scale_image_calculator.cc @@ -44,7 +44,7 @@ namespace { // Given an upscaling algorithm, determine which OpenCV interpolation algorithm // to use. -::mediapipe::Status FindInterpolationAlgorithm( +mediapipe::Status FindInterpolationAlgorithm( ScaleImageCalculatorOptions::ScaleAlgorithm upscaling_algorithm, int* interpolation_algorithm) { switch (upscaling_algorithm) { @@ -70,7 +70,7 @@ namespace { RET_CHECK_FAIL() << absl::Substitute("Unknown upscaling algorithm: $0", upscaling_algorithm); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void CropImageFrame(const ImageFrame& original, int col_start, int row_start, @@ -147,7 +147,7 @@ class ScaleImageCalculator : public CalculatorBase { ScaleImageCalculator(); ~ScaleImageCalculator() override; - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { ScaleImageCalculatorOptions options = cc->Options(); @@ -184,35 +184,35 @@ class ScaleImageCalculator : public CalculatorBase { if (cc->Inputs().HasTag("OVERRIDE_OPTIONS")) { cc->Inputs().Tag("OVERRIDE_OPTIONS").Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // From Calculator. - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: // Initialize some data members from options_. This can be called either from // Open or Process depending on whether OVERRIDE_OPTIONS is used. - ::mediapipe::Status InitializeFromOptions(); + mediapipe::Status InitializeFromOptions(); // Initialize crop and output parameters based on set member variable // values. This function will also send the header information on // the VIDEO_HEADER stream if it hasn't been done yet. 
- ::mediapipe::Status InitializeFrameInfo(CalculatorContext* cc); + mediapipe::Status InitializeFrameInfo(CalculatorContext* cc); // Validate that input_format_ and output_format_ are supported image // formats. - ::mediapipe::Status ValidateImageFormats() const; + mediapipe::Status ValidateImageFormats() const; // Validate that the image frame has the proper format and dimensions. // If the dimensions and format weren't initialized by the header, // then the first frame on which this function is called is used // to initialize. - ::mediapipe::Status ValidateImageFrame(CalculatorContext* cc, - const ImageFrame& image_frame); + mediapipe::Status ValidateImageFrame(CalculatorContext* cc, + const ImageFrame& image_frame); // Validate that the YUV image has the proper dimensions. If the // dimensions weren't initialized by the header, then the first image // on which this function is called is used to initialize. - ::mediapipe::Status ValidateYUVImage(CalculatorContext* cc, - const YUVImage& yuv_image); + mediapipe::Status ValidateYUVImage(CalculatorContext* cc, + const YUVImage& yuv_image); bool has_header_; // True if the input stream has a header. int input_width_; @@ -251,7 +251,7 @@ ScaleImageCalculator::ScaleImageCalculator() {} ScaleImageCalculator::~ScaleImageCalculator() {} -::mediapipe::Status ScaleImageCalculator::InitializeFrameInfo( +mediapipe::Status ScaleImageCalculator::InitializeFrameInfo( CalculatorContext* cc) { MP_RETURN_IF_ERROR( scale_image::FindCropDimensions(input_width_, input_height_, // @@ -299,10 +299,10 @@ ScaleImageCalculator::~ScaleImageCalculator() {} .Add(header.release(), Timestamp::PreStream()); cc->Outputs().Tag("VIDEO_HEADER").Close(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ScaleImageCalculator::Open(CalculatorContext* cc) { +mediapipe::Status ScaleImageCalculator::Open(CalculatorContext* cc) { options_ = cc->Options(); input_data_id_ = cc->Inputs().GetId("FRAMES", 0); @@ -339,7 +339,7 @@ ScaleImageCalculator::~ScaleImageCalculator() {} // has a header. At this point in the code, the ScaleImageCalculator // config may be changed by the new options at PreStream, so the output // header can't be determined. 
- return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "OVERRIDE_OPTIONS stream can't be used when the main input stream " "has a header."); } @@ -406,10 +406,10 @@ ScaleImageCalculator::~ScaleImageCalculator() {} } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ScaleImageCalculator::InitializeFromOptions() { +mediapipe::Status ScaleImageCalculator::InitializeFromOptions() { if (options_.has_input_format()) { input_format_ = options_.input_format(); } else { @@ -423,10 +423,10 @@ ScaleImageCalculator::~ScaleImageCalculator() {} downscaler_.reset(new ImageResizer(options_.post_sharpening_coefficient())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ScaleImageCalculator::ValidateImageFormats() const { +mediapipe::Status ScaleImageCalculator::ValidateImageFormats() const { RET_CHECK_NE(input_format_, ImageFormat::UNKNOWN) << "The input image format was UNKNOWN."; RET_CHECK_NE(output_format_, ImageFormat::UNKNOWN) @@ -440,10 +440,10 @@ ScaleImageCalculator::~ScaleImageCalculator() {} input_format_ == ImageFormat::YCBCR420P) << "Conversion of the color space (except from " "YCbCr420P to SRGB) is not yet supported."; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ScaleImageCalculator::ValidateImageFrame( +mediapipe::Status ScaleImageCalculator::ValidateImageFrame( CalculatorContext* cc, const ImageFrame& image_frame) { if (!has_header_) { if (input_width_ != image_frame.Width() || @@ -494,10 +494,10 @@ ScaleImageCalculator::~ScaleImageCalculator() {} image_frame_format_desc, " but expected ", input_format_desc)); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ScaleImageCalculator::ValidateYUVImage( +mediapipe::Status ScaleImageCalculator::ValidateYUVImage( CalculatorContext* cc, const YUVImage& yuv_image) { CHECK_EQ(input_format_, ImageFormat::YCBCR420P); if (!has_header_) { @@ -528,14 +528,14 @@ ScaleImageCalculator::~ScaleImageCalculator() {} input_width_, "x", input_height_)); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ScaleImageCalculator::Process(CalculatorContext* cc) { +mediapipe::Status ScaleImageCalculator::Process(CalculatorContext* cc) { if (cc->InputTimestamp() == Timestamp::PreStream()) { if (cc->Inputs().HasTag("OVERRIDE_OPTIONS")) { if (cc->Inputs().Tag("OVERRIDE_OPTIONS").IsEmpty()) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "The OVERRIDE_OPTIONS input stream must be non-empty at PreStream " "time if used."); } @@ -549,7 +549,7 @@ ScaleImageCalculator::~ScaleImageCalculator() {} input_video_header_ = cc->Inputs().Tag("VIDEO_HEADER").Get(); } if (cc->Inputs().Get(input_data_id_).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } @@ -603,7 +603,7 @@ ScaleImageCalculator::~ScaleImageCalculator() {} cc->Outputs() .Get(output_data_id_) .Add(output_image.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } else { image_frame = &cc->Inputs().Get(input_data_id_).Get(); @@ -664,7 +664,7 @@ ScaleImageCalculator::~ScaleImageCalculator() {} .Add(output_frame.release(), cc->InputTimestamp()); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Rescale the image frame. 
@@ -698,7 +698,7 @@ ScaleImageCalculator::~ScaleImageCalculator() {} cc->Outputs() .Get(output_data_id_) .Add(output_frame.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/image/scale_image_utils.cc b/mediapipe/calculators/image/scale_image_utils.cc index 3225521a5..20049af76 100644 --- a/mediapipe/calculators/image/scale_image_utils.cc +++ b/mediapipe/calculators/image/scale_image_utils.cc @@ -35,11 +35,11 @@ double ParseRational(const std::string& rational) { } } // namespace -::mediapipe::Status FindCropDimensions(int input_width, int input_height, // - const std::string& min_aspect_ratio, // - const std::string& max_aspect_ratio, // - int* crop_width, int* crop_height, // - int* col_start, int* row_start) { +mediapipe::Status FindCropDimensions(int input_width, int input_height, // + const std::string& min_aspect_ratio, // + const std::string& max_aspect_ratio, // + int* crop_width, int* crop_height, // + int* col_start, int* row_start) { CHECK(crop_width); CHECK(crop_height); CHECK(col_start); @@ -85,17 +85,16 @@ double ParseRational(const std::string& rational) { CHECK_LE(*crop_width, input_width); CHECK_LE(*crop_height, input_height); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status FindOutputDimensions(int input_width, // - int input_height, // - int target_width, // - int target_height, // - bool preserve_aspect_ratio, // - int scale_to_multiple_of, // - int* output_width, - int* output_height) { +mediapipe::Status FindOutputDimensions(int input_width, // + int input_height, // + int target_width, // + int target_height, // + bool preserve_aspect_ratio, // + int scale_to_multiple_of, // + int* output_width, int* output_height) { CHECK(output_width); CHECK(output_height); @@ -123,7 +122,7 @@ double ParseRational(const std::string& rational) { *output_width = target_width; *output_height = target_height; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } if (target_width > 0) { @@ -140,7 +139,7 @@ double ParseRational(const std::string& rational) { // was within the image, so use these dimensions. *output_width = try_width; *output_height = try_height; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } @@ -158,7 +157,7 @@ double ParseRational(const std::string& rational) { // was within the image, so use these dimensions. *output_width = try_width; *output_height = try_height; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } RET_CHECK_FAIL() diff --git a/mediapipe/calculators/image/scale_image_utils.h b/mediapipe/calculators/image/scale_image_utils.h index ea9dd3f0f..86b014ca3 100644 --- a/mediapipe/calculators/image/scale_image_utils.h +++ b/mediapipe/calculators/image/scale_image_utils.h @@ -28,11 +28,11 @@ namespace scale_image { // is a centered, cropped portion of the image that falls within the min // and max aspect ratio. If either the min or max aspect ratio argument // is empty or has a 0 in the numerator or denominator then it is ignored. 
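A worked instance of the contract documented above: for a 1920x1080 input with max_aspect_ratio "16/10", only the width exceeds the bound, so the crop keeps the full height and is centered horizontally. The following standalone sketch mirrors the documented behavior rather than the function's actual body.

#include <algorithm>
#include <iostream>

int main() {
  const int input_width = 1920, input_height = 1080;
  const double max_aspect_ratio = 16.0 / 10.0;  // parsed from "16/10"

  // Cap the width so that crop_width / crop_height <= max_aspect_ratio.
  const int crop_width = std::min(
      input_width, static_cast<int>(input_height * max_aspect_ratio));
  const int crop_height = input_height;
  // Center the crop, as the comment above specifies.
  const int col_start = (input_width - crop_width) / 2;
  const int row_start = (input_height - crop_height) / 2;

  std::cout << crop_width << "x" << crop_height << " at (" << col_start
            << ", " << row_start << ")\n";  // 1728x1080 at (96, 0)
}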
-::mediapipe::Status FindCropDimensions(int input_width, int input_height, // - const std::string& min_aspect_ratio, // - const std::string& max_aspect_ratio, // - int* crop_width, int* crop_height, // - int* col_start, int* row_start); +mediapipe::Status FindCropDimensions(int input_width, int input_height, // + const std::string& min_aspect_ratio, // + const std::string& max_aspect_ratio, // + int* crop_width, int* crop_height, // + int* col_start, int* row_start); // Given an input width and height, a target width and height, whether to // preserve the aspect ratio, and whether to round-down to the multiple of a @@ -43,12 +43,12 @@ namespace scale_image { // output_height will be reduced as necessary to preserve_aspect_ratio if the // option is specified. If preserving the aspect ratio is desired, you must set // scale_to_multiple_of to 2. -::mediapipe::Status FindOutputDimensions(int input_width, int input_height, // - int target_width, - int target_height, // - bool preserve_aspect_ratio, // - int scale_to_multiple_of, // - int* output_width, int* output_height); +mediapipe::Status FindOutputDimensions(int input_width, int input_height, // + int target_width, + int target_height, // + bool preserve_aspect_ratio, // + int scale_to_multiple_of, // + int* output_width, int* output_height); } // namespace scale_image } // namespace mediapipe diff --git a/mediapipe/calculators/image/set_alpha_calculator.cc b/mediapipe/calculators/image/set_alpha_calculator.cc index 31de1e21a..683efce6b 100644 --- a/mediapipe/calculators/image/set_alpha_calculator.cc +++ b/mediapipe/calculators/image/set_alpha_calculator.cc @@ -87,18 +87,18 @@ class SetAlphaCalculator : public CalculatorBase { SetAlphaCalculator() = default; ~SetAlphaCalculator() override = default; - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); // From Calculator. 
- ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; private: - ::mediapipe::Status RenderGpu(CalculatorContext* cc); - ::mediapipe::Status RenderCpu(CalculatorContext* cc); + mediapipe::Status RenderGpu(CalculatorContext* cc); + mediapipe::Status RenderCpu(CalculatorContext* cc); - ::mediapipe::Status GlSetup(CalculatorContext* cc); + mediapipe::Status GlSetup(CalculatorContext* cc); void GlRender(CalculatorContext* cc); mediapipe::SetAlphaCalculatorOptions options_; @@ -113,18 +113,18 @@ class SetAlphaCalculator : public CalculatorBase { }; REGISTER_CALCULATOR(SetAlphaCalculator); -::mediapipe::Status SetAlphaCalculator::GetContract(CalculatorContract* cc) { +mediapipe::Status SetAlphaCalculator::GetContract(CalculatorContract* cc) { CHECK_GE(cc->Inputs().NumEntries(), 1); bool use_gpu = false; if (cc->Inputs().HasTag(kInputFrameTag) && cc->Inputs().HasTag(kInputFrameTagGpu)) { - return ::mediapipe::InternalError("Cannot have multiple input images."); + return mediapipe::InternalError("Cannot have multiple input images."); } if (cc->Inputs().HasTag(kInputFrameTagGpu) != cc->Outputs().HasTag(kOutputFrameTagGpu)) { - return ::mediapipe::InternalError("GPU output must have GPU input."); + return mediapipe::InternalError("GPU output must have GPU input."); } // Input image to add/edit alpha channel. @@ -166,10 +166,10 @@ REGISTER_CALCULATOR(SetAlphaCalculator); #endif // !MEDIAPIPE_DISABLE_GPU } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status SetAlphaCalculator::Open(CalculatorContext* cc) { +mediapipe::Status SetAlphaCalculator::Open(CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); options_ = cc->Options(); @@ -198,30 +198,30 @@ REGISTER_CALCULATOR(SetAlphaCalculator); #endif } // !MEDIAPIPE_DISABLE_GPU - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status SetAlphaCalculator::Process(CalculatorContext* cc) { +mediapipe::Status SetAlphaCalculator::Process(CalculatorContext* cc) { if (use_gpu_) { #if !defined(MEDIAPIPE_DISABLE_GPU) MP_RETURN_IF_ERROR( - gpu_helper_.RunInGlContext([this, cc]() -> ::mediapipe::Status { + gpu_helper_.RunInGlContext([this, cc]() -> mediapipe::Status { if (!gpu_initialized_) { MP_RETURN_IF_ERROR(GlSetup(cc)); gpu_initialized_ = true; } MP_RETURN_IF_ERROR(RenderGpu(cc)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); #endif // !MEDIAPIPE_DISABLE_GPU } else { MP_RETURN_IF_ERROR(RenderCpu(cc)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status SetAlphaCalculator::Close(CalculatorContext* cc) { +mediapipe::Status SetAlphaCalculator::Close(CalculatorContext* cc) { #if !defined(MEDIAPIPE_DISABLE_GPU) gpu_helper_.RunInGlContext([this] { if (program_) glDeleteProgram(program_); @@ -229,12 +229,12 @@ REGISTER_CALCULATOR(SetAlphaCalculator); }); #endif // !MEDIAPIPE_DISABLE_GPU - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status SetAlphaCalculator::RenderCpu(CalculatorContext* cc) { +mediapipe::Status SetAlphaCalculator::RenderCpu(CalculatorContext* cc) { if (cc->Inputs().Tag(kInputFrameTag).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Setup source 
image @@ -294,12 +294,12 @@ REGISTER_CALCULATOR(SetAlphaCalculator); .Tag(kOutputFrameTag) .Add(output_frame.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status SetAlphaCalculator::RenderGpu(CalculatorContext* cc) { +mediapipe::Status SetAlphaCalculator::RenderGpu(CalculatorContext* cc) { if (cc->Inputs().Tag(kInputFrameTagGpu).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } #if !defined(MEDIAPIPE_DISABLE_GPU) // Setup source texture. @@ -356,7 +356,7 @@ REGISTER_CALCULATOR(SetAlphaCalculator); output_texture.Release(); #endif // !MEDIAPIPE_DISABLE_GPU - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void SetAlphaCalculator::GlRender(CalculatorContext* cc) { @@ -412,7 +412,7 @@ void SetAlphaCalculator::GlRender(CalculatorContext* cc) { #endif // !MEDIAPIPE_DISABLE_GPU } -::mediapipe::Status SetAlphaCalculator::GlSetup(CalculatorContext* cc) { +mediapipe::Status SetAlphaCalculator::GlSetup(CalculatorContext* cc) { #if !defined(MEDIAPIPE_DISABLE_GPU) const GLint attr_location[NUM_ATTRIBUTES] = { ATTRIB_VERTEX, @@ -468,7 +468,7 @@ void SetAlphaCalculator::GlRender(CalculatorContext* cc) { #endif // !MEDIAPIPE_DISABLE_GPU - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/image/sobel_edges_calculator.cc b/mediapipe/calculators/image/sobel_edges_calculator.cc index e710a99f5..a9a8d637b 100644 --- a/mediapipe/calculators/image/sobel_edges_calculator.cc +++ b/mediapipe/calculators/image/sobel_edges_calculator.cc @@ -27,10 +27,10 @@ namespace mediapipe { // See GlSimpleCalculatorBase for inputs, outputs and input side packets. class SobelEdgesCalculator : public GlSimpleCalculator { public: - ::mediapipe::Status GlSetup() override; - ::mediapipe::Status GlRender(const GlTexture& src, - const GlTexture& dst) override; - ::mediapipe::Status GlTeardown() override; + mediapipe::Status GlSetup() override; + mediapipe::Status GlRender(const GlTexture& src, + const GlTexture& dst) override; + mediapipe::Status GlTeardown() override; private: GLuint program_ = 0; @@ -40,7 +40,7 @@ class SobelEdgesCalculator : public GlSimpleCalculator { }; REGISTER_CALCULATOR(SobelEdgesCalculator); -::mediapipe::Status SobelEdgesCalculator::GlSetup() { +mediapipe::Status SobelEdgesCalculator::GlSetup() { // Load vertex and fragment shaders const GLint attr_location[NUM_ATTRIBUTES] = { ATTRIB_VERTEX, @@ -166,11 +166,11 @@ REGISTER_CALCULATOR(SobelEdgesCalculator); frame_ = glGetUniformLocation(program_, "inputImage"); pixel_w_ = glGetUniformLocation(program_, "pixelW"); pixel_h_ = glGetUniformLocation(program_, "pixelH"); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status SobelEdgesCalculator::GlRender(const GlTexture& src, - const GlTexture& dst) { +mediapipe::Status SobelEdgesCalculator::GlRender(const GlTexture& src, + const GlTexture& dst) { static const GLfloat square_vertices[] = { -1.0f, -1.0f, // bottom left 1.0f, -1.0f, // bottom right @@ -225,15 +225,15 @@ REGISTER_CALCULATOR(SobelEdgesCalculator); glDeleteVertexArrays(1, &vao); glDeleteBuffers(2, vbo); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status SobelEdgesCalculator::GlTeardown() { +mediapipe::Status SobelEdgesCalculator::GlTeardown() { if (program_) { glDeleteProgram(program_); program_ = 0; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace 
mediapipe diff --git a/mediapipe/calculators/internal/callback_packet_calculator.cc b/mediapipe/calculators/internal/callback_packet_calculator.cc index e9f85ee83..e78007fbb 100644 --- a/mediapipe/calculators/internal/callback_packet_calculator.cc +++ b/mediapipe/calculators/internal/callback_packet_calculator.cc @@ -50,7 +50,7 @@ void DumpPostStreamPacket(Packet* post_stream_packet, const Packet& packet) { // while that pointer is still alive. class CallbackPacketCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { const auto& options = cc->Options(); switch (options.type()) { case CallbackPacketCalculatorOptions::VECTOR_PACKET: @@ -60,17 +60,17 @@ class CallbackPacketCalculator : public CalculatorBase { .Set>(); break; default: - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Invalid type of callback to produce."; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { const auto& options = cc->Options(); void* ptr; if (sscanf(options.pointer().c_str(), "%p", &ptr) != 1) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Stored pointer value in options is invalid."; } switch (options.type()) { @@ -87,14 +87,14 @@ class CallbackPacketCalculator : public CalculatorBase { std::placeholders::_1))); break; default: - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Invalid type to dump into."; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { - return ::mediapipe::OkStatus(); + mediapipe::Status Process(CalculatorContext* cc) override { + return mediapipe::OkStatus(); } }; diff --git a/mediapipe/calculators/tensor/BUILD b/mediapipe/calculators/tensor/BUILD index f236413b6..94daac793 100644 --- a/mediapipe/calculators/tensor/BUILD +++ b/mediapipe/calculators/tensor/BUILD @@ -61,7 +61,7 @@ cc_library( "@com_google_absl//absl/memory", "//mediapipe/framework:calculator_framework", "//mediapipe/framework/formats:tensor", - "//mediapipe/util:resource_util", + "//mediapipe/util/tflite:tflite_model_loader", "//mediapipe/util/tflite:config", "@org_tensorflow//tensorflow/lite:framework", "@org_tensorflow//tensorflow/lite/delegates/xnnpack:xnnpack_delegate", @@ -293,6 +293,16 @@ cc_library( alwayslink = 1, ) +mediapipe_proto_library( + name = "tensors_to_floats_calculator_proto", + srcs = ["tensors_to_floats_calculator.proto"], + visibility = ["//visibility:public"], + deps = [ + "//mediapipe/framework:calculator_options_proto", + "//mediapipe/framework:calculator_proto", + ], +) + cc_library( name = "tensors_to_floats_calculator", srcs = ["tensors_to_floats_calculator.cc"], @@ -305,6 +315,7 @@ cc_library( }), visibility = ["//visibility:public"], deps = [ + ":tensors_to_floats_calculator_cc_proto", "//mediapipe/framework:calculator_framework", "//mediapipe/framework/formats:tensor", "//mediapipe/framework/port:ret_check", @@ -312,6 +323,23 @@ cc_library( alwayslink = 1, ) +cc_test( + name = "tensors_to_floats_calculator_test", + srcs = ["tensors_to_floats_calculator_test.cc"], + deps = [ + ":tensors_to_floats_calculator", + 
":tensors_to_floats_calculator_cc_proto", + "//mediapipe/framework:calculator_cc_proto", + "//mediapipe/framework:calculator_framework", + "//mediapipe/framework:calculator_runner", + "//mediapipe/framework/formats:tensor", + "//mediapipe/framework/port:gtest_main", + "//mediapipe/framework/port:parse_text_proto", + "@com_google_absl//absl/memory", + "@com_google_googletest//:gtest_main", + ], +) + cc_library( name = "tensors_to_classification_calculator", srcs = ["tensors_to_classification_calculator.cc"], @@ -445,15 +473,22 @@ cc_test( data = [ "testdata/image_to_tensor/input.jpg", "testdata/image_to_tensor/large_sub_rect.png", + "testdata/image_to_tensor/large_sub_rect_border_zero.png", "testdata/image_to_tensor/large_sub_rect_keep_aspect.png", + "testdata/image_to_tensor/large_sub_rect_keep_aspect_border_zero.png", "testdata/image_to_tensor/large_sub_rect_keep_aspect_with_rotation.png", + "testdata/image_to_tensor/large_sub_rect_keep_aspect_with_rotation_border_zero.png", "testdata/image_to_tensor/medium_sub_rect_keep_aspect.png", + "testdata/image_to_tensor/medium_sub_rect_keep_aspect_border_zero.png", "testdata/image_to_tensor/medium_sub_rect_keep_aspect_with_rotation.png", + "testdata/image_to_tensor/medium_sub_rect_keep_aspect_with_rotation_border_zero.png", "testdata/image_to_tensor/medium_sub_rect_with_rotation.png", + "testdata/image_to_tensor/medium_sub_rect_with_rotation_border_zero.png", "testdata/image_to_tensor/noop_except_range.png", ], deps = [ ":image_to_tensor_calculator", + ":image_to_tensor_converter", ":image_to_tensor_utils", "//mediapipe/framework:calculator_framework", "//mediapipe/framework:calculator_runner", @@ -479,6 +514,13 @@ cc_test( cc_library( name = "image_to_tensor_converter", hdrs = ["image_to_tensor_converter.h"], + copts = select({ + "//mediapipe:apple": [ + "-x objective-c++", + "-fobjc-arc", # enable reference-counting + ], + "//conditions:default": [], + }), deps = [ ":image_to_tensor_utils", "//mediapipe/framework:packet", @@ -521,6 +563,7 @@ cc_library( "//mediapipe:apple": [], "//conditions:default": [ ":image_to_tensor_converter", + ":image_to_tensor_converter_gl_utils", ":image_to_tensor_utils", "@com_google_absl//absl/strings", "//mediapipe/framework:calculator_framework", @@ -553,6 +596,7 @@ cc_library( "//mediapipe/gpu:disable_gpu": [], "//conditions:default": [ ":image_to_tensor_converter", + ":image_to_tensor_converter_gl_utils", ":image_to_tensor_utils", "@com_google_absl//absl/strings", "//mediapipe/framework:calculator_framework", @@ -568,6 +612,21 @@ cc_library( }), ) +cc_library( + name = "image_to_tensor_converter_gl_utils", + srcs = ["image_to_tensor_converter_gl_utils.cc"], + hdrs = ["image_to_tensor_converter_gl_utils.h"], + deps = ["//mediapipe/framework:port"] + select({ + "//mediapipe/gpu:disable_gpu": [], + "//conditions:default": [ + "//mediapipe/gpu:gl_base", + "//mediapipe/gpu:gl_context", + "//mediapipe/framework/port:status", + "//mediapipe/framework/port:statusor", + ], + }), +) + cc_library( name = "image_to_tensor_converter_metal", srcs = ["image_to_tensor_converter_metal.cc"], diff --git a/mediapipe/calculators/tensor/image_to_tensor_calculator.cc b/mediapipe/calculators/tensor/image_to_tensor_calculator.cc index 9f8c2b023..2a93355c4 100644 --- a/mediapipe/calculators/tensor/image_to_tensor_calculator.cc +++ b/mediapipe/calculators/tensor/image_to_tensor_calculator.cc @@ -14,6 +14,7 @@ #include #include +#include #include "mediapipe/calculators/tensor/image_to_tensor_calculator.pb.h" #include 
"mediapipe/calculators/tensor/image_to_tensor_converter.h" @@ -111,7 +112,7 @@ namespace mediapipe { // } class ImageToTensorCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { const auto& options = cc->Options(); @@ -157,10 +158,10 @@ class ImageToTensorCalculator : public CalculatorBase { #endif // MEDIAPIPE_DISABLE_GPU } cc->Outputs().Tag(kOutput).Set>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) { + mediapipe::Status Open(CalculatorContext* cc) { // Makes sure outputs' next timestamp bound update is handled automatically // by the framework. cc->SetOffset(TimestampDiff(0)); @@ -171,40 +172,42 @@ class ImageToTensorCalculator : public CalculatorBase { range_max_ = options_.output_tensor_float_range().max(); if (cc->Inputs().HasTag(kInputCpu)) { - ASSIGN_OR_RETURN(converter_, CreateOpenCvConverter(cc)); + ASSIGN_OR_RETURN(converter_, CreateOpenCvConverter(cc, GetBorderMode())); } else { #if MEDIAPIPE_DISABLE_GPU return mediapipe::UnimplementedError("GPU processing is disabled"); #else #if MEDIAPIPE_METAL_ENABLED - ASSIGN_OR_RETURN(converter_, CreateMetalConverter(cc)); + ASSIGN_OR_RETURN(converter_, CreateMetalConverter(cc, GetBorderMode())); #elif MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_31 - ASSIGN_OR_RETURN(converter_, CreateImageToGlBufferTensorConverter( - cc, DoesInputStartAtBottom())); + ASSIGN_OR_RETURN(converter_, + CreateImageToGlBufferTensorConverter( + cc, DoesInputStartAtBottom(), GetBorderMode())); #else - ASSIGN_OR_RETURN(converter_, CreateImageToGlTextureTensorConverter( - cc, DoesInputStartAtBottom())); + ASSIGN_OR_RETURN(converter_, + CreateImageToGlTextureTensorConverter( + cc, DoesInputStartAtBottom(), GetBorderMode())); #endif // MEDIAPIPE_METAL_ENABLED #endif // MEDIAPIPE_DISABLE_GPU } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) { + mediapipe::Status Process(CalculatorContext* cc) { const InputStreamShard& input = cc->Inputs().Tag( cc->Inputs().HasTag(kInputCpu) ? kInputCpu : kInputGpu); if (input.IsEmpty()) { // Timestamp bound update happens automatically. (See Open().) - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } absl::optional norm_rect; if (cc->Inputs().HasTag(kInputNormRect)) { if (cc->Inputs().Tag(kInputNormRect).IsEmpty()) { // Timestamp bound update happens automatically. (See Open().) - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } norm_rect = cc->Inputs().Tag(kInputNormRect).Get(); @@ -216,7 +219,7 @@ class ImageToTensorCalculator : public CalculatorBase { // NOTE: usage of sentinel rects should be avoided. 
DLOG(WARNING) << "Updating timestamp bound in response to a sentinel rect"; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } @@ -254,7 +257,7 @@ class ImageToTensorCalculator : public CalculatorBase { MakePacket>(std::move(result)) .At(cc->InputTimestamp())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: @@ -262,6 +265,19 @@ class ImageToTensorCalculator : public CalculatorBase { return options_.gpu_origin() != mediapipe::GpuOrigin_Mode_TOP_LEFT; } + BorderMode GetBorderMode() { + switch (options_.border_mode()) { + case mediapipe:: + ImageToTensorCalculatorOptions_BorderMode_BORDER_UNSPECIFIED: + return BorderMode::kReplicate; + case mediapipe::ImageToTensorCalculatorOptions_BorderMode_BORDER_ZERO: + return BorderMode::kZero; + case mediapipe:: + ImageToTensorCalculatorOptions_BorderMode_BORDER_REPLICATE: + return BorderMode::kReplicate; + } + } + std::unique_ptr converter_; mediapipe::ImageToTensorCalculatorOptions options_; int output_width_ = 0; diff --git a/mediapipe/calculators/tensor/image_to_tensor_calculator.proto b/mediapipe/calculators/tensor/image_to_tensor_calculator.proto index 038952a01..77fb1eb46 100644 --- a/mediapipe/calculators/tensor/image_to_tensor_calculator.proto +++ b/mediapipe/calculators/tensor/image_to_tensor_calculator.proto @@ -44,6 +44,13 @@ message ImageToTensorCalculatorOptions { optional float max = 2; } + // Pixel extrapolation methods. See @border_mode. + enum BorderMode { + BORDER_UNSPECIFIED = 0; + BORDER_ZERO = 1; + BORDER_REPLICATE = 2; + } + optional int32 output_tensor_width = 1; optional int32 output_tensor_height = 2; @@ -61,4 +68,12 @@ message ImageToTensorCalculatorOptions { // to be flipped vertically as tensors are expected to start at top. // (DEFAULT or unset interpreted as CONVENTIONAL.) optional GpuOrigin.Mode gpu_origin = 5; + + // Pixel extrapolation method. + // When converting image to tensor it may happen that tensor needs to read + // pixels outside image boundaries. Border mode helps to specify how such + // pixels will be calculated. + // + // BORDER_REPLICATE is used by default. + optional BorderMode border_mode = 6; } diff --git a/mediapipe/calculators/tensor/image_to_tensor_calculator_test.cc b/mediapipe/calculators/tensor/image_to_tensor_calculator_test.cc index 6c5162a95..c11b61c51 100644 --- a/mediapipe/calculators/tensor/image_to_tensor_calculator_test.cc +++ b/mediapipe/calculators/tensor/image_to_tensor_calculator_test.cc @@ -17,6 +17,7 @@ #include "absl/memory/memory.h" #include "absl/strings/substitute.h" +#include "mediapipe/calculators/tensor/image_to_tensor_converter.h" #include "mediapipe/calculators/tensor/image_to_tensor_utils.h" #include "mediapipe/framework/calculator_framework.h" #include "mediapipe/framework/calculator_runner.h" @@ -55,7 +56,19 @@ cv::Mat GetRgba(absl::string_view path) { // No processing/assertions should be done after the function is invoked. 
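The new border_mode option keeps BORDER_UNSPECIFIED as the proto default so graphs written before this change retain the previous replicate behavior; GetBorderMode() above maps both BORDER_UNSPECIFIED and BORDER_REPLICATE onto BorderMode::kReplicate. A minimal sketch of that mapping pattern, using stand-in enums rather than the generated proto types:

// Stand-in enums illustrating the GetBorderMode() switch above; the real
// calculator switches over the generated ImageToTensorCalculatorOptions enum.
enum class OptionsBorderMode { kUnspecified, kZero, kReplicate };
enum class BorderMode { kZero, kReplicate };

BorderMode ToBorderMode(OptionsBorderMode mode) {
  switch (mode) {
    case OptionsBorderMode::kUnspecified:
      return BorderMode::kReplicate;  // Backward-compatible default.
    case OptionsBorderMode::kZero:
      return BorderMode::kZero;
    case OptionsBorderMode::kReplicate:
      return BorderMode::kReplicate;
  }
  return BorderMode::kReplicate;  // Unreachable for valid enum values.
}

The RunTest helper below then threads an optional BorderMode into the calculator's textproto options, so every existing test can also be exercised with the zero-border variant.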
void RunTest(cv::Mat input, cv::Mat expected_result, float range_min, float range_max, int tensor_width, int tensor_height, - bool keep_aspect, const mediapipe::NormalizedRect& roi) { + bool keep_aspect, absl::optional border_mode, + const mediapipe::NormalizedRect& roi) { + std::string border_mode_str; + if (border_mode) { + switch (*border_mode) { + case BorderMode::kReplicate: + border_mode_str = "border_mode: BORDER_REPLICATE"; + break; + case BorderMode::kZero: + border_mode_str = "border_mode: BORDER_ZERO"; + break; + } + } auto graph_config = mediapipe::ParseTextProtoOrDie( absl::Substitute(R"( input_stream: "input_image" @@ -67,13 +80,14 @@ void RunTest(cv::Mat input, cv::Mat expected_result, float range_min, output_stream: "TENSORS:tensor" options { [mediapipe.ImageToTensorCalculatorOptions.ext] { - output_tensor_width: $0 - output_tensor_height: $1 - keep_aspect_ratio: $4 - output_tensor_float_range { + output_tensor_width: $0 + output_tensor_height: $1 + keep_aspect_ratio: $4 + output_tensor_float_range { min: $2 max: $3 } + $5 # border mode } } } @@ -82,7 +96,8 @@ void RunTest(cv::Mat input, cv::Mat expected_result, float range_min, /*$1=*/tensor_height, /*$2=*/range_min, /*$3=*/range_max, - /*$4=*/keep_aspect ? "true" : "false")); + /*$4=*/keep_aspect ? "true" : "false", + /*$5=*/border_mode_str)); std::vector output_packets; tool::AddVectorSink("tensor", &graph_config, &output_packets); @@ -151,7 +166,26 @@ TEST(ImageToTensorCalculatorTest, MediumSubRectKeepAspect) { "tensor/testdata/image_to_tensor/medium_sub_rect_keep_aspect.png"), /*range_min=*/0.0f, /*range_max=*/1.0f, - /*tensor_width=*/256, /*tensor_height=*/256, /*keep_aspect=*/true, roi); + /*tensor_width=*/256, /*tensor_height=*/256, /*keep_aspect=*/true, + /*border mode*/ {}, roi); +} + +TEST(ImageToTensorCalculatorTest, MediumSubRectKeepAspectBorderZero) { + mediapipe::NormalizedRect roi; + roi.set_x_center(0.65f); + roi.set_y_center(0.4f); + roi.set_width(0.5f); + roi.set_height(0.5f); + roi.set_rotation(0); + RunTest(GetRgb("/mediapipe/calculators/" + "tensor/testdata/image_to_tensor/input.jpg"), + GetRgb("/mediapipe/calculators/" + "tensor/testdata/image_to_tensor/" + "medium_sub_rect_keep_aspect_border_zero.png"), + /*range_min=*/0.0f, + /*range_max=*/1.0f, + /*tensor_width=*/256, /*tensor_height=*/256, /*keep_aspect=*/true, + BorderMode::kZero, roi); } TEST(ImageToTensorCalculatorTest, MediumSubRectKeepAspectWithRotation) { @@ -168,7 +202,25 @@ TEST(ImageToTensorCalculatorTest, MediumSubRectKeepAspectWithRotation) { "medium_sub_rect_keep_aspect_with_rotation.png"), /*range_min=*/0.0f, /*range_max=*/1.0f, /*tensor_width=*/256, /*tensor_height=*/256, /*keep_aspect=*/true, - roi); + BorderMode::kReplicate, roi); +} + +TEST(ImageToTensorCalculatorTest, + MediumSubRectKeepAspectWithRotationBorderZero) { + mediapipe::NormalizedRect roi; + roi.set_x_center(0.65f); + roi.set_y_center(0.4f); + roi.set_width(0.5f); + roi.set_height(0.5f); + roi.set_rotation(M_PI * 90.0f / 180.0f); + RunTest(GetRgb("/mediapipe/calculators/" + "tensor/testdata/image_to_tensor/input.jpg"), + GetRgb("/mediapipe/calculators/" + "tensor/testdata/image_to_tensor/" + "medium_sub_rect_keep_aspect_with_rotation_border_zero.png"), + /*range_min=*/0.0f, /*range_max=*/1.0f, + /*tensor_width=*/256, /*tensor_height=*/256, /*keep_aspect=*/true, + BorderMode::kZero, roi); } TEST(ImageToTensorCalculatorTest, MediumSubRectWithRotation) { @@ -186,7 +238,26 @@ TEST(ImageToTensorCalculatorTest, MediumSubRectWithRotation) { 
"tensor/testdata/image_to_tensor/medium_sub_rect_with_rotation.png"), /*range_min=*/-1.0f, /*range_max=*/1.0f, - /*tensor_width=*/256, /*tensor_height=*/256, /*keep_aspect=*/false, roi); + /*tensor_width=*/256, /*tensor_height=*/256, /*keep_aspect=*/false, + BorderMode::kReplicate, roi); +} + +TEST(ImageToTensorCalculatorTest, MediumSubRectWithRotationBorderZero) { + mediapipe::NormalizedRect roi; + roi.set_x_center(0.65f); + roi.set_y_center(0.4f); + roi.set_width(0.5f); + roi.set_height(0.5f); + roi.set_rotation(M_PI * -45.0f / 180.0f); + RunTest(GetRgb("/mediapipe/calculators/" + "tensor/testdata/image_to_tensor/input.jpg"), + GetRgb("/mediapipe/calculators/" + "tensor/testdata/image_to_tensor/" + "medium_sub_rect_with_rotation_border_zero.png"), + /*range_min=*/-1.0f, + /*range_max=*/1.0f, + /*tensor_width=*/256, /*tensor_height=*/256, /*keep_aspect=*/false, + BorderMode::kZero, roi); } TEST(ImageToTensorCalculatorTest, LargeSubRect) { @@ -203,7 +274,25 @@ TEST(ImageToTensorCalculatorTest, LargeSubRect) { /*range_min=*/0.0f, /*range_max=*/1.0f, /*tensor_width=*/128, /*tensor_height=*/128, /*keep_aspect=*/false, - roi); + BorderMode::kReplicate, roi); +} + +TEST(ImageToTensorCalculatorTest, LargeSubRectBorderZero) { + mediapipe::NormalizedRect roi; + roi.set_x_center(0.5f); + roi.set_y_center(0.5f); + roi.set_width(1.5f); + roi.set_height(1.1f); + roi.set_rotation(0); + RunTest( + GetRgb("/mediapipe/calculators/" + "tensor/testdata/image_to_tensor/input.jpg"), + GetRgb("/mediapipe/calculators/" + "tensor/testdata/image_to_tensor/large_sub_rect_border_zero.png"), + /*range_min=*/0.0f, + /*range_max=*/1.0f, + /*tensor_width=*/128, /*tensor_height=*/128, /*keep_aspect=*/false, + BorderMode::kZero, roi); } TEST(ImageToTensorCalculatorTest, LargeSubRectKeepAspect) { @@ -220,7 +309,26 @@ TEST(ImageToTensorCalculatorTest, LargeSubRectKeepAspect) { "tensor/testdata/image_to_tensor/large_sub_rect_keep_aspect.png"), /*range_min=*/0.0f, /*range_max=*/1.0f, - /*tensor_width=*/128, /*tensor_height=*/128, /*keep_aspect=*/true, roi); + /*tensor_width=*/128, /*tensor_height=*/128, /*keep_aspect=*/true, + BorderMode::kReplicate, roi); +} + +TEST(ImageToTensorCalculatorTest, LargeSubRectKeepAspectBorderZero) { + mediapipe::NormalizedRect roi; + roi.set_x_center(0.5f); + roi.set_y_center(0.5f); + roi.set_width(1.5f); + roi.set_height(1.1f); + roi.set_rotation(0); + RunTest(GetRgb("/mediapipe/calculators/" + "tensor/testdata/image_to_tensor/input.jpg"), + GetRgb("/mediapipe/calculators/" + "tensor/testdata/image_to_tensor/" + "large_sub_rect_keep_aspect_border_zero.png"), + /*range_min=*/0.0f, + /*range_max=*/1.0f, + /*tensor_width=*/128, /*tensor_height=*/128, /*keep_aspect=*/true, + BorderMode::kZero, roi); } TEST(ImageToTensorCalculatorTest, LargeSubRectKeepAspectWithRotation) { @@ -238,7 +346,26 @@ TEST(ImageToTensorCalculatorTest, LargeSubRectKeepAspectWithRotation) { /*range_min=*/0.0f, /*range_max=*/1.0f, /*tensor_width=*/128, /*tensor_height=*/128, /*keep_aspect=*/true, - roi); + /*border_mode=*/{}, roi); +} + +TEST(ImageToTensorCalculatorTest, + LargeSubRectKeepAspectWithRotationBorderZero) { + mediapipe::NormalizedRect roi; + roi.set_x_center(0.5f); + roi.set_y_center(0.5f); + roi.set_width(1.5f); + roi.set_height(1.1f); + roi.set_rotation(M_PI * -15.0f / 180.0f); + RunTest(GetRgba("/mediapipe/calculators/" + "tensor/testdata/image_to_tensor/input.jpg"), + GetRgb("/mediapipe/calculators/" + "tensor/testdata/image_to_tensor/" + "large_sub_rect_keep_aspect_with_rotation_border_zero.png"), + 
/*range_min=*/0.0f, + /*range_max=*/1.0f, + /*tensor_width=*/128, /*tensor_height=*/128, /*keep_aspect=*/true, + /*border_mode=*/BorderMode::kZero, roi); } TEST(ImageToTensorCalculatorTest, NoOpExceptRange) { @@ -255,7 +382,24 @@ TEST(ImageToTensorCalculatorTest, NoOpExceptRange) { /*range_min=*/0.0f, /*range_max=*/1.0f, /*tensor_width=*/64, /*tensor_height=*/128, /*keep_aspect=*/true, - roi); + BorderMode::kReplicate, roi); +} + +TEST(ImageToTensorCalculatorTest, NoOpExceptRangeBorderZero) { + mediapipe::NormalizedRect roi; + roi.set_x_center(0.5f); + roi.set_y_center(0.5f); + roi.set_width(1.0f); + roi.set_height(1.0f); + roi.set_rotation(0); + RunTest(GetRgba("/mediapipe/calculators/" + "tensor/testdata/image_to_tensor/input.jpg"), + GetRgb("/mediapipe/calculators/" + "tensor/testdata/image_to_tensor/noop_except_range.png"), + /*range_min=*/0.0f, + /*range_max=*/1.0f, + /*tensor_width=*/64, /*tensor_height=*/128, /*keep_aspect=*/true, + BorderMode::kZero, roi); } } // namespace diff --git a/mediapipe/calculators/tensor/image_to_tensor_converter.h b/mediapipe/calculators/tensor/image_to_tensor_converter.h index 062195697..ef4cac9d1 100644 --- a/mediapipe/calculators/tensor/image_to_tensor_converter.h +++ b/mediapipe/calculators/tensor/image_to_tensor_converter.h @@ -27,6 +27,12 @@ struct Size { int height; }; +// Pixel extrapolation method. +// When converting image to tensor it may happen that tensor needs to read +// pixels outside image boundaries. Border mode helps to specify how such pixels +// will be calculated. +enum class BorderMode { kZero, kReplicate }; + // Converts image to tensor. class ImageToTensorConverter { public: @@ -41,11 +47,11 @@ class ImageToTensorConverter { // @output_dims dimensions of output tensor. // @range_min/max describes output tensor range image pixels should converted // to. - virtual ::mediapipe::StatusOr Convert(const Packet& image_packet, - const RotatedRect& roi, - const Size& output_dims, - float range_min, - float range_max) = 0; + virtual mediapipe::StatusOr Convert(const Packet& image_packet, + const RotatedRect& roi, + const Size& output_dims, + float range_min, + float range_max) = 0; }; } // namespace mediapipe diff --git a/mediapipe/calculators/tensor/image_to_tensor_converter_gl_buffer.cc b/mediapipe/calculators/tensor/image_to_tensor_converter_gl_buffer.cc index fb1b9ab89..b8633fc5d 100644 --- a/mediapipe/calculators/tensor/image_to_tensor_converter_gl_buffer.cc +++ b/mediapipe/calculators/tensor/image_to_tensor_converter_gl_buffer.cc @@ -14,6 +14,8 @@ #include "mediapipe/calculators/tensor/image_to_tensor_converter_gl_buffer.h" +#include "mediapipe/framework/port.h" + #if MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_31 #include @@ -22,6 +24,7 @@ #include "absl/strings/str_cat.h" #include "mediapipe/calculators/tensor/image_to_tensor_converter.h" +#include "mediapipe/calculators/tensor/image_to_tensor_converter_gl_utils.h" #include "mediapipe/calculators/tensor/image_to_tensor_utils.h" #include "mediapipe/framework/calculator_framework.h" #include "mediapipe/framework/formats/tensor.h" @@ -51,7 +54,7 @@ class SubRectExtractorGl { public: // Extracts a region defined by @sub_rect, removes A channel, transforms input // pixels as alpha * x + beta and resizes result into destination. 
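The BorderMode enum introduced in image_to_tensor_converter.h above has simple per-pixel semantics that every backend (OpenCV, GL texture, GL buffer, Metal) must reproduce: an out-of-bounds read yields zero under kZero and the nearest edge pixel under kReplicate. An illustrative CPU-side reading for a single-channel, row-major image (the converters themselves do this via OpenCV or on the GPU):

#include <algorithm>
#include <vector>

// Samples (x, y), extrapolating out-of-bounds coordinates according to the
// border mode: zero for BorderMode::kZero, clamp-to-edge for kReplicate.
float SampleWithBorder(const std::vector<float>& img, int width, int height,
                       int x, int y, bool zero_border) {
  if (x >= 0 && x < width && y >= 0 && y < height) return img[y * width + x];
  if (zero_border) return 0.0f;                // BorderMode::kZero
  const int cx = std::clamp(x, 0, width - 1);  // BorderMode::kReplicate
  const int cy = std::clamp(y, 0, height - 1);
  return img[cy * width + cx];
}

SubRectExtractorGl::ExtractSubRectToBuffer below realizes the same contract for the GL-buffer path.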
- ::mediapipe::Status ExtractSubRectToBuffer( + mediapipe::Status ExtractSubRectToBuffer( const tflite::gpu::gl::GlTexture& texture, const tflite::gpu::HW& texture_size, const RotatedRect& sub_rect, bool flip_horizontaly, float alpha, float beta, @@ -59,20 +62,28 @@ class SubRectExtractorGl { tflite::gpu::gl::CommandQueue* command_queue, tflite::gpu::gl::GlBuffer* destination); - static ::mediapipe::StatusOr Create( - bool input_starts_at_bottom); + static mediapipe::StatusOr Create( + const mediapipe::GlContext& gl_context, bool input_starts_at_bottom, + BorderMode border_mode); private: explicit SubRectExtractorGl(tflite::gpu::gl::GlProgram program, - tflite::gpu::uint3 workgroup_size) - : program_(std::move(program)), workgroup_size_(workgroup_size) {} + tflite::gpu::uint3 workgroup_size, + bool use_custom_zero_border, + BorderMode border_mode) + : program_(std::move(program)), + workgroup_size_(workgroup_size), + use_custom_zero_border_(use_custom_zero_border), + border_mode_(border_mode) {} tflite::gpu::gl::GlProgram program_; tflite::gpu::uint3 workgroup_size_; + bool use_custom_zero_border_ = false; + BorderMode border_mode_ = BorderMode::kReplicate; }; -::mediapipe::Status SetMat4x4(const tflite::gpu::gl::GlProgram& program, - const std::string& name, float* data) { +mediapipe::Status SetMat4x4(const tflite::gpu::gl::GlProgram& program, + const std::string& name, float* data) { GLint uniform_id; MP_RETURN_IF_ERROR(TFLITE_GPU_CALL_GL(glGetUniformLocation, &uniform_id, program.id(), name.c_str())); @@ -80,44 +91,6 @@ class SubRectExtractorGl { 1, GL_TRUE, data); } -class GlParametersOverride { - public: - static ::mediapipe::StatusOr Create( - const std::vector>& overrides) { - std::vector old_values(overrides.size()); - for (int i = 0; i < overrides.size(); ++i) { - MP_RETURN_IF_ERROR(TFLITE_GPU_CALL_GL(glGetTexParameteriv, GL_TEXTURE_2D, - overrides[i].first, - &old_values[i])); - if (overrides[i].second != old_values[i]) { - MP_RETURN_IF_ERROR(TFLITE_GPU_CALL_GL(glTexParameteri, GL_TEXTURE_2D, - overrides[i].first, - overrides[i].second)); - } - } - return GlParametersOverride(overrides, std::move(old_values)); - } - - ::mediapipe::Status Revert() { - for (int i = 0; i < overrides_.size(); ++i) { - if (overrides_[i].second != old_values_[i]) { - MP_RETURN_IF_ERROR(TFLITE_GPU_CALL_GL(glTexParameteri, GL_TEXTURE_2D, - overrides_[i].first, - old_values_[i])); - } - } - return ::mediapipe::OkStatus(); - } - - private: - GlParametersOverride(const std::vector>& overrides, - std::vector old_values) - : overrides_(overrides), old_values_(std::move(old_values)) {} - - std::vector> overrides_; - std::vector old_values_; -}; - constexpr char kShaderCode[] = R"( layout(std430) buffer; @@ -162,6 +135,12 @@ void main() { #endif // INPUT_STARTS_AT_BOTTOM vec4 src_value = alpha * texture(input_data, tc.xy) + beta; +#ifdef CUSTOM_ZERO_BORDER_MODE + float out_of_bounds = + float(tc.x < 0.0 || tc.x > 1.0 || tc.y < 0.0 || tc.y > 1.0); + src_value = mix(src_value, vec4(0.0, 0.0, 0.0, 0.0), out_of_bounds); +#endif + int linear_index = gid.y * out_width + gid.x; // output_data.elements is populated as though it contains vec3 elements. 
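The CUSTOM_ZERO_BORDER_MODE branch added to the compute shader above is the fallback for GL contexts that lack GL_CLAMP_TO_BORDER: the texture is sampled with clamp-to-edge, and the result is zeroed whenever the unclamped coordinate left the [0, 1] range. The scalar equivalent of that mix():

// C++ rendering of the shader's zero-border fallback; tc_x/tc_y are the
// unclamped texture coordinates and `sampled` is the clamp-to-edge sample
// already transformed by alpha/beta.
struct Vec4 { float r, g, b, a; };

Vec4 ApplyCustomZeroBorder(const Vec4& sampled, float tc_x, float tc_y) {
  const bool out_of_bounds =
      tc_x < 0.0f || tc_x > 1.0f || tc_y < 0.0f || tc_y > 1.0f;
  return out_of_bounds ? Vec4{0.0f, 0.0f, 0.0f, 0.0f} : sampled;
}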
@@ -172,7 +151,7 @@ void main() { } )"; -::mediapipe::Status SubRectExtractorGl::ExtractSubRectToBuffer( +mediapipe::Status SubRectExtractorGl::ExtractSubRectToBuffer( const tflite::gpu::gl::GlTexture& texture, const tflite::gpu::HW& texture_size, const RotatedRect& texture_sub_rect, bool flip_horizontaly, float alpha, float beta, @@ -185,11 +164,27 @@ void main() { &transform_mat); MP_RETURN_IF_ERROR(texture.BindAsSampler2D(0)); - ASSIGN_OR_RETURN(auto overrides, GlParametersOverride::Create( - {{GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE}, - {GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE}, - {GL_TEXTURE_MIN_FILTER, GL_LINEAR}, - {GL_TEXTURE_MAG_FILTER, GL_LINEAR}})); + // a) Filtering. + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + + // b) Clamping. + switch (border_mode_) { + case BorderMode::kReplicate: { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + break; + } + case BorderMode::kZero: { + if (!use_custom_zero_border_) { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER); + glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, + std::array{0.0f, 0.0f, 0.0f, 0.0f}.data()); + } + break; + } + } MP_RETURN_IF_ERROR(destination->BindToIndex(0)); MP_RETURN_IF_ERROR(program_.SetParameter({"input_data", 0})); @@ -204,11 +199,21 @@ void main() { workgroup_size_); MP_RETURN_IF_ERROR(command_queue->Dispatch(program_, num_workgroups)); - return overrides.Revert(); + // Resetting to MediaPipe texture param defaults. + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + + return mediapipe::OkStatus(); } -::mediapipe::StatusOr SubRectExtractorGl::Create( - bool input_starts_at_bottom) { +mediapipe::StatusOr SubRectExtractorGl::Create( + const mediapipe::GlContext& gl_context, bool input_starts_at_bottom, + BorderMode border_mode) { + bool use_custom_zero_border = border_mode == BorderMode::kZero && + !IsGlClampToBorderSupported(gl_context); + const tflite::gpu::uint3 workgroup_size = {8, 8, 1}; std::string starts_at_bottom_def; if (input_starts_at_bottom) { @@ -216,9 +221,15 @@ void main() { #define INPUT_STARTS_AT_BOTTOM; )"; } - const std::string full_shader_source = - absl::StrCat(tflite::gpu::gl::GetShaderHeader(workgroup_size), - starts_at_bottom_def, kShaderCode); + std::string custom_zero_border_mode_def; + if (use_custom_zero_border) { + custom_zero_border_mode_def = R"( + #define CUSTOM_ZERO_BORDER_MODE + )"; + } + const std::string full_shader_source = absl::StrCat( + tflite::gpu::gl::GetShaderHeader(workgroup_size), starts_at_bottom_def, + custom_zero_border_mode_def, kShaderCode); tflite::gpu::gl::GlShader shader; MP_RETURN_IF_ERROR(tflite::gpu::gl::GlShader::CompileShader( @@ -227,27 +238,30 @@ void main() { MP_RETURN_IF_ERROR( tflite::gpu::gl::GlProgram::CreateWithShader(shader, &program)); - return SubRectExtractorGl(std::move(program), workgroup_size); + return SubRectExtractorGl(std::move(program), workgroup_size, + use_custom_zero_border, border_mode); } class GlProcessor : public ImageToTensorConverter { public: - ::mediapipe::Status Init(CalculatorContext* cc, bool input_starts_at_bottom) { + 
mediapipe::Status Init(CalculatorContext* cc, bool input_starts_at_bottom, + BorderMode border_mode) { MP_RETURN_IF_ERROR(gl_helper_.Open(cc)); - return gl_helper_.RunInGlContext( - [this, input_starts_at_bottom]() -> ::mediapipe::Status { - tflite::gpu::GpuInfo gpu_info; - MP_RETURN_IF_ERROR(tflite::gpu::gl::RequestGpuInfo(&gpu_info)); - RET_CHECK(tflite::gpu::IsOpenGl31OrAbove(gpu_info)) - << "OpenGL ES 3.1 is required."; - command_queue_ = tflite::gpu::gl::NewCommandQueue(gpu_info); + return gl_helper_.RunInGlContext([this, input_starts_at_bottom, + border_mode]() -> mediapipe::Status { + tflite::gpu::GpuInfo gpu_info; + MP_RETURN_IF_ERROR(tflite::gpu::gl::RequestGpuInfo(&gpu_info)); + RET_CHECK(gpu_info.IsApiOpenGl31OrAbove()) + << "OpenGL ES 3.1 is required."; + command_queue_ = tflite::gpu::gl::NewCommandQueue(gpu_info); - ASSIGN_OR_RETURN(auto extractor, - SubRectExtractorGl::Create(input_starts_at_bottom)); - extractor_ = - absl::make_unique(std::move(extractor)); - return ::mediapipe::OkStatus(); - }); + ASSIGN_OR_RETURN( + auto extractor, + SubRectExtractorGl::Create(gl_helper_.GetGlContext(), + input_starts_at_bottom, border_mode)); + extractor_ = absl::make_unique(std::move(extractor)); + return mediapipe::OkStatus(); + }); } Size GetImageSize(const Packet& image_packet) override { @@ -255,11 +269,10 @@ class GlProcessor : public ImageToTensorConverter { return {image.width(), image.height()}; } - ::mediapipe::StatusOr Convert(const Packet& image_packet, - const RotatedRect& roi, - const Size& output_dims, - float range_min, - float range_max) override { + mediapipe::StatusOr Convert(const Packet& image_packet, + const RotatedRect& roi, + const Size& output_dims, float range_min, + float range_max) override { const auto& input = image_packet.Get(); if (input.format() != mediapipe::GpuBufferFormat::kBGRA32) { return InvalidArgumentError( @@ -273,7 +286,7 @@ class GlProcessor : public ImageToTensorConverter { MP_RETURN_IF_ERROR(gl_helper_.RunInGlContext( [this, &tensor, &input, &roi, &output_dims, range_min, - range_max]() -> ::mediapipe::Status { + range_max]() -> mediapipe::Status { constexpr int kRgbaNumChannels = 4; auto source_texture = gl_helper_.CreateSourceTexture(input); tflite::gpu::gl::GlTexture input_texture( @@ -303,7 +316,7 @@ class GlProcessor : public ImageToTensorConverter { tflite::gpu::HW(output_dims.height, output_dims.width), command_queue_.get(), &output)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); return tensor; @@ -325,11 +338,12 @@ class GlProcessor : public ImageToTensorConverter { } // namespace -::mediapipe::StatusOr> +mediapipe::StatusOr> CreateImageToGlBufferTensorConverter(CalculatorContext* cc, - bool input_starts_at_bottom) { + bool input_starts_at_bottom, + BorderMode border_mode) { auto result = absl::make_unique(); - MP_RETURN_IF_ERROR(result->Init(cc, input_starts_at_bottom)); + MP_RETURN_IF_ERROR(result->Init(cc, input_starts_at_bottom, border_mode)); // Simply "return std::move(result)" failed to build on macOS with bazel. return std::unique_ptr(std::move(result)); diff --git a/mediapipe/calculators/tensor/image_to_tensor_converter_gl_buffer.h b/mediapipe/calculators/tensor/image_to_tensor_converter_gl_buffer.h index 51ca9172f..da167b5c4 100644 --- a/mediapipe/calculators/tensor/image_to_tensor_converter_gl_buffer.h +++ b/mediapipe/calculators/tensor/image_to_tensor_converter_gl_buffer.h @@ -30,9 +30,10 @@ namespace mediapipe { // Creates image to tensor (represented as OpenGL buffer) converter. 
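SubRectExtractorGl::Create above specializes a single GLSL source by concatenating optional #define blocks ahead of the fixed shader body, so one shader serves every combination of input orientation and border handling. The pattern reduced to plain strings (the real code uses absl::StrCat and tflite::gpu::gl::GetShaderHeader):

#include <string>

// Prepends one #define per enabled feature; each define toggles an #ifdef
// branch inside the shader body.
std::string BuildShaderSource(const std::string& header,
                              const std::string& body,
                              bool input_starts_at_bottom,
                              bool use_custom_zero_border) {
  std::string defines;
  if (input_starts_at_bottom) defines += "#define INPUT_STARTS_AT_BOTTOM\n";
  if (use_custom_zero_border) defines += "#define CUSTOM_ZERO_BORDER_MODE\n";
  return header + defines + body;
}

The gl_buffer header below gains the matching BorderMode parameter.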
// NOTE: mediapipe::GlCalculatorHelper::UpdateContract invocation must precede // converter creation. -::mediapipe::StatusOr> +mediapipe::StatusOr> CreateImageToGlBufferTensorConverter(CalculatorContext* cc, - bool input_starts_at_bottom); + bool input_starts_at_bottom, + BorderMode border_mode); } // namespace mediapipe diff --git a/mediapipe/calculators/tensor/image_to_tensor_converter_gl_texture.cc b/mediapipe/calculators/tensor/image_to_tensor_converter_gl_texture.cc index b02fb98c0..3bd99ea77 100644 --- a/mediapipe/calculators/tensor/image_to_tensor_converter_gl_texture.cc +++ b/mediapipe/calculators/tensor/image_to_tensor_converter_gl_texture.cc @@ -24,6 +24,7 @@ #include "absl/strings/str_cat.h" #include "mediapipe/calculators/tensor/image_to_tensor_converter.h" +#include "mediapipe/calculators/tensor/image_to_tensor_converter_gl_utils.h" #include "mediapipe/calculators/tensor/image_to_tensor_utils.h" #include "mediapipe/framework/calculator_framework.h" #include "mediapipe/framework/formats/tensor.h" @@ -40,48 +41,22 @@ namespace mediapipe { namespace { -class GlParametersOverride { - public: - static ::mediapipe::StatusOr Create( - const std::vector>& overrides) { - std::vector old_values(overrides.size()); - for (int i = 0; i < overrides.size(); ++i) { - glGetTexParameteriv(GL_TEXTURE_2D, overrides[i].first, &old_values[i]); - if (overrides[i].second != old_values[i]) { - glTexParameteri(GL_TEXTURE_2D, overrides[i].first, overrides[i].second); - } - } - return GlParametersOverride(overrides, std::move(old_values)); - } - - ::mediapipe::Status Revert() { - for (int i = 0; i < overrides_.size(); ++i) { - if (overrides_[i].second != old_values_[i]) { - glTexParameteri(GL_TEXTURE_2D, overrides_[i].first, old_values_[i]); - } - } - return ::mediapipe::OkStatus(); - } - - private: - GlParametersOverride(const std::vector>& overrides, - std::vector old_values) - : overrides_(overrides), old_values_(std::move(old_values)) {} - - std::vector> overrides_; - std::vector old_values_; -}; - constexpr int kAttribVertex = 0; constexpr int kAttribTexturePosition = 1; constexpr int kNumAttributes = 2; class GlProcessor : public ImageToTensorConverter { public: - ::mediapipe::Status Init(CalculatorContext* cc, bool input_starts_at_bottom) { + mediapipe::Status Init(CalculatorContext* cc, bool input_starts_at_bottom, + BorderMode border_mode) { MP_RETURN_IF_ERROR(gl_helper_.Open(cc)); - return gl_helper_.RunInGlContext([this, input_starts_at_bottom]() - -> ::mediapipe::Status { + return gl_helper_.RunInGlContext([this, input_starts_at_bottom, + border_mode]() -> mediapipe::Status { + use_custom_zero_border_ = + border_mode == BorderMode::kZero && + !IsGlClampToBorderSupported(gl_helper_.GetGlContext()); + border_mode_ = border_mode; + const GLint attr_location[kNumAttributes] = { kAttribVertex, kAttribTexturePosition, @@ -127,23 +102,38 @@ class GlProcessor : public ImageToTensorConverter { #endif // defined(GL_ES); void main() { - fragColor = alpha * texture2D(input_texture, sample_coordinate) + beta; + vec4 color = texture2D(input_texture, sample_coordinate); + #ifdef CUSTOM_ZERO_BORDER_MODE + float out_of_bounds = + float(sample_coordinate.x < 0.0 || sample_coordinate.x > 1.0 || + sample_coordinate.y < 0.0 || sample_coordinate.y > 1.0); + color = mix(color, vec4(0.0, 0.0, 0.0, 0.0), out_of_bounds); + #endif // defined(CUSTOM_ZERO_BORDER_MODE) + fragColor = alpha * color + beta; } )"; std::string starts_at_bottom_def; if (input_starts_at_bottom) { starts_at_bottom_def = R"( - #define 
INPUT_STARTS_AT_BOTTOM - )"; + #define INPUT_STARTS_AT_BOTTOM + )"; } // Create program and set parameters. const std::string extract_sub_rect_vertex_src = absl::StrCat(mediapipe::kMediaPipeVertexShaderPreamble, starts_at_bottom_def, kExtractSubRectVertexShader); - const std::string extract_sub_rect_frag_src = absl::StrCat( - mediapipe::kMediaPipeFragmentShaderPreamble, kExtractSubRectFragBody); + + std::string custom_zero_border_mode_def; + if (use_custom_zero_border_) { + custom_zero_border_mode_def = R"( + #define CUSTOM_ZERO_BORDER_MODE + )"; + } + const std::string extract_sub_rect_frag_src = + absl::StrCat(mediapipe::kMediaPipeFragmentShaderPreamble, + custom_zero_border_mode_def, kExtractSubRectFragBody); mediapipe::GlhCreateProgram(extract_sub_rect_vertex_src.c_str(), extract_sub_rect_frag_src.c_str(), kNumAttributes, &attr_name[0], attr_location, @@ -174,7 +164,7 @@ class GlProcessor : public ImageToTensorConverter { glBindBuffer(GL_ARRAY_BUFFER, 0); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); }); } @@ -183,11 +173,10 @@ class GlProcessor : public ImageToTensorConverter { return {image.width(), image.height()}; } - ::mediapipe::StatusOr Convert(const Packet& image_packet, - const RotatedRect& roi, - const Size& output_dims, - float range_min, - float range_max) override { + mediapipe::StatusOr Convert(const Packet& image_packet, + const RotatedRect& roi, + const Size& output_dims, float range_min, + float range_max) override { const auto& input = image_packet.Get(); if (input.format() != mediapipe::GpuBufferFormat::kBGRA32) { return InvalidArgumentError( @@ -202,7 +191,7 @@ class GlProcessor : public ImageToTensorConverter { MP_RETURN_IF_ERROR(gl_helper_.RunInGlContext( [this, &tensor, &input, &roi, &output_dims, range_min, - range_max]() -> ::mediapipe::Status { + range_max]() -> mediapipe::Status { auto input_texture = gl_helper_.CreateSourceTexture(input); constexpr float kInputImageRangeMin = 0.0f; @@ -216,17 +205,17 @@ class GlProcessor : public ImageToTensorConverter { /*flip_horizontaly=*/false, transform.scale, transform.offset, output_dims, &tensor_view)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); return tensor; } - ::mediapipe::Status ExtractSubRect(const mediapipe::GlTexture& texture, - const RotatedRect& sub_rect, - bool flip_horizontaly, float alpha, - float beta, const Size& output_dims, - Tensor::OpenGlTexture2dView* output) { + mediapipe::Status ExtractSubRect(const mediapipe::GlTexture& texture, + const RotatedRect& sub_rect, + bool flip_horizontaly, float alpha, + float beta, const Size& output_dims, + Tensor::OpenGlTexture2dView* output) { std::array transform_mat; GetRotatedSubRectToRectTransformMatrix(sub_rect, texture.width(), texture.height(), flip_horizontaly, @@ -244,11 +233,27 @@ class GlProcessor : public ImageToTensorConverter { glActiveTexture(GL_TEXTURE1); glBindTexture(texture.target(), texture.name()); - ASSIGN_OR_RETURN(auto overrides, GlParametersOverride::Create( - {{GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE}, - {GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE}, - {GL_TEXTURE_MIN_FILTER, GL_LINEAR}, - {GL_TEXTURE_MAG_FILTER, GL_LINEAR}})); + // a) Filtering. + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + + // b) Clamping. 
+ switch (border_mode_) { + case BorderMode::kReplicate: { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + break; + } + case BorderMode::kZero: { + if (!use_custom_zero_border_) { + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_BORDER); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_BORDER); + glTexParameterfv(GL_TEXTURE_2D, GL_TEXTURE_BORDER_COLOR, + std::array{0.0f, 0.0f, 0.0f, 0.0f}.data()); + } + break; + } + } glUseProgram(program_); glUniform1f(alpha_id_, alpha); @@ -271,7 +276,12 @@ class GlProcessor : public ImageToTensorConverter { // draw glDrawArrays(GL_TRIANGLE_STRIP, 0, 4); - // cleanup + // Resetting to MediaPipe texture param defaults. + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); + glDisableVertexAttribArray(kAttribVertex); glDisableVertexAttribArray(kAttribTexturePosition); glBindBuffer(GL_ARRAY_BUFFER, 0); @@ -282,7 +292,7 @@ class GlProcessor : public ImageToTensorConverter { glActiveTexture(GL_TEXTURE0); glBindTexture(GL_TEXTURE_2D, 0); - return overrides.Revert(); + return mediapipe::OkStatus(); } ~GlProcessor() override { @@ -297,6 +307,8 @@ class GlProcessor : public ImageToTensorConverter { private: mediapipe::GlCalculatorHelper gl_helper_; + bool use_custom_zero_border_ = false; + BorderMode border_mode_ = BorderMode::kReplicate; GLuint vao_ = 0; GLuint vbo_[2] = {0, 0}; GLuint program_ = 0; @@ -308,11 +320,12 @@ class GlProcessor : public ImageToTensorConverter { } // namespace -::mediapipe::StatusOr> +mediapipe::StatusOr> CreateImageToGlTextureTensorConverter(CalculatorContext* cc, - bool input_starts_at_bottom) { + bool input_starts_at_bottom, + BorderMode border_mode) { auto result = absl::make_unique(); - MP_RETURN_IF_ERROR(result->Init(cc, input_starts_at_bottom)); + MP_RETURN_IF_ERROR(result->Init(cc, input_starts_at_bottom, border_mode)); // Simply "return std::move(result)" failed to build on macOS with bazel. return std::unique_ptr(std::move(result)); diff --git a/mediapipe/calculators/tensor/image_to_tensor_converter_gl_texture.h b/mediapipe/calculators/tensor/image_to_tensor_converter_gl_texture.h index 4ae224e7d..8802f7602 100644 --- a/mediapipe/calculators/tensor/image_to_tensor_converter_gl_texture.h +++ b/mediapipe/calculators/tensor/image_to_tensor_converter_gl_texture.h @@ -13,7 +13,6 @@ // limitations under the License. #ifndef MEDIAPIPE_CALCULATORS_TENSOR_IMAGE_TO_TENSOR_CONVERTER_GL_TEXTURE_H_ - #define MEDIAPIPE_CALCULATORS_TENSOR_IMAGE_TO_TENSOR_CONVERTER_GL_TEXTURE_H_ #include "mediapipe/framework/port.h" @@ -31,9 +30,10 @@ namespace mediapipe { // Creates image to tensor (represented as OpenGL texture) converter. // NOTE: mediapipe::GlCalculatorHelper::UpdateContract invocation must precede // converter creation. 
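Both GL converters above drop their private GlParametersOverride helpers: texture parameters are now set directly before the dispatch/draw and reset to MediaPipe's texture param defaults (linear filtering, clamp-to-edge) afterwards. The reusable save-and-revert utility reappears in the new image_to_tensor_converter_gl_utils files below as an RAII object; its core idea in plain C++ (assumed names, not the MediaPipe API):

#include <functional>

// Captures the old value on construction, applies the new one, and restores
// the old value when the object goes out of scope.
class ScopedOverride {
 public:
  template <typename Getter, typename Setter, typename T>
  ScopedOverride(Getter get, Setter set, T new_value)
      : restore_([set, old = get()]() { set(old); }) {
    set(new_value);
  }
  ~ScopedOverride() { restore_(); }
  ScopedOverride(const ScopedOverride&) = delete;
  ScopedOverride& operator=(const ScopedOverride&) = delete;

 private:
  std::function<void()> restore_;
};

In the converters the hot paths simply reset known defaults, while the OverrideGlTexParametri-style guards are exercised by the new unit test below.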
-::mediapipe::StatusOr<std::unique_ptr<ImageToTensorConverter>>
+mediapipe::StatusOr<std::unique_ptr<ImageToTensorConverter>>
 CreateImageToGlTextureTensorConverter(CalculatorContext* cc,
-                                      bool input_starts_at_bottom);
+                                      bool input_starts_at_bottom,
+                                      BorderMode border_mode);
 
 }  // namespace mediapipe
 
diff --git a/mediapipe/calculators/tensor/image_to_tensor_converter_gl_utils.cc b/mediapipe/calculators/tensor/image_to_tensor_converter_gl_utils.cc
new file mode 100644
index 000000000..6fb39e0c3
--- /dev/null
+++ b/mediapipe/calculators/tensor/image_to_tensor_converter_gl_utils.cc
@@ -0,0 +1,88 @@
+#include "mediapipe/calculators/tensor/image_to_tensor_converter_gl_utils.h"
+
+#include "mediapipe/framework/port.h"
+
+#if MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_20
+
+#include <array>
+#include <memory>
+#include <utility>
+
+#include "mediapipe/framework/port/status_macros.h"
+#include "mediapipe/framework/port/statusor.h"
+#include "mediapipe/gpu/gl_base.h"
+#include "mediapipe/gpu/gl_context.h"
+
+namespace mediapipe {
+
+namespace {
+
+class GlNoOpOverride : public GlOverride {};
+
+class GlTexParameteriOverride : public GlOverride {
+ public:
+  GlTexParameteriOverride(GLenum name, GLint old_value)
+      : name_(name), old_value_(old_value) {}
+
+  ~GlTexParameteriOverride() override {
+    glTexParameteri(GL_TEXTURE_2D, name_, old_value_);
+  }
+
+ private:
+  GLenum name_;
+  GLint old_value_;
+};
+
+template <int kNumParams>
+class GlTexParameterfvOverride : public GlOverride {
+ public:
+  GlTexParameterfvOverride(GLenum name,
+                           std::array<GLfloat, kNumParams> old_values)
+      : name_(name), old_values_(std::move(old_values)) {}
+
+  ~GlTexParameterfvOverride() {
+    glTexParameterfv(GL_TEXTURE_2D, name_, &old_values_[0]);
+  }
+
+ private:
+  GLenum name_;
+  std::array<GLfloat, kNumParams> old_values_;
+};
+
+}  // namespace
+
+std::unique_ptr<GlOverride> OverrideGlTexParametri(GLenum name, GLint value) {
+  GLint old_value;
+  glGetTexParameteriv(GL_TEXTURE_2D, name, &old_value);
+  if (value != old_value) {
+    glTexParameteri(GL_TEXTURE_2D, name, value);
+    return {absl::make_unique<GlTexParameteriOverride>(name, old_value)};
+  }
+  return {absl::make_unique<GlNoOpOverride>()};
+}
+
+template <int kNumParams>
+std::unique_ptr<GlOverride> OverrideGlTexParameterfv(
+    GLenum name, std::array<GLfloat, kNumParams> values) {
+  std::array<GLfloat, kNumParams> old_values;
+  glGetTexParameterfv(GL_TEXTURE_2D, name, old_values.data());
+  if (values != old_values) {
+    glTexParameterfv(GL_TEXTURE_2D, name, values.data());
+    return {absl::make_unique<GlTexParameterfvOverride<kNumParams>>(
+        name, std::move(old_values))};
+  }
+  return {absl::make_unique<GlNoOpOverride>()};
+}
+
+template std::unique_ptr<GlOverride> OverrideGlTexParameterfv<4>(
+    GLenum name, std::array<GLfloat, 4> values);
+
+bool IsGlClampToBorderSupported(const mediapipe::GlContext& gl_context) {
+  return gl_context.gl_major_version() > 3 ||
+         (gl_context.gl_major_version() == 3 &&
+          gl_context.gl_minor_version() >= 2);
+}
+
+}  // namespace mediapipe
+
+#endif  // MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_20
diff --git a/mediapipe/calculators/tensor/image_to_tensor_converter_gl_utils.h b/mediapipe/calculators/tensor/image_to_tensor_converter_gl_utils.h
new file mode 100644
index 000000000..3105cfef1
--- /dev/null
+++ b/mediapipe/calculators/tensor/image_to_tensor_converter_gl_utils.h
@@ -0,0 +1,45 @@
+#ifndef MEDIAPIPE_CALCULATORS_TENSOR_IMAGE_TO_TENSOR_CONVERTER_GL_UTILS_H_
+#define MEDIAPIPE_CALCULATORS_TENSOR_IMAGE_TO_TENSOR_CONVERTER_GL_UTILS_H_
+
+#include "mediapipe/framework/port.h"
+
+#if MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_20
+
+#include <array>
+#include <memory>
+#include <utility>
+
+#include "mediapipe/framework/port/statusor.h"
+#include "mediapipe/gpu/gl_base.h"
+#include "mediapipe/gpu/gl_context.h"
+
+namespace mediapipe {
+
+// Intended to override and automatically revert various OpenGL
attributes. +// (e.g. overriding texture parameters like GL_TEXTURE_MIN_FILTER, +// GL_TEXTURE_MAG_FILTER, etc.) +class GlOverride { + public: + virtual ~GlOverride() = default; +}; + +// Creates an object that overrides attributes using `glTexParameteri` +// function during construction and reverts them during destruction. See +// `glTexParameteri` for details on @name and @value. +ABSL_MUST_USE_RESULT std::unique_ptr OverrideGlTexParametri( + GLenum name, GLint value); + +// Creates an object that overrides attributes using `glTexParameterfv` +// function during construction and reverts them during destruction. See +// `glTexParameterfv` for details on @name and @values. +template +ABSL_MUST_USE_RESULT std::unique_ptr OverrideGlTexParameterfv( + GLenum name, std::array values); + +bool IsGlClampToBorderSupported(const mediapipe::GlContext& gl_context); + +} // namespace mediapipe + +#endif // MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_20 + +#endif // MEDIAPIPE_CALCULATORS_TENSOR_IMAGE_TO_TENSOR_CONVERTER_GL_UTILS_H_ diff --git a/mediapipe/calculators/tensor/image_to_tensor_converter_gl_utils_test.cc b/mediapipe/calculators/tensor/image_to_tensor_converter_gl_utils_test.cc new file mode 100644 index 000000000..4c8dc3d6d --- /dev/null +++ b/mediapipe/calculators/tensor/image_to_tensor_converter_gl_utils_test.cc @@ -0,0 +1,49 @@ +#include "mediapipe/framework/port.h" + +#if MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_20 + +#include "mediapipe/calculators/tensor/image_to_tensor_converter_gl_utils.h" +#include "mediapipe/framework/port/gtest.h" +#include "mediapipe/framework/port/status_matchers.h" +#include "mediapipe/gpu/gl_base.h" +#include "mediapipe/gpu/gl_context.h" + +namespace mediapipe { +namespace { + +TEST(ImageToTensorConverterGlUtilsTest, GlTexParameteriOverrider) { + auto status_or_context = mediapipe::GlContext::Create(nullptr, false); + MP_ASSERT_OK(status_or_context); + auto context = status_or_context.ValueOrDie(); + + std::vector min_filter_changes; + context->Run([&min_filter_changes]() { + GLuint texture = 0; + glGenTextures(1, &texture); + glBindTexture(GL_TEXTURE_2D, texture); + + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); + GLint value = 0; + glGetTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, &value); + min_filter_changes.push_back(value); + + { + auto min_filter_linear = + OverrideGlTexParametri(GL_TEXTURE_MIN_FILTER, GL_LINEAR); + glGetTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, &value); + min_filter_changes.push_back(value); + + // reverter is destroyed automatically reverting previously set value + } + glGetTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, &value); + min_filter_changes.push_back(value); + }); + + EXPECT_THAT(min_filter_changes, + testing::ElementsAre(GL_NEAREST, GL_LINEAR, GL_NEAREST)); +} + +} // namespace +} // namespace mediapipe + +#endif // MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_20 diff --git a/mediapipe/calculators/tensor/image_to_tensor_converter_metal.cc b/mediapipe/calculators/tensor/image_to_tensor_converter_metal.cc index 2acb127e3..01546253f 100644 --- a/mediapipe/calculators/tensor/image_to_tensor_converter_metal.cc +++ b/mediapipe/calculators/tensor/image_to_tensor_converter_metal.cc @@ -105,8 +105,15 @@ constexpr char kFragmentShader[] = R"( const float alpha = parameters[0]; const float beta = parameters[1]; + #ifdef CLAMP_TO_ZERO + constexpr sampler linear_sampler(address::clamp_to_zero, min_filter::linear, + mag_filter::linear); + #endif // CLAMP_TO_ZERO + + 
#ifdef CLAMP_TO_EDGE constexpr sampler linear_sampler(address::clamp_to_edge, min_filter::linear, mag_filter::linear); + #endif // CLAMP_TO_EDGE Type4 texture_pixel = texture.sample(linear_sampler, vertex_output.uv); return Type4(alpha * texture_pixel.rgb + beta, 0); @@ -139,11 +146,12 @@ int GetBytesPerRaw(OutputFormat output_format, const tflite::gpu::HW& size) { class SubRectExtractorMetal { public: - static ::mediapipe::StatusOr> Make( - id device, OutputFormat output_format) { + static mediapipe::StatusOr> Make( + id device, OutputFormat output_format, + BorderMode border_mode) { id pipeline_state; MP_RETURN_IF_ERROR(SubRectExtractorMetal::MakePipelineState( - device, output_format, &pipeline_state)); + device, output_format, border_mode, &pipeline_state)); return absl::make_unique(device, pipeline_state, output_format); @@ -164,19 +172,14 @@ class SubRectExtractorMetal { [device_ newBufferWithBytes:kBasicTextureVertices length:sizeof(kBasicTextureVertices) options:MTLResourceOptionCPUCacheModeDefault]; - - transform_mat_buffer_ = - [device_ newBufferWithBytes:&transform_mat_ - length:sizeof(transform_mat_) - options:MTLResourceOptionCPUCacheModeDefault]; } - ::mediapipe::Status Execute(id input_texture, - const RotatedRect& sub_rect, - bool flip_horizontaly, float alpha, float beta, - const tflite::gpu::HW& destination_size, - id command_buffer, - id destination) { + mediapipe::Status Execute(id input_texture, + const RotatedRect& sub_rect, bool flip_horizontaly, + float alpha, float beta, + const tflite::gpu::HW& destination_size, + id command_buffer, + id destination) { auto output_texture = MTLTextureWithBuffer(destination_size, destination); return InternalExecute(input_texture, sub_rect, flip_horizontaly, alpha, beta, destination_size, command_buffer, @@ -202,23 +205,26 @@ class SubRectExtractorMetal { return texture; } - ::mediapipe::Status InternalExecute(id input_texture, - const RotatedRect& sub_rect, - bool flip_horizontaly, float alpha, - float beta, - const tflite::gpu::HW& destination_size, - id command_buffer, - id output_texture) { + mediapipe::Status InternalExecute(id input_texture, + const RotatedRect& sub_rect, + bool flip_horizontaly, float alpha, + float beta, + const tflite::gpu::HW& destination_size, + id command_buffer, + id output_texture) { RET_CHECK(command_buffer != nil); RET_CHECK(output_texture != nil); // Obtain texture mapping coordinates transformation matrix and copy its // data to the buffer. + std::array transform_mat; GetRotatedSubRectToRectTransformMatrix(sub_rect, input_texture.width, input_texture.height, - flip_horizontaly, &transform_mat_); - std::memcpy(reinterpret_cast(transform_mat_buffer_.contents), - transform_mat_.data(), sizeof(transform_mat_)); + flip_horizontaly, &transform_mat); + id transform_mat_buffer = + [device_ newBufferWithBytes:&transform_mat + length:sizeof(transform_mat) + options:MTLResourceOptionCPUCacheModeDefault]; // Create parameters wrapper. 
float parameters[] = {alpha, beta}; @@ -237,7 +243,7 @@ class SubRectExtractorMetal { [command_encoder setRenderPipelineState:pipeline_state_]; [command_encoder setVertexBuffer:positions_buffer_ offset:0 atIndex:0]; [command_encoder setVertexBuffer:tex_coords_buffer_ offset:0 atIndex:1]; - [command_encoder setVertexBuffer:transform_mat_buffer_ offset:0 atIndex:2]; + [command_encoder setVertexBuffer:transform_mat_buffer offset:0 atIndex:2]; [command_encoder setFragmentTexture:input_texture atIndex:0]; [command_encoder setFragmentBytes:¶meters length:sizeof(parameters) @@ -248,11 +254,11 @@ class SubRectExtractorMetal { vertexCount:6]; [command_encoder endEncoding]; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - static ::mediapipe::Status MakePipelineState( - id device, OutputFormat output_format, + static mediapipe::Status MakePipelineState( + id device, OutputFormat output_format, BorderMode border_mode, id* pipeline_state) { RET_CHECK(pipeline_state != nil); @@ -271,8 +277,25 @@ class SubRectExtractorMetal { break; } - std::string shader_lib = absl::StrCat(kShaderLibHeader, output_type_def, - kVertexShader, kFragmentShader); + std::string clamp_def; + switch (border_mode) { + case BorderMode::kReplicate: { + clamp_def = R"( + #define CLAMP_TO_EDGE + )"; + break; + } + case BorderMode::kZero: { + clamp_def = R"( + #define CLAMP_TO_ZERO + )"; + break; + } + } + + std::string shader_lib = + absl::StrCat(kShaderLibHeader, output_type_def, clamp_def, + kVertexShader, kFragmentShader); NSError* error = nil; NSString* library_source = [NSString stringWithUTF8String:shader_lib.c_str()]; @@ -305,27 +328,25 @@ class SubRectExtractorMetal { RET_CHECK(error == nil) << "Couldn't create a pipeline state" << [[error localizedDescription] UTF8String]; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } id positions_buffer_; id tex_coords_buffer_; - id transform_mat_buffer_; id device_; id pipeline_state_; - std::array transform_mat_; OutputFormat output_format_; }; class MetalProcessor : public ImageToTensorConverter { public: - ::mediapipe::Status Init(CalculatorContext* cc) { + mediapipe::Status Init(CalculatorContext* cc, BorderMode border_mode) { metal_helper_ = [[MPPMetalHelper alloc] initWithCalculatorContext:cc]; RET_CHECK(metal_helper_); - ASSIGN_OR_RETURN(extractor_, - SubRectExtractorMetal::Make(metal_helper_.mtlDevice, - OutputFormat::kF32C4)); - return ::mediapipe::OkStatus(); + ASSIGN_OR_RETURN(extractor_, SubRectExtractorMetal::Make( + metal_helper_.mtlDevice, + OutputFormat::kF32C4, border_mode)); + return mediapipe::OkStatus(); } Size GetImageSize(const Packet& image_packet) override { @@ -333,11 +354,10 @@ class MetalProcessor : public ImageToTensorConverter { return {image.width(), image.height()}; } - ::mediapipe::StatusOr Convert(const Packet& image_packet, - const RotatedRect& roi, - const Size& output_dims, - float range_min, - float range_max) override { + mediapipe::StatusOr Convert(const Packet& image_packet, + const RotatedRect& roi, + const Size& output_dims, float range_min, + float range_max) override { const auto& input = image_packet.Get(); if (input.format() != mediapipe::GpuBufferFormat::kBGRA32) { return InvalidArgumentError( @@ -369,9 +389,6 @@ class MetalProcessor : public ImageToTensorConverter { tflite::gpu::HW(output_dims.height, output_dims.width), command_buffer, buffer_view.buffer())); [command_buffer commit]; - // TODO: consider removing waitUntilCompleted - [command_buffer waitUntilCompleted]; - return tensor; } } @@ 
-383,10 +400,10 @@ class MetalProcessor : public ImageToTensorConverter { } // namespace -::mediapipe::StatusOr> -CreateMetalConverter(CalculatorContext* cc) { +mediapipe::StatusOr> +CreateMetalConverter(CalculatorContext* cc, BorderMode border_mode) { auto result = absl::make_unique(); - MP_RETURN_IF_ERROR(result->Init(cc)); + MP_RETURN_IF_ERROR(result->Init(cc, border_mode)); // Simply "return std::move(result)" failed to build on macOS with bazel. return std::unique_ptr(std::move(result)); diff --git a/mediapipe/calculators/tensor/image_to_tensor_converter_metal.h b/mediapipe/calculators/tensor/image_to_tensor_converter_metal.h index c20c2cf5c..fe46c67b4 100644 --- a/mediapipe/calculators/tensor/image_to_tensor_converter_metal.h +++ b/mediapipe/calculators/tensor/image_to_tensor_converter_metal.h @@ -30,8 +30,8 @@ namespace mediapipe { // Creates Metal image-to-tensor converter. // NOTE: [MPPMetalHelper updateContract:...] invocation must precede // converter creation. -::mediapipe::StatusOr> -CreateMetalConverter(CalculatorContext* cc); +mediapipe::StatusOr> +CreateMetalConverter(CalculatorContext* cc, BorderMode border_mode); } // namespace mediapipe diff --git a/mediapipe/calculators/tensor/image_to_tensor_converter_opencv.cc b/mediapipe/calculators/tensor/image_to_tensor_converter_opencv.cc index 8c49c9322..ed109d4ef 100644 --- a/mediapipe/calculators/tensor/image_to_tensor_converter_opencv.cc +++ b/mediapipe/calculators/tensor/image_to_tensor_converter_opencv.cc @@ -35,16 +35,26 @@ namespace { class OpenCvProcessor : public ImageToTensorConverter { public: + OpenCvProcessor(BorderMode border_mode) { + switch (border_mode) { + case BorderMode::kReplicate: + border_mode_ = cv::BORDER_REPLICATE; + break; + case BorderMode::kZero: + border_mode_ = cv::BORDER_CONSTANT; + break; + } + } + Size GetImageSize(const Packet& image_packet) override { const auto& image = image_packet.Get(); return {image.Width(), image.Height()}; } - ::mediapipe::StatusOr Convert(const Packet& image_packet, - const RotatedRect& roi, - const Size& output_dims, - float range_min, - float range_max) override { + mediapipe::StatusOr Convert(const Packet& image_packet, + const RotatedRect& roi, + const Size& output_dims, float range_min, + float range_max) override { const auto& input = image_packet.Get(); if (input.Format() != mediapipe::ImageFormat::SRGB && input.Format() != mediapipe::ImageFormat::SRGBA) { @@ -84,7 +94,7 @@ class OpenCvProcessor : public ImageToTensorConverter { cv::warpPerspective(src, transformed, projection_matrix, cv::Size(dst_width, dst_height), /*flags=*/cv::INTER_LINEAR, - /*borderMode=*/cv::BORDER_REPLICATE); + /*borderMode=*/border_mode_); if (transformed.channels() > kNumChannels) { cv::Mat proper_channels_mat; @@ -101,16 +111,19 @@ class OpenCvProcessor : public ImageToTensorConverter { transformed.convertTo(dst, CV_32FC3, transform.scale, transform.offset); return tensor; } + + private: + enum cv::BorderTypes border_mode_; }; } // namespace -::mediapipe::StatusOr> -CreateOpenCvConverter(CalculatorContext* cc) { +mediapipe::StatusOr> +CreateOpenCvConverter(CalculatorContext* cc, BorderMode border_mode) { // Simply "return absl::make_unique()" failed to build on // macOS with bazel. 
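On the CPU path the mapping is direct: BorderMode::kReplicate becomes cv::BORDER_REPLICATE and BorderMode::kZero becomes cv::BORDER_CONSTANT, whose default border value is a zero scalar. A standalone warp with the same wiring as the warpPerspective call above:

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// Warps `src` by `projection_matrix` into a dst_size image; out-of-image
// pixels become zeros (BORDER_CONSTANT) or edge replicas (BORDER_REPLICATE).
cv::Mat WarpWithBorder(const cv::Mat& src, const cv::Mat& projection_matrix,
                       const cv::Size& dst_size, bool zero_border) {
  cv::Mat dst;
  cv::warpPerspective(src, dst, projection_matrix, dst_size, cv::INTER_LINEAR,
                      zero_border ? cv::BORDER_CONSTANT : cv::BORDER_REPLICATE,
                      /*borderValue=*/cv::Scalar());
  return dst;
}

CreateOpenCvConverter below simply forwards the chosen mode to the OpenCvProcessor constructor.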
return std::unique_ptr( - absl::make_unique()); + absl::make_unique(border_mode)); } } // namespace mediapipe diff --git a/mediapipe/calculators/tensor/image_to_tensor_converter_opencv.h b/mediapipe/calculators/tensor/image_to_tensor_converter_opencv.h index a667029ec..a10bffaf1 100644 --- a/mediapipe/calculators/tensor/image_to_tensor_converter_opencv.h +++ b/mediapipe/calculators/tensor/image_to_tensor_converter_opencv.h @@ -24,8 +24,8 @@ namespace mediapipe { // Creates OpenCV image-to-tensor converter. -::mediapipe::StatusOr> -CreateOpenCvConverter(CalculatorContext* cc); +mediapipe::StatusOr> +CreateOpenCvConverter(CalculatorContext* cc, BorderMode border_mode); } // namespace mediapipe diff --git a/mediapipe/calculators/tensor/image_to_tensor_utils.cc b/mediapipe/calculators/tensor/image_to_tensor_utils.cc index c2bfc0f53..dc5946760 100644 --- a/mediapipe/calculators/tensor/image_to_tensor_utils.cc +++ b/mediapipe/calculators/tensor/image_to_tensor_utils.cc @@ -25,23 +25,23 @@ namespace mediapipe { RotatedRect GetRoi(int input_width, int input_height, absl::optional norm_rect) { if (norm_rect) { - return {.center_x = norm_rect->x_center() * input_width, - .center_y = norm_rect->y_center() * input_height, - .width = norm_rect->width() * input_width, - .height = norm_rect->height() * input_height, - .rotation = norm_rect->rotation()}; + return {/*center_x=*/norm_rect->x_center() * input_width, + /*center_y =*/norm_rect->y_center() * input_height, + /*width =*/norm_rect->width() * input_width, + /*height =*/norm_rect->height() * input_height, + /*rotation =*/norm_rect->rotation()}; } - return {.center_x = 0.5f * input_width, - .center_y = 0.5f * input_height, - .width = static_cast(input_width), - .height = static_cast(input_height), - .rotation = 0}; + return {/*center_x=*/0.5f * input_width, + /*center_y =*/0.5f * input_height, + /*width =*/static_cast(input_width), + /*height =*/static_cast(input_height), + /*rotation =*/0}; } -::mediapipe::StatusOr> PadRoi(int input_tensor_width, - int input_tensor_height, - bool keep_aspect_ratio, - RotatedRect* roi) { +mediapipe::StatusOr> PadRoi(int input_tensor_width, + int input_tensor_height, + bool keep_aspect_ratio, + RotatedRect* roi) { if (!keep_aspect_ratio) { return std::array{0.0f, 0.0f, 0.0f, 0.0f}; } @@ -76,7 +76,7 @@ RotatedRect GetRoi(int input_width, int input_height, horizontal_padding, vertical_padding}; } -::mediapipe::StatusOr GetValueRangeTransformation( +mediapipe::StatusOr GetValueRangeTransformation( float from_range_min, float from_range_max, float to_range_min, float to_range_max) { RET_CHECK_LT(from_range_min, from_range_max) diff --git a/mediapipe/calculators/tensor/image_to_tensor_utils.h b/mediapipe/calculators/tensor/image_to_tensor_utils.h index 6cb735256..44ba28902 100644 --- a/mediapipe/calculators/tensor/image_to_tensor_utils.h +++ b/mediapipe/calculators/tensor/image_to_tensor_utils.h @@ -37,10 +37,10 @@ RotatedRect GetRoi(int input_width, int input_height, // Pads ROI, so extraction happens correctly if aspect ratio is to be kept. // Returns letterbox padding applied. -::mediapipe::StatusOr> PadRoi(int input_tensor_width, - int input_tensor_height, - bool keep_aspect_ratio, - RotatedRect* roi); +mediapipe::StatusOr> PadRoi(int input_tensor_width, + int input_tensor_height, + bool keep_aspect_ratio, + RotatedRect* roi); // Represents a transformation of value which involves scaling and offsetting. 
// To apply transformation: @@ -55,7 +55,7 @@ struct ValueTransformation { // [from_range_min, from_range_max] into [to_range_min, to_range_max] range. // from_range_min must be less than from_range_max // to_range_min must be less than to_range_max -::mediapipe::StatusOr GetValueRangeTransformation( +mediapipe::StatusOr GetValueRangeTransformation( float from_range_min, float from_range_max, float to_range_min, float to_range_max); diff --git a/mediapipe/calculators/tensor/inference_calculator.cc b/mediapipe/calculators/tensor/inference_calculator.cc index dc02de170..f675813b5 100644 --- a/mediapipe/calculators/tensor/inference_calculator.cc +++ b/mediapipe/calculators/tensor/inference_calculator.cc @@ -23,12 +23,12 @@ #include "mediapipe/framework/formats/tensor.h" #include "mediapipe/framework/port/ret_check.h" #include "mediapipe/util/tflite/config.h" +#include "mediapipe/util/tflite/tflite_model_loader.h" #if !defined(__EMSCRIPTEN__) || defined(__EMSCRIPTEN_PTHREADS__) #include "mediapipe/util/cpu_util.h" #endif // !__EMSCRIPTEN__ || __EMSCRIPTEN_PTHREADS__ -#include "mediapipe/util/resource_util.h" #include "tensorflow/lite/error_reporter.h" #include "tensorflow/lite/interpreter.h" #include "tensorflow/lite/kernels/register.h" @@ -228,23 +228,20 @@ class InferenceCalculator : public CalculatorBase { public: using TfLiteDelegatePtr = std::unique_ptr>; - using TfLiteModelPtr = - std::unique_ptr>; - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; private: - ::mediapipe::Status ReadKernelsFromFile(); - ::mediapipe::Status WriteKernelsToFile(); - ::mediapipe::Status LoadModel(CalculatorContext* cc); - ::mediapipe::StatusOr GetModelAsPacket(const CalculatorContext& cc); - ::mediapipe::Status LoadDelegate(CalculatorContext* cc); - ::mediapipe::Status InitTFLiteGPURunner(CalculatorContext* cc); + mediapipe::Status ReadKernelsFromFile(); + mediapipe::Status WriteKernelsToFile(); + mediapipe::Status LoadModel(CalculatorContext* cc); + mediapipe::StatusOr GetModelAsPacket(const CalculatorContext& cc); + mediapipe::Status LoadDelegate(CalculatorContext* cc); + mediapipe::Status InitTFLiteGPURunner(CalculatorContext* cc); Packet model_packet_; std::unique_ptr interpreter_; @@ -253,6 +250,9 @@ class InferenceCalculator : public CalculatorBase { #if MEDIAPIPE_TFLITE_GL_INFERENCE mediapipe::GlCalculatorHelper gpu_helper_; std::unique_ptr tflite_gpu_runner_; + bool allow_precision_loss_ = false; + mediapipe::InferenceCalculatorOptions::Delegate::Gpu::API + tflite_gpu_runner_api_; #elif MEDIAPIPE_TFLITE_METAL_INFERENCE MPPMetalHelper* gpu_helper_ = nullptr; TFLBufferConvert* converter_to_BPHWC4_ = nil; @@ -276,9 +276,10 @@ class InferenceCalculator : public CalculatorBase { bool use_kernel_caching_ = false; std::string cached_kernel_filename_; }; + REGISTER_CALCULATOR(InferenceCalculator); -::mediapipe::Status InferenceCalculator::GetContract(CalculatorContract* cc) { +mediapipe::Status InferenceCalculator::GetContract(CalculatorContract* cc) { RET_CHECK(cc->Inputs().HasTag(kTensorsTag)); cc->Inputs().Tag(kTensorsTag).Set>(); 
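GetValueRangeTransformation above is a plain linear map: a pixel value v in [from_min, from_max] becomes v * scale + offset in [to_min, to_max]. Worked out, mirroring the ValueTransformation struct:

// scale/offset such that from_min maps to to_min and from_max to to_max.
struct ValueTransformation {
  float scale;
  float offset;
};

ValueTransformation RangeTransform(float from_min, float from_max,
                                   float to_min, float to_max) {
  const float scale = (to_max - to_min) / (from_max - from_min);
  return {scale, to_min - from_min * scale};
}

// E.g. bytes to signed floats, [0, 255] -> [-1, 1]:
// scale = 2.0f / 255.0f, offset = -1.0f.

The RET_CHECK_LT guards in the implementation reject degenerate input ranges before this division.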
   RET_CHECK(cc->Outputs().HasTag(kTensorsTag));
@@ -305,10 +306,10 @@ REGISTER_CALCULATOR(InferenceCalculator);
     MP_RETURN_IF_ERROR([MPPMetalHelper updateContract:cc]);
 #endif
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status InferenceCalculator::Open(CalculatorContext* cc) {
+mediapipe::Status InferenceCalculator::Open(CalculatorContext* cc) {
   cc->SetOffset(TimestampDiff(0));

 #if MEDIAPIPE_TFLITE_GL_INFERENCE || MEDIAPIPE_TFLITE_METAL_INFERENCE
@@ -318,6 +319,8 @@ REGISTER_CALCULATOR(InferenceCalculator);
   use_advanced_gpu_api_ = options.has_delegate() &&
                           options.delegate().has_gpu() &&
                           options.delegate().gpu().use_advanced_gpu_api();
+  allow_precision_loss_ = options.delegate().gpu().allow_precision_loss();
+  tflite_gpu_runner_api_ = options.delegate().gpu().api();
   use_kernel_caching_ =
       use_advanced_gpu_api_ && options.delegate().gpu().use_kernel_caching();
 #endif  // MEDIAPIPE_TFLITE_GL_INFERENCE
@@ -354,17 +357,21 @@ REGISTER_CALCULATOR(InferenceCalculator);
   } else {
     MP_RETURN_IF_ERROR(LoadDelegate(cc));
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status InferenceCalculator::Process(CalculatorContext* cc) {
+mediapipe::Status InferenceCalculator::Process(CalculatorContext* cc) {
   if (cc->Inputs().Tag(kTensorsTag).IsEmpty()) {
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
   const auto& input_tensors =
       cc->Inputs().Tag(kTensorsTag).Get<std::vector<Tensor>>();
   RET_CHECK(!input_tensors.empty());
   auto output_tensors = absl::make_unique<std::vector<Tensor>>();
+#if MEDIAPIPE_TFLITE_METAL_INFERENCE
+  id<MTLCommandBuffer> command_buffer;
+  id<MTLComputeCommandEncoder> compute_encoder;
+#endif  // MEDIAPIPE_TFLITE_METAL_INFERENCE

   if (use_gpu_delegate_ || use_advanced_gpu_api_) {
 #if MEDIAPIPE_TFLITE_GL_INFERENCE
@@ -382,7 +389,7 @@ REGISTER_CALCULATOR(InferenceCalculator);
             MP_RETURN_IF_ERROR(tflite_gpu_runner_->BindSSBOToOutputTensor(
                 output_tensors->back().GetOpenGlBufferWriteView().name(), i));
           }
-          return ::mediapipe::OkStatus();
+          return mediapipe::OkStatus();
         }));
   } else {
     MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext(
@@ -397,15 +404,14 @@ REGISTER_CALCULATOR(InferenceCalculator);
             glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0,
                                 0, input_tensors[i].bytes());
           }
-          return ::mediapipe::OkStatus();
+          return mediapipe::OkStatus();
         }));
   }
 #elif MEDIAPIPE_TFLITE_METAL_INFERENCE
+    command_buffer = [gpu_helper_ commandBuffer];
+    command_buffer.label = @"InferenceCalculator";
+    compute_encoder = [command_buffer computeCommandEncoder];
     // Explicit copy input with conversion float 32 bits to 16 bits.
-    id<MTLCommandBuffer> command_buffer = [gpu_helper_ commandBuffer];
-    command_buffer.label = @"InferenceCalculatorConvert";
-    id<MTLComputeCommandEncoder> compute_encoder =
-        [command_buffer computeCommandEncoder];
     for (int i = 0; i < input_tensors.size(); ++i) {
       auto input_view = input_tensors[i].GetMtlBufferReadView(command_buffer);
       // Reshape tensor.
@@ -417,8 +423,6 @@ REGISTER_CALCULATOR(InferenceCalculator);
                      sourceBuffer:input_view.buffer()
                   convertedBuffer:gpu_buffer_view.buffer()];
     }
-    [compute_encoder endEncoding];
-    [command_buffer commit];
 #endif  // MEDIAPIPE_TFLITE_GL_INFERENCE
   } else {
     // Read CPU input into tensors.
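The hunks above wire two new fields of the GPU delegate options, read in Open(), into the calculator state. For reference, a graph would opt in along these lines (a hedged sketch: the extension name follows the usual MediaPipe options pattern, the model path is a placeholder, and the authoritative field layout lives in inference_calculator.proto, which is not part of this excerpt):

  node {
    calculator: "InferenceCalculator"
    input_stream: "TENSORS:input_tensors"
    output_stream: "TENSORS:output_tensors"
    options {
      [mediapipe.InferenceCalculatorOptions.ext] {
        model_path: "model.tflite"  # placeholder path
        delegate {
          gpu {
            use_advanced_gpu_api: true
            allow_precision_loss: true  # selects MIN_LATENCY; otherwise MAX_PRECISION
            api: OPENCL                 # or OPENGL / ANY
          }
        }
      }
    }
  }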
@@ -440,6 +444,12 @@ REGISTER_CALCULATOR(InferenceCalculator);
     RET_CHECK_EQ(interpreter_->Invoke(), kTfLiteOk);
   }
 #else
+#if MEDIAPIPE_TFLITE_METAL_INFERENCE
+  if (use_gpu_delegate_) {
+    RET_CHECK(
+        TFLGpuDelegateSetCommandEncoder(delegate_.get(), compute_encoder));
+  }
+#endif  // MEDIAPIPE_TFLITE_METAL_INFERENCE
   RET_CHECK_EQ(interpreter_->Invoke(), kTfLiteOk);
 #endif  // MEDIAPIPE_TFLITE_GL_INFERENCE

@@ -461,15 +471,11 @@ REGISTER_CALCULATOR(InferenceCalculator);
             glCopyBufferSubData(GL_COPY_READ_BUFFER, GL_COPY_WRITE_BUFFER, 0,
                                 0, t->bytes());
           }
-          return ::mediapipe::OkStatus();
+          return mediapipe::OkStatus();
         }));
   }
   // Output tensors are already bound if use_advanced_gpu_api_ is true.
 #elif MEDIAPIPE_TFLITE_METAL_INFERENCE
-    id<MTLCommandBuffer> command_buffer = [gpu_helper_ commandBuffer];
-    command_buffer.label = @"InferenceBPHWC4Convert";
-    id<MTLComputeCommandEncoder> convert_command =
-        [command_buffer computeCommandEncoder];
     output_tensors->reserve(output_shapes_.size());
     for (int i = 0; i < output_shapes_.size(); ++i) {
       output_tensors->emplace_back(Tensor::ElementType::kFloat32,
@@ -480,12 +486,12 @@ REGISTER_CALCULATOR(InferenceCalculator);
           gpu_buffers_out_[i]->GetMtlBufferReadView(command_buffer);
       auto write_view =
          output_tensors->at(i).GetMtlBufferWriteView(command_buffer);
-      [converter_from_BPHWC4_ convertWithEncoder:convert_command
+      [converter_from_BPHWC4_ convertWithEncoder:compute_encoder
                                            shape:shape
                                     sourceBuffer:read_view.buffer()
                                  convertedBuffer:write_view.buffer()];
     }
-    [convert_command endEncoding];
+    [compute_encoder endEncoding];
     [command_buffer commit];
 #endif  // MEDIAPIPE_TFLITE_GL_INFERENCE
   } else {
@@ -506,10 +512,10 @@ REGISTER_CALCULATOR(InferenceCalculator);
   cc->Outputs()
       .Tag(kTensorsTag)
       .Add(output_tensors.release(), cc->InputTimestamp());
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status InferenceCalculator::WriteKernelsToFile() {
+mediapipe::Status InferenceCalculator::WriteKernelsToFile() {
 #if MEDIAPIPE_TFLITE_GL_INFERENCE && defined(MEDIAPIPE_ANDROID)
   if (use_kernel_caching_) {
     // Save kernel file.
@@ -520,17 +526,17 @@ REGISTER_CALCULATOR(InferenceCalculator);
         mediapipe::file::SetContents(cached_kernel_filename_, cache_str));
   }
 #endif  // MEDIAPIPE_TFLITE_GL_INFERENCE && MEDIAPIPE_ANDROID
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status InferenceCalculator::Close(CalculatorContext* cc) {
+mediapipe::Status InferenceCalculator::Close(CalculatorContext* cc) {
   MP_RETURN_IF_ERROR(WriteKernelsToFile());
 #if MEDIAPIPE_TFLITE_GL_INFERENCE
   if (use_gpu_delegate_) {
     MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext([this]() -> Status {
       gpu_buffers_in_.clear();
       gpu_buffers_out_.clear();
-      return ::mediapipe::OkStatus();
+      return mediapipe::OkStatus();
     }));
   }
 #elif MEDIAPIPE_TFLITE_METAL_INFERENCE
@@ -545,10 +551,10 @@ REGISTER_CALCULATOR(InferenceCalculator);
 #endif
   interpreter_ = nullptr;
   delegate_ = nullptr;
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status InferenceCalculator::ReadKernelsFromFile() {
+mediapipe::Status InferenceCalculator::ReadKernelsFromFile() {
 #if MEDIAPIPE_TFLITE_GL_INFERENCE && defined(MEDIAPIPE_ANDROID)
   if (use_kernel_caching_) {
     // Load pre-compiled kernel file.
@@ -561,10 +567,10 @@ REGISTER_CALCULATOR(InferenceCalculator);
     }
   }
 #endif  // MEDIAPIPE_TFLITE_GL_INFERENCE && MEDIAPIPE_ANDROID
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status InferenceCalculator::InitTFLiteGPURunner(
+mediapipe::Status InferenceCalculator::InitTFLiteGPURunner(
     CalculatorContext* cc) {
 #if MEDIAPIPE_TFLITE_GL_INFERENCE
   ASSIGN_OR_RETURN(model_packet_, GetModelAsPacket(*cc));
@@ -578,11 +584,27 @@ REGISTER_CALCULATOR(InferenceCalculator);

   // Create runner
   tflite::gpu::InferenceOptions options;
-  options.priority1 = tflite::gpu::InferencePriority::MIN_LATENCY;
+  options.priority1 = allow_precision_loss_
+                          ? tflite::gpu::InferencePriority::MIN_LATENCY
+                          : tflite::gpu::InferencePriority::MAX_PRECISION;
   options.priority2 = tflite::gpu::InferencePriority::AUTO;
   options.priority3 = tflite::gpu::InferencePriority::AUTO;
   options.usage = tflite::gpu::InferenceUsage::SUSTAINED_SPEED;
   tflite_gpu_runner_ = std::make_unique<tflite::gpu::TFLiteGPURunner>(options);
+  switch (tflite_gpu_runner_api_) {
+    case mediapipe::InferenceCalculatorOptions::Delegate::Gpu::OPENGL: {
+      tflite_gpu_runner_->ForceOpenGL();
+      break;
+    }
+    case mediapipe::InferenceCalculatorOptions::Delegate::Gpu::OPENCL: {
+      tflite_gpu_runner_->ForceOpenCL();
+      break;
+    }
+    case mediapipe::InferenceCalculatorOptions::Delegate::Gpu::ANY: {
+      // Do not need to force any specific API.
+      break;
+    }
+  }
   MP_RETURN_IF_ERROR(
       tflite_gpu_runner_->InitializeWithModel(model, op_resolver));
@@ -601,10 +623,10 @@ REGISTER_CALCULATOR(InferenceCalculator);
   MP_RETURN_IF_ERROR(tflite_gpu_runner_->Build());
 #endif  // MEDIAPIPE_TFLITE_GL_INFERENCE

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status InferenceCalculator::LoadModel(CalculatorContext* cc) {
+mediapipe::Status InferenceCalculator::LoadModel(CalculatorContext* cc) {
   ASSIGN_OR_RETURN(model_packet_, GetModelAsPacket(*cc));
   const auto& model = *model_packet_.Get<TfLiteModelPtr>();
   tflite::ops::builtin::BuiltinOpResolver op_resolver;
@@ -634,37 +656,30 @@ REGISTER_CALCULATOR(InferenceCalculator);
   CHECK(interpreter_->tensor(interpreter_->inputs()[0])->quantization.type !=
         kTfLiteAffineQuantization);

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::StatusOr<Packet> InferenceCalculator::GetModelAsPacket(
+mediapipe::StatusOr<Packet> InferenceCalculator::GetModelAsPacket(
     const CalculatorContext& cc) {
   const auto& options = cc.Options<mediapipe::InferenceCalculatorOptions>();
   if (!options.model_path().empty()) {
-    std::string model_path = options.model_path();
-
-    ASSIGN_OR_RETURN(model_path, mediapipe::PathToResourceAsFile(model_path));
-
-    auto model = tflite::FlatBufferModel::BuildFromFile(model_path.c_str());
-    RET_CHECK(model) << "Failed to load model from path.";
-    return MakePacket<TfLiteModelPtr>(TfLiteModelPtr(
-        model.release(), [](tflite::FlatBufferModel* model) { delete model; }));
+    return TfLiteModelLoader::LoadFromPath(options.model_path());
   }
   if (cc.InputSidePackets().HasTag("MODEL")) {
     return cc.InputSidePackets().Tag("MODEL");
   }
-  return ::mediapipe::Status(
-      ::mediapipe::StatusCode::kNotFound,
+  return mediapipe::Status(
+      mediapipe::StatusCode::kNotFound,
       "Must specify TFLite model as path or loaded model.");
 }

-::mediapipe::Status InferenceCalculator::LoadDelegate(CalculatorContext* cc) {
+mediapipe::Status InferenceCalculator::LoadDelegate(CalculatorContext* cc) {
   const auto& calculator_opts =
       cc->Options<mediapipe::InferenceCalculatorOptions>();
   if (calculator_opts.has_delegate() &&
       calculator_opts.delegate().has_tflite()) {
     // Default tflite inference requested - no need to modify graph.
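    // (This branch is taken when the graph explicitly set
    // delegate { tflite {} }, i.e. plain CPU interpreter execution.)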
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

   if (!use_gpu_delegate_) {
@@ -683,7 +698,7 @@ REGISTER_CALCULATOR(InferenceCalculator);
     });
     RET_CHECK_EQ(interpreter_->ModifyGraphWithDelegate(delegate_.get()),
                  kTfLiteOk);
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 #endif  // MEDIAPIPE_ANDROID

@@ -702,11 +717,12 @@ REGISTER_CALCULATOR(InferenceCalculator);
                                    &TfLiteXNNPackDelegateDelete);
     RET_CHECK_EQ(interpreter_->ModifyGraphWithDelegate(delegate_.get()),
                  kTfLiteOk);
+    return mediapipe::OkStatus();
   }
 #endif  // !EDGETPU

   // Return, no need for GPU delegate below.
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
   } else {
 #if MEDIAPIPE_TFLITE_GL_INFERENCE
   // Configure and create the delegate.
@@ -760,7 +776,7 @@ REGISTER_CALCULATOR(InferenceCalculator);
   // Configure and create the delegate.
   TFLGpuDelegateOptions options;
   options.allow_precision_loss = true;
-  options.wait_type = TFLGpuDelegateWaitType::TFLGpuDelegateWaitTypePassive;
+  options.wait_type = TFLGpuDelegateWaitType::TFLGpuDelegateWaitTypeDoNotWait;
   delegate_ = TfLiteDelegatePtr(TFLGpuDelegateCreate(&options),
                                 &TFLGpuDelegateDelete);
   RET_CHECK_EQ(interpreter_->ModifyGraphWithDelegate(delegate_.get()),
@@ -826,7 +842,7 @@ REGISTER_CALCULATOR(InferenceCalculator);
 #endif  // MEDIAPIPE_TFLITE_GL_INFERENCE
   }

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

 }  // namespace mediapipe
diff --git a/mediapipe/calculators/tensor/tensor_converter_calculator.cc b/mediapipe/calculators/tensor/tensor_converter_calculator.cc
index 9ac246d5a..4da199c7a 100644
--- a/mediapipe/calculators/tensor/tensor_converter_calculator.cc
+++ b/mediapipe/calculators/tensor/tensor_converter_calculator.cc
@@ -100,22 +100,21 @@ namespace mediapipe {
 class TensorConverterCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc);
+  static mediapipe::Status GetContract(CalculatorContract* cc);

-  ::mediapipe::Status Open(CalculatorContext* cc) override;
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
-  ::mediapipe::Status Close(CalculatorContext* cc) override;
+  mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Close(CalculatorContext* cc) override;

  private:
-  ::mediapipe::Status InitGpu(CalculatorContext* cc);
-  ::mediapipe::Status LoadOptions(CalculatorContext* cc);
+  mediapipe::Status InitGpu(CalculatorContext* cc);
+  mediapipe::Status LoadOptions(CalculatorContext* cc);
   template <class T>
-  ::mediapipe::Status NormalizeImage(const ImageFrame& image_frame,
-                                     bool flip_vertically, float* tensor_ptr);
-  ::mediapipe::Status CopyMatrixToTensor(const Matrix& matrix,
-                                         float* tensor_ptr);
-  ::mediapipe::Status ProcessCPU(CalculatorContext* cc);
-  ::mediapipe::Status ProcessGPU(CalculatorContext* cc);
+  mediapipe::Status NormalizeImage(const ImageFrame& image_frame,
+                                   bool flip_vertically, float* tensor_ptr);
+  mediapipe::Status CopyMatrixToTensor(const Matrix& matrix, float* tensor_ptr);
+  mediapipe::Status ProcessCPU(CalculatorContext* cc);
+  mediapipe::Status ProcessGPU(CalculatorContext* cc);

 #if MEDIAPIPE_METAL_ENABLED
   MPPMetalHelper* gpu_helper_ = nullptr;
@@ -140,7 +139,7 @@ class TensorConverterCalculator : public CalculatorBase {
 };
 REGISTER_CALCULATOR(TensorConverterCalculator);

-::mediapipe::Status TensorConverterCalculator::GetContract(
+mediapipe::Status TensorConverterCalculator::GetContract(
     CalculatorContract* cc) {
   // Confirm only one of the input streams is present.
   RET_CHECK(static_cast<int>(cc->Inputs().HasTag(kImageFrameTag)) +
@@ -168,10 +167,10 @@ REGISTER_CALCULATOR(TensorConverterCalculator);
   RET_CHECK(cc->Outputs().HasTag(kTensorsTag));
   cc->Outputs().Tag(kTensorsTag).Set<std::vector<Tensor>>();

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorConverterCalculator::Open(CalculatorContext* cc) {
+mediapipe::Status TensorConverterCalculator::Open(CalculatorContext* cc) {
   cc->SetOffset(TimestampDiff(0));
   MP_RETURN_IF_ERROR(LoadOptions(cc));
@@ -188,13 +187,13 @@ REGISTER_CALCULATOR(TensorConverterCalculator);
   }
 #endif  // !MEDIAPIPE_DISABLE_GPU

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorConverterCalculator::Process(CalculatorContext* cc) {
+mediapipe::Status TensorConverterCalculator::Process(CalculatorContext* cc) {
   if (use_gpu_) {
     if (cc->Inputs().Tag(kGpuBufferTag).IsEmpty()) {
-      return ::mediapipe::OkStatus();
+      return mediapipe::OkStatus();
     }
     // Convert to GPU tensors type.
     MP_RETURN_IF_ERROR(ProcessGPU(cc));
@@ -202,10 +201,10 @@ REGISTER_CALCULATOR(TensorConverterCalculator);
     // Convert to CPU tensors or Matrix type.
     MP_RETURN_IF_ERROR(ProcessCPU(cc));
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorConverterCalculator::Close(CalculatorContext* cc) {
+mediapipe::Status TensorConverterCalculator::Close(CalculatorContext* cc) {
 #if !MEDIAPIPE_DISABLE_GPU
   if (use_gpu_) {
 #if MEDIAPIPE_METAL_ENABLED
@@ -222,15 +221,14 @@ REGISTER_CALCULATOR(TensorConverterCalculator);
 #endif  // MEDIAPIPE_METAL_ENABLED
   }
 #endif  // !MEDIAPIPE_DISABLE_GPU
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorConverterCalculator::ProcessCPU(
-    CalculatorContext* cc) {
+mediapipe::Status TensorConverterCalculator::ProcessCPU(CalculatorContext* cc) {
   auto output_tensors = absl::make_unique<std::vector<Tensor>>();
   if (cc->Inputs().HasTag(kImageFrameTag)) {
     if (cc->Inputs().Tag(kImageFrameTag).IsEmpty()) {
-      return ::mediapipe::OkStatus();
+      return mediapipe::OkStatus();
     }
     const auto& image_frame =
         cc->Inputs().Tag(kImageFrameTag).Get<ImageFrame>();
@@ -259,12 +257,12 @@ REGISTER_CALCULATOR(TensorConverterCalculator);
       MP_RETURN_IF_ERROR(NormalizeImage<float>(image_frame, flip_vertically_,
                                                cpu_view.buffer<float>()));
     } else {
-      return ::mediapipe::InternalError(
+      return mediapipe::InternalError(
           "Only byte-based (8 bit) and float (32 bit) images supported.");
     }
   } else if (cc->Inputs().HasTag(kMatrixTag)) {
     if (cc->Inputs().Tag(kMatrixTag).IsEmpty()) {
-      return ::mediapipe::OkStatus();
+      return mediapipe::OkStatus();
     }
     const auto& matrix = cc->Inputs().Tag(kMatrixTag).Get<Matrix>();
     const int height = matrix.rows();
@@ -275,17 +273,16 @@ REGISTER_CALCULATOR(TensorConverterCalculator);
     MP_RETURN_IF_ERROR(CopyMatrixToTensor(
         matrix, output_tensors->back().GetCpuWriteView().buffer<float>()));
   } else {
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
   cc->Outputs()
       .Tag(kTensorsTag)
       .Add(output_tensors.release(), cc->InputTimestamp());

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorConverterCalculator::ProcessGPU(
-    CalculatorContext* cc) {
+mediapipe::Status TensorConverterCalculator::ProcessGPU(CalculatorContext* cc) {
 #if !MEDIAPIPE_DISABLE_GPU
   if (!initialized_) {
     MP_RETURN_IF_ERROR(InitGpu(cc));
@@ -321,7 +318,7 @@ REGISTER_CALCULATOR(TensorConverterCalculator);
   [command_buffer commit];
 #elif MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_30
   MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext(
-      [this, &output_tensors, &input]() -> ::mediapipe::Status {
+      [this, &output_tensors, &input]() -> mediapipe::Status {
         auto src = gpu_helper_.CreateSourceTexture(input);
 #if MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_31
         // Convert GL texture into SSBO.
@@ -364,7 +361,7 @@ REGISTER_CALCULATOR(TensorConverterCalculator);
         glBindTexture(GL_TEXTURE_2D, 0);
 #endif  // MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_31
         src.Release();
-        return ::mediapipe::OkStatus();
+        return mediapipe::OkStatus();
       }));
 #endif  // MEDIAPIPE_METAL_ENABLED
   cc->Outputs()
@@ -374,10 +371,10 @@ REGISTER_CALCULATOR(TensorConverterCalculator);
   RET_CHECK_FAIL() << "GPU processing is not enabled.";
 #endif  // !MEDIAPIPE_DISABLE_GPU

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorConverterCalculator::InitGpu(CalculatorContext* cc) {
+mediapipe::Status TensorConverterCalculator::InitGpu(CalculatorContext* cc) {
 #if !MEDIAPIPE_DISABLE_GPU
   // Get input image sizes.
   const auto& input =
@@ -451,7 +448,7 @@ REGISTER_CALCULATOR(TensorConverterCalculator);
                                             &input,
 #endif  // MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_31
                                             &single_channel]()
-                                               -> ::mediapipe::Status {
+                                               -> mediapipe::Status {
 #if MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_31
     // Shader to convert GL Texture to Shader Storage Buffer Object (SSBO),
     // with normalization to either: [0,1] or [-1,1].
@@ -561,14 +558,14 @@ REGISTER_CALCULATOR(TensorConverterCalculator);
     glGenFramebuffers(1, &framebuffer_);
 #endif  // MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_31

-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }));
 #endif  // MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_30
 #endif  // !MEDIAPIPE_DISABLE_GPU
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorConverterCalculator::LoadOptions(
+mediapipe::Status TensorConverterCalculator::LoadOptions(
     CalculatorContext* cc) {
   // Get calculator options specified in the graph.
   const auto& options =
@@ -607,11 +604,11 @@ REGISTER_CALCULATOR(TensorConverterCalculator);
   CHECK_GE(max_num_channels_, 1);
   CHECK_LE(max_num_channels_, 4);
   CHECK_NE(max_num_channels_, 2);
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

 template <class T>
-::mediapipe::Status TensorConverterCalculator::NormalizeImage(
+mediapipe::Status TensorConverterCalculator::NormalizeImage(
     const ImageFrame& image_frame, bool flip_vertically, float* tensor_ptr) {
   const int height = image_frame.Height();
   const int width = image_frame.Width();
@@ -655,10 +652,10 @@ template <class T>
     }
   }

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorConverterCalculator::CopyMatrixToTensor(
+mediapipe::Status TensorConverterCalculator::CopyMatrixToTensor(
     const Matrix& matrix, float* tensor_ptr) {
   if (row_major_matrix_) {
     auto matrix_map =
@@ -670,7 +667,7 @@ template <class T>
     matrix_map = matrix;
   }

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

 }  // namespace mediapipe
diff --git a/mediapipe/calculators/tensor/tensor_converter_calculator_test.cc b/mediapipe/calculators/tensor/tensor_converter_calculator_test.cc
index eccd8c73f..69eb7df77 100644
--- a/mediapipe/calculators/tensor/tensor_converter_calculator_test.cc
+++ b/mediapipe/calculators/tensor/tensor_converter_calculator_test.cc
@@ -84,7 +84,7 @@ TEST_F(TensorConverterCalculatorTest, RandomMatrixColMajor) {
   // Run the calculator and verify that one output is generated.
   CalculatorGraphConfig graph_config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
         input_stream: "matrix"
         node {
           calculator: "TensorConverterCalculator"
@@ -146,7 +146,7 @@ TEST_F(TensorConverterCalculatorTest, RandomMatrixRowMajor) {
   // Run the calculator and verify that one output is generated.
   CalculatorGraphConfig graph_config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
         input_stream: "matrix"
         node {
           calculator: "TensorConverterCalculator"
@@ -205,7 +205,7 @@ TEST_F(TensorConverterCalculatorTest, CustomDivAndSub) {
   CalculatorGraph graph;
   // Run the calculator and verify that one output is generated.
   CalculatorGraphConfig graph_config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
         input_stream: "input_image"
         node {
           calculator: "TensorConverterCalculator"
@@ -228,7 +228,7 @@ TEST_F(TensorConverterCalculatorTest, CustomDivAndSub) {
   MP_ASSERT_OK(graph.Initialize(graph_config));
   MP_ASSERT_OK(graph.StartRun({}));
   auto input_image = absl::make_unique<ImageFrame>(ImageFormat::GRAY8, 1, 1);
-  cv::Mat mat = ::mediapipe::formats::MatView(input_image.get());
+  cv::Mat mat = mediapipe::formats::MatView(input_image.get());
   mat.at<uint8>(0, 0) = 200;
   MP_ASSERT_OK(graph.AddPacketToInputStream(
       "input_image", Adopt(input_image.release()).At(Timestamp(0))));
@@ -259,7 +259,7 @@ TEST_F(TensorConverterCalculatorTest, SetOutputRange) {
   for (std::pair<float, float> range : range_values) {
     CalculatorGraph graph;
     CalculatorGraphConfig graph_config =
-        ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(
+        mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(
             absl::Substitute(R"(
               input_stream: "input_image"
               node {
@@ -285,7 +285,7 @@ TEST_F(TensorConverterCalculatorTest, SetOutputRange) {
     MP_ASSERT_OK(graph.Initialize(graph_config));
     MP_ASSERT_OK(graph.StartRun({}));
     auto input_image = absl::make_unique<ImageFrame>(ImageFormat::GRAY8, 1, 1);
-    cv::Mat mat = ::mediapipe::formats::MatView(input_image.get());
+    cv::Mat mat = mediapipe::formats::MatView(input_image.get());
     mat.at<uint8>(0, 0) = 200;
     MP_ASSERT_OK(graph.AddPacketToInputStream(
         "input_image", Adopt(input_image.release()).At(Timestamp(0))));
diff --git a/mediapipe/calculators/tensor/tensors_to_classification_calculator.cc b/mediapipe/calculators/tensor/tensors_to_classification_calculator.cc
index 6dca95d8e..b9a72ac1d 100644
--- a/mediapipe/calculators/tensor/tensors_to_classification_calculator.cc
+++ b/mediapipe/calculators/tensor/tensors_to_classification_calculator.cc
@@ -59,11 +59,11 @@ namespace mediapipe {
 // }
 class TensorsToClassificationCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc);
+  static mediapipe::Status GetContract(CalculatorContract* cc);

-  ::mediapipe::Status Open(CalculatorContext* cc) override;
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
-  ::mediapipe::Status Close(CalculatorContext* cc) override;
+  mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Close(CalculatorContext* cc) override;

  private:
   ::mediapipe::TensorsToClassificationCalculatorOptions options_;
@@ -73,7 +73,7 @@ class TensorsToClassificationCalculator : public CalculatorBase {
 };
 REGISTER_CALCULATOR(TensorsToClassificationCalculator);

-::mediapipe::Status TensorsToClassificationCalculator::GetContract(
+mediapipe::Status TensorsToClassificationCalculator::GetContract(
     CalculatorContract* cc) {
   RET_CHECK(!cc->Inputs().GetTags().empty());
   RET_CHECK(!cc->Outputs().GetTags().empty());
@@ -86,10 +86,10 @@ REGISTER_CALCULATOR(TensorsToClassificationCalculator);
     cc->Outputs().Tag("CLASSIFICATIONS").Set<ClassificationList>();
   }

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorsToClassificationCalculator::Open(
+mediapipe::Status TensorsToClassificationCalculator::Open(
     CalculatorContext* cc) {
   cc->SetOffset(TimestampDiff(0));
@@ -113,10 +113,10 @@ REGISTER_CALCULATOR(TensorsToClassificationCalculator);
     label_map_loaded_ = true;
   }

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorsToClassificationCalculator::Process(
+mediapipe::Status TensorsToClassificationCalculator::Process(
     CalculatorContext* cc) {
   const auto& input_tensors =
       cc->Inputs().Tag("TENSORS").Get<std::vector<Tensor>>();
@@ -186,12 +186,12 @@ REGISTER_CALCULATOR(TensorsToClassificationCalculator);
       .Tag("CLASSIFICATIONS")
       .Add(classification_list.release(), cc->InputTimestamp());

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorsToClassificationCalculator::Close(
+mediapipe::Status TensorsToClassificationCalculator::Close(
     CalculatorContext* cc) {
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

 }  // namespace mediapipe
diff --git a/mediapipe/calculators/tensor/tensors_to_classification_calculator_test.cc b/mediapipe/calculators/tensor/tensors_to_classification_calculator_test.cc
index 8e2619429..8f4877dad 100644
--- a/mediapipe/calculators/tensor/tensors_to_classification_calculator_test.cc
+++ b/mediapipe/calculators/tensor/tensors_to_classification_calculator_test.cc
@@ -27,7 +27,7 @@

 namespace mediapipe {

-using ::mediapipe::ParseTextProtoOrDie;
+using mediapipe::ParseTextProtoOrDie;
 using Node = ::mediapipe::CalculatorGraphConfig::Node;

 class TensorsToClassificationCalculatorTest : public ::testing::Test {
diff --git a/mediapipe/calculators/tensor/tensors_to_detections_calculator.cc b/mediapipe/calculators/tensor/tensors_to_detections_calculator.cc
index 39add5062..c095ea8bb 100644
--- a/mediapipe/calculators/tensor/tensors_to_detections_calculator.cc
+++ b/mediapipe/calculators/tensor/tensors_to_detections_calculator.cc
@@ -130,24 +130,24 @@ void ConvertAnchorsToRawValues(const std::vector<Anchor>& anchors,
 // }
 class TensorsToDetectionsCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc);
+  static mediapipe::Status GetContract(CalculatorContract* cc);

-  ::mediapipe::Status Open(CalculatorContext* cc) override;
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
-  ::mediapipe::Status Close(CalculatorContext* cc) override;
+  mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Close(CalculatorContext* cc) override;

  private:
-  ::mediapipe::Status ProcessCPU(CalculatorContext* cc,
-                                 std::vector<Detection>* output_detections);
-  ::mediapipe::Status ProcessGPU(CalculatorContext* cc,
-                                 std::vector<Detection>* output_detections);
+  mediapipe::Status ProcessCPU(CalculatorContext* cc,
+                               std::vector<Detection>* output_detections);
+  mediapipe::Status ProcessGPU(CalculatorContext* cc,
+                               std::vector<Detection>* output_detections);

-  ::mediapipe::Status LoadOptions(CalculatorContext* cc);
-  ::mediapipe::Status GpuInit(CalculatorContext* cc);
-  ::mediapipe::Status DecodeBoxes(const float* raw_boxes,
-                                  const std::vector<Anchor>& anchors,
-                                  std::vector<float>* boxes);
-  ::mediapipe::Status ConvertToDetections(
+  mediapipe::Status LoadOptions(CalculatorContext* cc);
+  mediapipe::Status GpuInit(CalculatorContext* cc);
+  mediapipe::Status DecodeBoxes(const float* raw_boxes,
+                                const std::vector<Anchor>& anchors,
+                                std::vector<float>* boxes);
+  mediapipe::Status ConvertToDetections(
       const float* detection_boxes, const float* detection_scores,
       const int* detection_classes, std::vector<Detection>* output_detections);
   Detection ConvertToDetection(float box_ymin, float box_xmin, float box_ymax,
@@ -181,7 +181,7 @@ class TensorsToDetectionsCalculator : public CalculatorBase {
 };
 REGISTER_CALCULATOR(TensorsToDetectionsCalculator);

-::mediapipe::Status TensorsToDetectionsCalculator::GetContract(
+mediapipe::Status TensorsToDetectionsCalculator::GetContract(
     CalculatorContract* cc) {
   RET_CHECK(cc->Inputs().HasTag(kTensorsTag));
   cc->Inputs().Tag(kTensorsTag).Set<std::vector<Tensor>>();
@@ -203,10 +203,10 @@ REGISTER_CALCULATOR(TensorsToDetectionsCalculator);
 #endif  // !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
   }

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorsToDetectionsCalculator::Open(CalculatorContext* cc) {
+mediapipe::Status TensorsToDetectionsCalculator::Open(CalculatorContext* cc) {
   cc->SetOffset(TimestampDiff(0));
   side_packet_anchors_ = cc->InputSidePackets().HasTag(kAnchorsTag);
   MP_RETURN_IF_ERROR(LoadOptions(cc));
@@ -221,13 +221,13 @@ REGISTER_CALCULATOR(TensorsToDetectionsCalculator);
     MP_RETURN_IF_ERROR(GpuInit(cc));
   }

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorsToDetectionsCalculator::Process(
+mediapipe::Status TensorsToDetectionsCalculator::Process(
     CalculatorContext* cc) {
   if (cc->Inputs().Tag(kTensorsTag).IsEmpty()) {
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

   auto output_detections = absl::make_unique<std::vector<Detection>>();
@@ -256,10 +256,10 @@ REGISTER_CALCULATOR(TensorsToDetectionsCalculator);
       .Tag(kDetectionsTag)
       .Add(output_detections.release(), cc->InputTimestamp());

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorsToDetectionsCalculator::ProcessCPU(
+mediapipe::Status TensorsToDetectionsCalculator::ProcessCPU(
     CalculatorContext* cc, std::vector<Detection>* output_detections) {
   const auto& input_tensors =
       cc->Inputs().Tag(kTensorsTag).Get<std::vector<Tensor>>();
@@ -299,7 +299,7 @@ REGISTER_CALCULATOR(TensorsToDetectionsCalculator);
       anchors_ =
           cc->InputSidePackets().Tag("ANCHORS").Get<std::vector<Anchor>>();
     } else {
-      return ::mediapipe::UnavailableError("No anchor data available.");
+      return mediapipe::UnavailableError("No anchor data available.");
     }
     anchors_init_ = true;
   }
@@ -386,10 +386,10 @@ REGISTER_CALCULATOR(TensorsToDetectionsCalculator);
                                            detection_classes.data(),
                                            output_detections));
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorsToDetectionsCalculator::ProcessGPU(
+mediapipe::Status TensorsToDetectionsCalculator::ProcessGPU(
     CalculatorContext* cc, std::vector<Detection>* output_detections) {
   const auto& input_tensors =
       cc->Inputs().Tag(kTensorsTag).Get<std::vector<Tensor>>();
@@ -398,7 +398,7 @@ REGISTER_CALCULATOR(TensorsToDetectionsCalculator);
   MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext([this, &input_tensors, &cc,
                                                  &output_detections]()
-                                                    -> ::mediapipe::Status {
+                                                    -> mediapipe::Status {
     if (!anchors_init_) {
       if (side_packet_anchors_) {
         CHECK(!cc->InputSidePackets().Tag(kAnchorsTag).IsEmpty());
@@ -418,11 +418,11 @@ REGISTER_CALCULATOR(TensorsToDetectionsCalculator);
       }
       anchors_init_ = true;
     }
-
     // Use the scope to release the writable buffers' views before requesting
     // the reading buffers' views.
     {
       // Decode boxes.
+      auto scored_boxes_view = scored_boxes_buffer_->GetOpenGlBufferWriteView();
       auto decoded_boxes_view = decoded_boxes_buffer_->GetOpenGlBufferWriteView();
       glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, decoded_boxes_view.name());
@@ -434,32 +434,33 @@ REGISTER_CALCULATOR(TensorsToDetectionsCalculator);
       glDispatchCompute(num_boxes_, 1, 1);

       // Score boxes.
-      auto scored_boxes_view = scored_boxes_buffer_->GetOpenGlBufferWriteView();
       glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 0, scored_boxes_view.name());
       auto input1_view = input_tensors[1].GetOpenGlBufferReadView();
       glBindBufferBase(GL_SHADER_STORAGE_BUFFER, 1, input1_view.name());
       glUseProgram(score_program_);
       glDispatchCompute(num_boxes_, 1, 1);
     }
-
-    // TODO: b/138851969. Is it possible to output a float vector
-    // for score and an int vector for class so that we can avoid copying twice?
-    std::vector<float> detection_scores(num_boxes_);
-    std::vector<int> detection_classes(num_boxes_);
-    auto score_view = scored_boxes_buffer_->GetCpuReadView();
-    auto score_class_id_pairs = score_view.buffer<float>();
-    for (int i = 0; i < num_boxes_; ++i) {
-      detection_scores[i] = score_class_id_pairs[i * 2];
-      detection_classes[i] = static_cast<int>(score_class_id_pairs[i * 2 + 1]);
-    }
-    auto boxes_view = decoded_boxes_buffer_->GetCpuReadView();
-    auto boxes = boxes_view.buffer<float>();
-    MP_RETURN_IF_ERROR(ConvertToDetections(boxes, detection_scores.data(),
-                                           detection_classes.data(),
-                                           output_detections));
-
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }));
+
+  // TODO: b/138851969. Is it possible to output a float vector
+  // for score and an int vector for class so that we can avoid copying twice?
+  std::vector<float> detection_scores(num_boxes_);
+  std::vector<int> detection_classes(num_boxes_);
+  // The order of requesting of CpuViews must be the same as the order of
+  // requesting OpenGlViews above to avoid 'Potential mutex deadlock' message
+  // when compiled without '-c opt' option.
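+  // (Acquiring the per-tensor views in one consistent order on every thread
+  // keeps the lock acquisition order consistent, which is what rules out the
+  // reported lock-order inversion.)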
+  auto scored_boxes_view = scored_boxes_buffer_->GetCpuReadView();
+  auto score_class_id_pairs = scored_boxes_view.buffer<float>();
+  for (int i = 0; i < num_boxes_; ++i) {
+    detection_scores[i] = score_class_id_pairs[i * 2];
+    detection_classes[i] = static_cast<int>(score_class_id_pairs[i * 2 + 1]);
+  }
+  auto decoded_boxes_view = decoded_boxes_buffer_->GetCpuReadView();
+  auto boxes = decoded_boxes_view.buffer<float>();
+  MP_RETURN_IF_ERROR(ConvertToDetections(boxes, detection_scores.data(),
+                                         detection_classes.data(),
+                                         output_detections));
 #elif MEDIAPIPE_METAL_ENABLED
   id<MTLDevice> device = gpu_helper_.mtlDevice;
   if (!anchors_init_) {
@@ -497,6 +498,8 @@ REGISTER_CALCULATOR(TensorsToDetectionsCalculator);
       [command_buffer computeCommandEncoder];
   [command_encoder setComputePipelineState:decode_program_];
   {
+    auto scored_boxes_view =
+        scored_boxes_buffer_->GetMtlBufferWriteView(command_buffer);
     auto decoded_boxes_view =
         decoded_boxes_buffer_->GetMtlBufferWriteView(command_buffer);
     [command_encoder setBuffer:decoded_boxes_view.buffer() offset:0 atIndex:0];
@@ -511,8 +514,6 @@ REGISTER_CALCULATOR(TensorsToDetectionsCalculator);
               threadsPerThreadgroup:decode_threads_per_group];

   [command_encoder setComputePipelineState:score_program_];
-  auto scored_boxes_view =
-      scored_boxes_buffer_->GetMtlBufferWriteView(command_buffer);
   [command_encoder setBuffer:scored_boxes_view.buffer() offset:0 atIndex:0];
   auto input1_view = input_tensors[1].GetMtlBufferReadView(command_buffer);
   [command_encoder setBuffer:input1_view.buffer() offset:0 atIndex:1];
@@ -545,11 +546,10 @@ REGISTER_CALCULATOR(TensorsToDetectionsCalculator);
 #else
   LOG(ERROR) << "GPU input on non-Android not supported yet.";
 #endif  // !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorsToDetectionsCalculator::Close(
-    CalculatorContext* cc) {
+mediapipe::Status TensorsToDetectionsCalculator::Close(CalculatorContext* cc) {
 #ifndef MEDIAPIPE_DISABLE_GL_COMPUTE
   gpu_helper_.RunInGlContext([this] {
     decoded_boxes_buffer_ = nullptr;
@@ -566,10 +566,10 @@ REGISTER_CALCULATOR(TensorsToDetectionsCalculator);
   score_program_ = nil;
 #endif  // !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorsToDetectionsCalculator::LoadOptions(
+mediapipe::Status TensorsToDetectionsCalculator::LoadOptions(
     CalculatorContext* cc) {
   // Get calculator options specified in the graph.
   options_ = cc->Options<::mediapipe::TensorsToDetectionsCalculatorOptions>();
@@ -593,10 +593,10 @@ REGISTER_CALCULATOR(TensorsToDetectionsCalculator);
     ignore_classes_.insert(options_.ignore_classes(i));
   }

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorsToDetectionsCalculator::DecodeBoxes(
+mediapipe::Status TensorsToDetectionsCalculator::DecodeBoxes(
     const float* raw_boxes, const std::vector<Anchor>& anchors,
     std::vector<float>* boxes) {
   for (int i = 0; i < num_boxes_; ++i) {
@@ -657,10 +657,10 @@ REGISTER_CALCULATOR(TensorsToDetectionsCalculator);
     }
   }

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorsToDetectionsCalculator::ConvertToDetections(
+mediapipe::Status TensorsToDetectionsCalculator::ConvertToDetections(
     const float* detection_boxes, const float* detection_scores,
     const int* detection_classes, std::vector<Detection>* output_detections) {
   for (int i = 0; i < num_boxes_; ++i) {
@@ -697,7 +697,7 @@ REGISTER_CALCULATOR(TensorsToDetectionsCalculator);
     }
     output_detections->emplace_back(detection);
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

 Detection TensorsToDetectionsCalculator::ConvertToDetection(
@@ -720,11 +720,10 @@ Detection TensorsToDetectionsCalculator::ConvertToDetection(
   return detection;
 }

-::mediapipe::Status TensorsToDetectionsCalculator::GpuInit(
+mediapipe::Status TensorsToDetectionsCalculator::GpuInit(
     CalculatorContext* cc) {
 #ifndef MEDIAPIPE_DISABLE_GL_COMPUTE
-  MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext([this]()
-                                                    -> ::mediapipe::Status {
+  MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext([this]() -> mediapipe::Status {
     // A shader to decode detection boxes.
     const std::string decode_src = absl::Substitute(
         R"( #version 310 es
@@ -937,7 +936,7 @@ void main() {
     scored_boxes_buffer_ = absl::make_unique<Tensor>(
         Tensor::ElementType::kFloat32, Tensor::Shape{1, num_boxes_ * 2});

-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }));

 #elif MEDIAPIPE_METAL_ENABLED
@@ -1155,7 +1154,7 @@ kernel void scoreKernel(

 #endif  // !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

 }  // namespace mediapipe
diff --git a/mediapipe/calculators/tensor/tensors_to_floats_calculator.cc b/mediapipe/calculators/tensor/tensors_to_floats_calculator.cc
index 74731ebb1..8cb56f264 100644
--- a/mediapipe/calculators/tensor/tensors_to_floats_calculator.cc
+++ b/mediapipe/calculators/tensor/tensors_to_floats_calculator.cc
@@ -12,12 +12,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+#include "mediapipe/calculators/tensor/tensors_to_floats_calculator.pb.h"
 #include "mediapipe/framework/calculator_framework.h"
 #include "mediapipe/framework/formats/tensor.h"
 #include "mediapipe/framework/port/ret_check.h"

 namespace mediapipe {

+namespace {
+
+inline float Sigmoid(float value) { return 1.0f / (1.0f + std::exp(-value)); }
+
+}  // namespace
+
 // A calculator for converting Tensors to a float or a float vector.
 //
 // Input:
@@ -38,15 +45,18 @@ namespace mediapipe {
 // }
 class TensorsToFloatsCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc);
+  static mediapipe::Status GetContract(CalculatorContract* cc);

-  ::mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Open(CalculatorContext* cc) override;

-  ::mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
+
+ private:
+  ::mediapipe::TensorsToFloatsCalculatorOptions options_;
 };
 REGISTER_CALCULATOR(TensorsToFloatsCalculator);

-::mediapipe::Status TensorsToFloatsCalculator::GetContract(
+mediapipe::Status TensorsToFloatsCalculator::GetContract(
     CalculatorContract* cc) {
   RET_CHECK(cc->Inputs().HasTag("TENSORS"));
   RET_CHECK(cc->Outputs().HasTag("FLOATS") || cc->Outputs().HasTag("FLOAT"));
@@ -59,16 +69,17 @@ REGISTER_CALCULATOR(TensorsToFloatsCalculator);
     cc->Outputs().Tag("FLOAT").Set<float>();
   }

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorsToFloatsCalculator::Open(CalculatorContext* cc) {
+mediapipe::Status TensorsToFloatsCalculator::Open(CalculatorContext* cc) {
   cc->SetOffset(TimestampDiff(0));
+  options_ = cc->Options<::mediapipe::TensorsToFloatsCalculatorOptions>();

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorsToFloatsCalculator::Process(CalculatorContext* cc) {
+mediapipe::Status TensorsToFloatsCalculator::Process(CalculatorContext* cc) {
   RET_CHECK(!cc->Inputs().Tag("TENSORS").IsEmpty());

   const auto& input_tensors =
@@ -77,21 +88,30 @@ REGISTER_CALCULATOR(TensorsToFloatsCalculator);
   auto view = input_tensors[0].GetCpuReadView();
   auto raw_floats = view.buffer<float>();
   int num_values = input_tensors[0].shape().num_elements();
+  auto output_floats = absl::make_unique<std::vector<float>>(
+      raw_floats, raw_floats + num_values);
+
+  switch (options_.activation()) {
+    case TensorsToFloatsCalculatorOptions::SIGMOID:
+      std::transform(output_floats->begin(), output_floats->end(),
+                     output_floats->begin(), Sigmoid);
+      break;
+    case TensorsToFloatsCalculatorOptions::NONE:
+      break;
+  }

   if (cc->Outputs().HasTag("FLOAT")) {
     // TODO: Could add an index in the option to specify returning one
     // value of a float array.
     RET_CHECK_EQ(num_values, 1);
     cc->Outputs().Tag("FLOAT").AddPacket(
-        MakePacket<float>(raw_floats[0]).At(cc->InputTimestamp()));
+        MakePacket<float>(output_floats->at(0)).At(cc->InputTimestamp()));
   }
   if (cc->Outputs().HasTag("FLOATS")) {
-    auto output_floats = absl::make_unique<std::vector<float>>(
-        raw_floats, raw_floats + num_values);
     cc->Outputs().Tag("FLOATS").Add(output_floats.release(),
                                     cc->InputTimestamp());
   }

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 }  // namespace mediapipe
diff --git a/mediapipe/calculators/tensor/tensors_to_floats_calculator.proto b/mediapipe/calculators/tensor/tensors_to_floats_calculator.proto
new file mode 100644
index 000000000..694050190
--- /dev/null
+++ b/mediapipe/calculators/tensor/tensors_to_floats_calculator.proto
@@ -0,0 +1,33 @@
+// Copyright 2020 The MediaPipe Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// The option proto for the TensorsToFloatsCalculator.
+
+syntax = "proto2";
+
+package mediapipe;
+
+import "mediapipe/framework/calculator.proto";
+
+message TensorsToFloatsCalculatorOptions {
+  extend .mediapipe.CalculatorOptions {
+    optional TensorsToFloatsCalculatorOptions ext = 343499115;
+  }
+  enum Activation {
+    NONE = 0;
+    SIGMOID = 1;
+  }
+  // Apply activation function to the floats.
+  optional Activation activation = 1 [default = NONE];
+}
diff --git a/mediapipe/calculators/tensor/tensors_to_floats_calculator_test.cc b/mediapipe/calculators/tensor/tensors_to_floats_calculator_test.cc
new file mode 100644
index 000000000..9a564f564
--- /dev/null
+++ b/mediapipe/calculators/tensor/tensors_to_floats_calculator_test.cc
@@ -0,0 +1,144 @@
+// Copyright 2020 The MediaPipe Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
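+
+// Tests for TensorsToFloatsCalculator, covering the plain float/float-vector
+// outputs and the optional SIGMOID activation added in this change.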
+
+#include <vector>
+
+#include "absl/memory/memory.h"
+#include "mediapipe/calculators/tensor/tensors_to_floats_calculator.pb.h"
+#include "mediapipe/framework/calculator.pb.h"
+#include "mediapipe/framework/calculator_framework.h"
+#include "mediapipe/framework/calculator_runner.h"
+#include "mediapipe/framework/formats/tensor.h"
+#include "mediapipe/framework/port/gtest.h"
+#include "mediapipe/framework/port/parse_text_proto.h"
+#include "mediapipe/framework/port/status_matchers.h"
+
+namespace mediapipe {
+
+using mediapipe::ParseTextProtoOrDie;
+using Node = ::mediapipe::CalculatorGraphConfig::Node;
+
+const float kErrorMargin = 1e-2f;
+
+class TensorsToFloatsCalculatorTest : public ::testing::Test {
+ protected:
+  void BuildGraph(mediapipe::CalculatorRunner* runner,
+                  const std::vector<float>& values) {
+    auto tensors = absl::make_unique<std::vector<Tensor>>();
+    tensors->emplace_back(
+        Tensor::ElementType::kFloat32,
+        Tensor::Shape{1, 1, static_cast<int>(values.size()), 1});
+    auto view = tensors->back().GetCpuWriteView();
+    float* tensor_buffer = view.buffer<float>();
+    ASSERT_NE(tensor_buffer, nullptr);
+    for (int i = 0; i < values.size(); ++i) {
+      tensor_buffer[i] = values[i];
+    }
+
+    int64 stream_timestamp = 0;
+    auto& input_stream_packets =
+        runner->MutableInputs()->Tag("TENSORS").packets;
+
+    input_stream_packets.push_back(
+        mediapipe::Adopt(tensors.release())
+            .At(mediapipe::Timestamp(stream_timestamp++)));
+  }
+};
+
+TEST_F(TensorsToFloatsCalculatorTest, SingleValue) {
+  mediapipe::CalculatorRunner runner(ParseTextProtoOrDie<Node>(R"(
+    calculator: "TensorsToFloatsCalculator"
+    input_stream: "TENSORS:tensors"
+    output_stream: "FLOAT:float"
+  )"));
+
+  const float single_value = 0.5;
+  BuildGraph(&runner, {single_value});
+  MP_ASSERT_OK(runner.Run());
+
+  const auto& output_packets_ = runner.Outputs().Tag("FLOAT").packets;
+
+  EXPECT_EQ(1, output_packets_.size());
+
+  const auto& value = output_packets_[0].Get<float>();
+  EXPECT_EQ(single_value, value);
+}
+
+TEST_F(TensorsToFloatsCalculatorTest, SingleValueAsVector) {
+  mediapipe::CalculatorRunner runner(ParseTextProtoOrDie<Node>(R"(
+    calculator: "TensorsToFloatsCalculator"
+    input_stream: "TENSORS:tensors"
+    output_stream: "FLOATS:floats"
+  )"));
+
+  const float single_value = 0.5;
+  BuildGraph(&runner, {single_value});
+  MP_ASSERT_OK(runner.Run());
+
+  const auto& output_packets_ = runner.Outputs().Tag("FLOATS").packets;
+  EXPECT_EQ(1, output_packets_.size());
+
+  const auto& values = output_packets_[0].Get<std::vector<float>>();
+  EXPECT_EQ(1, values.size());
+  EXPECT_EQ(single_value, values[0]);
+}
+
+TEST_F(TensorsToFloatsCalculatorTest, FloatVector) {
+  mediapipe::CalculatorRunner runner(ParseTextProtoOrDie<Node>(R"(
+    calculator: "TensorsToFloatsCalculator"
+    input_stream: "TENSORS:tensors"
+    output_stream: "FLOATS:floats"
+  )"));
+
+  const std::vector<float> input_values = {0.f, 0.5f, 1.0f};
+  BuildGraph(&runner, input_values);
+  MP_ASSERT_OK(runner.Run());
+
+  const auto& output_packets_ = runner.Outputs().Tag("FLOATS").packets;
+  EXPECT_EQ(1, output_packets_.size());
+
+  const auto& values = output_packets_[0].Get<std::vector<float>>();
+  EXPECT_EQ(input_values.size(), values.size());
+  for (int i = 0; i < values.size(); ++i) {
+    EXPECT_NEAR(values[i], input_values[i], kErrorMargin);
+  }
+}
+
+TEST_F(TensorsToFloatsCalculatorTest, FloatVectorWithSigmoid) {
+  mediapipe::CalculatorRunner runner(ParseTextProtoOrDie<Node>(R"(
+    calculator: "TensorsToFloatsCalculator"
+    input_stream: "TENSORS:tensors"
+    output_stream: "FLOATS:floats"
+    options {
+      [mediapipe.TensorsToFloatsCalculatorOptions.ext] { activation: SIGMOID }
+    }
+  )"));
+
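+  // Sigmoid(x) = 1 / (1 + exp(-x)), hence the expectations below:
+  // Sigmoid(-1) ~= 0.269, Sigmoid(0) = 0.5, Sigmoid(1) ~= 0.731.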
+  const std::vector<float> input_values = {-1.f, 0.f, 1.0f};
+  const std::vector<float> expected_output_with_sigmoid = {0.269f, 0.5f,
+                                                           0.731f};
+  BuildGraph(&runner, input_values);
+  MP_ASSERT_OK(runner.Run());
+
+  const auto& output_packets_ = runner.Outputs().Tag("FLOATS").packets;
+  EXPECT_EQ(1, output_packets_.size());
+
+  const auto& values = output_packets_[0].Get<std::vector<float>>();
+  EXPECT_EQ(expected_output_with_sigmoid.size(), values.size());
+  for (int i = 0; i < values.size(); ++i) {
+    EXPECT_NEAR(values[i], expected_output_with_sigmoid[i], kErrorMargin);
+  }
+}
+
+}  // namespace mediapipe
diff --git a/mediapipe/calculators/tensor/tensors_to_landmarks_calculator.cc b/mediapipe/calculators/tensor/tensors_to_landmarks_calculator.cc
index 731e904ad..dc4d26a36 100644
--- a/mediapipe/calculators/tensor/tensors_to_landmarks_calculator.cc
+++ b/mediapipe/calculators/tensor/tensors_to_landmarks_calculator.cc
@@ -20,9 +20,29 @@

 namespace mediapipe {

+namespace {
+
+inline float Sigmoid(float value) { return 1.0f / (1.0f + std::exp(-value)); }
+
+float ApplyActivation(
+    ::mediapipe::TensorsToLandmarksCalculatorOptions::Activation activation,
+    float value) {
+  switch (activation) {
+    case ::mediapipe::TensorsToLandmarksCalculatorOptions::SIGMOID:
+      return Sigmoid(value);
+      break;
+    default:
+      return value;
+  }
+}
+
+}  // namespace
+
 // A calculator for converting Tensors from regression models into landmarks.
 // Note that if the landmarks in the tensor has more than 5 dimensions, only the
-// first 5 dimensions will be converted to [x,y,z, visibility, presence].
+// first 5 dimensions will be converted to [x,y,z, visibility, presence]. The
+// latter two fields may also stay unset if such attributes are not supported in
+// the model.
 //
 // Input:
 //  TENSORS - Vector of Tensors of type kFloat32. Only the first tensor will be
@@ -67,13 +87,13 @@ namespace mediapipe {
 // }
 class TensorsToLandmarksCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc);
+  static mediapipe::Status GetContract(CalculatorContract* cc);

-  ::mediapipe::Status Open(CalculatorContext* cc) override;
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;

  private:
-  ::mediapipe::Status LoadOptions(CalculatorContext* cc);
+  mediapipe::Status LoadOptions(CalculatorContext* cc);
   int num_landmarks_ = 0;
   bool flip_vertically_ = false;
   bool flip_horizontally_ = false;
@@ -82,7 +102,7 @@ class TensorsToLandmarksCalculator : public CalculatorBase {
 };
 REGISTER_CALCULATOR(TensorsToLandmarksCalculator);

-::mediapipe::Status TensorsToLandmarksCalculator::GetContract(
+mediapipe::Status TensorsToLandmarksCalculator::GetContract(
     CalculatorContract* cc) {
   RET_CHECK(!cc->Inputs().GetTags().empty());
   RET_CHECK(!cc->Outputs().GetTags().empty());
@@ -115,10 +135,10 @@ REGISTER_CALCULATOR(TensorsToLandmarksCalculator);
     cc->Outputs().Tag("NORM_LANDMARKS").Set<NormalizedLandmarkList>();
   }

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorsToLandmarksCalculator::Open(CalculatorContext* cc) {
+mediapipe::Status TensorsToLandmarksCalculator::Open(CalculatorContext* cc) {
   cc->SetOffset(TimestampDiff(0));

   MP_RETURN_IF_ERROR(LoadOptions(cc));
@@ -148,11 +168,10 @@ REGISTER_CALCULATOR(TensorsToLandmarksCalculator);
           ? cc->InputSidePackets().Tag("FLIP_VERTICALLY").Get<bool>()
           : options_.flip_vertically();

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorsToLandmarksCalculator::Process(
-    CalculatorContext* cc) {
+mediapipe::Status TensorsToLandmarksCalculator::Process(CalculatorContext* cc) {
   // Override values if specified so.
   if (cc->Inputs().HasTag("FLIP_HORIZONTALLY") &&
       !cc->Inputs().Tag("FLIP_HORIZONTALLY").IsEmpty()) {
@@ -164,7 +183,7 @@ REGISTER_CALCULATOR(TensorsToLandmarksCalculator);
   }

   if (cc->Inputs().Tag("TENSORS").IsEmpty()) {
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

   const auto& input_tensors =
@@ -200,10 +219,12 @@ REGISTER_CALCULATOR(TensorsToLandmarksCalculator);
       landmark->set_z(raw_landmarks[offset + 2]);
     }
     if (num_dimensions > 3) {
-      landmark->set_visibility(raw_landmarks[offset + 3]);
+      landmark->set_visibility(ApplyActivation(options_.visibility_activation(),
+                                               raw_landmarks[offset + 3]));
     }
     if (num_dimensions > 4) {
-      landmark->set_presence(raw_landmarks[offset + 4]);
+      landmark->set_presence(ApplyActivation(options_.presence_activation(),
+                                             raw_landmarks[offset + 4]));
     }
   }

@@ -218,8 +239,12 @@ REGISTER_CALCULATOR(TensorsToLandmarksCalculator);
       // Scale Z coordinate as X + allow additional uniform normalization.
       norm_landmark->set_z(landmark.z() / options_.input_image_width() /
                            options_.normalize_z());
-      norm_landmark->set_visibility(landmark.visibility());
-      norm_landmark->set_presence(landmark.presence());
+      if (landmark.has_visibility()) {  // Set only if supported in the model.
+        norm_landmark->set_visibility(landmark.visibility());
+      }
+      if (landmark.has_presence()) {  // Set only if supported in the model.
+        norm_landmark->set_presence(landmark.presence());
+      }
     }
     cc->Outputs()
         .Tag("NORM_LANDMARKS")
@@ -235,16 +260,16 @@ REGISTER_CALCULATOR(TensorsToLandmarksCalculator);
             .At(cc->InputTimestamp()));
   }

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

-::mediapipe::Status TensorsToLandmarksCalculator::LoadOptions(
+mediapipe::Status TensorsToLandmarksCalculator::LoadOptions(
     CalculatorContext* cc) {
   // Get calculator options specified in the graph.
   options_ = cc->Options<::mediapipe::TensorsToLandmarksCalculatorOptions>();
   RET_CHECK(options_.has_num_landmarks());
   num_landmarks_ = options_.num_landmarks();

-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 }  // namespace mediapipe
diff --git a/mediapipe/calculators/tensor/tensors_to_landmarks_calculator.proto b/mediapipe/calculators/tensor/tensors_to_landmarks_calculator.proto
index c321fe8d1..2608a1459 100644
--- a/mediapipe/calculators/tensor/tensors_to_landmarks_calculator.proto
+++ b/mediapipe/calculators/tensor/tensors_to_landmarks_calculator.proto
@@ -25,6 +25,11 @@ message TensorsToLandmarksCalculatorOptions {
     optional TensorsToLandmarksCalculatorOptions ext = 335742640;
   }

+  enum Activation {
+    NONE = 0;
+    SIGMOID = 1;
+  }
+
   // [Required] Number of landmarks from the output of the model.
   optional int32 num_landmarks = 1;

@@ -51,4 +56,10 @@ message TensorsToLandmarksCalculatorOptions {
   // when normalized landmarks are needed. It is applied in addition to Z
   // coordinate being re-scaled as X.
   optional float normalize_z = 5 [default = 1.0];
+
+  // Apply activation function to the tensor representing landmark visibility.
+  optional Activation visibility_activation = 7 [default = NONE];
+
+  // Apply activation function to the tensor representing landmark presence.
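+  // (As with visibility_activation, SIGMOID maps a raw logit into a [0, 1]
+  // score.)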
+  optional Activation presence_activation = 8 [default = NONE];
 }
diff --git a/mediapipe/calculators/tensor/testdata/image_to_tensor/large_sub_rect_border_zero.png b/mediapipe/calculators/tensor/testdata/image_to_tensor/large_sub_rect_border_zero.png
new file mode 100644
index 000000000..1a738a50d
Binary files /dev/null and b/mediapipe/calculators/tensor/testdata/image_to_tensor/large_sub_rect_border_zero.png differ
diff --git a/mediapipe/calculators/tensor/testdata/image_to_tensor/large_sub_rect_keep_aspect_border_zero.png b/mediapipe/calculators/tensor/testdata/image_to_tensor/large_sub_rect_keep_aspect_border_zero.png
new file mode 100644
index 000000000..5b096cb4d
Binary files /dev/null and b/mediapipe/calculators/tensor/testdata/image_to_tensor/large_sub_rect_keep_aspect_border_zero.png differ
diff --git a/mediapipe/calculators/tensor/testdata/image_to_tensor/large_sub_rect_keep_aspect_with_rotation_border_zero.png b/mediapipe/calculators/tensor/testdata/image_to_tensor/large_sub_rect_keep_aspect_with_rotation_border_zero.png
new file mode 100644
index 000000000..c5512ec0d
Binary files /dev/null and b/mediapipe/calculators/tensor/testdata/image_to_tensor/large_sub_rect_keep_aspect_with_rotation_border_zero.png differ
diff --git a/mediapipe/calculators/tensor/testdata/image_to_tensor/medium_sub_rect_keep_aspect_border_zero.png b/mediapipe/calculators/tensor/testdata/image_to_tensor/medium_sub_rect_keep_aspect_border_zero.png
new file mode 100644
index 000000000..bfb461546
Binary files /dev/null and b/mediapipe/calculators/tensor/testdata/image_to_tensor/medium_sub_rect_keep_aspect_border_zero.png differ
diff --git a/mediapipe/calculators/tensor/testdata/image_to_tensor/medium_sub_rect_keep_aspect_with_rotation_border_zero.png b/mediapipe/calculators/tensor/testdata/image_to_tensor/medium_sub_rect_keep_aspect_with_rotation_border_zero.png
new file mode 100644
index 000000000..ab14e5954
Binary files /dev/null and b/mediapipe/calculators/tensor/testdata/image_to_tensor/medium_sub_rect_keep_aspect_with_rotation_border_zero.png differ
diff --git a/mediapipe/calculators/tensor/testdata/image_to_tensor/medium_sub_rect_with_rotation_border_zero.png b/mediapipe/calculators/tensor/testdata/image_to_tensor/medium_sub_rect_with_rotation_border_zero.png
new file mode 100644
index 000000000..d55301146
Binary files /dev/null and b/mediapipe/calculators/tensor/testdata/image_to_tensor/medium_sub_rect_with_rotation_border_zero.png differ
diff --git a/mediapipe/calculators/tensorflow/graph_tensors_packet_generator.cc b/mediapipe/calculators/tensorflow/graph_tensors_packet_generator.cc
index 54126cf1d..5d449f037 100644
--- a/mediapipe/calculators/tensorflow/graph_tensors_packet_generator.cc
+++ b/mediapipe/calculators/tensorflow/graph_tensors_packet_generator.cc
@@ -33,7 +33,7 @@ namespace tf = ::tensorflow;

 class GraphTensorsPacketGenerator : public PacketGenerator {
  public:
-  static ::mediapipe::Status FillExpectations(
+  static mediapipe::Status FillExpectations(
       const PacketGeneratorOptions& extendable_options,
       PacketTypeSet* input_side_packets, PacketTypeSet* output_side_packets) {
     RET_CHECK(extendable_options.HasExtension(
@@ -45,10 +45,10 @@ class GraphTensorsPacketGenerator : public PacketGenerator {
         /* "A map of tensor tags and tensors" */);
     RET_CHECK_EQ(options.tensor_tag_size(), options.tensor_num_nodes_size());
     RET_CHECK_GT(options.tensor_tag_size(), 0);
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

-  static ::mediapipe::Status Generate(
+  static mediapipe::Status Generate(
Generate( const PacketGeneratorOptions& packet_generator_options, const PacketSet& input_side_packets, PacketSet* output_side_packets) { const GraphTensorsPacketGeneratorOptions& options = @@ -65,7 +65,7 @@ class GraphTensorsPacketGenerator : public PacketGenerator { (*tensor_map)[tensor_tag].flat().setZero(); } output_side_packets->Index(0) = AdoptAsUniquePtr(tensor_map.release()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_PACKET_GENERATOR(GraphTensorsPacketGenerator); diff --git a/mediapipe/calculators/tensorflow/graph_tensors_packet_generator_test.cc b/mediapipe/calculators/tensorflow/graph_tensors_packet_generator_test.cc index 77069c658..829994e3c 100644 --- a/mediapipe/calculators/tensorflow/graph_tensors_packet_generator_test.cc +++ b/mediapipe/calculators/tensorflow/graph_tensors_packet_generator_test.cc @@ -72,7 +72,7 @@ TEST_F(GraphTensorsPacketGeneratorTest, VerifyTensorSizeShapeAndValue) { PacketSet inputs({}); PacketSet outputs(1); - ::mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( + mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( "GraphTensorsPacketGenerator", extendable_options_, inputs, &outputs); MP_EXPECT_OK(run_status) << run_status.message(); VerifyTensorMap(&outputs); diff --git a/mediapipe/calculators/tensorflow/image_frame_to_tensor_calculator.cc b/mediapipe/calculators/tensorflow/image_frame_to_tensor_calculator.cc index fd109a3bd..e5c0601e5 100644 --- a/mediapipe/calculators/tensorflow/image_frame_to_tensor_calculator.cc +++ b/mediapipe/calculators/tensorflow/image_frame_to_tensor_calculator.cc @@ -78,17 +78,17 @@ std::unique_ptr ImageFrameToNormalizedTensor( // } class ImageFrameToTensorCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: ImageFrameToTensorCalculatorOptions options_; }; REGISTER_CALCULATOR(ImageFrameToTensorCalculator); -::mediapipe::Status ImageFrameToTensorCalculator::GetContract( +mediapipe::Status ImageFrameToTensorCalculator::GetContract( CalculatorContract* cc) { // Start with only one input packet. RET_CHECK_EQ(cc->Inputs().NumEntries(), 1) @@ -101,19 +101,18 @@ REGISTER_CALCULATOR(ImageFrameToTensorCalculator); cc->Outputs().Index(0).Set( // Output TensorFlow Tensor. ); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ImageFrameToTensorCalculator::Open(CalculatorContext* cc) { +mediapipe::Status ImageFrameToTensorCalculator::Open(CalculatorContext* cc) { options_ = cc->Options(); // Inform the framework that we always output at the same timestamp // as we receive a packet at. 
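Note on the TensorsToLandmarksCalculator hunks above: the new visibility_activation and presence_activation options are applied through an ApplyActivation helper whose definition is not part of these hunks. A minimal sketch of what the call sites and the proto enum imply, assuming the helper simply switches on the Activation value (a guess, not the actual implementation):

#include <cmath>

// Hypothetical reconstruction of the helper called as
// ApplyActivation(options_.visibility_activation(), raw_landmarks[offset + 3]).
float ApplyActivation(
    mediapipe::TensorsToLandmarksCalculatorOptions::Activation activation,
    float value) {
  switch (activation) {
    case mediapipe::TensorsToLandmarksCalculatorOptions::SIGMOID:
      return 1.0f / (1.0f + std::exp(-value));  // squash a raw logit into (0, 1)
    default:  // NONE: pass the model output through unchanged
      return value;
  }
}

Because both options default to NONE, existing graphs keep their behavior; models that emit raw logits for visibility and presence can opt into SIGMOID per graph.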
cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ImageFrameToTensorCalculator::Process( - CalculatorContext* cc) { +mediapipe::Status ImageFrameToTensorCalculator::Process(CalculatorContext* cc) { const Packet& input_item = cc->Inputs().Index(0).Value(); RET_CHECK(!input_item.IsEmpty()) << "Input cannot be empty."; @@ -147,7 +146,7 @@ REGISTER_CALCULATOR(ImageFrameToTensorCalculator); } else if (bytes_per_pixel == 4) { data_type = tf::DT_FLOAT; } else { - return ::mediapipe::InvalidArgumentError(absl::StrCat( + return mediapipe::InvalidArgumentError(absl::StrCat( "Unsupported image format (", bytes_per_pixel, " bytes per pixel)")); } @@ -174,7 +173,7 @@ REGISTER_CALCULATOR(ImageFrameToTensorCalculator); } cc->Outputs().Index(0).Add(tensor.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/tensorflow/lapped_tensor_buffer_calculator.cc b/mediapipe/calculators/tensorflow/lapped_tensor_buffer_calculator.cc index 68e58ebac..7332439ef 100644 --- a/mediapipe/calculators/tensorflow/lapped_tensor_buffer_calculator.cc +++ b/mediapipe/calculators/tensorflow/lapped_tensor_buffer_calculator.cc @@ -84,18 +84,18 @@ namespace tf = tensorflow; class LappedTensorBufferCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; private: // Adds a batch dimension to the input tensor if specified in the // calculator options. - ::mediapipe::Status AddBatchDimension(tf::Tensor* input_tensor); + mediapipe::Status AddBatchDimension(tf::Tensor* input_tensor); // Sends the current buffer downstream. - ::mediapipe::Status ProcessBuffer(CalculatorContext* cc); + mediapipe::Status ProcessBuffer(CalculatorContext* cc); int steps_until_output_; int buffer_size_; @@ -110,7 +110,7 @@ class LappedTensorBufferCalculator : public CalculatorBase { REGISTER_CALCULATOR(LappedTensorBufferCalculator); -::mediapipe::Status LappedTensorBufferCalculator::GetContract( +mediapipe::Status LappedTensorBufferCalculator::GetContract( CalculatorContract* cc) { RET_CHECK_EQ(cc->Inputs().NumEntries(), 1) << "Only one input stream is supported."; @@ -132,7 +132,7 @@ REGISTER_CALCULATOR(LappedTensorBufferCalculator); if (cc->InputSidePackets().HasTag(kCalculatorOptions)) { cc->InputSidePackets() .Tag(kCalculatorOptions) - .Set(); + .Set(); } cc->Outputs().Index(0).Set( // Output tensorflow::Tensor stream with possibly overlapping steps. 
@@ -141,10 +141,10 @@ REGISTER_CALCULATOR(LappedTensorBufferCalculator); if (cc->Outputs().NumEntries() > 1) { cc->Outputs().Index(1).Set>(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status LappedTensorBufferCalculator::Open(CalculatorContext* cc) { +mediapipe::Status LappedTensorBufferCalculator::Open(CalculatorContext* cc) { options_ = cc->Options(); if (cc->InputSidePackets().HasTag(kCalculatorOptions)) { options_ = cc->InputSidePackets() @@ -176,11 +176,10 @@ REGISTER_CALCULATOR(LappedTensorBufferCalculator); buffer_ = absl::make_unique>(buffer_size_); steps_until_output_ = buffer_size_ - options_.padding(); initialized_ = false; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status LappedTensorBufferCalculator::Process( - CalculatorContext* cc) { +mediapipe::Status LappedTensorBufferCalculator::Process(CalculatorContext* cc) { // These are cheap, shallow copies. tensorflow::Tensor input_tensor( cc->Inputs().Index(0).Get()); @@ -202,12 +201,12 @@ REGISTER_CALCULATOR(LappedTensorBufferCalculator); MP_RETURN_IF_ERROR(ProcessBuffer(cc)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status LappedTensorBufferCalculator::Close(CalculatorContext* cc) { +mediapipe::Status LappedTensorBufferCalculator::Close(CalculatorContext* cc) { if (!initialized_ || options_.padding() == 0) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } int last_frame = buffer_size_ - steps_until_output_ - 1; const auto& pad_frame = buffer_->Get(last_frame); @@ -217,12 +216,12 @@ REGISTER_CALCULATOR(LappedTensorBufferCalculator); } MP_RETURN_IF_ERROR(ProcessBuffer(cc)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Adds a batch dimension to the input tensor if specified in the calculator // options. -::mediapipe::Status LappedTensorBufferCalculator::AddBatchDimension( +mediapipe::Status LappedTensorBufferCalculator::AddBatchDimension( tf::Tensor* input_tensor) { if (options_.add_batch_dim_to_tensors()) { tf::TensorShape new_shape(input_tensor->shape()); @@ -231,11 +230,11 @@ REGISTER_CALCULATOR(LappedTensorBufferCalculator); << "Could not add 0th dimension to tensor without changing its shape." 
<< " Current shape: " << input_tensor->shape().DebugString(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Process buffer -::mediapipe::Status LappedTensorBufferCalculator::ProcessBuffer( +mediapipe::Status LappedTensorBufferCalculator::ProcessBuffer( CalculatorContext* cc) { auto concatenated = ::absl::make_unique(); const tf::Status concat_status = tf::tensor::Concat( @@ -256,7 +255,7 @@ REGISTER_CALCULATOR(LappedTensorBufferCalculator); timestamp_buffer_->Get(timestamp_offset_)); } steps_until_output_ = buffer_size_ - overlap_; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/tensorflow/matrix_to_tensor_calculator.cc b/mediapipe/calculators/tensorflow/matrix_to_tensor_calculator.cc index ca704b793..20e2883b4 100644 --- a/mediapipe/calculators/tensorflow/matrix_to_tensor_calculator.cc +++ b/mediapipe/calculators/tensorflow/matrix_to_tensor_calculator.cc @@ -26,20 +26,19 @@ namespace mediapipe { namespace { -::mediapipe::Status FillTimeSeriesHeaderIfValid(const Packet& header_packet, - TimeSeriesHeader* header) { +mediapipe::Status FillTimeSeriesHeaderIfValid(const Packet& header_packet, + TimeSeriesHeader* header) { CHECK(header); if (header_packet.IsEmpty()) { - return ::mediapipe::UnknownError("No header found."); + return mediapipe::UnknownError("No header found."); } if (!header_packet.ValidateAsType().ok()) { - return ::mediapipe::UnknownError( - "Packet does not contain TimeSeriesHeader."); + return mediapipe::UnknownError("Packet does not contain TimeSeriesHeader."); } *header = header_packet.Get(); if (header->has_sample_rate() && header->sample_rate() >= 0 && header->has_num_channels() && header->num_channels() >= 0) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } else { std::string error_message = "TimeSeriesHeader is missing necessary fields: " @@ -48,7 +47,7 @@ namespace { absl::StrAppend(&error_message, "Got header:\n", header->ShortDebugString()); #endif - return ::mediapipe::InvalidArgumentError(error_message); + return mediapipe::InvalidArgumentError(error_message); } } } // namespace @@ -78,17 +77,17 @@ typedef Eigen::Matrix // } class MatrixToTensorCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: MatrixToTensorCalculatorOptions options_; }; REGISTER_CALCULATOR(MatrixToTensorCalculator); -::mediapipe::Status MatrixToTensorCalculator::GetContract( +mediapipe::Status MatrixToTensorCalculator::GetContract( CalculatorContract* cc) { RET_CHECK_EQ(cc->Inputs().NumEntries(), 1) << "Only one input stream is supported."; @@ -102,15 +101,15 @@ REGISTER_CALCULATOR(MatrixToTensorCalculator); // TimeSeriesHeader as the input (or no header if the input has no // header). ); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status MatrixToTensorCalculator::Open(CalculatorContext* cc) { +mediapipe::Status MatrixToTensorCalculator::Open(CalculatorContext* cc) { // If the input is part of a time series, then preserve the header so that // downstream consumers can access the sample rate if needed. 
options_ = cc->Options(); auto input_header = ::absl::make_unique(); - const ::mediapipe::Status header_status = FillTimeSeriesHeaderIfValid( + const mediapipe::Status header_status = FillTimeSeriesHeaderIfValid( cc->Inputs().Index(0).Header(), input_header.get()); if (header_status.ok()) { cc->Outputs().Index(0).SetHeader(Adopt(input_header.release())); @@ -119,10 +118,10 @@ REGISTER_CALCULATOR(MatrixToTensorCalculator); // Inform the framework that we always output at the same timestamp // as we receive a packet at. cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status MatrixToTensorCalculator::Process(CalculatorContext* cc) { +mediapipe::Status MatrixToTensorCalculator::Process(CalculatorContext* cc) { const Matrix& matrix = cc->Inputs().Index(0).Get(); tf::TensorShape tensor_shape; if (options_.transpose()) { @@ -151,7 +150,7 @@ REGISTER_CALCULATOR(MatrixToTensorCalculator); << " Current shape: " << tensor->shape().DebugString(); } cc->Outputs().Index(0).Add(tensor.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/tensorflow/object_detection_tensors_to_detections_calculator.cc b/mediapipe/calculators/tensorflow/object_detection_tensors_to_detections_calculator.cc index fa4fd1035..f108d8c40 100644 --- a/mediapipe/calculators/tensorflow/object_detection_tensors_to_detections_calculator.cc +++ b/mediapipe/calculators/tensorflow/object_detection_tensors_to_detections_calculator.cc @@ -93,7 +93,7 @@ class ObjectDetectionTensorsToDetectionsCalculator : public CalculatorBase { public: ObjectDetectionTensorsToDetectionsCalculator() = default; - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Tag(kBoxes).Set(); cc->Inputs().Tag(kScores).Set(); @@ -114,7 +114,7 @@ class ObjectDetectionTensorsToDetectionsCalculator : public CalculatorBase { cc->Options(); float mask_threshold = calculator_options.mask_threshold(); if (!(mask_threshold >= 0.0 && mask_threshold <= 1.0)) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "mask_threshold must be in range [0.0, 1.0]"; } } @@ -126,10 +126,10 @@ class ObjectDetectionTensorsToDetectionsCalculator : public CalculatorBase { .Tag(kLabelMap) .Set>>(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { if (cc->InputSidePackets().HasTag(kLabelMap)) { label_map_ = GetFromUniquePtr>( cc->InputSidePackets().Tag(kLabelMap)); @@ -141,10 +141,10 @@ class ObjectDetectionTensorsToDetectionsCalculator : public CalculatorBase { tensor_dim_to_squeeze_field.begin(), tensor_dim_to_squeeze_field.end()); std::sort(tensor_dims_to_squeeze_.rbegin(), tensor_dims_to_squeeze_.rend()); cc->SetOffset(0); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { const auto& options = cc->Options(); @@ -205,14 +205,14 @@ class ObjectDetectionTensorsToDetectionsCalculator : public CalculatorBase { .Tag(kDetections) .Add(output_detections.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: std::map* 
label_map_; std::vector tensor_dims_to_squeeze_; - ::mediapipe::StatusOr MaybeSqueezeDims( + mediapipe::StatusOr MaybeSqueezeDims( const std::string& tensor_tag, const tf::Tensor& input_tensor) { if (tensor_dims_to_squeeze_.empty()) { return input_tensor; diff --git a/mediapipe/calculators/tensorflow/pack_media_sequence_calculator.cc b/mediapipe/calculators/tensorflow/pack_media_sequence_calculator.cc index 662d8fa16..d4c054681 100644 --- a/mediapipe/calculators/tensorflow/pack_media_sequence_calculator.cc +++ b/mediapipe/calculators/tensorflow/pack_media_sequence_calculator.cc @@ -42,7 +42,7 @@ const char kKeypointsTag[] = "KEYPOINTS"; const char kSegmentationMaskTag[] = "CLASS_SEGMENTATION"; namespace tf = ::tensorflow; -namespace mpms = ::mediapipe::mediasequence; +namespace mpms = mediapipe::mediasequence; // Sink calculator to package streams into tf.SequenceExamples. // diff --git a/mediapipe/calculators/tensorflow/pack_media_sequence_calculator_test.cc b/mediapipe/calculators/tensorflow/pack_media_sequence_calculator_test.cc index 9d383cbd4..c71c3173a 100644 --- a/mediapipe/calculators/tensorflow/pack_media_sequence_calculator_test.cc +++ b/mediapipe/calculators/tensorflow/pack_media_sequence_calculator_test.cc @@ -36,7 +36,7 @@ namespace mediapipe { namespace { namespace tf = ::tensorflow; -namespace mpms = ::mediapipe::mediasequence; +namespace mpms = mediapipe::mediasequence; class PackMediaSequenceCalculatorTest : public ::testing::Test { protected: @@ -433,7 +433,7 @@ TEST_F(PackMediaSequenceCalculatorTest, PacksBBoxWithoutImageDims) { Adopt(input_sequence.release()); auto status = runner_->Run(); - EXPECT_EQ(::mediapipe::StatusCode::kInvalidArgument, status.code()); + EXPECT_EQ(mediapipe::StatusCode::kInvalidArgument, status.code()); } TEST_F(PackMediaSequenceCalculatorTest, PacksBBoxWithImages) { diff --git a/mediapipe/calculators/tensorflow/string_to_sequence_example_calculator.cc b/mediapipe/calculators/tensorflow/string_to_sequence_example_calculator.cc index 6693a0642..64a2da016 100644 --- a/mediapipe/calculators/tensorflow/string_to_sequence_example_calculator.cc +++ b/mediapipe/calculators/tensorflow/string_to_sequence_example_calculator.cc @@ -44,15 +44,15 @@ constexpr char kSequenceExample[] = "SEQUENCE_EXAMPLE"; class StringToSequenceExampleCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; + static mediapipe::Status GetContract(CalculatorContract* cc); + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; }; REGISTER_CALCULATOR(StringToSequenceExampleCalculator); -::mediapipe::Status StringToSequenceExampleCalculator::GetContract( +mediapipe::Status StringToSequenceExampleCalculator::GetContract( CalculatorContract* cc) { if (cc->InputSidePackets().HasTag(kString)) { cc->InputSidePackets().Tag(kString).Set(); @@ -62,10 +62,10 @@ REGISTER_CALCULATOR(StringToSequenceExampleCalculator); cc->InputSidePackets().Tag(kSequenceExample).Set(); cc->OutputSidePackets().Tag(kString).Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status StringToSequenceExampleCalculator::Open( +mediapipe::Status StringToSequenceExampleCalculator::Open( CalculatorContext* cc) { if 
(cc->InputSidePackets().HasTag(kString)) { auto string_value = cc->InputSidePackets().Tag(kString).Get(); @@ -73,17 +73,17 @@ REGISTER_CALCULATOR(StringToSequenceExampleCalculator); example->ParseFromString(string_value); cc->OutputSidePackets() .Tag(kSequenceExample) - .Set(::mediapipe::Adopt(example.release())); + .Set(mediapipe::Adopt(example.release())); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status StringToSequenceExampleCalculator::Process( +mediapipe::Status StringToSequenceExampleCalculator::Process( CalculatorContext* cc) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status StringToSequenceExampleCalculator::Close( +mediapipe::Status StringToSequenceExampleCalculator::Close( CalculatorContext* cc) { if (cc->InputSidePackets().HasTag(kSequenceExample)) { const auto& example = @@ -91,9 +91,9 @@ REGISTER_CALCULATOR(StringToSequenceExampleCalculator); auto string_value = absl::make_unique(); example.SerializeToString(string_value.get()); cc->OutputSidePackets().Tag(kString).Set( - ::mediapipe::Adopt(string_value.release())); + mediapipe::Adopt(string_value.release())); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/tensorflow/tensor_squeeze_dimensions_calculator.cc b/mediapipe/calculators/tensorflow/tensor_squeeze_dimensions_calculator.cc index b1e4f05f0..a3acc49f3 100644 --- a/mediapipe/calculators/tensorflow/tensor_squeeze_dimensions_calculator.cc +++ b/mediapipe/calculators/tensorflow/tensor_squeeze_dimensions_calculator.cc @@ -27,7 +27,7 @@ namespace tf = ::tensorflow; // containing identical data (example output dimensions [1024, 5]). class TensorSqueezeDimensionsCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { RET_CHECK_EQ(cc->Inputs().NumEntries(), 1) << "Need one input"; cc->Inputs().Index(0).Set( // Input Tensor @@ -36,10 +36,10 @@ class TensorSqueezeDimensionsCalculator : public CalculatorBase { cc->Outputs().Index(0).Set( // Output Tensor Reduced Dimensions ); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { options_ = cc->Options(); RET_CHECK(options_.squeeze_all_single_dims() ^ (options_.dim_size() > 0)) << "Must specify dimensions to remove, or set squeeze_all_single_dims, " @@ -52,10 +52,10 @@ class TensorSqueezeDimensionsCalculator : public CalculatorBase { remove_dims_initialized_ = true; } cc->SetOffset(0); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { const tf::Tensor& input_tensor = cc->Inputs().Index(0).Get(); tf::TensorShape tensor_shape = input_tensor.shape(); if (!remove_dims_initialized_) { @@ -78,11 +78,11 @@ class TensorSqueezeDimensionsCalculator : public CalculatorBase { std::unique_ptr output_tensor(new tf::Tensor); RET_CHECK(output_tensor->CopyFrom(input_tensor, tensor_shape)); cc->Outputs().Index(0).Add(output_tensor.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Close(CalculatorContext* cc) override { - return ::mediapipe::OkStatus(); + mediapipe::Status Close(CalculatorContext* cc) override { + return 
mediapipe::OkStatus(); } private: diff --git a/mediapipe/calculators/tensorflow/tensor_to_image_frame_calculator.cc b/mediapipe/calculators/tensorflow/tensor_to_image_frame_calculator.cc index f6e4354d3..035acb564 100644 --- a/mediapipe/calculators/tensorflow/tensor_to_image_frame_calculator.cc +++ b/mediapipe/calculators/tensorflow/tensor_to_image_frame_calculator.cc @@ -45,10 +45,10 @@ constexpr char kTensor[] = "TENSOR"; // Possible extensions: support other input ranges, maybe 4D tensors. class TensorToImageFrameCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: float scale_factor_; @@ -56,7 +56,7 @@ class TensorToImageFrameCalculator : public CalculatorBase { REGISTER_CALCULATOR(TensorToImageFrameCalculator); -::mediapipe::Status TensorToImageFrameCalculator::GetContract( +mediapipe::Status TensorToImageFrameCalculator::GetContract( CalculatorContract* cc) { RET_CHECK_EQ(cc->Inputs().NumEntries(), 1) << "Only one input stream is supported."; @@ -70,18 +70,17 @@ REGISTER_CALCULATOR(TensorToImageFrameCalculator); cc->Outputs().Tag(kImage).Set<ImageFrame>( // Output ImageFrame. ); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TensorToImageFrameCalculator::Open(CalculatorContext* cc) { +mediapipe::Status TensorToImageFrameCalculator::Open(CalculatorContext* cc) { scale_factor_ = cc->Options<TensorToImageFrameCalculatorOptions>().scale_factor(); cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TensorToImageFrameCalculator::Process( - CalculatorContext* cc) { +mediapipe::Status TensorToImageFrameCalculator::Process(CalculatorContext* cc) { const tf::Tensor& input_tensor = cc->Inputs().Tag(kTensor).Get<tf::Tensor>(); int32 depth = 1; if (input_tensor.dims() != 2) { // Depth is 1 for 2D tensors.
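A usage sketch for TensorToImageFrameCalculator in the style of this PR's tests; the stream names are invented and the options extension path is assumed to follow the usual MediaPipe pattern. scale_factor: 255.0 maps a model's [0, 1] float output onto 8-bit pixel values:

// Hypothetical graph node; "float_tensor" and "output_image" are placeholders.
CalculatorGraphConfig config =
    mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
      input_stream: "float_tensor"
      node {
        calculator: "TensorToImageFrameCalculator"
        input_stream: "TENSOR:float_tensor"
        output_stream: "IMAGE:output_image"
        options {
          [mediapipe.TensorToImageFrameCalculatorOptions.ext] {
            scale_factor: 255.0
          }
        }
      }
    )");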
@@ -114,11 +113,11 @@ REGISTER_CALCULATOR(TensorToImageFrameCalculator); ImageFormat::GRAY8, input_tensor.dim_size(1), input_tensor.dim_size(0), input_tensor.dim_size(1), buffer.release()); } else { - return ::mediapipe::InvalidArgumentError("Unrecognized image depth."); + return mediapipe::InvalidArgumentError("Unrecognized image depth."); } cc->Outputs().Tag(kImage).Add(output.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/tensorflow/tensor_to_matrix_calculator.cc b/mediapipe/calculators/tensorflow/tensor_to_matrix_calculator.cc index 9c7f3458c..270f00982 100644 --- a/mediapipe/calculators/tensorflow/tensor_to_matrix_calculator.cc +++ b/mediapipe/calculators/tensorflow/tensor_to_matrix_calculator.cc @@ -34,20 +34,19 @@ constexpr char kMatrix[] = "MATRIX"; constexpr char kTensor[] = "TENSOR"; constexpr char kReference[] = "REFERENCE"; -::mediapipe::Status FillTimeSeriesHeaderIfValid(const Packet& header_packet, - TimeSeriesHeader* header) { +mediapipe::Status FillTimeSeriesHeaderIfValid(const Packet& header_packet, + TimeSeriesHeader* header) { CHECK(header); if (header_packet.IsEmpty()) { - return ::mediapipe::UnknownError("No header found."); + return mediapipe::UnknownError("No header found."); } if (!header_packet.ValidateAsType().ok()) { - return ::mediapipe::UnknownError( - "Packet does not contain TimeSeriesHeader."); + return mediapipe::UnknownError("Packet does not contain TimeSeriesHeader."); } *header = header_packet.Get(); if (header->has_sample_rate() && header->sample_rate() >= 0 && header->has_num_channels() && header->num_channels() >= 0) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } else { std::string error_message = "TimeSeriesHeader is missing necessary fields: " @@ -56,7 +55,7 @@ constexpr char kReference[] = "REFERENCE"; absl::StrAppend(&error_message, "Got header:\n", header->ShortDebugString()); #endif - return ::mediapipe::InvalidArgumentError(error_message); + return mediapipe::InvalidArgumentError(error_message); } } @@ -110,17 +109,17 @@ constexpr char kReference[] = "REFERENCE"; // } class TensorToMatrixCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; // Store header information so that we can verify the inputs in process(). TimeSeriesHeader header_; }; REGISTER_CALCULATOR(TensorToMatrixCalculator); -::mediapipe::Status TensorToMatrixCalculator::GetContract( +mediapipe::Status TensorToMatrixCalculator::GetContract( CalculatorContract* cc) { RET_CHECK_LE(cc->Inputs().NumEntries(), 2) << "Only one or two input streams are supported."; @@ -147,12 +146,12 @@ REGISTER_CALCULATOR(TensorToMatrixCalculator); cc->Outputs().Tag(kMatrix).Set( // Output Matrix. 
); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TensorToMatrixCalculator::Open(CalculatorContext* cc) { +mediapipe::Status TensorToMatrixCalculator::Open(CalculatorContext* cc) { auto input_header = absl::make_unique(); - ::mediapipe::Status header_status; + mediapipe::Status header_status; if (cc->Inputs().HasTag(kReference)) { header_status = FillTimeSeriesHeaderIfValid( cc->Inputs().Tag(kReference).Header(), input_header.get()); @@ -184,10 +183,10 @@ REGISTER_CALCULATOR(TensorToMatrixCalculator); cc->Outputs().Tag(kMatrix).SetHeader(Adopt(input_header.release())); } cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TensorToMatrixCalculator::Process(CalculatorContext* cc) { +mediapipe::Status TensorToMatrixCalculator::Process(CalculatorContext* cc) { // Daredevil requested CHECK for noisy failures rather than quieter RET_CHECK // failures. These are absolute conditions of the graph for the graph to be // valid, and if it is violated by any input anywhere, the graph will be @@ -221,7 +220,7 @@ REGISTER_CALCULATOR(TensorToMatrixCalculator); *output = Eigen::MatrixXf::Map(input_tensor.flat().data(), length, width); cc->Outputs().Tag(kMatrix).Add(output.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/tensorflow/tensor_to_vector_float_calculator.cc b/mediapipe/calculators/tensorflow/tensor_to_vector_float_calculator.cc index 7b447f4d5..e50df9276 100644 --- a/mediapipe/calculators/tensorflow/tensor_to_vector_float_calculator.cc +++ b/mediapipe/calculators/tensorflow/tensor_to_vector_float_calculator.cc @@ -28,17 +28,17 @@ namespace tf = ::tensorflow; class TensorToVectorFloatCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: TensorToVectorFloatCalculatorOptions options_; }; REGISTER_CALCULATOR(TensorToVectorFloatCalculator); -::mediapipe::Status TensorToVectorFloatCalculator::GetContract( +mediapipe::Status TensorToVectorFloatCalculator::GetContract( CalculatorContract* cc) { // Start with only one input packet. RET_CHECK_EQ(cc->Inputs().NumEntries(), 1) @@ -58,15 +58,15 @@ REGISTER_CALCULATOR(TensorToVectorFloatCalculator); // Output vector. 
); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TensorToVectorFloatCalculator::Open(CalculatorContext* cc) { +mediapipe::Status TensorToVectorFloatCalculator::Open(CalculatorContext* cc) { options_ = cc->Options<TensorToVectorFloatCalculatorOptions>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TensorToVectorFloatCalculator::Process( +mediapipe::Status TensorToVectorFloatCalculator::Process( CalculatorContext* cc) { const tf::Tensor& input_tensor = cc->Inputs().Index(0).Value().Get<tf::Tensor>(); @@ -103,7 +103,7 @@ REGISTER_CALCULATOR(TensorToVectorFloatCalculator); cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/tensorflow/tensorflow_inference_calculator.cc b/mediapipe/calculators/tensorflow/tensorflow_inference_calculator.cc index 5b9a74a6d..eb9891a37 100644 --- a/mediapipe/calculators/tensorflow/tensorflow_inference_calculator.cc +++ b/mediapipe/calculators/tensorflow/tensorflow_inference_calculator.cc @@ -234,7 +234,7 @@ class TensorFlowInferenceCalculator : public CalculatorBase { mediapipe::MonotonicClock::CreateSynchronizedMonotonicClock()); } - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { const auto& options = cc->Options<TensorFlowInferenceCalculatorOptions>(); RET_CHECK(!cc->Inputs().GetTags().empty()); for (const std::string& tag : cc->Inputs().GetTags()) { @@ -261,7 +261,7 @@ class TensorFlowInferenceCalculator : public CalculatorBase { .Tag("RECURRENT_INIT_TENSORS") .Set<std::unique_ptr<std::map<std::string, tf::Tensor>>>(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } std::unique_ptr<InferenceState> CreateInferenceState(CalculatorContext* cc) @@ -280,7 +280,7 @@ class TensorFlowInferenceCalculator : public CalculatorBase { return inference_state; } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { options_ = cc->Options<TensorFlowInferenceCalculatorOptions>(); RET_CHECK(cc->InputSidePackets().HasTag("SESSION")); @@ -304,10 +304,10 @@ class TensorFlowInferenceCalculator : public CalculatorBase { << "recurrent_tag_pair must be a colon " "separated std::string with two components: " << tag_pair; - RET_CHECK(::mediapipe::ContainsKey(tag_to_tensor_map_, tags[0])) + RET_CHECK(mediapipe::ContainsKey(tag_to_tensor_map_, tags[0])) << "Can't find tag '" << tags[0] << "' in signature " << options_.signature_name(); - RET_CHECK(::mediapipe::ContainsKey(tag_to_tensor_map_, tags[1])) + RET_CHECK(mediapipe::ContainsKey(tag_to_tensor_map_, tags[1])) << "Can't find tag '" << tags[1] << "' in signature " << options_.signature_name(); recurrent_feed_tags_.insert(tags[0]); @@ -316,12 +316,12 @@ class TensorFlowInferenceCalculator : public CalculatorBase { // Check that all tags are present in this signature bound to tensors.
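The recurrent-state checks above split each recurrent_tag_pair option on ':' and require both halves to be tags of the loaded signature; the fetch half of each pair is fed back as the feed half on the next step. Presumably a graph configures it along these lines (tag and stream names are hypothetical):

// Hypothetical recurrent setup; the SESSION side packet comes from one of
// the TensorFlowSession* calculators/generators later in this diff.
CalculatorGraphConfig::Node node =
    mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"(
      calculator: "TensorFlowInferenceCalculator"
      input_stream: "FEATURES:feature_tensors"
      output_stream: "PREDICTIONS:prediction_tensors"
      input_side_packet: "SESSION:tf_session"
      options {
        [mediapipe.TensorFlowInferenceCalculatorOptions.ext] {
          signature_name: "serving_default"
          recurrent_tag_pair: "LSTM_STATE_IN:LSTM_STATE_OUT"
        }
      }
    )");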
for (const std::string& tag : cc->Inputs().GetTags()) { - RET_CHECK(::mediapipe::ContainsKey(tag_to_tensor_map_, tag)) + RET_CHECK(mediapipe::ContainsKey(tag_to_tensor_map_, tag)) << "Can't find tag '" << tag << "' in signature " << options_.signature_name(); } for (const std::string& tag : cc->Outputs().GetTags()) { - RET_CHECK(::mediapipe::ContainsKey(tag_to_tensor_map_, tag)) + RET_CHECK(mediapipe::ContainsKey(tag_to_tensor_map_, tag)) << "Can't find tag '" << tag << "' in signature " << options_.signature_name(); } @@ -335,12 +335,12 @@ class TensorFlowInferenceCalculator : public CalculatorBase { cc->SetOffset(0); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Adds a batch dimension to the input tensor if specified in the calculator // options. - ::mediapipe::Status AddBatchDimension(tf::Tensor* input_tensor) { + mediapipe::Status AddBatchDimension(tf::Tensor* input_tensor) { if (options_.add_batch_dim_to_tensors()) { tf::TensorShape new_shape(input_tensor->shape()); new_shape.InsertDim(0, 1); @@ -348,17 +348,17 @@ class TensorFlowInferenceCalculator : public CalculatorBase { << "Could not add 0th dimension to tensor without changing its shape." << " Current shape: " << input_tensor->shape().DebugString(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status AggregateTensorPacket( + mediapipe::Status AggregateTensorPacket( const std::string& tag_name, const Packet& packet, std::map>* input_tensors_by_tag_by_timestamp, InferenceState* inference_state) ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_) { tf::Tensor input_tensor(packet.Get()); RET_CHECK_OK(AddBatchDimension(&input_tensor)); - if (::mediapipe::ContainsKey(recurrent_feed_tags_, tag_name)) { + if (mediapipe::ContainsKey(recurrent_feed_tags_, tag_name)) { // If we receive an input on a recurrent tag, override the state. // It's OK to override the global state because there is just one // input stream allowed for recurrent tensors. @@ -366,12 +366,12 @@ class TensorFlowInferenceCalculator : public CalculatorBase { } (*input_tensors_by_tag_by_timestamp)[packet.Timestamp()].insert( std::make_pair(tag_name, input_tensor)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Removes the batch dimension of the output tensor if specified in the // calculator options. - ::mediapipe::Status RemoveBatchDimension(tf::Tensor* output_tensor) { + mediapipe::Status RemoveBatchDimension(tf::Tensor* output_tensor) { if (options_.add_batch_dim_to_tensors()) { tf::TensorShape new_shape(output_tensor->shape()); new_shape.RemoveDim(0); @@ -380,10 +380,10 @@ class TensorFlowInferenceCalculator : public CalculatorBase { << "shape. Current shape: " << output_tensor->shape().DebugString() << " (The expected first dimension is 1 for a batch element.)"; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { std::unique_ptr inference_state_to_process; { absl::WriterMutexLock l(&mutex_); @@ -395,12 +395,11 @@ class TensorFlowInferenceCalculator : public CalculatorBase { for (const std::string& tag_as_node_name : cc->Inputs().GetTags()) { if (cc->Inputs().Tag(tag_as_node_name).IsEmpty()) { // Recurrent tensors can be empty. 
- if (!::mediapipe::ContainsKey(recurrent_feed_tags_, - tag_as_node_name)) { + if (!mediapipe::ContainsKey(recurrent_feed_tags_, tag_as_node_name)) { if (options_.skip_on_missing_features()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } else { - return ::mediapipe::InvalidArgumentError(absl::StrCat( + return mediapipe::InvalidArgumentError(absl::StrCat( "Tag ", tag_as_node_name, " not present at timestamp: ", cc->InputTimestamp().Value())); } @@ -409,7 +408,7 @@ class TensorFlowInferenceCalculator : public CalculatorBase { const auto& tensor_packets = cc->Inputs().Tag(tag_as_node_name).Get>(); if (tensor_packets.size() > options_.batch_size()) { - return ::mediapipe::InvalidArgumentError(absl::StrCat( + return mediapipe::InvalidArgumentError(absl::StrCat( "Batch for tag ", tag_as_node_name, " has more packets than batch capacity. batch_size: ", options_.batch_size(), " packets: ", tensor_packets.size())); @@ -447,10 +446,10 @@ class TensorFlowInferenceCalculator : public CalculatorBase { OutputBatch(cc, std::move(inference_state_to_process))); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Close(CalculatorContext* cc) override { + mediapipe::Status Close(CalculatorContext* cc) override { std::unique_ptr inference_state_to_process = nullptr; { absl::WriterMutexLock l(&mutex_); @@ -464,7 +463,7 @@ class TensorFlowInferenceCalculator : public CalculatorBase { MP_RETURN_IF_ERROR( OutputBatch(cc, std::move(inference_state_to_process))); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // When a batch of input tensors is ready to be run, runs TensorFlow and @@ -475,7 +474,7 @@ class TensorFlowInferenceCalculator : public CalculatorBase { // memory buffer. Therefore, copies are cheap and should not cause the memory // buffer to fall out of scope. In contrast, concat is only used where // necessary. - ::mediapipe::Status OutputBatch( + mediapipe::Status OutputBatch( CalculatorContext* cc, std::unique_ptr inference_state) { const int64 start_time = absl::ToUnixMicros(clock_->TimeNow()); std::vector> input_tensors; @@ -488,8 +487,8 @@ class TensorFlowInferenceCalculator : public CalculatorBase { keyed_tensors.second[0]); } else { // The input buffer can be empty for recurrent tensors. 
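AddBatchDimension and RemoveBatchDimension above are inverses around Session::Run. A freestanding sketch of the shape round trip they perform, using the same TensorFlow calls as the calculator (CopyFrom shares the underlying buffer, so both directions are cheap):

#include "tensorflow/core/framework/tensor.h"
#include "tensorflow/core/framework/tensor_shape.h"
#include "tensorflow/core/platform/logging.h"

namespace tf = ::tensorflow;

// [h, w, c] -> [1, h, w, c] before the session runs, then back again,
// mirroring the calculator's self-CopyFrom reshape trick.
void BatchRoundTrip(tf::Tensor* tensor) {
  tf::TensorShape batched(tensor->shape());
  batched.InsertDim(0, 1);                    // prepend a batch dim of size 1
  CHECK(tensor->CopyFrom(*tensor, batched));  // reshape without copying data
  tf::TensorShape squeezed(tensor->shape());
  squeezed.RemoveDim(0);                      // drop the batch dim again
  CHECK(tensor->CopyFrom(*tensor, squeezed));
}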
- RET_CHECK(::mediapipe::ContainsKey(recurrent_feed_tags_, - keyed_tensors.first)) + RET_CHECK( + mediapipe::ContainsKey(recurrent_feed_tags_, keyed_tensors.first)) << "A non-recurrent tensor does not have an input: " << keyed_tensors.first; } @@ -606,7 +605,7 @@ class TensorFlowInferenceCalculator : public CalculatorBase { inference_state_->batch_timestamps_.clear(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: diff --git a/mediapipe/calculators/tensorflow/tensorflow_inference_calculator_test.cc b/mediapipe/calculators/tensorflow/tensorflow_inference_calculator_test.cc index 2ec6cbe3b..557d46ff8 100644 --- a/mediapipe/calculators/tensorflow/tensorflow_inference_calculator_test.cc +++ b/mediapipe/calculators/tensorflow/tensorflow_inference_calculator_test.cc @@ -47,15 +47,15 @@ std::string GetGraphDefPath() { CFURLGetFileSystemRepresentation( bundle_url, true, reinterpret_cast(path), sizeof(path)); CFRelease(bundle_url); - return ::mediapipe::file::JoinPath(path, "testdata/frozen_graph_def.pb"); + return mediapipe::file::JoinPath(path, "testdata/frozen_graph_def.pb"); #elif defined(__ANDROID__) char path[1024]; getcwd(path, sizeof(path)); - return ::mediapipe::file::JoinPath(path, - "mediapipe/calculators/tensorflow/" - "testdata/frozen_graph_def.pb"); + return mediapipe::file::JoinPath(path, + "mediapipe/calculators/tensorflow/" + "testdata/frozen_graph_def.pb"); #else - return ::mediapipe::file::JoinPath( + return mediapipe::file::JoinPath( "./", // This should match the path of the output files // of the genrule() that generates test model files. diff --git a/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_calculator.cc b/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_calculator.cc index 1c34ee6ed..2650447ca 100644 --- a/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_calculator.cc +++ b/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_calculator.cc @@ -59,7 +59,7 @@ void SetPreferredDevice(tf::GraphDef* graph_def, absl::string_view device_id) { class TensorFlowSessionFromFrozenGraphCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { const auto& options = cc->Options(); bool has_exactly_one_model = @@ -89,10 +89,10 @@ class TensorFlowSessionFromFrozenGraphCalculator : public CalculatorBase { // a map from tags to tensor names. 
); RET_CHECK_GT(options.tag_to_tensor_names().size(), 0); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { auto clock = std::unique_ptr( mediapipe::MonotonicClock::CreateSynchronizedMonotonicClock()); const uint64 start_time = absl::ToUnixMicros(clock->TimeNow()); @@ -151,11 +151,11 @@ class TensorFlowSessionFromFrozenGraphCalculator : public CalculatorBase { const uint64 end_time = absl::ToUnixMicros(clock->TimeNow()); LOG(INFO) << "Loaded frozen model in: " << end_time - start_time << " microseconds."; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { - return ::mediapipe::OkStatus(); + mediapipe::Status Process(CalculatorContext* cc) override { + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(TensorFlowSessionFromFrozenGraphCalculator); diff --git a/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_calculator_test.cc b/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_calculator_test.cc index 5277eb348..097f5534b 100644 --- a/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_calculator_test.cc +++ b/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_calculator_test.cc @@ -120,7 +120,7 @@ TEST_F(TensorFlowSessionFromFrozenGraphCalculatorTest, TEST_F(TensorFlowSessionFromFrozenGraphCalculatorTest, ProducesPacketUsableByTensorFlowInferenceCalculator) { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( absl::Substitute(R"( node { calculator: "TensorFlowInferenceCalculator" diff --git a/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_generator.cc b/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_generator.cc index cd46a9a9f..b2dc3a8d5 100644 --- a/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_generator.cc +++ b/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_generator.cc @@ -55,7 +55,7 @@ void SetPreferredDevice(tf::GraphDef* graph_def, absl::string_view device_id) { class TensorFlowSessionFromFrozenGraphGenerator : public PacketGenerator { public: - static ::mediapipe::Status FillExpectations( + static mediapipe::Status FillExpectations( const PacketGeneratorOptions& extendable_options, PacketTypeSet* input_side_packets, PacketTypeSet* output_side_packets) { RET_CHECK(extendable_options.HasExtension( @@ -87,10 +87,10 @@ class TensorFlowSessionFromFrozenGraphGenerator : public PacketGenerator { // a map from tags to tensor names. 
); RET_CHECK_GT(options.tag_to_tensor_names().size(), 0); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - static ::mediapipe::Status Generate( + static mediapipe::Status Generate( const PacketGeneratorOptions& packet_generator_options, const PacketSet& input_side_packets, PacketSet* output_side_packets) { auto clock = std::unique_ptr( @@ -151,7 +151,7 @@ class TensorFlowSessionFromFrozenGraphGenerator : public PacketGenerator { const uint64 end_time = absl::ToUnixMicros(clock->TimeNow()); LOG(INFO) << "Loaded frozen model in: " << end_time - start_time << " microseconds."; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_PACKET_GENERATOR(TensorFlowSessionFromFrozenGraphGenerator); diff --git a/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_generator_test.cc b/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_generator_test.cc index e2b968217..793f58163 100644 --- a/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_generator_test.cc +++ b/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_generator_test.cc @@ -104,7 +104,7 @@ TEST_F(TensorFlowSessionFromFrozenGraphGeneratorTest, PacketSet input_side_packets(tool::CreateTagMap({}).ValueOrDie()); PacketSet output_side_packets( tool::CreateTagMap({"SESSION:session"}).ValueOrDie()); - ::mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( + mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( "TensorFlowSessionFromFrozenGraphGenerator", extendable_options_, input_side_packets, &output_side_packets); MP_EXPECT_OK(run_status) << run_status.message(); @@ -116,7 +116,7 @@ TEST_F(TensorFlowSessionFromFrozenGraphGeneratorTest, TEST_F(TensorFlowSessionFromFrozenGraphGeneratorTest, ProducesPacketUsableByTensorFlowInferenceCalculator) { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( absl::Substitute(R"( node { calculator: "TensorFlowInferenceCalculator" @@ -180,7 +180,7 @@ TEST_F(TensorFlowSessionFromFrozenGraphGeneratorTest, generator_options_->clear_graph_proto_path(); input_side_packets.Tag("STRING_MODEL") = Adopt(new std::string(serialized_graph_contents)); - ::mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( + mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( "TensorFlowSessionFromFrozenGraphGenerator", extendable_options_, input_side_packets, &output_side_packets); MP_EXPECT_OK(run_status) << run_status.message(); @@ -197,7 +197,7 @@ TEST_F( generator_options_->clear_graph_proto_path(); input_side_packets.Tag("STRING_MODEL_FILE_PATH") = Adopt(new std::string(GetGraphDefPath())); - ::mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( + mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( "TensorFlowSessionFromFrozenGraphGenerator", extendable_options_, input_side_packets, &output_side_packets); MP_EXPECT_OK(run_status) << run_status.message(); @@ -212,10 +212,10 @@ TEST_F(TensorFlowSessionFromFrozenGraphGeneratorTest, tool::CreateTagMap({"SESSION:session"}).ValueOrDie()); input_side_packets.Tag("STRING_MODEL_FILE_PATH") = Adopt(new std::string(GetGraphDefPath())); - ::mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( + mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( "TensorFlowSessionFromFrozenGraphGenerator", extendable_options_, input_side_packets, &output_side_packets); - EXPECT_EQ(run_status.code(), ::mediapipe::StatusCode::kInternal); + 
EXPECT_EQ(run_status.code(), mediapipe::StatusCode::kInternal); EXPECT_THAT( run_status.message(), ::testing::HasSubstr("Must have exactly one of graph_proto_path")); @@ -237,10 +237,10 @@ TEST_F(TensorFlowSessionFromFrozenGraphGeneratorTest, input_side_packets.Tag("STRING_MODEL_FILE_PATH") = Adopt(new std::string(GetGraphDefPath())); - ::mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( + mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( "TensorFlowSessionFromFrozenGraphGenerator", extendable_options_, input_side_packets, &output_side_packets); - EXPECT_EQ(run_status.code(), ::mediapipe::StatusCode::kInternal); + EXPECT_EQ(run_status.code(), mediapipe::StatusCode::kInternal); EXPECT_THAT( run_status.message(), ::testing::HasSubstr("Must have exactly one of graph_proto_path")); @@ -263,10 +263,10 @@ TEST_F(TensorFlowSessionFromFrozenGraphGeneratorTest, Adopt(new std::string(GetGraphDefPath())); generator_options_->clear_graph_proto_path(); - ::mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( + mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( "TensorFlowSessionFromFrozenGraphGenerator", extendable_options_, input_side_packets, &output_side_packets); - EXPECT_EQ(run_status.code(), ::mediapipe::StatusCode::kInternal); + EXPECT_EQ(run_status.code(), mediapipe::StatusCode::kInternal); EXPECT_THAT( run_status.message(), ::testing::HasSubstr("Must have exactly one of graph_proto_path")); @@ -278,7 +278,7 @@ TEST_F(TensorFlowSessionFromFrozenGraphGeneratorTest, PacketSet output_side_packets( tool::CreateTagMap({"SESSION:session"}).ValueOrDie()); generator_options_->add_initialization_op_names("multiplied:0"); - ::mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( + mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( "TensorFlowSessionFromFrozenGraphGenerator", extendable_options_, input_side_packets, &output_side_packets); MP_EXPECT_OK(run_status); diff --git a/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_calculator.cc b/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_calculator.cc index 55709bcd9..5852f5655 100644 --- a/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_calculator.cc +++ b/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_calculator.cc @@ -35,9 +35,9 @@ static constexpr char kStringSavedModelPath[] = "STRING_SAVED_MODEL_PATH"; // Given the path to a directory containing multiple tensorflow saved models // in subdirectories, replaces path with the alphabetically last subdirectory. 
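GetLatestDirectory, duplicated in the saved-model calculator here and in the generator below, relies purely on lexicographic order, which works because exports are conventionally named with zero-padded timestamps. A self-contained illustration of the selection rule (paths invented; the real code matches files under each subdirectory and then takes Dirname of the winner):

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> saved_models = {
      "/models/export/1604000000",   // hypothetical timestamped exports
      "/models/export/1604999999",
      "/models/export/1604500000"};
  std::sort(saved_models.begin(), saved_models.end());
  std::cout << "loading " << saved_models.back() << "\n";  // ...999999 wins
  return 0;
}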
-::mediapipe::Status GetLatestDirectory(std::string* path) { +mediapipe::Status GetLatestDirectory(std::string* path) { #if defined(__ANDROID__) - return ::mediapipe::UnimplementedError( + return mediapipe::UnimplementedError( "GetLatestDirectory is not implemented on Android"); #else std::vector saved_models; @@ -47,7 +47,7 @@ static constexpr char kStringSavedModelPath[] = "STRING_SAVED_MODEL_PATH"; << "No exported bundles found in " << path; ::std::sort(saved_models.begin(), saved_models.end()); *path = std::string(file::Dirname(saved_models.back())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); #endif } @@ -93,7 +93,7 @@ const std::string MaybeConvertSignatureToTag( // } class TensorFlowSessionFromSavedModelCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { const auto& options = cc->Options(); const bool has_exactly_one_model = @@ -108,10 +108,10 @@ class TensorFlowSessionFromSavedModelCalculator : public CalculatorBase { } // A TensorFlow model loaded and ready for use along with tensor cc->OutputSidePackets().Tag("SESSION").Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { const auto& options = cc->Options(); std::string path = cc->InputSidePackets().HasTag(kStringSavedModelPath) @@ -140,9 +140,8 @@ class TensorFlowSessionFromSavedModelCalculator : public CalculatorBase { ::tensorflow::Status status = tensorflow::LoadSavedModel( session_options, run_options, path, tags_set, saved_model.get()); if (!status.ok()) { - return ::mediapipe::Status( - static_cast<::mediapipe::StatusCode>(status.code()), - status.ToString()); + return mediapipe::Status( + static_cast(status.code()), status.ToString()); } auto session = absl::make_unique(); @@ -161,11 +160,11 @@ class TensorFlowSessionFromSavedModelCalculator : public CalculatorBase { } cc->OutputSidePackets().Tag("SESSION").Set(Adopt(session.release())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { - return ::mediapipe::OkStatus(); + mediapipe::Status Process(CalculatorContext* cc) override { + return mediapipe::OkStatus(); } }; diff --git a/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_calculator_test.cc b/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_calculator_test.cc index d6064d862..516a50d8e 100644 --- a/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_calculator_test.cc +++ b/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_calculator_test.cc @@ -132,7 +132,7 @@ TEST_F(TensorFlowSessionFromSavedModelCalculatorTest, TEST_F(TensorFlowSessionFromSavedModelCalculatorTest, ProducesPacketUsableByTensorFlowInferenceCalculator) { CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( absl::Substitute(R"( node { calculator: "TensorFlowInferenceCalculator" diff --git a/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_generator.cc b/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_generator.cc index 73ffc6497..7e5fb289e 100644 --- a/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_generator.cc +++ b/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_generator.cc 
@@ -37,9 +37,9 @@ static constexpr char kStringSavedModelPath[] = "STRING_SAVED_MODEL_PATH"; // Given the path to a directory containing multiple tensorflow saved models // in subdirectories, replaces path with the alphabetically last subdirectory. -::mediapipe::Status GetLatestDirectory(std::string* path) { +mediapipe::Status GetLatestDirectory(std::string* path) { #if defined(__ANDROID__) - return ::mediapipe::UnimplementedError( + return mediapipe::UnimplementedError( "GetLatestDirectory is not implemented on Android"); #else std::vector saved_models; @@ -49,7 +49,7 @@ static constexpr char kStringSavedModelPath[] = "STRING_SAVED_MODEL_PATH"; << "No exported bundles found in " << path; ::std::sort(saved_models.begin(), saved_models.end()); *path = std::string(file::Dirname(saved_models.back())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); #endif } @@ -83,7 +83,7 @@ const std::string MaybeConvertSignatureToTag( // ready for execution and a map between tags and tensor names. class TensorFlowSessionFromSavedModelGenerator : public PacketGenerator { public: - static ::mediapipe::Status FillExpectations( + static mediapipe::Status FillExpectations( const PacketGeneratorOptions& extendable_options, PacketTypeSet* input_side_packets, PacketTypeSet* output_side_packets) { const TensorFlowSessionFromSavedModelGeneratorOptions& options = @@ -101,10 +101,10 @@ class TensorFlowSessionFromSavedModelGenerator : public PacketGenerator { } // A TensorFlow model loaded and ready for use along with tensor output_side_packets->Tag("SESSION").Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - static ::mediapipe::Status Generate( + static mediapipe::Status Generate( const PacketGeneratorOptions& extendable_options, const PacketSet& input_side_packets, PacketSet* output_side_packets) { const TensorFlowSessionFromSavedModelGeneratorOptions& options = @@ -135,9 +135,8 @@ class TensorFlowSessionFromSavedModelGenerator : public PacketGenerator { ::tensorflow::Status status = tensorflow::LoadSavedModel( session_options, run_options, path, tags_set, saved_model.get()); if (!status.ok()) { - return ::mediapipe::Status( - static_cast<::mediapipe::StatusCode>(status.code()), - status.ToString()); + return mediapipe::Status( + static_cast(status.code()), status.ToString()); } auto session = absl::make_unique(); session->session = std::move(saved_model->session); @@ -155,7 +154,7 @@ class TensorFlowSessionFromSavedModelGenerator : public PacketGenerator { } output_side_packets->Tag("SESSION") = Adopt(session.release()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_PACKET_GENERATOR(TensorFlowSessionFromSavedModelGenerator); diff --git a/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_generator_test.cc b/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_generator_test.cc index 792c3841b..ffe9d1fc5 100644 --- a/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_generator_test.cc +++ b/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_generator_test.cc @@ -69,7 +69,7 @@ TEST_F(TensorFlowSessionFromSavedModelGeneratorTest, PacketSet input_side_packets(tool::CreateTagMap({}).ValueOrDie()); PacketSet output_side_packets( tool::CreateTagMap({"SESSION:session"}).ValueOrDie()); - ::mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( + mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( "TensorFlowSessionFromSavedModelGenerator", 
extendable_options_, input_side_packets, &output_side_packets); MP_EXPECT_OK(run_status) << run_status.message(); @@ -111,7 +111,7 @@ TEST_F(TensorFlowSessionFromSavedModelGeneratorTest, Adopt(new std::string(GetSavedModelDir())); PacketSet output_side_packets( tool::CreateTagMap({"SESSION:session"}).ValueOrDie()); - ::mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( + mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( "TensorFlowSessionFromSavedModelGenerator", extendable_options_, input_side_packets, &output_side_packets); MP_EXPECT_OK(run_status) << run_status.message(); @@ -126,7 +126,7 @@ TEST_F(TensorFlowSessionFromSavedModelGeneratorTest, TEST_F(TensorFlowSessionFromSavedModelGeneratorTest, ProducesPacketUsableByTensorFlowInferenceCalculator) { CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( absl::Substitute(R"( node { calculator: "TensorFlowInferenceCalculator" @@ -187,7 +187,7 @@ TEST_F(TensorFlowSessionFromSavedModelGeneratorTest, PacketSet input_side_packets(tool::CreateTagMap({}).ValueOrDie()); PacketSet output_side_packets( tool::CreateTagMap({"SESSION:session"}).ValueOrDie()); - ::mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( + mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( "TensorFlowSessionFromSavedModelGenerator", extendable_options_, input_side_packets, &output_side_packets); MP_EXPECT_OK(run_status) << run_status.message(); @@ -208,7 +208,7 @@ TEST_F(TensorFlowSessionFromSavedModelGeneratorTest, PacketSet input_side_packets(tool::CreateTagMap({}).ValueOrDie()); PacketSet output_side_packets( tool::CreateTagMap({"SESSION:session"}).ValueOrDie()); - ::mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( + mediapipe::Status run_status = tool::RunGenerateAndValidateTypes( "TensorFlowSessionFromSavedModelGenerator", extendable_options_, input_side_packets, &output_side_packets); MP_EXPECT_OK(run_status) << run_status.message(); diff --git a/mediapipe/calculators/tensorflow/tfrecord_reader_calculator.cc b/mediapipe/calculators/tensorflow/tfrecord_reader_calculator.cc index f3b0b485d..46c03a1be 100644 --- a/mediapipe/calculators/tensorflow/tfrecord_reader_calculator.cc +++ b/mediapipe/calculators/tensorflow/tfrecord_reader_calculator.cc @@ -49,13 +49,13 @@ const char kSequenceExampleTag[] = "SEQUENCE_EXAMPLE"; // } class TFRecordReaderCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; }; -::mediapipe::Status TFRecordReaderCalculator::GetContract( +mediapipe::Status TFRecordReaderCalculator::GetContract( CalculatorContract* cc) { cc->InputSidePackets().Tag(kTFRecordPath).Set(); if (cc->InputSidePackets().HasTag(kRecordIndex)) { @@ -73,10 +73,10 @@ class TFRecordReaderCalculator : public CalculatorBase { .Tag(kSequenceExampleTag) .Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TFRecordReaderCalculator::Open(CalculatorContext* cc) { +mediapipe::Status TFRecordReaderCalculator::Open(CalculatorContext* cc) { std::unique_ptr file; auto tf_status = tensorflow::Env::Default()->NewRandomAccessFile( 
cc->InputSidePackets().Tag(kTFRecordPath).Get(), &file); @@ -114,11 +114,11 @@ class TFRecordReaderCalculator : public CalculatorBase { ++current_idx; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TFRecordReaderCalculator::Process(CalculatorContext* cc) { - return ::mediapipe::OkStatus(); +mediapipe::Status TFRecordReaderCalculator::Process(CalculatorContext* cc) { + return mediapipe::OkStatus(); } REGISTER_CALCULATOR(TFRecordReaderCalculator); diff --git a/mediapipe/calculators/tensorflow/unpack_media_sequence_calculator.cc b/mediapipe/calculators/tensorflow/unpack_media_sequence_calculator.cc index 8bd0273e0..1c4fc9218 100644 --- a/mediapipe/calculators/tensorflow/unpack_media_sequence_calculator.cc +++ b/mediapipe/calculators/tensorflow/unpack_media_sequence_calculator.cc @@ -42,7 +42,7 @@ const char kImagesFrameRateTag[] = "IMAGE_FRAME_RATE"; const char kAudioDecoderOptions[] = "AUDIO_DECODER_OPTIONS"; namespace tf = ::tensorflow; -namespace mpms = ::mediapipe::mediasequence; +namespace mpms = mediapipe::mediasequence; // Source calculator to unpack side_packets and streams from tf.SequenceExamples // diff --git a/mediapipe/calculators/tensorflow/unpack_media_sequence_calculator_test.cc b/mediapipe/calculators/tensorflow/unpack_media_sequence_calculator_test.cc index 185e2e186..dcbda224e 100644 --- a/mediapipe/calculators/tensorflow/unpack_media_sequence_calculator_test.cc +++ b/mediapipe/calculators/tensorflow/unpack_media_sequence_calculator_test.cc @@ -31,7 +31,7 @@ namespace mediapipe { namespace { namespace tf = ::tensorflow; -namespace mpms = ::mediapipe::mediasequence; +namespace mpms = mediapipe::mediasequence; class UnpackMediaSequenceCalculatorTest : public ::testing::Test { protected: diff --git a/mediapipe/calculators/tensorflow/unpack_yt8m_sequence_example_calculator.cc b/mediapipe/calculators/tensorflow/unpack_yt8m_sequence_example_calculator.cc index daf7f1117..d03d2c0e0 100644 --- a/mediapipe/calculators/tensorflow/unpack_yt8m_sequence_example_calculator.cc +++ b/mediapipe/calculators/tensorflow/unpack_yt8m_sequence_example_calculator.cc @@ -64,7 +64,7 @@ std::string GetQuantizedFeature( // } class UnpackYt8mSequenceExampleCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->InputSidePackets() .Tag(kYt8mSequenceExample) .Set(); @@ -84,10 +84,10 @@ class UnpackYt8mSequenceExampleCalculator : public CalculatorBase { if (cc->OutputSidePackets().HasTag(kSegmentSize)) { cc->OutputSidePackets().Tag(kSegmentSize).Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { const tensorflow::SequenceExample& sequence_example = cc->InputSidePackets() .Tag(kYt8mSequenceExample) @@ -108,7 +108,7 @@ class UnpackYt8mSequenceExampleCalculator : public CalculatorBase { .feature_size(); if (rgb_feature_list_length != audio_feature_list_length) { - return ::mediapipe::FailedPreconditionError(absl::StrCat( + return mediapipe::FailedPreconditionError(absl::StrCat( "Data corruption: the length of audio features and rgb features are " "not equal. Please check the sequence example that contains yt8m " "id: ", @@ -151,12 +151,12 @@ class UnpackYt8mSequenceExampleCalculator : public CalculatorBase { } LOG(INFO) << "Reading the sequence example that contains yt8m id: " << yt8m_id << ". 
Feature list length: " << feature_list_length_; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { if (current_index_ >= feature_list_length_) { - return ::mediapipe::tool::StatusStop(); + return mediapipe::tool::StatusStop(); } const tensorflow::SequenceExample& sequence_example = cc->InputSidePackets() @@ -179,7 +179,7 @@ class UnpackYt8mSequenceExampleCalculator : public CalculatorBase { GetQuantizedFeature(sequence_example, kAudio, current_index_)) .At(timestamp)); ++current_index_; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: diff --git a/mediapipe/calculators/tensorflow/vector_float_to_tensor_calculator.cc b/mediapipe/calculators/tensorflow/vector_float_to_tensor_calculator.cc index f7c041788..d75348daa 100644 --- a/mediapipe/calculators/tensorflow/vector_float_to_tensor_calculator.cc +++ b/mediapipe/calculators/tensorflow/vector_float_to_tensor_calculator.cc @@ -44,17 +44,17 @@ namespace tf = ::tensorflow; // } class VectorFloatToTensorCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: VectorFloatToTensorCalculatorOptions options_; }; REGISTER_CALCULATOR(VectorFloatToTensorCalculator); -::mediapipe::Status VectorFloatToTensorCalculator::GetContract( +mediapipe::Status VectorFloatToTensorCalculator::GetContract( CalculatorContract* cc) { const auto& options = cc->Options(); // Start with only one input packet. @@ -75,15 +75,15 @@ REGISTER_CALCULATOR(VectorFloatToTensorCalculator); cc->Outputs().Index(0).Set( // Output stream with data as tf::Tensor and the same TimeSeriesHeader. 
); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status VectorFloatToTensorCalculator::Open(CalculatorContext* cc) { +mediapipe::Status VectorFloatToTensorCalculator::Open(CalculatorContext* cc) { options_ = cc->Options(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status VectorFloatToTensorCalculator::Process( +mediapipe::Status VectorFloatToTensorCalculator::Process( CalculatorContext* cc) { tf::TensorShape tensor_shape; if (options_.input_size() == INPUT_2D) { @@ -127,7 +127,7 @@ REGISTER_CALCULATOR(VectorFloatToTensorCalculator); } else { LOG(FATAL) << "input size not supported"; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/tensorflow/vector_int_to_tensor_calculator.cc b/mediapipe/calculators/tensorflow/vector_int_to_tensor_calculator.cc index 1269e2761..c05bccd70 100644 --- a/mediapipe/calculators/tensorflow/vector_int_to_tensor_calculator.cc +++ b/mediapipe/calculators/tensorflow/vector_int_to_tensor_calculator.cc @@ -62,17 +62,17 @@ void AssignMatrixValue(int r, int c, int value, tf::Tensor* output_tensor) { // } class VectorIntToTensorCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: VectorIntToTensorCalculatorOptions options_; }; REGISTER_CALCULATOR(VectorIntToTensorCalculator); -::mediapipe::Status VectorIntToTensorCalculator::GetContract( +mediapipe::Status VectorIntToTensorCalculator::GetContract( CalculatorContract* cc) { const auto& options = cc->Options(); // Start with only one input packet. 
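Both vector-to-tensor calculators in this region reduce to the same copy step. A condensed sketch of the 1-D float case (hypothetical helper, assuming TensorFlow's C++ Tensor API; the real calculators also handle 2-D input, and the int variant additionally supports DT_UINT8/DT_INT32/DT_INT64):

    #include <vector>
    #include "tensorflow/core/framework/tensor.h"

    namespace tf = ::tensorflow;

    tf::Tensor VectorToTensor(const std::vector<float>& input) {
      tf::Tensor tensor(tf::DT_FLOAT,
                        tf::TensorShape({static_cast<tf::int64>(input.size())}));
      auto flat = tensor.flat<float>();
      for (int i = 0; i < static_cast<int>(input.size()); ++i) {
        flat(i) = input[i];  // element-wise copy into the tensor buffer
      }
      return tensor;
    }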
@@ -92,20 +92,19 @@ REGISTER_CALCULATOR(VectorIntToTensorCalculator); RET_CHECK_EQ(cc->Outputs().NumEntries(), 1) << "Only one output stream is supported."; cc->Outputs().Tag(kTensorOut).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status VectorIntToTensorCalculator::Open(CalculatorContext* cc) { +mediapipe::Status VectorIntToTensorCalculator::Open(CalculatorContext* cc) { options_ = cc->Options(); RET_CHECK(options_.tensor_data_type() == tf::DT_UINT8 || options_.tensor_data_type() == tf::DT_INT32 || options_.tensor_data_type() == tf::DT_INT64) << "Output tensor data type is not supported."; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status VectorIntToTensorCalculator::Process( - CalculatorContext* cc) { +mediapipe::Status VectorIntToTensorCalculator::Process(CalculatorContext* cc) { tf::TensorShape tensor_shape; if (options_.input_size() == INPUT_2D) { const std::vector>& input = @@ -197,7 +196,7 @@ REGISTER_CALCULATOR(VectorIntToTensorCalculator); } else { LOG(FATAL) << "input size not supported"; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/tflite/BUILD b/mediapipe/calculators/tflite/BUILD index 53775bed1..18138b8d7 100644 --- a/mediapipe/calculators/tflite/BUILD +++ b/mediapipe/calculators/tflite/BUILD @@ -185,8 +185,8 @@ cc_library( ":tflite_inference_calculator_cc_proto", "@com_google_absl//absl/memory", "//mediapipe/framework:calculator_framework", - "//mediapipe/util:resource_util", "//mediapipe/util/tflite:config", + "//mediapipe/util/tflite:tflite_model_loader", "@org_tensorflow//tensorflow/lite:framework", "@org_tensorflow//tensorflow/lite/delegates/xnnpack:xnnpack_delegate", "@org_tensorflow//tensorflow/lite/kernels:builtin_ops", @@ -404,6 +404,7 @@ cc_library( visibility = ["//visibility:public"], deps = [ ":tflite_tensors_to_classification_calculator_cc_proto", + "@com_google_absl//absl/container:node_hash_map", "@com_google_absl//absl/strings:str_format", "@com_google_absl//absl/types:span", "//mediapipe/framework/formats:classification_cc_proto", diff --git a/mediapipe/calculators/tflite/ssd_anchors_calculator.cc b/mediapipe/calculators/tflite/ssd_anchors_calculator.cc index 90d35573e..07a91ecd8 100644 --- a/mediapipe/calculators/tflite/ssd_anchors_calculator.cc +++ b/mediapipe/calculators/tflite/ssd_anchors_calculator.cc @@ -71,12 +71,12 @@ float CalculateScale(float min_scale, float max_scale, int stride_index, // } class SsdAnchorsCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->OutputSidePackets().Index(0).Set>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->SetOffset(TimestampDiff(0)); const SsdAnchorsCalculatorOptions& options = @@ -85,24 +85,24 @@ class SsdAnchorsCalculator : public CalculatorBase { auto anchors = absl::make_unique>(); MP_RETURN_IF_ERROR(GenerateAnchors(anchors.get(), options)); cc->OutputSidePackets().Index(0).Set(Adopt(anchors.release())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { - return ::mediapipe::OkStatus(); + mediapipe::Status Process(CalculatorContext* cc) override { + return mediapipe::OkStatus(); } private: - 
static ::mediapipe::Status GenerateAnchors( + static mediapipe::Status GenerateAnchors( std::vector* anchors, const SsdAnchorsCalculatorOptions& options); }; REGISTER_CALCULATOR(SsdAnchorsCalculator); -::mediapipe::Status SsdAnchorsCalculator::GenerateAnchors( +mediapipe::Status SsdAnchorsCalculator::GenerateAnchors( std::vector* anchors, const SsdAnchorsCalculatorOptions& options) { // Verify the options. if (!options.feature_map_height_size() && !options.strides_size()) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "Both feature map shape and strides are missing. Must provide either " "one."); } @@ -206,7 +206,7 @@ REGISTER_CALCULATOR(SsdAnchorsCalculator); } layer_id = last_same_stride_layer; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/tflite/tflite_converter_calculator.cc b/mediapipe/calculators/tflite/tflite_converter_calculator.cc index e81354242..ccb7d3744 100644 --- a/mediapipe/calculators/tflite/tflite_converter_calculator.cc +++ b/mediapipe/calculators/tflite/tflite_converter_calculator.cc @@ -134,22 +134,21 @@ struct GPUData { // class TfLiteConverterCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; private: - ::mediapipe::Status InitGpu(CalculatorContext* cc); - ::mediapipe::Status LoadOptions(CalculatorContext* cc); + mediapipe::Status InitGpu(CalculatorContext* cc); + mediapipe::Status LoadOptions(CalculatorContext* cc); template - ::mediapipe::Status NormalizeImage(const ImageFrame& image_frame, - bool flip_vertically, float* tensor_ptr); - ::mediapipe::Status CopyMatrixToTensor(const Matrix& matrix, - float* tensor_ptr); - ::mediapipe::Status ProcessCPU(CalculatorContext* cc); - ::mediapipe::Status ProcessGPU(CalculatorContext* cc); + mediapipe::Status NormalizeImage(const ImageFrame& image_frame, + bool flip_vertically, float* tensor_ptr); + mediapipe::Status CopyMatrixToTensor(const Matrix& matrix, float* tensor_ptr); + mediapipe::Status ProcessCPU(CalculatorContext* cc); + mediapipe::Status ProcessGPU(CalculatorContext* cc); std::unique_ptr interpreter_ = nullptr; @@ -183,7 +182,7 @@ bool ShouldUseGpu(CC* cc) { } } // namespace -::mediapipe::Status TfLiteConverterCalculator::GetContract( +mediapipe::Status TfLiteConverterCalculator::GetContract( CalculatorContract* cc) { // Confirm only one of the input streams is present. RET_CHECK(cc->Inputs().HasTag(kImageFrameTag) ^ @@ -224,10 +223,10 @@ bool ShouldUseGpu(CC* cc) { // Assign this calculator's default InputStreamHandler. 
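Returning to the SsdAnchorsCalculator hunks above: the CalculateScale helper named in the hunk header conventionally interpolates the anchor scale linearly across stride layers. A sketch of that convention (an assumption; the function body is outside this patch):

    float CalculateScale(float min_scale, float max_scale, int stride_index,
                         int num_strides) {
      if (num_strides == 1) return (min_scale + max_scale) * 0.5f;
      return min_scale +
             (max_scale - min_scale) * stride_index / (num_strides - 1.0f);
    }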
cc->SetInputStreamHandler("FixedSizeInputStreamHandler"); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteConverterCalculator::Open(CalculatorContext* cc) { +mediapipe::Status TfLiteConverterCalculator::Open(CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); MP_RETURN_IF_ERROR(LoadOptions(cc)); @@ -252,13 +251,13 @@ bool ShouldUseGpu(CC* cc) { interpreter_->SetInputs({0}); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteConverterCalculator::Process(CalculatorContext* cc) { +mediapipe::Status TfLiteConverterCalculator::Process(CalculatorContext* cc) { if (use_gpu_) { if (cc->Inputs().Tag(kGpuBufferTag).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } if (!initialized_) { MP_RETURN_IF_ERROR(InitGpu(cc)); @@ -270,24 +269,23 @@ bool ShouldUseGpu(CC* cc) { // Convert to CPU tensors or Matrix type. MP_RETURN_IF_ERROR(ProcessCPU(cc)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteConverterCalculator::Close(CalculatorContext* cc) { +mediapipe::Status TfLiteConverterCalculator::Close(CalculatorContext* cc) { interpreter_.reset(); #if MEDIAPIPE_TFLITE_GL_INFERENCE gpu_helper_.RunInGlContext([this] { gpu_data_out_.reset(); }); #elif MEDIAPIPE_TFLITE_METAL_INFERENCE gpu_data_out_.reset(); #endif // MEDIAPIPE_TFLITE_GL_INFERENCE - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteConverterCalculator::ProcessCPU( - CalculatorContext* cc) { +mediapipe::Status TfLiteConverterCalculator::ProcessCPU(CalculatorContext* cc) { if (cc->Inputs().HasTag(kImageFrameTag)) { if (cc->Inputs().Tag(kImageFrameTag).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // CPU ImageFrame to TfLiteTensor conversion. @@ -363,7 +361,7 @@ bool ShouldUseGpu(CC* cc) { MP_RETURN_IF_ERROR(NormalizeImage(image_frame, flip_vertically_, tensor_buffer)); } else { - return ::mediapipe::InternalError( + return mediapipe::InternalError( "Only byte-based (8 bit) and float (32 bit) images supported."); } } @@ -375,7 +373,7 @@ bool ShouldUseGpu(CC* cc) { .Add(output_tensors.release(), cc->InputTimestamp()); } else if (cc->Inputs().HasTag(kMatrixTag)) { if (cc->Inputs().Tag(kMatrixTag).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // CPU Matrix to TfLiteTensor conversion. const auto& matrix = cc->Inputs().Tag(kMatrixTag).Get(); @@ -407,17 +405,16 @@ bool ShouldUseGpu(CC* cc) { .Add(output_tensors.release(), cc->InputTimestamp()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteConverterCalculator::ProcessGPU( - CalculatorContext* cc) { +mediapipe::Status TfLiteConverterCalculator::ProcessGPU(CalculatorContext* cc) { #if MEDIAPIPE_TFLITE_GL_INFERENCE // GpuBuffer to tflite::gpu::GlBuffer conversion. const auto& input = cc->Inputs().Tag(kGpuBufferTag).Get(); MP_RETURN_IF_ERROR( - gpu_helper_.RunInGlContext([this, &input]() -> ::mediapipe::Status { + gpu_helper_.RunInGlContext([this, &input]() -> mediapipe::Status { // Convert GL texture into TfLite GlBuffer (SSBO). auto src = gpu_helper_.CreateSourceTexture(input); glActiveTexture(GL_TEXTURE0 + 0); @@ -430,13 +427,13 @@ bool ShouldUseGpu(CC* cc) { glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0); glBindTexture(GL_TEXTURE_2D, 0); src.Release(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); // Copy into outputs. 
auto output_tensors = absl::make_unique>(); MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext( - [this, &output_tensors]() -> ::mediapipe::Status { + [this, &output_tensors]() -> mediapipe::Status { output_tensors->resize(1); { GpuTensor& tensor = output_tensors->at(0); @@ -444,7 +441,7 @@ bool ShouldUseGpu(CC* cc) { gpu_data_out_->elements, &tensor)); MP_RETURN_IF_ERROR(CopyBuffer(gpu_data_out_->buffer, tensor)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); cc->Outputs() .Tag(kTensorsGpuTag) @@ -490,10 +487,10 @@ bool ShouldUseGpu(CC* cc) { RET_CHECK_FAIL() << "GPU processing is not enabled."; #endif // MEDIAPIPE_TFLITE_GL_INFERENCE - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteConverterCalculator::InitGpu(CalculatorContext* cc) { +mediapipe::Status TfLiteConverterCalculator::InitGpu(CalculatorContext* cc) { #if MEDIAPIPE_TFLITE_GPU_SUPPORTED // Get input image sizes. const auto& input = @@ -514,7 +511,7 @@ bool ShouldUseGpu(CC* cc) { #if MEDIAPIPE_TFLITE_GL_INFERENCE MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext( - [this, &include_alpha, &input, &single_channel]() -> ::mediapipe::Status { + [this, &include_alpha, &input, &single_channel]() -> mediapipe::Status { // Device memory. MP_RETURN_IF_ERROR( ::tflite::gpu::gl::CreateReadWriteShaderStorageBuffer( @@ -560,7 +557,7 @@ bool ShouldUseGpu(CC* cc) { GL_COMPUTE_SHADER, shader_source, &gpu_data_out_->shader)); MP_RETURN_IF_ERROR(GlProgram::CreateWithShader( gpu_data_out_->shader, &gpu_data_out_->program)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); #elif MEDIAPIPE_TFLITE_METAL_INFERENCE @@ -627,10 +624,10 @@ bool ShouldUseGpu(CC* cc) { << [[error localizedDescription] UTF8String]; #endif // MEDIAPIPE_TFLITE_GL_INFERENCE - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteConverterCalculator::LoadOptions( +mediapipe::Status TfLiteConverterCalculator::LoadOptions( CalculatorContext* cc) { // Get calculator options specified in the graph. const auto& options = @@ -679,11 +676,11 @@ bool ShouldUseGpu(CC* cc) { // Get tensor type, float or quantized. use_quantized_tensors_ = options.use_quantized_tensors(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } template -::mediapipe::Status TfLiteConverterCalculator::NormalizeImage( +mediapipe::Status TfLiteConverterCalculator::NormalizeImage( const ImageFrame& image_frame, bool flip_vertically, float* tensor_ptr) { const int height = image_frame.Height(); const int width = image_frame.Width(); @@ -727,10 +724,10 @@ template } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteConverterCalculator::CopyMatrixToTensor( +mediapipe::Status TfLiteConverterCalculator::CopyMatrixToTensor( const Matrix& matrix, float* tensor_ptr) { if (row_major_matrix_) { auto matrix_map = @@ -742,7 +739,7 @@ template matrix_map = matrix; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/tflite/tflite_converter_calculator_test.cc b/mediapipe/calculators/tflite/tflite_converter_calculator_test.cc index c8762b09b..54df277d0 100644 --- a/mediapipe/calculators/tflite/tflite_converter_calculator_test.cc +++ b/mediapipe/calculators/tflite/tflite_converter_calculator_test.cc @@ -85,7 +85,7 @@ TEST_F(TfLiteConverterCalculatorTest, RandomMatrixColMajor) { // Run the calculator and verify that one output is generated. 
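The converter tests below exercise the CPU paths touched above, including custom normalization. As a hedged restatement of what the CustomDivAndSub test checks (option field names and operation order assumed, not shown in this patch):

    // For a GRAY8 input pixel v with custom div/sub options set:
    float Normalize(uint8_t v, float div, float sub) {
      return static_cast<float>(v) / div - sub;  // divide first, then subtract
    }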
CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "matrix" node { calculator: "TfLiteConverterCalculator" @@ -146,7 +146,7 @@ TEST_F(TfLiteConverterCalculatorTest, RandomMatrixRowMajor) { // Run the calculator and verify that one output is generated. CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "matrix" node { calculator: "TfLiteConverterCalculator" @@ -204,7 +204,7 @@ TEST_F(TfLiteConverterCalculatorTest, CustomDivAndSub) { CalculatorGraph graph; // Run the calculator and verify that one output is generated. CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "input_image" node { calculator: "TfLiteConverterCalculator" @@ -227,7 +227,7 @@ TEST_F(TfLiteConverterCalculatorTest, CustomDivAndSub) { MP_ASSERT_OK(graph.Initialize(graph_config)); MP_ASSERT_OK(graph.StartRun({})); auto input_image = absl::make_unique(ImageFormat::GRAY8, 1, 1); - cv::Mat mat = ::mediapipe::formats::MatView(input_image.get()); + cv::Mat mat = mediapipe::formats::MatView(input_image.get()); mat.at(0, 0) = 200; MP_ASSERT_OK(graph.AddPacketToInputStream( "input_image", Adopt(input_image.release()).At(Timestamp(0)))); @@ -257,7 +257,7 @@ TEST_F(TfLiteConverterCalculatorTest, SetOutputRange) { for (std::pair range : range_values) { CalculatorGraph graph; CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( absl::Substitute(R"( input_stream: "input_image" node { @@ -283,7 +283,7 @@ TEST_F(TfLiteConverterCalculatorTest, SetOutputRange) { MP_ASSERT_OK(graph.Initialize(graph_config)); MP_ASSERT_OK(graph.StartRun({})); auto input_image = absl::make_unique(ImageFormat::GRAY8, 1, 1); - cv::Mat mat = ::mediapipe::formats::MatView(input_image.get()); + cv::Mat mat = mediapipe::formats::MatView(input_image.get()); mat.at(0, 0) = 200; MP_ASSERT_OK(graph.AddPacketToInputStream( "input_image", Adopt(input_image.release()).At(Timestamp(0)))); diff --git a/mediapipe/calculators/tflite/tflite_custom_op_resolver_calculator.cc b/mediapipe/calculators/tflite/tflite_custom_op_resolver_calculator.cc index bce1b6076..7b61f3c8f 100644 --- a/mediapipe/calculators/tflite/tflite_custom_op_resolver_calculator.cc +++ b/mediapipe/calculators/tflite/tflite_custom_op_resolver_calculator.cc @@ -39,14 +39,14 @@ namespace mediapipe { // } class TfLiteCustomOpResolverCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->OutputSidePackets() .Index(0) .Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->SetOffset(TimestampDiff(0)); const TfLiteCustomOpResolverCalculatorOptions& options = @@ -54,17 +54,17 @@ class TfLiteCustomOpResolverCalculator : public CalculatorBase { std::unique_ptr op_resolver; if (options.use_gpu()) { - op_resolver = absl::make_unique<::mediapipe::OpResolver>(); + op_resolver = absl::make_unique(); } else { - op_resolver = absl::make_unique<::mediapipe::CpuOpResolver>(); + op_resolver = absl::make_unique(); } cc->OutputSidePackets().Index(0).Set(Adopt(op_resolver.release())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status 
Process(CalculatorContext* cc) override { - return ::mediapipe::OkStatus(); + mediapipe::Status Process(CalculatorContext* cc) override { + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(TfLiteCustomOpResolverCalculator); diff --git a/mediapipe/calculators/tflite/tflite_inference_calculator.cc b/mediapipe/calculators/tflite/tflite_inference_calculator.cc index 314637e59..8661e0744 100644 --- a/mediapipe/calculators/tflite/tflite_inference_calculator.cc +++ b/mediapipe/calculators/tflite/tflite_inference_calculator.cc @@ -27,7 +27,7 @@ #include "mediapipe/util/cpu_util.h" #endif // !__EMSCRIPTEN__ || __EMSCRIPTEN_PTHREADS__ -#include "mediapipe/util/resource_util.h" +#include "mediapipe/util/tflite/tflite_model_loader.h" #include "tensorflow/lite/error_reporter.h" #include "tensorflow/lite/interpreter.h" #include "tensorflow/lite/kernels/register.h" @@ -214,36 +214,40 @@ class TfLiteInferenceCalculator : public CalculatorBase { public: using TfLiteDelegatePtr = std::unique_ptr>; - using TfLiteModelPtr = - std::unique_ptr>; - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; private: - ::mediapipe::Status ReadKernelsFromFile(); - ::mediapipe::Status WriteKernelsToFile(); - ::mediapipe::Status LoadModel(CalculatorContext* cc); - ::mediapipe::StatusOr GetModelAsPacket(const CalculatorContext& cc); - ::mediapipe::Status LoadDelegate(CalculatorContext* cc); - ::mediapipe::Status InitTFLiteGPURunner(CalculatorContext* cc); - ::mediapipe::Status ProcessInputsCpu( + mediapipe::Status ReadKernelsFromFile(); + mediapipe::Status WriteKernelsToFile(); + mediapipe::Status LoadModel(CalculatorContext* cc); + mediapipe::StatusOr GetModelAsPacket(const CalculatorContext& cc); + mediapipe::Status LoadDelegate(CalculatorContext* cc); + mediapipe::Status InitTFLiteGPURunner(CalculatorContext* cc); + mediapipe::Status ProcessInputsCpu( CalculatorContext* cc, std::vector* output_tensors_cpu); - ::mediapipe::Status ProcessOutputsCpu( + mediapipe::Status ProcessOutputsCpu( CalculatorContext* cc, std::unique_ptr> output_tensors_cpu); - ::mediapipe::Status ProcessInputsGpu( - CalculatorContext* cc, std::vector* output_tensors_gpu); - ::mediapipe::Status ProcessOutputsGpu( + mediapipe::Status ProcessInputsGpu( + CalculatorContext* cc, +#if MEDIAPIPE_TFLITE_METAL_INFERENCE + id compute_encoder, +#endif // MEDIAPIPE_TFLITE_METAL_INFERENCE + std::vector* output_tensors_gpu); + mediapipe::Status ProcessOutputsGpu( CalculatorContext* cc, std::unique_ptr> output_tensors_cpu, +#if MEDIAPIPE_TFLITE_METAL_INFERENCE + id compute_encoder, +#endif // MEDIAPIPE_TFLITE_METAL_INFERENCE std::unique_ptr> output_tensors_gpu); - ::mediapipe::Status RunInContextIfNeeded( + mediapipe::Status RunInContextIfNeeded( std::function<::mediapipe::Status(void)> f) { if (gpu_inference_) { #if MEDIAPIPE_TFLITE_GL_INFERENCE @@ -282,7 +286,7 @@ class TfLiteInferenceCalculator : public CalculatorBase { bool use_advanced_gpu_api_ = false; bool allow_precision_loss_ = false; - ::mediapipe::TfLiteInferenceCalculatorOptions_Delegate_Gpu_API + mediapipe::TfLiteInferenceCalculatorOptions::Delegate::Gpu::API 
tflite_gpu_runner_api_; bool use_kernel_caching_ = false; @@ -308,7 +312,7 @@ bool ShouldUseGpu(CC* cc) { } } // namespace -::mediapipe::Status TfLiteInferenceCalculator::GetContract( +mediapipe::Status TfLiteInferenceCalculator::GetContract( CalculatorContract* cc) { RET_CHECK(cc->Inputs().HasTag(kTensorsTag) ^ cc->Inputs().HasTag(kTensorsGpuTag)); @@ -351,10 +355,10 @@ bool ShouldUseGpu(CC* cc) { // Assign this calculator's default InputStreamHandler. cc->SetInputStreamHandler("FixedSizeInputStreamHandler"); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteInferenceCalculator::Open(CalculatorContext* cc) { +mediapipe::Status TfLiteInferenceCalculator::Open(CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); const auto& options = @@ -406,18 +410,33 @@ bool ShouldUseGpu(CC* cc) { } else { MP_RETURN_IF_ERROR(LoadDelegate(cc)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteInferenceCalculator::Process(CalculatorContext* cc) { +mediapipe::Status TfLiteInferenceCalculator::Process(CalculatorContext* cc) { return RunInContextIfNeeded([this, cc]() -> ::mediapipe::Status { // 0. Declare outputs auto output_tensors_gpu = absl::make_unique>(); auto output_tensors_cpu = absl::make_unique>(); +#if MEDIAPIPE_TFLITE_METAL_INFERENCE + id command_buffer; + id compute_encoder; + if (gpu_inference_) { + command_buffer = [gpu_helper_ commandBuffer]; + command_buffer.label = @"TfLiteInferenceCalculator"; + compute_encoder = [command_buffer computeCommandEncoder]; + } +#endif // MEDIAPIPE_TFLITE_METAL_INFERENCE + // 1. Receive pre-processed tensor inputs. if (gpu_input_) { - MP_RETURN_IF_ERROR(ProcessInputsGpu(cc, output_tensors_gpu.get())); + MP_RETURN_IF_ERROR(ProcessInputsGpu(cc, +#if MEDIAPIPE_TFLITE_METAL_INFERENCE + compute_encoder, +#endif // MEDIAPIPE_TFLITE_METAL_INFERENCE + + output_tensors_gpu.get())); } else { MP_RETURN_IF_ERROR(ProcessInputsCpu(cc, output_tensors_cpu.get())); } @@ -430,22 +449,37 @@ bool ShouldUseGpu(CC* cc) { RET_CHECK_EQ(interpreter_->Invoke(), kTfLiteOk); } #else +#if MEDIAPIPE_TFLITE_METAL_INFERENCE + if (gpu_inference_) { + RET_CHECK( + TFLGpuDelegateSetCommandEncoder(delegate_.get(), compute_encoder)); + } +#endif // MEDIAPIPE_TFLITE_METAL_INFERENCE RET_CHECK_EQ(interpreter_->Invoke(), kTfLiteOk); #endif // MEDIAPIPE_TFLITE_GL_INFERENCE // 3. Output processed tensors. if (gpu_output_ || use_advanced_gpu_api_) { MP_RETURN_IF_ERROR(ProcessOutputsGpu(cc, std::move(output_tensors_cpu), +#if MEDIAPIPE_TFLITE_METAL_INFERENCE + compute_encoder, +#endif // MEDIAPIPE_TFLITE_METAL_INFERENCE std::move(output_tensors_gpu))); } else { MP_RETURN_IF_ERROR(ProcessOutputsCpu(cc, std::move(output_tensors_cpu))); } +#if MEDIAPIPE_TFLITE_METAL_INFERENCE + if (gpu_inference_) { + [compute_encoder endEncoding]; + [command_buffer commit]; + } +#endif // MEDIAPIPE_TFLITE_METAL_INFERENCE - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); }); } -::mediapipe::Status TfLiteInferenceCalculator::WriteKernelsToFile() { +mediapipe::Status TfLiteInferenceCalculator::WriteKernelsToFile() { #if MEDIAPIPE_TFLITE_GL_INFERENCE && defined(MEDIAPIPE_ANDROID) if (use_kernel_caching_) { // Save kernel file. 
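The Metal restructuring above threads a single command buffer and compute encoder through the whole of Process() — input conversion, delegate invocation, and output conversion — instead of creating and committing a separate buffer at each stage. In outline, condensed from the hunks above:

    id<MTLCommandBuffer> command_buffer = [gpu_helper_ commandBuffer];
    id<MTLComputeCommandEncoder> compute_encoder =
        [command_buffer computeCommandEncoder];
    ProcessInputsGpu(cc, compute_encoder, output_tensors_gpu.get());  // fp32->fp16
    TFLGpuDelegateSetCommandEncoder(delegate_.get(), compute_encoder);
    RET_CHECK_EQ(interpreter_->Invoke(), kTfLiteOk);
    ProcessOutputsGpu(cc, std::move(output_tensors_cpu), compute_encoder,
                      std::move(output_tensors_gpu));  // BPHWC4 -> plain buffer
    [compute_encoder endEncoding];
    [command_buffer commit];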
@@ -456,10 +490,10 @@ bool ShouldUseGpu(CC* cc) { mediapipe::file::SetContents(cached_kernel_filename_, cache_str)); } #endif // MEDIAPIPE_TFLITE_GL_INFERENCE && MEDIAPIPE_ANDROID - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteInferenceCalculator::Close(CalculatorContext* cc) { +mediapipe::Status TfLiteInferenceCalculator::Close(CalculatorContext* cc) { MP_RETURN_IF_ERROR(WriteKernelsToFile()); return RunInContextIfNeeded([this]() -> ::mediapipe::Status { @@ -480,16 +514,16 @@ bool ShouldUseGpu(CC* cc) { #if defined(MEDIAPIPE_EDGE_TPU) edgetpu_context_.reset(); #endif - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); }); } // Calculator Auxiliary Section -::mediapipe::Status TfLiteInferenceCalculator::ProcessInputsCpu( +mediapipe::Status TfLiteInferenceCalculator::ProcessInputsCpu( CalculatorContext* cc, std::vector* output_tensors_cpu) { if (cc->Inputs().Tag(kTensorsTag).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Read CPU input into tensors. const auto& input_tensors = @@ -511,13 +545,17 @@ bool ShouldUseGpu(CC* cc) { } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteInferenceCalculator::ProcessInputsGpu( - CalculatorContext* cc, std::vector* output_tensors_gpu) { +mediapipe::Status TfLiteInferenceCalculator::ProcessInputsGpu( + CalculatorContext* cc, +#if MEDIAPIPE_TFLITE_METAL_INFERENCE + id compute_encoder, +#endif // MEDIAPIPE_TFLITE_METAL_INFERENCE + std::vector* output_tensors_gpu) { if (cc->Inputs().Tag(kTensorsGpuTag).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } if (use_advanced_gpu_api_) { #if MEDIAPIPE_TFLITE_GL_INFERENCE @@ -563,10 +601,6 @@ bool ShouldUseGpu(CC* cc) { RET_CHECK_GT(input_tensors.size(), 0); // Explicit copy input with conversion float 32 bits to 16 bits. gpu_data_in_.resize(input_tensors.size()); - id command_buffer = [gpu_helper_ commandBuffer]; - command_buffer.label = @"TfLiteInferenceCalculatorConvert"; - id compute_encoder = - [command_buffer computeCommandEncoder]; [compute_encoder setComputePipelineState:fp32_to_fp16_program_]; for (int i = 0; i < input_tensors.size(); ++i) { [compute_encoder setBuffer:input_tensors[i] offset:0 atIndex:0]; @@ -578,15 +612,13 @@ bool ShouldUseGpu(CC* cc) { [compute_encoder dispatchThreadgroups:MTLSizeMake(threadgroups, 1, 1) threadsPerThreadgroup:threads_per_group]; } - [compute_encoder endEncoding]; - [command_buffer commit]; #endif // MEDIAPIPE_TFLITE_GL_INFERENCE } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteInferenceCalculator::ProcessOutputsCpu( +mediapipe::Status TfLiteInferenceCalculator::ProcessOutputsCpu( CalculatorContext* cc, std::unique_ptr> output_tensors_cpu) { // Output result tensors (CPU). @@ -599,12 +631,15 @@ bool ShouldUseGpu(CC* cc) { .Tag(kTensorsTag) .Add(output_tensors_cpu.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteInferenceCalculator::ProcessOutputsGpu( +mediapipe::Status TfLiteInferenceCalculator::ProcessOutputsGpu( CalculatorContext* cc, std::unique_ptr> output_tensors_cpu, +#if MEDIAPIPE_TFLITE_METAL_INFERENCE + id compute_encoder, +#endif // MEDIAPIPE_TFLITE_METAL_INFERENCE std::unique_ptr> output_tensors_gpu) { if (use_advanced_gpu_api_) { #if MEDIAPIPE_TFLITE_GL_INFERENCE @@ -647,33 +682,27 @@ bool ShouldUseGpu(CC* cc) { // Output result tensors (GPU). 
output_tensors_gpu->resize(gpu_data_out_.size()); id device = gpu_helper_.mtlDevice; - id command_buffer = [gpu_helper_ commandBuffer]; - command_buffer.label = @"TfLiteInferenceBPHWC4Convert"; - id convert_command = - [command_buffer computeCommandEncoder]; for (int i = 0; i < gpu_data_out_.size(); ++i) { // Allocate output tensor. output_tensors_gpu->at(i) = [device newBufferWithLength:gpu_data_out_[i]->elements * sizeof(float) options:MTLResourceStorageModeShared]; // Reshape tensor. - [converter_from_BPHWC4_ convertWithEncoder:convert_command + [converter_from_BPHWC4_ convertWithEncoder:compute_encoder shape:gpu_data_out_[i]->shape sourceBuffer:gpu_data_out_[i]->buffer convertedBuffer:output_tensors_gpu->at(i)]; } - [convert_command endEncoding]; - [command_buffer commit]; cc->Outputs() .Tag(kTensorsGpuTag) .Add(output_tensors_gpu.release(), cc->InputTimestamp()); #endif // MEDIAPIPE_TFLITE_GL_INFERENCE } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteInferenceCalculator::ReadKernelsFromFile() { +mediapipe::Status TfLiteInferenceCalculator::ReadKernelsFromFile() { #if MEDIAPIPE_TFLITE_GL_INFERENCE && defined(MEDIAPIPE_ANDROID) if (use_kernel_caching_) { // Load pre-compiled kernel file. @@ -686,10 +715,10 @@ bool ShouldUseGpu(CC* cc) { } } #endif // MEDIAPIPE_TFLITE_GL_INFERENCE && MEDIAPIPE_ANDROID - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteInferenceCalculator::InitTFLiteGPURunner( +mediapipe::Status TfLiteInferenceCalculator::InitTFLiteGPURunner( CalculatorContext* cc) { #if MEDIAPIPE_TFLITE_GL_INFERENCE ASSIGN_OR_RETURN(model_packet_, GetModelAsPacket(*cc)); @@ -710,15 +739,19 @@ bool ShouldUseGpu(CC* cc) { options.priority3 = tflite::gpu::InferencePriority::AUTO; options.usage = tflite::gpu::InferenceUsage::SUSTAINED_SPEED; tflite_gpu_runner_ = std::make_unique(options); - if (tflite_gpu_runner_api_ == - ::mediapipe::TfLiteInferenceCalculatorOptions_Delegate_Gpu_API:: - TfLiteInferenceCalculatorOptions_Delegate_Gpu_API_OPENGL) { - tflite_gpu_runner_->ForceOpenGL(); - } - if (tflite_gpu_runner_api_ == - ::mediapipe::TfLiteInferenceCalculatorOptions_Delegate_Gpu_API:: - TfLiteInferenceCalculatorOptions_Delegate_Gpu_API_OPENCL) { - tflite_gpu_runner_->ForceOpenCL(); + switch (tflite_gpu_runner_api_) { + case mediapipe::TfLiteInferenceCalculatorOptions::Delegate::Gpu::OPENGL: { + tflite_gpu_runner_->ForceOpenGL(); + break; + } + case mediapipe::TfLiteInferenceCalculatorOptions::Delegate::Gpu::OPENCL: { + tflite_gpu_runner_->ForceOpenCL(); + break; + } + case mediapipe::TfLiteInferenceCalculatorOptions::Delegate::Gpu::ANY: { + // Do not need to force any specific API. 
+ break; + } } MP_RETURN_IF_ERROR( tflite_gpu_runner_->InitializeWithModel(model, op_resolver)); @@ -736,12 +769,11 @@ bool ShouldUseGpu(CC* cc) { quant.type = kTfLiteNoQuantization; quant.params = nullptr; for (int i = 0; i < num_outputs; ++i) { - auto shape = tflite_gpu_runner_->GetOutputShapes()[i]; + auto shape = tflite_gpu_runner_->GetTFLiteOutputShapes()[i]; const int tensor_idx = interpreter_->inputs()[i]; interpreter_->SetTensorParametersReadWrite(tensor_idx, kTfLiteFloat32, "", - {shape.c}, quant); - CHECK(interpreter_->ResizeInputTensor( - tensor_idx, {shape.h, shape.w, shape.c}) == kTfLiteOk); + shape, quant); + CHECK(interpreter_->ResizeInputTensor(tensor_idx, shape) == kTfLiteOk); } CHECK(interpreter_->AllocateTensors() == kTfLiteOk); } @@ -764,14 +796,13 @@ bool ShouldUseGpu(CC* cc) { MP_RETURN_IF_ERROR(tflite_gpu_runner_->Build()); #endif // MEDIAPIPE_TFLITE_GL_INFERENCE - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteInferenceCalculator::LoadModel( - CalculatorContext* cc) { +mediapipe::Status TfLiteInferenceCalculator::LoadModel(CalculatorContext* cc) { if (use_advanced_gpu_api_) { // Use InitTFLiteGPURunner for everything. - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } ASSIGN_OR_RETURN(model_packet_, GetModelAsPacket(*cc)); @@ -810,39 +841,32 @@ bool ShouldUseGpu(CC* cc) { if (use_quantized_tensors_) gpu_inference_ = false; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::StatusOr TfLiteInferenceCalculator::GetModelAsPacket( +mediapipe::StatusOr TfLiteInferenceCalculator::GetModelAsPacket( const CalculatorContext& cc) { const auto& options = cc.Options(); if (!options.model_path().empty()) { - std::string model_path = options.model_path(); - - ASSIGN_OR_RETURN(model_path, mediapipe::PathToResourceAsFile(model_path)); - - auto model = tflite::FlatBufferModel::BuildFromFile(model_path.c_str()); - RET_CHECK(model) << "Failed to load model from path."; - return MakePacket(TfLiteModelPtr( - model.release(), [](tflite::FlatBufferModel* model) { delete model; })); + return TfLiteModelLoader::LoadFromPath(options.model_path()); } if (cc.InputSidePackets().HasTag("MODEL")) { return cc.InputSidePackets().Tag("MODEL"); } - return ::mediapipe::Status( - ::mediapipe::StatusCode::kNotFound, + return mediapipe::Status( + mediapipe::StatusCode::kNotFound, "Must specify TFLite model as path or loaded model."); } -::mediapipe::Status TfLiteInferenceCalculator::LoadDelegate( +mediapipe::Status TfLiteInferenceCalculator::LoadDelegate( CalculatorContext* cc) { const auto& calculator_opts = cc->Options(); if (calculator_opts.has_delegate() && calculator_opts.delegate().has_tflite()) { // Default tflite inference requeqsted - no need to modify graph. - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } if (!gpu_inference_) { @@ -861,7 +885,7 @@ bool ShouldUseGpu(CC* cc) { }); RET_CHECK_EQ(interpreter_->ModifyGraphWithDelegate(delegate_.get()), kTfLiteOk); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } #endif // MEDIAPIPE_ANDROID @@ -880,11 +904,12 @@ bool ShouldUseGpu(CC* cc) { &TfLiteXNNPackDelegateDelete); RET_CHECK_EQ(interpreter_->ModifyGraphWithDelegate(delegate_.get()), kTfLiteOk); + return mediapipe::OkStatus(); } #endif // !EDGETPU // Return, no need for GPU delegate below. 
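Two details in the hunks above are easy to miss: GetModelAsPacket now delegates file loading to TfLiteModelLoader::LoadFromPath (matching the include and BUILD-dependency swaps earlier in this patch), and the XNNPACK branch gains an explicit return so it exits as soon as its delegate is installed. A condensed sketch of that CPU branch (option setup elided; assumes the standard TfLiteXNNPackDelegate C API):

    auto xnnpack_opts = TfLiteXNNPackDelegateOptionsDefault();
    delegate_ = TfLiteDelegatePtr(TfLiteXNNPackDelegateCreate(&xnnpack_opts),
                                  &TfLiteXNNPackDelegateDelete);
    RET_CHECK_EQ(interpreter_->ModifyGraphWithDelegate(delegate_.get()),
                 kTfLiteOk);
    return mediapipe::OkStatus();  // the newly added early return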
- return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } #if MEDIAPIPE_TFLITE_GL_INFERENCE @@ -955,7 +980,7 @@ bool ShouldUseGpu(CC* cc) { // Configure and create the delegate. TFLGpuDelegateOptions options; options.allow_precision_loss = true; - options.wait_type = TFLGpuDelegateWaitType::TFLGpuDelegateWaitTypePassive; + options.wait_type = TFLGpuDelegateWaitType::TFLGpuDelegateWaitTypeDoNotWait; if (!delegate_) delegate_ = TfLiteDelegatePtr(TFLGpuDelegateCreate(&options), &TFLGpuDelegateDelete); @@ -1076,7 +1101,7 @@ bool ShouldUseGpu(CC* cc) { } #endif // MEDIAPIPE_TFLITE_METAL_INFERENCE - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/tflite/tflite_inference_calculator_test.cc b/mediapipe/calculators/tflite/tflite_inference_calculator_test.cc index c3df07191..60ea1a860 100644 --- a/mediapipe/calculators/tflite/tflite_inference_calculator_test.cc +++ b/mediapipe/calculators/tflite/tflite_inference_calculator_test.cc @@ -56,18 +56,18 @@ void DoSmokeTest(const std::string& graph_proto) { interpreter->SetTensorParametersReadWrite(0, kTfLiteFloat32, "", {3}, TfLiteQuantization()); int t = interpreter->inputs()[0]; - TfLiteTensor* tensor = interpreter->tensor(t); + TfLiteTensor* input_tensor = interpreter->tensor(t); interpreter->ResizeInputTensor(t, {width, height, channels}); interpreter->AllocateTensors(); - float* tensor_buffer = tensor->data.f; - ASSERT_NE(tensor_buffer, nullptr); + float* input_tensor_buffer = input_tensor->data.f; + ASSERT_NE(input_tensor_buffer, nullptr); for (int i = 0; i < width * height * channels - 1; i++) { - tensor_buffer[i] = 1; + input_tensor_buffer[i] = 1; } auto input_vec = absl::make_unique>(); - input_vec->emplace_back(*tensor); + input_vec->emplace_back(*input_tensor); // Prepare single calculator graph to and wait for packets. 
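One behavioral change above: the Metal delegate is now created with TFLGpuDelegateWaitTypeDoNotWait rather than Passive — presumably to pair with the shared command buffer introduced earlier, since the calculator now commits that buffer itself. The construction, as it appears in the patch:

    TFLGpuDelegateOptions options;
    options.allow_precision_loss = true;
    options.wait_type = TFLGpuDelegateWaitType::TFLGpuDelegateWaitTypeDoNotWait;
    delegate_ = TfLiteDelegatePtr(TFLGpuDelegateCreate(&options),
                                  &TFLGpuDelegateDelete);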
CalculatorGraphConfig graph_config = diff --git a/mediapipe/calculators/tflite/tflite_model_calculator.cc b/mediapipe/calculators/tflite/tflite_model_calculator.cc index d24c55b14..c8e4fc36b 100644 --- a/mediapipe/calculators/tflite/tflite_model_calculator.cc +++ b/mediapipe/calculators/tflite/tflite_model_calculator.cc @@ -51,13 +51,13 @@ class TfLiteModelCalculator : public CalculatorBase { std::unique_ptr>; - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->InputSidePackets().Tag("MODEL_BLOB").Set(); cc->OutputSidePackets().Tag("MODEL").Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { const Packet& model_packet = cc->InputSidePackets().Tag("MODEL_BLOB"); const std::string& model_blob = model_packet.Get(); std::unique_ptr model = @@ -74,11 +74,11 @@ class TfLiteModelCalculator : public CalculatorBase { delete model; }))); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { - return ::mediapipe::OkStatus(); + mediapipe::Status Process(CalculatorContext* cc) override { + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(TfLiteModelCalculator); diff --git a/mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator.cc b/mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator.cc index e9c09169b..56c9d05f3 100644 --- a/mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator.cc +++ b/mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator.cc @@ -16,6 +16,7 @@ #include #include +#include "absl/container/node_hash_map.h" #include "absl/strings/str_format.h" #include "absl/types/span.h" #include "mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator.pb.h" @@ -59,21 +60,21 @@ namespace mediapipe { // } class TfLiteTensorsToClassificationCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; private: ::mediapipe::TfLiteTensorsToClassificationCalculatorOptions options_; int top_k_ = 0; - std::unordered_map label_map_; + absl::node_hash_map label_map_; bool label_map_loaded_ = false; }; REGISTER_CALCULATOR(TfLiteTensorsToClassificationCalculator); -::mediapipe::Status TfLiteTensorsToClassificationCalculator::GetContract( +mediapipe::Status TfLiteTensorsToClassificationCalculator::GetContract( CalculatorContract* cc) { RET_CHECK(!cc->Inputs().GetTags().empty()); RET_CHECK(!cc->Outputs().GetTags().empty()); @@ -86,10 +87,10 @@ REGISTER_CALCULATOR(TfLiteTensorsToClassificationCalculator); cc->Outputs().Tag("CLASSIFICATIONS").Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToClassificationCalculator::Open( +mediapipe::Status TfLiteTensorsToClassificationCalculator::Open( CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); @@ -113,10 +114,10 @@ 
REGISTER_CALCULATOR(TfLiteTensorsToClassificationCalculator); label_map_loaded_ = true; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToClassificationCalculator::Process( +mediapipe::Status TfLiteTensorsToClassificationCalculator::Process( CalculatorContext* cc) { const auto& input_tensors = cc->Inputs().Tag("TENSORS").Get>(); @@ -189,12 +190,12 @@ REGISTER_CALCULATOR(TfLiteTensorsToClassificationCalculator); .Tag("CLASSIFICATIONS") .Add(classification_list.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToClassificationCalculator::Close( +mediapipe::Status TfLiteTensorsToClassificationCalculator::Close( CalculatorContext* cc) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator_test.cc b/mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator_test.cc index a7290b112..ab66d3077 100644 --- a/mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator_test.cc +++ b/mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator_test.cc @@ -27,7 +27,7 @@ namespace mediapipe { -using ::mediapipe::ParseTextProtoOrDie; +using mediapipe::ParseTextProtoOrDie; using ::tflite::Interpreter; using Node = ::mediapipe::CalculatorGraphConfig::Node; diff --git a/mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator.cc b/mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator.cc index ec07aab98..7747c4357 100644 --- a/mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator.cc +++ b/mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator.cc @@ -143,24 +143,24 @@ void ConvertAnchorsToRawValues(const std::vector& anchors, // } class TfLiteTensorsToDetectionsCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; private: - ::mediapipe::Status ProcessCPU(CalculatorContext* cc, - std::vector* output_detections); - ::mediapipe::Status ProcessGPU(CalculatorContext* cc, - std::vector* output_detections); + mediapipe::Status ProcessCPU(CalculatorContext* cc, + std::vector* output_detections); + mediapipe::Status ProcessGPU(CalculatorContext* cc, + std::vector* output_detections); - ::mediapipe::Status LoadOptions(CalculatorContext* cc); - ::mediapipe::Status GpuInit(CalculatorContext* cc); - ::mediapipe::Status DecodeBoxes(const float* raw_boxes, - const std::vector& anchors, - std::vector* boxes); - ::mediapipe::Status ConvertToDetections( + mediapipe::Status LoadOptions(CalculatorContext* cc); + mediapipe::Status GpuInit(CalculatorContext* cc); + mediapipe::Status DecodeBoxes(const float* raw_boxes, + const std::vector& anchors, + std::vector* boxes); + mediapipe::Status ConvertToDetections( const float* detection_boxes, const float* detection_scores, const int* detection_classes, std::vector* output_detections); Detection ConvertToDetection(float box_ymin, float box_xmin, float 
box_ymax, @@ -189,7 +189,7 @@ class TfLiteTensorsToDetectionsCalculator : public CalculatorBase { }; REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator); -::mediapipe::Status TfLiteTensorsToDetectionsCalculator::GetContract( +mediapipe::Status TfLiteTensorsToDetectionsCalculator::GetContract( CalculatorContract* cc) { RET_CHECK(!cc->Inputs().GetTags().empty()); RET_CHECK(!cc->Outputs().GetTags().empty()); @@ -223,10 +223,10 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator); #endif // MEDIAPIPE_TFLITE_GL_INFERENCE } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToDetectionsCalculator::Open( +mediapipe::Status TfLiteTensorsToDetectionsCalculator::Open( CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); @@ -247,14 +247,14 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator); MP_RETURN_IF_ERROR(GpuInit(cc)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToDetectionsCalculator::Process( +mediapipe::Status TfLiteTensorsToDetectionsCalculator::Process( CalculatorContext* cc) { if ((!gpu_input_ && cc->Inputs().Tag(kTensorsTag).IsEmpty()) || (gpu_input_ && cc->Inputs().Tag(kTensorsGpuTag).IsEmpty())) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } auto output_detections = absl::make_unique>(); @@ -272,10 +272,10 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator); .Add(output_detections.release(), cc->InputTimestamp()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToDetectionsCalculator::ProcessCPU( +mediapipe::Status TfLiteTensorsToDetectionsCalculator::ProcessCPU( CalculatorContext* cc, std::vector* output_detections) { const auto& input_tensors = cc->Inputs().Tag(kTensorsTag).Get>(); @@ -313,7 +313,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator); anchors_ = cc->InputSidePackets().Tag("ANCHORS").Get>(); } else { - return ::mediapipe::UnavailableError("No anchor data available."); + return mediapipe::UnavailableError("No anchor data available."); } anchors_init_ = true; } @@ -390,9 +390,9 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator); detection_classes.data(), output_detections)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToDetectionsCalculator::ProcessGPU( +mediapipe::Status TfLiteTensorsToDetectionsCalculator::ProcessGPU( CalculatorContext* cc, std::vector* output_detections) { #if MEDIAPIPE_TFLITE_GL_INFERENCE const auto& input_tensors = @@ -401,7 +401,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator); MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext([this, &input_tensors, &cc, &output_detections]() - -> ::mediapipe::Status { + -> mediapipe::Status { // Copy inputs. 
MP_RETURN_IF_ERROR( CopyBuffer(input_tensors[0], gpu_data_->raw_boxes_buffer)); @@ -458,7 +458,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator); ConvertToDetections(boxes.data(), detection_scores.data(), detection_classes.data(), output_detections)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); #elif MEDIAPIPE_TFLITE_METAL_INFERENCE @@ -543,10 +543,10 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator); #else LOG(ERROR) << "GPU input on non-Android not supported yet."; #endif // MEDIAPIPE_TFLITE_GL_INFERENCE - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToDetectionsCalculator::Close( +mediapipe::Status TfLiteTensorsToDetectionsCalculator::Close( CalculatorContext* cc) { #if MEDIAPIPE_TFLITE_GL_INFERENCE gpu_helper_.RunInGlContext([this] { gpu_data_.reset(); }); @@ -554,10 +554,10 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator); gpu_data_.reset(); #endif // MEDIAPIPE_TFLITE_GL_INFERENCE - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToDetectionsCalculator::LoadOptions( +mediapipe::Status TfLiteTensorsToDetectionsCalculator::LoadOptions( CalculatorContext* cc) { // Get calculator options specified in the graph. options_ = @@ -579,10 +579,10 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator); ignore_classes_.insert(options_.ignore_classes(i)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToDetectionsCalculator::DecodeBoxes( +mediapipe::Status TfLiteTensorsToDetectionsCalculator::DecodeBoxes( const float* raw_boxes, const std::vector& anchors, std::vector* boxes) { for (int i = 0; i < num_boxes_; ++i) { @@ -643,10 +643,10 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToDetectionsCalculator::ConvertToDetections( +mediapipe::Status TfLiteTensorsToDetectionsCalculator::ConvertToDetections( const float* detection_boxes, const float* detection_scores, const int* detection_classes, std::vector* output_detections) { for (int i = 0; i < num_boxes_; ++i) { @@ -659,6 +659,14 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator); detection_boxes[box_offset + 0], detection_boxes[box_offset + 1], detection_boxes[box_offset + 2], detection_boxes[box_offset + 3], detection_scores[i], detection_classes[i], options_.flip_vertically()); + const auto& bbox = detection.location_data().relative_bounding_box(); + if (bbox.width() < 0 || bbox.height() < 0) { + // Decoded detection boxes could have negative values for width/height due + // to model prediction. Filter out those boxes since some downstream + // calculators may assume non-negative values. (b/171391719) + continue; + } + // Add keypoints. 
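The guard added just above drops detections whose decoded relative bounding box has a negative width or height (b/171391719), since downstream calculators may assume non-negative extents. A hypothetical illustration:

    // Raw values decoding to ymin=0.6, xmin=0.6, ymax=0.5, xmax=0.5 give
    //   width  = xmax - xmin = -0.1   (negative)
    //   height = ymax - ymin = -0.1   (negative)
    // so ConvertToDetections() now skips the box before keypoints are added.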
if (options_.num_keypoints() > 0) { auto* location_data = detection.mutable_location_data(); @@ -676,7 +684,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator); } output_detections->emplace_back(detection); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } Detection TfLiteTensorsToDetectionsCalculator::ConvertToDetection( @@ -699,11 +707,10 @@ Detection TfLiteTensorsToDetectionsCalculator::ConvertToDetection( return detection; } -::mediapipe::Status TfLiteTensorsToDetectionsCalculator::GpuInit( +mediapipe::Status TfLiteTensorsToDetectionsCalculator::GpuInit( CalculatorContext* cc) { #if MEDIAPIPE_TFLITE_GL_INFERENCE - MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext([this]() - -> ::mediapipe::Status { + MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext([this]() -> mediapipe::Status { gpu_data_ = absl::make_unique(); // A shader to decode detection boxes. @@ -911,7 +918,7 @@ void main() { MP_RETURN_IF_ERROR(CreateReadWriteShaderStorageBuffer( raw_scores_length, &gpu_data_->raw_scores_buffer)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); #elif MEDIAPIPE_TFLITE_METAL_INFERENCE @@ -1146,7 +1153,7 @@ kernel void scoreKernel( #endif // MEDIAPIPE_TFLITE_GL_INFERENCE - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/tflite/tflite_tensors_to_floats_calculator.cc b/mediapipe/calculators/tflite/tflite_tensors_to_floats_calculator.cc index 72dd60a0b..f3a8e3ffe 100644 --- a/mediapipe/calculators/tflite/tflite_tensors_to_floats_calculator.cc +++ b/mediapipe/calculators/tflite/tflite_tensors_to_floats_calculator.cc @@ -38,15 +38,15 @@ namespace mediapipe { // } class TfLiteTensorsToFloatsCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; }; REGISTER_CALCULATOR(TfLiteTensorsToFloatsCalculator); -::mediapipe::Status TfLiteTensorsToFloatsCalculator::GetContract( +mediapipe::Status TfLiteTensorsToFloatsCalculator::GetContract( CalculatorContract* cc) { RET_CHECK(cc->Inputs().HasTag("TENSORS")); RET_CHECK(cc->Outputs().HasTag("FLOATS") || cc->Outputs().HasTag("FLOAT")); @@ -59,17 +59,16 @@ REGISTER_CALCULATOR(TfLiteTensorsToFloatsCalculator); cc->Outputs().Tag("FLOAT").Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToFloatsCalculator::Open( - CalculatorContext* cc) { +mediapipe::Status TfLiteTensorsToFloatsCalculator::Open(CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToFloatsCalculator::Process( +mediapipe::Status TfLiteTensorsToFloatsCalculator::Process( CalculatorContext* cc) { RET_CHECK(!cc->Inputs().Tag("TENSORS").IsEmpty()); @@ -98,6 +97,6 @@ REGISTER_CALCULATOR(TfLiteTensorsToFloatsCalculator); cc->InputTimestamp()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator.cc b/mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator.cc index 8f3b8b35c..af3f6684c 100644 --- 
a/mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator.cc +++ b/mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator.cc @@ -20,10 +20,30 @@ namespace mediapipe { +namespace { + +inline float Sigmoid(float value) { return 1.0f / (1.0f + std::exp(-value)); } + +float ApplyActivation( + ::mediapipe::TfLiteTensorsToLandmarksCalculatorOptions::Activation + activation, + float value) { + switch (activation) { + case ::mediapipe::TfLiteTensorsToLandmarksCalculatorOptions::SIGMOID: + return Sigmoid(value); + break; + default: + return value; + } +} + +} // namespace + // A calculator for converting TFLite tensors from regression models into // landmarks. Note that if the landmarks in the tensor has more than 5 // dimensions, only the first 5 dimensions will be converted to -// [x,y,z, visibility, presence]. +// [x,y,z, visibility, presence]. The latter two fields may also stay unset if +// such attributes are not supported in the model. // // Input: // TENSORS - Vector of TfLiteTensor of type kTfLiteFloat32. Only the first @@ -69,13 +89,13 @@ namespace mediapipe { // } class TfLiteTensorsToLandmarksCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: - ::mediapipe::Status LoadOptions(CalculatorContext* cc); + mediapipe::Status LoadOptions(CalculatorContext* cc); int num_landmarks_ = 0; bool flip_vertically_ = false; bool flip_horizontally_ = false; @@ -84,7 +104,7 @@ class TfLiteTensorsToLandmarksCalculator : public CalculatorBase { }; REGISTER_CALCULATOR(TfLiteTensorsToLandmarksCalculator); -::mediapipe::Status TfLiteTensorsToLandmarksCalculator::GetContract( +mediapipe::Status TfLiteTensorsToLandmarksCalculator::GetContract( CalculatorContract* cc) { RET_CHECK(!cc->Inputs().GetTags().empty()); RET_CHECK(!cc->Outputs().GetTags().empty()); @@ -117,10 +137,10 @@ REGISTER_CALCULATOR(TfLiteTensorsToLandmarksCalculator); cc->Outputs().Tag("NORM_LANDMARKS").Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToLandmarksCalculator::Open( +mediapipe::Status TfLiteTensorsToLandmarksCalculator::Open( CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); @@ -151,10 +171,10 @@ REGISTER_CALCULATOR(TfLiteTensorsToLandmarksCalculator); ? cc->InputSidePackets().Tag("FLIP_VERTICALLY").Get() : options_.flip_vertically(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToLandmarksCalculator::Process( +mediapipe::Status TfLiteTensorsToLandmarksCalculator::Process( CalculatorContext* cc) { // Override values if specified so. 
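  // (A non-empty FLIP_HORIZONTALLY / FLIP_VERTICALLY input packet at the
  // current timestamp takes precedence over the side-packet or option value
  // read in Open().)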
if (cc->Inputs().HasTag("FLIP_HORIZONTALLY") && @@ -167,7 +187,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToLandmarksCalculator); } if (cc->Inputs().Tag("TENSORS").IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } const auto& input_tensors = @@ -207,10 +227,12 @@ REGISTER_CALCULATOR(TfLiteTensorsToLandmarksCalculator); landmark->set_z(raw_landmarks[offset + 2]); } if (num_dimensions > 3) { - landmark->set_visibility(raw_landmarks[offset + 3]); + landmark->set_visibility(ApplyActivation(options_.visibility_activation(), + raw_landmarks[offset + 3])); } if (num_dimensions > 4) { - landmark->set_presence(raw_landmarks[offset + 4]); + landmark->set_presence(ApplyActivation(options_.presence_activation(), + raw_landmarks[offset + 4])); } } @@ -225,8 +247,12 @@ REGISTER_CALCULATOR(TfLiteTensorsToLandmarksCalculator); // Scale Z coordinate as X + allow additional uniform normalization. norm_landmark->set_z(landmark.z() / options_.input_image_width() / options_.normalize_z()); - norm_landmark->set_visibility(landmark.visibility()); - norm_landmark->set_presence(landmark.presence()); + if (landmark.has_visibility()) { // Set only if supported in the model. + norm_landmark->set_visibility(landmark.visibility()); + } + if (landmark.has_presence()) { // Set only if supported in the model. + norm_landmark->set_presence(landmark.presence()); + } } cc->Outputs() .Tag("NORM_LANDMARKS") @@ -242,16 +268,16 @@ REGISTER_CALCULATOR(TfLiteTensorsToLandmarksCalculator); .At(cc->InputTimestamp())); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToLandmarksCalculator::LoadOptions( +mediapipe::Status TfLiteTensorsToLandmarksCalculator::LoadOptions( CalculatorContext* cc) { // Get calculator options specified in the graph. options_ = cc->Options<::mediapipe::TfLiteTensorsToLandmarksCalculatorOptions>(); num_landmarks_ = options_.num_landmarks(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator.proto b/mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator.proto index cbf30c181..793639a53 100644 --- a/mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator.proto +++ b/mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator.proto @@ -25,6 +25,11 @@ message TfLiteTensorsToLandmarksCalculatorOptions { optional TfLiteTensorsToLandmarksCalculatorOptions ext = 257405002; } + enum Activation { + NONE = 0; + SIGMOID = 1; + } + // Number of landmarks from the output of the model. required int32 num_landmarks = 1; @@ -51,4 +56,10 @@ message TfLiteTensorsToLandmarksCalculatorOptions { // when normalized landmarks are needed. It is applied in addition to Z // coordinate being re-scaled as X. optional float normalize_z = 5 [default = 1.0]; + + // Apply activation function to the tensor representing landmark visibility. + optional Activation visibility_activation = 7 [default = NONE]; + + // Apply activation function to the tensor representing landmark presence. 
+ optional Activation presence_activation = 8 [default = NONE]; } diff --git a/mediapipe/calculators/tflite/tflite_tensors_to_segmentation_calculator.cc b/mediapipe/calculators/tflite/tflite_tensors_to_segmentation_calculator.cc index 3369840e4..4190a05cb 100644 --- a/mediapipe/calculators/tflite/tflite_tensors_to_segmentation_calculator.cc +++ b/mediapipe/calculators/tflite/tflite_tensors_to_segmentation_calculator.cc @@ -121,17 +121,17 @@ using ::tflite::gpu::gl::GlShader; // class TfLiteTensorsToSegmentationCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; private: - ::mediapipe::Status LoadOptions(CalculatorContext* cc); - ::mediapipe::Status InitGpu(CalculatorContext* cc); - ::mediapipe::Status ProcessGpu(CalculatorContext* cc); - ::mediapipe::Status ProcessCpu(CalculatorContext* cc); + mediapipe::Status LoadOptions(CalculatorContext* cc); + mediapipe::Status InitGpu(CalculatorContext* cc); + mediapipe::Status ProcessGpu(CalculatorContext* cc); + mediapipe::Status ProcessCpu(CalculatorContext* cc); void GlRender(); ::mediapipe::TfLiteTensorsToSegmentationCalculatorOptions options_; @@ -152,7 +152,7 @@ class TfLiteTensorsToSegmentationCalculator : public CalculatorBase { REGISTER_CALCULATOR(TfLiteTensorsToSegmentationCalculator); // static -::mediapipe::Status TfLiteTensorsToSegmentationCalculator::GetContract( +mediapipe::Status TfLiteTensorsToSegmentationCalculator::GetContract( CalculatorContract* cc) { RET_CHECK(!cc->Inputs().GetTags().empty()); RET_CHECK(!cc->Outputs().GetTags().empty()); @@ -202,10 +202,10 @@ REGISTER_CALCULATOR(TfLiteTensorsToSegmentationCalculator); MP_RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc)); #endif // !MEDIAPIPE_DISABLE_GPU } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToSegmentationCalculator::Open( +mediapipe::Status TfLiteTensorsToSegmentationCalculator::Open( CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); @@ -221,36 +221,36 @@ REGISTER_CALCULATOR(TfLiteTensorsToSegmentationCalculator); if (use_gpu_) { #if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE) MP_RETURN_IF_ERROR( - gpu_helper_.RunInGlContext([this, cc]() -> ::mediapipe::Status { + gpu_helper_.RunInGlContext([this, cc]() -> mediapipe::Status { MP_RETURN_IF_ERROR(InitGpu(cc)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); #else RET_CHECK_FAIL() << "GPU processing not enabled."; #endif // !MEDIAPIPE_DISABLE_GPU } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToSegmentationCalculator::Process( +mediapipe::Status TfLiteTensorsToSegmentationCalculator::Process( CalculatorContext* cc) { if (use_gpu_) { #if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE) MP_RETURN_IF_ERROR( - gpu_helper_.RunInGlContext([this, cc]() -> ::mediapipe::Status { + gpu_helper_.RunInGlContext([this, cc]() -> mediapipe::Status { MP_RETURN_IF_ERROR(ProcessGpu(cc)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); #endif // !MEDIAPIPE_DISABLE_GPU } else { 
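    // Compute the segmentation mask on the CPU when GPU processing is off.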
MP_RETURN_IF_ERROR(ProcessCpu(cc)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToSegmentationCalculator::Close( +mediapipe::Status TfLiteTensorsToSegmentationCalculator::Close( CalculatorContext* cc) { #if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE) gpu_helper_.RunInGlContext([this] { @@ -262,13 +262,13 @@ REGISTER_CALCULATOR(TfLiteTensorsToSegmentationCalculator); }); #endif // !MEDIAPIPE_DISABLE_GPU - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToSegmentationCalculator::ProcessCpu( +mediapipe::Status TfLiteTensorsToSegmentationCalculator::ProcessCpu( CalculatorContext* cc) { if (cc->Inputs().Tag(kTensorsTag).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Get input streams. @@ -366,17 +366,17 @@ REGISTER_CALCULATOR(TfLiteTensorsToSegmentationCalculator); large_mask_mat.copyTo(output_mat); cc->Outputs().Tag(kMaskTag).Add(output_mask.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Steps: // 1. receive tensor and optional previous mask // 2. process segmentation tensor into small mask // 3. upsample small mask into output mask to be same size as input image -::mediapipe::Status TfLiteTensorsToSegmentationCalculator::ProcessGpu( +mediapipe::Status TfLiteTensorsToSegmentationCalculator::ProcessGpu( CalculatorContext* cc) { if (cc->Inputs().Tag(kTensorsGpuTag).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } #if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE) // Get input streams. @@ -460,7 +460,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToSegmentationCalculator); output_texture.Release(); #endif // !MEDIAPIPE_DISABLE_GPU - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void TfLiteTensorsToSegmentationCalculator::GlRender() { @@ -515,7 +515,7 @@ void TfLiteTensorsToSegmentationCalculator::GlRender() { #endif // !MEDIAPIPE_DISABLE_GPU } -::mediapipe::Status TfLiteTensorsToSegmentationCalculator::LoadOptions( +mediapipe::Status TfLiteTensorsToSegmentationCalculator::LoadOptions( CalculatorContext* cc) { // Get calculator options specified in the graph. 
options_ = @@ -531,10 +531,10 @@ void TfLiteTensorsToSegmentationCalculator::GlRender() { RET_CHECK_EQ(tensor_channels_, 2) << "Only 2 channel segmentation tensor currently supported"; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToSegmentationCalculator::InitGpu( +mediapipe::Status TfLiteTensorsToSegmentationCalculator::InitGpu( CalculatorContext* cc) { #if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE) MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext([this]() @@ -698,11 +698,11 @@ void main() { glUseProgram(upsample_program_); glUniform1i(glGetUniformLocation(upsample_program_, "input_data"), 1); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); #endif // !MEDIAPIPE_DISABLE_GPU - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/util/BUILD b/mediapipe/calculators/util/BUILD index 7914390e9..fd81d4de9 100644 --- a/mediapipe/calculators/util/BUILD +++ b/mediapipe/calculators/util/BUILD @@ -545,6 +545,30 @@ mediapipe_proto_library( ], ) +cc_library( + name = "landmark_visibility_calculator", + srcs = ["landmark_visibility_calculator.cc"], + visibility = ["//visibility:public"], + deps = [ + "//mediapipe/framework:calculator_framework", + "//mediapipe/framework/formats:landmark_cc_proto", + "//mediapipe/framework/port:ret_check", + ], + alwayslink = 1, +) + +cc_library( + name = "set_landmark_visibility_calculator", + srcs = ["set_landmark_visibility_calculator.cc"], + visibility = ["//visibility:public"], + deps = [ + "//mediapipe/framework:calculator_framework", + "//mediapipe/framework/formats:landmark_cc_proto", + "//mediapipe/framework/port:ret_check", + ], + alwayslink = 1, +) + mediapipe_proto_library( name = "landmarks_to_floats_calculator_proto", srcs = ["landmarks_to_floats_calculator.proto"], @@ -720,6 +744,39 @@ cc_library( alwayslink = 1, ) +cc_library( + name = "detection_projection_calculator", + srcs = ["detection_projection_calculator.cc"], + visibility = ["//visibility:public"], + deps = [ + "//mediapipe/framework:calculator_framework", + "//mediapipe/framework/formats:detection_cc_proto", + "//mediapipe/framework/formats:location", + "//mediapipe/framework/formats:rect_cc_proto", + "//mediapipe/framework/port:point", + "//mediapipe/framework/port:ret_check", + "//mediapipe/framework/port:status", + ], + alwayslink = 1, +) + +cc_test( + name = "detection_projection_calculator_test", + srcs = ["detection_projection_calculator_test.cc"], + deps = [ + ":detection_projection_calculator", + "//mediapipe/calculators/tensor:image_to_tensor_utils", + "//mediapipe/framework:calculator_framework", + "//mediapipe/framework:calculator_runner", + "//mediapipe/framework/formats:detection_cc_proto", + "//mediapipe/framework/formats:location", + "//mediapipe/framework/formats:rect_cc_proto", + "//mediapipe/framework/port:gtest_main", + "//mediapipe/framework/port:parse_text_proto", + "//mediapipe/framework/port:point", + ], +) + cc_library( name = "landmark_letterbox_removal_calculator", srcs = ["landmark_letterbox_removal_calculator.cc"], diff --git a/mediapipe/calculators/util/alignment_points_to_rects_calculator.cc b/mediapipe/calculators/util/alignment_points_to_rects_calculator.cc index 49768eae7..be79fda20 100644 --- a/mediapipe/calculators/util/alignment_points_to_rects_calculator.cc +++ b/mediapipe/calculators/util/alignment_points_to_rects_calculator.cc @@ -40,7 +40,7 @@ namespace {} // namespace // } class AlignmentPointsRectsCalculator : 
public DetectionsToRectsCalculator { public: - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { RET_CHECK_OK(DetectionsToRectsCalculator::Open(cc)); // Make sure that start and end keypoints are provided. @@ -52,18 +52,18 @@ class AlignmentPointsRectsCalculator : public DetectionsToRectsCalculator { RET_CHECK(options_.has_rotation_vector_end_keypoint_index()) << "End keypoint is required to calculate rect size and rotation"; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: - ::mediapipe::Status DetectionToNormalizedRect( + mediapipe::Status DetectionToNormalizedRect( const ::mediapipe::Detection& detection, const DetectionSpec& detection_spec, ::mediapipe::NormalizedRect* rect) override; }; REGISTER_CALCULATOR(AlignmentPointsRectsCalculator); -::mediapipe::Status AlignmentPointsRectsCalculator::DetectionToNormalizedRect( +mediapipe::Status AlignmentPointsRectsCalculator::DetectionToNormalizedRect( const Detection& detection, const DetectionSpec& detection_spec, NormalizedRect* rect) { const auto& location_data = detection.location_data(); @@ -96,7 +96,7 @@ REGISTER_CALCULATOR(AlignmentPointsRectsCalculator); rect->set_width(box_size / image_size->first); rect->set_height(box_size / image_size->second); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/util/annotation_overlay_calculator.cc b/mediapipe/calculators/util/annotation_overlay_calculator.cc index 5da424a61..89ecd1cee 100644 --- a/mediapipe/calculators/util/annotation_overlay_calculator.cc +++ b/mediapipe/calculators/util/annotation_overlay_calculator.cc @@ -124,29 +124,29 @@ class AnnotationOverlayCalculator : public CalculatorBase { AnnotationOverlayCalculator() = default; ~AnnotationOverlayCalculator() override = default; - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); // From Calculator. - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; private: - ::mediapipe::Status CreateRenderTargetCpu(CalculatorContext* cc, - std::unique_ptr& image_mat, - ImageFormat::Format* target_format); + mediapipe::Status CreateRenderTargetCpu(CalculatorContext* cc, + std::unique_ptr& image_mat, + ImageFormat::Format* target_format); template - ::mediapipe::Status CreateRenderTargetGpu( - CalculatorContext* cc, std::unique_ptr& image_mat); + mediapipe::Status CreateRenderTargetGpu(CalculatorContext* cc, + std::unique_ptr& image_mat); template - ::mediapipe::Status RenderToGpu(CalculatorContext* cc, uchar* overlay_image); - ::mediapipe::Status RenderToCpu(CalculatorContext* cc, - const ImageFormat::Format& target_format, - uchar* data_image); + mediapipe::Status RenderToGpu(CalculatorContext* cc, uchar* overlay_image); + mediapipe::Status RenderToCpu(CalculatorContext* cc, + const ImageFormat::Format& target_format, + uchar* data_image); - ::mediapipe::Status GlRender(CalculatorContext* cc); + mediapipe::Status GlRender(CalculatorContext* cc); template - ::mediapipe::Status GlSetup(CalculatorContext* cc); + mediapipe::Status GlSetup(CalculatorContext* cc); // Options for the calculator. 
AnnotationOverlayCalculatorOptions options_; @@ -171,7 +171,7 @@ class AnnotationOverlayCalculator : public CalculatorBase { }; REGISTER_CALCULATOR(AnnotationOverlayCalculator); -::mediapipe::Status AnnotationOverlayCalculator::GetContract( +mediapipe::Status AnnotationOverlayCalculator::GetContract( CalculatorContract* cc) { CHECK_GE(cc->Inputs().NumEntries(), 1); @@ -179,11 +179,11 @@ REGISTER_CALCULATOR(AnnotationOverlayCalculator); if (cc->Inputs().HasTag(kImageFrameTag) && cc->Inputs().HasTag(kGpuBufferTag)) { - return ::mediapipe::InternalError("Cannot have multiple input images."); + return mediapipe::InternalError("Cannot have multiple input images."); } if (cc->Inputs().HasTag(kGpuBufferTag) != cc->Outputs().HasTag(kGpuBufferTag)) { - return ::mediapipe::InternalError("GPU output must have GPU input."); + return mediapipe::InternalError("GPU output must have GPU input."); } // Input image to render onto copy of. Should be same type as output. @@ -228,10 +228,10 @@ REGISTER_CALCULATOR(AnnotationOverlayCalculator); #endif // !MEDIAPIPE_DISABLE_GPU } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status AnnotationOverlayCalculator::Open(CalculatorContext* cc) { +mediapipe::Status AnnotationOverlayCalculator::Open(CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); options_ = cc->Options(); @@ -269,11 +269,10 @@ REGISTER_CALCULATOR(AnnotationOverlayCalculator); #endif // !MEDIAPIPE_DISABLE_GPU } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status AnnotationOverlayCalculator::Process( - CalculatorContext* cc) { +mediapipe::Status AnnotationOverlayCalculator::Process(CalculatorContext* cc) { // Initialize render target, drawn with OpenCV. std::unique_ptr image_mat; ImageFormat::Format target_format; @@ -281,7 +280,7 @@ REGISTER_CALCULATOR(AnnotationOverlayCalculator); #if !defined(MEDIAPIPE_DISABLE_GPU) if (!gpu_initialized_) { MP_RETURN_IF_ERROR( - gpu_helper_.RunInGlContext([this, cc]() -> ::mediapipe::Status { + gpu_helper_.RunInGlContext([this, cc]() -> mediapipe::Status { return GlSetup(cc); })); gpu_initialized_ = true; @@ -293,7 +292,7 @@ REGISTER_CALCULATOR(AnnotationOverlayCalculator); } #endif // !MEDIAPIPE_DISABLE_GPU } else { - if (cc->Inputs().HasTag(kImageFrameTag)) { + if (cc->Outputs().HasTag(kImageFrameTag)) { MP_RETURN_IF_ERROR(CreateRenderTargetCpu(cc, image_mat, &target_format)); } } @@ -331,7 +330,7 @@ REGISTER_CALCULATOR(AnnotationOverlayCalculator); // Overlay rendered image in OpenGL, onto a copy of input. 
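      // (image_mat holds the annotations drawn with OpenCV on the CPU;
      // RenderToGpu uploads it as a texture and blends it over the input
      // GpuBuffer.)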
uchar* image_mat_ptr = image_mat->data; MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext( - [this, cc, image_mat_ptr]() -> ::mediapipe::Status { + [this, cc, image_mat_ptr]() -> mediapipe::Status { return RenderToGpu( cc, image_mat_ptr); })); @@ -342,10 +341,10 @@ REGISTER_CALCULATOR(AnnotationOverlayCalculator); MP_RETURN_IF_ERROR(RenderToCpu(cc, target_format, image_mat_ptr)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status AnnotationOverlayCalculator::Close(CalculatorContext* cc) { +mediapipe::Status AnnotationOverlayCalculator::Close(CalculatorContext* cc) { #if !defined(MEDIAPIPE_DISABLE_GPU) gpu_helper_.RunInGlContext([this] { if (program_) glDeleteProgram(program_); @@ -355,10 +354,10 @@ REGISTER_CALCULATOR(AnnotationOverlayCalculator); }); #endif // !MEDIAPIPE_DISABLE_GPU - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status AnnotationOverlayCalculator::RenderToCpu( +mediapipe::Status AnnotationOverlayCalculator::RenderToCpu( CalculatorContext* cc, const ImageFormat::Format& target_format, uchar* data_image) { auto output_frame = absl::make_unique( @@ -380,11 +379,11 @@ REGISTER_CALCULATOR(AnnotationOverlayCalculator); .Add(output_frame.release(), cc->InputTimestamp()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } template -::mediapipe::Status AnnotationOverlayCalculator::RenderToGpu( +mediapipe::Status AnnotationOverlayCalculator::RenderToGpu( CalculatorContext* cc, uchar* overlay_image) { #if !defined(MEDIAPIPE_DISABLE_GPU) // Source and destination textures. @@ -429,10 +428,10 @@ template output_texture.Release(); #endif // !MEDIAPIPE_DISABLE_GPU - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status AnnotationOverlayCalculator::CreateRenderTargetCpu( +mediapipe::Status AnnotationOverlayCalculator::CreateRenderTargetCpu( CalculatorContext* cc, std::unique_ptr& image_mat, ImageFormat::Format* target_format) { if (image_frame_available_) { @@ -454,7 +453,7 @@ template target_mat_type = CV_8UC3; break; default: - return ::mediapipe::UnknownError("Unexpected image frame format."); + return mediapipe::UnknownError("Unexpected image frame format."); break; } @@ -477,11 +476,11 @@ template *target_format = ImageFormat::SRGB; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } template -::mediapipe::Status AnnotationOverlayCalculator::CreateRenderTargetGpu( +mediapipe::Status AnnotationOverlayCalculator::CreateRenderTargetGpu( CalculatorContext* cc, std::unique_ptr& image_mat) { #if !defined(MEDIAPIPE_DISABLE_GPU) if (image_frame_available_) { @@ -503,11 +502,10 @@ template } #endif // !MEDIAPIPE_DISABLE_GPU - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status AnnotationOverlayCalculator::GlRender( - CalculatorContext* cc) { +mediapipe::Status AnnotationOverlayCalculator::GlRender(CalculatorContext* cc) { #if !defined(MEDIAPIPE_DISABLE_GPU) static const GLfloat square_vertices[] = { -1.0f, -1.0f, // bottom left @@ -558,12 +556,11 @@ template glDeleteBuffers(2, vbo); #endif // !MEDIAPIPE_DISABLE_GPU - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } template -::mediapipe::Status AnnotationOverlayCalculator::GlSetup( - CalculatorContext* cc) { +mediapipe::Status AnnotationOverlayCalculator::GlSetup(CalculatorContext* cc) { #if !defined(MEDIAPIPE_DISABLE_GPU) const GLint attr_location[NUM_ATTRIBUTES] = { ATTRIB_VERTEX, @@ -663,7 +660,7 @@ template } #endif // !MEDIAPIPE_DISABLE_GPU - 
return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/util/association_calculator.h b/mediapipe/calculators/util/association_calculator.h index a16de4977..37a77400a 100644 --- a/mediapipe/calculators/util/association_calculator.h +++ b/mediapipe/calculators/util/association_calculator.h @@ -56,7 +56,7 @@ inline float OverlapSimilarity(const Rectangle_f& rect1, template class AssociationCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { // Atmost one input stream can be tagged with "PREV". RET_CHECK_LE(cc->Inputs().NumEntries("PREV"), 1); @@ -71,10 +71,10 @@ class AssociationCalculator : public CalculatorBase { cc->Outputs().Index(0).Set>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->SetOffset(TimestampDiff(0)); has_prev_input_stream_ = cc->Inputs().HasTag("PREV"); @@ -84,10 +84,10 @@ class AssociationCalculator : public CalculatorBase { options_ = cc->Options<::mediapipe::AssociationCalculatorOptions>(); CHECK_GE(options_.min_similarity_threshold(), 0); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { auto get_non_overlapping_elements = GetNonOverlappingElements(cc); if (!get_non_overlapping_elements.ok()) { return get_non_overlapping_elements.status(); @@ -114,7 +114,7 @@ class AssociationCalculator : public CalculatorBase { } cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } protected: @@ -123,8 +123,8 @@ class AssociationCalculator : public CalculatorBase { bool has_prev_input_stream_; CollectionItemId prev_input_stream_id_; - virtual ::mediapipe::StatusOr GetRectangle(const T& input) { - return ::mediapipe::OkStatus(); + virtual mediapipe::StatusOr GetRectangle(const T& input) { + return mediapipe::OkStatus(); } virtual std::pair GetId(const T& input) { return {false, -1}; } @@ -176,7 +176,7 @@ class AssociationCalculator : public CalculatorBase { return result; } - ::mediapipe::Status AddElementToList(T element, std::list* current) { + mediapipe::Status AddElementToList(T element, std::list* current) { // Compare this element with elements of the input collection. If this // element has high overlap with elements of the collection, remove // those elements from the collection and add this element. @@ -207,13 +207,13 @@ class AssociationCalculator : public CalculatorBase { } current->push_back(element); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Compare elements of the current list with elements in from the collection // of elements from the previous input stream, and propagate IDs from the // previous input stream as appropriate. 
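  // (For example, a current rect whose overlap with a PREV rect exceeds
  // min_similarity_threshold inherits that PREV rect's id.)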
- ::mediapipe::Status PropagateIdsFromPreviousToCurrent( + mediapipe::Status PropagateIdsFromPreviousToCurrent( const std::vector& prev_input_vec, std::list* current) { for (auto vit = current->begin(); vit != current->end(); ++vit) { auto get_cur_rectangle = GetRectangle(*vit); @@ -250,7 +250,7 @@ class AssociationCalculator : public CalculatorBase { *vit = element; } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; diff --git a/mediapipe/calculators/util/association_detection_calculator.cc b/mediapipe/calculators/util/association_detection_calculator.cc index 125e8c4ff..59d052769 100644 --- a/mediapipe/calculators/util/association_detection_calculator.cc +++ b/mediapipe/calculators/util/association_detection_calculator.cc @@ -37,27 +37,27 @@ namespace mediapipe { class AssociationDetectionCalculator : public AssociationCalculator<::mediapipe::Detection> { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { return AssociationCalculator<::mediapipe::Detection>::GetContract(cc); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { return AssociationCalculator<::mediapipe::Detection>::Open(cc); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { return AssociationCalculator<::mediapipe::Detection>::Process(cc); } - ::mediapipe::Status Close(CalculatorContext* cc) override { + mediapipe::Status Close(CalculatorContext* cc) override { return AssociationCalculator<::mediapipe::Detection>::Close(cc); } protected: - ::mediapipe::StatusOr GetRectangle( + mediapipe::StatusOr GetRectangle( const ::mediapipe::Detection& input) override { if (!input.has_location_data()) { - return ::mediapipe::InternalError("Missing location_data in Detection"); + return mediapipe::InternalError("Missing location_data in Detection"); } const Location location(input.location_data()); return location.GetRelativeBBox(); diff --git a/mediapipe/calculators/util/association_norm_rect_calculator.cc b/mediapipe/calculators/util/association_norm_rect_calculator.cc index 4069eda60..a77d65f0d 100644 --- a/mediapipe/calculators/util/association_norm_rect_calculator.cc +++ b/mediapipe/calculators/util/association_norm_rect_calculator.cc @@ -36,29 +36,28 @@ namespace mediapipe { class AssociationNormRectCalculator : public AssociationCalculator<::mediapipe::NormalizedRect> { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { return AssociationCalculator<::mediapipe::NormalizedRect>::GetContract(cc); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { return AssociationCalculator<::mediapipe::NormalizedRect>::Open(cc); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { return AssociationCalculator<::mediapipe::NormalizedRect>::Process(cc); } - ::mediapipe::Status Close(CalculatorContext* cc) override { + mediapipe::Status Close(CalculatorContext* cc) override { return AssociationCalculator<::mediapipe::NormalizedRect>::Close(cc); } protected: - ::mediapipe::StatusOr GetRectangle( + mediapipe::StatusOr GetRectangle( const ::mediapipe::NormalizedRect& input) override { if (!input.has_x_center() || !input.has_y_center() || !input.has_width() || 
!input.has_height()) { - return ::mediapipe::InternalError( - "Missing dimensions in NormalizedRect."); + return mediapipe::InternalError("Missing dimensions in NormalizedRect."); } const float xmin = input.x_center() - input.width() / 2.0; const float ymin = input.y_center() - input.height() / 2.0; diff --git a/mediapipe/calculators/util/clock_latency_calculator.cc b/mediapipe/calculators/util/clock_latency_calculator.cc index 768abb2a4..d852c68c7 100644 --- a/mediapipe/calculators/util/clock_latency_calculator.cc +++ b/mediapipe/calculators/util/clock_latency_calculator.cc @@ -60,18 +60,17 @@ class ClockLatencyCalculator : public CalculatorBase { public: ClockLatencyCalculator() {} - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: int64 num_packet_streams_ = -1; }; REGISTER_CALCULATOR(ClockLatencyCalculator); -::mediapipe::Status ClockLatencyCalculator::GetContract( - CalculatorContract* cc) { +mediapipe::Status ClockLatencyCalculator::GetContract(CalculatorContract* cc) { RET_CHECK_GT(cc->Inputs().NumEntries(), 1); int64 num_packet_streams = cc->Inputs().NumEntries() - 1; @@ -83,17 +82,17 @@ REGISTER_CALCULATOR(ClockLatencyCalculator); } cc->Inputs().Tag(kReferenceTag).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ClockLatencyCalculator::Open(CalculatorContext* cc) { +mediapipe::Status ClockLatencyCalculator::Open(CalculatorContext* cc) { // Direct passthrough, as far as timestamp and bounds are concerned. cc->SetOffset(TimestampDiff(0)); num_packet_streams_ = cc->Inputs().NumEntries() - 1; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ClockLatencyCalculator::Process(CalculatorContext* cc) { +mediapipe::Status ClockLatencyCalculator::Process(CalculatorContext* cc) { // Get reference time. RET_CHECK(!cc->Inputs().Tag(kReferenceTag).IsEmpty()); const absl::Time& reference_time = @@ -110,7 +109,7 @@ REGISTER_CALCULATOR(ClockLatencyCalculator); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/util/clock_timestamp_calculator.cc b/mediapipe/calculators/util/clock_timestamp_calculator.cc index ea715f8ae..82bdb41b2 100644 --- a/mediapipe/calculators/util/clock_timestamp_calculator.cc +++ b/mediapipe/calculators/util/clock_timestamp_calculator.cc @@ -52,10 +52,10 @@ class ClockTimestampCalculator : public CalculatorBase { public: ClockTimestampCalculator() {} - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: // Clock object. 
@@ -63,7 +63,7 @@ class ClockTimestampCalculator : public CalculatorBase { }; REGISTER_CALCULATOR(ClockTimestampCalculator); -::mediapipe::Status ClockTimestampCalculator::GetContract( +mediapipe::Status ClockTimestampCalculator::GetContract( CalculatorContract* cc) { RET_CHECK_EQ(cc->Inputs().NumEntries(), 1); RET_CHECK_EQ(cc->Outputs().NumEntries(), 1); @@ -78,10 +78,10 @@ REGISTER_CALCULATOR(ClockTimestampCalculator); .Set>(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ClockTimestampCalculator::Open(CalculatorContext* cc) { +mediapipe::Status ClockTimestampCalculator::Open(CalculatorContext* cc) { // Direct passthrough, as far as timestamp and bounds are concerned. cc->SetOffset(TimestampDiff(0)); @@ -95,14 +95,14 @@ REGISTER_CALCULATOR(ClockTimestampCalculator); ::mediapipe::MonotonicClock::CreateSynchronizedMonotonicClock()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ClockTimestampCalculator::Process(CalculatorContext* cc) { +mediapipe::Status ClockTimestampCalculator::Process(CalculatorContext* cc) { // Push the Time packet to output. auto timestamp_packet = MakePacket(clock_->TimeNow()); cc->Outputs().Index(0).AddPacket(timestamp_packet.At(cc->InputTimestamp())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/util/collection_has_min_size_calculator.h b/mediapipe/calculators/util/collection_has_min_size_calculator.h index 80b4556c5..6cbc63e51 100644 --- a/mediapipe/calculators/util/collection_has_min_size_calculator.h +++ b/mediapipe/calculators/util/collection_has_min_size_calculator.h @@ -42,7 +42,7 @@ namespace mediapipe { template class CollectionHasMinSizeCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { RET_CHECK(cc->Inputs().HasTag("ITERABLE")); RET_CHECK_EQ(1, cc->Inputs().NumEntries()); @@ -60,10 +60,10 @@ class CollectionHasMinSizeCalculator : public CalculatorBase { if (cc->InputSidePackets().NumEntries() > 0) { cc->InputSidePackets().Index(0).Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->SetOffset(TimestampDiff(0)); min_size_ = cc->Options<::mediapipe::CollectionHasMinSizeCalculatorOptions>() @@ -73,17 +73,17 @@ class CollectionHasMinSizeCalculator : public CalculatorBase { !cc->InputSidePackets().Index(0).IsEmpty()) { min_size_ = cc->InputSidePackets().Index(0).Get(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { const IterableT& input = cc->Inputs().Tag("ITERABLE").Get(); bool has_min_size = input.size() >= min_size_; cc->Outputs().Index(0).AddPacket( MakePacket(has_min_size).At(cc->InputTimestamp())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: diff --git a/mediapipe/calculators/util/detection_label_id_to_text_calculator.cc b/mediapipe/calculators/util/detection_label_id_to_text_calculator.cc index 5e5564731..779e9785b 100644 --- a/mediapipe/calculators/util/detection_label_id_to_text_calculator.cc +++ b/mediapipe/calculators/util/detection_label_id_to_text_calculator.cc @@ -47,25 +47,25 @@ namespace mediapipe { // } class 
DetectionLabelIdToTextCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: absl::node_hash_map label_map_; }; REGISTER_CALCULATOR(DetectionLabelIdToTextCalculator); -::mediapipe::Status DetectionLabelIdToTextCalculator::GetContract( +mediapipe::Status DetectionLabelIdToTextCalculator::GetContract( CalculatorContract* cc) { cc->Inputs().Index(0).Set>(); cc->Outputs().Index(0).Set>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status DetectionLabelIdToTextCalculator::Open( +mediapipe::Status DetectionLabelIdToTextCalculator::Open( CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); @@ -90,10 +90,10 @@ REGISTER_CALCULATOR(DetectionLabelIdToTextCalculator); label_map_[i] = options.label(i); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status DetectionLabelIdToTextCalculator::Process( +mediapipe::Status DetectionLabelIdToTextCalculator::Process( CalculatorContext* cc) { std::vector output_detections; for (const auto& input_detection : @@ -115,7 +115,7 @@ REGISTER_CALCULATOR(DetectionLabelIdToTextCalculator); cc->Outputs().Index(0).AddPacket( MakePacket>(output_detections) .At(cc->InputTimestamp())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/util/detection_letterbox_removal_calculator.cc b/mediapipe/calculators/util/detection_letterbox_removal_calculator.cc index cf3761010..a23a1d225 100644 --- a/mediapipe/calculators/util/detection_letterbox_removal_calculator.cc +++ b/mediapipe/calculators/util/detection_letterbox_removal_calculator.cc @@ -70,7 +70,7 @@ constexpr char kLetterboxPaddingTag[] = "LETTERBOX_PADDING"; // } class DetectionLetterboxRemovalCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { RET_CHECK(cc->Inputs().HasTag(kDetectionsTag) && cc->Inputs().HasTag(kLetterboxPaddingTag)) << "Missing one or more input streams."; @@ -80,19 +80,19 @@ class DetectionLetterboxRemovalCalculator : public CalculatorBase { cc->Outputs().Tag(kDetectionsTag).Set>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { // Only process if there's input detections. 
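    // (An empty DETECTIONS packet simply results in no output at this
    // timestamp.)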
     if (cc->Inputs().Tag(kDetectionsTag).IsEmpty()) {
-      return ::mediapipe::OkStatus();
+      return mediapipe::OkStatus();
     }
 
     const auto& input_detections =
@@ -146,7 +146,7 @@ class DetectionLetterboxRemovalCalculator : public CalculatorBase {
     cc->Outputs()
         .Tag("DETECTIONS")
         .Add(output_detections.release(), cc->InputTimestamp());
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 };
 REGISTER_CALCULATOR(DetectionLetterboxRemovalCalculator);
diff --git a/mediapipe/calculators/util/detection_projection_calculator.cc b/mediapipe/calculators/util/detection_projection_calculator.cc
new file mode 100644
index 000000000..9200ebfe3
--- /dev/null
+++ b/mediapipe/calculators/util/detection_projection_calculator.cc
@@ -0,0 +1,179 @@
+// Copyright 2020 The MediaPipe Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <algorithm>
+#include <array>
+#include <functional>
+#include <limits>
+#include <vector>
+
+#include "mediapipe/framework/calculator_framework.h"
+#include "mediapipe/framework/formats/detection.pb.h"
+#include "mediapipe/framework/formats/location.h"
+#include "mediapipe/framework/formats/rect.pb.h"
+#include "mediapipe/framework/port/point2.h"
+#include "mediapipe/framework/port/ret_check.h"
+#include "mediapipe/framework/port/status.h"
+
+namespace mediapipe {
+
+// Projects detections to a different coordinate system using a provided
+// projection matrix.
+//
+// Input:
+//   DETECTIONS - std::vector<Detection>
+//     Detections to project using the provided projection matrix.
+//   PROJECTION_MATRIX - std::array<float, 16>
+//     A 4x4 row-major-order matrix that maps data from one coordinate system
+//     to another.
+//
+// Output:
+//   DETECTIONS - std::vector<Detection>
+//     Projected detections.
+//
+// Example:
+//   node {
+//     calculator: "DetectionProjectionCalculator"
+//     input_stream: "DETECTIONS:detections"
+//     input_stream: "PROJECTION_MATRIX:matrix"
+//     output_stream: "DETECTIONS:projected_detections"
+//   }
+class DetectionProjectionCalculator : public CalculatorBase {
+ public:
+  static mediapipe::Status GetContract(CalculatorContract* cc);
+  mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
+};
+REGISTER_CALCULATOR(DetectionProjectionCalculator);
+
+namespace {
+
+constexpr char kDetections[] = "DETECTIONS";
+constexpr char kProjectionMatrix[] = "PROJECTION_MATRIX";
+
+mediapipe::Status ProjectDetection(
+    const std::function<Point2_f(const Point2_f&)>& project_fn,
+    Detection* detection) {
+  auto* location_data = detection->mutable_location_data();
+  RET_CHECK_EQ(location_data->format(), LocationData::RELATIVE_BOUNDING_BOX);
+
+  // Project keypoints.
+  for (int i = 0; i < location_data->relative_keypoints_size(); ++i) {
+    auto* kp = location_data->mutable_relative_keypoints(i);
+    const auto point = project_fn({kp->x(), kp->y()});
+    kp->set_x(point.x());
+    kp->set_y(point.y());
+  }
+
+  // Project bounding box.
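+  // project_fn applies the 2D-affine part of the row-major 4x4
+  // PROJECTION_MATRIX m: (x, y) -> (m[0]*x + m[1]*y + m[3],
+  //                                 m[4]*x + m[5]*y + m[7]).
+  // The box is then rebuilt as the axis-aligned envelope of its four
+  // projected corners, so under rotation it can grow relative to the
+  // source box.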
+  auto* box = location_data->mutable_relative_bounding_box();
+
+  const float xmin = box->xmin();
+  const float ymin = box->ymin();
+  const float width = box->width();
+  const float height = box->height();
+  // a) Define and project box points.
+  std::array<Point2_f, 4> box_coordinates = {
+      Point2_f{xmin, ymin}, Point2_f{xmin + width, ymin},
+      Point2_f{xmin + width, ymin + height}, Point2_f{xmin, ymin + height}};
+  std::transform(box_coordinates.begin(), box_coordinates.end(),
+                 box_coordinates.begin(), project_fn);
+  // b) Find new left top and right bottom points for a box which encompasses
+  // the non-projected (rotated) box.
+  constexpr float kFloatMax = std::numeric_limits<float>::max();
+  constexpr float kFloatMin = std::numeric_limits<float>::lowest();
+  Point2_f left_top = {kFloatMax, kFloatMax};
+  Point2_f right_bottom = {kFloatMin, kFloatMin};
+  std::for_each(box_coordinates.begin(), box_coordinates.end(),
+                [&left_top, &right_bottom](const Point2_f& p) {
+                  left_top.set_x(std::min(left_top.x(), p.x()));
+                  left_top.set_y(std::min(left_top.y(), p.y()));
+                  right_bottom.set_x(std::max(right_bottom.x(), p.x()));
+                  right_bottom.set_y(std::max(right_bottom.y(), p.y()));
+                });
+  box->set_xmin(left_top.x());
+  box->set_ymin(left_top.y());
+  box->set_width(right_bottom.x() - left_top.x());
+  box->set_height(right_bottom.y() - left_top.y());
+
+  return mediapipe::OkStatus();
+}
+
+}  // namespace
+
+mediapipe::Status DetectionProjectionCalculator::GetContract(
+    CalculatorContract* cc) {
+  RET_CHECK(cc->Inputs().HasTag(kDetections) &&
+            cc->Inputs().HasTag(kProjectionMatrix))
+      << "Missing one or more input streams.";
+
+  RET_CHECK_EQ(cc->Inputs().NumEntries(kDetections),
+               cc->Outputs().NumEntries(kDetections))
+      << "Same number of DETECTIONS input and output is required.";
+
+  for (CollectionItemId id = cc->Inputs().BeginId(kDetections);
+       id != cc->Inputs().EndId(kDetections); ++id) {
+    cc->Inputs().Get(id).Set<std::vector<Detection>>();
+  }
+  cc->Inputs().Tag(kProjectionMatrix).Set<std::array<float, 16>>();
+
+  for (CollectionItemId id = cc->Outputs().BeginId(kDetections);
+       id != cc->Outputs().EndId(kDetections); ++id) {
+    cc->Outputs().Get(id).Set<std::vector<Detection>>();
+  }
+
+  return mediapipe::OkStatus();
+}
+
+mediapipe::Status DetectionProjectionCalculator::Open(CalculatorContext* cc) {
+  cc->SetOffset(TimestampDiff(0));
+  return mediapipe::OkStatus();
+}
+
+mediapipe::Status DetectionProjectionCalculator::Process(
+    CalculatorContext* cc) {
+  if (cc->Inputs().Tag(kProjectionMatrix).IsEmpty()) {
+    return mediapipe::OkStatus();
+  }
+  const auto& project_mat =
+      cc->Inputs().Tag(kProjectionMatrix).Get<std::array<float, 16>>();
+  auto project_fn = [project_mat](const Point2_f& p) -> Point2_f {
+    return {p.x() * project_mat[0] + p.y() * project_mat[1] + project_mat[3],
+            p.x() * project_mat[4] + p.y() * project_mat[5] + project_mat[7]};
+  };
+
+  CollectionItemId input_id = cc->Inputs().BeginId(kDetections);
+  CollectionItemId output_id = cc->Outputs().BeginId(kDetections);
+  // Number of inputs and outputs is the same according to the contract.
+  for (; input_id != cc->Inputs().EndId(kDetections);
+       ++input_id, ++output_id) {
+    const auto& input_packet = cc->Inputs().Get(input_id);
+    if (input_packet.IsEmpty()) {
+      continue;
+    }
+
+    std::vector<Detection> output_detections;
+    for (const auto& detection : input_packet.Get<std::vector<Detection>>()) {
+      Detection output_detection = detection;
+      MP_RETURN_IF_ERROR(ProjectDetection(project_fn, &output_detection));
+      output_detections.push_back(std::move(output_detection));
+    }
+
+    cc->Outputs().Get(output_id).AddPacket(
+        MakePacket<std::vector<Detection>>(std::move(output_detections))
+            .At(cc->InputTimestamp()));
+  }
+  return mediapipe::OkStatus();
+}
+
+}  // namespace mediapipe
diff --git a/mediapipe/calculators/util/detection_projection_calculator_test.cc b/mediapipe/calculators/util/detection_projection_calculator_test.cc
new file mode 100644
index 000000000..bf8d49263
--- /dev/null
+++ b/mediapipe/calculators/util/detection_projection_calculator_test.cc
@@ -0,0 +1,309 @@
+// Copyright 2019 The MediaPipe Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <array>
+#include <utility>
+#include <vector>
+
+#include "mediapipe/calculators/tensor/image_to_tensor_utils.h"
+#include "mediapipe/framework/calculator_framework.h"
+#include "mediapipe/framework/calculator_runner.h"
+#include "mediapipe/framework/formats/detection.pb.h"
+#include "mediapipe/framework/formats/location.h"
+#include "mediapipe/framework/formats/rect.pb.h"
+#include "mediapipe/framework/port/gmock.h"
+#include "mediapipe/framework/port/gtest.h"
+#include "mediapipe/framework/port/parse_text_proto.h"
+#include "mediapipe/framework/port/point2.h"
+#include "mediapipe/framework/port/status_matchers.h"
+
+namespace mediapipe {
+namespace {
+
+using ::testing::ElementsAre;
+using ::testing::FloatNear;
+
+constexpr float kMaxError = 1e-4;
+
+MATCHER_P2(PointEq, x, y, "") {
+  bool result = testing::Value(arg.x(), FloatNear(x, kMaxError)) &&
+                testing::Value(arg.y(), FloatNear(y, kMaxError));
+  if (!result) {
+    *result_listener << "actual: {" << arg.x() << ", " << arg.y()
+                     << "}, expected: {" << x << ", " << y << "}";
+  }
+  return result;
+}
+
+MATCHER_P4(BoundingBoxEq, xmin, ymin, width, height, "") {
+  return testing::Value(arg.xmin(), FloatNear(xmin, kMaxError)) &&
+         testing::Value(arg.ymin(), FloatNear(ymin, kMaxError)) &&
+         testing::Value(arg.width(), FloatNear(width, kMaxError)) &&
+         testing::Value(arg.height(), FloatNear(height, kMaxError));
+}
+
+std::vector<Point2_f> GetPoints(const Detection& detection) {
+  std::vector<Point2_f> points;
+  const auto& location_data = detection.location_data();
+  for (int i = 0; i < location_data.relative_keypoints_size(); ++i) {
+    const auto& kp = location_data.relative_keypoints(i);
+    points.push_back({kp.x(), kp.y()});
+  }
+  return points;
+}
+
+// Test helper function to run "DetectionProjectionCalculator".
+mediapipe::StatusOr<Detection> RunProjectionCalculator(
+    Detection detection, std::array<float, 16> project_mat) {
+  CalculatorRunner runner(ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"(
+    calculator: "DetectionProjectionCalculator"
+    input_stream: "DETECTIONS:detections"
+    input_stream: "PROJECTION_MATRIX:matrix"
+    output_stream: "DETECTIONS:projected_detections"
+  )"));
+
+  runner.MutableInputs()
+      ->Tag("DETECTIONS")
+      .packets.push_back(MakePacket<std::vector<Detection>>(
+                             std::vector<Detection>({std::move(detection)}))
+                             .At(Timestamp::PostStream()));
+  runner.MutableInputs()
+      ->Tag("PROJECTION_MATRIX")
+      .packets.push_back(
+          MakePacket<std::array<float, 16>>(std::move(project_mat))
+              .At(Timestamp::PostStream()));
+
+  MP_RETURN_IF_ERROR(runner.Run());
+  const std::vector<Packet>& output =
+      runner.Outputs().Tag("DETECTIONS").packets;
+  RET_CHECK_EQ(output.size(), 1);
+  const auto& output_detections = output[0].Get<std::vector<Detection>>();
+
+  RET_CHECK_EQ(output_detections.size(), 1);
+  return output_detections[0];
+}
+
+TEST(DetectionProjectionCalculatorTest, ProjectionFullRoiNoOp) {
+  Detection detection;
+  auto* location_data = detection.mutable_location_data();
+  location_data->set_format(LocationData::RELATIVE_BOUNDING_BOX);
+  location_data->mutable_relative_bounding_box()->set_xmin(0.0f);
+  location_data->mutable_relative_bounding_box()->set_ymin(0.0f);
+  location_data->mutable_relative_bounding_box()->set_width(0.5f);
+  location_data->mutable_relative_bounding_box()->set_height(0.5f);
+
+  auto* kp = location_data->add_relative_keypoints();
+  kp->set_x(0.25f);
+  kp->set_y(0.25f);
+
+  mediapipe::NormalizedRect roi;
+  roi.set_x_center(0.5f);
+  roi.set_y_center(0.5f);
+  roi.set_width(1.0f);
+  roi.set_height(1.0f);
+  roi.set_rotation(0.0f);
+
+  constexpr int kImageWidth = 100;
+  constexpr int kImageHeight = 100;
+
+  RotatedRect rect;
+  rect.center_x = roi.x_center() * kImageWidth;
+  rect.center_y = roi.y_center() * kImageHeight;
+  rect.width = roi.width() * kImageWidth;
+  rect.height = roi.height() * kImageHeight;
+  rect.rotation = roi.rotation();
+
+  std::array<float, 16> projection_matrix;
+  GetRotatedSubRectToRectTransformMatrix(rect, kImageWidth, kImageHeight,
+                                         /*flip_horizontaly=*/false,
+                                         &projection_matrix);
+
+  auto status_or_result = RunProjectionCalculator(std::move(detection),
+                                                  std::move(projection_matrix));
+  MP_ASSERT_OK(status_or_result);
+  const auto& result = status_or_result.ValueOrDie();
+  ASSERT_EQ(result.location_data().format(),
+            LocationData::RELATIVE_BOUNDING_BOX);
+  EXPECT_THAT(result.location_data().relative_bounding_box(),
+              BoundingBoxEq(0.0f, 0.0f, 0.5f, 0.5f));
+  EXPECT_THAT(GetPoints(result), testing::ElementsAre(PointEq(0.25f, 0.25f)));
+}
+
+TEST(DetectionProjectionCalculatorTest, ProjectionFullRoi90Rotation) {
+  Detection detection;
+  auto* location_data = detection.mutable_location_data();
+  location_data->set_format(LocationData::RELATIVE_BOUNDING_BOX);
+  location_data->mutable_relative_bounding_box()->set_xmin(0.0f);
+  location_data->mutable_relative_bounding_box()->set_ymin(0.0f);
+  location_data->mutable_relative_bounding_box()->set_width(0.5f);
+  location_data->mutable_relative_bounding_box()->set_height(0.5f);
+
+  auto* kp = location_data->add_relative_keypoints();
+  kp->set_x(0.25f);
+  kp->set_y(0.25f);
+
+  mediapipe::NormalizedRect roi;
+  roi.set_x_center(0.5f);
+  roi.set_y_center(0.5f);
+  roi.set_width(1.0f);
+  roi.set_height(1.0f);
+  roi.set_rotation(90 * M_PI / 180.0f);
+
+  constexpr int kImageWidth = 100;
+  constexpr int kImageHeight = 100;
+
+  RotatedRect rect;
+  rect.center_x = roi.x_center() * kImageWidth;
+  rect.center_y = roi.y_center() * kImageHeight;
+  rect.width = roi.width() * kImageWidth;
+  rect.height = roi.height() * kImageHeight;
+  rect.rotation = roi.rotation();
+
+  std::array<float, 16> projection_matrix;
+  GetRotatedSubRectToRectTransformMatrix(rect, kImageWidth, kImageHeight,
+                                         /*flip_horizontaly=*/false,
+                                         &projection_matrix);
+
+  auto status_or_result = RunProjectionCalculator(std::move(detection),
+                                                  std::move(projection_matrix));
+  MP_ASSERT_OK(status_or_result);
+  const auto& result = status_or_result.ValueOrDie();
+  ASSERT_EQ(result.location_data().format(),
+            LocationData::RELATIVE_BOUNDING_BOX);
+  EXPECT_THAT(result.location_data().relative_bounding_box(),
+              BoundingBoxEq(0.5f, 0.0f, 0.5f, 0.5f));
+  EXPECT_THAT(GetPoints(result), ElementsAre(PointEq(0.75f, 0.25f)));
+}
+
+TEST(DetectionProjectionCalculatorTest, ProjectionSmallerRoi) {
+  Detection detection;
+  auto* location_data = detection.mutable_location_data();
+  location_data->set_format(LocationData::RELATIVE_BOUNDING_BOX);
+  location_data->mutable_relative_bounding_box()->set_xmin(0.5f);
+  location_data->mutable_relative_bounding_box()->set_ymin(0.0f);
+  location_data->mutable_relative_bounding_box()->set_width(0.5f);
+  location_data->mutable_relative_bounding_box()->set_height(0.5f);
+
+  auto* kp = location_data->add_relative_keypoints();
+  kp->set_x(0.5f);
+  kp->set_y(0.5f);
+
+  mediapipe::NormalizedRect roi;
+  roi.set_x_center(0.75f);
+  roi.set_y_center(0.75f);
+  roi.set_width(0.5f);
+  roi.set_height(0.5f);
+  roi.set_rotation(0.0f);
+
+  constexpr int kImageWidth = 100;
+  constexpr int kImageHeight = 100;
+
+  RotatedRect rect;
+  rect.center_x = roi.x_center() * kImageWidth;
+  rect.center_y = roi.y_center() * kImageHeight;
+  rect.width = roi.width() * kImageWidth;
+  rect.height = roi.height() * kImageHeight;
+  rect.rotation = roi.rotation();
+
+  std::array<float, 16> projection_matrix;
+  GetRotatedSubRectToRectTransformMatrix(rect, kImageWidth, kImageHeight,
+                                         /*flip_horizontaly=*/false,
+                                         &projection_matrix);
+
+  auto status_or_result = RunProjectionCalculator(std::move(detection),
+                                                  std::move(projection_matrix));
+  MP_ASSERT_OK(status_or_result);
+  const auto& result = status_or_result.ValueOrDie();
+  ASSERT_EQ(result.location_data().format(),
+            LocationData::RELATIVE_BOUNDING_BOX);
+  EXPECT_THAT(result.location_data().relative_bounding_box(),
+              BoundingBoxEq(0.75f, 0.5f, 0.25f, 0.25f));
+  EXPECT_THAT(GetPoints(result), ElementsAre(PointEq(0.75f, 0.75f)));
+}
+
+TEST(DetectionProjectionCalculatorTest, ProjectionSmallerRoi30Rotation) {
+  constexpr float kImageWidth = 80;
+  constexpr float kImageHeight = 120;
+  constexpr float kRectWidth = 50;
+  constexpr float kRectHeight = 30;
+  constexpr float kRectXCenter = 65;
+  constexpr float kRectYCenter = 85;
+  constexpr float kRectRotation = 30 * M_PI / 180.0f;
+
+  Detection detection;
+  auto* location_data = detection.mutable_location_data();
+  location_data->set_format(LocationData::RELATIVE_BOUNDING_BOX);
+  location_data->mutable_relative_bounding_box()->set_xmin(0.0f);
+  location_data->mutable_relative_bounding_box()->set_ymin(0.0f);
+  location_data->mutable_relative_bounding_box()->set_width(1.0f);
+  location_data->mutable_relative_bounding_box()->set_height(1.0f);
+  // Expected box values were calculated manually from image.
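+  // For the 50x30 ROI rotated by 30 degrees, the projected box is the
+  // axis-aligned bounds of the rotated rect: width = 50*cos(30) + 30*sin(30)
+  // = 58.301 px and height = 50*sin(30) + 30*cos(30) = 50.981 px, centered at
+  // (65, 85), hence xmin = 65 - 58.301/2 = 35.849 and
+  // ymin = 85 - 50.981/2 = 59.510, all normalized by the image size.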
+  constexpr float kExpectedBoxXMin = 35.849f / kImageWidth;
+  constexpr float kExpectedBoxYMin = 59.510f / kImageHeight;
+  constexpr float kExpectedBoxWidth = 58.301f / kImageWidth;
+  constexpr float kExpectedBoxHeight = 50.981f / kImageHeight;
+
+  auto* kp1 = location_data->add_relative_keypoints();
+  kp1->set_x(0.0f);
+  kp1->set_y(0.0f);
+  auto* kp2 = location_data->add_relative_keypoints();
+  kp2->set_x(0.5f);
+  kp2->set_y(0.5f);
+  auto* kp3 = location_data->add_relative_keypoints();
+  kp3->set_x(1.0f);
+  kp3->set_y(0.0f);
+  // Expected key points were calculated manually from image.
+  constexpr float kExpectedPoint1X = 50.85f / kImageWidth;
+  constexpr float kExpectedPoint1Y = 59.52f / kImageHeight;
+  constexpr float kExpectedPoint2X = kRectXCenter / kImageWidth;
+  constexpr float kExpectedPoint2Y = kRectYCenter / kImageHeight;
+  constexpr float kExpectedPoint3X = 94.15f / kImageWidth;
+  constexpr float kExpectedPoint3Y = 84.51f / kImageHeight;
+
+  mediapipe::NormalizedRect roi;
+  roi.set_x_center(kRectXCenter / kImageWidth);
+  roi.set_y_center(kRectYCenter / kImageHeight);
+  roi.set_width(kRectWidth / kImageWidth);
+  roi.set_height(kRectHeight / kImageHeight);
+  roi.set_rotation(kRectRotation);
+
+  RotatedRect rect;
+  rect.center_x = roi.x_center() * kImageWidth;
+  rect.center_y = roi.y_center() * kImageHeight;
+  rect.width = roi.width() * kImageWidth;
+  rect.height = roi.height() * kImageHeight;
+  rect.rotation = roi.rotation();
+
+  std::array<float, 16> projection_matrix;
+  GetRotatedSubRectToRectTransformMatrix(rect, kImageWidth, kImageHeight,
+                                         /*flip_horizontaly=*/false,
+                                         &projection_matrix);
+
+  auto status_or_result = RunProjectionCalculator(std::move(detection),
+                                                  std::move(projection_matrix));
+  MP_ASSERT_OK(status_or_result);
+  const auto& result = status_or_result.ValueOrDie();
+  ASSERT_EQ(result.location_data().format(),
+            LocationData::RELATIVE_BOUNDING_BOX);
+  EXPECT_THAT(result.location_data().relative_bounding_box(),
+              BoundingBoxEq(kExpectedBoxXMin, kExpectedBoxYMin,
+                            kExpectedBoxWidth, kExpectedBoxHeight));
+  EXPECT_THAT(GetPoints(result),
+              ElementsAre(PointEq(kExpectedPoint1X, kExpectedPoint1Y),
+                          PointEq(kExpectedPoint2X, kExpectedPoint2Y),
+                          PointEq(kExpectedPoint3X, kExpectedPoint3Y)));
+}
+
+}  // namespace
+}  // namespace mediapipe
diff --git a/mediapipe/calculators/util/detection_unique_id_calculator.cc b/mediapipe/calculators/util/detection_unique_id_calculator.cc
index 2069f1677..9a4d1afa4 100644
--- a/mediapipe/calculators/util/detection_unique_id_calculator.cc
+++ b/mediapipe/calculators/util/detection_unique_id_calculator.cc
@@ -44,7 +44,7 @@ inline int GetNextDetectionId() { return ++detection_id; }
 // }
 class DetectionUniqueIdCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     RET_CHECK(cc->Inputs().HasTag(kDetectionListTag) ||
               cc->Inputs().HasTag(kDetectionsTag))
         << "None of the input streams are provided.";
@@ -60,19 +60,18 @@ class DetectionUniqueIdCalculator : public CalculatorBase {
       cc->Outputs().Tag(kDetectionsTag).Set<std::vector<Detection>>();
     }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
-  ::mediapipe::Status Open(CalculatorContext* cc) override {
-    cc->SetOffset(::mediapipe::TimestampDiff(0));
-    return ::mediapipe::OkStatus();
+  mediapipe::Status Open(CalculatorContext* cc) override {
+    cc->SetOffset(mediapipe::TimestampDiff(0));
+    return mediapipe::OkStatus();
   }
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
 };
 REGISTER_CALCULATOR(DetectionUniqueIdCalculator);
-::mediapipe::Status DetectionUniqueIdCalculator::Process(
-    CalculatorContext* cc) {
+mediapipe::Status DetectionUniqueIdCalculator::Process(CalculatorContext* cc) {
   if (cc->Inputs().HasTag(kDetectionListTag) &&
       !cc->Inputs().Tag(kDetectionListTag).IsEmpty()) {
     auto result =
@@ -104,7 +103,7 @@ REGISTER_CALCULATOR(DetectionUniqueIdCalculator);
           .Add(detections.release(), cc->InputTimestamp());
     }
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 }  // namespace mediapipe
diff --git a/mediapipe/calculators/util/detections_to_rects_calculator.cc b/mediapipe/calculators/util/detections_to_rects_calculator.cc
index a126ed8cb..27c0460e2 100644
--- a/mediapipe/calculators/util/detections_to_rects_calculator.cc
+++ b/mediapipe/calculators/util/detections_to_rects_calculator.cc
@@ -40,8 +40,8 @@ constexpr char kNormRectsTag[] = "NORM_RECTS";
 constexpr float kMinFloat = std::numeric_limits<float>::lowest();
 constexpr float kMaxFloat = std::numeric_limits<float>::max();
-::mediapipe::Status NormRectFromKeyPoints(const LocationData& location_data,
-                                          NormalizedRect* rect) {
+mediapipe::Status NormRectFromKeyPoints(const LocationData& location_data,
+                                        NormalizedRect* rect) {
   RET_CHECK_GT(location_data.relative_keypoints_size(), 1)
       << "2 or more key points required to calculate a rect.";
   float xmin = kMaxFloat;
@@ -59,7 +59,7 @@ constexpr float kMaxFloat = std::numeric_limits<float>::max();
   rect->set_y_center((ymin + ymax) / 2);
   rect->set_width(xmax - xmin);
   rect->set_height(ymax - ymin);
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 template <class B, class R>
@@ -72,7 +72,7 @@ void RectFromBox(B box, R* rect) {
 }
 }  // namespace
-::mediapipe::Status DetectionsToRectsCalculator::DetectionToRect(
+mediapipe::Status DetectionsToRectsCalculator::DetectionToRect(
     const Detection& detection, const DetectionSpec& detection_spec,
     Rect* rect) {
   const LocationData location_data = detection.location_data();
@@ -101,10 +101,10 @@ void RectFromBox(B box, R* rect) {
       break;
     }
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
-::mediapipe::Status DetectionsToRectsCalculator::DetectionToNormalizedRect(
+mediapipe::Status DetectionsToRectsCalculator::DetectionToNormalizedRect(
    const Detection& detection, const DetectionSpec& detection_spec,
    NormalizedRect* rect) {
   const LocationData location_data = detection.location_data();
@@ -124,10 +124,10 @@ void RectFromBox(B box, R* rect) {
      break;
    }
  }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
-::mediapipe::Status DetectionsToRectsCalculator::GetContract(
+mediapipe::Status DetectionsToRectsCalculator::GetContract(
    CalculatorContract* cc) {
  RET_CHECK(cc->Inputs().HasTag(kDetectionTag) ^
            cc->Inputs().HasTag(kDetectionsTag))
@@ -164,10 +164,10 @@ void RectFromBox(B box, R* rect) {
    cc->Outputs().Tag(kNormRectsTag).Set<std::vector<NormalizedRect>>();
  }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
-::mediapipe::Status DetectionsToRectsCalculator::Open(CalculatorContext* cc) {
+mediapipe::Status DetectionsToRectsCalculator::Open(CalculatorContext* cc) {
   cc->SetOffset(TimestampDiff(0));
   options_ = cc->Options<DetectionsToRectsCalculatorOptions>();
@@ -192,18 +192,17 @@ void RectFromBox(B box, R* rect) {
   output_zero_rect_for_empty_detections_ =
       options_.output_zero_rect_for_empty_detections();
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
-::mediapipe::Status DetectionsToRectsCalculator::Process(
-    CalculatorContext* cc) {
+mediapipe::Status DetectionsToRectsCalculator::Process(CalculatorContext* cc) {
   if (cc->Inputs().HasTag(kDetectionTag) &&
       cc->Inputs().Tag(kDetectionTag).IsEmpty()) {
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
   if (cc->Inputs().HasTag(kDetectionsTag) &&
       cc->Inputs().Tag(kDetectionsTag).IsEmpty()) {
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
   std::vector<Detection> detections;
@@ -231,7 +230,7 @@ void RectFromBox(B box, R* rect) {
         .Add(rect_vector.release(), cc->InputTimestamp());
     }
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 }
@@ -298,10 +297,10 @@ void RectFromBox(B box, R* rect) {
       .Add(output_rects.release(), cc->InputTimestamp());
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
-::mediapipe::Status DetectionsToRectsCalculator::ComputeRotation(
+mediapipe::Status DetectionsToRectsCalculator::ComputeRotation(
     const Detection& detection, const DetectionSpec& detection_spec,
     float* rotation) {
   const auto& location_data = detection.location_data();
@@ -319,7 +318,7 @@ void RectFromBox(B box, R* rect) {
   *rotation =
       NormalizeRadians(target_angle_ - std::atan2(-(y1 - y0), x1 - x0));
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 DetectionSpec DetectionsToRectsCalculator::GetDetectionSpec(
diff --git a/mediapipe/calculators/util/detections_to_rects_calculator.h b/mediapipe/calculators/util/detections_to_rects_calculator.h
index 7fb26895e..333f9bfdd 100644
--- a/mediapipe/calculators/util/detections_to_rects_calculator.h
+++ b/mediapipe/calculators/util/detections_to_rects_calculator.h
@@ -83,19 +83,19 @@ struct DetectionSpec {
 // }
 class DetectionsToRectsCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc);
+  static mediapipe::Status GetContract(CalculatorContract* cc);
-  ::mediapipe::Status Open(CalculatorContext* cc) override;
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
  protected:
-  virtual ::mediapipe::Status DetectionToRect(
+  virtual mediapipe::Status DetectionToRect(
       const ::mediapipe::Detection& detection,
       const DetectionSpec& detection_spec, ::mediapipe::Rect* rect);
-  virtual ::mediapipe::Status DetectionToNormalizedRect(
+  virtual mediapipe::Status DetectionToNormalizedRect(
       const ::mediapipe::Detection& detection,
       const DetectionSpec& detection_spec, ::mediapipe::NormalizedRect* rect);
-  virtual ::mediapipe::Status ComputeRotation(
+  virtual mediapipe::Status ComputeRotation(
       const ::mediapipe::Detection& detection,
       const DetectionSpec& detection_spec, float* rotation);
   virtual DetectionSpec GetDetectionSpec(const CalculatorContext* cc);
diff --git a/mediapipe/calculators/util/detections_to_rects_calculator_test.cc b/mediapipe/calculators/util/detections_to_rects_calculator_test.cc
index e526a8532..cebe64153 100644
--- a/mediapipe/calculators/util/detections_to_rects_calculator_test.cc
+++ b/mediapipe/calculators/util/detections_to_rects_calculator_test.cc
@@ -105,7 +105,7 @@ TEST(DetectionsToRectsCalculatorTest, DetectionToRect) {
   EXPECT_THAT(rect, RectEq(250, 400, 300, 400));
 }
-::mediapipe::StatusOr<Rect> RunDetectionKeyPointsToRectCalculation(
+mediapipe::StatusOr<Rect> RunDetectionKeyPointsToRectCalculation(
     Detection detection, std::pair<int, int> image_size) {
   CalculatorRunner runner(ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"(
     calculator: "DetectionsToRectsCalculator"
     input_stream: "DETECTION:detection"
@@ -181,8 +181,8 @@ TEST(DetectionsToRectsCalculatorTest,
                                        DetectionToNormalizedRect) {
   EXPECT_THAT(rect, NormRectEq(0.25f, 0.4f, 0.3f, 0.4f));
 }
-::mediapipe::StatusOr<NormalizedRect>
-RunDetectionKeyPointsToNormRectCalculation(Detection detection) {
+mediapipe::StatusOr<NormalizedRect> RunDetectionKeyPointsToNormRectCalculation(
+    Detection detection) {
   CalculatorRunner runner(ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"(
     calculator: "DetectionsToRectsCalculator"
     input_stream: "DETECTION:detection"
diff --git a/mediapipe/calculators/util/detections_to_render_data_calculator.cc b/mediapipe/calculators/util/detections_to_render_data_calculator.cc
index 5082cd363..94099f603 100644
--- a/mediapipe/calculators/util/detections_to_render_data_calculator.cc
+++ b/mediapipe/calculators/util/detections_to_render_data_calculator.cc
@@ -82,11 +82,11 @@ class DetectionsToRenderDataCalculator : public CalculatorBase {
   DetectionsToRenderDataCalculator& operator=(
       const DetectionsToRenderDataCalculator&) = delete;
-  static ::mediapipe::Status GetContract(CalculatorContract* cc);
+  static mediapipe::Status GetContract(CalculatorContract* cc);
-  ::mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Open(CalculatorContext* cc) override;
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
  private:
   // These utility methods are supposed to be used only by this class. No
@@ -122,7 +122,7 @@ class DetectionsToRenderDataCalculator : public CalculatorBase {
 };
 REGISTER_CALCULATOR(DetectionsToRenderDataCalculator);
-::mediapipe::Status DetectionsToRenderDataCalculator::GetContract(
+mediapipe::Status DetectionsToRenderDataCalculator::GetContract(
     CalculatorContract* cc) {
   RET_CHECK(cc->Inputs().HasTag(kDetectionListTag) ||
             cc->Inputs().HasTag(kDetectionsTag) ||
@@ -139,17 +139,17 @@ REGISTER_CALCULATOR(DetectionsToRenderDataCalculator);
     cc->Inputs().Tag(kDetectionsTag).Set<std::vector<Detection>>();
   }
   cc->Outputs().Tag(kRenderDataTag).Set<RenderData>();
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
-::mediapipe::Status DetectionsToRenderDataCalculator::Open(
+mediapipe::Status DetectionsToRenderDataCalculator::Open(
     CalculatorContext* cc) {
   cc->SetOffset(TimestampDiff(0));
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
-::mediapipe::Status DetectionsToRenderDataCalculator::Process(
+mediapipe::Status DetectionsToRenderDataCalculator::Process(
    CalculatorContext* cc) {
   const auto& options = cc->Options<DetectionsToRenderDataCalculatorOptions>();
   const bool has_detection_from_list =
@@ -165,7 +165,7 @@ REGISTER_CALCULATOR(DetectionsToRenderDataCalculator);
       !cc->Inputs().Tag(kDetectionTag).IsEmpty();
   if (!options.produce_empty_packet() && !has_detection_from_list &&
       !has_detection_from_vector && !has_single_detection) {
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
   // TODO: Add score threshold to
@@ -191,7 +191,7 @@ REGISTER_CALCULATOR(DetectionsToRenderDataCalculator);
   cc->Outputs()
       .Tag(kRenderDataTag)
       .Add(render_data.release(), cc->InputTimestamp());
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 void DetectionsToRenderDataCalculator::SetRenderAnnotationColorThickness(
diff --git a/mediapipe/calculators/util/detections_to_timed_box_list_calculator.cc b/mediapipe/calculators/util/detections_to_timed_box_list_calculator.cc
index b0a177e58..38907d6e7 100644
--- a/mediapipe/calculators/util/detections_to_timed_box_list_calculator.cc
+++ b/mediapipe/calculators/util/detections_to_timed_box_list_calculator.cc
@@ -42,7 +42,7 @@ constexpr char kBoxesTag[] = "BOXES";
 // }
 class DetectionsToTimedBoxListCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     RET_CHECK(cc->Inputs().HasTag(kDetectionListTag) ||
               cc->Inputs().HasTag(kDetectionsTag))
         << "None of the input streams are provided.";
@@ -53,14 +53,14 @@ class DetectionsToTimedBoxListCalculator : public CalculatorBase {
       cc->Inputs().Tag(kDetectionsTag).Set<std::vector<Detection>>();
     }
     cc->Outputs().Tag(kBoxesTag).Set<TimedBoxProtoList>();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
-  ::mediapipe::Status Open(CalculatorContext* cc) override {
+  mediapipe::Status Open(CalculatorContext* cc) override {
     cc->SetOffset(TimestampDiff(0));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
  private:
   void ConvertDetectionToTimedBox(const Detection& detection,
@@ -68,7 +68,7 @@ class DetectionsToTimedBoxListCalculator : public CalculatorBase {
 };
 REGISTER_CALCULATOR(DetectionsToTimedBoxListCalculator);
-::mediapipe::Status DetectionsToTimedBoxListCalculator::Process(
+mediapipe::Status DetectionsToTimedBoxListCalculator::Process(
     CalculatorContext* cc) {
   auto output_timed_box_list = absl::make_unique<TimedBoxProtoList>();
@@ -91,7 +91,7 @@ REGISTER_CALCULATOR(DetectionsToTimedBoxListCalculator);
   cc->Outputs().Tag(kBoxesTag).Add(output_timed_box_list.release(),
                                    cc->InputTimestamp());
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 void DetectionsToTimedBoxListCalculator::ConvertDetectionToTimedBox(
diff --git a/mediapipe/calculators/util/filter_collection_calculator.h b/mediapipe/calculators/util/filter_collection_calculator.h
index 5f08dd982..f3799dd23 100644
--- a/mediapipe/calculators/util/filter_collection_calculator.h
+++ b/mediapipe/calculators/util/filter_collection_calculator.h
@@ -42,7 +42,7 @@ namespace mediapipe {
 template <typename IterableT>
 class FilterCollectionCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     RET_CHECK(cc->Inputs().HasTag("ITERABLE"));
     RET_CHECK(cc->Inputs().HasTag("CONDITION"));
     RET_CHECK(cc->Outputs().HasTag("ITERABLE"));
@@ -52,20 +52,20 @@ class FilterCollectionCalculator : public CalculatorBase {
     cc->Outputs().Tag("ITERABLE").Set<IterableT>();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
-  ::mediapipe::Status Open(CalculatorContext* cc) override {
+  mediapipe::Status Open(CalculatorContext* cc) override {
     cc->SetOffset(TimestampDiff(0));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
-  ::mediapipe::Status Process(CalculatorContext* cc) override {
+  mediapipe::Status Process(CalculatorContext* cc) override {
     if (cc->Inputs().Tag("ITERABLE").IsEmpty()) {
-      return ::mediapipe::OkStatus();
+      return mediapipe::OkStatus();
     }
     if (cc->Inputs().Tag("CONDITION").IsEmpty()) {
-      return ::mediapipe::OkStatus();
+      return mediapipe::OkStatus();
    }
     const std::vector<bool>& filter_by =
@@ -77,11 +77,11 @@ class FilterCollectionCalculator : public CalculatorBase {
   }
   template <typename IterableU>
-  ::mediapipe::Status FilterCollection(std::true_type, CalculatorContext* cc,
-                                       const std::vector<bool>& filter_by) {
+  mediapipe::Status FilterCollection(std::true_type, CalculatorContext* cc,
+                                     const std::vector<bool>& filter_by) {
     const IterableU& input = cc->Inputs().Tag("ITERABLE").Get<IterableU>();
     if (input.size() != filter_by.size()) {
-      return ::mediapipe::InternalError(absl::StrCat(
+      return mediapipe::InternalError(absl::StrCat(
           "Input vector size: ", input.size(),
           " doesn't match condition vector size: ", filter_by.size()));
     }
@@ -93,13 +93,13 @@ class FilterCollectionCalculator : public CalculatorBase {
       }
     }
     cc->Outputs().Tag("ITERABLE").Add(output.release(), cc->InputTimestamp());
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
   template <typename IterableU>
-  ::mediapipe::Status FilterCollection(std::false_type, CalculatorContext* cc,
-                                       const std::vector<bool>& filter_by) {
-    return ::mediapipe::InternalError(
+  mediapipe::Status FilterCollection(std::false_type, CalculatorContext* cc,
+                                     const std::vector<bool>& filter_by) {
+    return mediapipe::InternalError(
         "Cannot copy input collection to filter it.");
   }
 };
diff --git a/mediapipe/calculators/util/labels_to_render_data_calculator.cc b/mediapipe/calculators/util/labels_to_render_data_calculator.cc
index fafedba5b..2e63d29f7 100644
--- a/mediapipe/calculators/util/labels_to_render_data_calculator.cc
+++ b/mediapipe/calculators/util/labels_to_render_data_calculator.cc
@@ -59,9 +59,9 @@ constexpr float kFontHeightScale = 1.25f;
 // }
 class LabelsToRenderDataCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc);
-  ::mediapipe::Status Open(CalculatorContext* cc) override;
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
+  static mediapipe::Status GetContract(CalculatorContract* cc);
+  mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
  private:
   LabelsToRenderDataCalculatorOptions options_;
@@ -73,7 +73,7 @@ class LabelsToRenderDataCalculator : public CalculatorBase {
 };
 REGISTER_CALCULATOR(LabelsToRenderDataCalculator);
-::mediapipe::Status LabelsToRenderDataCalculator::GetContract(
+mediapipe::Status LabelsToRenderDataCalculator::GetContract(
     CalculatorContract* cc) {
   if (cc->Inputs().HasTag("CLASSIFICATIONS")) {
     cc->Inputs().Tag("CLASSIFICATIONS").Set<ClassificationList>();
@@ -89,26 +89,25 @@ REGISTER_CALCULATOR(LabelsToRenderDataCalculator);
     cc->Inputs().Tag("VIDEO_PRESTREAM").Set<VideoHeader>();
   }
   cc->Outputs().Tag("RENDER_DATA").Set<RenderData>();
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
-::mediapipe::Status LabelsToRenderDataCalculator::Open(CalculatorContext* cc) {
+mediapipe::Status LabelsToRenderDataCalculator::Open(CalculatorContext* cc) {
   cc->SetOffset(TimestampDiff(0));
   options_ = cc->Options<LabelsToRenderDataCalculatorOptions>();
   num_colors_ = options_.color_size();
   label_height_px_ = std::ceil(options_.font_height_px() * kFontHeightScale);
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
-::mediapipe::Status LabelsToRenderDataCalculator::Process(
-    CalculatorContext* cc) {
+mediapipe::Status LabelsToRenderDataCalculator::Process(CalculatorContext* cc) {
   if (cc->Inputs().HasTag("VIDEO_PRESTREAM") &&
       cc->InputTimestamp() == Timestamp::PreStream()) {
     const VideoHeader& video_header =
         cc->Inputs().Tag("VIDEO_PRESTREAM").Get<VideoHeader>();
     video_width_ = video_header.width;
     video_height_ = video_header.height;
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   } else {
     CHECK_EQ(options_.location(), LabelsToRenderDataCalculatorOptions::TOP_LEFT)
         << "Only TOP_LEFT is supported without VIDEO_PRESTREAM.";
@@ -180,6 +179,6 @@ REGISTER_CALCULATOR(LabelsToRenderDataCalculator);
       .Tag("RENDER_DATA")
       .AddPacket(MakePacket<RenderData>(render_data).At(cc->InputTimestamp()));
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 }  // namespace mediapipe
diff --git a/mediapipe/calculators/util/landmark_letterbox_removal_calculator.cc b/mediapipe/calculators/util/landmark_letterbox_removal_calculator.cc
index 737c44100..d07f76e1e 100644
--- a/mediapipe/calculators/util/landmark_letterbox_removal_calculator.cc
+++ b/mediapipe/calculators/util/landmark_letterbox_removal_calculator.cc
@@ -64,7 +64,7 @@ constexpr char kLetterboxPaddingTag[] = "LETTERBOX_PADDING";
 // }
 class LandmarkLetterboxRemovalCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     RET_CHECK(cc->Inputs().HasTag(kLandmarksTag) &&
               cc->Inputs().HasTag(kLetterboxPaddingTag))
         << "Missing one or more input streams.";
@@ -84,18 +84,18 @@ class LandmarkLetterboxRemovalCalculator : public CalculatorBase {
       cc->Outputs().Get(id).Set<NormalizedLandmarkList>();
     }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
-  ::mediapipe::Status Open(CalculatorContext* cc) override {
+  mediapipe::Status Open(CalculatorContext* cc) override {
     cc->SetOffset(TimestampDiff(0));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
-  ::mediapipe::Status Process(CalculatorContext* cc) override {
+  mediapipe::Status Process(CalculatorContext* cc) override {
     if (cc->Inputs().Tag(kLetterboxPaddingTag).IsEmpty()) {
-      return ::mediapipe::OkStatus();
+      return mediapipe::OkStatus();
     }
     const auto& letterbox_padding =
         cc->Inputs().Tag(kLetterboxPaddingTag).Get<std::array<float, 4>>();
@@ -124,22 +124,17 @@ class LandmarkLetterboxRemovalCalculator : public CalculatorBase {
       const float new_y = (landmark.y() - top) / (1.0f - top_and_bottom);
       const float new_z =
           landmark.z() / (1.0f - left_and_right);  // Scale Z coordinate as X.
-
+      *new_landmark = landmark;
       new_landmark->set_x(new_x);
       new_landmark->set_y(new_y);
-      // Keep z-coord as is.
       new_landmark->set_z(new_z);
-      // Keep visibility as is.
-      new_landmark->set_visibility(landmark.visibility());
-      // Keep presence as is.
-      new_landmark->set_presence(landmark.presence());
     }
     cc->Outputs().Get(output_id).AddPacket(
         MakePacket<NormalizedLandmarkList>(output_landmarks)
             .At(cc->InputTimestamp()));
   }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
  }
};
REGISTER_CALCULATOR(LandmarkLetterboxRemovalCalculator);
diff --git a/mediapipe/calculators/util/landmark_projection_calculator.cc b/mediapipe/calculators/util/landmark_projection_calculator.cc
index dacb4ab50..aaf28c02a 100644
--- a/mediapipe/calculators/util/landmark_projection_calculator.cc
+++ b/mediapipe/calculators/util/landmark_projection_calculator.cc
@@ -60,7 +60,7 @@ constexpr char kRectTag[] = "NORM_RECT";
 // }
 class LandmarkProjectionCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     RET_CHECK(cc->Inputs().HasTag(kLandmarksTag) &&
               cc->Inputs().HasTag(kRectTag))
         << "Missing one or more input streams.";
@@ -80,18 +80,18 @@ class LandmarkProjectionCalculator : public CalculatorBase {
       cc->Outputs().Get(id).Set<NormalizedLandmarkList>();
     }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
-  ::mediapipe::Status Open(CalculatorContext* cc) override {
+  mediapipe::Status Open(CalculatorContext* cc) override {
     cc->SetOffset(TimestampDiff(0));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
-  ::mediapipe::Status Process(CalculatorContext* cc) override {
+  mediapipe::Status Process(CalculatorContext* cc) override {
     if (cc->Inputs().Tag(kRectTag).IsEmpty()) {
-      return ::mediapipe::OkStatus();
+      return mediapipe::OkStatus();
     }
     const auto& input_rect = cc->Inputs().Tag(kRectTag).Get<NormalizedRect>();
@@ -126,20 +126,17 @@ class LandmarkProjectionCalculator : public CalculatorBase {
       const float new_z =
          landmark.z() * input_rect.width();  // Scale Z coordinate as X.
+      *new_landmark = landmark;
       new_landmark->set_x(new_x);
       new_landmark->set_y(new_y);
       new_landmark->set_z(new_z);
-      // Keep visibility as is.
-      new_landmark->set_visibility(landmark.visibility());
-      // Keep presence as is.
-      new_landmark->set_presence(landmark.presence());
     }
     cc->Outputs().Get(output_id).AddPacket(
         MakePacket<NormalizedLandmarkList>(output_landmarks)
            .At(cc->InputTimestamp()));
   }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
  }
};
REGISTER_CALCULATOR(LandmarkProjectionCalculator);
diff --git a/mediapipe/calculators/util/landmark_visibility_calculator.cc b/mediapipe/calculators/util/landmark_visibility_calculator.cc
new file mode 100644
index 000000000..e2239a5ee
--- /dev/null
+++ b/mediapipe/calculators/util/landmark_visibility_calculator.cc
@@ -0,0 +1,86 @@
+// Copyright 2020 The MediaPipe Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "mediapipe/framework/calculator_framework.h"
+#include "mediapipe/framework/formats/landmark.pb.h"
+#include "mediapipe/framework/port/ret_check.h"
+
+namespace mediapipe {
+
+namespace {
+
+constexpr char kNormalizedLandmarksTag[] = "NORM_LANDMARKS";
+constexpr char kVisibilityTag[] = "VISIBILITY";
+
+}  // namespace
+
+// A calculator to extract visibility from the landmark.
+//
+// Inputs:
+//   NORM_LANDMARKS: A NormalizedLandmarkList with only a single landmark to
+//     take visibility from. It's a list and not a single landmark because the
+//     split/concatenate calculators work with lists.
+//
+// Outputs:
+//   VISIBILITY: Float visibility of the given landmark.
+//
+// Example config:
+//   node {
+//     calculator: "LandmarkVisibilityCalculator"
+//     input_stream: "NORM_LANDMARKS:landmarks"
+//     output_stream: "VISIBILITY:visibility"
+//   }
+//
+class LandmarkVisibilityCalculator : public CalculatorBase {
+ public:
+  static mediapipe::Status GetContract(CalculatorContract* cc);
+  mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
+};
+REGISTER_CALCULATOR(LandmarkVisibilityCalculator);
+
+mediapipe::Status LandmarkVisibilityCalculator::GetContract(
+    CalculatorContract* cc) {
+  cc->Inputs().Tag(kNormalizedLandmarksTag).Set<NormalizedLandmarkList>();
+  cc->Outputs().Tag(kVisibilityTag).Set<float>();
+
+  return mediapipe::OkStatus();
+}
+
+mediapipe::Status LandmarkVisibilityCalculator::Open(CalculatorContext* cc) {
+  cc->SetOffset(TimestampDiff(0));
+
+  return mediapipe::OkStatus();
+}
+
+mediapipe::Status LandmarkVisibilityCalculator::Process(CalculatorContext* cc) {
+  // Check that landmark is not empty.
+  // Don't emit an empty packet for this timestamp.
+  if (cc->Inputs().Tag(kNormalizedLandmarksTag).IsEmpty()) {
+    return mediapipe::OkStatus();
+  }
+
+  const auto& landmarks =
+      cc->Inputs().Tag(kNormalizedLandmarksTag).Get<NormalizedLandmarkList>();
+  RET_CHECK_EQ(landmarks.landmark_size(), 1);
+  float visibility = landmarks.landmark(0).visibility();
+
+  cc->Outputs()
+      .Tag(kVisibilityTag)
+      .AddPacket(MakePacket<float>(visibility).At(cc->InputTimestamp()));
+
+  return mediapipe::OkStatus();
+}
+
+}  // namespace mediapipe
diff --git a/mediapipe/calculators/util/landmarks_smoothing_calculator.cc b/mediapipe/calculators/util/landmarks_smoothing_calculator.cc
index 751e730f6..f08dfe026 100644
--- a/mediapipe/calculators/util/landmarks_smoothing_calculator.cc
+++ b/mediapipe/calculators/util/landmarks_smoothing_calculator.cc
@@ -28,7 +28,7 @@ constexpr char kNormalizedLandmarksTag[] = "NORM_LANDMARKS";
 constexpr char kImageSizeTag[] = "IMAGE_SIZE";
 constexpr char kNormalizedFilteredLandmarksTag[] = "NORM_FILTERED_LANDMARKS";
-using ::mediapipe::RelativeVelocityFilter;
+using mediapipe::RelativeVelocityFilter;
 // Estimate object scale to use its inverse value as velocity scale for
 // RelativeVelocityFilter. If value will be too small (less than
@@ -61,23 +61,23 @@ class LandmarksFilter {
  public:
   virtual ~LandmarksFilter() = default;
-  virtual ::mediapipe::Status Reset() { return ::mediapipe::OkStatus(); }
+  virtual mediapipe::Status Reset() { return mediapipe::OkStatus(); }
-  virtual ::mediapipe::Status Apply(const NormalizedLandmarkList& in_landmarks,
-                                    const std::pair<int, int>& image_size,
-                                    const absl::Duration& timestamp,
-                                    NormalizedLandmarkList* out_landmarks) = 0;
+  virtual mediapipe::Status Apply(const NormalizedLandmarkList& in_landmarks,
+                                  const std::pair<int, int>& image_size,
+                                  const absl::Duration& timestamp,
+                                  NormalizedLandmarkList* out_landmarks) = 0;
 };
 // Returns landmarks as is without smoothing.
 class NoFilter : public LandmarksFilter {
  public:
-  ::mediapipe::Status Apply(const NormalizedLandmarkList& in_landmarks,
-                            const std::pair<int, int>& image_size,
-                            const absl::Duration& timestamp,
-                            NormalizedLandmarkList* out_landmarks) override {
+  mediapipe::Status Apply(const NormalizedLandmarkList& in_landmarks,
+                          const std::pair<int, int>& image_size,
+                          const absl::Duration& timestamp,
+                          NormalizedLandmarkList* out_landmarks) override {
     *out_landmarks = in_landmarks;
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 };
@@ -90,17 +90,17 @@ class VelocityFilter : public LandmarksFilter {
         velocity_scale_(velocity_scale),
         min_allowed_object_scale_(min_allowed_object_scale) {}
-  ::mediapipe::Status Reset() override {
+  mediapipe::Status Reset() override {
     x_filters_.clear();
     y_filters_.clear();
     z_filters_.clear();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
-  ::mediapipe::Status Apply(const NormalizedLandmarkList& in_landmarks,
-                            const std::pair<int, int>& image_size,
-                            const absl::Duration& timestamp,
-                            NormalizedLandmarkList* out_landmarks) override {
+  mediapipe::Status Apply(const NormalizedLandmarkList& in_landmarks,
+                          const std::pair<int, int>& image_size,
+                          const absl::Duration& timestamp,
+                          NormalizedLandmarkList* out_landmarks) override {
     // Get image size.
    int image_width;
    int image_height;
@@ -113,7 +113,7 @@ class VelocityFilter : public LandmarksFilter {
        GetObjectScale(in_landmarks, image_width, image_height);
    if (object_scale < min_allowed_object_scale_) {
      *out_landmarks = in_landmarks;
-      return ::mediapipe::OkStatus();
+      return mediapipe::OkStatus();
    }
    const float value_scale = 1.0f / object_scale;
@@ -125,6 +125,7 @@ class VelocityFilter : public LandmarksFilter {
      const NormalizedLandmark& in_landmark = in_landmarks.landmark(i);
      NormalizedLandmark* out_landmark = out_landmarks->add_landmark();
+      *out_landmark = in_landmark;
      out_landmark->set_x(x_filters_[i].Apply(timestamp, value_scale,
                                              in_landmark.x() * image_width) /
                          image_width);
@@ -135,24 +136,20 @@ class VelocityFilter : public LandmarksFilter {
      out_landmark->set_z(z_filters_[i].Apply(timestamp, value_scale,
                                              in_landmark.z() * image_width) /
                          image_width);
-      // Keep visibility as is.
-      out_landmark->set_visibility(in_landmark.visibility());
-      // Keep presence as is.
-      out_landmark->set_presence(in_landmark.presence());
    }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
  }
 private:
  // Initializes filters for the first time or after Reset. If initialized then
  // check the size.
-  ::mediapipe::Status InitializeFiltersIfEmpty(const int n_landmarks) {
+  mediapipe::Status InitializeFiltersIfEmpty(const int n_landmarks) {
    if (!x_filters_.empty()) {
      RET_CHECK_EQ(x_filters_.size(), n_landmarks);
      RET_CHECK_EQ(y_filters_.size(), n_landmarks);
      RET_CHECK_EQ(z_filters_.size(), n_landmarks);
-      return ::mediapipe::OkStatus();
+      return mediapipe::OkStatus();
    }
    x_filters_.resize(n_landmarks,
@@ -162,7 +159,7 @@ class VelocityFilter : public LandmarksFilter {
    z_filters_.resize(n_landmarks,
                      RelativeVelocityFilter(window_size_, velocity_scale_));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
  }
  int window_size_;
@@ -205,16 +202,16 @@ class VelocityFilter : public LandmarksFilter {
 //
 class LandmarksSmoothingCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc);
-  ::mediapipe::Status Open(CalculatorContext* cc) override;
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
+  static mediapipe::Status GetContract(CalculatorContract* cc);
+  mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
  private:
  LandmarksFilter* landmarks_filter_;
 };
 REGISTER_CALCULATOR(LandmarksSmoothingCalculator);
-::mediapipe::Status LandmarksSmoothingCalculator::GetContract(
+mediapipe::Status LandmarksSmoothingCalculator::GetContract(
    CalculatorContract* cc) {
  cc->Inputs().Tag(kNormalizedLandmarksTag).Set<NormalizedLandmarkList>();
  cc->Inputs().Tag(kImageSizeTag).Set<std::pair<int, int>>();
@@ -222,10 +219,10 @@ REGISTER_CALCULATOR(LandmarksSmoothingCalculator);
  cc->Outputs()
      .Tag(kNormalizedFilteredLandmarksTag)
      .Set<NormalizedLandmarkList>();
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
-::mediapipe::Status LandmarksSmoothingCalculator::Open(CalculatorContext* cc) {
+mediapipe::Status LandmarksSmoothingCalculator::Open(CalculatorContext* cc) {
  cc->SetOffset(TimestampDiff(0));
  // Pick landmarks filter.
@@ -242,16 +239,15 @@ REGISTER_CALCULATOR(LandmarksSmoothingCalculator);
        << "Landmarks filter is either not specified or not supported";
  }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
-::mediapipe::Status LandmarksSmoothingCalculator::Process(
-    CalculatorContext* cc) {
+mediapipe::Status LandmarksSmoothingCalculator::Process(CalculatorContext* cc) {
  // Check that landmarks are not empty and reset the filter if so.
  // Don't emit an empty packet for this timestamp.
  if (cc->Inputs().Tag(kNormalizedLandmarksTag).IsEmpty()) {
    MP_RETURN_IF_ERROR(landmarks_filter_->Reset());
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
  }
  const auto& in_landmarks =
@@ -269,7 +265,7 @@ REGISTER_CALCULATOR(LandmarksSmoothingCalculator);
      .Tag(kNormalizedFilteredLandmarksTag)
      .Add(out_landmarks.release(), cc->InputTimestamp());
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 }  // namespace mediapipe
diff --git a/mediapipe/calculators/util/landmarks_to_detection_calculator.cc b/mediapipe/calculators/util/landmarks_to_detection_calculator.cc
index 64a7a8cc6..5f9f81061 100644
--- a/mediapipe/calculators/util/landmarks_to_detection_calculator.cc
+++ b/mediapipe/calculators/util/landmarks_to_detection_calculator.cc
@@ -80,17 +80,17 @@ Detection ConvertLandmarksToDetection(const NormalizedLandmarkList& landmarks) {
 // }
 class LandmarksToDetectionCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc);
-  ::mediapipe::Status Open(CalculatorContext* cc) override;
+  static mediapipe::Status GetContract(CalculatorContract* cc);
+  mediapipe::Status Open(CalculatorContext* cc) override;
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
  private:
  ::mediapipe::LandmarksToDetectionCalculatorOptions options_;
 };
 REGISTER_CALCULATOR(LandmarksToDetectionCalculator);
-::mediapipe::Status LandmarksToDetectionCalculator::GetContract(
+mediapipe::Status LandmarksToDetectionCalculator::GetContract(
    CalculatorContract* cc) {
  RET_CHECK(cc->Inputs().HasTag(kNormalizedLandmarksTag));
  RET_CHECK(cc->Outputs().HasTag(kDetectionTag));
@@ -98,18 +98,17 @@ REGISTER_CALCULATOR(LandmarksToDetectionCalculator);
  cc->Inputs().Tag(kNormalizedLandmarksTag).Set<NormalizedLandmarkList>();
  cc->Outputs().Tag(kDetectionTag).Set<Detection>();
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
-::mediapipe::Status LandmarksToDetectionCalculator::Open(
-    CalculatorContext* cc) {
+mediapipe::Status LandmarksToDetectionCalculator::Open(CalculatorContext* cc) {
  cc->SetOffset(TimestampDiff(0));
  options_ = cc->Options<::mediapipe::LandmarksToDetectionCalculatorOptions>();
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
-::mediapipe::Status LandmarksToDetectionCalculator::Process(
+mediapipe::Status LandmarksToDetectionCalculator::Process(
    CalculatorContext* cc) {
  const auto& landmarks =
      cc->Inputs().Tag(kNormalizedLandmarksTag).Get<NormalizedLandmarkList>();
@@ -134,7 +133,7 @@ REGISTER_CALCULATOR(LandmarksToDetectionCalculator);
      .Tag(kDetectionTag)
      .Add(detection.release(), cc->InputTimestamp());
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 }  // namespace mediapipe
diff --git a/mediapipe/calculators/util/landmarks_to_floats_calculator.cc b/mediapipe/calculators/util/landmarks_to_floats_calculator.cc
index b86542dd5..edfbc93f1 100644
--- a/mediapipe/calculators/util/landmarks_to_floats_calculator.cc
+++ b/mediapipe/calculators/util/landmarks_to_floats_calculator.cc
@@ -62,7 +62,7 @@ constexpr char kMatrixTag[] = "MATRIX";
 // }
 class LandmarksToFloatsCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
    cc->Inputs().Tag(kLandmarksTag).Set<NormalizedLandmarkList>();
    RET_CHECK(cc->Outputs().HasTag(kFloatsTag) ||
              cc->Outputs().HasTag(kMatrixTag));
@@ -73,10 +73,10 @@ class LandmarksToFloatsCalculator : public CalculatorBase {
      cc->Outputs().Tag(kMatrixTag).Set<Matrix>();
    }
-
return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->SetOffset(TimestampDiff(0)); const auto& options = cc->Options<::mediapipe::LandmarksToFloatsCalculatorOptions>(); @@ -84,13 +84,13 @@ class LandmarksToFloatsCalculator : public CalculatorBase { // Currently number of dimensions must be within [1, 3]. RET_CHECK_GE(num_dimensions_, 1); RET_CHECK_LE(num_dimensions_, 3); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { // Only process if there's input landmarks. if (cc->Inputs().Tag(kLandmarksTag).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } const auto& input_landmarks = @@ -128,7 +128,7 @@ class LandmarksToFloatsCalculator : public CalculatorBase { .Tag(kMatrixTag) .Add(output_matrix.release(), cc->InputTimestamp()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: diff --git a/mediapipe/calculators/util/landmarks_to_render_data_calculator.cc b/mediapipe/calculators/util/landmarks_to_render_data_calculator.cc index c075f04e5..f0fd165fc 100644 --- a/mediapipe/calculators/util/landmarks_to_render_data_calculator.cc +++ b/mediapipe/calculators/util/landmarks_to_render_data_calculator.cc @@ -106,12 +106,14 @@ void AddConnectionsWithDepth(const LandmarkListType& landmarks, for (int i = 0; i < landmark_connections.size(); i += 2) { const auto& ld0 = landmarks.landmark(landmark_connections[i]); const auto& ld1 = landmarks.landmark(landmark_connections[i + 1]); - if (utilize_visibility && (ld0.visibility() < visibility_threshold || - ld1.visibility() < visibility_threshold)) { + if (utilize_visibility && + ((ld0.has_visibility() && ld0.visibility() < visibility_threshold) || + (ld1.has_visibility() && ld1.visibility() < visibility_threshold))) { continue; } - if (utilize_presence && (ld0.presence() < presence_threshold || - ld1.presence() < presence_threshold)) { + if (utilize_presence && + ((ld0.has_presence() && ld0.presence() < presence_threshold) || + (ld1.has_presence() && ld1.presence() < presence_threshold))) { continue; } const int gray_val1 = @@ -149,12 +151,14 @@ void AddConnections(const LandmarkListType& landmarks, for (int i = 0; i < landmark_connections.size(); i += 2) { const auto& ld0 = landmarks.landmark(landmark_connections[i]); const auto& ld1 = landmarks.landmark(landmark_connections[i + 1]); - if (utilize_visibility && (ld0.visibility() < visibility_threshold || - ld1.visibility() < visibility_threshold)) { + if (utilize_visibility && + ((ld0.has_visibility() && ld0.visibility() < visibility_threshold) || + (ld1.has_visibility() && ld1.visibility() < visibility_threshold))) { continue; } - if (utilize_presence && (ld0.presence() < presence_threshold || - ld1.presence() < presence_threshold)) { + if (utilize_presence && + ((ld0.has_presence() && ld0.presence() < presence_threshold) || + (ld1.has_presence() && ld1.presence() < presence_threshold))) { continue; } AddConnectionToRenderData(ld0, ld1, connection_color, @@ -173,7 +177,7 @@ RenderAnnotation* AddPointRenderData(const Color& landmark_color, } // namespace -::mediapipe::Status LandmarksToRenderDataCalculator::GetContract( +mediapipe::Status LandmarksToRenderDataCalculator::GetContract( CalculatorContract* cc) { RET_CHECK(cc->Inputs().HasTag(kLandmarksTag) || 
            cc->Inputs().HasTag(kNormLandmarksTag))
@@ -193,11 +197,10 @@ RenderAnnotation* AddPointRenderData(const Color& landmark_color,
    cc->Inputs().Tag(kRenderScaleTag).Set<float>();
  }
  cc->Outputs().Tag(kRenderDataTag).Set<RenderData>();
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
-::mediapipe::Status LandmarksToRenderDataCalculator::Open(
-    CalculatorContext* cc) {
+mediapipe::Status LandmarksToRenderDataCalculator::Open(CalculatorContext* cc) {
  cc->SetOffset(TimestampDiff(0));
  options_ = cc->Options<LandmarksToRenderDataCalculatorOptions>();
@@ -209,20 +212,20 @@ RenderAnnotation* AddPointRenderData(const Color& landmark_color,
    landmark_connections_.push_back(options_.landmark_connections(i));
  }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
-::mediapipe::Status LandmarksToRenderDataCalculator::Process(
+mediapipe::Status LandmarksToRenderDataCalculator::Process(
    CalculatorContext* cc) {
  // Check that landmarks are not empty and skip rendering if so.
  // Don't emit an empty packet for this timestamp.
  if (cc->Inputs().HasTag(kLandmarksTag) &&
      cc->Inputs().Tag(kLandmarksTag).IsEmpty()) {
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
  }
  if (cc->Inputs().HasTag(kNormLandmarksTag) &&
      cc->Inputs().Tag(kNormLandmarksTag).IsEmpty()) {
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
  }
  auto render_data = absl::make_unique<RenderData>();
@@ -263,12 +266,12 @@ RenderAnnotation* AddPointRenderData(const Color& landmark_color,
  for (int i = 0; i < landmarks.landmark_size(); ++i) {
    const Landmark& landmark = landmarks.landmark(i);
-    if (options_.utilize_visibility() &&
+    if (options_.utilize_visibility() && landmark.has_visibility() &&
        landmark.visibility() < options_.visibility_threshold()) {
      continue;
    }
-    if (options_.utilize_presence() &&
+    if (options_.utilize_presence() && landmark.has_presence() &&
        landmark.presence() < options_.presence_threshold()) {
      continue;
    }
@@ -312,11 +315,11 @@ RenderAnnotation* AddPointRenderData(const Color& landmark_color,
  for (int i = 0; i < landmarks.landmark_size(); ++i) {
    const NormalizedLandmark& landmark = landmarks.landmark(i);
-    if (options_.utilize_visibility() &&
+    if (options_.utilize_visibility() && landmark.has_visibility() &&
        landmark.visibility() < options_.visibility_threshold()) {
      continue;
    }
-    if (options_.utilize_presence() &&
+    if (options_.utilize_presence() && landmark.has_presence() &&
        landmark.presence() < options_.presence_threshold()) {
      continue;
    }
@@ -338,7 +341,7 @@ RenderAnnotation* AddPointRenderData(const Color& landmark_color,
  cc->Outputs()
      .Tag(kRenderDataTag)
      .Add(render_data.release(), cc->InputTimestamp());
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 REGISTER_CALCULATOR(LandmarksToRenderDataCalculator);
diff --git a/mediapipe/calculators/util/landmarks_to_render_data_calculator.h b/mediapipe/calculators/util/landmarks_to_render_data_calculator.h
index 8f45955f4..ce31ef9c7 100644
--- a/mediapipe/calculators/util/landmarks_to_render_data_calculator.h
+++ b/mediapipe/calculators/util/landmarks_to_render_data_calculator.h
@@ -54,11 +54,11 @@ class LandmarksToRenderDataCalculator : public CalculatorBase {
  LandmarksToRenderDataCalculator& operator=(
      const LandmarksToRenderDataCalculator&) = delete;
-  static ::mediapipe::Status GetContract(CalculatorContract* cc);
+  static mediapipe::Status GetContract(CalculatorContract* cc);
-  ::mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Open(CalculatorContext* cc) override;
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
 protected:
  ::mediapipe::LandmarksToRenderDataCalculatorOptions options_;
diff --git a/mediapipe/calculators/util/local_file_contents_calculator.cc b/mediapipe/calculators/util/local_file_contents_calculator.cc
index 254a552c9..2883a961a 100644
--- a/mediapipe/calculators/util/local_file_contents_calculator.cc
+++ b/mediapipe/calculators/util/local_file_contents_calculator.cc
@@ -52,7 +52,7 @@ constexpr char kContentsTag[] = "CONTENTS";
 // }
 class LocalFileContentsCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
    RET_CHECK(cc->InputSidePackets().HasTag(kFilePathTag))
        << "Missing PATH input side packet(s)";
    RET_CHECK(cc->OutputSidePackets().HasTag(kContentsTag))
@@ -72,10 +72,10 @@ class LocalFileContentsCalculator : public CalculatorBase {
      cc->OutputSidePackets().Get(id).Set<std::string>();
    }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
  }
-  ::mediapipe::Status Open(CalculatorContext* cc) override {
+  mediapipe::Status Open(CalculatorContext* cc) override {
    CollectionItemId input_id = cc->InputSidePackets().BeginId(kFilePathTag);
    CollectionItemId output_id = cc->OutputSidePackets().BeginId(kContentsTag);
    // Number of inputs and outputs is the same according to the contract.
@@ -90,11 +90,11 @@ class LocalFileContentsCalculator : public CalculatorBase {
      cc->OutputSidePackets().Get(output_id).Set(
          MakePacket<std::string>(std::move(contents)));
    }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
  }
-  ::mediapipe::Status Process(CalculatorContext* cc) override {
-    return ::mediapipe::OkStatus();
+  mediapipe::Status Process(CalculatorContext* cc) override {
+    return mediapipe::OkStatus();
  }
 };
diff --git a/mediapipe/calculators/util/local_file_pattern_contents_calculator.cc b/mediapipe/calculators/util/local_file_pattern_contents_calculator.cc
index 04fe3ac1c..8bfb49af2 100644
--- a/mediapipe/calculators/util/local_file_pattern_contents_calculator.cc
+++ b/mediapipe/calculators/util/local_file_pattern_contents_calculator.cc
@@ -34,22 +34,22 @@ namespace mediapipe {
 // }
 class LocalFilePatternContentsCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
    cc->InputSidePackets().Tag("FILE_DIRECTORY").Set<std::string>();
    cc->InputSidePackets().Tag("FILE_SUFFIX").Set<std::string>();
    cc->Outputs().Tag("CONTENTS").Set<std::string>();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
  }
-  ::mediapipe::Status Open(CalculatorContext* cc) override {
-    MP_RETURN_IF_ERROR(::mediapipe::file::MatchFileTypeInDirectory(
+  mediapipe::Status Open(CalculatorContext* cc) override {
+    MP_RETURN_IF_ERROR(mediapipe::file::MatchFileTypeInDirectory(
        cc->InputSidePackets().Tag("FILE_DIRECTORY").Get<std::string>(),
        cc->InputSidePackets().Tag("FILE_SUFFIX").Get<std::string>(),
        &filenames_));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
  }
-  ::mediapipe::Status Process(CalculatorContext* cc) override {
+  mediapipe::Status Process(CalculatorContext* cc) override {
    if (current_output_ < filenames_.size()) {
      auto contents = absl::make_unique<std::string>();
      LOG(INFO) << filenames_[current_output_];
@@ -62,7 +62,7 @@ class LocalFilePatternContentsCalculator : public CalculatorBase {
    } else {
      return tool::StatusStop();
    }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
  }
 private:
diff --git a/mediapipe/calculators/util/logic_calculator.cc b/mediapipe/calculators/util/logic_calculator.cc
index 3b6ef81bd..3b6a3e6c8 100644
--- a/mediapipe/calculators/util/logic_calculator.cc
+++ b/mediapipe/calculators/util/logic_calculator.cc
@@ -45,7 +45,7 @@ using mediapipe::LogicCalculatorOptions;
 // }
 class LogicCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
    for (int k = 0; k < cc->Inputs().NumEntries(""); ++k) {
      cc->Inputs().Index(k).Set<bool>();
    }
@@ -58,13 +58,13 @@ class LogicCalculator : public CalculatorBase {
              1);
    RET_CHECK_EQ(cc->Outputs().NumEntries(""), 1);
    cc->Outputs().Index(0).Set<bool>();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
  }
-  ::mediapipe::Status Open(CalculatorContext* cc) override {
+  mediapipe::Status Open(CalculatorContext* cc) override {
    options_ = cc->Options<LogicCalculatorOptions>();
    cc->SetOffset(TimestampDiff(0));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
  }
  bool LogicalOp(bool b1, bool b2) {
@@ -79,7 +79,7 @@ class LogicCalculator : public CalculatorBase {
    return false;
  }
-  ::mediapipe::Status Process(CalculatorContext* cc) override {
+  mediapipe::Status Process(CalculatorContext* cc) override {
    bool result = options_.op() == LogicCalculatorOptions::AND ? true : false;
    for (int k = 0; k < options_.input_value_size(); ++k) {
      result = LogicalOp(result, options_.input_value(k));
@@ -94,7 +94,7 @@ class LogicCalculator : public CalculatorBase {
      result = !result;
    }
    cc->Outputs().Index(0).Add(new bool(result), cc->InputTimestamp());
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
  }
 private:
diff --git a/mediapipe/calculators/util/non_max_suppression_calculator.cc b/mediapipe/calculators/util/non_max_suppression_calculator.cc
index 1ea1b3d6b..048ba33fe 100644
--- a/mediapipe/calculators/util/non_max_suppression_calculator.cc
+++ b/mediapipe/calculators/util/non_max_suppression_calculator.cc
@@ -154,7 +154,7 @@ class NonMaxSuppressionCalculator : public CalculatorBase {
  NonMaxSuppressionCalculator() = default;
  ~NonMaxSuppressionCalculator() override = default;
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
    const auto& options = cc->Options<NonMaxSuppressionCalculatorOptions>();
    if (cc->Inputs().HasTag(kImageTag)) {
      cc->Inputs().Tag(kImageTag).Set<ImageFrame>();
@@ -163,10 +163,10 @@ class NonMaxSuppressionCalculator : public CalculatorBase {
      cc->Inputs().Index(k).Set<Detections>();
    }
    cc->Outputs().Index(0).Set<Detections>();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
  }
-  ::mediapipe::Status Open(CalculatorContext* cc) override {
+  mediapipe::Status Open(CalculatorContext* cc) override {
    cc->SetOffset(TimestampDiff(0));
    options_ = cc->Options<NonMaxSuppressionCalculatorOptions>();
@@ -176,10 +176,10 @@ class NonMaxSuppressionCalculator : public CalculatorBase {
        << "max_num_detections=0 is not a valid value. Please choose a "
        << "positive number if you want to limit the number of output "
        << "detections, or set -1 if you do not want any limit.";
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
  }
-  ::mediapipe::Status Process(CalculatorContext* cc) override {
+  mediapipe::Status Process(CalculatorContext* cc) override {
    // Add all input detections to the same vector.
Detections input_detections; for (int i = 0; i < options_.num_detection_streams(); ++i) { @@ -199,7 +199,7 @@ class NonMaxSuppressionCalculator : public CalculatorBase { if (options_.return_empty_detections()) { cc->Outputs().Index(0).Add(new Detections(), cc->InputTimestamp()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Remove all but the maximum scoring label from each input detection. This @@ -244,7 +244,7 @@ class NonMaxSuppressionCalculator : public CalculatorBase { cc->Outputs().Index(0).Add(retained_detections, cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: diff --git a/mediapipe/calculators/util/packet_frequency_calculator.cc b/mediapipe/calculators/util/packet_frequency_calculator.cc index f63c72fdc..cc407efd8 100644 --- a/mediapipe/calculators/util/packet_frequency_calculator.cc +++ b/mediapipe/calculators/util/packet_frequency_calculator.cc @@ -70,27 +70,26 @@ class PacketFrequencyCalculator : public CalculatorBase { public: PacketFrequencyCalculator() {} - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: // Outputs the given framerate on the specified output stream as a // PacketFrequency proto. - ::mediapipe::Status OutputPacketFrequency(CalculatorContext* cc, - int stream_id, double framerate_hz, - const std::string& label, - const Timestamp& input_timestamp); + mediapipe::Status OutputPacketFrequency(CalculatorContext* cc, int stream_id, + double framerate_hz, + const std::string& label, + const Timestamp& input_timestamp); // Adds the input timestamp in the particular stream's timestamp buffer. - ::mediapipe::Status AddPacketTimestampForStream(int stream_id, - int64 timestamp); + mediapipe::Status AddPacketTimestampForStream(int stream_id, int64 timestamp); // For the specified input stream, clears timestamps from buffer that are // older than the configured time_window_sec. - ::mediapipe::Status ClearOldpacketTimestamps(int stream_id, - int64 current_timestamp); + mediapipe::Status ClearOldpacketTimestamps(int stream_id, + int64 current_timestamp); // Options for the calculator. 
PacketFrequencyCalculatorOptions options_; @@ -106,17 +105,17 @@ class PacketFrequencyCalculator : public CalculatorBase { }; REGISTER_CALCULATOR(PacketFrequencyCalculator); -::mediapipe::Status PacketFrequencyCalculator::GetContract( +mediapipe::Status PacketFrequencyCalculator::GetContract( CalculatorContract* cc) { RET_CHECK_EQ(cc->Outputs().NumEntries(), cc->Inputs().NumEntries()); for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { cc->Inputs().Index(i).SetAny(); cc->Outputs().Index(i).Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status PacketFrequencyCalculator::Open(CalculatorContext* cc) { +mediapipe::Status PacketFrequencyCalculator::Open(CalculatorContext* cc) { options_ = cc->Options(); RET_CHECK_EQ(options_.label_size(), cc->Inputs().NumEntries()); RET_CHECK_GT(options_.time_window_sec(), 0); @@ -128,10 +127,10 @@ REGISTER_CALCULATOR(PacketFrequencyCalculator); previous_timestamps_for_stream_id_[i] = {}; first_timestamp_for_stream_id_usec_[i] = -1; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status PacketFrequencyCalculator::Process(CalculatorContext* cc) { +mediapipe::Status PacketFrequencyCalculator::Process(CalculatorContext* cc) { for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { if (cc->Inputs().Index(i).IsEmpty()) { continue; @@ -165,26 +164,26 @@ REGISTER_CALCULATOR(PacketFrequencyCalculator); options_.label(i), cc->InputTimestamp()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status PacketFrequencyCalculator::AddPacketTimestampForStream( +mediapipe::Status PacketFrequencyCalculator::AddPacketTimestampForStream( int stream_id, int64 timestamp_usec) { if (previous_timestamps_for_stream_id_.find(stream_id) == previous_timestamps_for_stream_id_.end()) { - return ::mediapipe::InvalidArgumentError("Input stream id is invalid"); + return mediapipe::InvalidArgumentError("Input stream id is invalid"); } previous_timestamps_for_stream_id_[stream_id].push_back(timestamp_usec); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status PacketFrequencyCalculator::ClearOldpacketTimestamps( +mediapipe::Status PacketFrequencyCalculator::ClearOldpacketTimestamps( int stream_id, int64 current_timestamp_usec) { if (previous_timestamps_for_stream_id_.find(stream_id) == previous_timestamps_for_stream_id_.end()) { - return ::mediapipe::InvalidArgumentError("Input stream id is invalid"); + return mediapipe::InvalidArgumentError("Input stream id is invalid"); } auto& timestamps_buffer = previous_timestamps_for_stream_id_[stream_id]; @@ -199,10 +198,10 @@ REGISTER_CALCULATOR(PacketFrequencyCalculator); }), timestamps_buffer.end()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status PacketFrequencyCalculator::OutputPacketFrequency( +mediapipe::Status PacketFrequencyCalculator::OutputPacketFrequency( CalculatorContext* cc, int stream_id, double framerate_hz, const std::string& label, const Timestamp& input_timestamp) { auto packet_frequency = absl::make_unique(); @@ -212,7 +211,7 @@ REGISTER_CALCULATOR(PacketFrequencyCalculator); cc->Outputs().Index(stream_id).Add(packet_frequency.release(), input_timestamp); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/util/packet_latency_calculator.cc b/mediapipe/calculators/util/packet_latency_calculator.cc index 162cc9356..19ff6fc24 100644 --- 
a/mediapipe/calculators/util/packet_latency_calculator.cc +++ b/mediapipe/calculators/util/packet_latency_calculator.cc @@ -101,10 +101,10 @@ class PacketLatencyCalculator : public CalculatorBase { public: PacketLatencyCalculator() {} - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: // Resets the histogram and running average variables by initializing them to @@ -139,8 +139,7 @@ class PacketLatencyCalculator : public CalculatorBase { }; REGISTER_CALCULATOR(PacketLatencyCalculator); -::mediapipe::Status PacketLatencyCalculator::GetContract( - CalculatorContract* cc) { +mediapipe::Status PacketLatencyCalculator::GetContract(CalculatorContract* cc) { RET_CHECK_GT(cc->Inputs().NumEntries(), 1); // Input and output streams. @@ -161,7 +160,7 @@ REGISTER_CALCULATOR(PacketLatencyCalculator); .Set>(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void PacketLatencyCalculator::ResetStatistics() { @@ -178,7 +177,7 @@ void PacketLatencyCalculator::ResetStatistics() { } } -::mediapipe::Status PacketLatencyCalculator::Open(CalculatorContext* cc) { +mediapipe::Status PacketLatencyCalculator::Open(CalculatorContext* cc) { options_ = cc->Options(); num_packet_streams_ = cc->Inputs().NumEntries() - 1; @@ -225,10 +224,10 @@ void PacketLatencyCalculator::ResetStatistics() { ::mediapipe::MonotonicClock::CreateSynchronizedMonotonicClock()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status PacketLatencyCalculator::Process(CalculatorContext* cc) { +mediapipe::Status PacketLatencyCalculator::Process(CalculatorContext* cc) { // Record first process timestamp if this is the first call. 
if (first_process_time_usec_ < 0 && !cc->Inputs().Tag(kReferenceSignalTag).IsEmpty()) { @@ -239,7 +238,7 @@ void PacketLatencyCalculator::ResetStatistics() { if (first_process_time_usec_ < 0) { LOG(WARNING) << "No reference packet received."; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } if (options_.reset_duration_usec() > 0) { @@ -293,7 +292,7 @@ void PacketLatencyCalculator::ResetStatistics() { } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/util/rect_projection_calculator.cc b/mediapipe/calculators/util/rect_projection_calculator.cc index 0b0ec9468..364f26629 100644 --- a/mediapipe/calculators/util/rect_projection_calculator.cc +++ b/mediapipe/calculators/util/rect_projection_calculator.cc @@ -47,29 +47,29 @@ constexpr char kNormReferenceRectTag[] = "NORM_REFERENCE_RECT"; // class RectProjectionCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + static mediapipe::Status GetContract(CalculatorContract* cc); + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; }; REGISTER_CALCULATOR(RectProjectionCalculator); -::mediapipe::Status RectProjectionCalculator::GetContract( +mediapipe::Status RectProjectionCalculator::GetContract( CalculatorContract* cc) { cc->Inputs().Tag(kNormRectTag).Set(); cc->Inputs().Tag(kNormReferenceRectTag).Set(); cc->Outputs().Tag(kNormRectTag).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status RectProjectionCalculator::Open(CalculatorContext* cc) { +mediapipe::Status RectProjectionCalculator::Open(CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status RectProjectionCalculator::Process(CalculatorContext* cc) { +mediapipe::Status RectProjectionCalculator::Process(CalculatorContext* cc) { if (cc->Inputs().Tag(kNormRectTag).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } const auto& rect = cc->Inputs().Tag(kNormRectTag).Get(); @@ -101,7 +101,7 @@ REGISTER_CALCULATOR(RectProjectionCalculator); cc->Outputs().Tag(kNormRectTag).Add(new_rect.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/util/rect_to_render_data_calculator.cc b/mediapipe/calculators/util/rect_to_render_data_calculator.cc index 365d364dc..4b85e232a 100644 --- a/mediapipe/calculators/util/rect_to_render_data_calculator.cc +++ b/mediapipe/calculators/util/rect_to_render_data_calculator.cc @@ -37,7 +37,12 @@ RenderAnnotation::Rectangle* NewRect( annotation->mutable_color()->set_b(options.color().b()); annotation->set_thickness(options.thickness()); - return options.filled() + return options.oval() ? options.filled() + ? annotation->mutable_filled_oval() + ->mutable_oval() + ->mutable_rectangle() + : annotation->mutable_oval()->mutable_rectangle() + : options.filled() ? 
annotation->mutable_filled_rectangle()->mutable_rectangle() : annotation->mutable_rectangle(); } @@ -89,18 +94,18 @@ void SetRect(bool normalized, double xmin, double ymin, double width, // } class RectToRenderDataCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: RectToRenderDataCalculatorOptions options_; }; REGISTER_CALCULATOR(RectToRenderDataCalculator); -::mediapipe::Status RectToRenderDataCalculator::GetContract( +mediapipe::Status RectToRenderDataCalculator::GetContract( CalculatorContract* cc) { RET_CHECK_EQ((cc->Inputs().HasTag(kNormRectTag) ? 1 : 0) + (cc->Inputs().HasTag(kRectTag) ? 1 : 0) + @@ -125,18 +130,18 @@ REGISTER_CALCULATOR(RectToRenderDataCalculator); } cc->Outputs().Tag(kRenderDataTag).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status RectToRenderDataCalculator::Open(CalculatorContext* cc) { +mediapipe::Status RectToRenderDataCalculator::Open(CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); options_ = cc->Options(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status RectToRenderDataCalculator::Process(CalculatorContext* cc) { +mediapipe::Status RectToRenderDataCalculator::Process(CalculatorContext* cc) { auto render_data = absl::make_unique(); if (cc->Inputs().HasTag(kNormRectTag) && @@ -180,7 +185,7 @@ REGISTER_CALCULATOR(RectToRenderDataCalculator); .Tag(kRenderDataTag) .Add(render_data.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/util/rect_to_render_data_calculator.proto b/mediapipe/calculators/util/rect_to_render_data_calculator.proto index badc8df44..9b6d5e6ee 100644 --- a/mediapipe/calculators/util/rect_to_render_data_calculator.proto +++ b/mediapipe/calculators/util/rect_to_render_data_calculator.proto @@ -32,4 +32,7 @@ message RectToRenderDataCalculatorOptions { // Thickness of the line (applicable when the rectangle is not filled). optional double thickness = 3 [default = 1.0]; + + // Whether the rendered rectangle should be an oval. 
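+ // A hypothetical node config exercising this field (the stream names and
+ // the usual .ext options extension below are illustrative assumptions, not
+ // part of this change):
+ //   node {
+ //     calculator: "RectToRenderDataCalculator"
+ //     input_stream: "NORM_RECT:rect"
+ //     output_stream: "RENDER_DATA:render_data"
+ //     options {
+ //       [mediapipe.RectToRenderDataCalculatorOptions.ext] { oval: true }
+ //     }
+ //   }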
+ optional bool oval = 4 [default = false]; } diff --git a/mediapipe/calculators/util/rect_to_render_scale_calculator.cc b/mediapipe/calculators/util/rect_to_render_scale_calculator.cc index d55063aa4..fdc209359 100644 --- a/mediapipe/calculators/util/rect_to_render_scale_calculator.cc +++ b/mediapipe/calculators/util/rect_to_render_scale_calculator.cc @@ -51,39 +51,38 @@ constexpr char kRenderScaleTag[] = "RENDER_SCALE"; // } class RectToRenderScaleCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + static mediapipe::Status GetContract(CalculatorContract* cc); + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: RectToRenderScaleCalculatorOptions options_; }; REGISTER_CALCULATOR(RectToRenderScaleCalculator); -::mediapipe::Status RectToRenderScaleCalculator::GetContract( +mediapipe::Status RectToRenderScaleCalculator::GetContract( CalculatorContract* cc) { cc->Inputs().Tag(kNormRectTag).Set(); cc->Inputs().Tag(kImageSizeTag).Set>(); cc->Outputs().Tag(kRenderScaleTag).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status RectToRenderScaleCalculator::Open(CalculatorContext* cc) { +mediapipe::Status RectToRenderScaleCalculator::Open(CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); options_ = cc->Options(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status RectToRenderScaleCalculator::Process( - CalculatorContext* cc) { +mediapipe::Status RectToRenderScaleCalculator::Process(CalculatorContext* cc) { if (cc->Inputs().Tag(kNormRectTag).IsEmpty()) { cc->Outputs() .Tag(kRenderScaleTag) .AddPacket( MakePacket(options_.multiplier()).At(cc->InputTimestamp())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Get image size. @@ -105,7 +104,7 @@ REGISTER_CALCULATOR(RectToRenderScaleCalculator); .Tag(kRenderScaleTag) .AddPacket(MakePacket(render_scale).At(cc->InputTimestamp())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/util/rect_transformation_calculator.cc b/mediapipe/calculators/util/rect_transformation_calculator.cc index 5b42a3499..5132ca8b3 100644 --- a/mediapipe/calculators/util/rect_transformation_calculator.cc +++ b/mediapipe/calculators/util/rect_transformation_calculator.cc @@ -57,10 +57,10 @@ inline float NormalizeRadians(float angle) { // } class RectTransformationCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: RectTransformationCalculatorOptions options_; @@ -72,7 +72,7 @@ class RectTransformationCalculator : public CalculatorBase { }; REGISTER_CALCULATOR(RectTransformationCalculator); -::mediapipe::Status RectTransformationCalculator::GetContract( +mediapipe::Status RectTransformationCalculator::GetContract( CalculatorContract* cc) { RET_CHECK_EQ((cc->Inputs().HasTag(kNormRectTag) ? 1 : 0) + (cc->Inputs().HasTag(kNormRectsTag) ? 
1 : 0) + @@ -100,21 +100,20 @@ REGISTER_CALCULATOR(RectTransformationCalculator); cc->Outputs().Index(0).Set>(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status RectTransformationCalculator::Open(CalculatorContext* cc) { +mediapipe::Status RectTransformationCalculator::Open(CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); options_ = cc->Options(); RET_CHECK(!(options_.has_rotation() && options_.has_rotation_degrees())); RET_CHECK(!(options_.has_square_long() && options_.has_square_short())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status RectTransformationCalculator::Process( - CalculatorContext* cc) { +mediapipe::Status RectTransformationCalculator::Process(CalculatorContext* cc) { if (cc->Inputs().HasTag(kRectTag) && !cc->Inputs().Tag(kRectTag).IsEmpty()) { auto rect = cc->Inputs().Tag(kRectTag).Get(); TransformRect(&rect); @@ -157,7 +156,7 @@ REGISTER_CALCULATOR(RectTransformationCalculator); cc->Outputs().Index(0).Add(output_rects.release(), cc->InputTimestamp()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } float RectTransformationCalculator::ComputeNewRotation(float rotation) { diff --git a/mediapipe/calculators/util/set_landmark_visibility_calculator.cc b/mediapipe/calculators/util/set_landmark_visibility_calculator.cc new file mode 100644 index 000000000..90ce06bca --- /dev/null +++ b/mediapipe/calculators/util/set_landmark_visibility_calculator.cc @@ -0,0 +1,103 @@ +// Copyright 2020 The MediaPipe Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include "mediapipe/framework/calculator_framework.h" +#include "mediapipe/framework/formats/landmark.pb.h" +#include "mediapipe/framework/port/ret_check.h" + +namespace mediapipe { + +namespace { + +constexpr char kNormalizedLandmarksTag[] = "NORM_LANDMARKS"; +constexpr char kVisibilityTag[] = "VISIBILITY"; + +} // namespace + +// A calculator to set landmark visibility. +// +// Inputs: +// NORM_LANDMARKS: A NormalizedLandmarkList with only a single landmark to set +// visibility to. It's a list and not a single landmark because +// split/concatenate calculators work with lists. +// +// VISIBILITY: Float visibility of the given landmark. +// +// Outputs: +// NORM_LANDMARKS: A NormalizedLandmarkList with only a single landmark with +// updated visibility.
+// +// Example config: +// node { +// calculator: "SetLandmarkVisibilityCalculator" +// input_stream: "NORM_LANDMARKS:landmarks" +// input_stream: "VISIBILITY:visibility" +// output_stream: "NORM_LANDMARKS:landmarks_with_visibility" +// } +// +class SetLandmarkVisibilityCalculator : public CalculatorBase { + public: + static mediapipe::Status GetContract(CalculatorContract* cc); + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; +}; +REGISTER_CALCULATOR(SetLandmarkVisibilityCalculator); + +mediapipe::Status SetLandmarkVisibilityCalculator::GetContract( + CalculatorContract* cc) { + cc->Inputs().Tag(kNormalizedLandmarksTag).Set(); + cc->Inputs().Tag(kVisibilityTag).Set(); + cc->Outputs().Tag(kNormalizedLandmarksTag).Set(); + + return mediapipe::OkStatus(); +} + +mediapipe::Status SetLandmarkVisibilityCalculator::Open(CalculatorContext* cc) { + cc->SetOffset(TimestampDiff(0)); + + return mediapipe::OkStatus(); +} + +mediapipe::Status SetLandmarkVisibilityCalculator::Process( + CalculatorContext* cc) { + // Check that landmark and visibility are not empty. + // Don't emit an empty packet for this timestamp. + if (cc->Inputs().Tag(kNormalizedLandmarksTag).IsEmpty() || + cc->Inputs().Tag(kVisibilityTag).IsEmpty()) { + return mediapipe::OkStatus(); + } + + const auto& in_landmarks = + cc->Inputs().Tag(kNormalizedLandmarksTag).Get(); + RET_CHECK_EQ(in_landmarks.landmark_size(), 1); + const NormalizedLandmark& in_landmark = in_landmarks.landmark(0); + + const auto& visibility = cc->Inputs().Tag(kVisibilityTag).Get(); + + auto out_landmarks = absl::make_unique(); + NormalizedLandmark* out_landmark = out_landmarks->add_landmark(); + *out_landmark = in_landmark; + // Update visibility. + out_landmark->set_visibility(visibility); + + cc->Outputs() + .Tag(kNormalizedLandmarksTag) + .Add(out_landmarks.release(), cc->InputTimestamp()); + + return mediapipe::OkStatus(); +} + +} // namespace mediapipe diff --git a/mediapipe/calculators/util/thresholding_calculator.cc b/mediapipe/calculators/util/thresholding_calculator.cc index 1d7b5476b..4ee5fc4b6 100644 --- a/mediapipe/calculators/util/thresholding_calculator.cc +++ b/mediapipe/calculators/util/thresholding_calculator.cc @@ -50,18 +50,17 @@ namespace mediapipe { // } class ThresholdingCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; + static mediapipe::Status GetContract(CalculatorContract* cc); + mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: double threshold_{}; }; REGISTER_CALCULATOR(ThresholdingCalculator); -::mediapipe::Status ThresholdingCalculator::GetContract( - CalculatorContract* cc) { +mediapipe::Status ThresholdingCalculator::GetContract(CalculatorContract* cc) { RET_CHECK(cc->Inputs().HasTag("FLOAT")); cc->Inputs().Tag("FLOAT").Set(); @@ -84,10 +83,10 @@ REGISTER_CALCULATOR(ThresholdingCalculator); "supported."; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ThresholdingCalculator::Open(CalculatorContext* cc) { +mediapipe::Status ThresholdingCalculator::Open(CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); const auto& options = @@ -104,10 +103,10 @@ REGISTER_CALCULATOR(ThresholdingCalculator); if (cc->InputSidePackets().HasTag("THRESHOLD")) { threshold_ =
cc->InputSidePackets().Tag("THRESHOLD").Get(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ThresholdingCalculator::Process(CalculatorContext* cc) { +mediapipe::Status ThresholdingCalculator::Process(CalculatorContext* cc) { if (cc->Inputs().HasTag("THRESHOLD") && !cc->Inputs().Tag("THRESHOLD").IsEmpty()) { threshold_ = cc->Inputs().Tag("THRESHOLD").Get(); @@ -132,6 +131,6 @@ REGISTER_CALCULATOR(ThresholdingCalculator); MakePacket(false).At(cc->InputTimestamp())); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/util/timed_box_list_id_to_label_calculator.cc b/mediapipe/calculators/util/timed_box_list_id_to_label_calculator.cc index 5d81a7af3..391a83c67 100644 --- a/mediapipe/calculators/util/timed_box_list_id_to_label_calculator.cc +++ b/mediapipe/calculators/util/timed_box_list_id_to_label_calculator.cc @@ -48,26 +48,25 @@ using mediapipe::TimedBoxProtoList; // } class TimedBoxListIdToLabelCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: absl::node_hash_map label_map_; }; REGISTER_CALCULATOR(TimedBoxListIdToLabelCalculator); -::mediapipe::Status TimedBoxListIdToLabelCalculator::GetContract( +mediapipe::Status TimedBoxListIdToLabelCalculator::GetContract( CalculatorContract* cc) { cc->Inputs().Index(0).Set(); cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TimedBoxListIdToLabelCalculator::Open( - CalculatorContext* cc) { +mediapipe::Status TimedBoxListIdToLabelCalculator::Open(CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); const auto& options = @@ -84,10 +83,10 @@ REGISTER_CALCULATOR(TimedBoxListIdToLabelCalculator); while (std::getline(stream, line)) { label_map_[i++] = line; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TimedBoxListIdToLabelCalculator::Process( +mediapipe::Status TimedBoxListIdToLabelCalculator::Process( CalculatorContext* cc) { const auto& input_list = cc->Inputs().Index(0).Get(); auto output_list = absl::make_unique(); @@ -100,7 +99,7 @@ REGISTER_CALCULATOR(TimedBoxListIdToLabelCalculator); } } cc->Outputs().Index(0).Add(output_list.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/util/timed_box_list_to_render_data_calculator.cc b/mediapipe/calculators/util/timed_box_list_to_render_data_calculator.cc index 3979d14fc..a70c62f49 100644 --- a/mediapipe/calculators/util/timed_box_list_to_render_data_calculator.cc +++ b/mediapipe/calculators/util/timed_box_list_to_render_data_calculator.cc @@ -120,35 +120,35 @@ class TimedBoxListToRenderDataCalculator : public CalculatorBase { TimedBoxListToRenderDataCalculator& operator=( const TimedBoxListToRenderDataCalculator&) = delete; - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status 
Process(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: TimedBoxListToRenderDataCalculatorOptions options_; }; REGISTER_CALCULATOR(TimedBoxListToRenderDataCalculator); -::mediapipe::Status TimedBoxListToRenderDataCalculator::GetContract( +mediapipe::Status TimedBoxListToRenderDataCalculator::GetContract( CalculatorContract* cc) { if (cc->Inputs().HasTag(kTimedBoxListTag)) { cc->Inputs().Tag(kTimedBoxListTag).Set(); } cc->Outputs().Tag(kRenderDataTag).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TimedBoxListToRenderDataCalculator::Open( +mediapipe::Status TimedBoxListToRenderDataCalculator::Open( CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); options_ = cc->Options(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TimedBoxListToRenderDataCalculator::Process( +mediapipe::Status TimedBoxListToRenderDataCalculator::Process( CalculatorContext* cc) { auto render_data = absl::make_unique(); @@ -164,7 +164,7 @@ REGISTER_CALCULATOR(TimedBoxListToRenderDataCalculator); cc->Outputs() .Tag(kRenderDataTag) .Add(render_data.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/util/top_k_scores_calculator.cc b/mediapipe/calculators/util/top_k_scores_calculator.cc index 1d5a8fede..d2b0d98f7 100644 --- a/mediapipe/calculators/util/top_k_scores_calculator.cc +++ b/mediapipe/calculators/util/top_k_scores_calculator.cc @@ -62,14 +62,14 @@ namespace mediapipe { // } class TopKScoresCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: - ::mediapipe::Status LoadLabelmap(std::string label_map_path); + mediapipe::Status LoadLabelmap(std::string label_map_path); int top_k_ = -1; float threshold_ = 0.0; @@ -78,7 +78,7 @@ class TopKScoresCalculator : public CalculatorBase { }; REGISTER_CALCULATOR(TopKScoresCalculator); -::mediapipe::Status TopKScoresCalculator::GetContract(CalculatorContract* cc) { +mediapipe::Status TopKScoresCalculator::GetContract(CalculatorContract* cc) { RET_CHECK(cc->Inputs().HasTag("SCORES")); cc->Inputs().Tag("SCORES").Set>(); if (cc->Outputs().HasTag("TOP_K_INDEXES")) { @@ -96,10 +96,10 @@ REGISTER_CALCULATOR(TopKScoresCalculator); if (cc->Outputs().HasTag("SUMMARY")) { cc->Outputs().Tag("SUMMARY").Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TopKScoresCalculator::Open(CalculatorContext* cc) { +mediapipe::Status TopKScoresCalculator::Open(CalculatorContext* cc) { const auto& options = cc->Options<::mediapipe::TopKScoresCalculatorOptions>(); RET_CHECK(options.has_top_k() || options.has_threshold()) << "Must specify at least one of the top_k and threshold fields in " @@ -117,10 +117,10 @@ REGISTER_CALCULATOR(TopKScoresCalculator); if (cc->Outputs().HasTag("TOP_K_LABELS")) { RET_CHECK(!label_map_.empty()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TopKScoresCalculator::Process(CalculatorContext* cc) { +mediapipe::Status 
TopKScoresCalculator::Process(CalculatorContext* cc) { const std::vector& input_vector = cc->Inputs().Tag("SCORES").Get>(); std::vector top_k_indexes; @@ -213,10 +213,10 @@ REGISTER_CALCULATOR(TopKScoresCalculator); } } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TopKScoresCalculator::LoadLabelmap( +mediapipe::Status TopKScoresCalculator::LoadLabelmap( std::string label_map_path) { std::string string_path; ASSIGN_OR_RETURN(string_path, PathToResourceAsFile(label_map_path)); @@ -230,7 +230,7 @@ REGISTER_CALCULATOR(TopKScoresCalculator); label_map_[i++] = line; } label_map_loaded_ = true; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/video/box_detector_calculator.cc b/mediapipe/calculators/video/box_detector_calculator.cc index d9afdd333..db179e125 100644 --- a/mediapipe/calculators/video/box_detector_calculator.cc +++ b/mediapipe/calculators/video/box_detector_calculator.cc @@ -92,11 +92,11 @@ class BoxDetectorCalculator : public CalculatorBase { public: ~BoxDetectorCalculator() override = default; - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; private: BoxDetectorCalculatorOptions options_; @@ -109,7 +109,7 @@ class BoxDetectorCalculator : public CalculatorBase { REGISTER_CALCULATOR(BoxDetectorCalculator); -::mediapipe::Status BoxDetectorCalculator::GetContract(CalculatorContract* cc) { +mediapipe::Status BoxDetectorCalculator::GetContract(CalculatorContract* cc) { if (cc->Inputs().HasTag("TRACKING")) { cc->Inputs().Tag("TRACKING").Set(); } @@ -172,10 +172,10 @@ REGISTER_CALCULATOR(BoxDetectorCalculator); cc->InputSidePackets().Tag("FRAME_ALIGNMENT").Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status BoxDetectorCalculator::Open(CalculatorContext* cc) { +mediapipe::Status BoxDetectorCalculator::Open(CalculatorContext* cc) { options_ = cc->Options(); box_detector_ = BoxDetectorInterface::Create(options_.detector_options()); @@ -210,10 +210,10 @@ REGISTER_CALCULATOR(BoxDetectorCalculator); frame_alignment_ = cc->InputSidePackets().Tag("FRAME_ALIGNMENT").Get(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status BoxDetectorCalculator::Process(CalculatorContext* cc) { +mediapipe::Status BoxDetectorCalculator::Process(CalculatorContext* cc) { const Timestamp timestamp = cc->InputTimestamp(); const int64 timestamp_msec = timestamp.Value() / 1000; @@ -246,7 +246,7 @@ REGISTER_CALCULATOR(BoxDetectorCalculator); } if (!detector_switch_) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } InputStream* track_stream = cc->Inputs().HasTag("TRACKING") @@ -274,7 +274,7 @@ REGISTER_CALCULATOR(BoxDetectorCalculator); if (track_stream != nullptr) { // Detect from tracking data if (track_stream->IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } const TrackingData& tracking_data = track_stream->Get(); @@ -289,7 +289,7 @@ REGISTER_CALCULATOR(BoxDetectorCalculator); } else if (video_stream != nullptr) { // Detect 
from input frame if (video_stream->IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } TimedBoxProtoList tracked_boxes; @@ -305,7 +305,7 @@ REGISTER_CALCULATOR(BoxDetectorCalculator); detected_boxes.get()); } else { if (feature_stream->IsEmpty() || descriptor_stream->IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } const auto& image_size = @@ -377,17 +377,17 @@ REGISTER_CALCULATOR(BoxDetectorCalculator); cc->Outputs().Tag("BOXES").Add(detected_boxes.release(), timestamp); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status BoxDetectorCalculator::Close(CalculatorContext* cc) { +mediapipe::Status BoxDetectorCalculator::Close(CalculatorContext* cc) { if (write_index_) { BoxDetectorIndex index = box_detector_->ObtainBoxDetectorIndex(); MEDIAPIPE_CHECK_OK(mediapipe::file::SetContents( cc->InputSidePackets().Tag("OUTPUT_INDEX_FILENAME").Get(), index.SerializeAsString())); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/calculators/video/box_tracker_calculator.cc b/mediapipe/calculators/video/box_tracker_calculator.cc index a946eb3fa..30ac2b26d 100644 --- a/mediapipe/calculators/video/box_tracker_calculator.cc +++ b/mediapipe/calculators/video/box_tracker_calculator.cc @@ -123,10 +123,10 @@ class BoxTrackerCalculator : public CalculatorBase { public: ~BoxTrackerCalculator() override = default; - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; protected: void RenderStates(const std::vector& states, cv::Mat* mat); @@ -167,7 +167,7 @@ class BoxTrackerCalculator : public CalculatorBase { }; // MotionBoxPath per unique id that we are tracking. - typedef std::unordered_map MotionBoxMap; + typedef absl::node_hash_map MotionBoxMap; // Performs tracking of all MotionBoxes in box_map by one frame forward or // backward to or from data_frame_num using passed TrackingData. @@ -373,7 +373,7 @@ void AddStateToPath(const MotionBoxState& state, int64 time_msec, } // namespace. -::mediapipe::Status BoxTrackerCalculator::GetContract(CalculatorContract* cc) { +mediapipe::Status BoxTrackerCalculator::GetContract(CalculatorContract* cc) { if (cc->Inputs().HasTag("TRACKING")) { cc->Inputs().Tag("TRACKING").Set(); } @@ -452,10 +452,10 @@ void AddStateToPath(const MotionBoxState& state, int64 time_msec, cc->InputSidePackets().Tag(kOptionsTag).Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status BoxTrackerCalculator::Open(CalculatorContext* cc) { +mediapipe::Status BoxTrackerCalculator::Open(CalculatorContext* cc) { options_ = tool::RetrieveOptions(cc->Options(), cc->InputSidePackets(), kOptionsTag); @@ -515,10 +515,10 @@ void AddStateToPath(const MotionBoxState& state, int64 time_msec, << "Streaming mode not compatible with cache dir."; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status BoxTrackerCalculator::Process(CalculatorContext* cc) { +mediapipe::Status BoxTrackerCalculator::Process(CalculatorContext* cc) { // Batch mode, issue tracking requests. 
if (box_tracker_ && !tracking_issued_) { for (const auto& pos : initial_pos_.box()) { @@ -530,7 +530,7 @@ void AddStateToPath(const MotionBoxState& state, int64 time_msec, const Timestamp& timestamp = cc->InputTimestamp(); if (timestamp == Timestamp::PreStream()) { // Indicator packet. - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } InputStream* track_stream = cc->Inputs().HasTag("TRACKING") @@ -892,7 +892,7 @@ void AddStateToPath(const MotionBoxState& state, int64 time_msec, cc->Outputs().Tag("VIZ").Add(viz_frame.release(), timestamp); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void BoxTrackerCalculator::AddSmoothTransitionToOutputBox( diff --git a/mediapipe/calculators/video/flow_packager_calculator.cc b/mediapipe/calculators/video/flow_packager_calculator.cc index f871433eb..ee4181723 100644 --- a/mediapipe/calculators/video/flow_packager_calculator.cc +++ b/mediapipe/calculators/video/flow_packager_calculator.cc @@ -59,11 +59,11 @@ class FlowPackagerCalculator : public CalculatorBase { public: ~FlowPackagerCalculator() override = default; - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; // Writes passed chunk to disk. void WriteChunk(const TrackingDataChunk& chunk) const; @@ -90,8 +90,7 @@ class FlowPackagerCalculator : public CalculatorBase { REGISTER_CALCULATOR(FlowPackagerCalculator); -::mediapipe::Status FlowPackagerCalculator::GetContract( - CalculatorContract* cc) { +mediapipe::Status FlowPackagerCalculator::GetContract(CalculatorContract* cc) { if (!cc->Inputs().HasTag("FLOW")) { return tool::StatusFail("No input flow was specified."); } @@ -115,10 +114,10 @@ REGISTER_CALCULATOR(FlowPackagerCalculator); cc->InputSidePackets().Tag("CACHE_DIR").Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status FlowPackagerCalculator::Open(CalculatorContext* cc) { +mediapipe::Status FlowPackagerCalculator::Open(CalculatorContext* cc) { options_ = cc->Options(); flow_packager_.reset(new FlowPackager(options_.flow_packager_options())); @@ -129,10 +128,10 @@ REGISTER_CALCULATOR(FlowPackagerCalculator); cache_dir_ = cc->InputSidePackets().Tag("CACHE_DIR").Get(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status FlowPackagerCalculator::Process(CalculatorContext* cc) { +mediapipe::Status FlowPackagerCalculator::Process(CalculatorContext* cc) { InputStream* flow_stream = &(cc->Inputs().Tag("FLOW")); const RegionFlowFeatureList& flow = flow_stream->Get(); @@ -194,10 +193,10 @@ REGISTER_CALCULATOR(FlowPackagerCalculator); prev_timestamp_ = timestamp; ++frame_idx_; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status FlowPackagerCalculator::Close(CalculatorContext* cc) { +mediapipe::Status FlowPackagerCalculator::Close(CalculatorContext* cc) { if (frame_idx_ > 0) { tracking_chunk_.set_last_chunk(true); if (cc->Outputs().HasTag("TRACKING_CHUNK")) { @@ -216,7 +215,7 @@ REGISTER_CALCULATOR(FlowPackagerCalculator); cc->Outputs().Tag("COMPLETE").Add(new bool(true), Timestamp::PreStream()); } - return 
::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void FlowPackagerCalculator::WriteChunk(const TrackingDataChunk& chunk) const { diff --git a/mediapipe/calculators/video/flow_to_image_calculator.cc b/mediapipe/calculators/video/flow_to_image_calculator.cc index d32319c6f..b63163092 100644 --- a/mediapipe/calculators/video/flow_to_image_calculator.cc +++ b/mediapipe/calculators/video/flow_to_image_calculator.cc @@ -56,27 +56,27 @@ class FlowToImageCalculator : public CalculatorBase { public: FlowToImageCalculator() {} ~FlowToImageCalculator() override {} - static ::mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + static mediapipe::Status GetContract(CalculatorContract* cc); + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: FlowQuantizerModel model_; }; -::mediapipe::Status FlowToImageCalculator::GetContract(CalculatorContract* cc) { +mediapipe::Status FlowToImageCalculator::GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set(); cc->Outputs().Index(0).Set(); // Model sanity check const auto& options = cc->Options(); if (options.min_value() >= options.max_value()) { - return ::mediapipe::InvalidArgumentError("Invalid quantizer model."); + return mediapipe::InvalidArgumentError("Invalid quantizer model."); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status FlowToImageCalculator::Open(CalculatorContext* cc) { +mediapipe::Status FlowToImageCalculator::Open(CalculatorContext* cc) { const auto& options = cc->Options(); // Fill the model_data; ideally we would train the model, but we omit // that step for now and take the (min, max) range from the protobuf. @@ -86,10 +86,10 @@ class FlowToImageCalculator : public CalculatorBase { options.min_value(), options.min_value(), options.max_value(), options.max_value())); model_.LoadFromProto(model_data); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status FlowToImageCalculator::Process(CalculatorContext* cc) { +mediapipe::Status FlowToImageCalculator::Process(CalculatorContext* cc) { const auto& input = cc->Inputs().Index(0).Get(); // Input flow is 2-channel with x-dim flow and y-dim flow. // Convert it to an ImageFrame in SRGB space; the 3rd channel is not used (0).
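// As a rough sketch, the quantizer configured in Open() maps each flow
// component linearly from [min_value, max_value] onto [0, 255]; the exact
// mapping lives in FlowQuantizerModel, so the helper below is only an
// assumed illustration of that idea:
//   uint8 QuantizeComponent(float v, float min_v, float max_v) {
//     const float clamped = std::min(std::max(v, min_v), max_v);
//     return static_cast<uint8>(
//         255.0f * (clamped - min_v) / (max_v - min_v) + 0.5f);
//   }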
@@ -106,7 +106,7 @@ class FlowToImageCalculator : public CalculatorBase { } } cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } REGISTER_CALCULATOR(FlowToImageCalculator); diff --git a/mediapipe/calculators/video/motion_analysis_calculator.cc b/mediapipe/calculators/video/motion_analysis_calculator.cc index 6746bc3a9..bce6dbbe0 100644 --- a/mediapipe/calculators/video/motion_analysis_calculator.cc +++ b/mediapipe/calculators/video/motion_analysis_calculator.cc @@ -95,11 +95,11 @@ class MotionAnalysisCalculator : public CalculatorBase { public: ~MotionAnalysisCalculator() override = default; - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; private: // Outputs results to Outputs() if MotionAnalysis buffered sufficient results. @@ -107,8 +107,8 @@ class MotionAnalysisCalculator : public CalculatorBase { void OutputMotionAnalyzedFrames(bool flush, CalculatorContext* cc); // Lazy init function to be called on Process. - ::mediapipe::Status InitOnProcess(InputStream* video_stream, - InputStream* selection_stream); + mediapipe::Status InitOnProcess(InputStream* video_stream, + InputStream* selection_stream); // Parses CSV file contents to homographies. bool ParseModelCSV(const std::string& contents, @@ -189,7 +189,7 @@ class MotionAnalysisCalculator : public CalculatorBase { REGISTER_CALCULATOR(MotionAnalysisCalculator); -::mediapipe::Status MotionAnalysisCalculator::GetContract( +mediapipe::Status MotionAnalysisCalculator::GetContract( CalculatorContract* cc) { if (cc->Inputs().HasTag("VIDEO")) { cc->Inputs().Tag("VIDEO").Set(); @@ -246,10 +246,10 @@ REGISTER_CALCULATOR(MotionAnalysisCalculator); cc->InputSidePackets().Tag(kOptionsTag).Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status MotionAnalysisCalculator::Open(CalculatorContext* cc) { +mediapipe::Status MotionAnalysisCalculator::Open(CalculatorContext* cc) { options_ = tool::RetrieveOptions(cc->Options(), cc->InputSidePackets(), kOptionsTag); @@ -364,7 +364,7 @@ REGISTER_CALCULATOR(MotionAnalysisCalculator); // If no video header is provided, just return and initialize on the first // Process() call. 
if (video_header == nullptr) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } ////////////// EARLY RETURN; ONLY HEADER OUTPUT SHOULD GO HERE /////////////// @@ -397,12 +397,12 @@ REGISTER_CALCULATOR(MotionAnalysisCalculator); .SetHeader(Adopt(new VideoHeader(*video_header))); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status MotionAnalysisCalculator::Process(CalculatorContext* cc) { +mediapipe::Status MotionAnalysisCalculator::Process(CalculatorContext* cc) { if (options_.bypass_mode()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } InputStream* video_stream = @@ -441,7 +441,7 @@ REGISTER_CALCULATOR(MotionAnalysisCalculator); } ++frame_idx_; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } if (motion_analysis_ == nullptr) { @@ -491,7 +491,7 @@ REGISTER_CALCULATOR(MotionAnalysisCalculator); cc->Outputs().Tag("VIDEO_OUT").AddPacket(video_stream->Value()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } if (use_frame) { @@ -520,7 +520,7 @@ REGISTER_CALCULATOR(MotionAnalysisCalculator); selected_motions_.push_back(frame_selection_result->camera_motion()); switch (options_.selection_analysis()) { case MotionAnalysisCalculatorOptions::NO_ANALYSIS_USE_SELECTION: - return ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "Should not reach this point!"; case MotionAnalysisCalculatorOptions::ANALYSIS_FROM_FEATURES: @@ -574,10 +574,10 @@ REGISTER_CALCULATOR(MotionAnalysisCalculator); OutputMotionAnalyzedFrames(false, cc); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status MotionAnalysisCalculator::Close(CalculatorContext* cc) { +mediapipe::Status MotionAnalysisCalculator::Close(CalculatorContext* cc) { // Guard against empty videos. 
if (motion_analysis_) { OutputMotionAnalyzedFrames(true, cc); @@ -588,7 +588,7 @@ REGISTER_CALCULATOR(MotionAnalysisCalculator); << meta_motions_.size(); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void MotionAnalysisCalculator::OutputMotionAnalyzedFrames( @@ -688,7 +688,7 @@ void MotionAnalysisCalculator::OutputMotionAnalyzedFrames( } } -::mediapipe::Status MotionAnalysisCalculator::InitOnProcess( +mediapipe::Status MotionAnalysisCalculator::InitOnProcess( InputStream* video_stream, InputStream* selection_stream) { if (video_stream) { frame_width_ = video_stream->Get().Width(); @@ -761,7 +761,7 @@ void MotionAnalysisCalculator::OutputMotionAnalyzedFrames( motion_options->set_filter_initialized_irls_weights(true); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } bool MotionAnalysisCalculator::ParseModelCSV( diff --git a/mediapipe/calculators/video/opencv_video_decoder_calculator.cc b/mediapipe/calculators/video/opencv_video_decoder_calculator.cc index c75e58620..a70785cb0 100644 --- a/mediapipe/calculators/video/opencv_video_decoder_calculator.cc +++ b/mediapipe/calculators/video/opencv_video_decoder_calculator.cc @@ -86,7 +86,7 @@ ImageFormat::Format GetImageFormat(int num_channels) { // class OpenCvVideoDecoderCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->InputSidePackets().Tag("INPUT_FILE_PATH").Set(); cc->Outputs().Tag("VIDEO").Set(); if (cc->Outputs().HasTag("VIDEO_PRESTREAM")) { @@ -95,15 +95,15 @@ class OpenCvVideoDecoderCalculator : public CalculatorBase { if (cc->OutputSidePackets().HasTag("SAVED_AUDIO_PATH")) { cc->OutputSidePackets().Tag("SAVED_AUDIO_PATH").Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { const std::string& input_file_path = cc->InputSidePackets().Tag("INPUT_FILE_PATH").Get(); cap_ = absl::make_unique(input_file_path); if (!cap_->isOpened()) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Fail to open video file at " << input_file_path; } width_ = static_cast(cap_->get(cv::CAP_PROP_FRAME_WIDTH)); @@ -116,19 +116,19 @@ class OpenCvVideoDecoderCalculator : public CalculatorBase { cv::Mat frame; cap_->read(frame); if (frame.empty()) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Fail to read any frames from the video file at " << input_file_path; } format_ = GetImageFormat(frame.channels()); if (format_ == ImageFormat::UNKNOWN) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Unsupported video format of the video file at " << input_file_path; } if (fps <= 0 || frame_count_ <= 0 || width_ <= 0 || height_ <= 0) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Fail to make video header due to the incorrect metadata from " "the video file at " << input_file_path; @@ -170,17 +170,17 @@ class OpenCvVideoDecoderCalculator : public CalculatorBase { .Set(MakePacket(std::string())); } #else - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return 
mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "OpenCVVideoDecoderCalculator can't save the audio file " "because FFmpeg is not installed. Please remove " "output_side_packet: \"SAVED_AUDIO_PATH\" from the node " "config."; #endif } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { auto image_frame = absl::make_unique(format_, width_, height_, /*alignment_boundary=*/1); // Use microsecond as the unit of time. @@ -213,10 +213,10 @@ class OpenCvVideoDecoderCalculator : public CalculatorBase { decoded_frames_++; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Close(CalculatorContext* cc) override { + mediapipe::Status Close(CalculatorContext* cc) override { if (cap_ && cap_->isOpened()) { cap_->release(); } @@ -225,7 +225,7 @@ class OpenCvVideoDecoderCalculator : public CalculatorBase { << frame_count_ << " vs decoded frames: " << decoded_frames_ << ")."; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: diff --git a/mediapipe/calculators/video/opencv_video_encoder_calculator.cc b/mediapipe/calculators/video/opencv_video_encoder_calculator.cc index 43d71f059..ffb546dbd 100644 --- a/mediapipe/calculators/video/opencv_video_encoder_calculator.cc +++ b/mediapipe/calculators/video/opencv_video_encoder_calculator.cc @@ -76,20 +76,20 @@ namespace mediapipe { // class OpenCvVideoEncoderCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; + static mediapipe::Status GetContract(CalculatorContract* cc); + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; private: - ::mediapipe::Status SetUpVideoWriter(float frame_rate, int width, int height); + mediapipe::Status SetUpVideoWriter(float frame_rate, int width, int height); std::string output_file_path_; int four_cc_; std::unique_ptr writer_; }; -::mediapipe::Status OpenCvVideoEncoderCalculator::GetContract( +mediapipe::Status OpenCvVideoEncoderCalculator::GetContract( CalculatorContract* cc) { RET_CHECK(cc->Inputs().HasTag("VIDEO")); cc->Inputs().Tag("VIDEO").Set(); @@ -101,10 +101,10 @@ class OpenCvVideoEncoderCalculator : public CalculatorBase { if (cc->InputSidePackets().HasTag("AUDIO_FILE_PATH")) { cc->InputSidePackets().Tag("AUDIO_FILE_PATH").Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status OpenCvVideoEncoderCalculator::Open(CalculatorContext* cc) { +mediapipe::Status OpenCvVideoEncoderCalculator::Open(CalculatorContext* cc) { OpenCvVideoEncoderCalculatorOptions options = cc->Options(); RET_CHECK(options.has_codec() && options.codec().length() == 4) @@ -128,13 +128,12 @@ class OpenCvVideoEncoderCalculator : public CalculatorBase { // from the video header directly. The calculator will receive the video // header packet at timestamp prestream. 
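// For illustration only (the stream and side-packet names below are
// assumptions, not taken from this change), the header produced by a decoder
// can be wired straight into this calculator:
//   node {
//     calculator: "OpenCvVideoEncoderCalculator"
//     input_stream: "VIDEO:video"
//     input_stream: "VIDEO_PRESTREAM:video_header"
//     input_side_packet: "OUTPUT_FILE_PATH:output_path"
//   }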
if (cc->Inputs().HasTag("VIDEO_PRESTREAM")) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } return SetUpVideoWriter(options.fps(), options.width(), options.height()); } -::mediapipe::Status OpenCvVideoEncoderCalculator::Process( - CalculatorContext* cc) { +mediapipe::Status OpenCvVideoEncoderCalculator::Process(CalculatorContext* cc) { if (cc->InputTimestamp() == Timestamp::PreStream()) { const VideoHeader& video_header = cc->Inputs().Tag("VIDEO_PRESTREAM").Get(); @@ -149,7 +148,7 @@ class OpenCvVideoEncoderCalculator : public CalculatorBase { if (format == ImageFormat::GRAY8) { frame = formats::MatView(&image_frame); if (frame.empty()) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Receive empty frame at timestamp " << cc->Inputs().Tag("VIDEO").Value().Timestamp() << " in OpenCvVideoEncoderCalculator::Process()"; @@ -157,7 +156,7 @@ class OpenCvVideoEncoderCalculator : public CalculatorBase { } else { cv::Mat tmp_frame = formats::MatView(&image_frame); if (tmp_frame.empty()) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Receive empty frame at timestamp " << cc->Inputs().Tag("VIDEO").Value().Timestamp() << " in OpenCvVideoEncoderCalculator::Process()"; @@ -167,15 +166,15 @@ class OpenCvVideoEncoderCalculator : public CalculatorBase { } else if (format == ImageFormat::SRGBA) { cv::cvtColor(tmp_frame, frame, cv::COLOR_RGBA2BGR); } else { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Unsupported image format: " << format; } } writer_->write(frame); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status OpenCvVideoEncoderCalculator::Close(CalculatorContext* cc) { +mediapipe::Status OpenCvVideoEncoderCalculator::Close(CalculatorContext* cc) { if (writer_ && writer_->isOpened()) { writer_->release(); } @@ -199,17 +198,17 @@ class OpenCvVideoEncoderCalculator : public CalculatorBase { } #else - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "OpenCVVideoEncoderCalculator can't attach the audio tracks to " "the video because FFmpeg is not installed. 
Please remove " "input_side_packet: \"AUDIO_FILE_PATH\" from the node " "config."; #endif } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status OpenCvVideoEncoderCalculator::SetUpVideoWriter( +mediapipe::Status OpenCvVideoEncoderCalculator::SetUpVideoWriter( float frame_rate, int width, int height) { RET_CHECK(frame_rate > 0 && width > 0 && height > 0) << "Invalid video metadata: frame_rate=" << frame_rate @@ -217,10 +216,10 @@ class OpenCvVideoEncoderCalculator : public CalculatorBase { writer_ = absl::make_unique( output_file_path_, four_cc_, frame_rate, cv::Size(width, height)); if (!writer_->isOpened()) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Fail to open file at " << output_file_path_; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } REGISTER_CALCULATOR(OpenCvVideoEncoderCalculator); diff --git a/mediapipe/calculators/video/tracked_detection_manager_calculator.cc b/mediapipe/calculators/video/tracked_detection_manager_calculator.cc index 7e6ba6749..ccab6755d 100644 --- a/mediapipe/calculators/video/tracked_detection_manager_calculator.cc +++ b/mediapipe/calculators/video/tracked_detection_manager_calculator.cc @@ -139,10 +139,10 @@ Detection GetAxisAlignedDetectionFromTrackedDetection( // } class TrackedDetectionManagerCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; + static mediapipe::Status GetContract(CalculatorContract* cc); + mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: // Adds new list of detections to |waiting_for_update_detections_|. 
@@ -161,7 +161,7 @@ class TrackedDetectionManagerCalculator : public CalculatorBase { }; REGISTER_CALCULATOR(TrackedDetectionManagerCalculator); -::mediapipe::Status TrackedDetectionManagerCalculator::GetContract( +mediapipe::Status TrackedDetectionManagerCalculator::GetContract( CalculatorContract* cc) { if (cc->Inputs().HasTag(kDetectionsTag)) { cc->Inputs().Tag(kDetectionsTag).Set>(); @@ -183,19 +183,19 @@ REGISTER_CALCULATOR(TrackedDetectionManagerCalculator); cc->Outputs().Tag(kDetectionBoxesTag).Set>(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TrackedDetectionManagerCalculator::Open( +mediapipe::Status TrackedDetectionManagerCalculator::Open( CalculatorContext* cc) { mediapipe::TrackedDetectionManagerCalculatorOptions options = cc->Options(); tracked_detection_manager_.SetConfig( options.tracked_detection_manager_options()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TrackedDetectionManagerCalculator::Process( +mediapipe::Status TrackedDetectionManagerCalculator::Process( CalculatorContext* cc) { if (cc->Inputs().HasTag(kTrackingBoxesTag) && !cc->Inputs().Tag(kTrackingBoxesTag).IsEmpty()) { @@ -296,7 +296,7 @@ REGISTER_CALCULATOR(TrackedDetectionManagerCalculator); AddDetectionList(detection_list, cc); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void TrackedDetectionManagerCalculator::AddDetectionList( diff --git a/mediapipe/calculators/video/tracking_graph_test.cc b/mediapipe/calculators/video/tracking_graph_test.cc index fc04ee6e8..e446e155c 100644 --- a/mediapipe/calculators/video/tracking_graph_test.cc +++ b/mediapipe/calculators/video/tracking_graph_test.cc @@ -52,14 +52,14 @@ std::string GetTestDir() { CFURLGetFileSystemRepresentation( bundle_url, true, reinterpret_cast(path), sizeof(path)); CFRelease(bundle_url); - return ::mediapipe::file::JoinPath(path, "testdata"); + return mediapipe::file::JoinPath(path, "testdata"); #elif defined(__ANDROID__) char path[1024]; getcwd(path, sizeof(path)); - return ::mediapipe::file::JoinPath(path, - "mediapipe/calculators/video/testdata"); + return mediapipe::file::JoinPath(path, + "mediapipe/calculators/video/testdata"); #else - return ::mediapipe::file::JoinPath( + return mediapipe::file::JoinPath( "./", // This should match the path of the output files // of the genrule() that generates test model files. 
diff --git a/mediapipe/calculators/video/tvl1_optical_flow_calculator.cc b/mediapipe/calculators/video/tvl1_optical_flow_calculator.cc index c774cfeb1..56aa86412 100644 --- a/mediapipe/calculators/video/tvl1_optical_flow_calculator.cc +++ b/mediapipe/calculators/video/tvl1_optical_flow_calculator.cc @@ -74,14 +74,14 @@ cv::Mat ConvertToGrayscale(const cv::Mat& image) { // num_threads: 10 class Tvl1OpticalFlowCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + static mediapipe::Status GetContract(CalculatorContract* cc); + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: - ::mediapipe::Status CalculateOpticalFlow(const ImageFrame& current_frame, - const ImageFrame& next_frame, - OpticalFlowField* flow); + mediapipe::Status CalculateOpticalFlow(const ImageFrame& current_frame, + const ImageFrame& next_frame, + OpticalFlowField* flow); bool forward_requested_ = false; bool backward_requested_ = false; // Stores the idle DenseOpticalFlow objects. @@ -93,11 +93,11 @@ class Tvl1OpticalFlowCalculator : public CalculatorBase { absl::Mutex mutex_; }; -::mediapipe::Status Tvl1OpticalFlowCalculator::GetContract( +mediapipe::Status Tvl1OpticalFlowCalculator::GetContract( CalculatorContract* cc) { if (!cc->Inputs().HasTag("FIRST_FRAME") || !cc->Inputs().HasTag("SECOND_FRAME")) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "Missing required input streams. Both FIRST_FRAME and SECOND_FRAME " "must be specified."); } @@ -109,10 +109,10 @@ class Tvl1OpticalFlowCalculator : public CalculatorBase { if (cc->Outputs().HasTag("BACKWARD_FLOW")) { cc->Outputs().Tag("BACKWARD_FLOW").Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status Tvl1OpticalFlowCalculator::Open(CalculatorContext* cc) { +mediapipe::Status Tvl1OpticalFlowCalculator::Open(CalculatorContext* cc) { { absl::MutexLock lock(&mutex_); tvl1_computers_.emplace_back(cv::createOptFlow_DualTVL1()); @@ -124,10 +124,10 @@ class Tvl1OpticalFlowCalculator : public CalculatorBase { backward_requested_ = true; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status Tvl1OpticalFlowCalculator::Process(CalculatorContext* cc) { +mediapipe::Status Tvl1OpticalFlowCalculator::Process(CalculatorContext* cc) { const ImageFrame& first_frame = cc->Inputs().Tag("FIRST_FRAME").Value().Get(); const ImageFrame& second_frame = @@ -148,10 +148,10 @@ class Tvl1OpticalFlowCalculator : public CalculatorBase { .Tag("BACKWARD_FLOW") .Add(backward_optical_flow_field.release(), cc->InputTimestamp()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status Tvl1OpticalFlowCalculator::CalculateOpticalFlow( +mediapipe::Status Tvl1OpticalFlowCalculator::CalculateOpticalFlow( const ImageFrame& current_frame, const ImageFrame& next_frame, OpticalFlowField* flow) { CHECK(flow); @@ -184,7 +184,7 @@ class Tvl1OpticalFlowCalculator : public CalculatorBase { absl::MutexLock lock(&mutex_); tvl1_computers_.push_back(tvl1_computer); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } REGISTER_CALCULATOR(Tvl1OpticalFlowCalculator); diff --git a/mediapipe/calculators/video/video_pre_stream_calculator.cc b/mediapipe/calculators/video/video_pre_stream_calculator.cc index 
69c76ec36..36547830e 100644 --- a/mediapipe/calculators/video/video_pre_stream_calculator.cc +++ b/mediapipe/calculators/video/video_pre_stream_calculator.cc @@ -45,13 +45,13 @@ namespace mediapipe { // } class VideoPreStreamCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + static mediapipe::Status GetContract(CalculatorContract* cc); + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: - ::mediapipe::Status ProcessWithFrameRateInPreStream(CalculatorContext* cc); - ::mediapipe::Status ProcessWithFrameRateInOptions(CalculatorContext* cc); + mediapipe::Status ProcessWithFrameRateInPreStream(CalculatorContext* cc); + mediapipe::Status ProcessWithFrameRateInOptions(CalculatorContext* cc); std::unique_ptr header_; bool frame_rate_in_prestream_ = false; @@ -60,7 +60,7 @@ class VideoPreStreamCalculator : public CalculatorBase { REGISTER_CALCULATOR(VideoPreStreamCalculator); -::mediapipe::Status VideoPreStreamCalculator::GetContract( +mediapipe::Status VideoPreStreamCalculator::GetContract( CalculatorContract* cc) { if (!cc->Inputs().UsesTags()) { cc->Inputs().Index(0).Set(); @@ -69,17 +69,17 @@ REGISTER_CALCULATOR(VideoPreStreamCalculator); cc->Inputs().Tag("VIDEO_PRESTREAM").Set(); } cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status VideoPreStreamCalculator::Open(CalculatorContext* cc) { +mediapipe::Status VideoPreStreamCalculator::Open(CalculatorContext* cc) { frame_rate_in_prestream_ = cc->Inputs().UsesTags() && cc->Inputs().HasTag("FRAME") && cc->Inputs().HasTag("VIDEO_PRESTREAM"); header_ = absl::make_unique(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status VideoPreStreamCalculator::ProcessWithFrameRateInPreStream( +mediapipe::Status VideoPreStreamCalculator::ProcessWithFrameRateInPreStream( CalculatorContext* cc) { cc->GetCounter("ProcessWithFrameRateInPreStream")->Increment(); if (cc->InputTimestamp() == Timestamp::PreStream()) { @@ -99,13 +99,13 @@ REGISTER_CALCULATOR(VideoPreStreamCalculator); cc->Outputs().Index(0).Add(header_.release(), Timestamp::PreStream()); emitted_ = true; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status VideoPreStreamCalculator::Process(CalculatorContext* cc) { +mediapipe::Status VideoPreStreamCalculator::Process(CalculatorContext* cc) { cc->GetCounter("Process")->Increment(); if (emitted_) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } if (frame_rate_in_prestream_) { return ProcessWithFrameRateInPreStream(cc); @@ -114,7 +114,7 @@ REGISTER_CALCULATOR(VideoPreStreamCalculator); } } -::mediapipe::Status VideoPreStreamCalculator::ProcessWithFrameRateInOptions( +mediapipe::Status VideoPreStreamCalculator::ProcessWithFrameRateInOptions( CalculatorContext* cc) { cc->GetCounter("ProcessWithFrameRateInOptions")->Increment(); RET_CHECK_NE(cc->InputTimestamp(), Timestamp::PreStream()); @@ -136,7 +136,7 @@ REGISTER_CALCULATOR(VideoPreStreamCalculator); RET_CHECK_NE(header_->frame_rate, 0.0) << "frame rate should be non-zero"; cc->Outputs().Index(0).Add(header_.release(), Timestamp::PreStream()); emitted_ = true; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git 
a/mediapipe/calculators/video/video_pre_stream_calculator_test.cc b/mediapipe/calculators/video/video_pre_stream_calculator_test.cc index 1f0ccb7f2..c5d71ff90 100644 --- a/mediapipe/calculators/video/video_pre_stream_calculator_test.cc +++ b/mediapipe/calculators/video/video_pre_stream_calculator_test.cc @@ -118,7 +118,7 @@ TEST(VideoPreStreamCalculatorTest, FailsWithoutFrameRateInOptions) { "frame", Adopt(new ImageFrame(ImageFormat::SRGB, 1, 2)).At(Timestamp(0)))); MP_ASSERT_OK(graph.CloseInputStream("frame")); - ::mediapipe::Status status = graph.WaitUntilDone(); + mediapipe::Status status = graph.WaitUntilDone(); EXPECT_FALSE(status.ok()); EXPECT_THAT(status.ToString(), testing::HasSubstr("frame rate should be non-zero")); @@ -144,7 +144,7 @@ TEST(VideoPreStreamCalculatorTest, FailsWithoutFrameRateInPreStream1) { Adopt(new ImageFrame(ImageFormat::SRGB, 1, 2)).At(Timestamp(0)))); MP_ASSERT_OK(graph.CloseInputStream("frame")); MP_ASSERT_OK(graph.CloseInputStream("input_header")); - ::mediapipe::Status status = graph.WaitUntilDone(); + mediapipe::Status status = graph.WaitUntilDone(); EXPECT_FALSE(status.ok()); EXPECT_THAT(status.ToString(), testing::HasSubstr("frame rate should be non-zero")); @@ -177,7 +177,7 @@ TEST(VideoPreStreamCalculatorTest, FailsWithoutFrameRateInPreStream2) { "frame", Adopt(new ImageFrame(ImageFormat::SRGB, 1, 2)).At(Timestamp(0)))); MP_ASSERT_OK(graph.CloseInputStream("frame")); - ::mediapipe::Status status = graph.WaitUntilDone(); + mediapipe::Status status = graph.WaitUntilDone(); EXPECT_FALSE(status.ok()); } } diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/basic/AndroidManifest.xml b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/basic/AndroidManifest.xml index 99288624c..f7218c97c 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/basic/AndroidManifest.xml +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/basic/AndroidManifest.xml @@ -32,5 +32,6 @@ + diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/basic/BUILD b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/basic/BUILD index d0ff4e8cb..ae4652dba 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/basic/BUILD +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/basic/BUILD @@ -74,6 +74,7 @@ android_binary( "inputVideoStreamName": "input_video", "outputVideoStreamName": "output_video", "flipFramesVertically": "True", + "converterNumBuffers": "2", }, multidex = "native", deps = [ diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/basic/MainActivity.java b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/basic/MainActivity.java index 4bf30c833..952132cdf 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/basic/MainActivity.java +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/basic/MainActivity.java @@ -46,6 +46,14 @@ public class MainActivity extends AppCompatActivity { // NOTE: use "flipFramesVertically" in manifest metadata to override this behavior. private static final boolean FLIP_FRAMES_VERTICALLY = true; + // Number of output frames allocated in ExternalTextureConverter. + // NOTE: use "converterNumBuffers" in manifest metadata to override number of buffers. 
For + // example, when there is a FlowLimiterCalculator in the graph, number of buffers should be at + // least `max_in_flight + max_in_queue + 1` (where max_in_flight and max_in_queue are used in + // FlowLimiterCalculator options). That's because we need buffers for all the frames that are in + // flight/queue plus one for the next frame from the camera. + private static final int NUM_BUFFERS = 2; + static { // Load all native libraries needed by the app. System.loadLibrary("mediapipe_jni"); @@ -103,7 +111,6 @@ public class MainActivity extends AppCompatActivity { applicationInfo.metaData.getString("binaryGraphName"), applicationInfo.metaData.getString("inputVideoStreamName"), applicationInfo.metaData.getString("outputVideoStreamName")); - processor .getVideoSurfaceOutput() .setFlipY( @@ -121,7 +128,10 @@ public class MainActivity extends AppCompatActivity { @Override protected void onResume() { super.onResume(); - converter = new ExternalTextureConverter(eglManager.getContext()); + converter = + new ExternalTextureConverter( + eglManager.getContext(), + applicationInfo.metaData.getInt("converterNumBuffers", NUM_BUFFERS)); converter.setFlipY( applicationInfo.metaData.getBoolean("flipFramesVertically", FLIP_FRAMES_VERTICALLY)); converter.setConsumer(processor); @@ -168,7 +178,7 @@ public class MainActivity extends AppCompatActivity { ? CameraHelper.CameraFacing.FRONT : CameraHelper.CameraFacing.BACK; cameraHelper.startCamera( - this, cameraFacing, /*surfaceTexture=*/ null, cameraTargetResolution()); + this, cameraFacing, /*unusedSurfaceTexture=*/ null, cameraTargetResolution()); } protected Size computeViewSize(int width, int height) { diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectioncpu/BUILD b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectioncpu/BUILD index 7af950678..279d29b74 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectioncpu/BUILD +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectioncpu/BUILD @@ -50,6 +50,7 @@ android_binary( "inputVideoStreamName": "input_video", "outputVideoStreamName": "output_video", "flipFramesVertically": "True", + "converterNumBuffers": "2", }, multidex = "native", deps = [ diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectiongpu/BUILD b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectiongpu/BUILD index 60d4ef44f..11351fc56 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectiongpu/BUILD +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectiongpu/BUILD @@ -50,6 +50,7 @@ android_binary( "inputVideoStreamName": "input_video", "outputVideoStreamName": "output_video", "flipFramesVertically": "True", + "converterNumBuffers": "2", }, multidex = "native", deps = [ diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/faceeffect/BUILD b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/faceeffect/BUILD index c665f563f..d9b2554dc 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/faceeffect/BUILD +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/faceeffect/BUILD @@ -55,6 +55,7 @@ android_binary( "inputVideoStreamName": "input_video", "outputVideoStreamName": "output_video", "flipFramesVertically": "True", + "converterNumBuffers": "2", }, multidex = "native", deps = [ diff --git 
a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facemeshgpu/BUILD b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facemeshgpu/BUILD index 2de32b36f..26406e77b 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facemeshgpu/BUILD +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facemeshgpu/BUILD @@ -51,6 +51,7 @@ android_binary( "inputVideoStreamName": "input_video", "outputVideoStreamName": "output_video", "flipFramesVertically": "True", + "converterNumBuffers": "2", }, multidex = "native", deps = [ diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/hairsegmentationgpu/BUILD b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/hairsegmentationgpu/BUILD index 284dcd9a0..df58f2713 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/hairsegmentationgpu/BUILD +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/hairsegmentationgpu/BUILD @@ -50,6 +50,7 @@ android_binary( "inputVideoStreamName": "input_video", "outputVideoStreamName": "output_video", "flipFramesVertically": "True", + "converterNumBuffers": "2", }, multidex = "native", deps = [ diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handdetectiongpu/BUILD b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handdetectiongpu/BUILD index 51bc74a33..2d9813301 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handdetectiongpu/BUILD +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handdetectiongpu/BUILD @@ -50,6 +50,7 @@ android_binary( "inputVideoStreamName": "input_video", "outputVideoStreamName": "output_video", "flipFramesVertically": "True", + "converterNumBuffers": "2", }, multidex = "native", deps = [ diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu/BUILD b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu/BUILD index afe1c6777..7b3bfe847 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu/BUILD +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu/BUILD @@ -52,6 +52,7 @@ android_binary( "inputVideoStreamName": "input_video", "outputVideoStreamName": "output_video", "flipFramesVertically": "True", + "converterNumBuffers": "2", }, multidex = "native", deps = [ diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/holistictrackinggpu/BUILD b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/holistictrackinggpu/BUILD new file mode 100644 index 000000000..44a6d6428 --- /dev/null +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/holistictrackinggpu/BUILD @@ -0,0 +1,69 @@ +# Copyright 2019 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
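(Aside on the `converterNumBuffers` values being added throughout these BUILD files: per the note in the basic `MainActivity` above, a graph with a `FlowLimiterCalculator` needs at least `max_in_flight + max_in_queue + 1` converter buffers. A toy check of that arithmetic, with assumed option values that are not taken from any graph in this patch:

```cpp
// Illustrative values only; the real numbers come from the graph's
// FlowLimiterCalculator options.
constexpr int kMaxInFlight = 1;  // frames the graph may process concurrently
constexpr int kMaxInQueue = 1;   // frames the limiter may hold in its queue
// One extra buffer holds the next frame arriving from the camera.
constexpr int kConverterNumBuffers = kMaxInFlight + kMaxInQueue + 1;
static_assert(kConverterNumBuffers == 3,
              "consistent with the holistic example's setting of 3 below");
```

This is presumably why the holistic target below sets `"converterNumBuffers": "3"` while the simpler examples keep 2.)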
+ +licenses(["notice"]) + +package(default_visibility = ["//visibility:private"]) + +cc_binary( + name = "libmediapipe_jni.so", + linkshared = 1, + linkstatic = 1, + deps = [ + "//mediapipe/graphs/holistic_tracking:holistic_tracking_gpu_deps", + "//mediapipe/java/com/google/mediapipe/framework/jni:mediapipe_framework_jni", + ], +) + +cc_library( + name = "mediapipe_jni_lib", + srcs = [":libmediapipe_jni.so"], + alwayslink = 1, +) + +android_binary( + name = "holistictrackinggpu", + srcs = glob(["*.java"]), + assets = [ + "//mediapipe/graphs/holistic_tracking:holistic_tracking_gpu.binarypb", + "//mediapipe/modules/face_detection:face_detection_front.tflite", + "//mediapipe/modules/face_landmark:face_landmark.tflite", + "//mediapipe/modules/hand_landmark:hand_landmark.tflite", + "//mediapipe/modules/hand_landmark:handedness.txt", + "//mediapipe/modules/holistic_landmark:hand_recrop.tflite", + "//mediapipe/modules/pose_detection:pose_detection.tflite", + "//mediapipe/modules/pose_landmark:pose_landmark_upper_body.tflite", + "//mediapipe/modules/pose_landmark:pose_landmark_full_body.tflite", + ], + assets_dir = "", + manifest = "//mediapipe/examples/android/src/java/com/google/mediapipe/apps/basic:AndroidManifest.xml", + manifest_values = { + "applicationId": "com.google.mediapipe.apps.holistictrackinggpu", + "appName": "Holistic Tracking", + "mainActivity": "com.google.mediapipe.apps.basic.MainActivity", + "cameraFacingFront": "False", + "binaryGraphName": "holistic_tracking_gpu.binarypb", + "inputVideoStreamName": "input_video", + "outputVideoStreamName": "output_video", + "flipFramesVertically": "True", + "converterNumBuffers": "3", + }, + multidex = "native", + deps = [ + ":mediapipe_jni_lib", + "//mediapipe/examples/android/src/java/com/google/mediapipe/apps/basic:basic_lib", + "//mediapipe/framework/formats:landmark_java_proto_lite", + "//mediapipe/java/com/google/mediapipe/framework:android_framework", + ], +) diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/instantmotiontracking/BUILD b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/instantmotiontracking/BUILD index 784221084..3dea64053 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/instantmotiontracking/BUILD +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/instantmotiontracking/BUILD @@ -89,6 +89,7 @@ android_binary( "inputVideoStreamName": "input_video", "outputVideoStreamName": "output_video", "flipFramesVertically": "True", + "converterNumBuffers": "2", }, multidex = "native", deps = [ diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/iristrackinggpu/BUILD b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/iristrackinggpu/BUILD index 473404fdd..5bf497f42 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/iristrackinggpu/BUILD +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/iristrackinggpu/BUILD @@ -52,6 +52,7 @@ android_binary( "inputVideoStreamName": "input_video", "outputVideoStreamName": "output_video", "flipFramesVertically": "True", + "converterNumBuffers": "2", }, multidex = "native", deps = [ diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetection3d/build_defs.bzl b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetection3d/build_defs.bzl index 85a2a76ae..9c30dd58c 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetection3d/build_defs.bzl +++ 
b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetection3d/build_defs.bzl @@ -10,5 +10,6 @@ def generate_manifest_values(application_id, app_name): "inputVideoStreamName": "input_video", "outputVideoStreamName": "output_video", "flipFramesVertically": "True", + "converterNumBuffers": "2", } return manifest_values diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectioncpu/BUILD b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectioncpu/BUILD index 080fe4ced..9bb054936 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectioncpu/BUILD +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectioncpu/BUILD @@ -51,6 +51,7 @@ android_binary( "inputVideoStreamName": "input_video", "outputVideoStreamName": "output_video", "flipFramesVertically": "True", + "converterNumBuffers": "2", }, multidex = "native", deps = [ diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectiongpu/BUILD b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectiongpu/BUILD index 56e70c2b6..81f2ed3e6 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectiongpu/BUILD +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectiongpu/BUILD @@ -51,6 +51,7 @@ android_binary( "inputVideoStreamName": "input_video", "outputVideoStreamName": "output_video", "flipFramesVertically": "True", + "converterNumBuffers": "2", }, multidex = "native", deps = [ diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objecttrackinggpu/BUILD b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objecttrackinggpu/BUILD index 220d48067..50ea70f89 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objecttrackinggpu/BUILD +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objecttrackinggpu/BUILD @@ -51,6 +51,7 @@ android_binary( "inputVideoStreamName": "input_video", "outputVideoStreamName": "output_video", "flipFramesVertically": "True", + "converterNumBuffers": "2", }, multidex = "native", deps = [ diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/posetrackinggpu/BUILD b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/posetrackinggpu/BUILD new file mode 100644 index 000000000..4ed51a556 --- /dev/null +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/posetrackinggpu/BUILD @@ -0,0 +1,63 @@ +# Copyright 2020 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +licenses(["notice"]) + +package(default_visibility = ["//visibility:private"]) + +cc_binary( + name = "libmediapipe_jni.so", + linkshared = 1, + linkstatic = 1, + deps = [ + "//mediapipe/graphs/pose_tracking:pose_tracking_gpu_deps", + "//mediapipe/java/com/google/mediapipe/framework/jni:mediapipe_framework_jni", + ], +) + +cc_library( + name = "mediapipe_jni_lib", + srcs = [":libmediapipe_jni.so"], + alwayslink = 1, +) + +android_binary( + name = "posetrackinggpu", + srcs = glob(["*.java"]), + assets = [ + "//mediapipe/graphs/pose_tracking:pose_tracking_gpu.binarypb", + "//mediapipe/modules/pose_landmark:pose_landmark_full_body.tflite", + "//mediapipe/modules/pose_detection:pose_detection.tflite", + ], + assets_dir = "", + manifest = "//mediapipe/examples/android/src/java/com/google/mediapipe/apps/basic:AndroidManifest.xml", + manifest_values = { + "applicationId": "com.google.mediapipe.apps.posetrackinggpu", + "appName": "Pose Tracking", + "mainActivity": ".MainActivity", + "cameraFacingFront": "False", + "binaryGraphName": "pose_tracking_gpu.binarypb", + "inputVideoStreamName": "input_video", + "outputVideoStreamName": "output_video", + "flipFramesVertically": "True", + "converterNumBuffers": "2", + }, + multidex = "native", + deps = [ + ":mediapipe_jni_lib", + "//mediapipe/examples/android/src/java/com/google/mediapipe/apps/basic:basic_lib", + "//mediapipe/framework/formats:landmark_java_proto_lite", + "//mediapipe/java/com/google/mediapipe/framework:android_framework", + ], +) diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/posetrackinggpu/MainActivity.java b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/posetrackinggpu/MainActivity.java new file mode 100644 index 000000000..730aa6e1f --- /dev/null +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/posetrackinggpu/MainActivity.java @@ -0,0 +1,75 @@ +// Copyright 2020 The MediaPipe Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package com.google.mediapipe.apps.posetrackinggpu; + +import android.os.Bundle; +import android.util.Log; +import com.google.mediapipe.formats.proto.LandmarkProto.NormalizedLandmark; +import com.google.mediapipe.formats.proto.LandmarkProto.NormalizedLandmarkList; +import com.google.mediapipe.framework.PacketGetter; +import com.google.protobuf.InvalidProtocolBufferException; + +/** Main activity of MediaPipe pose tracking app. 
*/ +public class MainActivity extends com.google.mediapipe.apps.basic.MainActivity { + private static final String TAG = "MainActivity"; + + private static final String OUTPUT_LANDMARKS_STREAM_NAME = "pose_landmarks"; + + @Override + protected void onCreate(Bundle savedInstanceState) { + super.onCreate(savedInstanceState); + + // To show verbose logging, run: + // adb shell setprop log.tag.MainActivity VERBOSE + if (Log.isLoggable(TAG, Log.VERBOSE)) { + processor.addPacketCallback( + OUTPUT_LANDMARKS_STREAM_NAME, + (packet) -> { + Log.v(TAG, "Received pose landmarks packet."); + try { + NormalizedLandmarkList poseLandmarks = + PacketGetter.getProto(packet, NormalizedLandmarkList.class); + Log.v( + TAG, + "[TS:" + + packet.getTimestamp() + + "] " + + getPoseLandmarksDebugString(poseLandmarks)); + } catch (InvalidProtocolBufferException exception) { + Log.e(TAG, "Failed to get proto.", exception); + } + }); + } + } + + private static String getPoseLandmarksDebugString(NormalizedLandmarkList poseLandmarks) { + String poseLandmarkStr = "Pose landmarks: " + poseLandmarks.getLandmarkCount() + "\n"; + int landmarkIndex = 0; + for (NormalizedLandmark landmark : poseLandmarks.getLandmarkList()) { + poseLandmarkStr += + "\tLandmark [" + + landmarkIndex + + "]: (" + + landmark.getX() + + ", " + + landmark.getY() + + ", " + + landmark.getZ() + + ")\n"; + ++landmarkIndex; + } + return poseLandmarkStr; + } +} diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/templatematchingcpu/BUILD b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/templatematchingcpu/BUILD index 0ceeeee1b..ed3a63a70 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/templatematchingcpu/BUILD +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/templatematchingcpu/BUILD @@ -52,6 +52,7 @@ android_binary( "inputVideoStreamName": "input_video", "outputVideoStreamName": "output_video", "flipFramesVertically": "True", + "converterNumBuffers": "2", }, multidex = "native", deps = [ diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/upperbodyposetrackinggpu/BUILD b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/upperbodyposetrackinggpu/BUILD index fe2da982c..e4e41741f 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/upperbodyposetrackinggpu/BUILD +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/upperbodyposetrackinggpu/BUILD @@ -51,6 +51,7 @@ android_binary( "inputVideoStreamName": "input_video", "outputVideoStreamName": "output_video", "flipFramesVertically": "True", + "converterNumBuffers": "2", }, multidex = "native", deps = [ diff --git a/mediapipe/examples/coral/demo_run_graph_main.cc b/mediapipe/examples/coral/demo_run_graph_main.cc index 0755ecb60..db066043a 100644 --- a/mediapipe/examples/coral/demo_run_graph_main.cc +++ b/mediapipe/examples/coral/demo_run_graph_main.cc @@ -40,7 +40,7 @@ DEFINE_string(output_video_path, "", "Full path of where to save result (.mp4 only). 
" "If not provided, show result in a window."); -::mediapipe::Status RunMPPGraph() { +mediapipe::Status RunMPPGraph() { std::string calculator_graph_config_contents; MP_RETURN_IF_ERROR(mediapipe::file::GetContents( FLAGS_calculator_graph_config_file, &calculator_graph_config_contents)); @@ -143,7 +143,7 @@ DEFINE_string(output_video_path, "", int main(int argc, char** argv) { google::InitGoogleLogging(argv[0]); gflags::ParseCommandLineFlags(&argc, &argv, true); - ::mediapipe::Status run_status = RunMPPGraph(); + mediapipe::Status run_status = RunMPPGraph(); if (!run_status.ok()) { LOG(ERROR) << "Failed to run the graph: " << run_status.message(); return EXIT_FAILURE; diff --git a/mediapipe/examples/desktop/autoflip/calculators/border_detection_calculator.cc b/mediapipe/examples/desktop/autoflip/calculators/border_detection_calculator.cc index 0dc672208..440620fc9 100644 --- a/mediapipe/examples/desktop/autoflip/calculators/border_detection_calculator.cc +++ b/mediapipe/examples/desktop/autoflip/calculators/border_detection_calculator.cc @@ -97,12 +97,12 @@ class BorderDetectionCalculator : public CalculatorBase { }; REGISTER_CALCULATOR(BorderDetectionCalculator); -::mediapipe::Status BorderDetectionCalculator::Open( +mediapipe::Status BorderDetectionCalculator::Open( mediapipe::CalculatorContext* cc) { options_ = cc->Options(); RET_CHECK_LT(options_.vertical_search_distance(), 0.5) << "Search distance must be less than half the full image."; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } mediapipe::Status BorderDetectionCalculator::SetAndCheckInputs( @@ -118,14 +118,14 @@ mediapipe::Status BorderDetectionCalculator::SetAndCheckInputs( RET_CHECK_EQ(frame.rows, frame_height_) << "Input frame dimensions must remain constant throughout the video."; RET_CHECK_EQ(frame.channels(), 3) << "Input video type must be 3-channel"; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } mediapipe::Status BorderDetectionCalculator::Process( mediapipe::CalculatorContext* cc) { if (!cc->Inputs().HasTag(kVideoInputTag) || cc->Inputs().Tag(kVideoInputTag).Value().IsEmpty()) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Input tag VIDEO not set or empty at timestamp: " << cc->InputTimestamp().Value(); } @@ -173,7 +173,7 @@ mediapipe::Status BorderDetectionCalculator::Process( .Tag(kDetectedBorders) .AddPacket(Adopt(features.release()).At(cc->InputTimestamp())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Find the dominant color within an image. 
@@ -291,11 +291,11 @@ void BorderDetectionCalculator::DetectBorder( } } -::mediapipe::Status BorderDetectionCalculator::GetContract( +mediapipe::Status BorderDetectionCalculator::GetContract( mediapipe::CalculatorContract* cc) { cc->Inputs().Tag(kVideoInputTag).Set(); cc->Outputs().Tag(kDetectedBorders).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace autoflip diff --git a/mediapipe/examples/desktop/autoflip/calculators/content_zooming_calculator.cc b/mediapipe/examples/desktop/autoflip/calculators/content_zooming_calculator.cc index 3c22951dc..dad46f924 100644 --- a/mediapipe/examples/desktop/autoflip/calculators/content_zooming_calculator.cc +++ b/mediapipe/examples/desktop/autoflip/calculators/content_zooming_calculator.cc @@ -55,15 +55,15 @@ class ContentZoomingCalculator : public CalculatorBase { ContentZoomingCalculator(const ContentZoomingCalculator&) = delete; ContentZoomingCalculator& operator=(const ContentZoomingCalculator&) = delete; - static ::mediapipe::Status GetContract(mediapipe::CalculatorContract* cc); - ::mediapipe::Status Open(mediapipe::CalculatorContext* cc) override; - ::mediapipe::Status Process(mediapipe::CalculatorContext* cc) override; + static mediapipe::Status GetContract(mediapipe::CalculatorContract* cc); + mediapipe::Status Open(mediapipe::CalculatorContext* cc) override; + mediapipe::Status Process(mediapipe::CalculatorContext* cc) override; private: // Converts bounds to tilt offset, pan offset and height. - ::mediapipe::Status ConvertToPanTiltZoom(float xmin, float xmax, float ymin, - float ymax, int* tilt_offset, - int* pan_offset, int* height); + mediapipe::Status ConvertToPanTiltZoom(float xmin, float xmax, float ymin, + float ymax, int* tilt_offset, + int* pan_offset, int* height); ContentZoomingCalculatorOptions options_; // Detection frame width/height. 
int frame_height_; @@ -89,7 +89,7 @@ class ContentZoomingCalculator : public CalculatorBase { }; REGISTER_CALCULATOR(ContentZoomingCalculator); -::mediapipe::Status ContentZoomingCalculator::GetContract( +mediapipe::Status ContentZoomingCalculator::GetContract( mediapipe::CalculatorContract* cc) { RET_CHECK( !(cc->Inputs().HasTag(kVideoFrame) && cc->Inputs().HasTag(kVideoSize))) @@ -99,7 +99,7 @@ REGISTER_CALCULATOR(ContentZoomingCalculator); } else if (cc->Inputs().HasTag(kVideoSize)) { cc->Inputs().Tag(kVideoSize).Set>(); } else { - return ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "Input VIDEO or VIDEO_SIZE must be provided."; } if (cc->Inputs().HasTag(kSalientRegions)) { @@ -114,27 +114,27 @@ REGISTER_CALCULATOR(ContentZoomingCalculator); if (cc->Outputs().HasTag(kCropRect)) { cc->Outputs().Tag(kCropRect).Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ContentZoomingCalculator::Open( +mediapipe::Status ContentZoomingCalculator::Open( mediapipe::CalculatorContext* cc) { options_ = cc->Options(); if (options_.has_kinematic_options()) { - return ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "Deprecated kinematic_options was set, please set " "kinematic_options_zoom and kinematic_options_tilt."; } if (options_.has_min_motion_to_reframe()) { - return ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "Deprecated min_motion_to_reframe was set, please set " "in kinematic_options_zoom and kinematic_options_tilt " "directly."; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ContentZoomingCalculator::ConvertToPanTiltZoom( +mediapipe::Status ContentZoomingCalculator::ConvertToPanTiltZoom( float xmin, float xmax, float ymin, float ymax, int* tilt_offset, int* pan_offset, int* height) { // Find center of the y-axis offset (for tilt control). 
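For intuition, the hunk below leaves the core mapping of `ConvertToPanTiltZoom` untouched: a normalized detection box becomes pixel offsets via `*pan_offset = frame_width_ * x_center` and `*tilt_offset = frame_height_ * y_center`. Worked toy numbers, invented purely for illustration:

```cpp
#include <utility>

// Invented example values; not taken from this patch or its tests.
std::pair<int, int> ExamplePanTilt() {
  const int frame_width = 1280, frame_height = 720;
  const float x_center = (0.4f + 0.6f) / 2.0f;  // box spans x in [0.4, 0.6]
  const float y_center = (0.3f + 0.5f) / 2.0f;  // box spans y in [0.3, 0.5]
  const int pan_offset = frame_width * x_center;    // 1280 * 0.5 = 640 px
  const int tilt_offset = frame_height * y_center;  // 720 * 0.4 = 288 px
  return {pan_offset, tilt_offset};
}
```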
@@ -161,7 +161,7 @@ REGISTER_CALCULATOR(ContentZoomingCalculator); *tilt_offset = frame_height_ * y_center; *pan_offset = frame_width_ * x_center; *height = frame_height_ * fit_size; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } namespace { @@ -185,12 +185,12 @@ mediapipe::autoflip::RectF ShiftDetection( relative_bounding_box.width() * x_offset_percent); return shifted_bb; } -::mediapipe::Status UpdateRanges(const SalientRegion& region, - const float shift_vertical, - const float shift_horizontal, float* xmin, - float* xmax, float* ymin, float* ymax) { +mediapipe::Status UpdateRanges(const SalientRegion& region, + const float shift_vertical, + const float shift_horizontal, float* xmin, + float* xmax, float* ymin, float* ymax) { if (!region.has_location_normalized()) { - return ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "SalientRegion did not have location normalized set."; } auto location = ShiftDetection(region.location_normalized(), shift_vertical, @@ -200,12 +200,12 @@ mediapipe::autoflip::RectF ShiftDetection( *ymin = fmin(*ymin, location.y()); *ymax = fmax(*ymax, location.y() + location.height()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status UpdateRanges(const mediapipe::Detection& detection, - const float shift_vertical, - const float shift_horizontal, float* xmin, - float* xmax, float* ymin, float* ymax) { +mediapipe::Status UpdateRanges(const mediapipe::Detection& detection, + const float shift_vertical, + const float shift_horizontal, float* xmin, + float* xmax, float* ymin, float* ymax) { RET_CHECK(detection.location_data().format() == mediapipe::LocationData::RELATIVE_BOUNDING_BOX) << "Face detection input is lacking required relative_bounding_box()"; @@ -217,7 +217,7 @@ mediapipe::autoflip::RectF ShiftDetection( *ymin = fmin(*ymin, location.ymin()); *ymax = fmax(*ymax, location.ymin() + location.height()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void MakeStaticFeatures(const int top_border, const int bottom_border, const int frame_width, const int frame_height, @@ -238,21 +238,21 @@ void MakeStaticFeatures(const int top_border, const int bottom_border, } } // namespace -::mediapipe::Status ContentZoomingCalculator::Process( +mediapipe::Status ContentZoomingCalculator::Process( mediapipe::CalculatorContext* cc) { if (cc->Inputs().HasTag(kVideoFrame)) { frame_width_ = cc->Inputs().Tag(kVideoFrame).Get().Width(); frame_height_ = cc->Inputs().Tag(kVideoFrame).Get().Height(); } else if (cc->Inputs().HasTag(kVideoSize)) { if (cc->Inputs().Tag(kVideoSize).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } frame_width_ = cc->Inputs().Tag(kVideoSize).Get>().first; frame_height_ = cc->Inputs().Tag(kVideoSize).Get>().second; } else { - return ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "Input VIDEO or VIDEO_SIZE must be provided."; } @@ -311,7 +311,7 @@ void MakeStaticFeatures(const int top_border, const int bottom_border, default_rect->set_height(frame_height_); cc->Outputs().Tag(kCropRect).Add(default_rect.release(), Timestamp(cc->InputTimestamp())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } auto raw_detections = cc->Inputs().Tag(kDetections).Get>(); @@ -358,10 +358,13 @@ void MakeStaticFeatures(const int top_border, const int bottom_border, int path_width = path_height * target_aspect_; // Update pixel-per-degree 
value for pan/tilt. + int target_height; + MP_RETURN_IF_ERROR(path_solver_height_->GetTargetPosition(&target_height)); + int target_width = target_height * target_aspect_; MP_RETURN_IF_ERROR(path_solver_width_->UpdatePixelsPerDegree( - static_cast(path_width) / kFieldOfView)); + static_cast(target_width) / kFieldOfView)); MP_RETURN_IF_ERROR(path_solver_offset_->UpdatePixelsPerDegree( - static_cast(path_height) / kFieldOfView)); + static_cast(target_height) / kFieldOfView)); // Compute smoothed pan/tilt paths. MP_RETURN_IF_ERROR(path_solver_width_->AddObservation( @@ -412,7 +415,7 @@ void MakeStaticFeatures(const int top_border, const int bottom_border, Timestamp(cc->InputTimestamp())); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace autoflip diff --git a/mediapipe/examples/desktop/autoflip/calculators/face_to_region_calculator.cc b/mediapipe/examples/desktop/autoflip/calculators/face_to_region_calculator.cc index 86f03cf7a..e9904a299 100644 --- a/mediapipe/examples/desktop/autoflip/calculators/face_to_region_calculator.cc +++ b/mediapipe/examples/desktop/autoflip/calculators/face_to_region_calculator.cc @@ -55,9 +55,9 @@ class FaceToRegionCalculator : public CalculatorBase { FaceToRegionCalculator(const FaceToRegionCalculator&) = delete; FaceToRegionCalculator& operator=(const FaceToRegionCalculator&) = delete; - static ::mediapipe::Status GetContract(mediapipe::CalculatorContract* cc); - ::mediapipe::Status Open(mediapipe::CalculatorContext* cc) override; - ::mediapipe::Status Process(mediapipe::CalculatorContext* cc) override; + static mediapipe::Status GetContract(mediapipe::CalculatorContract* cc); + mediapipe::Status Open(mediapipe::CalculatorContext* cc) override; + mediapipe::Status Process(mediapipe::CalculatorContext* cc) override; private: double NormalizeX(const int pixel); @@ -78,17 +78,17 @@ REGISTER_CALCULATOR(FaceToRegionCalculator); FaceToRegionCalculator::FaceToRegionCalculator() {} -::mediapipe::Status FaceToRegionCalculator::GetContract( +mediapipe::Status FaceToRegionCalculator::GetContract( mediapipe::CalculatorContract* cc) { if (cc->Inputs().HasTag("VIDEO")) { cc->Inputs().Tag("VIDEO").Set(); } cc->Inputs().Tag("FACES").Set>(); cc->Outputs().Tag("REGIONS").Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status FaceToRegionCalculator::Open( +mediapipe::Status FaceToRegionCalculator::Open( mediapipe::CalculatorContext* cc) { options_ = cc->Options(); if (!cc->Inputs().HasTag("VIDEO")) { @@ -105,7 +105,7 @@ FaceToRegionCalculator::FaceToRegionCalculator() {} scorer_ = absl::make_unique(options_.scorer_options()); frame_width_ = -1; frame_height_ = -1; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } inline double FaceToRegionCalculator::NormalizeX(const int pixel) { @@ -146,11 +146,11 @@ void FaceToRegionCalculator::ExtendSalientRegionWithPoint( } } -::mediapipe::Status FaceToRegionCalculator::Process( +mediapipe::Status FaceToRegionCalculator::Process( mediapipe::CalculatorContext* cc) { if (cc->Inputs().HasTag("VIDEO") && cc->Inputs().Tag("VIDEO").Value().IsEmpty()) { - return ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "No VIDEO input at time " << cc->InputTimestamp().Seconds(); } @@ -280,7 +280,7 @@ void FaceToRegionCalculator::ExtendSalientRegionWithPoint( } cc->Outputs().Tag("REGIONS").Add(region_set.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // 
namespace autoflip diff --git a/mediapipe/examples/desktop/autoflip/calculators/localization_to_region_calculator.cc b/mediapipe/examples/desktop/autoflip/calculators/localization_to_region_calculator.cc index 572d80998..106be49b9 100644 --- a/mediapipe/examples/desktop/autoflip/calculators/localization_to_region_calculator.cc +++ b/mediapipe/examples/desktop/autoflip/calculators/localization_to_region_calculator.cc @@ -38,9 +38,9 @@ class LocalizationToRegionCalculator : public mediapipe::CalculatorBase { LocalizationToRegionCalculator& operator=( const LocalizationToRegionCalculator&) = delete; - static ::mediapipe::Status GetContract(mediapipe::CalculatorContract* cc); - ::mediapipe::Status Open(mediapipe::CalculatorContext* cc) override; - ::mediapipe::Status Process(mediapipe::CalculatorContext* cc) override; + static mediapipe::Status GetContract(mediapipe::CalculatorContract* cc); + mediapipe::Status Open(mediapipe::CalculatorContext* cc) override; + mediapipe::Status Process(mediapipe::CalculatorContext* cc) override; private: // Calculator options. @@ -84,21 +84,21 @@ void FillSalientRegion(const mediapipe::Detection& detection, } // namespace -::mediapipe::Status LocalizationToRegionCalculator::GetContract( +mediapipe::Status LocalizationToRegionCalculator::GetContract( mediapipe::CalculatorContract* cc) { cc->Inputs().Tag("DETECTIONS").Set>(); cc->Outputs().Tag("REGIONS").Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status LocalizationToRegionCalculator::Open( +mediapipe::Status LocalizationToRegionCalculator::Open( mediapipe::CalculatorContext* cc) { options_ = cc->Options(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status LocalizationToRegionCalculator::Process( +mediapipe::Status LocalizationToRegionCalculator::Process( mediapipe::CalculatorContext* cc) { const auto& annotations = cc->Inputs().Tag("DETECTIONS").Get>(); @@ -119,7 +119,7 @@ void FillSalientRegion(const mediapipe::Detection& detection, } cc->Outputs().Tag("REGIONS").Add(regions.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace autoflip diff --git a/mediapipe/examples/desktop/autoflip/calculators/scene_cropping_calculator.cc b/mediapipe/examples/desktop/autoflip/calculators/scene_cropping_calculator.cc index a8ba3eeb9..8cd6c42aa 100644 --- a/mediapipe/examples/desktop/autoflip/calculators/scene_cropping_calculator.cc +++ b/mediapipe/examples/desktop/autoflip/calculators/scene_cropping_calculator.cc @@ -68,8 +68,8 @@ constexpr char kOutputSummary[] = "CROPPING_SUMMARY"; constexpr char kExternalRenderingPerFrame[] = "EXTERNAL_RENDERING_PER_FRAME"; constexpr char kExternalRenderingFullVid[] = "EXTERNAL_RENDERING_FULL_VID"; -::mediapipe::Status SceneCroppingCalculator::GetContract( - ::mediapipe::CalculatorContract* cc) { +mediapipe::Status SceneCroppingCalculator::GetContract( + mediapipe::CalculatorContract* cc) { if (cc->InputSidePackets().HasTag(kInputExternalSettings)) { cc->InputSidePackets().Tag(kInputExternalSettings).Set(); } @@ -136,10 +136,10 @@ constexpr char kExternalRenderingFullVid[] = "EXTERNAL_RENDERING_FULL_VID"; cc->Outputs().HasTag(kExternalRenderingFullVid) || cc->Outputs().HasTag(kOutputCroppedFrames)) << "At leaset one output stream must be specified"; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status SceneCroppingCalculator::Open(CalculatorContext* cc) { +mediapipe::Status 
SceneCroppingCalculator::Open(CalculatorContext* cc) { options_ = cc->Options(); RET_CHECK_GT(options_.max_scene_size(), 0) << "Maximum scene size is non-positive."; @@ -175,12 +175,12 @@ constexpr char kExternalRenderingFullVid[] = "EXTERNAL_RENDERING_FULL_VID"; should_perform_frame_cropping_ = cc->Outputs().HasTag(kOutputCroppedFrames); scene_camera_motion_analyzer_ = absl::make_unique( options_.scene_camera_motion_analyzer_options()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } namespace { -::mediapipe::Status ParseAspectRatioString( - const std::string& aspect_ratio_string, double* aspect_ratio) { +mediapipe::Status ParseAspectRatioString(const std::string& aspect_ratio_string, + double* aspect_ratio) { std::string error_msg = "Aspect ratio std::string must be in the format of 'width:height', e.g. " "'1:1' or '5:4', your input was " + @@ -196,7 +196,7 @@ namespace { &height_ratio)) << error_msg; *aspect_ratio = width_ratio / height_ratio; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void ConstructExternalRenderMessage( const cv::Rect& crop_from_location, const cv::Rect& render_to_location, @@ -235,8 +235,8 @@ int RoundToEven(float value) { } // namespace -::mediapipe::Status SceneCroppingCalculator::InitializeSceneCroppingCalculator( - ::mediapipe::CalculatorContext* cc) { +mediapipe::Status SceneCroppingCalculator::InitializeSceneCroppingCalculator( + mediapipe::CalculatorContext* cc) { if (cc->Inputs().HasTag(kInputVideoFrames)) { const auto& frame = cc->Inputs().Tag(kInputVideoFrames).Get(); frame_width_ = frame.Width(); @@ -248,7 +248,7 @@ int RoundToEven(float value) { frame_height_ = cc->Inputs().Tag(kInputVideoSize).Get>().second; } else { - return ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "Input VIDEO or VIDEO_SIZE must be provided."; } RET_CHECK_GT(frame_height_, 0) << "Input frame height is non-positive."; @@ -337,18 +337,18 @@ int RoundToEven(float value) { scene_cropper_ = absl::make_unique( options_.camera_motion_options(), frame_width_, frame_height_); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -bool HasFrameSignal(::mediapipe::CalculatorContext* cc) { +bool HasFrameSignal(mediapipe::CalculatorContext* cc) { if (cc->Inputs().HasTag(kInputVideoFrames)) { return !cc->Inputs().Tag(kInputVideoFrames).Value().IsEmpty(); } return !cc->Inputs().Tag(kInputVideoSize).Value().IsEmpty(); } -::mediapipe::Status SceneCroppingCalculator::Process( - ::mediapipe::CalculatorContext* cc) { +mediapipe::Status SceneCroppingCalculator::Process( + mediapipe::CalculatorContext* cc) { // Sets frame dimension and initializes scenecroppingcalculator on first video // frame. 
if (frame_width_ < 0) { @@ -417,11 +417,11 @@ bool HasFrameSignal(::mediapipe::CalculatorContext* cc) { continue_last_scene_ = true; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status SceneCroppingCalculator::Close( - ::mediapipe::CalculatorContext* cc) { +mediapipe::Status SceneCroppingCalculator::Close( + mediapipe::CalculatorContext* cc) { if (!scene_frame_timestamps_.empty()) { MP_RETURN_IF_ERROR(ProcessScene(/* is_end_of_scene = */ true, cc)); } @@ -435,12 +435,12 @@ bool HasFrameSignal(::mediapipe::CalculatorContext* cc) { .Tag(kExternalRenderingFullVid) .Add(external_render_list_.release(), Timestamp::PostStream()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // TODO: split this function into two, one for calculating the border // sizes, the other for the actual removal of borders from the frames. -::mediapipe::Status SceneCroppingCalculator::RemoveStaticBorders( +mediapipe::Status SceneCroppingCalculator::RemoveStaticBorders( CalculatorContext* cc, int* top_border_size, int* bottom_border_size) { *top_border_size = 0; *bottom_border_size = 0; @@ -492,11 +492,10 @@ bool HasFrameSignal(::mediapipe::CalculatorContext* cc) { *key_frame_infos_[i].mutable_detections() = adjusted_detections; } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status -SceneCroppingCalculator::InitializeFrameCropRegionComputer() { +mediapipe::Status SceneCroppingCalculator::InitializeFrameCropRegionComputer() { key_frame_crop_options_ = options_.key_frame_crop_options(); MP_RETURN_IF_ERROR( SetKeyFrameCropTarget(frame_width_, effective_frame_height_, @@ -505,7 +504,7 @@ SceneCroppingCalculator::InitializeFrameCropRegionComputer() { VLOG(1) << "Target height " << key_frame_crop_options_.target_height(); frame_crop_region_computer_ = absl::make_unique(key_frame_crop_options_); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void SceneCroppingCalculator::FilterKeyFrameInfo() { @@ -531,7 +530,7 @@ void SceneCroppingCalculator::FilterKeyFrameInfo() { } } -::mediapipe::Status SceneCroppingCalculator::ProcessScene( +mediapipe::Status SceneCroppingCalculator::ProcessScene( const bool is_end_of_scene, CalculatorContext* cc) { // Removes detections under special circumstances. FilterKeyFrameInfo(); @@ -654,10 +653,10 @@ void SceneCroppingCalculator::FilterKeyFrameInfo() { is_key_frames_.clear(); static_features_.clear(); static_features_timestamps_.clear(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status SceneCroppingCalculator::FormatAndOutputCroppedFrames( +mediapipe::Status SceneCroppingCalculator::FormatAndOutputCroppedFrames( const int crop_width, const int crop_height, const int num_frames, std::vector* render_to_locations, bool* apply_padding, std::vector* padding_colors, float* vertical_fill_percent, @@ -730,7 +729,7 @@ void SceneCroppingCalculator::FilterKeyFrameInfo() { padding_colors->push_back(padding_color_to_add); } if (!cropped_frames_ptr) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Resizes cropped frames, pads frames, and output frames. 
@@ -773,7 +772,7 @@ void SceneCroppingCalculator::FilterKeyFrameInfo() { .Add(scaled_frame.release(), timestamp); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } mediapipe::Status SceneCroppingCalculator::OutputVizFrames( @@ -816,7 +815,7 @@ mediapipe::Status SceneCroppingCalculator::OutputVizFrames( .Add(viz_frames[i].release(), Timestamp(scene_frame_timestamps_[i])); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } REGISTER_CALCULATOR(SceneCroppingCalculator); diff --git a/mediapipe/examples/desktop/autoflip/calculators/scene_cropping_calculator.h b/mediapipe/examples/desktop/autoflip/calculators/scene_cropping_calculator.h index 1c00e6210..4ffacafca 100644 --- a/mediapipe/examples/desktop/autoflip/calculators/scene_cropping_calculator.h +++ b/mediapipe/examples/desktop/autoflip/calculators/scene_cropping_calculator.h @@ -125,35 +125,35 @@ namespace autoflip { // fields are optional with default settings. class SceneCroppingCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); // Validates calculator options and initializes SceneCameraMotionAnalyzer and // SceneCropper. - ::mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; // Buffers each scene frame and its timestamp. Packs and stores KeyFrameInfo // for key frames (a.k.a. frames with detection features). When a shot // boundary is encountered or when the buffer is full, calls ProcessScene() // to process the scene at once, and clears buffers. - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; // Calls ProcessScene() on remaining buffered frames. Optionally outputs a // VideoCroppingSummary if the output stream CROPPING_SUMMARY is present. - ::mediapipe::Status Close(::mediapipe::CalculatorContext* cc) override; + mediapipe::Status Close(mediapipe::CalculatorContext* cc) override; private: // Removes any static borders from the scene frames before cropping. The // arguments |top_border_size| and |bottom_border_size| report the size of the // removed borders. - ::mediapipe::Status RemoveStaticBorders(CalculatorContext* cc, - int* top_border_size, - int* bottom_border_size); + mediapipe::Status RemoveStaticBorders(CalculatorContext* cc, + int* top_border_size, + int* bottom_border_size); // Sets up autoflip after first frame is received and input size is known. - ::mediapipe::Status InitializeSceneCroppingCalculator( - ::mediapipe::CalculatorContext* cc); + mediapipe::Status InitializeSceneCroppingCalculator( + mediapipe::CalculatorContext* cc); // Initializes a FrameCropRegionComputer given input and target frame sizes. - ::mediapipe::Status InitializeFrameCropRegionComputer(); + mediapipe::Status InitializeFrameCropRegionComputer(); // Processes a scene using buffered scene frames and KeyFrameInfos: // 1. Computes key frame crop regions using a FrameCropRegionComputer. @@ -165,8 +165,8 @@ class SceneCroppingCalculator : public CalculatorBase { // to force flush). // 6. Optionally outputs visualization frames. // 7. Optionally updates cropping summary. - ::mediapipe::Status ProcessScene(const bool is_end_of_scene, - CalculatorContext* cc); + mediapipe::Status ProcessScene(const bool is_end_of_scene, + CalculatorContext* cc); // Formats and outputs the cropped frames passed in through // |cropped_frames_ptr|. 
Scales them to be at least as big as the target @@ -177,14 +177,14 @@ class SceneCroppingCalculator : public CalculatorBase { // cropped frames. This is useful when the calculator is only used for // computing the cropping metadata rather than doing the actual cropping // operation. - ::mediapipe::Status FormatAndOutputCroppedFrames( + mediapipe::Status FormatAndOutputCroppedFrames( const int crop_width, const int crop_height, const int num_frames, std::vector* render_to_locations, bool* apply_padding, std::vector* padding_colors, float* vertical_fill_percent, const std::vector* cropped_frames_ptr, CalculatorContext* cc); // Draws and outputs visualization frames if those streams are present. - ::mediapipe::Status OutputVizFrames( + mediapipe::Status OutputVizFrames( const std::vector& key_frame_crop_results, const std::vector& focus_point_frames, const std::vector& crop_from_locations, diff --git a/mediapipe/examples/desktop/autoflip/calculators/shot_boundary_calculator.cc b/mediapipe/examples/desktop/autoflip/calculators/shot_boundary_calculator.cc index 8d8e2570a..9a091523d 100644 --- a/mediapipe/examples/desktop/autoflip/calculators/shot_boundary_calculator.cc +++ b/mediapipe/examples/desktop/autoflip/calculators/shot_boundary_calculator.cc @@ -60,7 +60,7 @@ class ShotBoundaryCalculator : public mediapipe::CalculatorBase { ShotBoundaryCalculator(const ShotBoundaryCalculator&) = delete; ShotBoundaryCalculator& operator=(const ShotBoundaryCalculator&) = delete; - static ::mediapipe::Status GetContract(mediapipe::CalculatorContract* cc); + static mediapipe::Status GetContract(mediapipe::CalculatorContract* cc); mediapipe::Status Open(mediapipe::CalculatorContext* cc) override; mediapipe::Status Process(mediapipe::CalculatorContext* cc) override; @@ -103,7 +103,7 @@ mediapipe::Status ShotBoundaryCalculator::Open( options_ = cc->Options(); last_shot_timestamp_ = Timestamp(0); init_ = false; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void ShotBoundaryCalculator::Transmit(mediapipe::CalculatorContext* cc, @@ -127,7 +127,7 @@ void ShotBoundaryCalculator::Transmit(mediapipe::CalculatorContext* cc, } } -::mediapipe::Status ShotBoundaryCalculator::Process( +mediapipe::Status ShotBoundaryCalculator::Process( mediapipe::CalculatorContext* cc) { // Connect to input frame and make a mutable copy. cv::Mat frame_org = mediapipe::formats::MatView( @@ -142,7 +142,7 @@ void ShotBoundaryCalculator::Transmit(mediapipe::CalculatorContext* cc, last_histogram_ = current_histogram; init_ = true; Transmit(cc, false); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } double current_motion_estimate = @@ -152,7 +152,7 @@ void ShotBoundaryCalculator::Transmit(mediapipe::CalculatorContext* cc, if (motion_history_.size() != options_.window_size()) { Transmit(cc, false); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Shot detection algorithm is a mixture of adaptive (controlled with @@ -176,14 +176,14 @@ void ShotBoundaryCalculator::Transmit(mediapipe::CalculatorContext* cc, // Store histogram for next frame. 
last_histogram_ = current_histogram; motion_history_.pop_back(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ShotBoundaryCalculator::GetContract( +mediapipe::Status ShotBoundaryCalculator::GetContract( mediapipe::CalculatorContract* cc) { cc->Inputs().Tag(kVideoInputTag).Set(); cc->Outputs().Tag(kShotChangeTag).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace autoflip diff --git a/mediapipe/examples/desktop/autoflip/calculators/signal_fusing_calculator.cc b/mediapipe/examples/desktop/autoflip/calculators/signal_fusing_calculator.cc index 703932938..a85c8bb2e 100644 --- a/mediapipe/examples/desktop/autoflip/calculators/signal_fusing_calculator.cc +++ b/mediapipe/examples/desktop/autoflip/calculators/signal_fusing_calculator.cc @@ -105,7 +105,7 @@ class SignalFusingCalculator : public mediapipe::CalculatorBase { SignalFusingCalculator(const SignalFusingCalculator&) = delete; SignalFusingCalculator& operator=(const SignalFusingCalculator&) = delete; - static ::mediapipe::Status GetContract(mediapipe::CalculatorContract* cc); + static mediapipe::Status GetContract(mediapipe::CalculatorContract* cc); mediapipe::Status Open(mediapipe::CalculatorContext* cc) override; mediapipe::Status Process(mediapipe::CalculatorContext* cc) override; mediapipe::Status Close(mediapipe::CalculatorContext* cc) override; @@ -166,7 +166,7 @@ mediapipe::Status SignalFusingCalculator::Open( process_by_scene_ = false; } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } mediapipe::Status SignalFusingCalculator::Close( @@ -175,7 +175,7 @@ mediapipe::Status SignalFusingCalculator::Close( MP_RETURN_IF_ERROR(ProcessScene(cc)); scene_frames_.clear(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } mediapipe::Status SignalFusingCalculator::ProcessScene( @@ -240,7 +240,7 @@ mediapipe::Status SignalFusingCalculator::ProcessScene( } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } std::vector SignalFusingCalculator::GetSignalPackets( @@ -302,17 +302,17 @@ mediapipe::Status SignalFusingCalculator::Process( scene_frames_.clear(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status SignalFusingCalculator::GetContract( +mediapipe::Status SignalFusingCalculator::GetContract( mediapipe::CalculatorContract* cc) { if (cc->Inputs().NumEntries(kSignalInputsTag) > 0) { SetupTagInput(cc); } else { SetupOrderedInput(cc); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace autoflip diff --git a/mediapipe/examples/desktop/autoflip/calculators/video_filtering_calculator.cc b/mediapipe/examples/desktop/autoflip/calculators/video_filtering_calculator.cc index b8af09ce8..0a7c34d9d 100644 --- a/mediapipe/examples/desktop/autoflip/calculators/video_filtering_calculator.cc +++ b/mediapipe/examples/desktop/autoflip/calculators/video_filtering_calculator.cc @@ -57,20 +57,20 @@ class VideoFilteringCalculator : public CalculatorBase { VideoFilteringCalculator() = default; ~VideoFilteringCalculator() override = default; - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; }; REGISTER_CALCULATOR(VideoFilteringCalculator); -::mediapipe::Status VideoFilteringCalculator::GetContract( +mediapipe::Status VideoFilteringCalculator::GetContract( 
CalculatorContract* cc) { cc->Inputs().Tag(kInputFrameTag).Set<ImageFrame>(); cc->Outputs().Tag(kOutputFrameTag).Set<ImageFrame>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status VideoFilteringCalculator::Process(CalculatorContext* cc) { +mediapipe::Status VideoFilteringCalculator::Process(CalculatorContext* cc) { const auto& options = cc->Options<VideoFilteringCalculatorOptions>(); const Packet& input_packet = cc->Inputs().Tag(kInputFrameTag).Value(); @@ -84,7 +84,7 @@ REGISTER_CALCULATOR(VideoFilteringCalculator); if (filter_type == VideoFilteringCalculatorOptions::AspectRatioFilter::NO_FILTERING) { cc->Outputs().Tag(kOutputFrameTag).AddPacket(input_packet); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } const int target_width = options.aspect_ratio_filter().target_width(); const int target_height = options.aspect_ratio_filter().target_height(); @@ -92,7 +92,7 @@ REGISTER_CALCULATOR(VideoFilteringCalculator); RET_CHECK_GT(target_height, 0); bool should_pass = false; - cv::Mat frame_mat = ::mediapipe::formats::MatView(&frame); + cv::Mat frame_mat = mediapipe::formats::MatView(&frame); const double ratio = static_cast<double>(frame_mat.cols) / frame_mat.rows; const double target_ratio = static_cast<double>(target_width) / target_height; if (filter_type == VideoFilteringCalculatorOptions::AspectRatioFilter:: @@ -106,16 +106,16 @@ REGISTER_CALCULATOR(VideoFilteringCalculator); } if (should_pass) { cc->Outputs().Tag(kOutputFrameTag).AddPacket(input_packet); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } if (options.fail_if_any()) { - return ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << absl::Substitute( + return mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << absl::Substitute( "Failing due to aspect ratio. Target aspect ratio: $0. Frame " "width: $1, height: $2.", target_ratio, frame.Width(), frame.Height()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace autoflip } // namespace mediapipe diff --git a/mediapipe/examples/desktop/autoflip/calculators/video_filtering_calculator_test.cc b/mediapipe/examples/desktop/autoflip/calculators/video_filtering_calculator_test.cc index 4f907d001..9927d8077 100644 --- a/mediapipe/examples/desktop/autoflip/calculators/video_filtering_calculator_test.cc +++ b/mediapipe/examples/desktop/autoflip/calculators/video_filtering_calculator_test.cc @@ -166,8 +166,8 @@ TEST(VerticalFrameRemovalCalculatorTest, OutputError) { runner->MutableInputs() ->Tag("INPUT_FRAMES") .packets.push_back(Adopt(input_frame.release()).At(Timestamp(1000))); - ::mediapipe::Status status = runner->Run(); - EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kUnknown); + mediapipe::Status status = runner->Run(); + EXPECT_EQ(status.code(), mediapipe::StatusCode::kUnknown); EXPECT_THAT(status.ToString(), ::testing::HasSubstr("Failing due to aspect ratio")); } diff --git a/mediapipe/examples/desktop/autoflip/quality/frame_crop_region_computer.cc b/mediapipe/examples/desktop/autoflip/quality/frame_crop_region_computer.cc index 0b57cd0da..8626ae715 100644 --- a/mediapipe/examples/desktop/autoflip/quality/frame_crop_region_computer.cc +++ b/mediapipe/examples/desktop/autoflip/quality/frame_crop_region_computer.cc @@ -22,7 +22,7 @@ namespace mediapipe { namespace autoflip { -::mediapipe::Status FrameCropRegionComputer::ExpandSegmentUnderConstraint( +mediapipe::Status FrameCropRegionComputer::ExpandSegmentUnderConstraint( const Segment& segment_to_add, const Segment& base_segment, const int max_length, Segment* combined_segment, CoverType*
cover_type) const { @@ -75,10 +75,10 @@ namespace autoflip { *combined_segment = std::make_pair(combined_segment_left, combined_segment_right); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status FrameCropRegionComputer::ExpandRectUnderConstraints( +mediapipe::Status FrameCropRegionComputer::ExpandRectUnderConstraints( const Rect& rect_to_add, const int max_width, const int max_height, Rect* base_rect, CoverType* cover_type) const { RET_CHECK(base_rect != nullptr) << "Base rect is null."; @@ -129,7 +129,7 @@ namespace autoflip { } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void FrameCropRegionComputer::UpdateCropRegionScore( @@ -167,7 +167,7 @@ void FrameCropRegionComputer::UpdateCropRegionScore( } } -::mediapipe::Status FrameCropRegionComputer::ComputeFrameCropRegion( +mediapipe::Status FrameCropRegionComputer::ComputeFrameCropRegion( const KeyFrameInfo& frame_info, KeyFrameCropResult* crop_result) const { RET_CHECK(crop_result != nullptr) << "KeyFrameCropResult is null."; @@ -254,7 +254,7 @@ void FrameCropRegionComputer::UpdateCropRegionScore( crop_result->set_region_is_empty(crop_region_is_empty); crop_result->set_region_score(crop_region_score); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace autoflip diff --git a/mediapipe/examples/desktop/autoflip/quality/frame_crop_region_computer.h b/mediapipe/examples/desktop/autoflip/quality/frame_crop_region_computer.h index d7adbc41c..1d5107bb3 100644 --- a/mediapipe/examples/desktop/autoflip/quality/frame_crop_region_computer.h +++ b/mediapipe/examples/desktop/autoflip/quality/frame_crop_region_computer.h @@ -43,7 +43,7 @@ class FrameCropRegionComputer { // consider static features, and simply tries to fit the detected features // within the target frame size. The score of the crop region is aggregated // from individual feature scores given the score aggregation type. - ::mediapipe::Status ComputeFrameCropRegion( + mediapipe::Status ComputeFrameCropRegion( const KeyFrameInfo& frame_info, KeyFrameCropResult* crop_result) const; protected: @@ -75,10 +75,11 @@ class FrameCropRegionComputer { // fraction of the new segment exceeds the maximum length. // In this case the combined segment is the base segment, and cover // type is NOT_COVERED. - ::mediapipe::Status ExpandSegmentUnderConstraint( - const Segment& segment_to_add, const Segment& base_segment, - const int max_length, Segment* combined_segment, - CoverType* cover_type) const; + mediapipe::Status ExpandSegmentUnderConstraint(const Segment& segment_to_add, + const Segment& base_segment, + const int max_length, + Segment* combined_segment, + CoverType* cover_type) const; // Expands a base rectangle to cover a new rectangle to be added under width // and height constraints. The operation is best-effort. It considers @@ -87,11 +88,11 @@ class FrameCropRegionComputer { // FULLY_COVERED if the new rectangle is fully covered in both directions, // PARTIALLY_COVERED if it is at least partially covered in both directions, // and NOT_COVERED if it is not covered in either direction. 
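The two expansion helpers documented above share one contract: grow a base extent to cover a new one, without exceeding a maximum size, and report how much of the new extent ended up covered. A minimal standalone sketch of the one-dimensional case, with Segment and CoverType as simplified stand-ins (the real implementation also attempts partial coverage before reporting failure):

#include <algorithm>
#include <utility>

enum CoverType { FULLY_COVERED, PARTIALLY_COVERED, NOT_COVERED };
using Segment = std::pair<int, int>;  // [left, right]

// Grows |base| to also cover |to_add|, but never beyond |max_length|.
CoverType ExpandSegment(const Segment& to_add, const Segment& base,
                        int max_length, Segment* combined) {
  const int left = std::min(to_add.first, base.first);
  const int right = std::max(to_add.second, base.second);
  if (right - left <= max_length) {
    *combined = {left, right};
    return FULLY_COVERED;  // the union fits within the constraint
  }
  // The union is too long: keep the base segment unchanged. The actual
  // FrameCropRegionComputer first tries to cover part of |to_add| and only
  // reports NOT_COVERED when even partial coverage fails.
  *combined = base;
  return NOT_COVERED;
}

The two-dimensional ExpandRectUnderConstraints applies the same idea per axis and combines the per-axis cover types, as its documentation below describes.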
- ::mediapipe::Status ExpandRectUnderConstraints(const Rect& rect_to_add, - const int max_width, - const int max_height, - Rect* base_rect, - CoverType* cover_type) const; + mediapipe::Status ExpandRectUnderConstraints(const Rect& rect_to_add, + const int max_width, + const int max_height, + Rect* base_rect, + CoverType* cover_type) const; // Updates crop region score given current feature score, whether the feature // is required, and the score aggregation type. Ignores negative scores. diff --git a/mediapipe/examples/desktop/autoflip/quality/kinematic_path_solver.cc b/mediapipe/examples/desktop/autoflip/quality/kinematic_path_solver.cc index cee5b40fb..f0e4a19b1 100644 --- a/mediapipe/examples/desktop/autoflip/quality/kinematic_path_solver.cc +++ b/mediapipe/examples/desktop/autoflip/quality/kinematic_path_solver.cc @@ -14,10 +14,13 @@ int Median(const std::deque<std::pair<uint64, int>>& positions_raw) { return positions[n]; } } // namespace -::mediapipe::Status KinematicPathSolver::AddObservation(int position, - const uint64 time_us) { +mediapipe::Status KinematicPathSolver::AddObservation(int position, + const uint64 time_us) { if (!initialized_) { current_position_px_ = position; + target_position_px_ = position; + motion_state_ = false; + mean_delta_t_ = -1; raw_positions_at_time_.push_front( std::pair<uint64, int>(time_us, position)); current_time_ = time_us; @@ -31,7 +34,9 @@ int Median(const std::deque<std::pair<uint64, int>>& positions_raw) { << "Reframe window cannot exceed min_motion_to_reframe."; RET_CHECK_GE(options_.filtering_time_window_us(), 0) << "update_rate_seconds must be greater than 0."; - return ::mediapipe::OkStatus(); + RET_CHECK_GE(options_.mean_period_update_rate(), 0) + << "mean_period_update_rate must be greater than 0."; + return mediapipe::OkStatus(); } RET_CHECK(current_time_ < time_us) @@ -50,18 +55,30 @@ int Median(const std::deque<std::pair<uint64, int>>& positions_raw) { double delta_degs = (Median(raw_positions_at_time_) - current_position_px_) / pixels_per_degree_; - // If the motion is smaller than the min, don't use the update. - if (abs(delta_degs) < options_.min_motion_to_reframe()) { - position = current_position_px_; + // If the motion is smaller than the min_motion_to_reframe and camera is + // stationary, don't use the update. + if (abs(delta_degs) < options_.min_motion_to_reframe() && !motion_state_) { delta_degs = 0; + motion_state_ = false; + } else if (abs(delta_degs) < options_.reframe_window() && motion_state_) { + // If the motion is smaller than the reframe_window and camera is moving, + // don't use the update. + delta_degs = 0; + motion_state_ = false; } else if (delta_degs > 0) { // Apply new position, less the reframe window size. - position = position - pixels_per_degree_ * options_.reframe_window(); - delta_degs = (position - current_position_px_) / pixels_per_degree_; + target_position_px_ = + position - pixels_per_degree_ * options_.reframe_window(); + delta_degs = + (target_position_px_ - current_position_px_) / pixels_per_degree_; + motion_state_ = true; } else { // Apply new position, plus the reframe window size. - position = position + pixels_per_degree_ * options_.reframe_window(); - delta_degs = (position - current_position_px_) / pixels_per_degree_; + target_position_px_ = + position + pixels_per_degree_ * options_.reframe_window(); + delta_degs = + (target_position_px_ - current_position_px_) / pixels_per_degree_; + motion_state_ = true; } // Time and position updates.
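The AddObservation changes above replace the single motion threshold with a two-threshold hysteresis: reframing starts only once the median-filtered motion exceeds min_motion_to_reframe, and continues until the camera comes within reframe_window of the target. A standalone model of that state machine, with illustrative names and values rather than the KinematicPathSolver API:

#include <cmath>
#include <cstdio>

struct ReframeHysteresis {
  double min_motion_to_reframe;  // degrees needed to start moving
  double reframe_window;         // degrees at which movement settles
  bool moving = false;

  // Returns the motion (in degrees) to apply for one observation.
  double Update(double delta_degs) {
    if (std::abs(delta_degs) < min_motion_to_reframe && !moving) {
      return 0;  // stationary camera ignores small motion
    }
    if (std::abs(delta_degs) < reframe_window && moving) {
      moving = false;  // close enough to the target: settle
      return 0;
    }
    moving = true;
    // Stop short of the target by the reframe window, as in the hunk above.
    return delta_degs > 0 ? delta_degs - reframe_window
                          : delta_degs + reframe_window;
  }
};

int main() {
  ReframeHysteresis h{/*min_motion_to_reframe=*/1.2, /*reframe_window=*/0.75};
  for (double delta : {0.5, 1.5, 0.9, 0.5}) {
    const double move = h.Update(delta);
    std::printf("delta=%.2f move=%.2f moving=%d\n", delta, move,
                h.moving ? 1 : 0);
  }
}

Without the hysteresis, a subject hovering near the threshold would make the camera alternately stick and jump; the two states give it a dead zone on each side.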
@@ -82,17 +99,25 @@ int Median(const std::deque>& positions_raw) { return UpdatePrediction(time_us); } -::mediapipe::Status KinematicPathSolver::UpdatePrediction(const int64 time_us) { +mediapipe::Status KinematicPathSolver::UpdatePrediction(const int64 time_us) { RET_CHECK(current_time_ < time_us) << "Prediction time added before a prior observation or prediction."; - // Time since last state/prediction update. + + // Time since last state/prediction update, smoothed by + // mean_period_update_rate. double delta_t = (time_us - current_time_) / 1000000.0; + if (mean_delta_t_ < 0) { + mean_delta_t_ = delta_t; + } else { + mean_delta_t_ = mean_delta_t_ * (1 - options_.mean_period_update_rate()) + + delta_t * options_.mean_period_update_rate(); + } // Position update limited by min/max. - - const double update_position_px = + double update_position_px = current_position_px_ + - current_velocity_deg_per_s_ * delta_t * pixels_per_degree_; + current_velocity_deg_per_s_ * mean_delta_t_ * pixels_per_degree_; + if (update_position_px < min_location_) { current_position_px_ = min_location_; current_velocity_deg_per_s_ = 0; @@ -104,21 +129,28 @@ int Median(const std::deque>& positions_raw) { } current_time_ = time_us; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status KinematicPathSolver::GetState(int* position) { +mediapipe::Status KinematicPathSolver::GetState(int* position) { RET_CHECK(initialized_) << "GetState called before first observation added."; *position = round(current_position_px_); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status KinematicPathSolver::UpdatePixelsPerDegree( +mediapipe::Status KinematicPathSolver::GetTargetPosition(int* target_position) { + RET_CHECK(initialized_) + << "GetTargetPosition called before first observation added."; + *target_position = round(target_position_px_); + return mediapipe::OkStatus(); +} + +mediapipe::Status KinematicPathSolver::UpdatePixelsPerDegree( const float pixels_per_degree) { RET_CHECK_GT(pixels_per_degree_, 0) << "pixels_per_degree must be larger than 0."; pixels_per_degree_ = pixels_per_degree; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace autoflip diff --git a/mediapipe/examples/desktop/autoflip/quality/kinematic_path_solver.h b/mediapipe/examples/desktop/autoflip/quality/kinematic_path_solver.h index 9634b6ee9..60ac4dc35 100644 --- a/mediapipe/examples/desktop/autoflip/quality/kinematic_path_solver.h +++ b/mediapipe/examples/desktop/autoflip/quality/kinematic_path_solver.h @@ -43,13 +43,15 @@ class KinematicPathSolver { initialized_(false), pixels_per_degree_(pixels_per_degree) {} // Add an observation (detection) at a position and time. - ::mediapipe::Status AddObservation(int position, const uint64 time_us); + mediapipe::Status AddObservation(int position, const uint64 time_us); // Get the predicted position at a time. - ::mediapipe::Status UpdatePrediction(const int64 time_us); + mediapipe::Status UpdatePrediction(const int64 time_us); // Get the state at a time. - ::mediapipe::Status GetState(int* position); + mediapipe::Status GetState(int* position); // Update PixelPerDegree value. - ::mediapipe::Status UpdatePixelsPerDegree(const float pixels_per_degree); + mediapipe::Status UpdatePixelsPerDegree(const float pixels_per_degree); + // Provide the current target position of the reframe action. + mediapipe::Status GetTargetPosition(int* target_position); private: // Tuning options. 
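The UpdatePrediction hunk above also changes how time advances: instead of stepping by the raw interval since the last update, it steps by an exponentially smoothed frame period, which damps jitter from irregular frame timestamps. The smoothing is a plain exponential moving average; a self-contained illustration using the 0.25 default that the proto change below introduces (the frame intervals are made up):

#include <cstdio>

int main() {
  const double r = 0.25;  // mean_period_update_rate
  double mean_dt = -1;    // -1 marks "no sample yet", as in the hunk above
  for (double dt : {0.033, 0.040, 0.030, 0.033}) {  // seconds between frames
    mean_dt = mean_dt < 0 ? dt : mean_dt * (1 - r) + dt * r;
    std::printf("dt=%.3f smoothed=%.4f\n", dt, mean_dt);
  }
}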
@@ -65,6 +67,13 @@ class KinematicPathSolver { uint64 current_time_; // History of observations (second) and their time (first). std::deque> raw_positions_at_time_; + // Current target position. + double target_position_px_; + // Defines if the camera is moving to a target (true) or reached a target + // within a tolerance (false). + bool motion_state_; + // Average period of incoming frames. + double mean_delta_t_; }; } // namespace autoflip diff --git a/mediapipe/examples/desktop/autoflip/quality/kinematic_path_solver.proto b/mediapipe/examples/desktop/autoflip/quality/kinematic_path_solver.proto index 525bcdb62..406418733 100644 --- a/mediapipe/examples/desktop/autoflip/quality/kinematic_path_solver.proto +++ b/mediapipe/examples/desktop/autoflip/quality/kinematic_path_solver.proto @@ -22,4 +22,6 @@ message KinematicOptions { optional double max_update_rate = 6 [default = 0.8]; // History time window of observations to be median filtered. optional int64 filtering_time_window_us = 7 [default = 0]; + // Weighted update of average period, used for motion updates. + optional float mean_period_update_rate = 8 [default = 0.25]; } diff --git a/mediapipe/examples/desktop/autoflip/quality/kinematic_path_solver_test.cc b/mediapipe/examples/desktop/autoflip/quality/kinematic_path_solver_test.cc index c9ad2690c..7ca8045e5 100644 --- a/mediapipe/examples/desktop/autoflip/quality/kinematic_path_solver_test.cc +++ b/mediapipe/examples/desktop/autoflip/quality/kinematic_path_solver_test.cc @@ -118,7 +118,7 @@ TEST(KinematicPathSolverTest, PassEnoughMotionNotFiltered) { MP_ASSERT_OK(solver.AddObservation(500, kMicroSecInSec * 3)); MP_ASSERT_OK(solver.GetState(&state)); // Expect cam to not move. - EXPECT_EQ(state, 519); + EXPECT_EQ(state, 506); } TEST(KinematicPathSolverTest, PassEnoughMotionLargeImg) { @@ -187,7 +187,7 @@ TEST(KinematicPathSolverTest, PassReframeWindow) { MP_ASSERT_OK(solver.AddObservation(520, kMicroSecInSec * 1)); MP_ASSERT_OK(solver.GetState(&state)); // Expect cam to move 1.2-.75 deg, * 16.6 = 7.47px + 500 = - EXPECT_EQ(state, 507); + EXPECT_EQ(state, 508); } TEST(KinematicPathSolverTest, PassUpdateRate30FPS) { @@ -227,9 +227,13 @@ TEST(KinematicPathSolverTest, PassUpdateRate) { options.set_max_update_rate(1.0); options.set_max_velocity(18); KinematicPathSolver solver(options, 0, 1000, 1000.0 / kWidthFieldOfView); - int state; + int state, target_position; MP_ASSERT_OK(solver.AddObservation(500, kMicroSecInSec * 0)); + MP_ASSERT_OK(solver.GetTargetPosition(&target_position)); + EXPECT_EQ(target_position, 500); MP_ASSERT_OK(solver.AddObservation(520, kMicroSecInSec * 1)); + MP_ASSERT_OK(solver.GetTargetPosition(&target_position)); + EXPECT_EQ(target_position, 520); MP_ASSERT_OK(solver.GetState(&state)); EXPECT_EQ(state, 505); } diff --git a/mediapipe/examples/desktop/autoflip/quality/padding_effect_generator.cc b/mediapipe/examples/desktop/autoflip/quality/padding_effect_generator.cc index 3da821f08..bdbfe2d42 100644 --- a/mediapipe/examples/desktop/autoflip/quality/padding_effect_generator.cc +++ b/mediapipe/examples/desktop/autoflip/quality/padding_effect_generator.cc @@ -45,7 +45,7 @@ PaddingEffectGenerator::PaddingEffectGenerator(const int input_width, } } -::mediapipe::Status PaddingEffectGenerator::Process( +mediapipe::Status PaddingEffectGenerator::Process( const ImageFrame& input_frame, const float background_contrast, const int blur_cv_size, const float overlay_opacity, ImageFrame* output_frame, const cv::Scalar* background_color_in_rgb) { @@ -170,7 +170,7 @@ 
PaddingEffectGenerator::PaddingEffectGenerator(const int input_width, output_frame->CopyPixelData(input_frame.Format(), canvas.cols, canvas.rows, canvas.data, ImageFrame::kDefaultAlignmentBoundary); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } cv::Rect PaddingEffectGenerator::ComputeOutputLocation() { diff --git a/mediapipe/examples/desktop/autoflip/quality/padding_effect_generator.h b/mediapipe/examples/desktop/autoflip/quality/padding_effect_generator.h index 679f01a68..20e34ecd7 100644 --- a/mediapipe/examples/desktop/autoflip/quality/padding_effect_generator.h +++ b/mediapipe/examples/desktop/autoflip/quality/padding_effect_generator.h @@ -49,7 +49,7 @@ class PaddingEffectGenerator { // the opacity of the black layer. // - background_color_in_rgb: If not null, uses this solid color as background // instead of blurring the image, and does not adjust contrast or opacity. - ::mediapipe::Status Process( + mediapipe::Status Process( const ImageFrame& input_frame, const float background_contrast, const int blur_cv_size, const float overlay_opacity, ImageFrame* output_frame, diff --git a/mediapipe/examples/desktop/autoflip/quality/padding_effect_generator_test.cc b/mediapipe/examples/desktop/autoflip/quality/padding_effect_generator_test.cc index 0bf5c0960..f3c3097d7 100644 --- a/mediapipe/examples/desktop/autoflip/quality/padding_effect_generator_test.cc +++ b/mediapipe/examples/desktop/autoflip/quality/padding_effect_generator_test.cc @@ -72,11 +72,11 @@ void TestWithAspectRatio(const double aspect_ratio, cv::cvtColor(decoded_mat, output_mat, cv::COLOR_BGR2RGB); break; case 4: - MP_ASSERT_OK(::mediapipe::UnimplementedErrorBuilder(MEDIAPIPE_LOC) + MP_ASSERT_OK(mediapipe::UnimplementedErrorBuilder(MEDIAPIPE_LOC) << "4-channel image isn't supported yet"); break; default: - MP_ASSERT_OK(::mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) + MP_ASSERT_OK(mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) << "Unsupported number of channels: " << decoded_mat.channels()); } @@ -101,11 +101,11 @@ void TestWithAspectRatio(const double aspect_ratio, cv::cvtColor(original_mat, input_mat, cv::COLOR_RGB2BGR); break; case 4: - MP_ASSERT_OK(::mediapipe::UnimplementedErrorBuilder(MEDIAPIPE_LOC) + MP_ASSERT_OK(mediapipe::UnimplementedErrorBuilder(MEDIAPIPE_LOC) << "4-channel image isn't supported yet"); break; default: - MP_ASSERT_OK(::mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) + MP_ASSERT_OK(mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) << "Unsupported number of channels: " << original_mat.channels()); } @@ -120,7 +120,7 @@ void TestWithAspectRatio(const double aspect_ratio, // Check its JpegEncoder::write() in "imgcodecs/src/grfmt_jpeg.cpp" for more // info. 
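As background for the Process() signature above: the padding effect composites the input, scaled to fit one output dimension, over a blurred and darkened copy of itself (or over a solid background color when background_color_in_rgb is set). The OpenCV sketch below is a rough approximation only, assuming a landscape input padded into a portrait output; it is not the generator's actual algorithm, and cv::GaussianBlur requires an odd blur_cv_size.

#include <opencv2/imgproc.hpp>

cv::Mat PadWithBlurredBackground(const cv::Mat& input, int out_w, int out_h,
                                 int blur_cv_size, float overlay_opacity) {
  // Background: the input stretched to the full output, blurred, darkened.
  cv::Mat background;
  cv::resize(input, background, cv::Size(out_w, out_h));
  cv::GaussianBlur(background, background,
                   cv::Size(blur_cv_size, blur_cv_size), 0);
  background.convertTo(background, -1, 1.0 - overlay_opacity, 0);
  // Foreground: the input scaled to the output width, pasted centered.
  const int fg_h = input.rows * out_w / input.cols;  // assumes fg_h <= out_h
  cv::Mat foreground;
  cv::resize(input, foreground, cv::Size(out_w, fg_h));
  foreground.copyTo(background(cv::Rect(0, (out_h - fg_h) / 2, out_w, fg_h)));
  return background;
}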
if (!cv::imencode(".jpg", input_mat, encode_buffer, parameters)) { - MP_ASSERT_OK(::mediapipe::InternalErrorBuilder(MEDIAPIPE_LOC) + MP_ASSERT_OK(mediapipe::InternalErrorBuilder(MEDIAPIPE_LOC) << "Fail to encode the image to be jpeg format."); } diff --git a/mediapipe/examples/desktop/autoflip/quality/polynomial_regression_path_solver.cc b/mediapipe/examples/desktop/autoflip/quality/polynomial_regression_path_solver.cc index b038b0f3c..2db06151c 100644 --- a/mediapipe/examples/desktop/autoflip/quality/polynomial_regression_path_solver.cc +++ b/mediapipe/examples/desktop/autoflip/quality/polynomial_regression_path_solver.cc @@ -91,7 +91,7 @@ void PolynomialRegressionPathSolver::AddCostFunctionToProblem( problem->AddResidualBlock(cost_function, new CauchyLoss(0.5), a, b, c, d, k); } -::mediapipe::Status PolynomialRegressionPathSolver::ComputeCameraPath( +mediapipe::Status PolynomialRegressionPathSolver::ComputeCameraPath( const std::vector& focus_point_frames, const std::vector& prior_focus_point_frames, const int original_width, const int original_height, const int output_width, diff --git a/mediapipe/examples/desktop/autoflip/quality/polynomial_regression_path_solver.h b/mediapipe/examples/desktop/autoflip/quality/polynomial_regression_path_solver.h index 514f8760d..99d6a2f2c 100644 --- a/mediapipe/examples/desktop/autoflip/quality/polynomial_regression_path_solver.h +++ b/mediapipe/examples/desktop/autoflip/quality/polynomial_regression_path_solver.h @@ -42,7 +42,7 @@ class PolynomialRegressionPathSolver { // y-axis, such that focus points can be preserved as much as possible. The // returned |all_transforms| hold the camera location at each timestamp // corresponding to each input frame. - ::mediapipe::Status ComputeCameraPath( + mediapipe::Status ComputeCameraPath( const std::vector& focus_point_frames, const std::vector& prior_focus_point_frames, const int original_width, const int original_height, diff --git a/mediapipe/examples/desktop/autoflip/quality/scene_camera_motion_analyzer.cc b/mediapipe/examples/desktop/autoflip/quality/scene_camera_motion_analyzer.cc index 34f1a4ee6..25a6a2c6a 100644 --- a/mediapipe/examples/desktop/autoflip/quality/scene_camera_motion_analyzer.cc +++ b/mediapipe/examples/desktop/autoflip/quality/scene_camera_motion_analyzer.cc @@ -30,7 +30,7 @@ namespace mediapipe { namespace autoflip { -::mediapipe::Status +mediapipe::Status SceneCameraMotionAnalyzer::AnalyzeSceneAndPopulateFocusPointFrames( const KeyFrameCropOptions& key_frame_crop_options, const std::vector& key_frame_crop_results, @@ -67,7 +67,7 @@ SceneCameraMotionAnalyzer::AnalyzeSceneAndPopulateFocusPointFrames( scene_frame_timestamps, focus_point_frames); } -::mediapipe::Status SceneCameraMotionAnalyzer::ToUseSteadyMotion( +mediapipe::Status SceneCameraMotionAnalyzer::ToUseSteadyMotion( const float look_at_center_x, const float look_at_center_y, const int crop_window_width, const int crop_window_height, SceneKeyFrameCropSummary* scene_summary, @@ -77,10 +77,10 @@ SceneCameraMotionAnalyzer::AnalyzeSceneAndPopulateFocusPointFrames( auto* steady_motion = scene_camera_motion->mutable_steady_motion(); steady_motion->set_steady_look_at_center_x(look_at_center_x); steady_motion->set_steady_look_at_center_y(look_at_center_y); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status SceneCameraMotionAnalyzer::ToUseSweepingMotion( +mediapipe::Status SceneCameraMotionAnalyzer::ToUseSweepingMotion( const float start_x, const float start_y, const float end_x, const float end_y, 
const int crop_window_width, const int crop_window_height, const double time_duration_in_sec, @@ -99,10 +99,10 @@ SceneCameraMotionAnalyzer::AnalyzeSceneAndPopulateFocusPointFrames( scene_summary->frame_success_rate(), start_x, start_y, end_x, end_y, time_duration_in_sec); VLOG(1) << sweeping_log; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status SceneCameraMotionAnalyzer::DecideCameraMotionType( +mediapipe::Status SceneCameraMotionAnalyzer::DecideCameraMotionType( const KeyFrameCropOptions& key_frame_crop_options, const double scene_span_sec, const int64 end_time_us, SceneKeyFrameCropSummary* scene_summary, @@ -131,7 +131,7 @@ SceneCameraMotionAnalyzer::AnalyzeSceneAndPopulateFocusPointFrames( no_salient_position_x, no_salient_position_y, scene_summary->crop_window_width(), scene_summary->crop_window_height(), scene_summary, scene_camera_motion)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Sweep across the scene when 1) success rate is too low, AND 2) the current @@ -164,7 +164,7 @@ SceneCameraMotionAnalyzer::AnalyzeSceneAndPopulateFocusPointFrames( start_x, start_y, end_x, end_y, key_frame_crop_options.target_width(), key_frame_crop_options.target_height(), scene_span_sec, scene_summary, scene_camera_motion)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // If scene motion is small, then look at a steady point in the scene. @@ -179,14 +179,14 @@ SceneCameraMotionAnalyzer::AnalyzeSceneAndPopulateFocusPointFrames( // Otherwise, tracks the focus regions. scene_camera_motion->mutable_tracking_motion(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // If there is no required focus region, looks at the middle of the center // range, and snaps to the scene center if close. Otherwise, look at the center // of the union of the required focus regions, and ensures the crop region // covers this union. 
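The DecideCameraMotionType hunks above implement a three-way cascade: sweep across the scene when too few key frames were cropped successfully, hold a steady look-at point when the scene's overall motion is small, and otherwise track the focus regions. Restated as a tiny decision function, with made-up thresholds standing in for the analyzer options:

enum class MotionType { kSteady, kSweeping, kTracking };

// Illustrative thresholds; the real values come from
// SceneCameraMotionAnalyzerOptions.
MotionType DecideMotion(float frame_success_rate, float horizontal_motion,
                        float vertical_motion) {
  const float kMinSuccessRate = 0.4f;
  const float kMaxSteadyMotion = 0.1f;  // as a fraction of the frame size
  if (frame_success_rate < kMinSuccessRate) return MotionType::kSweeping;
  if (horizontal_motion < kMaxSteadyMotion &&
      vertical_motion < kMaxSteadyMotion) {
    return MotionType::kSteady;
  }
  return MotionType::kTracking;
}

The real logic layers extra conditions on top (for example, sweeping also requires that the camera is not already steady), but the ordering of the three outcomes is the same.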
-::mediapipe::Status SceneCameraMotionAnalyzer::DecideSteadyLookAtRegion( +mediapipe::Status SceneCameraMotionAnalyzer::DecideSteadyLookAtRegion( const KeyFrameCropOptions& key_frame_crop_options, SceneKeyFrameCropSummary* scene_summary, SceneCameraMotion* scene_camera_motion) const { @@ -252,10 +252,10 @@ SceneCameraMotionAnalyzer::AnalyzeSceneAndPopulateFocusPointFrames( MP_RETURN_IF_ERROR(ToUseSteadyMotion(center_x, center_y, crop_width, crop_height, scene_summary, scene_camera_motion)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status +mediapipe::Status SceneCameraMotionAnalyzer::AddFocusPointsFromCenterTypeAndWeight( const float center_x, const float center_y, const int frame_width, const int frame_height, const FocusPointFrameType type, const float weight, @@ -294,10 +294,10 @@ SceneCameraMotionAnalyzer::AddFocusPointsFromCenterTypeAndWeight( } else { RET_CHECK_FAIL() << absl::StrCat("Invalid FocusPointFrameType ", type); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status SceneCameraMotionAnalyzer::PopulateFocusPointFrames( +mediapipe::Status SceneCameraMotionAnalyzer::PopulateFocusPointFrames( const SceneKeyFrameCropSummary& scene_summary, const SceneCameraMotion& scene_camera_motion, const std::vector& scene_frame_timestamps, @@ -340,7 +340,7 @@ SceneCameraMotionAnalyzer::AddFocusPointsFromCenterTypeAndWeight( options_.salient_point_bound(), &focus_point_frame)); focus_point_frames->push_back(focus_point_frame); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } else if (scene_camera_motion.has_sweeping_motion()) { // Camera sweeps across the frame. const auto& sweeping_motion = scene_camera_motion.sweeping_motion(); @@ -361,7 +361,7 @@ SceneCameraMotionAnalyzer::AddFocusPointsFromCenterTypeAndWeight( options_.salient_point_bound(), &focus_point_frame)); focus_point_frames->push_back(focus_point_frame); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } else if (scene_camera_motion.has_tracking_motion()) { // Camera tracks crop regions. RET_CHECK_GT(scene_summary.num_key_frames(), 0) << "No key frames."; @@ -369,8 +369,8 @@ SceneCameraMotionAnalyzer::AddFocusPointsFromCenterTypeAndWeight( scene_summary, focus_point_frame_type, scene_frame_timestamps, focus_point_frames); } else { - return ::mediapipe::Status(StatusCode::kInvalidArgument, - "Unknown motion type."); + return mediapipe::Status(StatusCode::kInvalidArgument, + "Unknown motion type."); } } @@ -380,7 +380,7 @@ SceneCameraMotionAnalyzer::AddFocusPointsFromCenterTypeAndWeight( // The weight for the focus point is proportional to the interpolated score // and scaled so that the maximum weight is equal to // maximum_focus_point_weight in the SceneCameraMotionAnalyzerOptions. 
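A simplified version of the weight normalization just described, with std::vector<float> standing in for the repeated FocusPoint weights (the real code, which follows, operates on FocusPointFrame protos):

#include <algorithm>
#include <vector>

// Rescales all weights so the largest equals maximum_focus_point_weight.
void ScaleFocusPointWeights(std::vector<float>* weights,
                            float maximum_focus_point_weight) {
  if (weights->empty()) return;
  const float max_weight =
      *std::max_element(weights->begin(), weights->end());
  if (max_weight <= 0.0f) return;
  const float scale = maximum_focus_point_weight / max_weight;
  for (float& weight : *weights) weight *= scale;
}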
-::mediapipe::Status +mediapipe::Status SceneCameraMotionAnalyzer::PopulateFocusPointFramesForTracking( const SceneKeyFrameCropSummary& scene_summary, const FocusPointFrameType focus_point_frame_type, @@ -440,7 +440,7 @@ SceneCameraMotionAnalyzer::PopulateFocusPointFramesForTracking( focus_point->set_weight(scale * focus_point->weight()); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace autoflip diff --git a/mediapipe/examples/desktop/autoflip/quality/scene_camera_motion_analyzer.h b/mediapipe/examples/desktop/autoflip/quality/scene_camera_motion_analyzer.h index 4aca2108c..63295ffd1 100644 --- a/mediapipe/examples/desktop/autoflip/quality/scene_camera_motion_analyzer.h +++ b/mediapipe/examples/desktop/autoflip/quality/scene_camera_motion_analyzer.h @@ -62,7 +62,7 @@ class SceneCameraMotionAnalyzer { // Aggregates information from KeyFrameInfos and KeyFrameCropResults into // SceneKeyFrameCropSummary, and populates FocusPointFrames given scene // frame timestamps. Optionally returns SceneCameraMotion. - ::mediapipe::Status AnalyzeSceneAndPopulateFocusPointFrames( + mediapipe::Status AnalyzeSceneAndPopulateFocusPointFrames( const KeyFrameCropOptions& key_frame_crop_options, const std::vector& key_frame_crop_results, const int scene_frame_width, const int scene_frame_height, @@ -75,7 +75,7 @@ class SceneCameraMotionAnalyzer { protected: // Decides SceneCameraMotion based on SceneKeyFrameCropSummary. Updates the // crop window in SceneKeyFrameCropSummary in the case of steady motion. - ::mediapipe::Status DecideCameraMotionType( + mediapipe::Status DecideCameraMotionType( const KeyFrameCropOptions& key_frame_crop_options, const double scene_span_sec, const int64 end_time_us, SceneKeyFrameCropSummary* scene_summary, @@ -83,7 +83,7 @@ class SceneCameraMotionAnalyzer { // Populates the FocusPointFrames for each scene frame based on // SceneKeyFrameCropSummary, SceneCameraMotion, and scene frame timestamps. - ::mediapipe::Status PopulateFocusPointFrames( + mediapipe::Status PopulateFocusPointFrames( const SceneKeyFrameCropSummary& scene_summary, const SceneCameraMotion& scene_camera_motion, const std::vector& scene_frame_timestamps, @@ -91,7 +91,7 @@ class SceneCameraMotionAnalyzer { private: // Decides the look-at region when camera is steady. - ::mediapipe::Status DecideSteadyLookAtRegion( + mediapipe::Status DecideSteadyLookAtRegion( const KeyFrameCropOptions& key_frame_crop_options, SceneKeyFrameCropSummary* scene_summary, SceneCameraMotion* scene_camera_motion) const; @@ -105,7 +105,7 @@ class SceneCameraMotionAnalyzer { // Adds FocusPoint(s) to given FocusPointFrame given center location, // frame size, FocusPointFrameType, weight, and bound. - ::mediapipe::Status AddFocusPointsFromCenterTypeAndWeight( + mediapipe::Status AddFocusPointsFromCenterTypeAndWeight( const float center_x, const float center_y, const int frame_width, const int frame_height, const FocusPointFrameType type, const float weight, const float bound, @@ -114,21 +114,21 @@ class SceneCameraMotionAnalyzer { // Populates the FocusPointFrames for each scene frame based on // SceneKeyFrameCropSummary and scene frame timestamps in the case where // camera is tracking the crop regions. 
- ::mediapipe::Status PopulateFocusPointFramesForTracking( + mediapipe::Status PopulateFocusPointFramesForTracking( const SceneKeyFrameCropSummary& scene_summary, const FocusPointFrameType focus_point_frame_type, const std::vector& scene_frame_timestamps, std::vector* focus_point_frames) const; // Decide to use steady motion. - ::mediapipe::Status ToUseSteadyMotion( + mediapipe::Status ToUseSteadyMotion( const float look_at_center_x, const float look_at_center_y, const int crop_window_width, const int crop_window_height, SceneKeyFrameCropSummary* scene_summary, SceneCameraMotion* scene_camera_motion) const; // Decide to use sweeping motion. - ::mediapipe::Status ToUseSweepingMotion( + mediapipe::Status ToUseSweepingMotion( const float start_x, const float start_y, const float end_x, const float end_y, const int crop_window_width, const int crop_window_height, const double time_duration_in_sec, diff --git a/mediapipe/examples/desktop/autoflip/quality/scene_cropper.cc b/mediapipe/examples/desktop/autoflip/quality/scene_cropper.cc index 420cb8146..70c2f92b5 100644 --- a/mediapipe/examples/desktop/autoflip/quality/scene_cropper.cc +++ b/mediapipe/examples/desktop/autoflip/quality/scene_cropper.cc @@ -29,7 +29,7 @@ constexpr float kWidthFieldOfView = 60; namespace mediapipe { namespace autoflip { -::mediapipe::Status SceneCropper::ProcessKinematicPathSolver( +mediapipe::Status SceneCropper::ProcessKinematicPathSolver( const SceneKeyFrameCropSummary& scene_summary, const std::vector& scene_timestamps, const std::vector& is_key_frames, @@ -77,10 +77,10 @@ namespace autoflip { -(x_path - scene_summary.crop_window_width() / 2); all_xforms->push_back(transform); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status SceneCropper::CropFrames( +mediapipe::Status SceneCropper::CropFrames( const SceneKeyFrameCropSummary& scene_summary, const std::vector& scene_timestamps, const std::vector& is_key_frames, @@ -151,7 +151,7 @@ namespace autoflip { // If no cropped_frames is passed in, return directly. if (!cropped_frames) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } RET_CHECK(!scene_frames_or_empty.empty()) << "If |cropped_frames| != nullptr, scene_frames_or_empty must not be " diff --git a/mediapipe/examples/desktop/autoflip/quality/scene_cropper.h b/mediapipe/examples/desktop/autoflip/quality/scene_cropper.h index c99ae59e7..b77cfc60f 100644 --- a/mediapipe/examples/desktop/autoflip/quality/scene_cropper.h +++ b/mediapipe/examples/desktop/autoflip/quality/scene_cropper.h @@ -60,7 +60,7 @@ class SceneCropper { // on the transform matrix if |cropped_frames| is not nullptr and // |scene_frames_or_empty| isn't empty. // TODO: split this function into two separate functions. 
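ProcessKinematicPathSolver, shown above, reduces each smoothed x position to a 2x3 affine matrix whose x translation is -(x_path - crop_window_width / 2), shifting the source so the crop window lands at the output origin. A minimal sketch of that matrix construction (the function name is illustrative):

#include <opencv2/core.hpp>

cv::Mat CropTransform(float x_path, int crop_window_width) {
  cv::Mat transform = cv::Mat::eye(2, 3, CV_32FC1);
  transform.at<float>(0, 2) = -(x_path - crop_window_width / 2.0f);
  return transform;
}

CropFrames then hands such matrices to the AffineRetarget helper (see the utils.cc hunk further down), which applies them with cv::warpAffine at the output crop size.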
- ::mediapipe::Status CropFrames( + mediapipe::Status CropFrames( const SceneKeyFrameCropSummary& scene_summary, const std::vector& scene_timestamps, const std::vector& is_key_frames, @@ -71,7 +71,7 @@ class SceneCropper { const bool continue_last_scene, std::vector* crop_from_location, std::vector* cropped_frames); - ::mediapipe::Status ProcessKinematicPathSolver( + mediapipe::Status ProcessKinematicPathSolver( const SceneKeyFrameCropSummary& scene_summary, const std::vector& scene_timestamps, const std::vector& is_key_frames, diff --git a/mediapipe/examples/desktop/autoflip/quality/scene_cropping_viz.cc b/mediapipe/examples/desktop/autoflip/quality/scene_cropping_viz.cc index e2be36c08..76fffb33e 100644 --- a/mediapipe/examples/desktop/autoflip/quality/scene_cropping_viz.cc +++ b/mediapipe/examples/desktop/autoflip/quality/scene_cropping_viz.cc @@ -46,7 +46,7 @@ const cv::Scalar kOrange = cv::Scalar(255.0, 165.0, 0.0); // ica object detector const cv::Scalar kWhite = cv::Scalar(255.0, 255.0, 255.0); // others -::mediapipe::Status DrawDetectionsAndCropRegions( +mediapipe::Status DrawDetectionsAndCropRegions( const std::vector& scene_frames, const std::vector& is_key_frames, const std::vector& key_frame_infos, @@ -130,7 +130,7 @@ const cv::Scalar kWhite = cv::Scalar(255.0, 255.0, 255.0); // others } viz_frames->push_back(std::move(viz_frame)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } namespace { @@ -147,7 +147,7 @@ cv::Rect LimitBounds(const cv::Rect& rect, const int max_width, } } // namespace -::mediapipe::Status DrawDetectionAndFramingWindow( +mediapipe::Status DrawDetectionAndFramingWindow( const std::vector& org_scene_frames, const std::vector& crop_from_locations, const ImageFormat::Format image_format, const float overlay_opacity, @@ -166,10 +166,10 @@ cv::Rect LimitBounds(const cv::Rect& rect, const int max_width, scene_frame(crop_from_bounded).copyTo(darkened(crop_from_bounded)); viz_frames->push_back(std::move(viz_frame)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status DrawFocusPointAndCropWindow( +mediapipe::Status DrawFocusPointAndCropWindow( const std::vector& scene_frames, const std::vector& focus_point_frames, const float overlay_opacity, const int crop_window_width, @@ -215,7 +215,7 @@ cv::Rect LimitBounds(const cv::Rect& rect, const int max_width, } viz_frames->push_back(std::move(viz_frame)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace autoflip diff --git a/mediapipe/examples/desktop/autoflip/quality/scene_cropping_viz.h b/mediapipe/examples/desktop/autoflip/quality/scene_cropping_viz.h index e951f2df7..c2309f77a 100644 --- a/mediapipe/examples/desktop/autoflip/quality/scene_cropping_viz.h +++ b/mediapipe/examples/desktop/autoflip/quality/scene_cropping_viz.h @@ -36,7 +36,7 @@ namespace autoflip { // magenta, logos are red, ocrs are yellow (foreground) and light yellow // (background), brain objects are cyan, ica objects are orange, and the rest // are white. -::mediapipe::Status DrawDetectionsAndCropRegions( +mediapipe::Status DrawDetectionsAndCropRegions( const std::vector& scene_frames, const std::vector& is_key_frames, const std::vector& key_frame_infos, @@ -47,7 +47,7 @@ namespace autoflip { // Draws the focus point from the given FocusPointFrame and the crop window // centered around it on the scene frame in red. This helps visualize the input // to the retargeter. 
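DrawDetectionAndFramingWindow, whose implementation appears above, visualizes the final camera path by darkening everything outside the crop window: it blends the frame toward black, then copies the untouched crop region back on top. A compact restatement of that compositing step (names illustrative):

#include <opencv2/core.hpp>

void DarkenOutsideCrop(cv::Mat* frame, const cv::Rect& crop,
                       float overlay_opacity) {
  cv::Mat darkened;
  frame->convertTo(darkened, -1, 1.0f - overlay_opacity, 0);
  (*frame)(crop).copyTo(darkened(crop));  // keep the framed window bright
  darkened.copyTo(*frame);
}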
-::mediapipe::Status DrawFocusPointAndCropWindow( +mediapipe::Status DrawFocusPointAndCropWindow( const std::vector& scene_frames, const std::vector& focus_point_frames, const float overlay_opacity, const int crop_window_width, @@ -57,7 +57,7 @@ namespace autoflip { // Draws the final smoothed path of the camera retargeter by darkening the // removed areas. -::mediapipe::Status DrawDetectionAndFramingWindow( +mediapipe::Status DrawDetectionAndFramingWindow( const std::vector& org_scene_frames, const std::vector& crop_from_locations, const ImageFormat::Format image_format, const float overlay_opacity, diff --git a/mediapipe/examples/desktop/autoflip/quality/utils.cc b/mediapipe/examples/desktop/autoflip/quality/utils.cc index 68db4aa11..d1cd7723e 100644 --- a/mediapipe/examples/desktop/autoflip/quality/utils.cc +++ b/mediapipe/examples/desktop/autoflip/quality/utils.cc @@ -53,13 +53,12 @@ void NormalizedRectToRect(const RectF& normalized_location, const int width, ScaleRect(normalized_location, width, height, location); } -::mediapipe::Status ClampRect(const int width, const int height, - Rect* location) { +mediapipe::Status ClampRect(const int width, const int height, Rect* location) { return ClampRect(0, 0, width, height, location); } -::mediapipe::Status ClampRect(const int x0, const int y0, const int x1, - const int y1, Rect* location) { +mediapipe::Status ClampRect(const int x0, const int y0, const int x1, + const int y1, Rect* location) { RET_CHECK(!(location->x() >= x1 || location->x() + location->width() <= x0 || location->y() >= y1 || location->y() + location->height() <= y0)); @@ -74,7 +73,7 @@ void NormalizedRectToRect(const RectF& normalized_location, const int width, location->set_y(clamped_top); location->set_width(std::max(0, clamped_right - clamped_left)); location->set_height(std::max(0, clamped_bottom - clamped_top)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void RectUnion(const Rect& rect_to_add, Rect* rect) { @@ -90,13 +89,13 @@ void RectUnion(const Rect& rect_to_add, Rect* rect) { rect->set_height(y2 - y1); } -::mediapipe::Status PackKeyFrameInfo(const int64 frame_timestamp_ms, - const DetectionSet& detections, - const int original_frame_width, - const int original_frame_height, - const int feature_frame_width, - const int feature_frame_height, - KeyFrameInfo* key_frame_info) { +mediapipe::Status PackKeyFrameInfo(const int64 frame_timestamp_ms, + const DetectionSet& detections, + const int original_frame_width, + const int original_frame_height, + const int feature_frame_width, + const int feature_frame_height, + KeyFrameInfo* key_frame_info) { RET_CHECK(key_frame_info != nullptr) << "KeyFrameInfo is null"; RET_CHECK(original_frame_width > 0 && original_frame_height > 0 && feature_frame_width > 0 && feature_frame_height > 0) @@ -136,10 +135,10 @@ void RectUnion(const Rect& rect_to_add, Rect* rect) { } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status SortDetections( +mediapipe::Status SortDetections( const DetectionSet& detections, std::vector* required_regions, std::vector* non_required_regions) { @@ -175,13 +174,13 @@ void RectUnion(const Rect& rect_to_add, Rect* rect) { non_required_regions->push_back(detections.detections(original_idx)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status SetKeyFrameCropTarget(const int frame_width, - const int frame_height, - const double target_aspect_ratio, - KeyFrameCropOptions* crop_options) { +mediapipe::Status 
SetKeyFrameCropTarget(const int frame_width, + const int frame_height, + const double target_aspect_ratio, + KeyFrameCropOptions* crop_options) { RET_CHECK_NE(crop_options, nullptr) << "KeyFrameCropOptions is null."; RET_CHECK_GT(frame_width, 0) << "Frame width is non-positive."; RET_CHECK_GT(frame_height, 0) << "Frame height is non-positive."; @@ -199,10 +198,10 @@ void RectUnion(const Rect& rect_to_add, Rect* rect) { : std::round(frame_width / target_aspect_ratio); crop_options->set_target_width(crop_target_width); crop_options->set_target_height(crop_target_height); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status AggregateKeyFrameResults( +mediapipe::Status AggregateKeyFrameResults( const KeyFrameCropOptions& key_frame_crop_options, const std::vector& key_frame_crop_results, const int scene_frame_width, const int scene_frame_height, @@ -232,7 +231,7 @@ void RectUnion(const Rect& rect_to_add, Rect* rect) { // Handles the corner case of no key frames. if (num_key_frames == 0) { scene_summary->set_has_salient_region(false); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } scene_summary->set_num_key_frames(num_key_frames); @@ -328,10 +327,10 @@ void RectUnion(const Rect& rect_to_add, Rect* rect) { scene_summary->key_frame_center_min_y()) / scene_frame_height; scene_summary->set_vertical_motion_amount(motion_y); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ComputeSceneStaticBordersSize( +mediapipe::Status ComputeSceneStaticBordersSize( const std::vector& static_features, int* top_border_size, int* bottom_border_size) { RET_CHECK(top_border_size) << "Output top border size is null."; @@ -375,10 +374,10 @@ void RectUnion(const Rect& rect_to_add, Rect* rect) { *top_border_size = std::max(0, *top_border_size); *bottom_border_size = std::max(0, *bottom_border_size); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status FindSolidBackgroundColor( +mediapipe::Status FindSolidBackgroundColor( const std::vector& static_features, const std::vector& static_features_timestamps, const double min_fraction_solid_background_color, @@ -423,13 +422,13 @@ void RectUnion(const Rect& rect_to_add, Rect* rect) { min_fraction_solid_background_color) { *has_solid_background = true; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status AffineRetarget( - const cv::Size& output_size, const std::vector& frames, - const std::vector& affine_projection, - std::vector* cropped_frames) { +mediapipe::Status AffineRetarget(const cv::Size& output_size, + const std::vector& frames, + const std::vector& affine_projection, + std::vector* cropped_frames) { RET_CHECK(frames.size() == affine_projection.size()) << "number of frames and retarget offsets must be the same."; RET_CHECK(cropped_frames->size() == frames.size()) @@ -443,7 +442,7 @@ void RectUnion(const Rect& rect_to_add, Rect* rect) { RET_CHECK(affine.rows == 2) << "Affine matrix must be 2x3"; cv::warpAffine(frames[i], (*cropped_frames)[i], affine, output_size); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace autoflip } // namespace mediapipe diff --git a/mediapipe/examples/desktop/autoflip/quality/utils.h b/mediapipe/examples/desktop/autoflip/quality/utils.h index ec1373ae4..d9f05c4ea 100644 --- a/mediapipe/examples/desktop/autoflip/quality/utils.h +++ b/mediapipe/examples/desktop/autoflip/quality/utils.h @@ -29,16 +29,16 @@ namespace autoflip { // Packs detected 
features and timestamp (ms) into a KeyFrameInfo object. Scales // features back to the original frame size if features have been detected on a // different frame size. -::mediapipe::Status PackKeyFrameInfo(const int64 frame_timestamp_ms, - const DetectionSet& detections, - const int original_frame_width, - const int original_frame_height, - const int feature_frame_width, - const int feature_frame_height, - KeyFrameInfo* key_frame_info); +mediapipe::Status PackKeyFrameInfo(const int64 frame_timestamp_ms, + const DetectionSet& detections, + const int original_frame_width, + const int original_frame_height, + const int feature_frame_width, + const int feature_frame_height, + KeyFrameInfo* key_frame_info); // Sorts required and non-required salient regions given a detection set. -::mediapipe::Status SortDetections( +mediapipe::Status SortDetections( const DetectionSet& detections, std::vector* required_regions, std::vector* non_required_regions); @@ -46,14 +46,14 @@ namespace autoflip { // Sets the target crop size in KeyFrameCropOptions based on frame size and // target aspect ratio so that the target crop size covers the biggest area // possible in the frame. -::mediapipe::Status SetKeyFrameCropTarget(const int frame_width, - const int frame_height, - const double target_aspect_ratio, - KeyFrameCropOptions* crop_options); +mediapipe::Status SetKeyFrameCropTarget(const int frame_width, + const int frame_height, + const double target_aspect_ratio, + KeyFrameCropOptions* crop_options); // Aggregates information from KeyFrameInfos and KeyFrameCropResults into // SceneKeyFrameCropSummary. -::mediapipe::Status AggregateKeyFrameResults( +mediapipe::Status AggregateKeyFrameResults( const KeyFrameCropOptions& key_frame_crop_options, const std::vector& key_frame_crop_results, const int scene_frame_width, const int scene_frame_height, @@ -61,7 +61,7 @@ namespace autoflip { // Computes the static top and border size across a scene given a vector of // StaticFeatures over frames. -::mediapipe::Status ComputeSceneStaticBordersSize( +mediapipe::Status ComputeSceneStaticBordersSize( const std::vector& static_features, int* top_border_size, int* bottom_border_size); @@ -70,7 +70,7 @@ namespace autoflip { // background color exceeds given threshold, i.e., // min_fraction_solid_background_color. Builds the background color // interpolation functions in Lab space using input timestamps. -::mediapipe::Status FindSolidBackgroundColor( +mediapipe::Status FindSolidBackgroundColor( const std::vector& static_features, const std::vector& static_features_timestamps, const double min_fraction_solid_background_color, @@ -93,13 +93,12 @@ void NormalizedRectToRect(const RectF& normalized_location, const int width, // Clamps a rectangle to lie within [x0, y0] and [x1, y1]. Returns true if the // rectangle has any overlapping with the target window. -::mediapipe::Status ClampRect(const int x0, const int y0, const int x1, - const int y1, Rect* location); +mediapipe::Status ClampRect(const int x0, const int y0, const int x1, + const int y1, Rect* location); // Convenience function to clamp a rectangle to lie within [0, 0] and // [width, height]. -::mediapipe::Status ClampRect(const int width, const int height, - Rect* location); +mediapipe::Status ClampRect(const int width, const int height, Rect* location); // Enlarges a given rectangle to cover a new rectangle to be added. 
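SetKeyFrameCropTarget, declared above, picks the largest crop of the requested aspect ratio that fits inside the frame: one side stays at the full frame dimension and the other is derived from target_aspect_ratio (the utils.cc hunk earlier shows the std::round(frame_width / target_aspect_ratio) case). A worked example; the branch condition here is an assumption consistent with that intent, not copied from the source:

#include <cmath>
#include <cstdio>

int main() {
  const int frame_width = 1920, frame_height = 1080;
  const double target_aspect_ratio = 9.0 / 16.0;  // portrait output
  const double frame_ratio =
      static_cast<double>(frame_width) / frame_height;
  int crop_w, crop_h;
  if (target_aspect_ratio < frame_ratio) {
    crop_h = frame_height;  // full height, width follows the ratio
    crop_w = static_cast<int>(std::round(frame_height * target_aspect_ratio));
  } else {
    crop_w = frame_width;  // full width, height follows the ratio
    crop_h = static_cast<int>(std::round(frame_width / target_aspect_ratio));
  }
  std::printf("crop target: %dx%d\n", crop_w, crop_h);  // 608x1080 here
}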
void RectUnion(const Rect& rect_to_add, Rect* rect); @@ -107,10 +106,10 @@ void RectUnion(const Rect& rect_to_add, Rect* rect); // Performs an affine retarget on a list of input images. Output vector // cropped_frames must be filled with Mats of the same size as output_size and // type. -::mediapipe::Status AffineRetarget( - const cv::Size& output_size, const std::vector& frames, - const std::vector& affine_projection, - std::vector* cropped_frames); +mediapipe::Status AffineRetarget(const cv::Size& output_size, + const std::vector& frames, + const std::vector& affine_projection, + std::vector* cropped_frames); } // namespace autoflip } // namespace mediapipe diff --git a/mediapipe/examples/desktop/autoflip/quality/visual_scorer.cc b/mediapipe/examples/desktop/autoflip/quality/visual_scorer.cc index ce73cf5bf..cf01adf2a 100644 --- a/mediapipe/examples/desktop/autoflip/quality/visual_scorer.cc +++ b/mediapipe/examples/desktop/autoflip/quality/visual_scorer.cc @@ -67,14 +67,14 @@ mediapipe::Status VisualScorer::CalculateScore(const cv::Mat& image, region.location_normalized().width() * image.cols, region.location_normalized().height() * image.rows); } else { - return ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "Unset region location."; } CropRectToMat(image, ®ion_rect); if (region_rect.area() == 0) { *score = 0; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Compute a score based on area covered by this region. @@ -89,7 +89,7 @@ mediapipe::Status VisualScorer::CalculateScore(const cv::Mat& image, float sharpness_score_result = 0.0; if (options_.sharpness_weight() > kEpsilon) { // TODO: implement a sharpness score or remove this code block. - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "sharpness scorer is not yet implemented, please set weight to " "0.0"; } @@ -108,7 +108,7 @@ mediapipe::Status VisualScorer::CalculateScore(const cv::Mat& image, if (*score > 1.0f || *score < 0.0f) { LOG(WARNING) << "Score of region outside expected range: " << *score; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } mediapipe::Status VisualScorer::CalculateColorfulness( @@ -134,7 +134,7 @@ mediapipe::Status VisualScorer::CalculateColorfulness( // If the mask is empty, return. if (empty_mask) { *colorfulness = 0; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Generate a 2D histogram (hue/saturation). @@ -162,7 +162,7 @@ mediapipe::Status VisualScorer::CalculateColorfulness( } if (hue_sum == 0.0f) { *colorfulness = 0; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Compute the histogram entropy. @@ -175,7 +175,7 @@ mediapipe::Status VisualScorer::CalculateColorfulness( } *colorfulness /= std::log(2.0f); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace autoflip diff --git a/mediapipe/examples/desktop/demo_run_graph_main.cc b/mediapipe/examples/desktop/demo_run_graph_main.cc index 25f4bb4f1..e09d2abf5 100644 --- a/mediapipe/examples/desktop/demo_run_graph_main.cc +++ b/mediapipe/examples/desktop/demo_run_graph_main.cc @@ -40,7 +40,7 @@ DEFINE_string(output_video_path, "", "Full path of where to save result (.mp4 only). 
" "If not provided, show result in a window."); -::mediapipe::Status RunMPPGraph() { +mediapipe::Status RunMPPGraph() { std::string calculator_graph_config_contents; MP_RETURN_IF_ERROR(mediapipe::file::GetContents( FLAGS_calculator_graph_config_file, &calculator_graph_config_contents)); @@ -86,7 +86,14 @@ DEFINE_string(output_video_path, "", // Capture opencv camera or video frame. cv::Mat camera_frame_raw; capture >> camera_frame_raw; - if (camera_frame_raw.empty()) break; // End of video. + if (camera_frame_raw.empty()) { + if (!load_video) { + LOG(INFO) << "Ignore empty frames from camera."; + continue; + } + LOG(INFO) << "Empty frame, end of video reached."; + break; + } cv::Mat camera_frame; cv::cvtColor(camera_frame_raw, camera_frame, cv::COLOR_BGR2RGB); if (!load_video) { @@ -141,7 +148,7 @@ DEFINE_string(output_video_path, "", int main(int argc, char** argv) { google::InitGoogleLogging(argv[0]); gflags::ParseCommandLineFlags(&argc, &argv, true); - ::mediapipe::Status run_status = RunMPPGraph(); + mediapipe::Status run_status = RunMPPGraph(); if (!run_status.ok()) { LOG(ERROR) << "Failed to run the graph: " << run_status.message(); return EXIT_FAILURE; diff --git a/mediapipe/examples/desktop/demo_run_graph_main_gpu.cc b/mediapipe/examples/desktop/demo_run_graph_main_gpu.cc index b77d8d4a3..4bc7f92e4 100644 --- a/mediapipe/examples/desktop/demo_run_graph_main_gpu.cc +++ b/mediapipe/examples/desktop/demo_run_graph_main_gpu.cc @@ -44,7 +44,7 @@ DEFINE_string(output_video_path, "", "Full path of where to save result (.mp4 only). " "If not provided, show result in a window."); -::mediapipe::Status RunMPPGraph() { +mediapipe::Status RunMPPGraph() { std::string calculator_graph_config_contents; MP_RETURN_IF_ERROR(mediapipe::file::GetContents( FLAGS_calculator_graph_config_file, &calculator_graph_config_contents)); @@ -96,16 +96,23 @@ DEFINE_string(output_video_path, "", // Capture opencv camera or video frame. cv::Mat camera_frame_raw; capture >> camera_frame_raw; - if (camera_frame_raw.empty()) break; // End of video. + if (camera_frame_raw.empty()) { + if (!load_video) { + LOG(INFO) << "Ignore empty frames from camera."; + continue; + } + LOG(INFO) << "Empty frame, end of video reached."; + break; + } cv::Mat camera_frame; - cv::cvtColor(camera_frame_raw, camera_frame, cv::COLOR_BGR2RGB); + cv::cvtColor(camera_frame_raw, camera_frame, cv::COLOR_BGR2RGBA); if (!load_video) { cv::flip(camera_frame, camera_frame, /*flipcode=HORIZONTAL*/ 1); } // Wrap Mat into an ImageFrame. auto input_frame = absl::make_unique( - mediapipe::ImageFormat::SRGB, camera_frame.cols, camera_frame.rows, + mediapipe::ImageFormat::SRGBA, camera_frame.cols, camera_frame.rows, mediapipe::ImageFrame::kGlDefaultAlignmentBoundary); cv::Mat input_frame_mat = mediapipe::formats::MatView(input_frame.get()); camera_frame.copyTo(input_frame_mat); @@ -125,7 +132,7 @@ DEFINE_string(output_video_path, "", MP_RETURN_IF_ERROR(graph.AddPacketToInputStream( kInputStream, mediapipe::Adopt(gpu_frame.release()) .At(mediapipe::Timestamp(frame_timestamp_us)))); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); // Get the graph result packet, or stop if that fails. @@ -149,12 +156,15 @@ DEFINE_string(output_video_path, "", info.gl_type, output_frame->MutablePixelData()); glFlush(); texture.Release(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); // Convert back to opencv for display or saving. 
cv::Mat output_frame_mat = mediapipe::formats::MatView(output_frame.get()); - cv::cvtColor(output_frame_mat, output_frame_mat, cv::COLOR_RGB2BGR); + if (output_frame_mat.channels() == 4) + cv::cvtColor(output_frame_mat, output_frame_mat, cv::COLOR_RGBA2BGR); + else + cv::cvtColor(output_frame_mat, output_frame_mat, cv::COLOR_RGB2BGR); if (save_video) { if (!writer.isOpened()) { LOG(INFO) << "Prepare video writer."; @@ -181,7 +191,7 @@ DEFINE_string(output_video_path, "", int main(int argc, char** argv) { google::InitGoogleLogging(argv[0]); gflags::ParseCommandLineFlags(&argc, &argv, true); - ::mediapipe::Status run_status = RunMPPGraph(); + mediapipe::Status run_status = RunMPPGraph(); if (!run_status.ok()) { LOG(ERROR) << "Failed to run the graph: " << run_status.message(); return EXIT_FAILURE; diff --git a/mediapipe/examples/desktop/hello_world/hello_world.cc b/mediapipe/examples/desktop/hello_world/hello_world.cc index b7dfa40c3..a5d0e790c 100644 --- a/mediapipe/examples/desktop/hello_world/hello_world.cc +++ b/mediapipe/examples/desktop/hello_world/hello_world.cc @@ -21,7 +21,7 @@ namespace mediapipe { -::mediapipe::Status PrintHelloWorld() { +mediapipe::Status PrintHelloWorld() { // Configures a simple graph, which concatenates 2 PassThroughCalculators. CalculatorGraphConfig config = ParseTextProtoOrDie<CalculatorGraphConfig>(R"( input_stream: "in"
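The hello_world.cc hunk is cut off by the diff context right after `input_stream: "in"`. For orientation, the complete round trip that file performs looks roughly like the following sketch, written in the post-change naming; the two-PassThrough config and the ten-packet loop are assumed from the usual hello-world example rather than shown by this hunk:

```cpp
#include "mediapipe/framework/calculator_graph.h"
#include "mediapipe/framework/port/logging.h"
#include "mediapipe/framework/port/parse_text_proto.h"
#include "mediapipe/framework/port/status.h"

namespace mediapipe {

mediapipe::Status RunHelloWorld() {
  // Two chained PassThroughCalculators: "in" -> "out1" -> "out".
  CalculatorGraphConfig config = ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
    input_stream: "in"
    output_stream: "out"
    node { calculator: "PassThroughCalculator" input_stream: "in" output_stream: "out1" }
    node { calculator: "PassThroughCalculator" input_stream: "out1" output_stream: "out" }
  )");
  CalculatorGraph graph;
  MP_RETURN_IF_ERROR(graph.Initialize(config));
  ASSIGN_OR_RETURN(OutputStreamPoller poller, graph.AddOutputStreamPoller("out"));
  MP_RETURN_IF_ERROR(graph.StartRun({}));
  // Feed ten packets, close the input stream, then drain the poller.
  for (int i = 0; i < 10; ++i) {
    MP_RETURN_IF_ERROR(graph.AddPacketToInputStream(
        "in", MakePacket<std::string>("Hello World!").At(Timestamp(i))));
  }
  MP_RETURN_IF_ERROR(graph.CloseInputStream("in"));
  Packet packet;
  while (poller.Next(&packet)) LOG(INFO) << packet.Get<std::string>();
  return graph.WaitUntilDone();
}

}  // namespace mediapipe
```

The same Initialize / StartRun / AddPacketToInputStream / WaitUntilDone sequence recurs in every desktop demo this patch touches.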
diff --git a/mediapipe/examples/desktop/holistic_tracking/BUILD b/mediapipe/examples/desktop/holistic_tracking/BUILD new file mode 100644 index 000000000..0f69c1e4f --- /dev/null +++ b/mediapipe/examples/desktop/holistic_tracking/BUILD @@ -0,0 +1,34 @@ +# Copyright 2020 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +licenses(["notice"]) + +package(default_visibility = ["//mediapipe/examples:__subpackages__"]) + +cc_binary( + name = "holistic_tracking_cpu", + deps = [ + "//mediapipe/examples/desktop:demo_run_graph_main", + "//mediapipe/graphs/holistic_tracking:holistic_tracking_cpu_graph_deps", + ], +) + +# Linux only +cc_binary( + name = "holistic_tracking_gpu", + deps = [ + "//mediapipe/examples/desktop:demo_run_graph_main_gpu", + "//mediapipe/graphs/holistic_tracking:holistic_tracking_gpu_deps", + ], +) diff --git a/mediapipe/examples/desktop/iris_tracking/iris_depth_from_image_desktop.cc b/mediapipe/examples/desktop/iris_tracking/iris_depth_from_image_desktop.cc index 4cfab621d..d08e95a1e 100644 --- a/mediapipe/examples/desktop/iris_tracking/iris_depth_from_image_desktop.cc +++ b/mediapipe/examples/desktop/iris_tracking/iris_depth_from_image_desktop.cc @@ -47,25 +47,25 @@ DEFINE_string(output_image_path, "", namespace { -::mediapipe::StatusOr<std::string> ReadFileToString( +mediapipe::StatusOr<std::string> ReadFileToString( const std::string& file_path) { std::string contents; - MP_RETURN_IF_ERROR(::mediapipe::file::GetContents(file_path, &contents)); + MP_RETURN_IF_ERROR(mediapipe::file::GetContents(file_path, &contents)); return contents; } -::mediapipe::Status ProcessImage( - std::unique_ptr<::mediapipe::CalculatorGraph> graph) { +mediapipe::Status ProcessImage( + std::unique_ptr<mediapipe::CalculatorGraph> graph) { LOG(INFO) << "Load the image."; ASSIGN_OR_RETURN(const std::string raw_image, ReadFileToString(FLAGS_input_image_path)); LOG(INFO) << "Start running the calculator graph."; - ASSIGN_OR_RETURN(::mediapipe::OutputStreamPoller output_image_poller, + ASSIGN_OR_RETURN(mediapipe::OutputStreamPoller output_image_poller, graph->AddOutputStreamPoller(kOutputImageStream)); - ASSIGN_OR_RETURN(::mediapipe::OutputStreamPoller left_iris_depth_poller, + ASSIGN_OR_RETURN(mediapipe::OutputStreamPoller left_iris_depth_poller, graph->AddOutputStreamPoller(kLeftIrisDepthMmStream)); - ASSIGN_OR_RETURN(::mediapipe::OutputStreamPoller right_iris_depth_poller, + ASSIGN_OR_RETURN(mediapipe::OutputStreamPoller right_iris_depth_poller, graph->AddOutputStreamPoller(kRightIrisDepthMmStream)); MP_RETURN_IF_ERROR(graph->StartRun({})); @@ -74,22 +74,22 @@ namespace { (double)cv::getTickFrequency() * kMicrosPerSecond; MP_RETURN_IF_ERROR(graph->AddPacketToInputStream( - kInputStream, ::mediapipe::MakePacket<std::string>(raw_image).At( - ::mediapipe::Timestamp(fake_timestamp_us)))); + kInputStream, mediapipe::MakePacket<std::string>(raw_image).At( + mediapipe::Timestamp(fake_timestamp_us)))); // Get the graph result packets, or stop if that fails. - ::mediapipe::Packet left_iris_depth_packet; + mediapipe::Packet left_iris_depth_packet; if (!left_iris_depth_poller.Next(&left_iris_depth_packet)) { - return ::mediapipe::UnknownError( + return mediapipe::UnknownError( "Failed to get packet from output stream 'left_iris_depth_mm'."); } const auto& left_iris_depth_mm = left_iris_depth_packet.Get<float>(); const int left_iris_depth_cm = std::round(left_iris_depth_mm / 10); std::cout << "Left Iris Depth: " << left_iris_depth_cm << " cm." << std::endl; - ::mediapipe::Packet right_iris_depth_packet; + mediapipe::Packet right_iris_depth_packet; if (!right_iris_depth_poller.Next(&right_iris_depth_packet)) { - return ::mediapipe::UnknownError( + return mediapipe::UnknownError( "Failed to get packet from output stream 'right_iris_depth_mm'."); } const auto& right_iris_depth_mm = right_iris_depth_packet.Get<float>(); @@ -97,15 +97,15 @@ namespace { std::cout << "Right Iris Depth: " << right_iris_depth_cm << " cm."
<< std::endl; - ::mediapipe::Packet output_image_packet; + mediapipe::Packet output_image_packet; if (!output_image_poller.Next(&output_image_packet)) { - return ::mediapipe::UnknownError( + return mediapipe::UnknownError( "Failed to get packet from output stream 'output_image'."); } - auto& output_frame = output_image_packet.Get<::mediapipe::ImageFrame>(); + auto& output_frame = output_image_packet.Get<mediapipe::ImageFrame>(); // Convert back to opencv for display or saving. - cv::Mat output_frame_mat = ::mediapipe::formats::MatView(&output_frame); + cv::Mat output_frame_mat = mediapipe::formats::MatView(&output_frame); cv::cvtColor(output_frame_mat, output_frame_mat, cv::COLOR_RGB2BGR); const bool save_image = !FLAGS_output_image_path.empty(); if (save_image) { @@ -123,26 +123,26 @@ namespace { return graph->WaitUntilDone(); } -::mediapipe::Status RunMPPGraph() { +mediapipe::Status RunMPPGraph() { std::string calculator_graph_config_contents; - MP_RETURN_IF_ERROR(::mediapipe::file::GetContents( + MP_RETURN_IF_ERROR(mediapipe::file::GetContents( kCalculatorGraphConfigFile, &calculator_graph_config_contents)); LOG(INFO) << "Get calculator graph config contents: " << calculator_graph_config_contents; - ::mediapipe::CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie<::mediapipe::CalculatorGraphConfig>( + mediapipe::CalculatorGraphConfig config = + mediapipe::ParseTextProtoOrDie<mediapipe::CalculatorGraphConfig>( calculator_graph_config_contents); LOG(INFO) << "Initialize the calculator graph."; - std::unique_ptr<::mediapipe::CalculatorGraph> graph = - absl::make_unique<::mediapipe::CalculatorGraph>(); + std::unique_ptr<mediapipe::CalculatorGraph> graph = + absl::make_unique<mediapipe::CalculatorGraph>(); MP_RETURN_IF_ERROR(graph->Initialize(config)); const bool load_image = !FLAGS_input_image_path.empty(); if (load_image) { return ProcessImage(std::move(graph)); } else { - return ::mediapipe::InvalidArgumentError("Missing image file."); + return mediapipe::InvalidArgumentError("Missing image file."); } } @@ -151,7 +151,7 @@ namespace { int main(int argc, char** argv) { google::InitGoogleLogging(argv[0]); gflags::ParseCommandLineFlags(&argc, &argv, true); - ::mediapipe::Status run_status = RunMPPGraph(); + mediapipe::Status run_status = RunMPPGraph(); if (!run_status.ok()) { LOG(ERROR) << "Failed to run the graph: " << run_status.message(); return EXIT_FAILURE; diff --git a/mediapipe/examples/desktop/media_sequence/run_graph_file_io_main.cc b/mediapipe/examples/desktop/media_sequence/run_graph_file_io_main.cc index a9a2456be..56be7505d 100644 --- a/mediapipe/examples/desktop/media_sequence/run_graph_file_io_main.cc +++ b/mediapipe/examples/desktop/media_sequence/run_graph_file_io_main.cc @@ -38,7 +38,7 @@ DEFINE_string(output_side_packets, "", "side packets and paths to write to disk for the " "CalculatorGraph."); -::mediapipe::Status RunMPPGraph() { +mediapipe::Status RunMPPGraph() { std::string calculator_graph_config_contents; MP_RETURN_IF_ERROR(mediapipe::file::GetContents( FLAGS_calculator_graph_config_file, &calculator_graph_config_contents)); @@ -47,18 +47,18 @@ DEFINE_string(output_side_packets, "", mediapipe::CalculatorGraphConfig config = mediapipe::ParseTextProtoOrDie<mediapipe::CalculatorGraphConfig>( calculator_graph_config_contents); - std::map<std::string, ::mediapipe::Packet> input_side_packets; + std::map<std::string, mediapipe::Packet> input_side_packets; std::vector<std::string> kv_pairs = absl::StrSplit(FLAGS_input_side_packets, ','); for (const std::string& kv_pair : kv_pairs) { std::vector<std::string> name_and_value = absl::StrSplit(kv_pair, '='); RET_CHECK(name_and_value.size() == 2); - RET_CHECK(!::mediapipe::ContainsKey(input_side_packets, name_and_value[0])); +
RET_CHECK(!mediapipe::ContainsKey(input_side_packets, name_and_value[0])); std::string input_side_packet_contents; MP_RETURN_IF_ERROR(mediapipe::file::GetContents( name_and_value[1], &input_side_packet_contents)); input_side_packets[name_and_value[0]] = - ::mediapipe::MakePacket<std::string>(input_side_packet_contents); + mediapipe::MakePacket<std::string>(input_side_packet_contents); } LOG(INFO) << "Initialize the calculator graph."; mediapipe::CalculatorGraph graph; @@ -70,7 +70,7 @@ DEFINE_string(output_side_packets, "", for (const std::string& kv_pair : kv_pairs) { std::vector<std::string> name_and_value = absl::StrSplit(kv_pair, '='); RET_CHECK(name_and_value.size() == 2); - ::mediapipe::StatusOr<::mediapipe::Packet> output_packet = + mediapipe::StatusOr<mediapipe::Packet> output_packet = graph.GetOutputSidePacket(name_and_value[0]); RET_CHECK(output_packet.ok()) << "Packet " << name_and_value[0] << " was not available."; @@ -79,13 +79,13 @@ DEFINE_string(output_side_packets, "", MP_RETURN_IF_ERROR( mediapipe::file::SetContents(name_and_value[1], serialized_string)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } int main(int argc, char** argv) { google::InitGoogleLogging(argv[0]); gflags::ParseCommandLineFlags(&argc, &argv, true); - ::mediapipe::Status run_status = RunMPPGraph(); + mediapipe::Status run_status = RunMPPGraph(); if (!run_status.ok()) { LOG(ERROR) << "Failed to run the graph: " << run_status.message(); return EXIT_FAILURE; diff --git a/mediapipe/examples/desktop/object_detection_3d/BUILD b/mediapipe/examples/desktop/object_detection_3d/BUILD new file mode 100644 index 000000000..0e72c9e51 --- /dev/null +++ b/mediapipe/examples/desktop/object_detection_3d/BUILD @@ -0,0 +1,34 @@ +# Copyright 2020 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +licenses(["notice"]) + +package(default_visibility = ["//mediapipe/examples:__subpackages__"]) + +# bazel build -c opt --define MEDIAPIPE_DISABLE_GPU=1 mediapipe/examples/desktop/object_detection_3d:objectron_cpu +# To run 3D object detection for shoes, +# bazel-bin/mediapipe/examples/desktop/object_detection_3d/objectron_cpu \ +# --calculator_graph_config_file=mediapipe/graphs/object_detection_3d/objectron_desktop_cpu.pbtxt \ +# --input_side_packets="input_video_path=<input video path>,box_landmark_model_path=mediapipe/models/object_detection_3d_sneakers.tflite,output_video_path=<output video path>,allowed_labels=Footwear" +# To detect objects from other categories, change box_landmark_model_path and allowed_labels accordingly.
+# Chair: box_landmark_model_path=mediapipe/models/object_detection_3d_chair.tflite,allowed_labels=Chair +# Camera: box_landmark_model_path=mediapipe/models/object_detection_3d_camera.tflite,allowed_labels=Camera +# Cup: box_landmark_model_path=mediapipe/models/object_detection_3d_cup.tflite,allowed_labels=Mug +cc_binary( + name = "objectron_cpu", + deps = [ + "//mediapipe/examples/desktop:simple_run_graph_main", + "//mediapipe/graphs/object_detection_3d:desktop_cpu_calculators", + ], +) diff --git a/mediapipe/examples/desktop/pose_tracking/BUILD b/mediapipe/examples/desktop/pose_tracking/BUILD new file mode 100644 index 000000000..447e2dfdc --- /dev/null +++ b/mediapipe/examples/desktop/pose_tracking/BUILD @@ -0,0 +1,34 @@ +# Copyright 2020 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +licenses(["notice"]) + +package(default_visibility = ["//mediapipe/examples:__subpackages__"]) + +cc_binary( + name = "pose_tracking_cpu", + deps = [ + "//mediapipe/examples/desktop:demo_run_graph_main", + "//mediapipe/graphs/pose_tracking:pose_tracking_cpu_deps", + ], +) + +# Linux only +cc_binary( + name = "pose_tracking_gpu", + deps = [ + "//mediapipe/examples/desktop:demo_run_graph_main_gpu", + "//mediapipe/graphs/pose_tracking:pose_tracking_gpu_deps", + ], +) diff --git a/mediapipe/examples/desktop/simple_run_graph_main.cc b/mediapipe/examples/desktop/simple_run_graph_main.cc index 2b76de6a5..0f45a3810 100644 --- a/mediapipe/examples/desktop/simple_run_graph_main.cc +++ b/mediapipe/examples/desktop/simple_run_graph_main.cc @@ -58,11 +58,11 @@ DEFINE_string(output_side_packets_file, "", "The name of the local file to output all side packets specified " "with --output_side_packets. 
"); -::mediapipe::Status OutputStreamToLocalFile( - ::mediapipe::OutputStreamPoller& poller) { +mediapipe::Status OutputStreamToLocalFile( + mediapipe::OutputStreamPoller& poller) { std::ofstream file; file.open(FLAGS_output_stream_file); - ::mediapipe::Packet packet; + mediapipe::Packet packet; while (poller.Next(&packet)) { std::string output_data; if (!FLAGS_strip_timestamps) { @@ -72,11 +72,11 @@ DEFINE_string(output_side_packets_file, "", file << output_data; } file.close(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status OutputSidePacketsToLocalFile( - ::mediapipe::CalculatorGraph& graph) { +mediapipe::Status OutputSidePacketsToLocalFile( + mediapipe::CalculatorGraph& graph) { if (!FLAGS_output_side_packets.empty() && !FLAGS_output_side_packets_file.empty()) { std::ofstream file; @@ -96,33 +96,32 @@ DEFINE_string(output_side_packets_file, "", << "--output_side_packets and --output_side_packets_file should be " "specified in pair."; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status RunMPPGraph() { +mediapipe::Status RunMPPGraph() { std::string calculator_graph_config_contents; - MP_RETURN_IF_ERROR(::mediapipe::file::GetContents( + MP_RETURN_IF_ERROR(mediapipe::file::GetContents( FLAGS_calculator_graph_config_file, &calculator_graph_config_contents)); LOG(INFO) << "Get calculator graph config contents: " << calculator_graph_config_contents; - ::mediapipe::CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie<::mediapipe::CalculatorGraphConfig>( + mediapipe::CalculatorGraphConfig config = + mediapipe::ParseTextProtoOrDie( calculator_graph_config_contents); - std::map input_side_packets; + std::map input_side_packets; if (!FLAGS_input_side_packets.empty()) { std::vector kv_pairs = absl::StrSplit(FLAGS_input_side_packets, ','); for (const std::string& kv_pair : kv_pairs) { std::vector name_and_value = absl::StrSplit(kv_pair, '='); RET_CHECK(name_and_value.size() == 2); - RET_CHECK( - !::mediapipe::ContainsKey(input_side_packets, name_and_value[0])); + RET_CHECK(!mediapipe::ContainsKey(input_side_packets, name_and_value[0])); input_side_packets[name_and_value[0]] = - ::mediapipe::MakePacket(name_and_value[1]); + mediapipe::MakePacket(name_and_value[1]); } } LOG(INFO) << "Initialize the calculator graph."; - ::mediapipe::CalculatorGraph graph; + mediapipe::CalculatorGraph graph; MP_RETURN_IF_ERROR(graph.Initialize(config, input_side_packets)); if (!FLAGS_output_stream.empty() && !FLAGS_output_stream_file.empty()) { ASSIGN_OR_RETURN(auto poller, @@ -144,7 +143,7 @@ DEFINE_string(output_side_packets_file, "", int main(int argc, char** argv) { google::InitGoogleLogging(argv[0]); gflags::ParseCommandLineFlags(&argc, &argv, true); - ::mediapipe::Status run_status = RunMPPGraph(); + mediapipe::Status run_status = RunMPPGraph(); if (!run_status.ok()) { LOG(ERROR) << "Failed to run the graph: " << run_status.message(); return EXIT_FAILURE; diff --git a/mediapipe/examples/desktop/youtube8m/README.md b/mediapipe/examples/desktop/youtube8m/README.md index 57a606fd6..94b36809d 100644 --- a/mediapipe/examples/desktop/youtube8m/README.md +++ b/mediapipe/examples/desktop/youtube8m/README.md @@ -1,7 +1,7 @@ ### Steps to run the YouTube-8M feature extraction graph 1. 
diff --git a/mediapipe/examples/desktop/youtube8m/README.md b/mediapipe/examples/desktop/youtube8m/README.md index 57a606fd6..94b36809d 100644 --- a/mediapipe/examples/desktop/youtube8m/README.md +++ b/mediapipe/examples/desktop/youtube8m/README.md @@ -1,7 +1,7 @@ ### Steps to run the YouTube-8M feature extraction graph 1. Checkout the repository and follow - [the installation instructions](https://github.com/google/mediapipe/blob/master/mediapipe/docs/install.md) + [the installation instructions](https://github.com/google/mediapipe/blob/master/docs/getting_started/install.md) to set up MediaPipe. ```bash diff --git a/mediapipe/examples/desktop/youtube8m/extract_yt8m_features.cc b/mediapipe/examples/desktop/youtube8m/extract_yt8m_features.cc index 593fb187d..c1ad40a90 100644 --- a/mediapipe/examples/desktop/youtube8m/extract_yt8m_features.cc +++ b/mediapipe/examples/desktop/youtube8m/extract_yt8m_features.cc @@ -39,7 +39,7 @@ DEFINE_string(output_side_packets, "", "side packets and paths to write to disk for the " "CalculatorGraph."); -::mediapipe::Status RunMPPGraph() { +mediapipe::Status RunMPPGraph() { std::string calculator_graph_config_contents; MP_RETURN_IF_ERROR(mediapipe::file::GetContents( FLAGS_calculator_graph_config_file, &calculator_graph_config_contents)); @@ -48,18 +48,18 @@ DEFINE_string(output_side_packets, "", mediapipe::CalculatorGraphConfig config = mediapipe::ParseTextProtoOrDie<mediapipe::CalculatorGraphConfig>( calculator_graph_config_contents); - std::map<std::string, ::mediapipe::Packet> input_side_packets; + std::map<std::string, mediapipe::Packet> input_side_packets; std::vector<std::string> kv_pairs = absl::StrSplit(FLAGS_input_side_packets, ','); for (const std::string& kv_pair : kv_pairs) { std::vector<std::string> name_and_value = absl::StrSplit(kv_pair, '='); RET_CHECK(name_and_value.size() == 2); - RET_CHECK(!::mediapipe::ContainsKey(input_side_packets, name_and_value[0])); + RET_CHECK(!mediapipe::ContainsKey(input_side_packets, name_and_value[0])); std::string input_side_packet_contents; MP_RETURN_IF_ERROR(mediapipe::file::GetContents( name_and_value[1], &input_side_packet_contents)); input_side_packets[name_and_value[0]] = - ::mediapipe::MakePacket<std::string>(input_side_packet_contents); + mediapipe::MakePacket<std::string>(input_side_packet_contents); } mediapipe::MatrixData inc3_pca_mean_matrix_data, @@ -75,7 +75,7 @@ DEFINE_string(output_side_packets, "", mediapipe::MatrixFromMatrixDataProto(inc3_pca_mean_matrix_data, &inc3_pca_mean_matrix); input_side_packets["inception3_pca_mean_matrix"] = - ::mediapipe::MakePacket<mediapipe::Matrix>(inc3_pca_mean_matrix); + mediapipe::MakePacket<mediapipe::Matrix>(inc3_pca_mean_matrix); MP_RETURN_IF_ERROR(mediapipe::file::GetContents( "/tmp/mediapipe/inception3_projection_matrix_data.pb", &content)); @@ -83,7 +83,7 @@ DEFINE_string(output_side_packets, "", mediapipe::MatrixFromMatrixDataProto(inc3_pca_projection_matrix_data, &inc3_pca_projection_matrix); input_side_packets["inception3_pca_projection_matrix"] = - ::mediapipe::MakePacket<mediapipe::Matrix>(inc3_pca_projection_matrix); + mediapipe::MakePacket<mediapipe::Matrix>(inc3_pca_projection_matrix); MP_RETURN_IF_ERROR(mediapipe::file::GetContents( "/tmp/mediapipe/vggish_mean_matrix_data.pb", &content)); @@ -91,7 +91,7 @@ DEFINE_string(output_side_packets, "", mediapipe::MatrixFromMatrixDataProto(vggish_pca_mean_matrix_data, &vggish_pca_mean_matrix); input_side_packets["vggish_pca_mean_matrix"] = - ::mediapipe::MakePacket<mediapipe::Matrix>(vggish_pca_mean_matrix); + mediapipe::MakePacket<mediapipe::Matrix>(vggish_pca_mean_matrix); MP_RETURN_IF_ERROR(mediapipe::file::GetContents( "/tmp/mediapipe/vggish_projection_matrix_data.pb", &content)); @@ -99,7 +99,7 @@ DEFINE_string(output_side_packets, "", mediapipe::MatrixFromMatrixDataProto(vggish_pca_projection_matrix_data, &vggish_pca_projection_matrix); input_side_packets["vggish_pca_projection_matrix"] = - ::mediapipe::MakePacket<mediapipe::Matrix>(vggish_pca_projection_matrix); + mediapipe::MakePacket<mediapipe::Matrix>(vggish_pca_projection_matrix); LOG(INFO) << "Initialize the calculator graph."; mediapipe::CalculatorGraph
graph; @@ -111,7 +111,7 @@ DEFINE_string(output_side_packets, "", for (const std::string& kv_pair : kv_pairs) { std::vector name_and_value = absl::StrSplit(kv_pair, '='); RET_CHECK(name_and_value.size() == 2); - ::mediapipe::StatusOr<::mediapipe::Packet> output_packet = + mediapipe::StatusOr output_packet = graph.GetOutputSidePacket(name_and_value[0]); RET_CHECK(output_packet.ok()) << "Packet " << name_and_value[0] << " was not available."; @@ -120,13 +120,13 @@ DEFINE_string(output_side_packets, "", MP_RETURN_IF_ERROR( mediapipe::file::SetContents(name_and_value[1], serialized_string)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } int main(int argc, char** argv) { google::InitGoogleLogging(argv[0]); gflags::ParseCommandLineFlags(&argc, &argv, true); - ::mediapipe::Status run_status = RunMPPGraph(); + mediapipe::Status run_status = RunMPPGraph(); if (!run_status.ok()) { LOG(ERROR) << "Failed to run the graph: " << run_status.message(); return EXIT_FAILURE; diff --git a/mediapipe/examples/ios/common/CommonViewController.h b/mediapipe/examples/ios/common/CommonViewController.h index b4650423b..d7cb1121a 100644 --- a/mediapipe/examples/ios/common/CommonViewController.h +++ b/mediapipe/examples/ios/common/CommonViewController.h @@ -18,6 +18,7 @@ #import "mediapipe/objc/MPPGraph.h" #import "mediapipe/objc/MPPLayerRenderer.h" #import "mediapipe/objc/MPPPlayerInputSource.h" +#import "mediapipe/objc/MPPTimestampConverter.h" typedef NS_ENUM(NSInteger, MediaPipeDemoSourceMode) { MediaPipeDemoSourceCamera, @@ -36,6 +37,9 @@ typedef NS_ENUM(NSInteger, MediaPipeDemoSourceMode) { // Provides data from a video. @property(nonatomic) MPPPlayerInputSource* videoSource; +// Helps to convert timestamp. +@property(nonatomic) MPPTimestampConverter* timestampConverter; + // The data source for the demo. 
@property(nonatomic) MediaPipeDemoSourceMode sourceMode; diff --git a/mediapipe/examples/ios/common/CommonViewController.mm b/mediapipe/examples/ios/common/CommonViewController.mm index aa7eb5d57..e819e8170 100644 --- a/mediapipe/examples/ios/common/CommonViewController.mm +++ b/mediapipe/examples/ios/common/CommonViewController.mm @@ -77,6 +77,8 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue"; [self.liveView.layer addSublayer:self.renderer.layer]; self.renderer.frameScaleMode = MPPFrameScaleModeFillAndCrop; + self.timestampConverter = [[MPPTimestampConverter alloc] init]; + dispatch_queue_attr_t qosAttribute = dispatch_queue_attr_make_with_qos_class( DISPATCH_QUEUE_SERIAL, QOS_CLASS_USER_INTERACTIVE, /*relative_priority=*/0); self.videoQueue = dispatch_queue_create(kVideoQueueLabel, qosAttribute); @@ -173,7 +175,8 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue"; [self.mediapipeGraph sendPixelBuffer:imageBuffer intoStream:self.graphInputStream - packetType:MPPPacketTypePixelBuffer]; + packetType:MPPPacketTypePixelBuffer + timestamp:[self.timestampConverter timestampForMediaTime:timestamp]]; } #pragma mark - MPPGraphDelegate methods diff --git a/mediapipe/examples/ios/faceeffect/BUILD b/mediapipe/examples/ios/faceeffect/BUILD index 271dcfa88..7b681cd85 100644 --- a/mediapipe/examples/ios/faceeffect/BUILD +++ b/mediapipe/examples/ios/faceeffect/BUILD @@ -49,27 +49,14 @@ ios_application( ) objc_library( - name = "FaceEffectAppLibrary", + name = "FaceEffectViewController", srcs = [ - "AppDelegate.m", "FaceEffectViewController.mm", - "main.m", ], hdrs = [ - "AppDelegate.h", "FaceEffectViewController.h", ], - data = [ - "Base.lproj/LaunchScreen.storyboard", - "Base.lproj/Main.storyboard", - "//mediapipe/graphs/face_effect:face_effect_gpu.binarypb", - "//mediapipe/graphs/face_effect/data:facepaint.pngblob", - "//mediapipe/graphs/face_effect/data:glasses.binarypb", - "//mediapipe/graphs/face_effect/data:glasses.pngblob", - "//mediapipe/modules/face_detection:face_detection_front.tflite", - "//mediapipe/modules/face_geometry/data:geometry_pipeline_metadata.binarypb", - "//mediapipe/modules/face_landmark:face_landmark.tflite", - ], + copts = ["-std=c++17"], sdk_frameworks = [ "AVFoundation", "CoreGraphics", @@ -90,3 +77,28 @@ objc_library( ], }), ) + +objc_library( + name = "FaceEffectAppLibrary", + srcs = [ + "AppDelegate.m", + "main.m", + ], + hdrs = [ + "AppDelegate.h", + ], + data = [ + "Base.lproj/LaunchScreen.storyboard", + "Base.lproj/Main.storyboard", + "//mediapipe/graphs/face_effect:face_effect_gpu.binarypb", + "//mediapipe/graphs/face_effect/data:facepaint.pngblob", + "//mediapipe/graphs/face_effect/data:glasses.binarypb", + "//mediapipe/graphs/face_effect/data:glasses.pngblob", + "//mediapipe/modules/face_detection:face_detection_front.tflite", + "//mediapipe/modules/face_geometry/data:geometry_pipeline_metadata.binarypb", + "//mediapipe/modules/face_landmark:face_landmark.tflite", + ], + deps = [ + ":FaceEffectViewController", + ], +) diff --git a/mediapipe/examples/ios/facemeshgpu/BUILD b/mediapipe/examples/ios/facemeshgpu/BUILD index 5a7f92e1e..942a19659 100644 --- a/mediapipe/examples/ios/facemeshgpu/BUILD +++ b/mediapipe/examples/ios/facemeshgpu/BUILD @@ -59,6 +59,7 @@ objc_library( hdrs = [ "FaceMeshGpuViewController.h", ], + copts = ["-std=c++17"], data = [ "//mediapipe/graphs/face_mesh:face_mesh_mobile_gpu.binarypb", "//mediapipe/modules/face_detection:face_detection_front.tflite", diff --git 
a/mediapipe/examples/ios/handtrackinggpu/BUILD b/mediapipe/examples/ios/handtrackinggpu/BUILD index ed732f8fb..0121150e1 100644 --- a/mediapipe/examples/ios/handtrackinggpu/BUILD +++ b/mediapipe/examples/ios/handtrackinggpu/BUILD @@ -59,6 +59,7 @@ objc_library( hdrs = [ "HandTrackingViewController.h", ], + copts = ["-std=c++17"], data = [ "//mediapipe/graphs/hand_tracking:hand_tracking_mobile_gpu.binarypb", "//mediapipe/modules/hand_landmark:hand_landmark.tflite", diff --git a/mediapipe/examples/ios/holistictrackinggpu/BUILD b/mediapipe/examples/ios/holistictrackinggpu/BUILD new file mode 100644 index 000000000..b8d6c00ab --- /dev/null +++ b/mediapipe/examples/ios/holistictrackinggpu/BUILD @@ -0,0 +1,76 @@ +# Copyright 2020 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +load( + "@build_bazel_rules_apple//apple:ios.bzl", + "ios_application", +) +load( + "//mediapipe/examples/ios:bundle_id.bzl", + "BUNDLE_ID_PREFIX", + "example_provisioning", +) + +licenses(["notice"]) + +MIN_IOS_VERSION = "10.0" + +alias( + name = "holistictrackinggpu", + actual = "HolisticTrackingGpuApp", +) + +ios_application( + name = "HolisticTrackingGpuApp", + app_icons = ["//mediapipe/examples/ios/common:AppIcon"], + bundle_id = BUNDLE_ID_PREFIX + ".HolisticTrackingGpu", + families = [ + "iphone", + "ipad", + ], + infoplists = [ + "//mediapipe/examples/ios/common:Info.plist", + "Info.plist", + ], + minimum_os_version = MIN_IOS_VERSION, + provisioning_profile = example_provisioning(), + deps = [ + ":HolisticTrackingGpuAppLibrary", + "@ios_opencv//:OpencvFramework", + ], +) + +objc_library( + name = "HolisticTrackingGpuAppLibrary", + data = [ + "//mediapipe/graphs/holistic_tracking:holistic_tracking_gpu.binarypb", + "//mediapipe/modules/face_detection:face_detection_front.tflite", + "//mediapipe/modules/face_landmark:face_landmark.tflite", + "//mediapipe/modules/hand_landmark:hand_landmark.tflite", + "//mediapipe/modules/hand_landmark:handedness.txt", + "//mediapipe/modules/holistic_landmark:hand_recrop.tflite", + "//mediapipe/modules/pose_detection:pose_detection.tflite", + "//mediapipe/modules/pose_landmark:pose_landmark_full_body.tflite", + "//mediapipe/modules/pose_landmark:pose_landmark_upper_body.tflite", + ], + deps = [ + "//mediapipe/examples/ios/common:CommonMediaPipeAppLibrary", + ] + select({ + "//mediapipe:ios_i386": [], + "//mediapipe:ios_x86_64": [], + "//conditions:default": [ + "//mediapipe/graphs/holistic_tracking:holistic_tracking_gpu_deps", + ], + }), +) diff --git a/mediapipe/examples/ios/holistictrackinggpu/Info.plist b/mediapipe/examples/ios/holistictrackinggpu/Info.plist new file mode 100644 index 000000000..ae92eb50f --- /dev/null +++ b/mediapipe/examples/ios/holistictrackinggpu/Info.plist @@ -0,0 +1,14 @@ + + + + + CameraPosition + back + GraphOutputStream + output_video + GraphInputStream + input_video + GraphName + holistic_tracking_gpu + + diff --git a/mediapipe/examples/ios/iristrackinggpu/BUILD b/mediapipe/examples/ios/iristrackinggpu/BUILD index 3cf8d14f7..b58ecc104 
100644 --- a/mediapipe/examples/ios/iristrackinggpu/BUILD +++ b/mediapipe/examples/ios/iristrackinggpu/BUILD @@ -59,6 +59,7 @@ objc_library( hdrs = [ "IrisTrackingViewController.h", ], + copts = ["-std=c++17"], data = [ "//mediapipe/graphs/iris_tracking:iris_tracking_gpu.binarypb", "//mediapipe/modules/face_detection:face_detection_front.tflite", diff --git a/mediapipe/examples/ios/posetrackinggpu/BUILD b/mediapipe/examples/ios/posetrackinggpu/BUILD new file mode 100644 index 000000000..c78c6a674 --- /dev/null +++ b/mediapipe/examples/ios/posetrackinggpu/BUILD @@ -0,0 +1,78 @@ +# Copyright 2020 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +load( + "@build_bazel_rules_apple//apple:ios.bzl", + "ios_application", +) +load( + "//mediapipe/examples/ios:bundle_id.bzl", + "BUNDLE_ID_PREFIX", + "example_provisioning", +) + +licenses(["notice"]) + +MIN_IOS_VERSION = "10.0" + +alias( + name = "posetrackinggpu", + actual = "PoseTrackingGpuApp", +) + +ios_application( + name = "PoseTrackingGpuApp", + app_icons = ["//mediapipe/examples/ios/common:AppIcon"], + bundle_id = BUNDLE_ID_PREFIX + ".PoseTrackingGpu", + families = [ + "iphone", + "ipad", + ], + infoplists = [ + "//mediapipe/examples/ios/common:Info.plist", + "Info.plist", + ], + minimum_os_version = MIN_IOS_VERSION, + provisioning_profile = example_provisioning(), + deps = [ + ":PoseTrackingGpuAppLibrary", + "@ios_opencv//:OpencvFramework", + ], +) + +objc_library( + name = "PoseTrackingGpuAppLibrary", + srcs = [ + "PoseTrackingViewController.mm", + ], + hdrs = [ + "PoseTrackingViewController.h", + ], + copts = ["-std=c++17"], + data = [ + "//mediapipe/graphs/pose_tracking:pose_tracking_gpu.binarypb", + "//mediapipe/modules/pose_detection:pose_detection.tflite", + "//mediapipe/modules/pose_landmark:pose_landmark_full_body.tflite", + ], + deps = [ + "//mediapipe/examples/ios/common:CommonMediaPipeAppLibrary", + ] + select({ + "//mediapipe:ios_i386": [], + "//mediapipe:ios_x86_64": [], + "//conditions:default": [ + "//mediapipe/graphs/pose_tracking:pose_tracking_gpu_deps", + "//mediapipe/framework/formats:landmark_cc_proto", + ], + }), +) diff --git a/mediapipe/examples/ios/posetrackinggpu/Info.plist b/mediapipe/examples/ios/posetrackinggpu/Info.plist new file mode 100644 index 000000000..71e2e429e --- /dev/null +++ b/mediapipe/examples/ios/posetrackinggpu/Info.plist @@ -0,0 +1,16 @@ + + + + + CameraPosition + back + MainViewController + PoseTrackingViewController + GraphOutputStream + output_video + GraphInputStream + input_video + GraphName + pose_tracking_gpu + + diff --git a/mediapipe/examples/ios/posetrackinggpu/PoseTrackingViewController.h b/mediapipe/examples/ios/posetrackinggpu/PoseTrackingViewController.h new file mode 100644 index 000000000..f5dc4674a --- /dev/null +++ b/mediapipe/examples/ios/posetrackinggpu/PoseTrackingViewController.h @@ -0,0 +1,21 @@ +// Copyright 2020 The MediaPipe Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#import + +#import "mediapipe/examples/ios/common/CommonViewController.h" + +@interface PoseTrackingViewController : CommonViewController + +@end diff --git a/mediapipe/examples/ios/posetrackinggpu/PoseTrackingViewController.mm b/mediapipe/examples/ios/posetrackinggpu/PoseTrackingViewController.mm new file mode 100644 index 000000000..0f082031c --- /dev/null +++ b/mediapipe/examples/ios/posetrackinggpu/PoseTrackingViewController.mm @@ -0,0 +1,53 @@ +// Copyright 2020 The MediaPipe Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#import "PoseTrackingViewController.h" + +#include "mediapipe/framework/formats/landmark.pb.h" + +static const char* kLandmarksOutputStream = "pose_landmarks"; + +@implementation PoseTrackingViewController + +#pragma mark - UIViewController methods + +- (void)viewDidLoad { + [super viewDidLoad]; + + [self.mediapipeGraph addFrameOutputStream:kLandmarksOutputStream + outputPacketType:MPPPacketTypeRaw]; +} + +#pragma mark - MPPGraphDelegate methods + +// Receives a raw packet from the MediaPipe graph. Invoked on a MediaPipe worker thread. 
+- (void)mediapipeGraph:(MPPGraph*)graph + didOutputPacket:(const ::mediapipe::Packet&)packet + fromStream:(const std::string&)streamName { + if (streamName == kLandmarksOutputStream) { + if (packet.IsEmpty()) { + NSLog(@"[TS:%lld] No pose landmarks", packet.Timestamp().Value()); + return; + } + const auto& landmarks = packet.Get<::mediapipe::NormalizedLandmarkList>(); + NSLog(@"[TS:%lld] Number of pose landmarks: %d", packet.Timestamp().Value(), + landmarks.landmark_size()); + for (int i = 0; i < landmarks.landmark_size(); ++i) { + NSLog(@"\tLandmark[%d]: (%f, %f, %f)", i, landmarks.landmark(i).x(), + landmarks.landmark(i).y(), landmarks.landmark(i).z()); + } + } +} + +@end diff --git a/mediapipe/examples/ios/upperbodyposetrackinggpu/BUILD b/mediapipe/examples/ios/upperbodyposetrackinggpu/BUILD index 0a2402857..3455fbbf8 100644 --- a/mediapipe/examples/ios/upperbodyposetrackinggpu/BUILD +++ b/mediapipe/examples/ios/upperbodyposetrackinggpu/BUILD @@ -59,6 +59,7 @@ objc_library( hdrs = [ "UpperBodyPoseTrackingViewController.h", ], + copts = ["-std=c++17"], data = [ "//mediapipe/graphs/pose_tracking:upper_body_pose_tracking_gpu.binarypb", "//mediapipe/modules/pose_detection:pose_detection.tflite", diff --git a/mediapipe/framework/basic_types_registration.cc b/mediapipe/framework/basic_types_registration.cc index 7e5478118..a16daac72 100644 --- a/mediapipe/framework/basic_types_registration.cc +++ b/mediapipe/framework/basic_types_registration.cc @@ -4,9 +4,9 @@ #include "mediapipe/framework/port/integral_types.h" #include "mediapipe/framework/type_map.h" -#define MEDIAPIPE_REGISTER_GENERIC_TYPE(type) \ - MEDIAPIPE_REGISTER_TYPE( \ - ::mediapipe::type_map_internal::ReflectType::Type, #type, \ +#define MEDIAPIPE_REGISTER_GENERIC_TYPE(type) \ + MEDIAPIPE_REGISTER_TYPE( \ + mediapipe::type_map_internal::ReflectType::Type, #type, \ nullptr, nullptr) // Note: we cannot define a type which type hash id is already in the map. diff --git a/mediapipe/framework/calculator_base.h b/mediapipe/framework/calculator_base.h index e0ca42170..8448e732f 100644 --- a/mediapipe/framework/calculator_base.h +++ b/mediapipe/framework/calculator_base.h @@ -81,7 +81,7 @@ class CalculatorBase { // this function is static the registration macro provides access to // each subclass' GetContract function. // - // static ::mediapipe::Status GetContract(CalculatorContract* cc); + // static mediapipe::Status GetContract(CalculatorContract* cc); // // GetContract fills in the calculator's contract with the framework, such // as its expectations of what packets it will receive. When this function @@ -115,23 +115,23 @@ class CalculatorBase { // Open is called before any Process() calls, on a freshly constructed // calculator. Subclasses may override this method to perform necessary // setup, and possibly output Packets and/or set output streams' headers. - // Must return ::mediapipe::OkStatus() to indicate success. On failure any + // Must return mediapipe::OkStatus() to indicate success. On failure any // other status code can be returned. If failure is returned then the // framework will call neither Process() nor Close() on the calculator (so any // necessary cleanup should be done before returning failure or in the // destructor). - virtual ::mediapipe::Status Open(CalculatorContext* cc) { - return ::mediapipe::OkStatus(); + virtual mediapipe::Status Open(CalculatorContext* cc) { + return mediapipe::OkStatus(); } // Processes the incoming inputs. May call the methods on cc to access // inputs and produce outputs. 
// // Process() called on a non-source node must return - // ::mediapipe::OkStatus() to indicate that all went well, or any other + // mediapipe::OkStatus() to indicate that all went well, or any other // status code to signal an error. // For example: - // ::mediapipe::UnknownError("Failure Message"); + // mediapipe::UnknownError("Failure Message"); // Notice the convenience functions in util/task/canonical_errors.h . // If a non-source Calculator returns tool::StatusStop(), then this // signals the graph is being cancelled early. In this case, all @@ -139,22 +139,22 @@ class CalculatorBase { // remaining Packets will propagate through the graph). // // A source node will continue to have Process() called on it as long - // as it returns ::mediapipe::OkStatus(). To indicate that there is + // as it returns mediapipe::OkStatus(). To indicate that there is // no more data to be generated return tool::StatusStop(). Any other // status indicates an error has occurred. - virtual ::mediapipe::Status Process(CalculatorContext* cc) = 0; + virtual mediapipe::Status Process(CalculatorContext* cc) = 0; // Is called if Open() was called and succeeded. Is called either // immediately after processing is complete or after a graph run has ended - // (if an error occurred in the graph). Must return ::mediapipe::OkStatus() + // (if an error occurred in the graph). Must return mediapipe::OkStatus() // to indicate success. On failure any other status code can be returned. // Packets may be output during a call to Close(). However, output packets // are silently discarded if Close() is called after a graph run has ended. // // NOTE: If Close() needs to perform an action only when processing is // complete, Close() must check if cc->GraphStatus() is OK. - virtual ::mediapipe::Status Close(CalculatorContext* cc) { - return ::mediapipe::OkStatus(); + virtual mediapipe::Status Close(CalculatorContext* cc) { + return mediapipe::OkStatus(); } // Returns a value according to which the framework selects @@ -178,7 +178,7 @@ namespace internal { class StaticAccessToCalculatorBase { public: virtual ~StaticAccessToCalculatorBase() {} - virtual ::mediapipe::Status GetContract(CalculatorContract* cc) = 0; + virtual mediapipe::Status GetContract(CalculatorContract* cc) = 0; }; using StaticAccessToCalculatorBaseRegistry = @@ -187,7 +187,7 @@ using StaticAccessToCalculatorBaseRegistry = // Functions for checking that the calculator has the required GetContract. template <class T> constexpr bool CalculatorHasGetContract(decltype(&T::GetContract) /*unused*/) { - typedef ::mediapipe::Status (*GetContractType)(CalculatorContract * cc); + typedef mediapipe::Status (*GetContractType)(CalculatorContract * cc); return std::is_same<decltype(&T::GetContract), GetContractType>::value; } template <class T> @@ -200,17 +200,17 @@ constexpr bool CalculatorHasGetContract(...) { template <typename CalculatorBaseSubclass> class StaticAccessToCalculatorBaseTyped : public StaticAccessToCalculatorBase { public: - static_assert(std::is_base_of<::mediapipe::CalculatorBase, - CalculatorBaseSubclass>::value, - "Classes registered with REGISTER_CALCULATOR must be " - "subclasses of ::mediapipe::CalculatorBase."); + static_assert( + std::is_base_of<mediapipe::CalculatorBase, CalculatorBaseSubclass>::value, + "Classes registered with REGISTER_CALCULATOR must be " + "subclasses of mediapipe::CalculatorBase."); static_assert(CalculatorHasGetContract<CalculatorBaseSubclass>(nullptr), "GetContract() must be defined with the correct signature in " "every calculator."); // Provides access to the static function GetContract within a specific // subclass of CalculatorBase.
- ::mediapipe::Status GetContract(CalculatorContract* cc) final { + mediapipe::Status GetContract(CalculatorContract* cc) final { // CalculatorBaseSubclass must implement this function, since it is not // implemented in the parent class. return CalculatorBaseSubclass::GetContract(cc); diff --git a/mediapipe/framework/calculator_base_test.cc b/mediapipe/framework/calculator_base_test.cc index fcb4ebf37..ac63eb13d 100644 --- a/mediapipe/framework/calculator_base_test.cc +++ b/mediapipe/framework/calculator_base_test.cc @@ -41,7 +41,7 @@ namespace test_ns { // streams and input side packets. class DeadEndCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { cc->Inputs().Index(i).SetAny(); } @@ -51,16 +51,16 @@ class DeadEndCalculator : public CalculatorBase { for (int i = 0; i < cc->InputSidePackets().NumEntries(); ++i) { cc->InputSidePackets().Index(i).SetAny(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { - return ::mediapipe::OkStatus(); + mediapipe::Status Open(CalculatorContext* cc) override { + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { if (cc->Inputs().NumEntries() > 0) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } else { // This is a source calculator, but we don't produce any outputs. return tool::StatusStop(); @@ -73,14 +73,14 @@ namespace whitelisted_ns { class DeadCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { - return ::mediapipe::OkStatus(); + static mediapipe::Status GetContract(CalculatorContract* cc) { + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { - return ::mediapipe::OkStatus(); + mediapipe::Status Open(CalculatorContext* cc) override { + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { - return ::mediapipe::OkStatus(); + mediapipe::Status Process(CalculatorContext* cc) override { + return mediapipe::OkStatus(); } }; @@ -89,14 +89,14 @@ class DeadCalculator : public CalculatorBase { class EndCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { - return ::mediapipe::OkStatus(); + static mediapipe::Status GetContract(CalculatorContract* cc) { + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { - return ::mediapipe::OkStatus(); + mediapipe::Status Open(CalculatorContext* cc) override { + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { - return ::mediapipe::OkStatus(); + mediapipe::Status Process(CalculatorContext* cc) override { + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(::mediapipe::EndCalculator); @@ -181,13 +181,13 @@ TEST(CalculatorTest, CreateByName) { "mediapipe", "DeadEndCalculator") .status() .code(), - ::mediapipe::StatusCode::kNotFound); + mediapipe::StatusCode::kNotFound); EXPECT_EQ(CalculatorBaseRegistry::CreateByName( // "DeadEndCalculator") .status() .code(), - ::mediapipe::StatusCode::kNotFound); + mediapipe::StatusCode::kNotFound); } // Tests registration of a calculator within a whitelisted namespace. 
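The GetContract/Open/Process contract documented in calculator_base.h and exercised by these test calculators is easiest to see in one self-contained example. A sketch in the post-change style, modeled on the PassThroughCalculator pattern rather than on any file in this diff (the calculator name is hypothetical):

```cpp
#include "mediapipe/framework/calculator_framework.h"

namespace mediapipe {

// Forwards every input packet to the corresponding output stream; a minimal
// illustration of GetContract/Open/Process. Not part of this change.
class ForwardCalculator : public CalculatorBase {
 public:
  static mediapipe::Status GetContract(CalculatorContract* cc) {
    for (int i = 0; i < cc->Inputs().NumEntries(); ++i) {
      cc->Inputs().Index(i).SetAny();
      // Each output carries whatever type its paired input carries.
      cc->Outputs().Index(i).SetSameAs(&cc->Inputs().Index(i));
    }
    return mediapipe::OkStatus();
  }

  mediapipe::Status Open(CalculatorContext* cc) override {
    return mediapipe::OkStatus();  // No setup needed for a pass-through.
  }

  mediapipe::Status Process(CalculatorContext* cc) override {
    for (int i = 0; i < cc->Inputs().NumEntries(); ++i) {
      if (!cc->Inputs().Index(i).IsEmpty()) {
        cc->Outputs().Index(i).AddPacket(cc->Inputs().Index(i).Value());
      }
    }
    return mediapipe::OkStatus();
  }
};
REGISTER_CALCULATOR(ForwardCalculator);

}  // namespace mediapipe
```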
@@ -202,7 +202,7 @@ TEST(CalculatorTest, CreateByNameWhitelisted) { // Register a whitelisted calculator. CalculatorBaseRegistry::Register( "::mediapipe::test_ns::whitelisted_ns::DeadCalculator", - absl::make_unique< ::mediapipe::test_ns::whitelisted_ns::DeadCalculator>); + absl::make_unique<mediapipe::test_ns::whitelisted_ns::DeadCalculator>); // A whitelisted calculator can be found in its own namespace. MP_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( // diff --git a/mediapipe/framework/calculator_context.h b/mediapipe/framework/calculator_context.h index 9f12c0133..8aaca701a 100644 --- a/mediapipe/framework/calculator_context.h +++ b/mediapipe/framework/calculator_context.h @@ -103,7 +103,7 @@ class CalculatorContext { // Returns the status of the graph run. // // NOTE: This method should only be called during CalculatorBase::Close(). - ::mediapipe::Status GraphStatus() const { return graph_status_; } + mediapipe::Status GraphStatus() const { return graph_status_; } ProfilingContext* GetProfilingContext() const { return calculator_state_->GetSharedProfilingContext().get(); @@ -148,7 +148,7 @@ class CalculatorContext { input_timestamps_.pop(); } - void SetGraphStatus(const ::mediapipe::Status& status) { + void SetGraphStatus(const mediapipe::Status& status) { graph_status_ = status; } @@ -167,7 +167,7 @@ class CalculatorContext { std::queue<Timestamp> input_timestamps_; // The status of the graph run. Only used when Close() is called. - ::mediapipe::Status graph_status_; + mediapipe::Status graph_status_; // Accesses CalculatorContext for setting input timestamp. friend class CalculatorContextManager; diff --git a/mediapipe/framework/calculator_context_manager.cc b/mediapipe/framework/calculator_context_manager.cc index 93ee9855e..271976628 100644 --- a/mediapipe/framework/calculator_context_manager.cc +++ b/mediapipe/framework/calculator_context_manager.cc @@ -34,8 +34,8 @@ void CalculatorContextManager::Initialize( calculator_run_in_parallel_ = calculator_run_in_parallel; } -::mediapipe::Status CalculatorContextManager::PrepareForRun( - std::function<::mediapipe::Status(CalculatorContext*)> +mediapipe::Status CalculatorContextManager::PrepareForRun( + std::function<mediapipe::Status(CalculatorContext*)> setup_shards_callback) { setup_shards_callback_ = std::move(setup_shards_callback); default_context_ = absl::make_unique<CalculatorContext>( @@ -71,7 +71,7 @@ CalculatorContext* CalculatorContextManager::PrepareCalculatorContext( return GetDefaultCalculatorContext(); } absl::MutexLock lock(&contexts_mutex_); - CHECK(!::mediapipe::ContainsKey(active_contexts_, input_timestamp)) + CHECK(!mediapipe::ContainsKey(active_contexts_, input_timestamp)) << "Multiple invocations with the same timestamps are not allowed with " "parallel execution, input_timestamp = " << input_timestamp; diff --git a/mediapipe/framework/calculator_context_manager.h b/mediapipe/framework/calculator_context_manager.h index c5b8053ea..14e49e9bf 100644 --- a/mediapipe/framework/calculator_context_manager.h +++ b/mediapipe/framework/calculator_context_manager.h @@ -45,8 +45,8 @@ class CalculatorContextManager { // Sets the callback that can setup the input and output stream shards in a // newly constructed calculator context. Then, initializes the default // calculator context. - ::mediapipe::Status PrepareForRun( - std::function<::mediapipe::Status(CalculatorContext*)> + mediapipe::Status PrepareForRun( + std::function<mediapipe::Status(CalculatorContext*)> setup_shards_callback); // Invoked by CalculatorNode::CleanupAfterRun().
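calculator_context.h makes GraphStatus() meaningful only during Close(), and calculator_base.h's NOTE says completion-only work in Close() must check it. A hedged sketch of that pattern; the calculator, its COUNT tag, and the choice of Timestamp::Max() as the emission timestamp are illustrative assumptions, not part of this change:

```cpp
#include "mediapipe/framework/calculator_framework.h"

namespace mediapipe {

// Hypothetical example: counts packets and reports the total from Close(),
// but only when the graph run actually succeeded.
class CountAndReportCalculator : public CalculatorBase {
 public:
  static mediapipe::Status GetContract(CalculatorContract* cc) {
    cc->Inputs().Index(0).SetAny();
    cc->Outputs().Tag("COUNT").Set<int>();
    return mediapipe::OkStatus();
  }

  mediapipe::Status Process(CalculatorContext* cc) override {
    ++count_;
    return mediapipe::OkStatus();
  }

  mediapipe::Status Close(CalculatorContext* cc) override {
    // Close() also runs after a failed graph run; skip the summary then.
    if (!cc->GraphStatus().ok()) return mediapipe::OkStatus();
    cc->Outputs().Tag("COUNT").AddPacket(
        MakePacket<int>(count_).At(Timestamp::Max()));
    return mediapipe::OkStatus();
  }

 private:
  int count_ = 0;
};
REGISTER_CALCULATOR(CountAndReportCalculator);

}  // namespace mediapipe
```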
@@ -108,7 +108,7 @@ class CalculatorContextManager { } void SetGraphStatusInContext(CalculatorContext* calculator_context, - const ::mediapipe::Status& status) { + const mediapipe::Status& status) { CHECK(calculator_context); calculator_context->SetGraphStatus(status); } @@ -124,7 +124,7 @@ class CalculatorContextManager { // NOTE: This callback invokes input/output stream handler methods. // The callback is used to break the circular dependency between // calculator context manager and input/output stream handlers. - std::function<::mediapipe::Status(CalculatorContext*)> setup_shards_callback_; + std::function setup_shards_callback_; // The default calculator context that is always reused for sequential // execution. It is also used by Open() and Close() method of a parallel diff --git a/mediapipe/framework/calculator_contract.cc b/mediapipe/framework/calculator_contract.cc index 6e0e44749..503d47106 100644 --- a/mediapipe/framework/calculator_contract.cc +++ b/mediapipe/framework/calculator_contract.cc @@ -24,9 +24,9 @@ namespace mediapipe { -::mediapipe::Status CalculatorContract::Initialize( +mediapipe::Status CalculatorContract::Initialize( const CalculatorGraphConfig::Node& node) { - std::vector<::mediapipe::Status> statuses; + std::vector statuses; auto input_stream_statusor = tool::TagMap::Create(node.input_stream()); if (!input_stream_statusor.ok()) { @@ -48,7 +48,7 @@ namespace mediapipe { } if (!statuses.empty()) { - auto builder = ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + auto builder = mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "Unable to initialize TagMaps for node."; for (const auto& status : statuses) { builder << "\n" << status.message(); @@ -71,12 +71,12 @@ namespace mediapipe { std::move(input_side_packet_statusor).ValueOrDie()); output_side_packets_ = absl::make_unique( std::move(output_side_packet_statusor).ValueOrDie()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CalculatorContract::Initialize( +mediapipe::Status CalculatorContract::Initialize( const PacketGeneratorConfig& node) { - std::vector<::mediapipe::Status> statuses; + std::vector statuses; auto input_side_packet_statusor = tool::TagMap::Create(node.input_side_packet()); @@ -106,12 +106,12 @@ namespace mediapipe { std::move(input_side_packet_statusor).ValueOrDie()); output_side_packets_ = absl::make_unique( std::move(output_side_packet_statusor).ValueOrDie()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CalculatorContract::Initialize( +mediapipe::Status CalculatorContract::Initialize( const StatusHandlerConfig& node) { - std::vector<::mediapipe::Status> statuses; + std::vector statuses; auto input_side_packet_statusor = tool::TagMap::Create(node.input_side_packet()); @@ -120,7 +120,7 @@ namespace mediapipe { } if (!statuses.empty()) { - auto builder = ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + auto builder = mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "NodeTypeInfo Initialization failed."; for (const auto& status : statuses) { builder << "\n" << status.message(); @@ -134,7 +134,7 @@ namespace mediapipe { input_side_packets_ = absl::make_unique( std::move(input_side_packet_statusor).ValueOrDie()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/framework/calculator_contract.h b/mediapipe/framework/calculator_contract.h index ba6cdad85..dfe4a897b 100644 --- a/mediapipe/framework/calculator_contract.h +++ 
b/mediapipe/framework/calculator_contract.h @@ -47,9 +47,9 @@ namespace mediapipe { // class CalculatorContract { public: - ::mediapipe::Status Initialize(const CalculatorGraphConfig::Node& node); - ::mediapipe::Status Initialize(const PacketGeneratorConfig& node); - ::mediapipe::Status Initialize(const StatusHandlerConfig& node); + mediapipe::Status Initialize(const CalculatorGraphConfig::Node& node); + mediapipe::Status Initialize(const PacketGeneratorConfig& node); + mediapipe::Status Initialize(const StatusHandlerConfig& node); void SetNodeName(const std::string& node_name) { node_name_ = node_name; } // Returns the options given to this node. diff --git a/mediapipe/framework/calculator_contract_test.cc b/mediapipe/framework/calculator_contract_test.cc index e9c3813c6..679668a7e 100644 --- a/mediapipe/framework/calculator_contract_test.cc +++ b/mediapipe/framework/calculator_contract_test.cc @@ -30,7 +30,7 @@ namespace { TEST(CalculatorContractTest, Calculator) { const CalculatorGraphConfig::Node node = - ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"( + mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"( calculator: "MixtureOfExpertsFusionCalculator" input_stream: "FRAME:fdense_pca_moe_aggregated_detection" input_stream: "FNET:fnet_logreg_aggregated_detection" @@ -49,7 +49,7 @@ TEST(CalculatorContractTest, Calculator) { TEST(CalculatorContractTest, CalculatorOptions) { const CalculatorGraphConfig::Node node = - ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"( + mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"( calculator: "CalculatorTestCalculator" input_stream: "DATA:ycbcr_frames" input_stream: "VIDEO_HEADER:ycbcr_frames_prestream" @@ -71,7 +71,7 @@ TEST(CalculatorContractTest, CalculatorOptions) { TEST(CalculatorContractTest, PacketGenerator) { const PacketGeneratorConfig node = - ::mediapipe::ParseTextProtoOrDie<PacketGeneratorConfig>(R"( + mediapipe::ParseTextProtoOrDie<PacketGeneratorConfig>(R"( packet_generator: "DaredevilLabeledTimeSeriesGenerator" input_side_packet: "labeled_time_series" output_side_packet: "time_series_header" @@ -87,7 +87,7 @@ TEST(CalculatorContractTest, PacketGenerator) { TEST(CalculatorContractTest, StatusHandler) { const StatusHandlerConfig node = - ::mediapipe::ParseTextProtoOrDie<StatusHandlerConfig>(R"( + mediapipe::ParseTextProtoOrDie<StatusHandlerConfig>(R"( status_handler: "TaskInjectorStatusHandler" input_side_packet: "ROW:cid" input_side_packet: "SPEC:task_specification" diff --git a/mediapipe/framework/calculator_graph.cc b/mediapipe/framework/calculator_graph.cc index 7dad637aa..2111569e8 100644 --- a/mediapipe/framework/calculator_graph.cc +++ b/mediapipe/framework/calculator_graph.cc @@ -129,13 +129,13 @@ CalculatorGraph::CalculatorGraph(const CalculatorGraphConfig& config) // instantiated. CalculatorGraph::~CalculatorGraph() { // Stop periodic profiler output to ublock Executor destructors. - ::mediapipe::Status status = profiler()->Stop(); + mediapipe::Status status = profiler()->Stop(); if (!status.ok()) { LOG(ERROR) << "During graph destruction: " << status; } } -::mediapipe::Status CalculatorGraph::InitializePacketGeneratorGraph( +mediapipe::Status CalculatorGraph::InitializePacketGeneratorGraph( const std::map<std::string, Packet>& side_packets) { // Create and initialize the output side packets. if (!validated_graph_->OutputSidePacketInfos().empty()) { @@ -164,7 +164,7 @@ CalculatorGraph::~CalculatorGraph() { default_executor, side_packets); } -::mediapipe::Status CalculatorGraph::InitializeStreams() { +mediapipe::Status CalculatorGraph::InitializeStreams() { any_packet_type_.SetAny(); // Create and initialize the input streams.
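Each CalculatorContractTest above follows the same parse-initialize-inspect shape. Condensed into one sketch, with placeholder calculator and stream names (not taken from this diff):

```cpp
#include "mediapipe/framework/calculator_contract.h"
#include "mediapipe/framework/port/gtest.h"
#include "mediapipe/framework/port/parse_text_proto.h"
#include "mediapipe/framework/port/status_matchers.h"

namespace mediapipe {

// Parse a node config, run CalculatorContract::Initialize on it, and inspect
// the resulting stream collections.
TEST(CalculatorContractTest, InitializeFromNodeConfig) {
  const CalculatorGraphConfig::Node node =
      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"(
        calculator: "SomeCalculator"
        input_stream: "FRAME:input_frames"
        output_stream: "DETECTION:output_detections"
      )");
  CalculatorContract contract;
  MP_EXPECT_OK(contract.Initialize(node));
  EXPECT_EQ(contract.Inputs().NumEntries(), 1);
  EXPECT_EQ(contract.Outputs().NumEntries(), 1);
}

}  // namespace mediapipe
```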
@@ -193,7 +193,7 @@ CalculatorGraph::~CalculatorGraph() { auto input_tag_map, tool::TagMap::Create(validated_graph_->Config().input_stream())); for (const auto& stream_name : input_tag_map->Names()) { - RET_CHECK(!::mediapipe::ContainsKey(graph_input_streams_, stream_name)) + RET_CHECK(!mediapipe::ContainsKey(graph_input_streams_, stream_name)) .SetNoLogging() << "CalculatorGraph Initialization failed, graph input stream \"" << stream_name << "\" was specified twice."; @@ -221,16 +221,16 @@ CalculatorGraph::~CalculatorGraph() { graph_input_stream_add_mode_ = GraphInputStreamAddMode::WAIT_TILL_NOT_FULL; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CalculatorGraph::InitializeCalculatorNodes() { +mediapipe::Status CalculatorGraph::InitializeCalculatorNodes() { // Check if the user has specified a maximum queue size for an input stream. max_queue_size_ = validated_graph_->Config().max_queue_size(); max_queue_size_ = max_queue_size_ ? max_queue_size_ : 100; // Use a local variable to avoid needing to lock errors_. - std::vector<::mediapipe::Status> errors; + std::vector errors; // Create and initialize all the nodes in the graph. nodes_ = absl::make_unique>( @@ -240,7 +240,7 @@ CalculatorGraph::~CalculatorGraph() { // buffer_size_hint will be positive if one was specified in // the graph proto. int buffer_size_hint = 0; - const ::mediapipe::Status result = (*nodes_)[node_id].Initialize( + const mediapipe::Status result = (*nodes_)[node_id].Initialize( validated_graph_.get(), node_id, input_stream_managers_.get(), output_stream_managers_.get(), output_side_packets_.get(), &buffer_size_hint, profiler_); @@ -259,15 +259,15 @@ CalculatorGraph::~CalculatorGraph() { VLOG(2) << "Maximum input stream queue size based on graph config: " << max_queue_size_; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CalculatorGraph::InitializeProfiler() { +mediapipe::Status CalculatorGraph::InitializeProfiler() { profiler_->Initialize(*validated_graph_); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CalculatorGraph::InitializeExecutors() { +mediapipe::Status CalculatorGraph::InitializeExecutors() { // If the ExecutorConfig for the default executor leaves the executor type // unspecified, default_executor_options points to the // ThreadPoolExecutorOptions in that ExecutorConfig. Otherwise, @@ -276,9 +276,9 @@ CalculatorGraph::~CalculatorGraph() { bool use_application_thread = false; for (const ExecutorConfig& executor_config : validated_graph_->Config().executor()) { - if (::mediapipe::ContainsKey(executors_, executor_config.name())) { + if (mediapipe::ContainsKey(executors_, executor_config.name())) { if (!executor_config.type().empty()) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "ExecutorConfig for \"" << executor_config.name() << "\" has a \"type\" field but is also provided to the graph " "with a CalculatorGraph::SetExecutor() call."; @@ -302,7 +302,7 @@ CalculatorGraph::~CalculatorGraph() { } } if (executor_config.type().empty()) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "ExecutorConfig for \"" << executor_config.name() << "\" does not have a \"type\" field. 
The executor \"" << executor_config.name() @@ -319,15 +319,15 @@ CalculatorGraph::~CalculatorGraph() { executor_config.name(), std::shared_ptr(executor))); } - if (!::mediapipe::ContainsKey(executors_, "")) { + if (!mediapipe::ContainsKey(executors_, "")) { MP_RETURN_IF_ERROR(InitializeDefaultExecutor(default_executor_options, use_application_thread)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CalculatorGraph::InitializeDefaultExecutor( +mediapipe::Status CalculatorGraph::InitializeDefaultExecutor( const ThreadPoolExecutorOptions* default_executor_options, bool use_application_thread) { #ifdef __EMSCRIPTEN__ @@ -340,7 +340,7 @@ CalculatorGraph::~CalculatorGraph() { "", std::make_shared( std::bind(&internal::Scheduler::AddApplicationThreadTask, &scheduler_, std::placeholders::_1)))); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Check the number of threads specified in the proto. @@ -359,10 +359,10 @@ CalculatorGraph::~CalculatorGraph() { } MP_RETURN_IF_ERROR( CreateDefaultThreadPool(default_executor_options, num_threads)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CalculatorGraph::Initialize( +mediapipe::Status CalculatorGraph::Initialize( std::unique_ptr validated_graph, const std::map& side_packets) { RET_CHECK(!initialized_).SetNoLogging() @@ -380,15 +380,15 @@ CalculatorGraph::~CalculatorGraph() { #endif initialized_ = true; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CalculatorGraph::Initialize( +mediapipe::Status CalculatorGraph::Initialize( const CalculatorGraphConfig& input_config) { return Initialize(input_config, {}); } -::mediapipe::Status CalculatorGraph::Initialize( +mediapipe::Status CalculatorGraph::Initialize( const CalculatorGraphConfig& input_config, const std::map& side_packets) { auto validated_graph = absl::make_unique(); @@ -396,7 +396,7 @@ CalculatorGraph::~CalculatorGraph() { return Initialize(std::move(validated_graph), side_packets); } -::mediapipe::Status CalculatorGraph::Initialize( +mediapipe::Status CalculatorGraph::Initialize( const std::vector& input_configs, const std::vector& input_templates, const std::map& side_packets, @@ -407,16 +407,16 @@ CalculatorGraph::~CalculatorGraph() { return Initialize(std::move(validated_graph), side_packets); } -::mediapipe::Status CalculatorGraph::ObserveOutputStream( +mediapipe::Status CalculatorGraph::ObserveOutputStream( const std::string& stream_name, - std::function<::mediapipe::Status(const Packet&)> packet_callback) { + std::function packet_callback) { RET_CHECK(initialized_).SetNoLogging() << "CalculatorGraph is not initialized."; // TODO Allow output observers to be attached by graph level // tag/index. 
int output_stream_index = validated_graph_->OutputStreamIndex(stream_name); if (output_stream_index < 0) { - return ::mediapipe::NotFoundErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::NotFoundErrorBuilder(MEDIAPIPE_LOC) << "Unable to attach observer to output stream \"" << stream_name << "\" because it doesn't exist."; } @@ -425,16 +425,16 @@ CalculatorGraph::~CalculatorGraph() { stream_name, &any_packet_type_, std::move(packet_callback), &output_stream_managers_[output_stream_index])); graph_output_streams_.push_back(std::move(observer)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::StatusOr<OutputStreamPoller> -CalculatorGraph::AddOutputStreamPoller(const std::string& stream_name) { +mediapipe::StatusOr<OutputStreamPoller> CalculatorGraph::AddOutputStreamPoller( + const std::string& stream_name) { RET_CHECK(initialized_).SetNoLogging() << "CalculatorGraph is not initialized."; int output_stream_index = validated_graph_->OutputStreamIndex(stream_name); if (output_stream_index < 0) { - return ::mediapipe::NotFoundErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::NotFoundErrorBuilder(MEDIAPIPE_LOC) << "Unable to attach observer to output stream \"" << stream_name << "\" because it doesn't exist."; } @@ -449,11 +449,11 @@ CalculatorGraph::AddOutputStreamPoller(const std::string& stream_name) { return std::move(poller); } -::mediapipe::StatusOr<Packet> CalculatorGraph::GetOutputSidePacket( +mediapipe::StatusOr<Packet> CalculatorGraph::GetOutputSidePacket( const std::string& packet_name) { int side_packet_index = validated_graph_->OutputSidePacketIndex(packet_name); if (side_packet_index < 0) { - return ::mediapipe::NotFoundErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::NotFoundErrorBuilder(MEDIAPIPE_LOC) << "Unable to get the output side packet \"" << packet_name << "\" because it doesn't exist."; } @@ -478,7 +478,7 @@ CalculatorGraph::AddOutputStreamPoller(const std::string& stream_name) { !current_run_side_packet_iter->second.IsEmpty()) { output_packet = current_run_side_packet_iter->second; } else { - return ::mediapipe::UnavailableErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnavailableErrorBuilder(MEDIAPIPE_LOC) << "The output side packet \"" << packet_name << "\" is unavailable."; } @@ -486,7 +486,7 @@ CalculatorGraph::AddOutputStreamPoller(const std::string& stream_name) { return output_packet; } -::mediapipe::Status CalculatorGraph::Run( +mediapipe::Status CalculatorGraph::Run( const std::map<std::string, Packet>& extra_side_packets) { RET_CHECK(graph_input_streams_.empty()).SetNoLogging() << "When using graph input streams, call StartRun() instead of Run() so " @@ -495,7 +495,7 @@ CalculatorGraph::AddOutputStreamPoller(const std::string& stream_name) { return WaitUntilDone(); } -::mediapipe::Status CalculatorGraph::StartRun( +mediapipe::Status CalculatorGraph::StartRun( const std::map<std::string, Packet>& extra_side_packets, const std::map<std::string, Packet>& stream_headers) { RET_CHECK(initialized_).SetNoLogging() @@ -503,18 +503,18 @@ CalculatorGraph::AddOutputStreamPoller(const std::string& stream_name) { MP_RETURN_IF_ERROR(PrepareForRun(extra_side_packets, stream_headers)); MP_RETURN_IF_ERROR(profiler_->Start(executors_[""].get())); scheduler_.Start(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } #ifndef MEDIAPIPE_DISABLE_GPU -::mediapipe::Status CalculatorGraph::SetGpuResources( +mediapipe::Status CalculatorGraph::SetGpuResources( std::shared_ptr<::mediapipe::GpuResources> resources) { RET_CHECK(!ContainsKey(service_packets_, kGpuService.key)) << "The GPU resources have already been configured."; service_packets_[kGpuService.key]
= MakePacket<std::shared_ptr<::mediapipe::GpuResources>>( std::move(resources)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } std::shared_ptr<::mediapipe::GpuResources> CalculatorGraph::GetGpuResources() @@ -524,8 +524,8 @@ std::shared_ptr<::mediapipe::GpuResources> CalculatorGraph::GetGpuResources() return service_iter->second.Get<std::shared_ptr<::mediapipe::GpuResources>>(); } -::mediapipe::StatusOr<std::map<std::string, Packet>> -CalculatorGraph::PrepareGpu(const std::map<std::string, Packet>& side_packets) { +mediapipe::StatusOr<std::map<std::string, Packet>> CalculatorGraph::PrepareGpu( + const std::map<std::string, Packet>& side_packets) { std::map<std::string, Packet> additional_side_packets; bool update_sp = false; bool uses_gpu = false; @@ -590,7 +590,7 @@ CalculatorGraph::PrepareGpu(const std::map<std::string, Packet>& side_packets) { } #endif // !defined(MEDIAPIPE_DISABLE_GPU) -::mediapipe::Status CalculatorGraph::PrepareForRun( +mediapipe::Status CalculatorGraph::PrepareForRun( const std::map<std::string, Packet>& extra_side_packets, const std::map<std::string, Packet>& stream_headers) { if (VLOG_IS_ON(1)) { @@ -621,7 +621,7 @@ CalculatorGraph::PrepareGpu(const std::map<std::string, Packet>& side_packets) { } current_run_side_packets_.clear(); - ::mediapipe::Status generator_status = packet_generator_graph_.RunGraphSetup( + mediapipe::Status generator_status = packet_generator_graph_.RunGraphSetup( *input_side_packets, &current_run_side_packets_); CallStatusHandlers(GraphRunState::PRE_RUN, generator_status); @@ -632,7 +632,7 @@ CalculatorGraph::PrepareGpu(const std::map<std::string, Packet>& side_packets) { // If there was an error on the CallStatusHandlers (PRE_RUN), it was stored // in the error list. We return immediately notifying this to the caller. - ::mediapipe::Status error_status; + mediapipe::Status error_status; if (has_error_) { GetCombinedErrors(&error_status); LOG(ERROR) << error_status; @@ -682,7 +682,7 @@ CalculatorGraph::PrepareGpu(const std::map<std::string, Packet>& side_packets) { std::placeholders::_1, std::placeholders::_2); node.SetQueueSizeCallbacks(queue_size_callback, queue_size_callback); scheduler_.AssignNodeToSchedulerQueue(&node); - const ::mediapipe::Status result = node.PrepareForRun( + const mediapipe::Status result = node.PrepareForRun( current_run_side_packets_, service_packets_, std::bind(&internal::Scheduler::ScheduleNodeForOpen, &scheduler_, &node), @@ -700,13 +700,13 @@ CalculatorGraph::PrepareGpu(const std::map<std::string, Packet>& side_packets) { for (auto& graph_output_stream : graph_output_streams_) { graph_output_stream->PrepareForRun( [&graph_output_stream, this] { - ::mediapipe::Status status = graph_output_stream->Notify(); + mediapipe::Status status = graph_output_stream->Notify(); if (!status.ok()) { RecordError(status); } scheduler_.EmittedObservedOutput(); }, - [this](::mediapipe::Status status) { RecordError(status); }); + [this](mediapipe::Status status) { RecordError(status); }); } if (GetCombinedErrors(&error_status)) { @@ -724,7 +724,7 @@ CalculatorGraph::PrepareGpu(const std::map<std::string, Packet>& side_packets) { // Allow graph input streams to override the global max queue size.
for (const auto& name_max : graph_input_stream_max_queue_size_) { std::unique_ptr* stream = - ::mediapipe::FindOrNull(graph_input_streams_, name_max.first); + mediapipe::FindOrNull(graph_input_streams_, name_max.first); RET_CHECK(stream).SetNoLogging() << absl::Substitute( "SetInputStreamMaxQueueSize called on \"$0\" which is not a " "graph input stream.", @@ -759,20 +759,20 @@ CalculatorGraph::PrepareGpu(const std::map& side_packets) { } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CalculatorGraph::WaitUntilIdle() { +mediapipe::Status CalculatorGraph::WaitUntilIdle() { MP_RETURN_IF_ERROR(scheduler_.WaitUntilIdle()); VLOG(2) << "Scheduler idle."; - ::mediapipe::Status status = ::mediapipe::OkStatus(); + mediapipe::Status status = mediapipe::OkStatus(); if (GetCombinedErrors(&status)) { LOG(ERROR) << status; } return status; } -::mediapipe::Status CalculatorGraph::WaitUntilDone() { +mediapipe::Status CalculatorGraph::WaitUntilDone() { VLOG(2) << "Waiting for scheduler to terminate..."; MP_RETURN_IF_ERROR(scheduler_.WaitUntilDone()); VLOG(2) << "Scheduler terminated."; @@ -780,16 +780,16 @@ CalculatorGraph::PrepareGpu(const std::map& side_packets) { return FinishRun(); } -::mediapipe::Status CalculatorGraph::WaitForObservedOutput() { +mediapipe::Status CalculatorGraph::WaitForObservedOutput() { return scheduler_.WaitForObservedOutput(); } -::mediapipe::Status CalculatorGraph::AddPacketToInputStream( +mediapipe::Status CalculatorGraph::AddPacketToInputStream( const std::string& stream_name, const Packet& packet) { return AddPacketToInputStreamInternal(stream_name, packet); } -::mediapipe::Status CalculatorGraph::AddPacketToInputStream( +mediapipe::Status CalculatorGraph::AddPacketToInputStream( const std::string& stream_name, Packet&& packet) { return AddPacketToInputStreamInternal(stream_name, std::move(packet)); } @@ -799,29 +799,28 @@ CalculatorGraph::PrepareGpu(const std::map& side_packets) { // internal-only templated version. T&& is a forwarding reference here, so // std::forward will deduce the correct type as we pass along packet. template -::mediapipe::Status CalculatorGraph::AddPacketToInputStreamInternal( +mediapipe::Status CalculatorGraph::AddPacketToInputStreamInternal( const std::string& stream_name, T&& packet) { std::unique_ptr* stream = - ::mediapipe::FindOrNull(graph_input_streams_, stream_name); + mediapipe::FindOrNull(graph_input_streams_, stream_name); RET_CHECK(stream).SetNoLogging() << absl::Substitute( "AddPacketToInputStream called on input stream \"$0\" which is not a " "graph input stream.", stream_name); - int node_id = - ::mediapipe::FindOrDie(graph_input_stream_node_ids_, stream_name); + int node_id = mediapipe::FindOrDie(graph_input_stream_node_ids_, stream_name); CHECK_GE(node_id, validated_graph_->CalculatorInfos().size()); { absl::MutexLock lock(&full_input_streams_mutex_); if (graph_input_stream_add_mode_ == GraphInputStreamAddMode::ADD_IF_NOT_FULL) { if (has_error_) { - ::mediapipe::Status error_status; + mediapipe::Status error_status; GetCombinedErrors("Graph has errors: ", &error_status); return error_status; } // Return with StatusUnavailable if this stream is being throttled. 
if (!full_input_streams_[node_id].empty()) { - return ::mediapipe::UnavailableErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnavailableErrorBuilder(MEDIAPIPE_LOC) << "Graph is throttled."; } } else if (graph_input_stream_add_mode_ == @@ -836,7 +835,7 @@ template &full_input_streams_mutex_); } if (has_error_) { - ::mediapipe::Status error_status; + mediapipe::Status error_status; GetCombinedErrors("Graph has errors: ", &error_status); return error_status; } @@ -858,7 +857,7 @@ template // because we don't have the lock over the input stream. (*stream)->AddPacket(std::forward(packet)); if (has_error_) { - ::mediapipe::Status error_status; + mediapipe::Status error_status; GetCombinedErrors("Graph has errors: ", &error_status); return error_status; } @@ -870,25 +869,25 @@ template // again if the graph is still idle. Unthrottling basically only lets in one // packet at a time. TODO: add test. scheduler_.AddedPacketToGraphInputStream(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CalculatorGraph::SetInputStreamMaxQueueSize( +mediapipe::Status CalculatorGraph::SetInputStreamMaxQueueSize( const std::string& stream_name, int max_queue_size) { // graph_input_streams_ has not been filled in yet, so we'll check this when // it is applied when the graph is started. graph_input_stream_max_queue_size_[stream_name] = max_queue_size; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } bool CalculatorGraph::HasInputStream(const std::string& stream_name) { - return ::mediapipe::FindOrNull(graph_input_streams_, stream_name) != nullptr; + return mediapipe::FindOrNull(graph_input_streams_, stream_name) != nullptr; } -::mediapipe::Status CalculatorGraph::CloseInputStream( +mediapipe::Status CalculatorGraph::CloseInputStream( const std::string& stream_name) { std::unique_ptr* stream = - ::mediapipe::FindOrNull(graph_input_streams_, stream_name); + mediapipe::FindOrNull(graph_input_streams_, stream_name); RET_CHECK(stream).SetNoLogging() << absl::Substitute( "CloseInputStream called on input stream \"$0\" which is not a graph " "input stream.", @@ -897,7 +896,7 @@ bool CalculatorGraph::HasInputStream(const std::string& stream_name) { // threads cannot call CloseInputStream() on the same stream_name at the same // time. 
if ((*stream)->IsClosed()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } (*stream)->Close(); @@ -906,10 +905,10 @@ bool CalculatorGraph::HasInputStream(const std::string& stream_name) { scheduler_.ClosedAllGraphInputStreams(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CalculatorGraph::CloseAllInputStreams() { +mediapipe::Status CalculatorGraph::CloseAllInputStreams() { for (auto& item : graph_input_streams_) { item.second->Close(); } @@ -917,10 +916,10 @@ bool CalculatorGraph::HasInputStream(const std::string& stream_name) { num_closed_graph_input_streams_ = graph_input_streams_.size(); scheduler_.ClosedAllGraphInputStreams(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CalculatorGraph::CloseAllPacketSources() { +mediapipe::Status CalculatorGraph::CloseAllPacketSources() { for (auto& item : graph_input_streams_) { item.second->Close(); } @@ -929,10 +928,10 @@ bool CalculatorGraph::HasInputStream(const std::string& stream_name) { scheduler_.ClosedAllGraphInputStreams(); scheduler_.CloseAllSourceNodes(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -void CalculatorGraph::RecordError(const ::mediapipe::Status& error) { +void CalculatorGraph::RecordError(const mediapipe::Status& error) { VLOG(2) << "RecordError called with " << error; { absl::MutexLock lock(&error_mutex_); @@ -943,7 +942,7 @@ void CalculatorGraph::RecordError(const ::mediapipe::Status& error) { stream->NotifyError(); } if (errors_.size() > kMaxNumAccumulatedErrors) { - for (const ::mediapipe::Status& error : errors_) { + for (const mediapipe::Status& error : errors_) { LOG(ERROR) << error; } LOG(FATAL) << "Forcefully aborting to prevent the framework running out " @@ -952,13 +951,13 @@ void CalculatorGraph::RecordError(const ::mediapipe::Status& error) { } } -bool CalculatorGraph::GetCombinedErrors(::mediapipe::Status* error_status) { +bool CalculatorGraph::GetCombinedErrors(mediapipe::Status* error_status) { return GetCombinedErrors("CalculatorGraph::Run() failed in Run: ", error_status); } bool CalculatorGraph::GetCombinedErrors(const std::string& error_prefix, - ::mediapipe::Status* error_status) { + mediapipe::Status* error_status) { absl::MutexLock lock(&error_mutex_); if (!errors_.empty()) { *error_status = tool::CombinedStatus(error_prefix, errors_); @@ -968,7 +967,7 @@ bool CalculatorGraph::GetCombinedErrors(const std::string& error_prefix, } void CalculatorGraph::CallStatusHandlers(GraphRunState graph_run_state, - const ::mediapipe::Status& status) { + const mediapipe::Status& status) { for (int status_handler_index = 0; status_handler_index < validated_graph_->Config().status_handler_size(); ++status_handler_index) { @@ -980,23 +979,22 @@ void CalculatorGraph::CallStatusHandlers(GraphRunState graph_run_state, validated_graph_->StatusHandlerInfos()[status_handler_index]; const PacketTypeSet& packet_type_set = status_handler_info.InputSidePacketTypes(); - ::mediapipe::StatusOr> packet_set_statusor = + mediapipe::StatusOr> packet_set_statusor = tool::FillPacketSet(packet_type_set, current_run_side_packets_, nullptr); if (!packet_set_statusor.ok()) { - RecordError(::mediapipe::StatusBuilder( + RecordError(mediapipe::StatusBuilder( std::move(packet_set_statusor).status(), MEDIAPIPE_LOC) .SetPrepend() << "Skipping run of " << handler_type << ": "); continue; } - ::mediapipe::StatusOr< - std::unique_ptr> + mediapipe::StatusOr> static_access_statusor = 
internal::StaticAccessToStatusHandlerRegistry:: CreateByNameInNamespace(validated_graph_->Package(), handler_type); CHECK(static_access_statusor.ok()) << handler_type << " is not registered."; auto static_access = std::move(static_access_statusor).ValueOrDie(); - ::mediapipe::Status handler_result; + mediapipe::Status handler_result; if (graph_run_state == GraphRunState::PRE_RUN) { handler_result = static_access->HandlePreRunStatus( handler_config.options(), *packet_set_statusor.ValueOrDie(), status); @@ -1005,8 +1003,8 @@ void CalculatorGraph::CallStatusHandlers(GraphRunState graph_run_state, handler_config.options(), *packet_set_statusor.ValueOrDie(), status); } if (!handler_result.ok()) { - ::mediapipe::StatusBuilder builder(std::move(handler_result), - MEDIAPIPE_LOC); + mediapipe::StatusBuilder builder(std::move(handler_result), + MEDIAPIPE_LOC); builder.SetPrepend() << handler_type; if (graph_run_state == GraphRunState::PRE_RUN) { builder << "::HandlePreRunStatus failed: "; @@ -1051,11 +1049,10 @@ void CalculatorGraph::UpdateThrottledNodes(InputStreamManager* stream, VLOG(2) << "Stream \"" << stream->Name() << "\" is " << (stream_is_full ? "throttling" : "no longer throttling") << " node with node ID " << node_id; - ::mediapipe::LogEvent( - profiler_.get(), - TraceEvent(stream_is_full ? TraceEvent::THROTTLED - : TraceEvent::UNTHROTTLED) - .set_stream_id(&stream->Name())); + mediapipe::LogEvent(profiler_.get(), + TraceEvent(stream_is_full ? TraceEvent::THROTTLED + : TraceEvent::UNTHROTTLED) + .set_stream_id(&stream->Name())); bool was_throttled = !full_input_streams_[node_id].empty(); if (stream_is_full) { DCHECK_EQ(full_input_streams_[node_id].count(stream), 0); @@ -1137,7 +1134,7 @@ bool CalculatorGraph::UnthrottleSources() { } for (InputStreamManager* stream : full_streams) { if (Config().report_deadlock()) { - RecordError(::mediapipe::UnavailableError(absl::StrCat( + RecordError(mediapipe::UnavailableError(absl::StrCat( "Detected a deadlock due to input throttling for: \"", stream->Name(), "\". All calculators are idle while packet sources remain active " "and throttled. Consider adjusting \"max_queue_size\" or " @@ -1166,7 +1163,7 @@ void CalculatorGraph::SetGraphInputStreamAddMode(GraphInputStreamAddMode mode) { } void CalculatorGraph::Cancel() { - // TODO This function should return ::mediapipe::Status. + // TODO This function should return mediapipe::Status. scheduler_.Cancel(); } @@ -1174,11 +1171,11 @@ void CalculatorGraph::Pause() { scheduler_.Pause(); } void CalculatorGraph::Resume() { scheduler_.Resume(); } -::mediapipe::Status CalculatorGraph::SetServicePacket( +mediapipe::Status CalculatorGraph::SetServicePacket( const GraphServiceBase& service, Packet p) { // TODO: check that the graph has not been started! 
service_packets_[service.key] = std::move(p); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } Packet CalculatorGraph::GetServicePacket(const GraphServiceBase& service) { @@ -1189,10 +1186,10 @@ Packet CalculatorGraph::GetServicePacket(const GraphServiceBase& service) { return it->second; } -::mediapipe::Status CalculatorGraph::SetExecutorInternal( +mediapipe::Status CalculatorGraph::SetExecutorInternal( const std::string& name, std::shared_ptr<Executor> executor) { if (!executors_.emplace(name, executor).second) { - return ::mediapipe::AlreadyExistsErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::AlreadyExistsErrorBuilder(MEDIAPIPE_LOC) << "SetExecutor must be called only once for the executor \"" << name << "\""; } @@ -1201,21 +1198,21 @@ Packet CalculatorGraph::GetServicePacket(const GraphServiceBase& service) { } else { MP_RETURN_IF_ERROR(scheduler_.SetNonDefaultExecutor(name, executor.get())); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CalculatorGraph::SetExecutor( +mediapipe::Status CalculatorGraph::SetExecutor( const std::string& name, std::shared_ptr<Executor> executor) { RET_CHECK(!initialized_) << "SetExecutor can only be called before Initialize()"; if (IsReservedExecutorName(name)) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "\"" << name << "\" is a reserved executor name."; } return SetExecutorInternal(name, std::move(executor)); } -::mediapipe::Status CalculatorGraph::CreateDefaultThreadPool( +mediapipe::Status CalculatorGraph::CreateDefaultThreadPool( const ThreadPoolExecutorOptions* default_executor_options, int num_threads) { MediaPipeOptions extendable_options; @@ -1237,16 +1234,16 @@ bool CalculatorGraph::IsReservedExecutorName(const std::string& name) { return ValidatedGraphConfig::IsReservedExecutorName(name); } -::mediapipe::Status CalculatorGraph::FinishRun() { +mediapipe::Status CalculatorGraph::FinishRun() { // Check for any errors that may have occurred. - ::mediapipe::Status status = ::mediapipe::OkStatus(); + mediapipe::Status status = mediapipe::OkStatus(); MP_RETURN_IF_ERROR(profiler_->Stop()); GetCombinedErrors(&status); CleanupAfterRun(&status); return status; } -void CalculatorGraph::CleanupAfterRun(::mediapipe::Status* status) { +void CalculatorGraph::CleanupAfterRun(mediapipe::Status* status) { for (auto& item : graph_input_streams_) { item.second->Close(); } @@ -1313,7 +1310,7 @@ bool MetricElementComparator(const std::pair<std::string, int64>& e1, } } // namespace -::mediapipe::Status CalculatorGraph::GetCalculatorProfiles( +mediapipe::Status CalculatorGraph::GetCalculatorProfiles( std::vector<CalculatorProfile>* profiles) const { return profiler_->GetCalculatorProfiles(profiles); } diff --git a/mediapipe/framework/calculator_graph.h b/mediapipe/framework/calculator_graph.h index 7a1935a92..56d46b0ae 100644 --- a/mediapipe/framework/calculator_graph.h +++ b/mediapipe/framework/calculator_graph.h @@ -62,7 +62,7 @@ struct GpuSharedData; namespace mediapipe { -typedef ::mediapipe::StatusOr<OutputStreamPoller> StatusOrPoller; +typedef mediapipe::StatusOr<OutputStreamPoller> StatusOrPoller; // The class representing a DAG of calculator nodes. // @@ -126,12 +126,12 @@ class CalculatorGraph { // Initializes the graph from its proto description. // side_packets that are provided at this stage are common across all Run() // invocations and could be used to execute PacketGenerators immediately.
- ::mediapipe::Status Initialize( + mediapipe::Status Initialize( const CalculatorGraphConfig& config, const std::map<std::string, Packet>& side_packets); // Convenience version which does not take side packets. - ::mediapipe::Status Initialize(const CalculatorGraphConfig& config); + mediapipe::Status Initialize(const CalculatorGraphConfig& config); // Initializes the CalculatorGraph from the specified graph and subgraph // configs. Template graph and subgraph configs can be specified through @@ -139,7 +139,7 @@ class CalculatorGraph { // CalculatorGraphConfig.type. A subgraph can be instantiated directly by // specifying its type in |graph_type|. A template graph can be instantiated // directly by specifying its template arguments in |options|. - ::mediapipe::Status Initialize( + mediapipe::Status Initialize( const std::vector<CalculatorGraphConfig>& configs, const std::vector<CalculatorGraphTemplate>& templates, const std::map<std::string, Packet>& side_packets = {}, @@ -155,9 +155,9 @@ class CalculatorGraph { // packet emitted by the output stream. Can only be called before Run() or // StartRun(). // TODO: Rename to AddOutputStreamCallback. - ::mediapipe::Status ObserveOutputStream( + mediapipe::Status ObserveOutputStream( const std::string& stream_name, - std::function<::mediapipe::Status(const Packet&)> packet_callback); + std::function<mediapipe::Status(const Packet&)> packet_callback); // Adds an OutputStreamPoller for a stream. This provides a synchronous, // polling API for accessing a stream's output. Should only be called before @@ -169,17 +169,17 @@ class CalculatorGraph { // packets (generated by PacketGenerators) can be retrieved before // graph is done. Returns error if the graph is still running (for non-base // packets) or the output side packet is not found or empty. - ::mediapipe::StatusOr<Packet> GetOutputSidePacket( + mediapipe::StatusOr<Packet> GetOutputSidePacket( const std::string& packet_name); // Runs the graph after adding the given extra input side packets. All // arguments are forgotten after Run() returns. // Run() is a blocking call and will return when all calculators are done. - virtual ::mediapipe::Status Run( + virtual mediapipe::Status Run( const std::map<std::string, Packet>& extra_side_packets); // Run the graph without adding any input side packets. - ::mediapipe::Status Run() { return Run({}); } + mediapipe::Status Run() { return Run({}); } // Start a run of the graph. StartRun, WaitUntilDone, HasError, // AddPacketToInputStream, and CloseInputStream allow more control over @@ -199,7 +199,7 @@ class CalculatorGraph { // MP_RETURN_IF_ERROR(graph.CloseInputStream(stream)); // } // MP_RETURN_IF_ERROR(graph.WaitUntilDone()); - ::mediapipe::Status StartRun( + mediapipe::Status StartRun( const std::map<std::string, Packet>& extra_side_packets) { return StartRun(extra_side_packets, {}); } @@ -208,7 +208,7 @@ class CalculatorGraph { // stream header before running. // Note: We highly discourage the use of stream headers; this is added for the // compatibility of existing calculators that use headers during Open(). - ::mediapipe::Status StartRun( + mediapipe::Status StartRun( const std::map<std::string, Packet>& extra_side_packets, const std::map<std::string, Packet>& stream_headers); @@ -216,20 +216,20 @@ class CalculatorGraph { // until all source calculators have returned StatusStop(), all // graph_input_streams_ have been closed, and no more calculators can // be run). This function can be called only after StartRun(). - ::mediapipe::Status WaitUntilDone(); + mediapipe::Status WaitUntilDone(); // Wait until the running graph is in the idle mode, which is when nothing can // be scheduled and nothing is running in the worker threads.
This function // can be called only after StartRun(). // NOTE: The graph must not have any source nodes because source nodes prevent // the running graph from becoming idle until the source nodes are done. - ::mediapipe::Status WaitUntilIdle(); + mediapipe::Status WaitUntilIdle(); // Wait until a packet is emitted on one of the observed output streams. // Returns immediately if a packet has already been emitted since the last // call to this function. // Returns OutOfRangeError if the graph terminated while waiting. - ::mediapipe::Status WaitForObservedOutput(); + mediapipe::Status WaitForObservedOutput(); // Quick non-locking means of checking if the graph has encountered an error. bool HasError() const { return has_error_; } @@ -243,8 +243,8 @@ class CalculatorGraph { // sizes of the queues in the graph. The input stream must have been specified // in the configuration as a graph level input_stream. On error, nothing is // added. - ::mediapipe::Status AddPacketToInputStream(const std::string& stream_name, - const Packet& packet); + mediapipe::Status AddPacketToInputStream(const std::string& stream_name, + const Packet& packet); // Same as the l-value version of this function by the same name, but moves // the r-value referenced packet into the stream instead of copying it over. @@ -253,12 +253,12 @@ class CalculatorGraph { // packet may remain valid. In particular, when using the ADD_IF_NOT_FULL // mode with a full queue, this will return StatusUnavailable and the caller // may try adding the packet again later. - ::mediapipe::Status AddPacketToInputStream(const std::string& stream_name, - Packet&& packet); + mediapipe::Status AddPacketToInputStream(const std::string& stream_name, + Packet&& packet); // Sets the queue size of a graph input stream, overriding the graph default. - ::mediapipe::Status SetInputStreamMaxQueueSize(const std::string& stream_name, - int max_queue_size); + mediapipe::Status SetInputStreamMaxQueueSize(const std::string& stream_name, + int max_queue_size); // Check if an input stream exists in the graph bool HasInputStream(const std::string& name); @@ -268,14 +268,14 @@ class CalculatorGraph { // been closed (and all packets propagate through the graph). // Note that multiple threads cannot call CloseInputStream() on the same // stream_name at the same time. - ::mediapipe::Status CloseInputStream(const std::string& stream_name); + mediapipe::Status CloseInputStream(const std::string& stream_name); // Closes all the graph input streams. // TODO: deprecate this function in favor of CloseAllPacketSources. - ::mediapipe::Status CloseAllInputStreams(); + mediapipe::Status CloseAllInputStreams(); // Closes all the graph input streams and source calculator nodes. - ::mediapipe::Status CloseAllPacketSources(); + mediapipe::Status CloseAllPacketSources(); // Returns the pointer to the stream with the given name, or dies if none // exists. The result remains owned by the CalculatorGraph. @@ -290,7 +290,7 @@ class CalculatorGraph { // calculator in the graph. May be called at any time after the graph has been // initialized. ABSL_DEPRECATED("Use profiler()->GetCalculatorProfiles() instead") - ::mediapipe::Status GetCalculatorProfiles( + mediapipe::Status GetCalculatorProfiles( std::vector*) const; // Set the type of counter used in this graph. @@ -301,15 +301,15 @@ class CalculatorGraph { // Callback when an error is encountered. // Adds the error to the vector of errors. 
- void RecordError(const ::mediapipe::Status& error) + void RecordError(const mediapipe::Status& error) ABSL_LOCKS_EXCLUDED(error_mutex_); // Combines errors into a status. Returns true if the vector of errors is // non-empty. bool GetCombinedErrors(const std::string& error_prefix, - ::mediapipe::Status* error_status); + mediapipe::Status* error_status); // Convenience overload which specifies a default error prefix. - bool GetCombinedErrors(::mediapipe::Status* error_status); + bool GetCombinedErrors(mediapipe::Status* error_status); // Returns the maximum input stream queue size. int GetMaxInputStreamQueueSize(); @@ -338,8 +338,8 @@ class CalculatorGraph { // Sets the executor that will run the nodes assigned to the executor // named |name|. If |name| is empty, this sets the default executor. Must // be called before the graph is initialized. - ::mediapipe::Status SetExecutor(const std::string& name, - std::shared_ptr executor); + mediapipe::Status SetExecutor(const std::string& name, + std::shared_ptr executor); // WARNING: the following public methods are exposed to Scheduler only. @@ -370,18 +370,18 @@ class CalculatorGraph { // Only meant for internal use. std::shared_ptr<::mediapipe::GpuResources> GetGpuResources() const; - ::mediapipe::Status SetGpuResources( + mediapipe::Status SetGpuResources( std::shared_ptr<::mediapipe::GpuResources> resources); // Helper for PrepareForRun. If it returns a non-empty map, those packets // must be added to the existing side packets, replacing existing values // that have the same key. - ::mediapipe::StatusOr> PrepareGpu( + mediapipe::StatusOr> PrepareGpu( const std::map& side_packets); #endif // !defined(MEDIAPIPE_DISABLE_GPU) template - ::mediapipe::Status SetServiceObject(const GraphService& service, - std::shared_ptr object) { + mediapipe::Status SetServiceObject(const GraphService& service, + std::shared_ptr object) { return SetServicePacket(service, MakePacket>(std::move(object))); } @@ -394,8 +394,7 @@ class CalculatorGraph { } // Only the Java API should call this directly. - ::mediapipe::Status SetServicePacket(const GraphServiceBase& service, - Packet p); + mediapipe::Status SetServicePacket(const GraphServiceBase& service, Packet p); private: // GraphRunState is used as a parameter in the function CallStatusHandlers. @@ -419,8 +418,7 @@ class CalculatorGraph { shard_.SetSpec(manager_->Spec()); } - void PrepareForRun( - std::function error_callback) { + void PrepareForRun(std::function error_callback) { manager_->PrepareForRun(std::move(error_callback)); } @@ -448,7 +446,7 @@ class CalculatorGraph { }; // Initializes the graph from a ValidatedGraphConfig object. - ::mediapipe::Status Initialize( + mediapipe::Status Initialize( std::unique_ptr validated_graph, const std::map& side_packets); @@ -456,28 +454,28 @@ class CalculatorGraph { // AddPacketToInputStream(Packet&& packet) or // AddPacketToInputStream(const Packet& packet). template - ::mediapipe::Status AddPacketToInputStreamInternal( + mediapipe::Status AddPacketToInputStreamInternal( const std::string& stream_name, T&& packet); // Sets the executor that will run the nodes assigned to the executor // named |name|. If |name| is empty, this sets the default executor. // Does not check that the graph is uninitialized and |name| is not a // reserved executor name. 
- ::mediapipe::Status SetExecutorInternal(const std::string& name, - std::shared_ptr executor); + mediapipe::Status SetExecutorInternal(const std::string& name, + std::shared_ptr executor); // If the num_threads field in default_executor_options is not specified, // assigns a reasonable value based on system configuration and the graph. // Then, creates the default thread pool if appropriate. // // Only called by InitializeExecutors(). - ::mediapipe::Status InitializeDefaultExecutor( + mediapipe::Status InitializeDefaultExecutor( const ThreadPoolExecutorOptions* default_executor_options, bool use_application_thread); // Creates a thread pool as the default executor. The num_threads argument // overrides the num_threads field in default_executor_options. - ::mediapipe::Status CreateDefaultThreadPool( + mediapipe::Status CreateDefaultThreadPool( const ThreadPoolExecutorOptions* default_executor_options, int num_threads); @@ -485,31 +483,31 @@ class CalculatorGraph { static bool IsReservedExecutorName(const std::string& name); // Helper functions for Initialize(). - ::mediapipe::Status InitializeExecutors(); - ::mediapipe::Status InitializePacketGeneratorGraph( + mediapipe::Status InitializeExecutors(); + mediapipe::Status InitializePacketGeneratorGraph( const std::map& side_packets); - ::mediapipe::Status InitializeStreams(); - ::mediapipe::Status InitializeProfiler(); - ::mediapipe::Status InitializeCalculatorNodes(); + mediapipe::Status InitializeStreams(); + mediapipe::Status InitializeProfiler(); + mediapipe::Status InitializeCalculatorNodes(); // Iterates through all nodes and schedules any that can be opened. void ScheduleAllOpenableNodes(); // Does the bulk of the work for StartRun but does not start the scheduler. - ::mediapipe::Status PrepareForRun( + mediapipe::Status PrepareForRun( const std::map& extra_side_packets, const std::map& stream_headers); // Cleans up any remaining state after the run and returns any errors that may // have occurred during the run. Called after the scheduler has terminated. - ::mediapipe::Status FinishRun(); + mediapipe::Status FinishRun(); // Cleans up any remaining state after the run. All status handlers run here // if their requested input side packets exist. // The original |*status| is passed to all the status handlers. If any status // handler fails, it appends its error to errors_, and CleanupAfterRun sets // |*status| to the new combined errors on return. - void CleanupAfterRun(::mediapipe::Status* status) + void CleanupAfterRun(mediapipe::Status* status) ABSL_LOCKS_EXCLUDED(error_mutex_); // Calls HandlePreRunStatus or HandleStatus on the StatusHandlers. Which one @@ -517,7 +515,7 @@ class CalculatorGraph { // current_run_side_packets_ must be set before this function is called. // On error, has_error_ will be set. void CallStatusHandlers(GraphRunState graph_run_state, - const ::mediapipe::Status& status); + const mediapipe::Status& status); // Callback function to throttle or unthrottle source nodes when a stream // becomes full or non-full. A node is throttled (i.e. prevented being @@ -611,7 +609,7 @@ class CalculatorGraph { // Vector of errors encountered while running graph. Always use RecordError() // to add an error to this vector. - std::vector<::mediapipe::Status> errors_ ABSL_GUARDED_BY(error_mutex_); + std::vector errors_ ABSL_GUARDED_BY(error_mutex_); // True if the default executor uses the application thread. 
bool use_application_thread_ = false; diff --git a/mediapipe/framework/calculator_graph_bounds_test.cc b/mediapipe/framework/calculator_graph_bounds_test.cc index ebc9ee6c8..21ee132d9 100644 --- a/mediapipe/framework/calculator_graph_bounds_test.cc +++ b/mediapipe/framework/calculator_graph_bounds_test.cc @@ -30,7 +30,7 @@ namespace { constexpr int kIntTestValue = 33; -typedef std::function<::mediapipe::Status(CalculatorContext* cc)> +typedef std::function<mediapipe::Status(CalculatorContext* cc)> CalculatorContextFunction; // Returns the contents of a set of Packets. @@ -87,26 +87,26 @@ class CountingExecutor : public Executor { // streams and outputs the sum to the output stream. class IntAdderCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { cc->Inputs().Index(i).Set<int>(); } cc->Outputs().Index(0).Set<int>(); cc->SetTimestampOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { - return ::mediapipe::OkStatus(); + mediapipe::Status Open(CalculatorContext* cc) final { + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { int sum = 0; for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { sum += cc->Inputs().Index(i).Get<int>(); } cc->Outputs().Index(0).Add(new int(sum), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(IntAdderCalculator); @@ -114,13 +114,13 @@ REGISTER_CALCULATOR(IntAdderCalculator); template <typename T> class TypedSinkCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set<T>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { - return ::mediapipe::OkStatus(); + mediapipe::Status Process(CalculatorContext* cc) override { + return mediapipe::OkStatus(); } }; typedef TypedSinkCalculator<std::string> StringSinkCalculator; REGISTER_CALCULATOR(IntSinkCalculator); @@ -132,13 +132,13 @@ REGISTER_CALCULATOR(IntSinkCalculator); // integer. class EvenIntFilterCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set<int>(); cc->Outputs().Index(0).Set<int>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { int value = cc->Inputs().Index(0).Get<int>(); if (value % 2 == 0) { cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value()); @@ -146,7 +146,7 @@ class EvenIntFilterCalculator : public CalculatorBase { cc->Outputs().Index(0).SetNextTimestampBound( cc->InputTimestamp().NextAllowedInStream()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(EvenIntFilterCalculator); @@ -156,19 +156,19 @@ REGISTER_CALCULATOR(EvenIntFilterCalculator); // input stream carries the value true.
class ValveCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).SetAny(); cc->Inputs().Index(1).Set(); cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { cc->Outputs().Index(0).SetHeader(cc->Inputs().Index(0).Header()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { if (cc->Inputs().Index(1).Get()) { cc->GetCounter("PassThrough")->Increment(); cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value()); @@ -182,7 +182,7 @@ class ValveCalculator : public CalculatorBase { cc->Outputs().Index(0).SetNextTimestampBound( cc->InputTimestamp().NextAllowedInStream()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(ValveCalculator); @@ -191,27 +191,27 @@ REGISTER_CALCULATOR(ValveCalculator); // but shifts the timestamp. class TimeShiftCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).SetAny(); cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0)); cc->InputSidePackets().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { // Input: arbitrary Packets. // Output: copy of the input. cc->Outputs().Index(0).SetHeader(cc->Inputs().Index(0).Header()); shift_ = cc->InputSidePackets().Index(0).Get(); cc->SetOffset(shift_); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { cc->GetCounter("PassThrough")->Increment(); cc->Outputs().Index(0).AddPacket( cc->Inputs().Index(0).Value().At(cc->InputTimestamp() + shift_)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: @@ -231,17 +231,17 @@ REGISTER_CALCULATOR(TimeShiftCalculator); // T=2000 Output 100 class OutputAndBoundSourceCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { counter_ = 0; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { Timestamp timestamp(counter_); if (counter_ % 20 == 0) { cc->Outputs().Index(0).AddPacket( @@ -253,7 +253,7 @@ class OutputAndBoundSourceCalculator : public CalculatorBase { return tool::StatusStop(); } counter_ += 10; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: @@ -266,42 +266,42 @@ REGISTER_CALCULATOR(OutputAndBoundSourceCalculator); // Process() method. The input stream and output stream have the integer type. 
class Delay20Calculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set(); cc->Outputs().Index(0).Set(); cc->SetTimestampOffset(TimestampDiff(20)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { cc->Outputs().Index(0).AddPacket(MakePacket(0).At(Timestamp(0))); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { const Packet& packet = cc->Inputs().Index(0).Value(); Timestamp timestamp = packet.Timestamp() + 20; cc->Outputs().Index(0).AddPacket(packet.At(timestamp)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(Delay20Calculator); class CustomBoundCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set(); cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { - return ::mediapipe::OkStatus(); + mediapipe::Status Open(CalculatorContext* cc) final { + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { cc->Outputs().Index(0).SetNextTimestampBound(cc->InputTimestamp() + 1); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(CustomBoundCalculator); @@ -310,7 +310,7 @@ REGISTER_CALCULATOR(CustomBoundCalculator); TEST(CalculatorGraph, SetNextTimestampBoundPropagation) { CalculatorGraph graph; CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: 'in' input_stream: 'gate' node { @@ -435,7 +435,7 @@ TEST(CalculatorGraph, NotAllInputPacketsAtNextTimestampBoundAvailable) { // CalculatorGraph graph; CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: 'in0_unfiltered' input_stream: 'in1_to_be_filtered' node { @@ -507,7 +507,7 @@ TEST(CalculatorGraph, NotAllInputPacketsAtNextTimestampBoundAvailable) { TEST(CalculatorGraph, PropagateBoundLoop) { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( node { calculator: 'OutputAndBoundSourceCalculator' output_stream: 'integers' @@ -553,7 +553,7 @@ TEST(CalculatorGraph, CheckBatchProcessingBoundPropagation) { // the sink calculator's input stream should report packet timestamp // mismatches. CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( node { calculator: 'OutputAndBoundSourceCalculator' output_stream: 'integers' @@ -585,7 +585,7 @@ TEST(CalculatorGraphBoundsTest, ImmediateHandlerBounds) { // The second PassthroughCalculator delivers an output packet whenever the // first PassThroughCalculator delivers a timestamp bound. 
CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: 'input' node { calculator: 'CustomBoundCalculator' @@ -613,7 +613,7 @@ TEST(CalculatorGraphBoundsTest, ImmediateHandlerBounds) { MP_ASSERT_OK(graph.Initialize(config)); MP_ASSERT_OK(graph.ObserveOutputStream("output", [&](const Packet& p) { output_packets.push_back(p); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); MP_ASSERT_OK(graph.StartRun({})); MP_ASSERT_OK(graph.WaitUntilIdle()); @@ -638,19 +638,19 @@ TEST(CalculatorGraphBoundsTest, ImmediateHandlerBounds) { // A Calculator that only sets timestamp bound by SetOffset(). class OffsetBoundCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set(); cc->Outputs().Index(0).Set(); cc->SetTimestampOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { - return ::mediapipe::OkStatus(); + mediapipe::Status Open(CalculatorContext* cc) final { + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { - return ::mediapipe::OkStatus(); + mediapipe::Status Process(CalculatorContext* cc) final { + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(OffsetBoundCalculator); @@ -658,27 +658,27 @@ REGISTER_CALCULATOR(OffsetBoundCalculator); // A Calculator that produces a packet for each call to Process. class BoundToPacketCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { cc->Inputs().Index(i).SetAny(); } for (int i = 0; i < cc->Outputs().NumEntries(); ++i) { cc->Outputs().Index(i).Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { - return ::mediapipe::OkStatus(); + mediapipe::Status Open(CalculatorContext* cc) final { + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { for (int i = 0; i < cc->Outputs().NumEntries(); ++i) { Timestamp t = cc->Inputs().Index(i).Value().Timestamp(); cc->Outputs().Index(i).AddPacket( mediapipe::MakePacket(t).At(cc->InputTimestamp())); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(BoundToPacketCalculator); @@ -688,22 +688,22 @@ class FuturePacketCalculator : public CalculatorBase { public: static constexpr int64 kOutputFutureMicros = 3; - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set(); cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { - return ::mediapipe::OkStatus(); + mediapipe::Status Open(CalculatorContext* cc) final { + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { const Packet& packet = cc->Inputs().Index(0).Value(); Timestamp timestamp = Timestamp(packet.Timestamp().Value() + kOutputFutureMicros); cc->Outputs().Index(0).AddPacket(packet.At(timestamp)); - return 
::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(FuturePacketCalculator); @@ -715,7 +715,7 @@ TEST(CalculatorGraphBoundsTest, OffsetBoundPropagation) { // The PassThroughCalculator delivers an output packet whenever the // OffsetBoundCalculator delivers a timestamp bound. CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: 'input' node { calculator: 'OffsetBoundCalculator' @@ -735,7 +735,7 @@ TEST(CalculatorGraphBoundsTest, OffsetBoundPropagation) { MP_ASSERT_OK(graph.Initialize(config)); MP_ASSERT_OK(graph.ObserveOutputStream("output", [&](const Packet& p) { output_packets.push_back(p); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); MP_ASSERT_OK(graph.StartRun({})); MP_ASSERT_OK(graph.WaitUntilIdle()); @@ -763,7 +763,7 @@ TEST(CalculatorGraphBoundsTest, BoundWithoutInputPackets) { // The BoundToPacketCalculator delivers an output packet whenever the // OffsetBoundCalculator delivers a timestamp bound. CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: 'input' node { calculator: 'FuturePacketCalculator' @@ -786,7 +786,7 @@ TEST(CalculatorGraphBoundsTest, BoundWithoutInputPackets) { MP_ASSERT_OK(graph.Initialize(config)); MP_ASSERT_OK(graph.ObserveOutputStream("output", [&](const Packet& p) { output_packets.push_back(p); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); MP_ASSERT_OK(graph.StartRun({})); MP_ASSERT_OK(graph.WaitUntilIdle()); @@ -821,7 +821,7 @@ TEST(CalculatorGraphBoundsTest, FixedSizeHandlerBounds) { // The PassthroughCalculator delivers an output packet whenever the // LambdaCalculator delivers a timestamp bound. CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: 'input' input_side_packet: 'open_function' input_side_packet: 'process_function' @@ -860,13 +860,13 @@ TEST(CalculatorGraphBoundsTest, FixedSizeHandlerBounds) { std::vector outputs; MP_ASSERT_OK(graph.ObserveOutputStream("output", [&](const Packet& p) { outputs.push_back(p); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); std::vector thinned_outputs; MP_ASSERT_OK( graph.ObserveOutputStream("thinned_output", [&](const Packet& p) { thinned_outputs.push_back(p); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); // The enter_semaphore is used to wait for LambdaCalculator::Process. @@ -875,13 +875,13 @@ TEST(CalculatorGraphBoundsTest, FixedSizeHandlerBounds) { AtomicSemaphore exit_semaphore(0); CalculatorContextFunction open_fn = [&](CalculatorContext* cc) { cc->SetOffset(0); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); }; CalculatorContextFunction process_fn = [&](CalculatorContext* cc) { enter_semaphore.Release(1); exit_semaphore.Acquire(1); cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); }; MP_ASSERT_OK(graph.StartRun({ {"open_fn", Adopt(new auto(open_fn))}, @@ -935,22 +935,22 @@ TEST(CalculatorGraphBoundsTest, FixedSizeHandlerBounds) { // A Calculator that outputs only the last packet from its input stream. 
class LastPacketCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).SetAny(); cc->Outputs().Index(0).SetAny(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { - return ::mediapipe::OkStatus(); + mediapipe::Status Open(CalculatorContext* cc) final { + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { cc->Outputs().Index(0).SetNextTimestampBound(cc->InputTimestamp()); last_packet_ = cc->Inputs().Index(0).Value(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Close(CalculatorContext* cc) final { + mediapipe::Status Close(CalculatorContext* cc) final { cc->Outputs().Index(0).AddPacket(last_packet_); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: @@ -965,7 +965,7 @@ TEST(CalculatorGraphBoundsTest, LastPacketCheck) { // packet or input stream close. The output "last_output" shows the // last packet, and "output" shows the timestamp bounds. CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: 'input' output_stream: 'output' output_stream: 'last_output' @@ -992,12 +992,12 @@ TEST(CalculatorGraphBoundsTest, LastPacketCheck) { MP_ASSERT_OK(graph.Initialize(config)); MP_ASSERT_OK(graph.ObserveOutputStream("output", [&](const Packet& p) { output_packets.push_back(p); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); std::vector last_output_packets; MP_ASSERT_OK(graph.ObserveOutputStream("last_output", [&](const Packet& p) { last_output_packets.push_back(p); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); MP_ASSERT_OK(graph.StartRun({})); MP_ASSERT_OK(graph.WaitUntilIdle()); @@ -1048,18 +1048,18 @@ void TestBoundsForEmptyInputs(std::string input_stream_handler) { absl::StrReplaceAll({{"$input_stream_handler", input_stream_handler}}, &config_str); CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(config_str); + mediapipe::ParseTextProtoOrDie(config_str); CalculatorGraph graph; std::vector input_ts_packets; std::vector bounds_ts_packets; MP_ASSERT_OK(graph.Initialize(config)); MP_ASSERT_OK(graph.ObserveOutputStream("input_ts", [&](const Packet& p) { input_ts_packets.push_back(p); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); MP_ASSERT_OK(graph.ObserveOutputStream("bounds_ts", [&](const Packet& p) { bounds_ts_packets.push_back(p); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); MP_ASSERT_OK(graph.StartRun({})); MP_ASSERT_OK(graph.WaitUntilIdle()); @@ -1129,7 +1129,7 @@ TEST(CalculatorGraphBoundsTest, BoundsForEmptyInputs_SyncSets) { // A Calculator that produces a packet for each timestamp bounds update. 
 class ProcessBoundToPacketCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     for (int i = 0; i < cc->Inputs().NumEntries(); ++i) {
       cc->Inputs().Index(i).SetAny();
     }
@@ -1138,10 +1138,10 @@ class ProcessBoundToPacketCalculator : public CalculatorBase {
     }
     cc->SetInputStreamHandler("ImmediateInputStreamHandler");
     cc->SetProcessTimestampBounds(true);
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
-  ::mediapipe::Status Process(CalculatorContext* cc) final {
+  mediapipe::Status Process(CalculatorContext* cc) final {
     for (int i = 0; i < cc->Outputs().NumEntries(); ++i) {
       Timestamp t = cc->Inputs().Index(i).Value().Timestamp();
       // Create a new packet for each input stream with a new timestamp bound,
@@ -1151,7 +1151,7 @@ class ProcessBoundToPacketCalculator : public CalculatorBase {
         cc->Outputs().Index(i).Add(new auto(t), t);
       }
     }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 };
 REGISTER_CALCULATOR(ProcessBoundToPacketCalculator);
@@ -1159,7 +1159,7 @@ REGISTER_CALCULATOR(ProcessBoundToPacketCalculator);
 // A Calculator that passes through each packet and timestamp immediately.
 class ImmediatePassthroughCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     for (int i = 0; i < cc->Inputs().NumEntries(); ++i) {
       cc->Inputs().Index(i).SetAny();
     }
@@ -1168,10 +1168,10 @@ class ImmediatePassthroughCalculator : public CalculatorBase {
     }
     cc->SetInputStreamHandler("ImmediateInputStreamHandler");
     cc->SetProcessTimestampBounds(true);
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
-  ::mediapipe::Status Process(CalculatorContext* cc) final {
+  mediapipe::Status Process(CalculatorContext* cc) final {
     for (int i = 0; i < cc->Outputs().NumEntries(); ++i) {
       if (!cc->Inputs().Index(i).IsEmpty()) {
         cc->Outputs().Index(i).AddPacket(cc->Inputs().Index(i).Value());
@@ -1185,7 +1185,7 @@ class ImmediatePassthroughCalculator : public CalculatorBase {
         }
       }
     }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 };
 REGISTER_CALCULATOR(ImmediatePassthroughCalculator);
@@ -1217,14 +1217,14 @@ void TestProcessForEmptyInputs(const std::string& input_stream_handler) {
   absl::StrReplaceAll({{"$input_stream_handler", input_stream_handler}},
                       &config_str);
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(config_str);
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(config_str);
   CalculatorGraph graph;
   std::vector<Packet> input_ts_packets;
   std::vector<Packet> bounds_ts_packets;
   MP_ASSERT_OK(graph.Initialize(config));
   MP_ASSERT_OK(graph.ObserveOutputStream("bounds_ts", [&](const Packet& p) {
     bounds_ts_packets.push_back(p);
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }));
   MP_ASSERT_OK(graph.StartRun({}));
   MP_ASSERT_OK(graph.WaitUntilIdle());
@@ -1317,18 +1317,18 @@ TEST(CalculatorGraphBoundsTest, ProcessTimestampBounds_Passthrough) {
     }
   )";
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(config_str);
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(config_str);
   CalculatorGraph graph;
   std::vector<Packet> output_0_packets;
   std::vector<Packet> output_1_packets;
   MP_ASSERT_OK(graph.Initialize(config));
   MP_ASSERT_OK(graph.ObserveOutputStream("output_0", [&](const Packet& p) {
     output_0_packets.push_back(p);
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }));
   MP_ASSERT_OK(graph.ObserveOutputStream("output_1",
                                          [&](const Packet& p) {
                                            output_1_packets.push_back(p);
-                                           return ::mediapipe::OkStatus();
+                                           return mediapipe::OkStatus();
                                          }));
   MP_ASSERT_OK(graph.StartRun({}));
   MP_ASSERT_OK(graph.WaitUntilIdle());
@@ -1378,20 +1378,20 @@ TEST(CalculatorGraphBoundsTest, ProcessTimestampBounds_Passthrough) {
 // A Calculator that sends a timestamp bound for every other input.
 class OccasionalBoundCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     cc->Inputs().Index(0).Set<int>();
     cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
-  ::mediapipe::Status Process(CalculatorContext* cc) final {
+  mediapipe::Status Process(CalculatorContext* cc) final {
     absl::SleepFor(absl::Milliseconds(1));
     if (cc->InputTimestamp().Value() % 20 == 0) {
       Timestamp bound = cc->InputTimestamp().NextAllowedInStream();
       cc->Outputs().Index(0).SetNextTimestampBound(
           std::max(bound, cc->Outputs().Index(0).NextTimestampBound()));
     }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 };
 REGISTER_CALCULATOR(OccasionalBoundCalculator);
@@ -1413,13 +1413,13 @@ TEST(CalculatorGraphBoundsTest, MaxInFlightWithOccasionalBound) {
     num_threads: 4
   )";
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(config_str);
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(config_str);
   CalculatorGraph graph;
   std::vector<Packet> output_0_packets;
   MP_ASSERT_OK(graph.Initialize(config));
   MP_ASSERT_OK(graph.ObserveOutputStream("output_0", [&](const Packet& p) {
     output_0_packets.push_back(p);
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }));
   MP_ASSERT_OK(graph.StartRun({}));
   MP_ASSERT_OK(graph.WaitUntilIdle());
@@ -1443,20 +1443,20 @@ TEST(CalculatorGraphBoundsTest, MaxInFlightWithOccasionalBound) {
 // A Calculator that uses both SetTimestampOffset and SetNextTimestampBound.
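One subtlety in OccasionalBoundCalculator above: timestamp bounds must never move backwards, so the newly computed bound is clamped against the bound already announced. The guard is worth copying whenever a computed bound can lag one that was set earlier; roughly:

    // Only ever advances the output bound, never retracts it.
    Timestamp candidate = cc->InputTimestamp().NextAllowedInStream();
    cc->Outputs().Index(0).SetNextTimestampBound(
        std::max(candidate, cc->Outputs().Index(0).NextTimestampBound()));

Under max_in_flight throttling these occasional bound updates are also what release queued packets, which is the behavior the MaxInFlightWithOccasionalBound test exercises.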
 class OffsetAndBoundCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     cc->Inputs().Index(0).Set<int>();
     cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0));
     cc->SetTimestampOffset(TimestampDiff(0));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
-  ::mediapipe::Status Open(CalculatorContext* cc) final {
-    return ::mediapipe::OkStatus();
+  mediapipe::Status Open(CalculatorContext* cc) final {
+    return mediapipe::OkStatus();
   }
-  ::mediapipe::Status Process(CalculatorContext* cc) final {
+  mediapipe::Status Process(CalculatorContext* cc) final {
     if (cc->InputTimestamp().Value() % 20 == 0) {
       cc->Outputs().Index(0).SetNextTimestampBound(Timestamp(10000));
     }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 };
 REGISTER_CALCULATOR(OffsetAndBoundCalculator);
@@ -1475,13 +1475,13 @@ TEST(CalculatorGraphBoundsTest, OffsetAndBound) {
     }
   )";
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(config_str);
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(config_str);
   CalculatorGraph graph;
   std::vector<Packet> output_0_packets;
   MP_ASSERT_OK(graph.Initialize(config));
   MP_ASSERT_OK(graph.ObserveOutputStream("output_0", [&](const Packet& p) {
     output_0_packets.push_back(p);
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }));
   MP_ASSERT_OK(graph.StartRun({}));
   MP_ASSERT_OK(graph.WaitUntilIdle());
diff --git a/mediapipe/framework/calculator_graph_event_loop_test.cc b/mediapipe/framework/calculator_graph_event_loop_test.cc
index eb7b1d866..4cdae61a4 100644
--- a/mediapipe/framework/calculator_graph_event_loop_test.cc
+++ b/mediapipe/framework/calculator_graph_event_loop_test.cc
@@ -53,25 +53,25 @@ class CalculatorGraphEventLoopTest : public testing::Test {
 // testing.
class BlockingPassThroughCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).SetAny(); cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0)); cc->InputSidePackets().Index(0).Set>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { mutex_ = GetFromUniquePtr(cc->InputSidePackets().Index(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { mutex_->Lock(); cc->Outputs().Index(0).AddPacket( cc->Inputs().Index(0).Value().At(cc->InputTimestamp())); mutex_->Unlock(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: @@ -87,15 +87,15 @@ struct SimpleHeader { class UsingHeaderCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).SetAny(); cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { if (cc->Inputs().Index(0).Header().IsEmpty()) { - return ::mediapipe::UnknownError("No stream header present."); + return mediapipe::UnknownError("No stream header present."); } const SimpleHeader& header = @@ -105,13 +105,13 @@ class UsingHeaderCalculator : public CalculatorBase { output_header->height = header.height; cc->Outputs().Index(0).SetHeader(Adopt(output_header.release())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { cc->Outputs().Index(0).AddPacket( cc->Inputs().Index(0).Value().At(cc->InputTimestamp())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(UsingHeaderCalculator); @@ -187,21 +187,21 @@ TEST_F(CalculatorGraphEventLoopTest, WellProvisionedEventLoop) { // Pass-Through calculator that fails upon receiving the 10th packet. class FailingPassThroughCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).SetAny(); cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { Timestamp timestamp = cc->InputTimestamp(); if (timestamp.Value() == 9) { - return ::mediapipe::UnknownError( + return mediapipe::UnknownError( "Meant to fail (magicstringincludedhere)."); } cc->Outputs().Index(0).AddPacket( cc->Inputs().Index(0).Value().At(timestamp)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(FailingPassThroughCalculator); @@ -231,7 +231,7 @@ TEST_F(CalculatorGraphEventLoopTest, FailingEventLoop) { this, std::placeholders::_1))}})); // Insert packets. 
- ::mediapipe::Status status; + mediapipe::Status status; for (int i = 0; true; ++i) { status = graph.AddPacketToInputStream("input_numbers", Adopt(new int(i)).At(Timestamp(i))); @@ -315,10 +315,10 @@ TEST_F(CalculatorGraphEventLoopTest, SetStreamHeader) { &CalculatorGraphEventLoopTest::AddThreadSafeVectorSink, this, std::placeholders::_1))}})); - ::mediapipe::Status status = graph.WaitUntilIdle(); + mediapipe::Status status = graph.WaitUntilIdle(); // Expect to fail if header not set. ASSERT_FALSE(status.ok()); - EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kUnknown); + EXPECT_EQ(status.code(), mediapipe::StatusCode::kUnknown); EXPECT_THAT(status.message(), testing::HasSubstr("No stream header present.")); @@ -387,7 +387,7 @@ TEST_F(CalculatorGraphEventLoopTest, TryToAddPacketToInputStream) { // mechanism could be off by 1 at most due to the order of acquisition of // locks. for (int i = 0; i < kNumInputPackets; ++i) { - ::mediapipe::Status status = graph.AddPacketToInputStream( + mediapipe::Status status = graph.AddPacketToInputStream( "input_numbers", Adopt(new int(i)).At(Timestamp(i))); if (!status.ok()) { ++fail_count; @@ -472,7 +472,7 @@ TEST_F(CalculatorGraphEventLoopTest, ThrottleGraphInputStreamTwice) { // Lock the mutex so that the BlockingPassThroughCalculator cannot read any // of these packets. mutex->Lock(); - ::mediapipe::Status status = ::mediapipe::OkStatus(); + mediapipe::Status status = mediapipe::OkStatus(); for (int i = 0; i < 10; ++i) { status = graph.AddPacketToInputStream("input_numbers", Adopt(new int(i)).At(Timestamp(i))); @@ -482,7 +482,7 @@ TEST_F(CalculatorGraphEventLoopTest, ThrottleGraphInputStreamTwice) { } mutex->Unlock(); ASSERT_FALSE(status.ok()); - EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kUnavailable); + EXPECT_EQ(status.code(), mediapipe::StatusCode::kUnavailable); EXPECT_THAT(status.message(), testing::HasSubstr("Graph is throttled.")); MP_ASSERT_OK(graph.CloseInputStream("input_numbers")); MP_ASSERT_OK(graph.WaitUntilDone()); @@ -523,7 +523,7 @@ TEST_F(CalculatorGraphEventLoopTest, WaitToAddPacketToInputStream) { // All of these packets should be accepted by the graph. int fail_count = 0; for (int i = 0; i < kNumInputPackets; ++i) { - ::mediapipe::Status status = graph.AddPacketToInputStream( + mediapipe::Status status = graph.AddPacketToInputStream( "input_numbers", Adopt(new int(i)).At(Timestamp(i))); if (!status.ok()) { ++fail_count; diff --git a/mediapipe/framework/calculator_graph_side_packet_test.cc b/mediapipe/framework/calculator_graph_side_packet_test.cc index fd78dc7d7..e530238e0 100644 --- a/mediapipe/framework/calculator_graph_side_packet_test.cc +++ b/mediapipe/framework/calculator_graph_side_packet_test.cc @@ -38,16 +38,16 @@ namespace { // output side packet. 
class OutputSidePacketInProcessCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).SetAny(); cc->OutputSidePackets().Index(0).SetSameAs(&cc->Inputs().Index(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { cc->OutputSidePackets().Index(0).Set( cc->Inputs().Index(0).Value().At(Timestamp::Unset())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(OutputSidePacketInProcessCalculator); @@ -56,22 +56,22 @@ REGISTER_CALCULATOR(OutputSidePacketInProcessCalculator); // receives. Outputs the total number of packets as a side packet in Close. class CountAndOutputSummarySidePacketInCloseCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).SetAny(); cc->OutputSidePackets().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { ++count_; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Close(CalculatorContext* cc) final { + mediapipe::Status Close(CalculatorContext* cc) final { absl::SleepFor(absl::Milliseconds(300)); // For GetOutputSidePacket test. cc->OutputSidePackets().Index(0).Set( MakePacket(count_).At(Timestamp::Unset())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } int count_ = 0; @@ -82,15 +82,15 @@ REGISTER_CALCULATOR(CountAndOutputSummarySidePacketInCloseCalculator); // output side packet. This triggers an error in the graph. class OutputSidePacketWithTimestampCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).SetAny(); cc->OutputSidePackets().Index(0).SetSameAs(&cc->Inputs().Index(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { cc->OutputSidePackets().Index(0).Set(cc->Inputs().Index(0).Value()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(OutputSidePacketWithTimestampCalculator); @@ -98,19 +98,19 @@ REGISTER_CALCULATOR(OutputSidePacketWithTimestampCalculator); // Generates an output side packet containing the integer 1. 
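The three calculators above pin down the side-packet timestamp contract this file tests: a side packet is timeless, so a stream packet must be re-stamped with Timestamp::Unset() before being handed to Set(), and OutputSidePacketWithTimestampCalculator exists precisely to exercise the error path where that step is skipped. The correct idiom, as a standalone sketch:

    // Inside Process(): forward the current input packet as a side packet.
    // Dropping the stream timestamp is mandatory; keeping it is the error
    // that the "has a timestamp" test further down provokes on purpose.
    cc->OutputSidePackets().Index(0).Set(
        cc->Inputs().Index(0).Value().At(Timestamp::Unset()));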
class IntegerOutputSidePacketCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->OutputSidePackets().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { cc->OutputSidePackets().Index(0).Set(MakePacket(1)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { LOG(FATAL) << "Not reached."; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(IntegerOutputSidePacketCalculator); @@ -119,23 +119,23 @@ REGISTER_CALCULATOR(IntegerOutputSidePacketCalculator); // side packets. class SidePacketAdderCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->InputSidePackets().Index(0).Set(); cc->InputSidePackets().Index(1).Set(); cc->OutputSidePackets().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { cc->OutputSidePackets().Index(0).Set( MakePacket(cc->InputSidePackets().Index(1).Get() + cc->InputSidePackets().Index(0).Get())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { LOG(FATAL) << "Not reached."; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(SidePacketAdderCalculator); @@ -144,21 +144,21 @@ REGISTER_CALCULATOR(SidePacketAdderCalculator); // input side packet. class SidePacketToStreamPacketCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->InputSidePackets().Index(0).SetAny(); cc->Outputs().Index(0).SetSameAs(&cc->InputSidePackets().Index(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { cc->Outputs().Index(0).AddPacket( cc->InputSidePackets().Index(0).At(Timestamp::PostStream())); cc->Outputs().Index(0).Close(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { - return ::mediapipe::tool::StatusStop(); + mediapipe::Status Process(CalculatorContext* cc) final { + return mediapipe::tool::StatusStop(); } }; REGISTER_CALCULATOR(SidePacketToStreamPacketCalculator); @@ -166,18 +166,18 @@ REGISTER_CALCULATOR(SidePacketToStreamPacketCalculator); // Packet generator for an arbitrary unit64 packet. 
class Uint64PacketGenerator : public PacketGenerator { public: - static ::mediapipe::Status FillExpectations( + static mediapipe::Status FillExpectations( const PacketGeneratorOptions& extendable_options, PacketTypeSet* input_side_packets, PacketTypeSet* output_side_packets) { output_side_packets->Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - static ::mediapipe::Status Generate( + static mediapipe::Status Generate( const PacketGeneratorOptions& extendable_options, const PacketSet& input_side_packets, PacketSet* output_side_packets) { output_side_packets->Index(0) = Adopt(new uint64(15LL << 32 | 5)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_PACKET_GENERATOR(Uint64PacketGenerator); @@ -185,7 +185,7 @@ REGISTER_PACKET_GENERATOR(Uint64PacketGenerator); TEST(CalculatorGraph, OutputSidePacketInProcess) { const int64 offset = 100; CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "offset" node { calculator: "OutputSidePacketInProcessCalculator" @@ -204,7 +204,7 @@ TEST(CalculatorGraph, OutputSidePacketInProcess) { MP_ASSERT_OK(graph.ObserveOutputStream( "output", [&output_packets](const Packet& packet) { output_packets.push_back(packet); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); // Run the graph twice. @@ -226,11 +226,11 @@ TEST(CalculatorGraph, OutputSidePacketInProcess) { // also be ignored. class PassThroughGenerator : public PacketGenerator { public: - static ::mediapipe::Status FillExpectations( + static mediapipe::Status FillExpectations( const PacketGeneratorOptions& extendable_options, PacketTypeSet* inputs, PacketTypeSet* outputs) { if (!inputs->TagMap()->SameAs(*outputs->TagMap())) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "Input and outputs to PassThroughGenerator must use the same tags " "and indexes."); } @@ -238,24 +238,24 @@ class PassThroughGenerator : public PacketGenerator { inputs->Get(id).SetAny(); outputs->Get(id).SetSameAs(&inputs->Get(id)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - static ::mediapipe::Status Generate( + static mediapipe::Status Generate( const PacketGeneratorOptions& extendable_options, const PacketSet& input_side_packets, PacketSet* output_side_packets) { for (CollectionItemId id = input_side_packets.BeginId(); id < input_side_packets.EndId(); ++id) { output_side_packets->Get(id) = input_side_packets.Get(id); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_PACKET_GENERATOR(PassThroughGenerator); TEST(CalculatorGraph, SharePacketGeneratorGraph) { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( node { calculator: 'CountingSourceCalculator' output_stream: 'count1' @@ -383,7 +383,7 @@ TEST(CalculatorGraph, SharePacketGeneratorGraph) { TEST(CalculatorGraph, OutputSidePacketAlreadySet) { const int64 offset = 100; CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "offset" node { calculator: "OutputSidePacketInProcessCalculator" @@ -402,15 +402,15 @@ TEST(CalculatorGraph, OutputSidePacketAlreadySet) { "offset", MakePacket(offset).At(Timestamp(1)))); MP_ASSERT_OK(graph.CloseInputStream("offset")); - ::mediapipe::Status status = graph.WaitUntilDone(); - EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kAlreadyExists); + mediapipe::Status status = 
graph.WaitUntilDone(); + EXPECT_EQ(status.code(), mediapipe::StatusCode::kAlreadyExists); EXPECT_THAT(status.message(), testing::HasSubstr("was already set.")); } TEST(CalculatorGraph, OutputSidePacketWithTimestamp) { const int64 offset = 100; CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "offset" node { calculator: "OutputSidePacketWithTimestampCalculator" @@ -428,15 +428,15 @@ TEST(CalculatorGraph, OutputSidePacketWithTimestamp) { MP_ASSERT_OK(graph.AddPacketToInputStream( "offset", MakePacket(offset).At(Timestamp(237)))); MP_ASSERT_OK(graph.CloseInputStream("offset")); - ::mediapipe::Status status = graph.WaitUntilDone(); - EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kInvalidArgument); + mediapipe::Status status = graph.WaitUntilDone(); + EXPECT_EQ(status.code(), mediapipe::StatusCode::kInvalidArgument); EXPECT_THAT(status.message(), testing::HasSubstr("has a timestamp 237.")); } TEST(CalculatorGraph, OutputSidePacketConsumedBySourceNode) { const int max_count = 10; CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "max_count" node { calculator: "OutputSidePacketInProcessCalculator" @@ -460,7 +460,7 @@ TEST(CalculatorGraph, OutputSidePacketConsumedBySourceNode) { MP_ASSERT_OK(graph.ObserveOutputStream( "output", [&output_packets](const Packet& packet) { output_packets.push_back(packet); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); MP_ASSERT_OK(graph.StartRun({})); // Wait until the graph is idle so that @@ -486,19 +486,19 @@ class FirstPacketFilterCalculator : public CalculatorBase { FirstPacketFilterCalculator() {} ~FirstPacketFilterCalculator() override {} - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).SetAny(); cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { if (!seen_first_packet_) { cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value()); cc->Outputs().Index(0).Close(); seen_first_packet_ = true; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: @@ -529,7 +529,7 @@ TEST(CalculatorGraph, SourceLayerInversion) { // Set num_threads to 1 to force sequential execution for deterministic // outputs. CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( num_threads: 1 node { calculator: "CountingSourceCalculator" @@ -568,8 +568,8 @@ TEST(CalculatorGraph, SourceLayerInversion) { MP_ASSERT_OK(graph.Initialize( config, {{"max_count", MakePacket(max_count)}, {"initial_value1", MakePacket(initial_value1)}})); - ::mediapipe::Status status = graph.Run(); - EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kUnknown); + mediapipe::Status status = graph.Run(); + EXPECT_EQ(status.code(), mediapipe::StatusCode::kUnknown); EXPECT_THAT(status.message(), testing::HasSubstr("deadlock")); } @@ -577,7 +577,7 @@ TEST(CalculatorGraph, SourceLayerInversion) { // streams and no output streams. 
TEST(CalculatorGraph, PacketGeneratorLikeCalculators) { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( node { calculator: "IntegerOutputSidePacketCalculator" output_side_packet: "one" @@ -614,7 +614,7 @@ TEST(CalculatorGraph, PacketGeneratorLikeCalculators) { MP_ASSERT_OK(graph.ObserveOutputStream( "output", [&output_packets](const Packet& packet) { output_packets.push_back(packet); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); MP_ASSERT_OK(graph.Run()); ASSERT_EQ(1, output_packets.size()); @@ -624,7 +624,7 @@ TEST(CalculatorGraph, PacketGeneratorLikeCalculators) { TEST(CalculatorGraph, OutputSummarySidePacketInClose) { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "input_packets" node { calculator: "CountAndOutputSummarySidePacketInCloseCalculator" @@ -643,7 +643,7 @@ TEST(CalculatorGraph, OutputSummarySidePacketInClose) { MP_ASSERT_OK(graph.ObserveOutputStream( "output", [&output_packets](const Packet& packet) { output_packets.push_back(packet); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); // Run the graph twice. @@ -665,7 +665,7 @@ TEST(CalculatorGraph, OutputSummarySidePacketInClose) { TEST(CalculatorGraph, GetOutputSidePacket) { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "input_packets" node { calculator: "CountAndOutputSummarySidePacketInCloseCalculator" @@ -686,14 +686,14 @@ TEST(CalculatorGraph, GetOutputSidePacket) { MP_ASSERT_OK(graph.Initialize(config)); // Check a packet generated by the PacketGenerator, which is available after // graph initialization, can be fetched before graph starts. - ::mediapipe::StatusOr status_or_packet = + mediapipe::StatusOr status_or_packet = graph.GetOutputSidePacket("output_uint64"); MP_ASSERT_OK(status_or_packet); EXPECT_EQ(Timestamp::Unset(), status_or_packet.ValueOrDie().Timestamp()); // IntSplitterPacketGenerator is missing its input side packet and we // won't be able to get its output side packet now. status_or_packet = graph.GetOutputSidePacket("output_uint32_pair"); - EXPECT_EQ(::mediapipe::StatusCode::kUnavailable, + EXPECT_EQ(mediapipe::StatusCode::kUnavailable, status_or_packet.status().code()); // Run the graph twice. int max_count = 100; @@ -713,13 +713,13 @@ TEST(CalculatorGraph, GetOutputSidePacket) { // Should return NOT_FOUND for invalid side packets. status_or_packet = graph.GetOutputSidePacket("unknown"); EXPECT_FALSE(status_or_packet.ok()); - EXPECT_EQ(::mediapipe::StatusCode::kNotFound, + EXPECT_EQ(mediapipe::StatusCode::kNotFound, status_or_packet.status().code()); // Should return UNAVAILABLE before graph is done for valid non-base // packets. status_or_packet = graph.GetOutputSidePacket("num_of_packets"); EXPECT_FALSE(status_or_packet.ok()); - EXPECT_EQ(::mediapipe::StatusCode::kUnavailable, + EXPECT_EQ(mediapipe::StatusCode::kUnavailable, status_or_packet.status().code()); // Should stil return a base even before graph is done. status_or_packet = graph.GetOutputSidePacket("output_uint64"); @@ -749,20 +749,20 @@ typedef std::string HugeModel; // Generates an output-side-packet once for each calculator-graph. 
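The GetOutputSidePacket test above doubles as the reference for the caller-side pattern: the call returns a StatusOr holding a Packet, and kUnavailable versus kNotFound distinguishes "not ready yet" from "no such side packet". A hypothetical caller sketch (side-packet names taken from the test above):

    mediapipe::StatusOr<Packet> status_or_packet =
        graph.GetOutputSidePacket("num_of_packets");
    if (status_or_packet.ok()) {
      // Side packets carry no timestamp, hence Timestamp::Unset() above.
      int count = status_or_packet.ValueOrDie().Get<int>();
    } else if (status_or_packet.status().code() ==
               mediapipe::StatusCode::kUnavailable) {
      // Valid name, but its producer has not finished yet; retry after
      // WaitUntilDone().
    }  // kNotFound would mean the name is not in the config at all.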
class OutputSidePacketCachedCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->OutputSidePackets().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { cc->OutputSidePackets().Index(0).Set(MakePacket( R"(An expensive side-packet created only once per graph)")); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { LOG(FATAL) << "Not reached."; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(OutputSidePacketCachedCalculator); @@ -774,7 +774,7 @@ bool Equals(Packet p1, Packet p2) { TEST(CalculatorGraph, OutputSidePacketCached) { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( node { calculator: "OutputSidePacketCachedCalculator" output_side_packet: "model" @@ -791,7 +791,7 @@ TEST(CalculatorGraph, OutputSidePacketCached) { MP_ASSERT_OK(graph.ObserveOutputStream( "output", [&output_packets](const Packet& packet) { output_packets.push_back(packet); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); // Run the graph three times. diff --git a/mediapipe/framework/calculator_graph_stopping_test.cc b/mediapipe/framework/calculator_graph_stopping_test.cc index b7a9fafcc..75262a03f 100644 --- a/mediapipe/framework/calculator_graph_stopping_test.cc +++ b/mediapipe/framework/calculator_graph_stopping_test.cc @@ -34,39 +34,39 @@ namespace mediapipe {} namespace testing_ns { -using ::mediapipe::CalculatorBase; -using ::mediapipe::CalculatorContext; -using ::mediapipe::CalculatorContract; -using ::mediapipe::CalculatorGraphConfig; -using ::mediapipe::GetFromUniquePtr; -using ::mediapipe::InputStreamShardSet; -using ::mediapipe::MakePacket; -using ::mediapipe::OutputStreamShardSet; -using ::mediapipe::Timestamp; -namespace proto_ns = ::mediapipe::proto_ns; -using ::mediapipe::CalculatorGraph; -using ::mediapipe::Packet; +using mediapipe::CalculatorBase; +using mediapipe::CalculatorContext; +using mediapipe::CalculatorContract; +using mediapipe::CalculatorGraphConfig; +using mediapipe::GetFromUniquePtr; +using mediapipe::InputStreamShardSet; +using mediapipe::MakePacket; +using mediapipe::OutputStreamShardSet; +using mediapipe::Timestamp; +namespace proto_ns = mediapipe::proto_ns; +using mediapipe::CalculatorGraph; +using mediapipe::Packet; class InfiniteSequenceCalculator : public mediapipe::CalculatorBase { public: - static ::mediapipe::Status GetContract(mediapipe::CalculatorContract* cc) { + static mediapipe::Status GetContract(mediapipe::CalculatorContract* cc) { cc->Outputs().Tag("OUT").Set(); cc->Outputs().Tag("EVENT").Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->Outputs().Tag("EVENT").AddPacket(MakePacket(1).At(Timestamp(1))); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { cc->Outputs().Tag("OUT").AddPacket( MakePacket(count_).At(Timestamp(count_))); count_++; - return 
::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
-  ::mediapipe::Status Close(CalculatorContext* cc) override {
+  mediapipe::Status Close(CalculatorContext* cc) override {
     cc->Outputs().Tag("EVENT").AddPacket(MakePacket<int>(2).At(Timestamp(2)));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
  private:
@@ -76,30 +76,30 @@ REGISTER_CALCULATOR(::testing_ns::InfiniteSequenceCalculator);
 class StoppingPassThroughCalculator : public mediapipe::CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     for (int i = 0; i < cc->Inputs().NumEntries(""); ++i) {
       cc->Inputs().Get("", i).SetAny();
       cc->Outputs().Get("", i).SetSameAs(&cc->Inputs().Get("", i));
     }
     cc->Outputs().Tag("EVENT").Set<int>();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
-  ::mediapipe::Status Open(CalculatorContext* cc) override {
+  mediapipe::Status Open(CalculatorContext* cc) override {
     cc->Outputs().Tag("EVENT").AddPacket(MakePacket<int>(1).At(Timestamp(1)));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
-  ::mediapipe::Status Process(CalculatorContext* cc) override {
+  mediapipe::Status Process(CalculatorContext* cc) override {
     for (int i = 0; i < cc->Inputs().NumEntries(""); ++i) {
       if (!cc->Inputs().Get("", i).IsEmpty()) {
         cc->Outputs().Get("", i).AddPacket(cc->Inputs().Get("", i).Value());
       }
     }
-    return (++count_ <= max_count_) ? ::mediapipe::OkStatus()
-                                    : ::mediapipe::tool::StatusStop();
+    return (++count_ <= max_count_) ? mediapipe::OkStatus()
+                                    : mediapipe::tool::StatusStop();
   }
-  ::mediapipe::Status Close(CalculatorContext* cc) override {
+  mediapipe::Status Close(CalculatorContext* cc) override {
     cc->Outputs().Tag("EVENT").AddPacket(MakePacket<int>(2).At(Timestamp(2)));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
  private:
@@ -124,39 +124,39 @@ class AtomicSemaphore {
 };
 
 // A ProcessFunction that passes through all packets.
-::mediapipe::Status DoProcess(const InputStreamShardSet& inputs,
-                              OutputStreamShardSet* outputs) {
+mediapipe::Status DoProcess(const InputStreamShardSet& inputs,
+                            OutputStreamShardSet* outputs) {
   for (int i = 0; i < inputs.NumEntries(); ++i) {
     if (!inputs.Index(i).Value().IsEmpty()) {
       outputs->Index(i).AddPacket(inputs.Index(i).Value());
     }
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
-typedef std::function<::mediapipe::Status(const InputStreamShardSet&,
-                                          OutputStreamShardSet*)>
+typedef std::function<mediapipe::Status(const InputStreamShardSet&,
+                                        OutputStreamShardSet*)>
     ProcessFunction;
 
 // A Calculator that delegates its Process function to a callback function.
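For reviewers unfamiliar with the delegation pattern that follows: the ProcessFunction typedef above travels into the graph as an input side packet holding a unique_ptr, which is what lets the calculator fetch it with GetFromUniquePtr in Open(). A hypothetical wiring sketch (the side-packet name is invented for illustration):

    // The packet owns the function object; AdoptAsUniquePtr wraps it so
    // GetFromUniquePtr<ProcessFunction>() can retrieve it inside Open().
    ProcessFunction fn = DoProcess;
    MP_ASSERT_OK(graph.StartRun(
        {{"callback_0", mediapipe::AdoptAsUniquePtr(new ProcessFunction(fn))}}));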
class ProcessCallbackCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { cc->Inputs().Index(i).SetAny(); cc->Outputs().Index(i).SetSameAs(&cc->Inputs().Index(0)); } cc->InputSidePackets().Index(0).Set>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { callback_ = *GetFromUniquePtr(cc->InputSidePackets().Index(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { return callback_(cc->Inputs(), &(cc->Outputs())); } @@ -202,22 +202,22 @@ TEST(CalculatorGraphStoppingTest, CloseAllPacketSources) { if (out_packets.size() >= kNumPackets) { MP_EXPECT_OK(graph.CloseAllPacketSources()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); MP_ASSERT_OK(graph.ObserveOutputStream( // "count_out", [&](const Packet& packet) { count_packets.push_back(packet); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); MP_ASSERT_OK(graph.ObserveOutputStream( // "event", [&](const Packet& packet) { event_packets.push_back(packet.Get()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); MP_ASSERT_OK(graph.ObserveOutputStream( // "event_out", [&](const Packet& packet) { event_out_packets.push_back(packet.Get()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); MP_ASSERT_OK(graph.StartRun({})); for (int i = 0; i < kNumPackets; ++i) { @@ -261,7 +261,7 @@ TEST(CalculatorGraphStoppingTest, DeadlockReporting) { MP_ASSERT_OK( graph.ObserveOutputStream("out_1", [&out_packets](const Packet& packet) { out_packets.push_back(packet); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); // Lambda that waits for a local semaphore. @@ -289,8 +289,8 @@ TEST(CalculatorGraphStoppingTest, DeadlockReporting) { MP_EXPECT_OK(add_packet("in_1", 2)); EXPECT_FALSE(add_packet("in_1", 3).ok()); - ::mediapipe::Status status = graph.WaitUntilIdle(); - EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kUnavailable); + mediapipe::Status status = graph.WaitUntilIdle(); + EXPECT_EQ(status.code(), mediapipe::StatusCode::kUnavailable); EXPECT_THAT( status.message(), testing::HasSubstr("Detected a deadlock due to input throttling")); @@ -326,7 +326,7 @@ TEST(CalculatorGraphStoppingTest, DeadlockResolution) { MP_ASSERT_OK( graph.ObserveOutputStream("out_1", [&out_packets](const Packet& packet) { out_packets.push_back(packet); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); // Lambda that waits for a local semaphore. diff --git a/mediapipe/framework/calculator_graph_test.cc b/mediapipe/framework/calculator_graph_test.cc index 2b63490a0..38b4bf8d0 100644 --- a/mediapipe/framework/calculator_graph_test.cc +++ b/mediapipe/framework/calculator_graph_test.cc @@ -72,24 +72,24 @@ using testing::HasSubstr; // instead of Open(). 
class SetOffsetInProcessCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).SetAny(); cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { // Input: arbitrary Packets. // Output: copy of the input. cc->Outputs().Index(0).SetHeader(cc->Inputs().Index(0).Header()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { cc->SetOffset(TimestampDiff(0)); cc->GetCounter("PassThrough")->Increment(); cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(SetOffsetInProcessCalculator); @@ -97,21 +97,21 @@ REGISTER_CALCULATOR(SetOffsetInProcessCalculator); // A Calculator that outputs the square of its input packet (an int). class SquareIntCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set(); cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0)); cc->SetTimestampOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { - return ::mediapipe::OkStatus(); + mediapipe::Status Open(CalculatorContext* cc) final { + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { int value = cc->Inputs().Index(0).Value().Get(); cc->Outputs().Index(0).Add(new int(value * value), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(SquareIntCalculator); @@ -125,7 +125,7 @@ REGISTER_CALCULATOR(SquareIntCalculator); // the unselected outputs. 
class DemuxTimedCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { RET_CHECK_EQ(cc->Inputs().NumEntries(), 2); cc->Inputs().Tag("SELECT").Set(); PacketType* data_input = &cc->Inputs().Tag("INPUT"); @@ -135,18 +135,18 @@ class DemuxTimedCalculator : public CalculatorBase { cc->Outputs().Get(id).SetSameAs(data_input); } cc->SetTimestampOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { select_input_ = cc->Inputs().GetId("SELECT", 0); data_input_ = cc->Inputs().GetId("INPUT", 0); output_base_ = cc->Outputs().GetId("OUTPUT", 0); num_outputs_ = cc->Outputs().NumEntries("OUTPUT"); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { int select = cc->Inputs().Get(select_input_).Get(); RET_CHECK(0 <= select && select < num_outputs_); const Timestamp next_timestamp_bound = @@ -162,7 +162,7 @@ class DemuxTimedCalculator : public CalculatorBase { .SetNextTimestampBound(next_timestamp_bound); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: @@ -183,7 +183,7 @@ REGISTER_CALCULATOR(DemuxTimedCalculator); // propagation on the unselected inputs. class MuxTimedCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Tag("SELECT").Set(); CollectionItemId data_input_id = cc->Inputs().BeginId("INPUT"); PacketType* data_input0 = &cc->Inputs().Get(data_input_id); @@ -195,23 +195,23 @@ class MuxTimedCalculator : public CalculatorBase { RET_CHECK_EQ(cc->Outputs().NumEntries(), 1); cc->Outputs().Tag("OUTPUT").SetSameAs(data_input0); cc->SetTimestampOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { select_input_ = cc->Inputs().GetId("SELECT", 0); data_input_base_ = cc->Inputs().GetId("INPUT", 0); num_data_inputs_ = cc->Inputs().NumEntries("INPUT"); output_ = cc->Outputs().GetId("OUTPUT", 0); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { int select = cc->Inputs().Get(select_input_).Get(); RET_CHECK(0 <= select && select < num_data_inputs_); cc->Outputs().Get(output_).AddPacket( cc->Inputs().Get(data_input_base_ + select).Value()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: @@ -227,26 +227,26 @@ REGISTER_CALCULATOR(MuxTimedCalculator); // streams and outputs the sum to the output stream. 
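Since the demux/mux pair above only makes sense wired back-to-back, here is a hypothetical config sketch showing the intended topology (stream names invented); SELECT must reach both nodes so the mux knows which branch carries data at each timestamp:

    CalculatorGraphConfig config =
        mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
          input_stream: 'select'
          input_stream: 'value'
          node {
            calculator: 'DemuxTimedCalculator'
            input_stream: 'SELECT:select'
            input_stream: 'INPUT:value'
            output_stream: 'OUTPUT:0:branch_0'
            output_stream: 'OUTPUT:1:branch_1'
          }
          node {
            calculator: 'MuxTimedCalculator'
            input_stream: 'SELECT:select'
            input_stream: 'INPUT:0:branch_0'
            input_stream: 'INPUT:1:branch_1'
            output_stream: 'OUTPUT:out'
          }
        )");

The demux's SetNextTimestampBound on the unselected branch is what keeps the mux from stalling while it waits on a branch that will never see the current timestamp.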
class IntAdderCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { cc->Inputs().Index(i).Set(); } cc->Outputs().Index(0).Set(); cc->SetTimestampOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { - return ::mediapipe::OkStatus(); + mediapipe::Status Open(CalculatorContext* cc) final { + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { int sum = 0; for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { sum += cc->Inputs().Index(i).Get(); } cc->Outputs().Index(0).Add(new int(sum), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(IntAdderCalculator); @@ -255,26 +255,26 @@ REGISTER_CALCULATOR(IntAdderCalculator); // streams and outputs the sum to the output stream. class FloatAdderCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { cc->Inputs().Index(i).Set(); } cc->Outputs().Index(0).Set(); cc->SetTimestampOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { - return ::mediapipe::OkStatus(); + mediapipe::Status Open(CalculatorContext* cc) final { + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { float sum = 0.0; for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { sum += cc->Inputs().Index(i).Get(); } cc->Outputs().Index(0).Add(new float(sum), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(FloatAdderCalculator); @@ -283,26 +283,26 @@ REGISTER_CALCULATOR(FloatAdderCalculator); // input streams and outputs the product to the output stream. class IntMultiplierCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { cc->Inputs().Index(i).Set(); } cc->Outputs().Index(0).Set(); cc->SetTimestampOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { - return ::mediapipe::OkStatus(); + mediapipe::Status Open(CalculatorContext* cc) final { + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { int product = 1; for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { product *= cc->Inputs().Index(i).Get(); } cc->Outputs().Index(0).Add(new int(product), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(IntMultiplierCalculator); @@ -312,24 +312,24 @@ REGISTER_CALCULATOR(IntMultiplierCalculator); // output stream. 
class FloatScalarMultiplierCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set(); cc->Outputs().Index(0).Set(); cc->InputSidePackets().Index(0).Set(); cc->SetTimestampOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { scalar_ = cc->InputSidePackets().Index(0).Get(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { float value = cc->Inputs().Index(0).Value().Get(); cc->Outputs().Index(0).Add(new float(scalar_ * value), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: @@ -340,22 +340,22 @@ REGISTER_CALCULATOR(FloatScalarMultiplierCalculator); // A Calculator that converts an integer to a float. class IntToFloatCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set(); cc->Outputs().Index(0).Set(); cc->SetTimestampOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { - return ::mediapipe::OkStatus(); + mediapipe::Status Open(CalculatorContext* cc) final { + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { int value = cc->Inputs().Index(0).Value().Get(); cc->Outputs().Index(0).Add(new float(static_cast(value)), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(IntToFloatCalculator); @@ -363,12 +363,12 @@ REGISTER_CALCULATOR(IntToFloatCalculator); template class TypedEmptySourceCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Outputs().Index(0).SetAny(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { cc->Outputs().Index(0).Add(new OutputType(), Timestamp::PostStream()); return tool::StatusStop(); } @@ -381,13 +381,13 @@ REGISTER_CALCULATOR(IntEmptySourceCalculator); template class TypedSinkCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { - return ::mediapipe::OkStatus(); + mediapipe::Status Process(CalculatorContext* cc) override { + return mediapipe::OkStatus(); } }; typedef TypedSinkCalculator StringSinkCalculator; @@ -403,29 +403,29 @@ class GlobalCountSourceCalculator : public CalculatorBase { public: static const int kNumOutputPackets; - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->InputSidePackets().Index(0).Set*>(); if 
(cc->InputSidePackets().NumEntries() >= 2) { cc->InputSidePackets().Index(1).Set(); } cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { if (cc->InputSidePackets().NumEntries() >= 2 && cc->InputSidePackets().Index(1).Get()) { OutputOne(cc); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { OutputOne(cc); if (local_count_ >= kNumOutputPackets) { return tool::StatusStop(); } else { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } @@ -448,19 +448,19 @@ static const int kTestSequenceLength = 15; // Outputs the integers 0, 1, 2, 3, ..., 14, all with timestamp 0. class TestSequence1SourceCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { cc->Outputs().Index(0).Add(new int(count_), Timestamp(0)); ++count_; ++num_outputs_; if (num_outputs_ >= kTestSequenceLength) { return tool::StatusStop(); } else { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } @@ -474,12 +474,12 @@ REGISTER_CALCULATOR(TestSequence1SourceCalculator); // 100, 99, 98, 97, .... class TestSequence2SourceCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { cc->Outputs().Index(0).Add(new int(count_), Timestamp(timestamp_)); ++count_; ++num_outputs_; @@ -487,7 +487,7 @@ class TestSequence2SourceCalculator : public CalculatorBase { if (num_outputs_ >= kTestSequenceLength) { return tool::StatusStop(); } else { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } @@ -501,19 +501,19 @@ REGISTER_CALCULATOR(TestSequence2SourceCalculator); // Outputs the integers 0, 1, 2 repeatedly for a total of 15 outputs. 
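The source calculators in this stretch all follow the same lifecycle: no input streams, Process() is invoked repeatedly by the scheduler, and returning tool::StatusStop() (rather than an error) is the normal way to announce that the stream is exhausted. Distilled to a hypothetical minimum:

    class CountToThreeSourceCalculator : public CalculatorBase {
     public:
      static mediapipe::Status GetContract(CalculatorContract* cc) {
        cc->Outputs().Index(0).Set<int>();
        return mediapipe::OkStatus();
      }
      mediapipe::Status Process(CalculatorContext* cc) final {
        cc->Outputs().Index(0).Add(new int(count_), Timestamp(count_));
        // StatusStop() ends this source cleanly; an error status would
        // instead fail the whole graph run.
        return ++count_ < 3 ? mediapipe::OkStatus()
                            : mediapipe::tool::StatusStop();
      }
     private:
      int count_ = 0;
    };
    REGISTER_CALCULATOR(CountToThreeSourceCalculator);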
class Modulo3SourceCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { cc->Outputs().Index(0).Add(new int(count_ % 3), Timestamp(count_ % 3)); ++count_; ++num_outputs_; if (num_outputs_ >= kTestSequenceLength) { return tool::StatusStop(); } else { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } @@ -532,12 +532,12 @@ class OutputAllSourceCalculator : public CalculatorBase { public: static constexpr int kNumOutputPackets = 100; - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { for (int i = 0; i < kNumOutputPackets; ++i) { cc->Outputs().Index(0).Add(new int(0), Timestamp(i)); } @@ -555,16 +555,16 @@ class OutputOneAtATimeSourceCalculator : public CalculatorBase { public: static constexpr int kNumOutputPackets = 1000; - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { if (index_ < kNumOutputPackets) { cc->Outputs().Index(0).Add(new int(0), Timestamp(index_)); ++index_; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } return tool::StatusStop(); } @@ -582,18 +582,18 @@ class DecimatorCalculator : public CalculatorBase { public: static constexpr int kDecimationRatio = 101; - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).SetAny(); cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { if (index_ % kDecimationRatio == 0) { cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value()); } ++index_; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: @@ -605,23 +605,23 @@ REGISTER_CALCULATOR(DecimatorCalculator); // this calculator simply passes its input packets through, unchanged. 
class ErrorOnOpenCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).SetAny(); cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0)); cc->InputSidePackets().Tag("ERROR_ON_OPEN").Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { if (cc->InputSidePackets().Tag("ERROR_ON_OPEN").Get()) { - return ::mediapipe::NotFoundError("expected error"); + return mediapipe::NotFoundError("expected error"); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(ErrorOnOpenCalculator); @@ -631,64 +631,64 @@ REGISTER_CALCULATOR(ErrorOnOpenCalculator); // Process() method. The input stream and output stream have the integer type. class UnitDelayCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set(); cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { cc->Outputs().Index(0).Add(new int(0), Timestamp(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { const Packet& packet = cc->Inputs().Index(0).Value(); cc->Outputs().Index(0).AddPacket( packet.At(packet.Timestamp().NextAllowedInStream())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(UnitDelayCalculator); class UnitDelayUntimedCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set(); cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { cc->Outputs().Index(0).Add(new int(0), Timestamp::Min()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(UnitDelayUntimedCalculator); class FloatUnitDelayCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set(); cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { cc->Outputs().Index(0).Add(new float(0.0), Timestamp(0)); - return ::mediapipe::OkStatus(); + return 
+    return mediapipe::OkStatus();
   }

-  ::mediapipe::Status Process(CalculatorContext* cc) final {
+  mediapipe::Status Process(CalculatorContext* cc) final {
     const Packet& packet = cc->Inputs().Index(0).Value();
     cc->Outputs().Index(0).AddPacket(
         packet.At(packet.Timestamp().NextAllowedInStream()));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 };
 REGISTER_CALCULATOR(FloatUnitDelayCalculator);
@@ -697,19 +697,19 @@ REGISTER_CALCULATOR(FloatUnitDelayCalculator);
 // discards input packets in Process().
 class AssertEmptyInputInOpenCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     cc->Inputs().Index(0).SetAny();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

-  ::mediapipe::Status Open(CalculatorContext* cc) final {
+  mediapipe::Status Open(CalculatorContext* cc) final {
     RET_CHECK(cc->Inputs().Index(0).Value().IsEmpty());
     RET_CHECK_EQ(cc->Inputs().Index(0).Value().Timestamp(), Timestamp::Unset());
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

-  ::mediapipe::Status Process(CalculatorContext* cc) final {
-    return ::mediapipe::OkStatus();
+  mediapipe::Status Process(CalculatorContext* cc) final {
+    return mediapipe::OkStatus();
   }
 };
 REGISTER_CALCULATOR(AssertEmptyInputInOpenCalculator);
@@ -718,22 +718,22 @@ REGISTER_CALCULATOR(AssertEmptyInputInOpenCalculator);
 // 0, 1, ..., 9.
 class SlowCountingSinkCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     cc->Inputs().Index(0).Set<int>();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

-  ::mediapipe::Status Process(CalculatorContext* cc) override {
+  mediapipe::Status Process(CalculatorContext* cc) override {
     absl::SleepFor(absl::Milliseconds(10));
     int value = cc->Inputs().Index(0).Get<int>();
     CHECK_EQ(value, counter_);
     ++counter_;
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

-  ::mediapipe::Status Close(CalculatorContext* cc) override {
+  mediapipe::Status Close(CalculatorContext* cc) override {
     CHECK_EQ(10, counter_);
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

  private:
@@ -745,25 +745,25 @@ template <typename T>
 class TypedStatusHandler : public StatusHandler {
  public:
   ~TypedStatusHandler() override = 0;
-  static ::mediapipe::Status FillExpectations(
+  static mediapipe::Status FillExpectations(
       const MediaPipeOptions& extendable_options,
       PacketTypeSet* input_side_packets) {
     input_side_packets->Index(0).Set<T>();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

-  static ::mediapipe::Status HandlePreRunStatus(
+  static mediapipe::Status HandlePreRunStatus(
       const MediaPipeOptions& extendable_options,
       const PacketSet& input_side_packets,  //
-      const ::mediapipe::Status& pre_run_status) {
-    return ::mediapipe::OkStatus();
+      const mediapipe::Status& pre_run_status) {
+    return mediapipe::OkStatus();
   }

-  static ::mediapipe::Status HandleStatus(
+  static mediapipe::Status HandleStatus(
      const MediaPipeOptions& extendable_options,
      const PacketSet& input_side_packets,  //
-     const ::mediapipe::Status& run_status) {
-    return ::mediapipe::OkStatus();
+     const mediapipe::Status& run_status) {
+    return mediapipe::OkStatus();
   }
 };
 typedef TypedStatusHandler<std::string> StringStatusHandler;
@@ -774,22 +774,22 @@ REGISTER_STATUS_HANDLER(Uint32StatusHandler);

 // A std::string generator that will succeed.
 class StaticCounterStringGenerator : public PacketGenerator {
  public:
-  static ::mediapipe::Status FillExpectations(
+  static mediapipe::Status FillExpectations(
       const PacketGeneratorOptions& extendable_options,
       PacketTypeSet* input_side_packets, PacketTypeSet* output_side_packets) {
     for (int i = 0; i < input_side_packets->NumEntries(); ++i) {
       input_side_packets->Index(i).SetAny();
     }
     output_side_packets->Index(0).Set<std::string>();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

-  static ::mediapipe::Status Generate(
+  static mediapipe::Status Generate(
       const PacketGeneratorOptions& extendable_options,
       const PacketSet& input_side_packets, PacketSet* output_side_packets) {
     output_side_packets->Index(0) = MakePacket<std::string>("fixed_string");
     ++num_packets_generated_;
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

   static int NumPacketsGenerated() { return num_packets_generated_; }
@@ -806,20 +806,20 @@ REGISTER_PACKET_GENERATOR(StaticCounterStringGenerator);
 // called. Both claim to output strings but instead always fail.
 class FailingPacketGenerator : public PacketGenerator {
  public:
-  static ::mediapipe::Status FillExpectations(
+  static mediapipe::Status FillExpectations(
       const PacketGeneratorOptions& extendable_options,
       PacketTypeSet* input_side_packets, PacketTypeSet* output_side_packets) {
     for (int i = 0; i < input_side_packets->NumEntries(); ++i) {
       input_side_packets->Index(i).SetAny();
     }
     output_side_packets->Index(0).Set<std::string>();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

-  static ::mediapipe::Status Generate(
+  static mediapipe::Status Generate(
       const PacketGeneratorOptions& extendable_options,
       const PacketSet& input_side_packets, PacketSet* output_side_packets) {
-    return ::mediapipe::UnknownError("this always fails.");
+    return mediapipe::UnknownError("this always fails.");
   }
 };
 REGISTER_PACKET_GENERATOR(FailingPacketGenerator);
@@ -827,28 +827,28 @@ REGISTER_PACKET_GENERATOR(FailingPacketGenerator);

 // Passes the integer through if it is positive.
 class EnsurePositivePacketGenerator : public PacketGenerator {
  public:
-  static ::mediapipe::Status FillExpectations(
+  static mediapipe::Status FillExpectations(
       const PacketGeneratorOptions& extendable_options,
       PacketTypeSet* input_side_packets, PacketTypeSet* output_side_packets) {
     for (int i = 0; i < input_side_packets->NumEntries(); ++i) {
       input_side_packets->Index(i).Set<int>();
       output_side_packets->Index(i).Set<int>();
     }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

-  static ::mediapipe::Status Generate(
+  static mediapipe::Status Generate(
       const PacketGeneratorOptions& extendable_options,
       const PacketSet& input_side_packets, PacketSet* output_side_packets) {
     for (int i = 0; i < input_side_packets.NumEntries(); ++i) {
       if (input_side_packets.Index(i).Get<int>() > 0) {
         output_side_packets->Index(i) = input_side_packets.Index(i);
       } else {
-        return ::mediapipe::UnknownError(
+        return mediapipe::UnknownError(
             absl::StrCat("Integer ", i, " was not positive."));
       }
     }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 };
 REGISTER_PACKET_GENERATOR(EnsurePositivePacketGenerator);
@@ -865,32 +865,32 @@ class FailableStatusHandler : public StatusHandler {
     kFailPostRun = 2,
   };

-  static ::mediapipe::Status FillExpectations(
+  static mediapipe::Status FillExpectations(
       const MediaPipeOptions& extendable_options,
       PacketTypeSet* input_side_packets) {
     input_side_packets->Index(0).Set<int>();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
-  static ::mediapipe::Status HandlePreRunStatus(
+  static mediapipe::Status HandlePreRunStatus(
       const MediaPipeOptions& extendable_options,
       const PacketSet& input_side_packets,
-      const ::mediapipe::Status& pre_run_status) {
+      const mediapipe::Status& pre_run_status) {
     if (input_side_packets.Index(0).Get<int>() == kFailPreRun) {
-      return ::mediapipe::UnknownError(
+      return mediapipe::UnknownError(
           "FailableStatusHandler failing pre run as intended.");
     } else {
-      return ::mediapipe::OkStatus();
+      return mediapipe::OkStatus();
     }
   }
-  static ::mediapipe::Status HandleStatus(
+  static mediapipe::Status HandleStatus(
       const MediaPipeOptions& extendable_options,
       const PacketSet& input_side_packets,
-      const ::mediapipe::Status& run_status) {
+      const mediapipe::Status& run_status) {
     if (input_side_packets.Index(0).Get<int>() == kFailPostRun) {
-      return ::mediapipe::UnknownError(
+      return mediapipe::UnknownError(
           "FailableStatusHandler failing post run as intended.");
     } else {
-      return ::mediapipe::OkStatus();
+      return mediapipe::OkStatus();
     }
   }
 };
@@ -898,13 +898,13 @@ REGISTER_STATUS_HANDLER(FailableStatusHandler);

 class FailingSourceCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     cc->Outputs().Index(0).Set<int>();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

-  ::mediapipe::Status Process(CalculatorContext* cc) override {
-    return ::mediapipe::UnknownError("this always fails.");
+  mediapipe::Status Process(CalculatorContext* cc) override {
+    return mediapipe::UnknownError("this always fails.");
   }
 };
 REGISTER_CALCULATOR(FailingSourceCalculator);
@@ -932,24 +932,24 @@ class SemaphoreCalculator : public CalculatorBase {
  public:
   using Semaphore = AtomicSemaphore;

-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     cc->Inputs().Index(0).SetAny();
     cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0));
cc->InputSidePackets().Tag("POST_SEM").Set(); cc->InputSidePackets().Tag("WAIT_SEM").Set(); cc->SetTimestampOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { - return ::mediapipe::OkStatus(); + mediapipe::Status Open(CalculatorContext* cc) override { + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { cc->InputSidePackets().Tag("POST_SEM").Get()->Release(1); cc->InputSidePackets().Tag("WAIT_SEM").Get()->Acquire(1); cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(SemaphoreCalculator); @@ -958,11 +958,11 @@ REGISTER_CALCULATOR(SemaphoreCalculator); // and takes 20 milliseconds to run. class OneShot20MsCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { - return ::mediapipe::OkStatus(); + static mediapipe::Status GetContract(CalculatorContract* cc) { + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { absl::SleepFor(absl::Milliseconds(20)); return tool::StatusStop(); } @@ -973,12 +973,12 @@ REGISTER_CALCULATOR(OneShot20MsCalculator); // pthread_self() (the pthread id of the current thread). class PthreadSelfSourceCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { cc->Outputs().Index(0).AddPacket( MakePacket(pthread_self()).At(Timestamp(0))); return tool::StatusStop(); @@ -990,38 +990,38 @@ REGISTER_CALCULATOR(PthreadSelfSourceCalculator); // It outputs five int packets with timestamps 0, 1, 2, 3, 4. class CheckInputTimestampSourceCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // InputTimestamp() returns Timestamp::Unstarted() in Open() for both source // and non-source nodes. - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { RET_CHECK_EQ(cc->InputTimestamp(), Timestamp::Unstarted()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // InputTimestamp() always returns Timestamp(0) in Process() for source // nodes. - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { RET_CHECK_EQ(cc->InputTimestamp(), Timestamp(0)); cc->Outputs().Index(0).Add(new int(count_), Timestamp(count_)); ++count_; if (count_ >= 5) { return tool::StatusStop(); } else { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // InputTimestamp() returns Timestamp::Done() in Close() for both source // and non-source nodes. 
-  ::mediapipe::Status Close(CalculatorContext* cc) final {
+  mediapipe::Status Close(CalculatorContext* cc) final {
     // Must use CHECK instead of RET_CHECK in Close(), because the framework
     // may call the Close() method of a source node with .IgnoreError().
     CHECK_EQ(cc->InputTimestamp(), Timestamp::Done());
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

  private:
@@ -1033,33 +1033,33 @@ REGISTER_CALCULATOR(CheckInputTimestampSourceCalculator);
 // It expects to consume the output of a CheckInputTimestampSourceCalculator.
 class CheckInputTimestampSinkCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     cc->Inputs().Index(0).Set<int>();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

   // InputTimestamp() returns Timestamp::Unstarted() in Open() for both source
   // and non-source nodes.
-  ::mediapipe::Status Open(CalculatorContext* cc) final {
+  mediapipe::Status Open(CalculatorContext* cc) final {
     RET_CHECK_EQ(cc->InputTimestamp(), Timestamp::Unstarted());
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

   // InputTimestamp() returns the timestamp of input packets in Process() for
   // non-source nodes.
-  ::mediapipe::Status Process(CalculatorContext* cc) final {
+  mediapipe::Status Process(CalculatorContext* cc) final {
     RET_CHECK_EQ(cc->InputTimestamp(),
                  cc->Inputs().Index(0).Value().Timestamp());
     RET_CHECK_EQ(cc->InputTimestamp(), Timestamp(count_));
     ++count_;
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

   // InputTimestamp() returns Timestamp::Done() in Close() for both source
   // and non-source nodes.
-  ::mediapipe::Status Close(CalculatorContext* cc) final {
+  mediapipe::Status Close(CalculatorContext* cc) final {
     RET_CHECK_EQ(cc->InputTimestamp(), Timestamp::Done());
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

  private:
@@ -1072,34 +1072,34 @@ REGISTER_CALCULATOR(CheckInputTimestampSinkCalculator);
 // the framework.
 class CheckInputTimestamp2SourceCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     cc->Outputs().Index(0).Set<int>();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

   // InputTimestamp() returns Timestamp::Unstarted() in Open() for both source
   // and non-source nodes.
-  ::mediapipe::Status Open(CalculatorContext* cc) final {
+  mediapipe::Status Open(CalculatorContext* cc) final {
     RET_CHECK_EQ(cc->InputTimestamp(), Timestamp::Unstarted());
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

   // InputTimestamp() always returns Timestamp(0) in Process() for source
   // nodes.
-  ::mediapipe::Status Process(CalculatorContext* cc) final {
+  mediapipe::Status Process(CalculatorContext* cc) final {
     RET_CHECK_EQ(cc->InputTimestamp(), Timestamp(0));
     cc->Outputs().Index(0).Add(new int(count_), Timestamp(count_));
     ++count_;
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

   // InputTimestamp() returns Timestamp::Done() in Close() for both source
   // and non-source nodes.
-  ::mediapipe::Status Close(CalculatorContext* cc) final {
+  mediapipe::Status Close(CalculatorContext* cc) final {
     // Must use CHECK instead of RET_CHECK in Close(), because the framework
     // may call the Close() method of a source node with .IgnoreError().
     CHECK_EQ(cc->InputTimestamp(), Timestamp::Done());
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

  private:
@@ -1112,21 +1112,21 @@ REGISTER_CALCULATOR(CheckInputTimestamp2SourceCalculator);
 // It returns tool::StatusStop() after consuming five input packets.
 class CheckInputTimestamp2SinkCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     cc->Inputs().Index(0).Set<int>();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

   // InputTimestamp() returns Timestamp::Unstarted() in Open() for both source
   // and non-source nodes.
-  ::mediapipe::Status Open(CalculatorContext* cc) final {
+  mediapipe::Status Open(CalculatorContext* cc) final {
     RET_CHECK_EQ(cc->InputTimestamp(), Timestamp::Unstarted());
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

   // InputTimestamp() returns the timestamp of input packets in Process() for
   // non-source nodes.
-  ::mediapipe::Status Process(CalculatorContext* cc) final {
+  mediapipe::Status Process(CalculatorContext* cc) final {
     RET_CHECK_EQ(cc->InputTimestamp(),
                  cc->Inputs().Index(0).Value().Timestamp());
     RET_CHECK_EQ(cc->InputTimestamp(), Timestamp(count_));
@@ -1134,15 +1134,15 @@ class CheckInputTimestamp2SinkCalculator : public CalculatorBase {
     if (count_ >= 5) {
       return tool::StatusStop();
     } else {
-      return ::mediapipe::OkStatus();
+      return mediapipe::OkStatus();
     }
   }

   // InputTimestamp() returns Timestamp::Done() in Close() for both source
   // and non-source nodes.
-  ::mediapipe::Status Close(CalculatorContext* cc) final {
+  mediapipe::Status Close(CalculatorContext* cc) final {
     RET_CHECK_EQ(cc->InputTimestamp(), Timestamp::Done());
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

  private:
@@ -1154,16 +1154,16 @@ REGISTER_CALCULATOR(CheckInputTimestamp2SinkCalculator);
 // output side packet.
 class OutputSidePacketInProcessCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     cc->Inputs().Index(0).SetAny();
     cc->OutputSidePackets().Index(0).SetSameAs(&cc->Inputs().Index(0));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

-  ::mediapipe::Status Process(CalculatorContext* cc) final {
+  mediapipe::Status Process(CalculatorContext* cc) final {
     cc->OutputSidePackets().Index(0).Set(
         cc->Inputs().Index(0).Value().At(Timestamp::Unset()));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 };
 REGISTER_CALCULATOR(OutputSidePacketInProcessCalculator);
@@ -1172,21 +1172,21 @@ REGISTER_CALCULATOR(OutputSidePacketInProcessCalculator);
 // sends the packet to the single output stream with the same timestamp.
 class SimpleMuxCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     RET_CHECK_EQ(cc->Inputs().NumEntries(), 2);
     cc->Inputs().Index(0).SetAny();
     cc->Inputs().Index(1).SetSameAs(&cc->Inputs().Index(0));
     RET_CHECK_EQ(cc->Outputs().NumEntries(), 1);
     cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

-  ::mediapipe::Status Open(CalculatorContext* cc) final {
+  mediapipe::Status Open(CalculatorContext* cc) final {
     data_input_base_ = cc->Inputs().BeginId();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

-  ::mediapipe::Status Process(CalculatorContext* cc) final {
+  mediapipe::Status Process(CalculatorContext* cc) final {
     int select_packet_index = -1;
     if (!cc->Inputs().Index(0).IsEmpty()) {
       select_packet_index = 0;
@@ -1197,7 +1197,7 @@ class SimpleMuxCalculator : public CalculatorBase {
       cc->Outputs().Index(0).AddPacket(
           cc->Inputs().Get(data_input_base_ + select_packet_index).Value());
     }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

  private:
@@ -1209,51 +1209,51 @@ REGISTER_CALCULATOR(SimpleMuxCalculator);
 // by modifying the int in its input side packet.
 class IncrementingStatusHandler : public StatusHandler {
  public:
-  static ::mediapipe::Status FillExpectations(
+  static mediapipe::Status FillExpectations(
       const MediaPipeOptions& extendable_options,
       PacketTypeSet* input_side_packets) {
     input_side_packets->Tag("EXTRA").SetAny().Optional();
     input_side_packets->Tag("COUNTER1").Set<std::unique_ptr<int>>();
     input_side_packets->Tag("COUNTER2").Set<std::unique_ptr<int>>();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

-  static ::mediapipe::Status HandlePreRunStatus(
+  static mediapipe::Status HandlePreRunStatus(
       const MediaPipeOptions& extendable_options,
       const PacketSet& input_side_packets,  //
-      const ::mediapipe::Status& pre_run_status) {
+      const mediapipe::Status& pre_run_status) {
     int* counter = GetFromUniquePtr<int>(input_side_packets.Tag("COUNTER1"));
     (*counter)++;
     return pre_run_status_result_;
   }

-  static ::mediapipe::Status HandleStatus(
+  static mediapipe::Status HandleStatus(
       const MediaPipeOptions& extendable_options,
       const PacketSet& input_side_packets,  //
-      const ::mediapipe::Status& run_status) {
+      const mediapipe::Status& run_status) {
     int* counter = GetFromUniquePtr<int>(input_side_packets.Tag("COUNTER2"));
     (*counter)++;
     return post_run_status_result_;
   }

-  static void SetPreRunStatusResult(const ::mediapipe::Status& status) {
+  static void SetPreRunStatusResult(const mediapipe::Status& status) {
     pre_run_status_result_ = status;
   }

-  static void SetPostRunStatusResult(const ::mediapipe::Status& status) {
+  static void SetPostRunStatusResult(const mediapipe::Status& status) {
     post_run_status_result_ = status;
   }

  private:
   // Return values of HandlePreRunStatus() and HandleStatus(), respectively.
-  static ::mediapipe::Status pre_run_status_result_;
-  static ::mediapipe::Status post_run_status_result_;
+  static mediapipe::Status pre_run_status_result_;
+  static mediapipe::Status post_run_status_result_;
 };

-::mediapipe::Status IncrementingStatusHandler::pre_run_status_result_ =
-    ::mediapipe::OkStatus();
-::mediapipe::Status IncrementingStatusHandler::post_run_status_result_ =
-    ::mediapipe::OkStatus();
+mediapipe::Status IncrementingStatusHandler::pre_run_status_result_ =
+    mediapipe::OkStatus();
+mediapipe::Status IncrementingStatusHandler::post_run_status_result_ =
+    mediapipe::OkStatus();

 REGISTER_STATUS_HANDLER(IncrementingStatusHandler);
@@ -1301,7 +1301,7 @@ class CurrentThreadExecutor : public Executor {
 // Returns a CalculatorGraphConfig used by tests.
 CalculatorGraphConfig GetConfig() {
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        # The graph configuration. We list the nodes in an arbitrary (not
        # topologically-sorted) order to verify that CalculatorGraph can
        # handle such configurations.
@@ -1601,18 +1601,18 @@ TEST(CalculatorGraph, RunsCorrectlyWithMultipleExecutors) {

 // Packet generator for an arbitrary uint64 packet.
 class Uint64PacketGenerator : public PacketGenerator {
  public:
-  static ::mediapipe::Status FillExpectations(
+  static mediapipe::Status FillExpectations(
       const PacketGeneratorOptions& extendable_options,
       PacketTypeSet* input_side_packets, PacketTypeSet* output_side_packets) {
     output_side_packets->Index(0).Set<uint64>();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

-  static ::mediapipe::Status Generate(
+  static mediapipe::Status Generate(
       const PacketGeneratorOptions& extendable_options,
       const PacketSet& input_side_packets, PacketSet* output_side_packets) {
     output_side_packets->Index(0) = Adopt(new uint64(15LL << 32 | 5));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 };
 REGISTER_PACKET_GENERATOR(Uint64PacketGenerator);
@@ -1757,7 +1757,7 @@ TEST(CalculatorGraph, StatusHandlerInputVerification) {
   // Status handlers with all inputs present should be OK.
   auto graph = absl::make_unique<CalculatorGraph>();
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        packet_generator {
          packet_generator: "StaticCounterStringGenerator"
          output_side_packet: "created_by_factory"
@@ -1806,7 +1806,7 @@ TEST(CalculatorGraph, StatusHandlerInputVerification) {
   invalid_handler->set_status_handler("Uint32StatusHandler");
   invalid_handler->add_input_side_packet("created_by_factory");
   graph.reset(new CalculatorGraph());
-  ::mediapipe::Status status = graph->Initialize(config);
+  mediapipe::Status status = graph->Initialize(config);
   EXPECT_THAT(status.message(),
               testing::AllOf(testing::HasSubstr("Uint32StatusHandler"),
                              // The problematic input side packet.
@@ -1841,7 +1841,7 @@ TEST(CalculatorGraph, StatusHandlerInputVerification) {
 TEST(CalculatorGraph, GenerateInInitialize) {
   CalculatorGraph graph;
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        packet_generator {
          packet_generator: "StaticCounterStringGenerator"
          input_side_packet: "created_by_factory"
@@ -1921,7 +1921,7 @@ void ResetCounters(std::map<std::string, Packet>* input_side_packets) {
 TEST(CalculatorGraph, HandlersRun) {
   std::unique_ptr<CalculatorGraph> graph(new CalculatorGraph());
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        packet_generator {
          packet_generator: "FailingPacketGenerator"
          output_side_packet: "unavailable"
@@ -2037,17 +2037,17 @@ TEST(CalculatorGraph, HandlersRun) {
   EXPECT_EQ(1, *GetFromUniquePtr<int>(
                    input_side_packets.at("unavailable_input_counter2")));

-  ::mediapipe::Status run_status;
+  mediapipe::Status run_status;

   // Make status handlers fail. The graph should fail.
   // First, when the PRE_run fails
   IncrementingStatusHandler::SetPreRunStatusResult(
-      ::mediapipe::InternalError("Fail at pre-run"));
+      mediapipe::InternalError("Fail at pre-run"));
   graph.reset(new CalculatorGraph());
   MP_ASSERT_OK(graph->Initialize(config));
   ResetCounters(&input_side_packets);
   run_status = graph->Run(input_side_packets);
-  EXPECT_TRUE(run_status.code() == ::mediapipe::StatusCode::kInternal);
+  EXPECT_TRUE(run_status.code() == mediapipe::StatusCode::kInternal);
   EXPECT_THAT(run_status.ToString(), testing::HasSubstr("Fail at pre-run"));
   EXPECT_EQ(1, *GetFromUniquePtr<int>(input_side_packets.at("no_input_counter1")));
@@ -2063,14 +2063,14 @@ TEST(CalculatorGraph, HandlersRun) {
             input_side_packets.at("unavailable_input_counter2")));

   // Second, when the POST_run fails
-  IncrementingStatusHandler::SetPreRunStatusResult(::mediapipe::OkStatus());
+  IncrementingStatusHandler::SetPreRunStatusResult(mediapipe::OkStatus());
   IncrementingStatusHandler::SetPostRunStatusResult(
-      ::mediapipe::InternalError("Fail at post-run"));
+      mediapipe::InternalError("Fail at post-run"));
   graph.reset(new CalculatorGraph());
   MP_ASSERT_OK(graph->Initialize(config));
   ResetCounters(&input_side_packets);
   run_status = graph->Run(input_side_packets);
-  EXPECT_TRUE(run_status.code() == ::mediapipe::StatusCode::kInternal);
+  EXPECT_TRUE(run_status.code() == mediapipe::StatusCode::kInternal);
   EXPECT_THAT(run_status.ToString(), testing::HasSubstr("Fail at post-run"));
   EXPECT_EQ(1, *GetFromUniquePtr<int>(input_side_packets.at("no_input_counter1")));
@@ -2087,11 +2087,11 @@ TEST(CalculatorGraph, HandlersRun) {
 }

 // Test that calling SetOffset() in Calculator::Process() results in the
-// ::mediapipe::StatusCode::kFailedPrecondition error.
+// mediapipe::StatusCode::kFailedPrecondition error.
 TEST(CalculatorGraph, SetOffsetInProcess) {
   CalculatorGraph graph;
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        input_stream: 'in'
        node {
         calculator: 'SetOffsetInProcessCalculator'
@@ -2104,16 +2104,16 @@ TEST(CalculatorGraph, SetOffsetInProcess) {
   MP_EXPECT_OK(graph.StartRun({}));
   MP_EXPECT_OK(
       graph.AddPacketToInputStream("in", MakePacket<int>(0).At(Timestamp(0))));
-  ::mediapipe::Status status = graph.WaitUntilIdle();
+  mediapipe::Status status = graph.WaitUntilIdle();
   EXPECT_FALSE(status.ok());
-  EXPECT_EQ(::mediapipe::StatusCode::kFailedPrecondition, status.code());
+  EXPECT_EQ(mediapipe::StatusCode::kFailedPrecondition, status.code());
 }

 // Test that MediaPipe releases input packets when it is done with them.
 TEST(CalculatorGraph, InputPacketLifetime) {
   CalculatorGraph graph;
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        input_stream: 'in'
        node {
         calculator: 'PassThroughCalculator'
@@ -2156,7 +2156,7 @@ TEST(CalculatorGraph, IfThenElse) {
   // of the two branches different.
   CalculatorGraph graph;
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        input_stream: 'in'
        input_stream: 'select'
        node {
@@ -2262,7 +2262,7 @@ TEST(CalculatorGraph, IfThenElse) {
 // for the unselected outputs.
 class DemuxUntimedCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     RET_CHECK_EQ(cc->Inputs().NumEntries(), 2);
     cc->Inputs().Tag("INPUT").SetAny();
     cc->Inputs().Tag("SELECT").Set<int>();
@@ -2270,9 +2270,9 @@ class DemuxUntimedCalculator : public CalculatorBase {
          id < cc->Outputs().EndId("OUTPUT"); ++id) {
       cc->Outputs().Get(id).SetSameAs(&cc->Inputs().Tag("INPUT"));
     }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
-  ::mediapipe::Status Process(CalculatorContext* cc) final {
+  mediapipe::Status Process(CalculatorContext* cc) final {
     int index = cc->Inputs().Tag("SELECT").Get<int>();
     if (!cc->Inputs().Tag("INPUT").IsEmpty()) {
       cc->Outputs()
@@ -2283,7 +2283,7 @@ class DemuxUntimedCalculator : public CalculatorBase {
           .Get("OUTPUT", index)
           .SetNextTimestampBound(cc->InputTimestamp() + 1);
     }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 };
 REGISTER_CALCULATOR(DemuxUntimedCalculator);
@@ -2299,7 +2299,7 @@ TEST(CalculatorGraph, IfThenElse2) {
   // of the two branches different.
   CalculatorGraph graph;
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        input_stream: 'in'
        input_stream: 'select'
        node {
@@ -2420,7 +2420,7 @@ TEST(CalculatorGraph, ClosedSourceNodeShouldNotBeUnthrottled) {
   // the source node filled an input stream and the input stream changes from
   // being "full" to "not full".
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        num_threads: 1
        max_queue_size: 100
        node {
@@ -2461,7 +2461,7 @@ TEST(CalculatorGraph, ClosedSourceNodeShouldNotBeUnthrottled) {
 // The scheduler should be able to run the graph from this initial state.
 TEST(CalculatorGraph, OutputPacketInOpen) {
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        max_queue_size: 1
        node {
         calculator: 'GlobalCountSourceCalculator'
@@ -2510,7 +2510,7 @@ TEST(CalculatorGraph, OutputPacketInOpen) {
 // The scheduler must schedule a throttled source node from the beginning.
 TEST(CalculatorGraph, OutputPacketInOpen2) {
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        max_queue_size: 1
        node {
         calculator: 'GlobalCountSourceCalculator'
@@ -2559,7 +2559,7 @@ TEST(CalculatorGraph, OutputPacketInOpen2) {
 // upstream calculator outputs a packet in Open().
 TEST(CalculatorGraph, EmptyInputInOpen) {
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        max_queue_size: 1
        node {
         calculator: 'GlobalCountSourceCalculator'
@@ -2594,7 +2594,7 @@ TEST(CalculatorGraph, EmptyInputInOpen) {
 // Test for b/33568859.
 TEST(CalculatorGraph, UnthrottleRespectsLayers) {
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        max_queue_size: 1
        node {
         calculator: 'GlobalCountSourceCalculator'
@@ -2660,7 +2660,7 @@ TEST(CalculatorGraph, UnthrottleRespectsLayers) {
 // so far. The graph has one cycle.
 TEST(CalculatorGraph, Cycle) {
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        node {
         calculator: 'GlobalCountSourceCalculator'
         input_side_packet: 'global_counter'
@@ -2711,7 +2711,7 @@ TEST(CalculatorGraph, Cycle) {
 // packet timestamps ignored.
 TEST(CalculatorGraph, CycleUntimed) {
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        input_stream_handler {
         input_stream_handler: 'BarrierInputStreamHandler'
        }
@@ -2761,7 +2761,7 @@ TEST(CalculatorGraph, CycleUntimed) {
 // The graph has two cycles.
 TEST(CalculatorGraph, DirectFormI) {
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        node {
         calculator: 'GlobalCountSourceCalculator'
         input_side_packet: 'global_counter'
@@ -2868,7 +2868,7 @@ TEST(CalculatorGraph, DirectFormI) {
 // The graph has two cycles.
 TEST(CalculatorGraph, DirectFormII) {
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        node {
         calculator: 'GlobalCountSourceCalculator'
         input_side_packet: 'global_counter'
@@ -2969,7 +2969,7 @@ TEST(CalculatorGraph, DotProduct) {
   // The use of BarrierInputStreamHandler in this graph aligns the input
   // packets to a calculator by arrival order rather than by timestamp.
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        input_stream_handler {
         input_stream_handler: 'BarrierInputStreamHandler'
        }
@@ -3065,7 +3065,7 @@ TEST(CalculatorGraph, DotProduct) {

 TEST(CalculatorGraph, TerminatesOnCancelWithOpenGraphInputStreams) {
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        node {
         calculator: 'PassThroughCalculator'
         input_stream: 'in_a'
@@ -3088,13 +3088,13 @@ TEST(CalculatorGraph, TerminatesOnCancelWithOpenGraphInputStreams) {
   graph.Cancel();
   // This tests that the graph doesn't deadlock on WaitUntilDone (because
   // the scheduler thread is sleeping).
-  ::mediapipe::Status status = graph.WaitUntilDone();
-  EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kCancelled);
+  mediapipe::Status status = graph.WaitUntilDone();
+  EXPECT_EQ(status.code(), mediapipe::StatusCode::kCancelled);
 }

 TEST(CalculatorGraph, TerminatesOnCancelAfterPause) {
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        node {
         calculator: 'PassThroughCalculator'
         input_stream: 'in'
@@ -3117,8 +3117,8 @@ TEST(CalculatorGraph, TerminatesOnCancelAfterPause) {
   graph.Pause();
   // This tests that the graph doesn't deadlock on WaitUntilDone (because
   // the scheduler thread is sleeping).
-  ::mediapipe::Status status = graph.WaitUntilDone();
-  EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kCancelled);
+  mediapipe::Status status = graph.WaitUntilDone();
+  EXPECT_EQ(status.code(), mediapipe::StatusCode::kCancelled);
 }

 // A PacketGenerator that simply passes its input Packets through
@@ -3127,11 +3127,11 @@ TEST(CalculatorGraph, TerminatesOnCancelAfterPause) {
 // also be ignored.
 class PassThroughGenerator : public PacketGenerator {
  public:
-  static ::mediapipe::Status FillExpectations(
+  static mediapipe::Status FillExpectations(
       const PacketGeneratorOptions& extendable_options, PacketTypeSet* inputs,
       PacketTypeSet* outputs) {
     if (!inputs->TagMap()->SameAs(*outputs->TagMap())) {
-      return ::mediapipe::InvalidArgumentError(
+      return mediapipe::InvalidArgumentError(
           "Input and outputs to PassThroughGenerator must use the same tags "
           "and indexes.");
     }
@@ -3139,24 +3139,24 @@ class PassThroughGenerator : public PacketGenerator {
       inputs->Get(id).SetAny();
       outputs->Get(id).SetSameAs(&inputs->Get(id));
     }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

-  static ::mediapipe::Status Generate(
+  static mediapipe::Status Generate(
       const PacketGeneratorOptions& extendable_options,
       const PacketSet& input_side_packets, PacketSet* output_side_packets) {
     for (CollectionItemId id = input_side_packets.BeginId();
          id < input_side_packets.EndId(); ++id) {
       output_side_packets->Get(id) = input_side_packets.Get(id);
     }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 };
 REGISTER_PACKET_GENERATOR(PassThroughGenerator);

 TEST(CalculatorGraph, RecoverAfterRunError) {
   PacketGeneratorGraph generator_graph;
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        node {
         name: 'calculator1'
         calculator: 'CountingSourceCalculator'
@@ -3183,7 +3183,7 @@ TEST(CalculatorGraph, RecoverAfterRunError) {
   MP_ASSERT_OK(graph.ObserveOutputStream("count1",
                                          [&packet_count](const Packet& packet) {
                                            ++packet_count;
-                                           return ::mediapipe::OkStatus();
+                                           return mediapipe::OkStatus();
                                          }));
   // Set ERROR_COUNT higher than MAX_COUNT and hence the calculator will
   // finish successfully.
@@ -3277,7 +3277,7 @@ TEST(CalculatorGraph, RecoverAfterRunError) {
 TEST(CalculatorGraph, SetInputStreamMaxQueueSizeWorksSlowCalculator) {
   using Semaphore = SemaphoreCalculator::Semaphore;
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        node {
         calculator: 'SemaphoreCalculator'
         input_stream: 'in'
@@ -3328,9 +3328,9 @@ TEST(CalculatorGraph, SetInputStreamMaxQueueSizeWorksSlowCalculator) {
     MP_EXPECT_OK(
         graph.AddPacketToInputStream("in", MakePacket<int>(i).At(timestamp)));
     // We should be prevented from adding another, since the queue is now full.
-    ::mediapipe::Status status = graph.AddPacketToInputStream(
+    mediapipe::Status status = graph.AddPacketToInputStream(
         "in", MakePacket<int>(i).At(timestamp + 1));
-    EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kUnavailable);
+    EXPECT_EQ(status.code(), mediapipe::StatusCode::kUnavailable);
     // Allow calculator to complete its Process call.
     calc_can_exit_process.Release(1);
   }
@@ -3370,7 +3370,7 @@ TEST(CalculatorGraph, AddPacketNoBusyLoop) {
   //                  out
   //
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        input_stream: 'in'
        max_queue_size: 1
        node {
@@ -3393,7 +3393,7 @@ TEST(CalculatorGraph, AddPacketNoBusyLoop) {
   MP_ASSERT_OK(
       graph.ObserveOutputStream("out", [&out_packets](const Packet& packet) {
         out_packets.push_back(packet);
-        return ::mediapipe::OkStatus();
+        return mediapipe::OkStatus();
       }));
   MP_ASSERT_OK(graph.StartRun({}));

@@ -3438,29 +3438,29 @@ TEST(CalculatorGraph, AddPacketNoBusyLoop) {

 namespace nested_ns {

-typedef std::function<::mediapipe::Status(const InputStreamShardSet&,
-                                          OutputStreamShardSet*)>
+typedef std::function<mediapipe::Status(const InputStreamShardSet&,
+                                        OutputStreamShardSet*)>
     ProcessFunction;

 // A Calculator that delegates its Process function to a callback function.
 class ProcessCallbackCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     for (int i = 0; i < cc->Inputs().NumEntries(); ++i) {
       cc->Inputs().Index(i).SetAny();
       cc->Outputs().Index(i).SetSameAs(&cc->Inputs().Index(0));
     }
     cc->InputSidePackets().Index(0).Set<std::unique_ptr<ProcessFunction>>();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

-  ::mediapipe::Status Open(CalculatorContext* cc) final {
+  mediapipe::Status Open(CalculatorContext* cc) final {
     callback_ = *GetFromUniquePtr<ProcessFunction>(cc->InputSidePackets().Index(0));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

-  ::mediapipe::Status Process(CalculatorContext* cc) final {
+  mediapipe::Status Process(CalculatorContext* cc) final {
     return callback_(cc->Inputs(), &(cc->Outputs()));
   }
@@ -3492,20 +3492,20 @@ TEST(CalculatorGraph, CalculatorInNamepsace) {
 }

 // A ProcessFunction that passes through all packets.
-::mediapipe::Status DoProcess(const InputStreamShardSet& inputs,
-                              OutputStreamShardSet* outputs) {
+mediapipe::Status DoProcess(const InputStreamShardSet& inputs,
+                            OutputStreamShardSet* outputs) {
   for (int i = 0; i < inputs.NumEntries(); ++i) {
     if (!inputs.Index(i).Value().IsEmpty()) {
       outputs->Index(i).AddPacket(inputs.Index(i).Value());
     }
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }

 TEST(CalculatorGraph, ObserveOutputStream) {
   const int max_count = 10;
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        node {
         calculator: 'CountingSourceCalculator'
         output_stream: 'count'
@@ -3532,12 +3532,12 @@ TEST(CalculatorGraph, ObserveOutputStream) {
   MP_ASSERT_OK(graph.ObserveOutputStream(
       "count", [&count_packets](const Packet& packet) {
         count_packets.push_back(packet);
-        return ::mediapipe::OkStatus();
+        return mediapipe::OkStatus();
       }));
   MP_ASSERT_OK(
       graph.ObserveOutputStream("out", [&out_packets](const Packet& packet) {
         out_packets.push_back(packet);
-        return ::mediapipe::OkStatus();
+        return mediapipe::OkStatus();
       }));
   MP_ASSERT_OK(graph.Run());
   ASSERT_EQ(max_count, count_packets.size());
@@ -3554,10 +3554,10 @@ TEST(CalculatorGraph, ObserveOutputStream) {

 class PassThroughSubgraph : public Subgraph {
  public:
-  ::mediapipe::StatusOr<CalculatorGraphConfig> GetConfig(
+  mediapipe::StatusOr<CalculatorGraphConfig> GetConfig(
       const SubgraphOptions& options) override {
     CalculatorGraphConfig config =
-        ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+        mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
          input_stream: 'INPUT:input'
          output_stream: 'OUTPUT:output'
          node {
@@ -3574,7 +3574,7 @@ REGISTER_MEDIAPIPE_GRAPH(PassThroughSubgraph);
 TEST(CalculatorGraph, ObserveOutputStreamSubgraph) {
   const int max_count = 10;
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        node {
         calculator: 'CountingSourceCalculator'
         output_stream: 'count'
@@ -3594,7 +3594,7 @@ TEST(CalculatorGraph, ObserveOutputStreamSubgraph) {
   MP_ASSERT_OK(
       graph.ObserveOutputStream("out", [&out_packets](const Packet& packet) {
         out_packets.push_back(packet);
-        return ::mediapipe::OkStatus();
+        return mediapipe::OkStatus();
       }));
   MP_ASSERT_OK(graph.Run());
   ASSERT_EQ(max_count, out_packets.size());
@@ -3608,7 +3608,7 @@ TEST(CalculatorGraph, ObserveOutputStreamError) {
   const int max_count = 10;
   const int fail_count = 6;
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        node {
         calculator: 'CountingSourceCalculator'
         output_stream: 'count'
@@ -3636,17 +3636,17 @@ TEST(CalculatorGraph, ObserveOutputStreamError) {
       "count", [&count_packets](const Packet& packet) {
         count_packets.push_back(packet);
         if (count_packets.size() >= fail_count) {
-          return ::mediapipe::UnknownError("Expected. MagicString-eatnhuea");
MagicString-eatnhuea"); } else { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } })); MP_ASSERT_OK( graph.ObserveOutputStream("out", [&out_packets](const Packet& packet) { out_packets.push_back(packet); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); - ::mediapipe::Status status = graph.Run(); + mediapipe::Status status = graph.Run(); ASSERT_THAT(status.message(), testing::HasSubstr("MagicString-eatnhuea")); ASSERT_EQ(fail_count, count_packets.size()); for (int i = 0; i < count_packets.size(); ++i) { @@ -3658,7 +3658,7 @@ TEST(CalculatorGraph, ObserveOutputStreamError) { TEST(CalculatorGraph, ObserveOutputStreamNonexistent) { const int max_count = 10; CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( node { calculator: 'CountingSourceCalculator' output_stream: 'count' @@ -3680,12 +3680,12 @@ TEST(CalculatorGraph, ObserveOutputStreamNonexistent) { graph.Initialize(config, {{"max_count", MakePacket(max_count)}})); // Observe the internal output stream "count". std::vector count_packets; // Packets from the output stream "count". - ::mediapipe::Status status = graph.ObserveOutputStream( + mediapipe::Status status = graph.ObserveOutputStream( "not_found", [&count_packets](const Packet& packet) { count_packets.push_back(packet); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); }); - EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kNotFound); + EXPECT_EQ(status.code(), mediapipe::StatusCode::kNotFound); EXPECT_THAT(status.message(), testing::HasSubstr("not_found")); } @@ -3695,7 +3695,7 @@ TEST(CalculatorGraph, ObserveOutputStreamNonexistent) { TEST(CalculatorGraph, FastSourceSlowSink) { const int max_count = 10; CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( num_threads: 2 max_queue_size: 100 node { @@ -3725,7 +3725,7 @@ TEST(CalculatorGraph, GraphFinishesWhilePaused) { // // graph.WaitUntilDone must not block forever. 
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        node { calculator: 'OneShot20MsCalculator' }
       )");
   CalculatorGraph graph;
@@ -3747,7 +3747,7 @@ TEST(CalculatorGraph, ConstructAndDestruct) { CalculatorGraph graph; }
 TEST(CalculatorGraph, RecoverAfterPreviousFailInOpen) {
   const int max_count = 10;
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        node {
         calculator: 'CountingSourceCalculator'
         output_stream: 'a'
@@ -3782,7 +3782,7 @@ TEST(CalculatorGraph, RecoverAfterPreviousFailInOpen) {

 TEST(CalculatorGraph, ReuseValidatedGraphConfig) {
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        packet_generator {
         packet_generator: "StaticCounterStringGenerator"
         input_side_packet: "created_by_factory"
@@ -3856,10 +3856,10 @@ TEST(CalculatorGraph, ReuseValidatedGraphConfig) {

 class TestRangeStdDevSubgraph : public Subgraph {
  public:
-  ::mediapipe::StatusOr<CalculatorGraphConfig> GetConfig(
+  mediapipe::StatusOr<CalculatorGraphConfig> GetConfig(
      const SubgraphOptions& options) override {
    CalculatorGraphConfig config =
-        ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+        mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
          input_side_packet: 'node_converted'
          output_stream: 'DATA:range'
          output_stream: 'SUM:range_sum'
@@ -3886,10 +3886,10 @@ REGISTER_MEDIAPIPE_GRAPH(TestRangeStdDevSubgraph);

 class TestMergeSaverSubgraph : public Subgraph {
  public:
-  ::mediapipe::StatusOr<CalculatorGraphConfig> GetConfig(
+  mediapipe::StatusOr<CalculatorGraphConfig> GetConfig(
      const SubgraphOptions& options) override {
    CalculatorGraphConfig config =
-        ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+        mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
          input_stream: 'DATA1:range1'
          input_stream: 'DATA2:range2'
          output_stream: 'MERGE:merge'
@@ -3914,7 +3914,7 @@ REGISTER_MEDIAPIPE_GRAPH(TestMergeSaverSubgraph);

 CalculatorGraphConfig GetConfigWithSubgraphs() {
   CalculatorGraphConfig proto =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        # Ensure stream name for FindOutputStreamManager
        output_stream: 'MERGE:merge'
        packet_generator {
@@ -3987,18 +3987,18 @@ TEST(CalculatorGraph, SetExecutorTwice) {
       graph.SetExecutor("xyz", std::make_shared<ThreadPoolExecutor>(1)));
   MP_EXPECT_OK(
       graph.SetExecutor("abc", std::make_shared<ThreadPoolExecutor>(1)));
-  ::mediapipe::Status status =
+  mediapipe::Status status =
       graph.SetExecutor("xyz", std::make_shared<ThreadPoolExecutor>(1));
-  EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kAlreadyExists);
+  EXPECT_EQ(status.code(), mediapipe::StatusCode::kAlreadyExists);
   EXPECT_THAT(status.message(), testing::HasSubstr("xyz"));
 }

 TEST(CalculatorGraph, ReservedNameSetExecutor) {
   // A reserved executor name such as "__gpu" must not be used.
   CalculatorGraph graph;
-  ::mediapipe::Status status =
+  mediapipe::Status status =
       graph.SetExecutor("__gpu", std::make_shared<ThreadPoolExecutor>(1));
-  EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kInvalidArgument);
+  EXPECT_EQ(status.code(), mediapipe::StatusCode::kInvalidArgument);
   EXPECT_THAT(status.message(), testing::AllOf(testing::HasSubstr("__gpu"),
                                                testing::HasSubstr("reserved")));
 }
@@ -4007,7 +4007,7 @@ TEST(CalculatorGraph, ReservedNameExecutorConfig) {
   // A reserved executor name such as "__gpu" must not be used.
   CalculatorGraph graph;
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        input_stream: 'in'
        executor {
         name: '__gpu'
@@ -4022,8 +4022,8 @@ TEST(CalculatorGraph, ReservedNameExecutorConfig) {
         output_stream: 'out'
        }
       )");
-  ::mediapipe::Status status = graph.Initialize(config);
-  EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kInvalidArgument);
+  mediapipe::Status status = graph.Initialize(config);
+  EXPECT_EQ(status.code(), mediapipe::StatusCode::kInvalidArgument);
   EXPECT_THAT(status.message(), testing::AllOf(testing::HasSubstr("__gpu"),
                                                testing::HasSubstr("reserved")));
 }
@@ -4032,7 +4032,7 @@ TEST(CalculatorGraph, ReservedNameNodeExecutor) {
   // A reserved executor name such as "__gpu" must not be used.
   CalculatorGraph graph;
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        input_stream: 'in'
        node {
        calculator: 'PassThroughCalculator'
@@ -4041,8 +4041,8 @@ TEST(CalculatorGraph, ReservedNameNodeExecutor) {
         output_stream: 'out'
        }
       )");
-  ::mediapipe::Status status = graph.Initialize(config);
-  EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kInvalidArgument);
+  mediapipe::Status status = graph.Initialize(config);
+  EXPECT_EQ(status.code(), mediapipe::StatusCode::kInvalidArgument);
   EXPECT_THAT(status.message(), testing::AllOf(testing::HasSubstr("__gpu"),
                                                testing::HasSubstr("reserved")));
 }
@@ -4053,7 +4053,7 @@ TEST(CalculatorGraph, NonExistentExecutor) {
   // provided to the graph with a CalculatorGraph::SetExecutor() call.
   CalculatorGraph graph;
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        input_stream: 'in'
        node {
        calculator: 'PassThroughCalculator'
@@ -4062,8 +4062,8 @@ TEST(CalculatorGraph, NonExistentExecutor) {
         output_stream: 'out'
        }
       )");
-  ::mediapipe::Status status = graph.Initialize(config);
-  EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kInvalidArgument);
+  mediapipe::Status status = graph.Initialize(config);
+  EXPECT_EQ(status.code(), mediapipe::StatusCode::kInvalidArgument);
   EXPECT_THAT(status.message(),
               testing::AllOf(testing::HasSubstr("xyz"),
                              testing::HasSubstr("not declared")));
@@ -4077,7 +4077,7 @@ TEST(CalculatorGraph, UndeclaredExecutor) {
   MP_ASSERT_OK(
       graph.SetExecutor("xyz", std::make_shared<ThreadPoolExecutor>(1)));
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        input_stream: 'in'
        node {
        calculator: 'PassThroughCalculator'
@@ -4086,8 +4086,8 @@ TEST(CalculatorGraph, UndeclaredExecutor) {
         output_stream: 'out'
        }
       )");
-  ::mediapipe::Status status = graph.Initialize(config);
-  EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kInvalidArgument);
+  mediapipe::Status status = graph.Initialize(config);
+  EXPECT_EQ(status.code(), mediapipe::StatusCode::kInvalidArgument);
   EXPECT_THAT(status.message(),
               testing::AllOf(testing::HasSubstr("xyz"),
                              testing::HasSubstr("not declared")));
@@ -4098,7 +4098,7 @@ TEST(CalculatorGraph, UntypedExecutorDeclaredButNotSet) {
   // the graph with a CalculatorGraph::SetExecutor() call.
   CalculatorGraph graph;
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        input_stream: 'in'
        executor { name: 'xyz' }
        node {
@@ -4108,8 +4108,8 @@ TEST(CalculatorGraph, UntypedExecutorDeclaredButNotSet) {
         output_stream: 'out'
        }
       )");
-  ::mediapipe::Status status = graph.Initialize(config);
-  EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kInvalidArgument);
+  mediapipe::Status status = graph.Initialize(config);
+  EXPECT_EQ(status.code(), mediapipe::StatusCode::kInvalidArgument);
   EXPECT_THAT(status.message(),
               testing::AllOf(testing::HasSubstr("xyz"),
                              testing::HasSubstr("SetExecutor")));
@@ -4121,7 +4121,7 @@ TEST(CalculatorGraph, DuplicateExecutorConfig) {
   MP_ASSERT_OK(
       graph.SetExecutor("xyz", std::make_shared<ThreadPoolExecutor>(1)));
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        input_stream: 'in'
        executor { name: 'xyz' }
        executor { name: 'xyz' }
@@ -4132,8 +4132,8 @@ TEST(CalculatorGraph, DuplicateExecutorConfig) {
         output_stream: 'out'
        }
       )");
-  ::mediapipe::Status status = graph.Initialize(config);
-  EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kInvalidArgument);
+  mediapipe::Status status = graph.Initialize(config);
+  EXPECT_EQ(status.code(), mediapipe::StatusCode::kInvalidArgument);
   EXPECT_THAT(status.message(),
               testing::AllOf(testing::HasSubstr("xyz"),
                              testing::HasSubstr("duplicate")));
@@ -4146,7 +4146,7 @@ TEST(CalculatorGraph, TypedExecutorDeclaredAndSet) {
   MP_ASSERT_OK(
       graph.SetExecutor("xyz", std::make_shared<ThreadPoolExecutor>(1)));
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        input_stream: 'in'
        executor {
         name: 'xyz'
@@ -4162,8 +4162,8 @@ TEST(CalculatorGraph, TypedExecutorDeclaredAndSet) {
         output_stream: 'out'
        }
       )");
-  ::mediapipe::Status status = graph.Initialize(config);
-  EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kInvalidArgument);
+  mediapipe::Status status = graph.Initialize(config);
+  EXPECT_EQ(status.code(), mediapipe::StatusCode::kInvalidArgument);
   EXPECT_THAT(status.message(),
               testing::AllOf(testing::HasSubstr("xyz"),
                              testing::HasSubstr("SetExecutor")));
@@ -4174,7 +4174,7 @@ TEST(CalculatorGraph, TypedExecutorDeclaredAndSet) {
 TEST(CalculatorGraph, NumThreadsAndDefaultExecutorConfig) {
   CalculatorGraph graph;
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        input_stream: 'in'
        num_threads: 1
        executor {
@@ -4194,8 +4194,8 @@ TEST(CalculatorGraph, NumThreadsAndDefaultExecutorConfig) {
         output_stream: 'out'
        }
       )");
-  ::mediapipe::Status status = graph.Initialize(config);
-  EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kInvalidArgument);
+  mediapipe::Status status = graph.Initialize(config);
+  EXPECT_EQ(status.code(), mediapipe::StatusCode::kInvalidArgument);
   EXPECT_THAT(status.message(),
               testing::AllOf(testing::HasSubstr("num_threads"),
                              testing::HasSubstr("default executor")));
@@ -4206,7 +4206,7 @@ TEST(CalculatorGraph, NumThreadsAndDefaultExecutorConfig) {
 TEST(CalculatorGraph, NumThreadsAndNonDefaultExecutorConfig) {
   CalculatorGraph graph;
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        input_stream: 'in'
        num_threads: 1
        executor {
@@ -4245,7 +4245,7 @@ TEST(CalculatorGraph, RunWithNumThreadsInExecutorConfig) {
                {"ThreadPoolExecutor", 1, false}};

   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        executor {
         options {
          [mediapipe.ThreadPoolExecutorOptions.ext] { num_threads: 0 }
@@ -4268,7 +4268,7 @@ TEST(CalculatorGraph, RunWithNumThreadsInExecutorConfig) {
     MP_ASSERT_OK(
         graph.ObserveOutputStream("out", [&out_packet](const Packet& packet) {
           out_packet = packet;
-          return ::mediapipe::OkStatus();
+          return mediapipe::OkStatus();
         }));
     MP_ASSERT_OK(graph.Run());
     EXPECT_EQ(cases[i].use_app_thread_is_expected,
@@ -4284,7 +4284,7 @@ TEST(CalculatorGraph, CalculatorGraphNotInitialized) {

 TEST(CalculatorGraph, SimulateAssertFailure) {
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        num_threads: 2
        node {
        calculator: 'PassThroughCalculator'
@@ -4311,7 +4311,7 @@ TEST(CalculatorGraph, SimulateAssertFailure) {
 // the source node stops the graph.
 TEST(CalculatorGraph, CheckInputTimestamp) {
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        node {
        calculator: 'CheckInputTimestampSourceCalculator'
        output_stream: 'integer'
@@ -4332,7 +4332,7 @@ TEST(CalculatorGraph, CheckInputTimestamp) {
 // source node.
 TEST(CalculatorGraph, CheckInputTimestamp2) {
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        node {
        calculator: 'CheckInputTimestamp2SourceCalculator'
        output_stream: 'integer'
@@ -4349,7 +4349,7 @@ TEST(CalculatorGraph, CheckInputTimestamp2) {

 TEST(CalculatorGraph, GraphInputStreamWithTag) {
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        input_stream: "VIDEO_METADATA:video_metadata"
        input_stream: "max_count"
        node {
@@ -4380,19 +4380,19 @@ class FirstPacketFilterCalculator : public CalculatorBase {
   FirstPacketFilterCalculator() {}
   ~FirstPacketFilterCalculator() override {}

-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     cc->Inputs().Index(0).SetAny();
     cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

-  ::mediapipe::Status Process(CalculatorContext* cc) override {
+  mediapipe::Status Process(CalculatorContext* cc) override {
     if (!seen_first_packet_) {
       cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value());
       cc->Outputs().Index(0).Close();
       seen_first_packet_ = true;
     }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }

  private:
@@ -4509,7 +4509,7 @@ TEST(CalculatorGraph, TestPollPacketsFromMultipleStreams) {
 TEST(CalculatorGraph, SimpleMuxCalculatorWithCustomInputStreamHandler) {
   CalculatorGraph graph;
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        input_stream: 'input0'
        input_stream: 'input1'
        node {
@@ -4550,7 +4550,7 @@ TEST(CalculatorGraph, SimpleMuxCalculatorWithCustomInputStreamHandler) {
   Timestamp input1_timestamp = Timestamp(0);
   MP_EXPECT_OK(graph.AddPacketToInputStream(
       "input1", MakePacket<int>(2).At(input1_timestamp)));
-  ::mediapipe::Status run_status = graph.WaitUntilIdle();
+  mediapipe::Status run_status = graph.WaitUntilIdle();
   EXPECT_THAT(
       run_status.ToString(),
       testing::AllOf(
@@ -4580,7 +4580,7 @@ void DoTestMultipleGraphRuns(absl::string_view input_stream_handler,
       )",
       input_stream_handler.data());
   CalculatorGraphConfig config =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(graph_proto);
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(graph_proto);

   std::vector<Packet> packet_dump;
   tool::AddVectorSink("output", &config, &packet_dump);
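The test-file hunks above are a purely syntactic rename: inside namespace mediapipe, the unqualified spelling mediapipe::Status resolves to the same type that the globally anchored ::mediapipe::Status names. A minimal standalone sketch of that lookup rule follows; Status here is a stand-in struct, not MediaPipe's real class, and all names are illustrative only:

#include <iostream>

namespace mediapipe {
struct Status {};  // stand-in for the real mediapipe::Status

namespace nested {
// Unqualified lookup walks outward through the enclosing namespaces, so
// "mediapipe::Status" written here resolves to ::mediapipe::Status.
Status MakeOk() { return mediapipe::Status(); }
}  // namespace nested
}  // namespace mediapipe

int main() {
  // Both spellings name the same type; the leading "::" would only matter
  // if some enclosing scope introduced its own "mediapipe" name.
  ::mediapipe::Status a = mediapipe::nested::MakeOk();
  (void)a;
  std::cout << "same type either way\n";
  return 0;
}

The same reasoning applies to mediapipe::OkStatus(), mediapipe::StatusCode, and the other renamed identifiers in the framework file that follows.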
a/mediapipe/framework/calculator_node.cc b/mediapipe/framework/calculator_node.cc index f6a15f4d3..b6cb7237e 100644 --- a/mediapipe/framework/calculator_node.cc +++ b/mediapipe/framework/calculator_node.cc @@ -73,7 +73,7 @@ Timestamp CalculatorNode::SourceProcessOrder( return calculator_->SourceProcessOrder(cc); } -::mediapipe::Status CalculatorNode::Initialize( +mediapipe::Status CalculatorNode::Initialize( const ValidatedGraphConfig* validated_graph, int node_id, InputStreamManager* input_stream_managers, OutputStreamManager* output_stream_managers, @@ -159,7 +159,7 @@ Timestamp CalculatorNode::SourceProcessOrder( return InitializeInputStreams(input_stream_managers, output_stream_managers); } -::mediapipe::Status CalculatorNode::InitializeOutputSidePackets( +mediapipe::Status CalculatorNode::InitializeOutputSidePackets( const PacketTypeSet& output_side_packet_types, OutputSidePacketImpl* output_side_packets) { output_side_packets_ = @@ -173,10 +173,10 @@ Timestamp CalculatorNode::SourceProcessOrder( output_side_packets_->GetPtr(id) = &output_side_packets[base_index + id.value()]; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CalculatorNode::InitializeInputSidePackets( +mediapipe::Status CalculatorNode::InitializeInputSidePackets( OutputSidePacketImpl* output_side_packets) { const NodeTypeInfo& node_type_info = validated_graph_->CalculatorInfos()[node_id_]; @@ -201,10 +201,10 @@ Timestamp CalculatorNode::SourceProcessOrder( << output_side_packet_index; origin_output_side_packet->AddMirror(&input_side_packet_handler_, id); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CalculatorNode::InitializeOutputStreams( +mediapipe::Status CalculatorNode::InitializeOutputStreams( OutputStreamManager* output_stream_managers) { RET_CHECK(output_stream_managers) << "output_stream_managers is NULL"; const NodeTypeInfo& node_type_info = @@ -216,7 +216,7 @@ Timestamp CalculatorNode::SourceProcessOrder( current_output_stream_managers); } -::mediapipe::Status CalculatorNode::InitializeInputStreams( +mediapipe::Status CalculatorNode::InitializeInputStreams( InputStreamManager* input_stream_managers, OutputStreamManager* output_stream_managers) { RET_CHECK(input_stream_managers) << "input_stream_managers is NULL"; @@ -247,10 +247,10 @@ Timestamp CalculatorNode::SourceProcessOrder( << output_stream_index; origin_output_stream_manager->AddMirror(input_stream_handler_.get(), id); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CalculatorNode::InitializeInputStreamHandler( +mediapipe::Status CalculatorNode::InitializeInputStreamHandler( const InputStreamHandlerConfig& handler_config, const PacketTypeSet& input_stream_types) { const ProtoString& input_stream_handler_name = @@ -265,10 +265,10 @@ Timestamp CalculatorNode::SourceProcessOrder( _ << "\"" << input_stream_handler_name << "\" is not a registered input stream handler."); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CalculatorNode::InitializeOutputStreamHandler( +mediapipe::Status CalculatorNode::InitializeOutputStreamHandler( const OutputStreamHandlerConfig& handler_config, const PacketTypeSet& output_stream_types) { const ProtoString& output_stream_handler_name = @@ -282,10 +282,10 @@ Timestamp CalculatorNode::SourceProcessOrder( /*calculator_run_in_parallel=*/max_in_flight_ > 1), _ << "\"" << output_stream_handler_name << "\" is not a registered output stream handler."); - return 
::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CalculatorNode::ConnectShardsToStreams( +mediapipe::Status CalculatorNode::ConnectShardsToStreams( CalculatorContext* calculator_context) { RET_CHECK(calculator_context); MP_RETURN_IF_ERROR( @@ -325,13 +325,13 @@ void CalculatorNode::SetMaxInputStreamQueueSize(int max_queue_size) { input_stream_handler_->SetMaxQueueSize(max_queue_size); } -::mediapipe::Status CalculatorNode::PrepareForRun( +mediapipe::Status CalculatorNode::PrepareForRun( const std::map& all_side_packets, const std::map& service_packets, std::function ready_for_open_callback, std::function source_node_opened_callback, std::function schedule_callback, - std::function error_callback, + std::function error_callback, CounterFactory* counter_factory) { RET_CHECK(ready_for_open_callback) << "ready_for_open_callback is NULL"; RET_CHECK(schedule_callback) << "schedule_callback is NULL"; @@ -398,7 +398,7 @@ void CalculatorNode::SetMaxInputStreamQueueSize(int max_queue_size) { input_side_packets_ready_ = (input_side_packet_handler_.MissingInputSidePacketCount() == 0); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } namespace { @@ -410,7 +410,7 @@ const Packet GetPacket(const OutputSidePacket& out) { } // Resends the output-side-packets from the previous graph run. -::mediapipe::Status ResendSidePackets(CalculatorContext* cc) { +mediapipe::Status ResendSidePackets(CalculatorContext* cc) { auto& outs = cc->OutputSidePackets(); for (CollectionItemId id = outs.BeginId(); id < outs.EndId(); ++id) { Packet packet = GetPacket(outs.Get(id)); @@ -419,7 +419,7 @@ const Packet GetPacket(const OutputSidePacket& out) { outs.Get(id).Set(packet); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace @@ -433,7 +433,7 @@ bool CalculatorNode::OutputsAreConstant(CalculatorContext* cc) { return true; } -::mediapipe::Status CalculatorNode::OpenNode() { +mediapipe::Status CalculatorNode::OpenNode() { VLOG(2) << "CalculatorNode::OpenNode() for " << DebugName(); CalculatorContext* default_context = @@ -448,7 +448,7 @@ bool CalculatorNode::OutputsAreConstant(CalculatorContext* cc) { calculator_context_manager_.PushInputTimestampToContext( default_context, Timestamp::Unstarted()); - ::mediapipe::Status result; + mediapipe::Status result; if (OutputsAreConstant(default_context)) { result = ResendSidePackets(default_context); } else { @@ -493,7 +493,7 @@ bool CalculatorNode::OutputsAreConstant(CalculatorContext* cc) { status_ = kStateOpened; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void CalculatorNode::ActivateNode() { @@ -527,8 +527,8 @@ void CalculatorNode::CloseOutputStreams(OutputStreamShardSet* outputs) { output_stream_handler_->Close(outputs); } -::mediapipe::Status CalculatorNode::CloseNode( - const ::mediapipe::Status& graph_status, bool graph_run_ended) { +mediapipe::Status CalculatorNode::CloseNode( + const mediapipe::Status& graph_status, bool graph_run_ended) { { absl::MutexLock status_lock(&status_mutex_); RET_CHECK_NE(status_, kStateClosed) @@ -548,11 +548,11 @@ void CalculatorNode::CloseOutputStreams(OutputStreamShardSet* outputs) { calculator_context_manager_.SetGraphStatusInContext(default_context, graph_status); - ::mediapipe::Status result; + mediapipe::Status result; if (OutputsAreConstant(default_context)) { // Do nothing. 
- result = ::mediapipe::OkStatus(); + result = mediapipe::OkStatus(); } else { MEDIAPIPE_PROFILING(CLOSE, default_context); LegacyCalculatorSupport::Scoped s(default_context); @@ -582,10 +582,10 @@ void CalculatorNode::CloseOutputStreams(OutputStreamShardSet* outputs) { "Calculator::Close() for node \"$0\" failed: ", DebugName()); VLOG(2) << "Closed node " << DebugName(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -void CalculatorNode::CleanupAfterRun(const ::mediapipe::Status& graph_status) { +void CalculatorNode::CleanupAfterRun(const mediapipe::Status& graph_status) { if (needs_to_close_) { calculator_context_manager_.PushInputTimestampToContext( calculator_context_manager_.GetDefaultCalculatorContext(), @@ -754,12 +754,12 @@ std::string CalculatorNode::DebugName() const { } // TODO: Split this function. -::mediapipe::Status CalculatorNode::ProcessNode( +mediapipe::Status CalculatorNode::ProcessNode( CalculatorContext* calculator_context) { if (IsSource()) { // This is a source Calculator. if (Closed()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } const Timestamp input_timestamp = calculator_context->InputTimestamp(); @@ -768,7 +768,7 @@ std::string CalculatorNode::DebugName() const { output_stream_handler_->PrepareOutputs(input_timestamp, outputs); VLOG(2) << "Calling Calculator::Process() for node: " << DebugName(); - ::mediapipe::Status result; + mediapipe::Status result; { MEDIAPIPE_PROFILING(PROCESS, calculator_context); @@ -782,7 +782,7 @@ std::string CalculatorNode::DebugName() const { // Needs to call CloseNode(). node_stopped = true; } else { - return ::mediapipe::StatusBuilder(result, MEDIAPIPE_LOC).SetPrepend() + return mediapipe::StatusBuilder(result, MEDIAPIPE_LOC).SetPrepend() << absl::Substitute( "Calculator::Process() for node \"$0\" failed: ", DebugName()); @@ -791,15 +791,15 @@ std::string CalculatorNode::DebugName() const { output_stream_handler_->PostProcess(input_timestamp); if (node_stopped) { MP_RETURN_IF_ERROR( - CloseNode(::mediapipe::OkStatus(), /*graph_run_ended=*/false)); + CloseNode(mediapipe::OkStatus(), /*graph_run_ended=*/false)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } else { // This is not a source Calculator. InputStreamShardSet* const inputs = &calculator_context->Inputs(); OutputStreamShardSet* const outputs = &calculator_context->Outputs(); - ::mediapipe::Status result = - ::mediapipe::InternalError("Calculator context has no input packets."); + mediapipe::Status result = + mediapipe::InternalError("Calculator context has no input packets."); int num_invocations = calculator_context_manager_.NumberOfContextTimestamps( *calculator_context); @@ -818,7 +818,7 @@ std::string CalculatorNode::DebugName() const { if (OutputsAreConstant(calculator_context)) { // Do nothing. - result = ::mediapipe::OkStatus(); + result = mediapipe::OkStatus(); } else { MEDIAPIPE_PROFILING(PROCESS, calculator_context); LegacyCalculatorSupport::Scoped s( @@ -838,7 +838,7 @@ std::string CalculatorNode::DebugName() const { // ensure that all sources will be closed and that packets in input // streams will be processed before the graph is terminated. 
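// Illustrative sketch (not part of the patch): as the comment above notes,
// tool::StatusStop() is the sentinel a source calculator returns from
// Process() once it has emitted everything; ProcessNode() treats it as a
// request to close the node, not as a graph failure. A hypothetical minimal
// source (assumes mediapipe/framework/calculator_framework.h and
// mediapipe/framework/tool/status_util.h, inside namespace mediapipe):
class CountingSourceCalculator : public CalculatorBase {
 public:
  static mediapipe::Status GetContract(CalculatorContract* cc) {
    cc->Outputs().Index(0).Set<int>();
    return mediapipe::OkStatus();
  }
  mediapipe::Status Process(CalculatorContext* cc) override {
    if (count_ == 10) return tool::StatusStop();  // All packets emitted.
    cc->Outputs().Index(0).Add(new int(count_), Timestamp(count_));
    ++count_;
    return mediapipe::OkStatus();
  }

 private:
  int count_ = 0;
};
REGISTER_CALCULATOR(CountingSourceCalculator);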
if (!result.ok() && result != tool::StatusStop()) { - return ::mediapipe::StatusBuilder(result, MEDIAPIPE_LOC).SetPrepend() + return mediapipe::StatusBuilder(result, MEDIAPIPE_LOC).SetPrepend() << absl::Substitute( "Calculator::Process() for node \"$0\" failed: ", DebugName()); @@ -855,7 +855,7 @@ std::string CalculatorNode::DebugName() const { CHECK_EQ(calculator_context_manager_.NumberOfContextTimestamps( *calculator_context), 1); - return CloseNode(::mediapipe::OkStatus(), /*graph_run_ended=*/false); + return CloseNode(mediapipe::OkStatus(), /*graph_run_ended=*/false); } else { RET_CHECK_FAIL() << "Invalid input timestamp in ProcessNode(). timestamp: " diff --git a/mediapipe/framework/calculator_node.h b/mediapipe/framework/calculator_node.h index 49b766caa..a9e9fd2ff 100644 --- a/mediapipe/framework/calculator_node.h +++ b/mediapipe/framework/calculator_node.h @@ -95,7 +95,7 @@ class CalculatorNode { void SetExecutor(const std::string& executor); // Calls Process() on the Calculator corresponding to this node. - ::mediapipe::Status ProcessNode(CalculatorContext* calculator_context); + mediapipe::Status ProcessNode(CalculatorContext* calculator_context); // Initializes the node. The buffer_size_hint argument is // set to the value specified in the graph proto for this field. @@ -105,7 +105,7 @@ class CalculatorNode { // output_side_packets is expected to point to a contiguous flat array with // OutputSidePacketImpls corresponding to the output side packet indexes in // validated_graph. - ::mediapipe::Status Initialize( + mediapipe::Status Initialize( const ValidatedGraphConfig* validated_graph, int node_id, InputStreamManager* input_stream_managers, OutputStreamManager* output_stream_managers, @@ -121,22 +121,22 @@ class CalculatorNode { // can be scheduled. source_node_opened_callback is called when a source // node is opened. schedule_callback is passed to the InputStreamHandler // and is called each time a new invocation can be scheduled. - ::mediapipe::Status PrepareForRun( + mediapipe::Status PrepareForRun( const std::map& all_side_packets, const std::map& service_packets, std::function ready_for_open_callback, std::function source_node_opened_callback, std::function schedule_callback, - std::function error_callback, + std::function error_callback, CounterFactory* counter_factory) ABSL_LOCKS_EXCLUDED(status_mutex_); // Opens the node. - ::mediapipe::Status OpenNode() ABSL_LOCKS_EXCLUDED(status_mutex_); + mediapipe::Status OpenNode() ABSL_LOCKS_EXCLUDED(status_mutex_); // Called when a source node's layer becomes active. void ActivateNode() ABSL_LOCKS_EXCLUDED(status_mutex_); // Cleans up the node after the CalculatorGraph has been run. Deletes // the Calculator managed by this node. graph_status is the status of // the graph run. - void CleanupAfterRun(const ::mediapipe::Status& graph_status) + void CleanupAfterRun(const mediapipe::Status& graph_status) ABSL_LOCKS_EXCLUDED(status_mutex_); // Returns true iff PrepareForRun() has been called (and types verified). @@ -218,8 +218,8 @@ class CalculatorNode { // Closes the node's calculator and input and output streams. // graph_status is the current status of the graph run. graph_run_ended // indicates whether the graph run has ended. 
- ::mediapipe::Status CloseNode(const ::mediapipe::Status& graph_status, - bool graph_run_ended) + mediapipe::Status CloseNode(const mediapipe::Status& graph_status, + bool graph_run_ended) ABSL_LOCKS_EXCLUDED(status_mutex_); // Returns a pointer to the default calculator context that is used for @@ -235,34 +235,34 @@ class CalculatorNode { private: // Sets up the output side packets from the master flat array. - ::mediapipe::Status InitializeOutputSidePackets( + mediapipe::Status InitializeOutputSidePackets( const PacketTypeSet& output_side_packet_types, OutputSidePacketImpl* output_side_packets); // Connects the input side packets as mirrors on the output side packets. // Output side packets are looked up in the master flat array which is // provided. - ::mediapipe::Status InitializeInputSidePackets( + mediapipe::Status InitializeInputSidePackets( OutputSidePacketImpl* output_side_packets); // Sets up the output streams from the master flat array. - ::mediapipe::Status InitializeOutputStreams( + mediapipe::Status InitializeOutputStreams( OutputStreamManager* output_stream_managers); // Sets up the input streams and connects them as mirrors on the // output streams. Both input streams and output streams are looked // up in the master flat arrays which are provided. - ::mediapipe::Status InitializeInputStreams( + mediapipe::Status InitializeInputStreams( InputStreamManager* input_stream_managers, OutputStreamManager* output_stream_managers); - ::mediapipe::Status InitializeInputStreamHandler( + mediapipe::Status InitializeInputStreamHandler( const InputStreamHandlerConfig& handler_config, const PacketTypeSet& input_stream_types); - ::mediapipe::Status InitializeOutputStreamHandler( + mediapipe::Status InitializeOutputStreamHandler( const OutputStreamHandlerConfig& handler_config, const PacketTypeSet& output_stream_types); // Connects the input/output stream shards in the given calculator context to // the input/output streams of the node. - ::mediapipe::Status ConnectShardsToStreams( + mediapipe::Status ConnectShardsToStreams( CalculatorContext* calculator_context); // The general scheduling logic shared by EndScheduling() and diff --git a/mediapipe/framework/calculator_node_test.cc b/mediapipe/framework/calculator_node_test.cc index e7b4f6fbb..e72a178ca 100644 --- a/mediapipe/framework/calculator_node_test.cc +++ b/mediapipe/framework/calculator_node_test.cc @@ -37,23 +37,23 @@ class CountCalculator : public CalculatorBase { CountCalculator() { ++num_constructed_; } ~CountCalculator() override { ++num_destroyed_; } - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { ++num_fill_expectations_; cc->Inputs().Get(cc->Inputs().BeginId()).Set(); cc->Outputs().Get(cc->Outputs().BeginId()).Set(); cc->InputSidePackets().Get(cc->InputSidePackets().BeginId()).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { ++num_open_; // Simulate doing nontrivial work to ensure that the time spent in the // method will register on streamz each time it is called. 
usleep(100); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { ++num_process_; int input_stream_int = cc->Inputs().Get(cc->Inputs().BeginId()).Get(); int side_packet_int = @@ -65,15 +65,15 @@ class CountCalculator : public CalculatorBase { // Simulate doing nontrivial work to ensure that the time spent in the // method will register on streamz each time it is called. usleep(100); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Close(CalculatorContext* cc) override { + mediapipe::Status Close(CalculatorContext* cc) override { ++num_close_; // Simulate doing nontrivial work to ensure that the time spent in the // method will register on streamz each time it is called. usleep(100); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } static int num_constructed_; @@ -94,7 +94,7 @@ int CountCalculator::num_destroyed_ = 0; void SourceNodeOpenedNoOp() {} -void CheckFail(const ::mediapipe::Status& status) { +void CheckFail(const mediapipe::Status& status) { LOG(FATAL) << "The test triggered the error callback with status: " << status; } @@ -133,7 +133,7 @@ class CalculatorNodeTest : public ::testing::Test { CalculatorGraphConfig graph_config; // Add the test for the node under test. if (use_tags) { - graph_config = ::mediapipe::ParseTextProtoOrDie( + graph_config = mediapipe::ParseTextProtoOrDie( first_two_nodes_string + "node {\n" // Node index 2 " calculator: \"CountCalculator\"\n" @@ -143,7 +143,7 @@ class CalculatorNodeTest : public ::testing::Test { " input_side_packet: \"INPUT_SIDE_PACKET_TAG:input_a\"\n" "}\n"); } else { - graph_config = ::mediapipe::ParseTextProtoOrDie( + graph_config = mediapipe::ParseTextProtoOrDie( first_two_nodes_string + "node {\n" // Node index 2 " calculator: \"CountCalculator\"\n" @@ -165,7 +165,7 @@ class CalculatorNodeTest : public ::testing::Test { &buffer_size_hint_, graph_profiler_)); } - ::mediapipe::Status PrepareNodeForRun() { + mediapipe::Status PrepareNodeForRun() { return node_->PrepareForRun( // input_side_packets_, // service_packets_, // @@ -180,7 +180,7 @@ class CalculatorNodeTest : public ::testing::Test { nullptr); } - ::mediapipe::Status InitializeStreams() { + mediapipe::Status InitializeStreams() { // START OF: code is copied from // CalculatorGraph::InitializePacketGeneratorGraph. // Create and initialize the output side packets. @@ -220,7 +220,7 @@ class CalculatorNodeTest : public ::testing::Test { stream_a_manager_ = &output_stream_managers_[1]; stream_b_manager_ = &output_stream_managers_[2]; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } virtual void SimulateParentOpenNode() { stream_a_manager_->LockIntroData(); } @@ -482,7 +482,7 @@ TEST_F(CalculatorNodeTest, CleanupAfterRun) { node_->EndScheduling(); // The max parallelism is already reached. 
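// Illustrative note (not part of the patch): PrepareForRun() now spells its
// error callback type std::function<void(const mediapipe::Status&)>, i.e.
// without the leading "::". The CheckFail() helper above has exactly this
// shape; a hypothetical non-fatal variant would be:
void LogGraphError(const mediapipe::Status& status) {
  LOG(ERROR) << "The graph run reported an error: " << status;
}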
EXPECT_FALSE(node_->TryToBeginScheduling()); - node_->CleanupAfterRun(::mediapipe::OkStatus()); + node_->CleanupAfterRun(mediapipe::OkStatus()); EXPECT_FALSE(node_->Prepared()); EXPECT_FALSE(node_->Opened()); @@ -517,7 +517,7 @@ void CalculatorNodeTest::TestCleanupAfterRunTwice() { EXPECT_TRUE(node_->TryToBeginScheduling()); MP_EXPECT_OK(node_->ProcessNode(cc_)); node_->EndScheduling(); - node_->CleanupAfterRun(::mediapipe::OkStatus()); + node_->CleanupAfterRun(mediapipe::OkStatus()); stream_a_manager_->PrepareForRun(nullptr); @@ -543,7 +543,7 @@ void CalculatorNodeTest::TestCleanupAfterRunTwice() { node_->EndScheduling(); // The max parallelism is already reached. EXPECT_FALSE(node_->TryToBeginScheduling()); - node_->CleanupAfterRun(::mediapipe::OkStatus()); + node_->CleanupAfterRun(mediapipe::OkStatus()); EXPECT_FALSE(node_->Prepared()); EXPECT_FALSE(node_->Opened()); diff --git a/mediapipe/framework/calculator_parallel_execution_test.cc b/mediapipe/framework/calculator_parallel_execution_test.cc index 862f83d27..887fd4352 100644 --- a/mediapipe/framework/calculator_parallel_execution_test.cc +++ b/mediapipe/framework/calculator_parallel_execution_test.cc @@ -50,20 +50,20 @@ inline void BusySleep(absl::Duration duration) { class SlowPlusOneCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set(); cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->SetOffset(mediapipe::TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { if (cc->InputTimestamp().Value() % 4 == 0) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } RandomEngine random(testing::UnitTest::GetInstance()->random_seed()); @@ -71,7 +71,7 @@ class SlowPlusOneCalculator : public CalculatorBase { BusySleep(absl::Milliseconds(90 + uniform_dist(random))); cc->Outputs().Index(0).Add(new int(cc->Inputs().Index(0).Get() + 1), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; @@ -91,7 +91,7 @@ class ParallelExecutionTest : public testing::Test { TEST_F(ParallelExecutionTest, SlowPlusOneCalculatorsTest) { CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "input" node { calculator: "SlowPlusOneCalculator" @@ -124,7 +124,7 @@ TEST_F(ParallelExecutionTest, SlowPlusOneCalculatorsTest) { const int kTotalNums = 100; int fail_count = 0; for (int i = 0; i < kTotalNums; ++i) { - ::mediapipe::Status status = graph.AddPacketToInputStream( + mediapipe::Status status = graph.AddPacketToInputStream( "input", Adopt(new int(i)).At(Timestamp(i))); if (!status.ok()) { ++fail_count; diff --git a/mediapipe/framework/calculator_registry.h b/mediapipe/framework/calculator_registry.h index f3d88fa18..b6580dec0 100644 --- a/mediapipe/framework/calculator_registry.h +++ b/mediapipe/framework/calculator_registry.h @@ -19,14 +19,14 @@ #include "mediapipe/framework/calculator_base.h" -#define REGISTER_CALCULATOR(name) \ - REGISTER_FACTORY_FUNCTION_QUALIFIED(::mediapipe::CalculatorBaseRegistry, \ - calculator_registration, name, \ - absl::make_unique); \ - 
REGISTER_FACTORY_FUNCTION_QUALIFIED( \ - ::mediapipe::internal::StaticAccessToCalculatorBaseRegistry, \ - access_registration, name, \ - absl::make_unique< \ - ::mediapipe::internal::StaticAccessToCalculatorBaseTyped>) +#define REGISTER_CALCULATOR(name) \ + REGISTER_FACTORY_FUNCTION_QUALIFIED(mediapipe::CalculatorBaseRegistry, \ + calculator_registration, name, \ + absl::make_unique); \ + REGISTER_FACTORY_FUNCTION_QUALIFIED( \ + mediapipe::internal::StaticAccessToCalculatorBaseRegistry, \ + access_registration, name, \ + absl::make_unique< \ + mediapipe::internal::StaticAccessToCalculatorBaseTyped>) #endif // MEDIAPIPE_FRAMEWORK_CALCULATOR_REGISTRY_H_ diff --git a/mediapipe/framework/calculator_registry_util.cc b/mediapipe/framework/calculator_registry_util.cc index 77aab0be4..ed8a35bb6 100644 --- a/mediapipe/framework/calculator_registry_util.cc +++ b/mediapipe/framework/calculator_registry_util.cc @@ -30,9 +30,9 @@ bool IsLegacyCalculator(const std::string& package_name, return false; } -::mediapipe::Status VerifyCalculatorWithContract( - const std::string& package_name, const std::string& node_class, - CalculatorContract* contract) { +mediapipe::Status VerifyCalculatorWithContract(const std::string& package_name, + const std::string& node_class, + CalculatorContract* contract) { // A number of calculators use the non-CC methods on GlCalculatorHelper // even though they are CalculatorBase-based. ASSIGN_OR_RETURN( @@ -43,10 +43,10 @@ bool IsLegacyCalculator(const std::string& package_name, MP_RETURN_IF_ERROR(static_access_to_calculator_base->GetContract(contract)) .SetPrepend() << node_class << ": "; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::StatusOr> CreateCalculator( +mediapipe::StatusOr> CreateCalculator( const std::shared_ptr& input_tag_map, const std::shared_ptr& output_tag_map, const std::string& package_name, CalculatorState* calculator_state, diff --git a/mediapipe/framework/calculator_registry_util.h b/mediapipe/framework/calculator_registry_util.h index 6d218c1c0..36a5cb802 100644 --- a/mediapipe/framework/calculator_registry_util.h +++ b/mediapipe/framework/calculator_registry_util.h @@ -31,11 +31,11 @@ namespace mediapipe { bool IsLegacyCalculator(const std::string& package_name, const std::string& node_class); -::mediapipe::Status VerifyCalculatorWithContract( - const std::string& package_name, const std::string& node_class, - CalculatorContract* contract); +mediapipe::Status VerifyCalculatorWithContract(const std::string& package_name, + const std::string& node_class, + CalculatorContract* contract); -::mediapipe::StatusOr> CreateCalculator( +mediapipe::StatusOr> CreateCalculator( const std::shared_ptr& input_tag_map, const std::shared_ptr& output_tag_map, const std::string& package_name, CalculatorState* calculator_state, diff --git a/mediapipe/framework/calculator_runner.cc b/mediapipe/framework/calculator_runner.cc index 9c4a31335..827ac1c0a 100644 --- a/mediapipe/framework/calculator_runner.cc +++ b/mediapipe/framework/calculator_runner.cc @@ -36,15 +36,15 @@ namespace { // Input side packets: 1, pointing to CalculatorRunner::StreamContents. 
class CalculatorRunnerSourceCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->InputSidePackets() .Index(0) .Set(); cc->Outputs().Index(0).SetAny(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { const auto* contents = cc->InputSidePackets() .Index(0) .Get(); @@ -53,9 +53,9 @@ class CalculatorRunnerSourceCalculator : public CalculatorBase { for (const Packet& packet : contents->packets) { cc->Outputs().Index(0).AddPacket(packet); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { return tool::StatusStop(); } }; @@ -67,23 +67,23 @@ REGISTER_CALCULATOR(CalculatorRunnerSourceCalculator); // Input side packets: 1, pointing to CalculatorRunner::StreamContents. class CalculatorRunnerSinkCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).SetAny(); cc->InputSidePackets().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { contents_ = cc->InputSidePackets() .Index(0) .Get(); contents_->header = cc->Inputs().Index(0).Header(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { contents_->packets.push_back(cc->Inputs().Index(0).Value()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: @@ -98,7 +98,7 @@ CalculatorRunner::CalculatorRunner( MEDIAPIPE_CHECK_OK(InitializeFromNodeConfig(node_config)); } -::mediapipe::Status CalculatorRunner::InitializeFromNodeConfig( +mediapipe::Status CalculatorRunner::InitializeFromNodeConfig( const CalculatorGraphConfig::Node& node_config) { node_config_ = node_config; @@ -126,7 +126,7 @@ CalculatorRunner::CalculatorRunner( tool::TagMap::Create(node_config_.output_side_packet())); output_side_packets_ = absl::make_unique(output_side_map); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } CalculatorRunner::CalculatorRunner(const std::string& calculator_type, @@ -220,10 +220,10 @@ std::map CalculatorRunner::GetCountersValues() { return graph_->GetCounterFactory()->GetCounterSet()->GetCountersValues(); } -::mediapipe::Status CalculatorRunner::BuildGraph() { +mediapipe::Status CalculatorRunner::BuildGraph() { if (graph_ != nullptr) { // The graph was already built. - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } RET_CHECK(inputs_) << "The inputs were not initialized."; RET_CHECK(outputs_) << "The outputs were not initialized."; @@ -277,10 +277,10 @@ std::map CalculatorRunner::GetCountersValues() { graph_ = absl::make_unique(); MP_RETURN_IF_ERROR(graph_->Initialize(config)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CalculatorRunner::Run() { +mediapipe::Status CalculatorRunner::Run() { MP_RETURN_IF_ERROR(BuildGraph()); // Set the input side packets for the sources. 
std::map input_side_packets; @@ -352,7 +352,7 @@ std::map CalculatorRunner::GetCountersValues() { tag, (index == -1) ? ++positional_index : index); ASSIGN_OR_RETURN(contents, graph_->GetOutputSidePacket(name)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/framework/calculator_runner.h b/mediapipe/framework/calculator_runner.h index 7fe12eba2..8a3e8ba83 100644 --- a/mediapipe/framework/calculator_runner.h +++ b/mediapipe/framework/calculator_runner.h @@ -109,11 +109,11 @@ class CalculatorRunner { // Runs the calculator, by calling Open(), Process() with the // inputs provided via mutable_inputs(), and Close(). Returns the - // ::mediapipe::Status from CalculatorGraph::Run(). Internally, Run() + // mediapipe::Status from CalculatorGraph::Run(). Internally, Run() // constructs a CalculatorGraph in the first call, and calls // CalculatorGraph::Run(). A single instance of CalculatorRunner // uses the same instance of CalculatorGraph for all runs. - ::mediapipe::Status Run(); + mediapipe::Status Run(); // Returns the vector of contents of the output streams. The .header // field contains the stream header and the .packets field contains @@ -135,11 +135,11 @@ class CalculatorRunner { static const char kSinkPrefix[]; // Initialize using a node config (does the constructor's work). - ::mediapipe::Status InitializeFromNodeConfig( + mediapipe::Status InitializeFromNodeConfig( const CalculatorGraphConfig::Node& node_config); // Builds the graph if one does not already exist. - ::mediapipe::Status BuildGraph(); + mediapipe::Status BuildGraph(); CalculatorGraphConfig::Node node_config_; diff --git a/mediapipe/framework/calculator_runner_test.cc b/mediapipe/framework/calculator_runner_test.cc index 95dd76144..51484a4e3 100644 --- a/mediapipe/framework/calculator_runner_test.cc +++ b/mediapipe/framework/calculator_runner_test.cc @@ -40,7 +40,7 @@ namespace { // at InputTimestamp. The headers are strings. 
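// Illustrative sketch (not part of the patch): typical CalculatorRunner usage
// with the renamed types, matching the Run() contract documented above.
// PassThroughCalculator is the test calculator used earlier in this patch; the
// single untagged stream is an assumption for brevity.
CalculatorRunner runner(R"(
  calculator: "PassThroughCalculator"
  input_stream: "in"
  output_stream: "out"
)");
runner.MutableInputs()->Index(0).packets.push_back(
    MakePacket<int>(7).At(Timestamp(0)));
MP_ASSERT_OK(runner.Run());  // Returns the mediapipe::Status of the graph run.
EXPECT_EQ(7, runner.Outputs().Index(0).packets[0].Get<int>());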
class CalculatorRunnerTestCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set(); cc->Inputs().Index(1).Set(); cc->Outputs().Index(0).Set(); @@ -50,10 +50,10 @@ class CalculatorRunnerTestCalculator : public CalculatorBase { cc->OutputSidePackets() .Tag("SIDE_OUTPUT") .SetSameAs(&cc->InputSidePackets().Index(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { std::string input_header_string = absl::StrCat(cc->Inputs().Index(0).Header().Get(), cc->Inputs().Index(1).Header().Get()); @@ -66,17 +66,17 @@ class CalculatorRunnerTestCalculator : public CalculatorBase { cc->OutputSidePackets() .Tag("SIDE_OUTPUT") .Set(cc->InputSidePackets().Index(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { for (int index = 0; index < 2; ++index) { cc->Outputs().Index(index).Add( new int(-cc->Inputs().Index(index).Get()), cc->InputTimestamp()); } cc->Outputs().Index(2).AddPacket( cc->InputSidePackets().Index(0).At(cc->InputTimestamp())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(CalculatorRunnerTestCalculator); @@ -87,7 +87,7 @@ REGISTER_CALCULATOR(CalculatorRunnerTestCalculator); // with the same tag name (and any index). class CalculatorRunnerMultiTagTestCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { for (const std::string& tag : cc->Inputs().GetTags()) { for (CollectionItemId item_id = cc->Inputs().BeginId(tag); item_id < cc->Inputs().EndId(tag); ++item_id) { @@ -95,10 +95,10 @@ class CalculatorRunnerMultiTagTestCalculator : public CalculatorBase { } cc->Outputs().Get(tag, 0).Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { for (const std::string& tag : cc->Inputs().GetTags()) { auto sum = absl::make_unique(0); for (CollectionItemId item_id = cc->Inputs().BeginId(tag); @@ -109,7 +109,7 @@ class CalculatorRunnerMultiTagTestCalculator : public CalculatorBase { } cc->Outputs().Get(tag, 0).Add(sum.release(), cc->InputTimestamp()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(CalculatorRunnerMultiTagTestCalculator); diff --git a/mediapipe/framework/collection.h b/mediapipe/framework/collection.h index 448968be2..5c1ac199c 100644 --- a/mediapipe/framework/collection.h +++ b/mediapipe/framework/collection.h @@ -448,7 +448,7 @@ template typename Collection::value_type*& Collection::GetPtr(CollectionItemId id) { static_assert(storage == CollectionStorage::kStorePointer, - "::mediapipe::internal::Collection::GetPtr() is only " + "mediapipe::internal::Collection::GetPtr() is only " "available for collections that were defined with template " "argument storage == CollectionStorage::kStorePointer."); CHECK_LE(BeginId(), id); @@ -460,7 +460,7 @@ template const typename Collection::value_type* Collection::GetPtr(CollectionItemId id) const { static_assert(storage == CollectionStorage::kStorePointer, - 
"::mediapipe::internal::Collection::GetPtr() is only " + "mediapipe::internal::Collection::GetPtr() is only " "available for collections that were defined with template " "argument storage == CollectionStorage::kStorePointer."); CHECK_LE(BeginId(), id); diff --git a/mediapipe/framework/collection_item_id.h b/mediapipe/framework/collection_item_id.h index 4d87eb060..cde28bfd8 100644 --- a/mediapipe/framework/collection_item_id.h +++ b/mediapipe/framework/collection_item_id.h @@ -157,7 +157,7 @@ class CollectionItemId { } private: - friend class ::mediapipe::tool::TagMap; + friend class mediapipe::tool::TagMap; // Initialization from a value. explicit constexpr CollectionItemId(int init_value) : value_(init_value) {} diff --git a/mediapipe/framework/collection_test.cc b/mediapipe/framework/collection_test.cc index 359342b3f..704a3f08e 100644 --- a/mediapipe/framework/collection_test.cc +++ b/mediapipe/framework/collection_test.cc @@ -183,7 +183,7 @@ TEST(CollectionTest, StaticEmptyCollectionHeapCheck) { } template -::mediapipe::Status TestCollectionWithPointers( +mediapipe::Status TestCollectionWithPointers( const std::vector& original_values, const T& inject1, const T& inject2) { std::shared_ptr tag_map = tool::CreateTagMap({"TAG_A:a", "TAG_B:1:b", "TAG_A:2:c", "TAG_B:d", @@ -451,7 +451,7 @@ template ++i; } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } TEST(CollectionTest, TestCollectionWithPointersIntAndString) { diff --git a/mediapipe/framework/counter_factory.cc b/mediapipe/framework/counter_factory.cc index 6c88b3cc6..94a6a4213 100644 --- a/mediapipe/framework/counter_factory.cc +++ b/mediapipe/framework/counter_factory.cc @@ -67,7 +67,7 @@ void CounterSet::PrintCounters() ABSL_LOCKS_EXCLUDED(mu_) { Counter* CounterSet::Get(const std::string& name) ABSL_LOCKS_EXCLUDED(mu_) { absl::ReaderMutexLock lock(&mu_); - if (!::mediapipe::ContainsKey(counters_, name)) { + if (!mediapipe::ContainsKey(counters_, name)) { return nullptr; } return counters_[name].get(); diff --git a/mediapipe/framework/deps/canonical_errors.h b/mediapipe/framework/deps/canonical_errors.h index b5a956da2..9f82b2094 100644 --- a/mediapipe/framework/deps/canonical_errors.h +++ b/mediapipe/framework/deps/canonical_errors.h @@ -22,63 +22,60 @@ namespace mediapipe { // Each of the functions below creates a canonical error with the given // message. The error code of the returned status object matches the name of // the function. 
-inline ::mediapipe::Status AlreadyExistsError(absl::string_view message) {
-  return ::mediapipe::Status(::mediapipe::StatusCode::kAlreadyExists, message);
+inline mediapipe::Status AlreadyExistsError(absl::string_view message) {
+  return mediapipe::Status(mediapipe::StatusCode::kAlreadyExists, message);
 }

-inline ::mediapipe::Status CancelledError() {
-  return ::mediapipe::Status(::mediapipe::StatusCode::kCancelled, "");
+inline mediapipe::Status CancelledError() {
+  return mediapipe::Status(mediapipe::StatusCode::kCancelled, "");
 }

-inline ::mediapipe::Status CancelledError(absl::string_view message) {
-  return ::mediapipe::Status(::mediapipe::StatusCode::kCancelled, message);
+inline mediapipe::Status CancelledError(absl::string_view message) {
+  return mediapipe::Status(mediapipe::StatusCode::kCancelled, message);
 }

-inline ::mediapipe::Status InternalError(absl::string_view message) {
-  return ::mediapipe::Status(::mediapipe::StatusCode::kInternal, message);
+inline mediapipe::Status InternalError(absl::string_view message) {
+  return mediapipe::Status(mediapipe::StatusCode::kInternal, message);
 }

-inline ::mediapipe::Status InvalidArgumentError(absl::string_view message) {
-  return ::mediapipe::Status(::mediapipe::StatusCode::kInvalidArgument,
-                             message);
+inline mediapipe::Status InvalidArgumentError(absl::string_view message) {
+  return mediapipe::Status(mediapipe::StatusCode::kInvalidArgument, message);
 }

-inline ::mediapipe::Status FailedPreconditionError(absl::string_view message) {
-  return ::mediapipe::Status(::mediapipe::StatusCode::kFailedPrecondition,
-                             message);
+inline mediapipe::Status FailedPreconditionError(absl::string_view message) {
+  return mediapipe::Status(mediapipe::StatusCode::kFailedPrecondition, message);
 }

-inline ::mediapipe::Status NotFoundError(absl::string_view message) {
-  return ::mediapipe::Status(::mediapipe::StatusCode::kNotFound, message);
+inline mediapipe::Status NotFoundError(absl::string_view message) {
+  return mediapipe::Status(mediapipe::StatusCode::kNotFound, message);
 }

-inline ::mediapipe::Status OutOfRangeError(absl::string_view message) {
-  return ::mediapipe::Status(::mediapipe::StatusCode::kOutOfRange, message);
+inline mediapipe::Status OutOfRangeError(absl::string_view message) {
+  return mediapipe::Status(mediapipe::StatusCode::kOutOfRange, message);
 }

-inline ::mediapipe::Status PermissionDeniedError(absl::string_view message) {
-  return ::mediapipe::Status(::mediapipe::StatusCode::kPermissionDenied,
-                             message);
+inline mediapipe::Status PermissionDeniedError(absl::string_view message) {
+  return mediapipe::Status(mediapipe::StatusCode::kPermissionDenied, message);
 }

-inline ::mediapipe::Status UnimplementedError(absl::string_view message) {
-  return ::mediapipe::Status(::mediapipe::StatusCode::kUnimplemented, message);
+inline mediapipe::Status UnimplementedError(absl::string_view message) {
+  return mediapipe::Status(mediapipe::StatusCode::kUnimplemented, message);
 }

-inline ::mediapipe::Status UnknownError(absl::string_view message) {
-  return ::mediapipe::Status(::mediapipe::StatusCode::kUnknown, message);
+inline mediapipe::Status UnknownError(absl::string_view message) {
+  return mediapipe::Status(mediapipe::StatusCode::kUnknown, message);
 }

-inline ::mediapipe::Status UnavailableError(absl::string_view message) {
-  return ::mediapipe::Status(::mediapipe::StatusCode::kUnavailable, message);
+inline mediapipe::Status UnavailableError(absl::string_view message) {
+  return mediapipe::Status(mediapipe::StatusCode::kUnavailable,
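// Illustrative sketch (not part of the patch): call sites of the helpers above
// now read without the leading "::", and the returned code always matches the
// helper's name, so it can be checked with the predicates declared at the end
// of this header (assert() requires <cassert> as written):
mediapipe::Status s = mediapipe::NotFoundError("no such stream");
assert(mediapipe::IsNotFound(s));
assert(s.code() == mediapipe::StatusCode::kNotFound);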
message); } -inline bool IsCancelled(const ::mediapipe::Status& status) { - return status.code() == ::mediapipe::StatusCode::kCancelled; +inline bool IsCancelled(const mediapipe::Status& status) { + return status.code() == mediapipe::StatusCode::kCancelled; } -inline bool IsNotFound(const ::mediapipe::Status& status) { - return status.code() == ::mediapipe::StatusCode::kNotFound; +inline bool IsNotFound(const mediapipe::Status& status) { + return status.code() == mediapipe::StatusCode::kNotFound; } } // namespace mediapipe diff --git a/mediapipe/framework/deps/cleanup.h b/mediapipe/framework/deps/cleanup.h index 141e71c6c..125cc7400 100644 --- a/mediapipe/framework/deps/cleanup.h +++ b/mediapipe/framework/deps/cleanup.h @@ -21,7 +21,7 @@ // void func() {} // FILE* fp = fopen("data.txt", "r"); // if (fp == nullptr) return; -// auto fp_cleaner = ::mediapipe::MakeCleanup([fp] { fclose(fp); }); +// auto fp_cleaner = mediapipe::MakeCleanup([fp] { fclose(fp); }); // // No matter what, fclose(fp) will happen. // DataObject d; // while (ReadDataObject(fp, &d)) { diff --git a/mediapipe/framework/deps/file_helpers.cc b/mediapipe/framework/deps/file_helpers.cc index 5a7e7e381..a3807b1d0 100644 --- a/mediapipe/framework/deps/file_helpers.cc +++ b/mediapipe/framework/deps/file_helpers.cc @@ -138,11 +138,11 @@ class DirectoryListing { } // namespace -::mediapipe::Status GetContents(absl::string_view file_name, - std::string* output) { - FILE* fp = fopen(file_name.data(), "r"); +mediapipe::Status GetContents(absl::string_view file_name, std::string* output, + bool read_as_binary) { + FILE* fp = fopen(file_name.data(), read_as_binary ? "rb" : "r"); if (fp == NULL) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Can't find file: " << file_name; } @@ -151,36 +151,36 @@ class DirectoryListing { char buf[4096]; size_t ret = fread(buf, 1, 4096, fp); if (ret == 0 && ferror(fp)) { - return ::mediapipe::InternalErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InternalErrorBuilder(MEDIAPIPE_LOC) << "Error while reading file: " << file_name; } output->append(std::string(buf, ret)); } fclose(fp); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status SetContents(absl::string_view file_name, - absl::string_view content) { +mediapipe::Status SetContents(absl::string_view file_name, + absl::string_view content) { FILE* fp = fopen(file_name.data(), "w"); if (fp == NULL) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Can't open file: " << file_name; } fwrite(content.data(), sizeof(char), content.size(), fp); size_t write_error = ferror(fp); if (fclose(fp) != 0 || write_error) { - return ::mediapipe::InternalErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InternalErrorBuilder(MEDIAPIPE_LOC) << "Error while writing file: " << file_name << ". 
Error message: " << strerror(write_error); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status MatchInTopSubdirectories( - const std::string& parent_directory, const std::string& file_name, - std::vector* results) { +mediapipe::Status MatchInTopSubdirectories(const std::string& parent_directory, + const std::string& file_name, + std::vector* results) { DirectoryListing parent_listing(parent_directory); while (parent_listing.HasNextEntry()) { @@ -194,12 +194,12 @@ class DirectoryListing { } } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status MatchFileTypeInDirectory( - const std::string& directory, const std::string& file_suffix, - std::vector* results) { +mediapipe::Status MatchFileTypeInDirectory(const std::string& directory, + const std::string& file_suffix, + std::vector* results) { DirectoryListing directory_listing(directory); while (directory_listing.HasNextEntry()) { @@ -209,21 +209,21 @@ class DirectoryListing { } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status Exists(absl::string_view file_name) { +mediapipe::Status Exists(absl::string_view file_name) { struct stat buffer; int status; status = stat(std::string(file_name).c_str(), &buffer); if (status == 0) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } switch (errno) { case EACCES: - return ::mediapipe::PermissionDeniedError("Insufficient permissions."); + return mediapipe::PermissionDeniedError("Insufficient permissions."); default: - return ::mediapipe::NotFoundError("The path does not exist."); + return mediapipe::NotFoundError("The path does not exist."); } } @@ -235,7 +235,7 @@ int mkdir(std::string path) { int mkdir(std::string path) { return _mkdir(path.c_str()); } #endif -::mediapipe::Status RecursivelyCreateDir(absl::string_view path) { +mediapipe::Status RecursivelyCreateDir(absl::string_view path) { if (path.empty() || Exists(path).ok()) { return mediapipe::OkStatus(); } @@ -244,9 +244,9 @@ int mkdir(std::string path) { return _mkdir(path.c_str()); } if (mkdir(std::string(path)) != 0) { switch (errno) { case EACCES: - return ::mediapipe::PermissionDeniedError("Insufficient permissions."); + return mediapipe::PermissionDeniedError("Insufficient permissions."); default: - return ::mediapipe::UnavailableError("Failed to create directory."); + return mediapipe::UnavailableError("Failed to create directory."); } } return mediapipe::OkStatus(); diff --git a/mediapipe/framework/deps/file_helpers.h b/mediapipe/framework/deps/file_helpers.h index c79909ed7..4004af19a 100644 --- a/mediapipe/framework/deps/file_helpers.h +++ b/mediapipe/framework/deps/file_helpers.h @@ -20,23 +20,23 @@ namespace mediapipe { namespace file { -::mediapipe::Status GetContents(absl::string_view file_name, - std::string* output); +mediapipe::Status GetContents(absl::string_view file_name, std::string* output, + bool read_as_binary = false); -::mediapipe::Status SetContents(absl::string_view file_name, - absl::string_view content); +mediapipe::Status SetContents(absl::string_view file_name, + absl::string_view content); -::mediapipe::Status MatchInTopSubdirectories( - const std::string& parent_directory, const std::string& file_name, - std::vector* results); +mediapipe::Status MatchInTopSubdirectories(const std::string& parent_directory, + const std::string& file_name, + std::vector* results); -::mediapipe::Status MatchFileTypeInDirectory(const std::string& directory, - const std::string& file_suffix, - 
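// Illustrative sketch (not part of the patch): the new read_as_binary
// parameter (default false) makes GetContents() open the file with "rb"
// instead of "r", which matters on Windows where text mode rewrites line
// endings. The path and function name below are hypothetical.
mediapipe::Status LoadModelBlob(std::string* blob) {
  return mediapipe::file::GetContents("/path/to/model.tflite", blob,
                                      /*read_as_binary=*/true);
}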
std::vector* results); +mediapipe::Status MatchFileTypeInDirectory(const std::string& directory, + const std::string& file_suffix, + std::vector* results); -::mediapipe::Status Exists(absl::string_view file_name); +mediapipe::Status Exists(absl::string_view file_name); -::mediapipe::Status RecursivelyCreateDir(absl::string_view path); +mediapipe::Status RecursivelyCreateDir(absl::string_view path); } // namespace file } // namespace mediapipe diff --git a/mediapipe/framework/deps/monotonic_clock.h b/mediapipe/framework/deps/monotonic_clock.h index 586a75cd1..f2e8b975e 100644 --- a/mediapipe/framework/deps/monotonic_clock.h +++ b/mediapipe/framework/deps/monotonic_clock.h @@ -92,7 +92,7 @@ class MonotonicClockAccess { // Create a monotonic clock based on the given state. Caller owns state // so that multiple such clocks can be created from the same state. static MonotonicClock* CreateMonotonicClock(State* state); - friend class ::mediapipe::MonotonicClockTest; + friend class mediapipe::MonotonicClockTest; }; } // namespace mediapipe diff --git a/mediapipe/framework/deps/monotonic_clock_test.cc b/mediapipe/framework/deps/monotonic_clock_test.cc index 8a123d463..533830e43 100644 --- a/mediapipe/framework/deps/monotonic_clock_test.cc +++ b/mediapipe/framework/deps/monotonic_clock_test.cc @@ -428,7 +428,7 @@ class ClockFrenzy { void Start(int nthreads) { absl::MutexLock l(&lock_); running_ = true; - threads_ = absl::make_unique<::mediapipe::ThreadPool>("Frenzy", nthreads); + threads_ = absl::make_unique("Frenzy", nthreads); threads_->StartWorkers(); for (int i = 0; i < nthreads; ++i) { threads_->Schedule([&]() { Feed(); }); @@ -452,7 +452,7 @@ class ClockFrenzy { Clock* real_clock_; std::vector sim_clocks_; std::vector mono_clocks_; - std::unique_ptr<::mediapipe::ThreadPool> threads_; + std::unique_ptr threads_; // Provide a lock to avoid race conditions in non-threadsafe ACMRandom. mutable absl::Mutex lock_; diff --git a/mediapipe/framework/deps/registration.h b/mediapipe/framework/deps/registration.h index 66845ad06..006d9a8a3 100644 --- a/mediapipe/framework/deps/registration.h +++ b/mediapipe/framework/deps/registration.h @@ -67,7 +67,7 @@ namespace mediapipe { // class Client {}; // // using ClientRegistry = -// GlobalFactoryRegistry<::mediapipe::StatusOr>; +// GlobalFactoryRegistry>; // // class MyClient : public Client { // public: @@ -84,7 +84,7 @@ namespace mediapipe { // ::my_ns::MyClient, // []() { // auto backend = absl::make_unique("/path/to/backend"); -// const ::mediapipe::Status status = backend->Init(); +// const mediapipe::Status status = backend->Init(); // if (!status.ok()) { // return status; // } @@ -95,8 +95,8 @@ namespace mediapipe { // // === Using the registry to create instances ============================== // -// // Registry will return ::mediapipe::StatusOr -// ::mediapipe::StatusOr> s_or_widget = +// // Registry will return mediapipe::StatusOr +// mediapipe::StatusOr> s_or_widget = // WidgetRegistry::CreateByName( // "my_ns.MyWidget", std::move(gadget), thing); // // Registry will return NOT_FOUND if the name is unknown. @@ -115,7 +115,7 @@ namespace mediapipe { // // This might be useful if clients outside of your codebase are registering // // plugins. // for (const auto& name : WidgetRegistry::GetRegisteredNames()) { -// ::mediapipe::StatusOr> s_or_widget = +// mediapipe::StatusOr> s_or_widget = // WidgetRegistry::CreateByName(name, std::move(gadget), thing); // ... 
// } @@ -134,13 +134,13 @@ constexpr char kNameSep[] = "."; template struct WrapStatusOr { - using type = ::mediapipe::StatusOr; + using type = mediapipe::StatusOr; }; // Specialization to avoid double-wrapping types that are already StatusOrs. template -struct WrapStatusOr<::mediapipe::StatusOr> { - using type = ::mediapipe::StatusOr; +struct WrapStatusOr> { + using type = mediapipe::StatusOr; }; } // namespace registration_internal @@ -196,8 +196,8 @@ class FunctionRegistry { absl::ReaderMutexLock lock(&lock_); auto it = functions_.find(name); if (it == functions_.end()) { - return ::mediapipe::NotFoundError("No registered object with name: " + - name); + return mediapipe::NotFoundError("No registered object with name: " + + name); } function = it->second; } @@ -379,12 +379,12 @@ class GlobalFactoryRegistry { #define MEDIAPIPE_REGISTER_FACTORY_FUNCTION(RegistryType, name, ...) \ static auto* REGISTRY_STATIC_VAR(registration_##name, __LINE__) = \ - new ::mediapipe::RegistrationToken( \ + new mediapipe::RegistrationToken( \ RegistryType::Register(#name, __VA_ARGS__)) #define REGISTER_FACTORY_FUNCTION_QUALIFIED(RegistryType, var_name, name, ...) \ static auto* REGISTRY_STATIC_VAR(var_name, __LINE__) = \ - new ::mediapipe::RegistrationToken( \ + new mediapipe::RegistrationToken( \ RegistryType::Register(#name, __VA_ARGS__)) } // namespace mediapipe diff --git a/mediapipe/framework/deps/ret_check.cc b/mediapipe/framework/deps/ret_check.cc index 65fdcc033..1e4b49e26 100644 --- a/mediapipe/framework/deps/ret_check.cc +++ b/mediapipe/framework/deps/ret_check.cc @@ -16,23 +16,23 @@ namespace mediapipe { -::mediapipe::StatusBuilder RetCheckFailSlowPath( - ::mediapipe::source_location location) { +mediapipe::StatusBuilder RetCheckFailSlowPath( + mediapipe::source_location location) { // TODO Implement LogWithStackTrace(). - return ::mediapipe::InternalErrorBuilder(location) + return mediapipe::InternalErrorBuilder(location) << "RET_CHECK failure (" << location.file_name() << ":" << location.line() << ") "; } -::mediapipe::StatusBuilder RetCheckFailSlowPath( - ::mediapipe::source_location location, const char* condition) { - return ::mediapipe::RetCheckFailSlowPath(location) << condition; +mediapipe::StatusBuilder RetCheckFailSlowPath( + mediapipe::source_location location, const char* condition) { + return mediapipe::RetCheckFailSlowPath(location) << condition; } -::mediapipe::StatusBuilder RetCheckFailSlowPath( - ::mediapipe::source_location location, const char* condition, - const ::mediapipe::Status& status) { - return ::mediapipe::RetCheckFailSlowPath(location) +mediapipe::StatusBuilder RetCheckFailSlowPath( + mediapipe::source_location location, const char* condition, + const mediapipe::Status& status) { + return mediapipe::RetCheckFailSlowPath(location) << condition << " returned " << status << " "; } diff --git a/mediapipe/framework/deps/ret_check.h b/mediapipe/framework/deps/ret_check.h index 54c05a7e6..c81baa245 100644 --- a/mediapipe/framework/deps/ret_check.h +++ b/mediapipe/framework/deps/ret_check.h @@ -21,23 +21,23 @@ namespace mediapipe { // Returns a StatusBuilder that corresponds to a `RET_CHECK` failure. -::mediapipe::StatusBuilder RetCheckFailSlowPath( - ::mediapipe::source_location location); +mediapipe::StatusBuilder RetCheckFailSlowPath( + mediapipe::source_location location); // Returns a StatusBuilder that corresponds to a `RET_CHECK` failure. 
-::mediapipe::StatusBuilder RetCheckFailSlowPath( - ::mediapipe::source_location location, const char* condition); +mediapipe::StatusBuilder RetCheckFailSlowPath( + mediapipe::source_location location, const char* condition); // Returns a StatusBuilder that corresponds to a `RET_CHECK` failure. -::mediapipe::StatusBuilder RetCheckFailSlowPath( - ::mediapipe::source_location location, const char* condition, - const ::mediapipe::Status& status); +mediapipe::StatusBuilder RetCheckFailSlowPath( + mediapipe::source_location location, const char* condition, + const mediapipe::Status& status); -inline StatusBuilder RetCheckImpl(const ::mediapipe::Status& status, +inline StatusBuilder RetCheckImpl(const mediapipe::Status& status, const char* condition, - ::mediapipe::source_location location) { + mediapipe::source_location location) { if (ABSL_PREDICT_TRUE(status.ok())) - return ::mediapipe::StatusBuilder(OkStatus(), location); + return mediapipe::StatusBuilder(OkStatus(), location); return RetCheckFailSlowPath(location, condition, status); } @@ -45,13 +45,12 @@ inline StatusBuilder RetCheckImpl(const ::mediapipe::Status& status, #define RET_CHECK(cond) \ while (ABSL_PREDICT_FALSE(!(cond))) \ - return ::mediapipe::RetCheckFailSlowPath(MEDIAPIPE_LOC, #cond) + return mediapipe::RetCheckFailSlowPath(MEDIAPIPE_LOC, #cond) #define RET_CHECK_OK(status) \ - MP_RETURN_IF_ERROR( \ - ::mediapipe::RetCheckImpl((status), #status, MEDIAPIPE_LOC)) + MP_RETURN_IF_ERROR(mediapipe::RetCheckImpl((status), #status, MEDIAPIPE_LOC)) -#define RET_CHECK_FAIL() return ::mediapipe::RetCheckFailSlowPath(MEDIAPIPE_LOC) +#define RET_CHECK_FAIL() return mediapipe::RetCheckFailSlowPath(MEDIAPIPE_LOC) #define MEDIAPIPE_INTERNAL_RET_CHECK_OP(name, op, lhs, rhs) \ RET_CHECK((lhs)op(rhs)) diff --git a/mediapipe/framework/deps/safe_int.h b/mediapipe/framework/deps/safe_int.h index 94aaeb8b1..4c120bc1b 100644 --- a/mediapipe/framework/deps/safe_int.h +++ b/mediapipe/framework/deps/safe_int.h @@ -302,9 +302,9 @@ struct LogFatalOnError { // type is created per type_name. 
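// Illustrative sketch (not part of the patch): the rewritten RET_CHECK macros
// are used from any function returning mediapipe::Status; a failed check
// produces an annotated kInternal status via RetCheckFailSlowPath(). The
// function and its bound are hypothetical.
mediapipe::Status ValidateBatchSize(int batch_size) {
  RET_CHECK(batch_size > 0) << "batch_size must be positive";
  RET_CHECK_LE(batch_size, 1024) << "batch_size too large";
  return mediapipe::OkStatus();
}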
#define MEDIAPIPE_DEFINE_SAFE_INT_TYPE(type_name, value_type, policy_type) \ struct type_name##_safe_tag_ {}; \ - typedef ::mediapipe::intops::StrongInt< \ + typedef mediapipe::intops::StrongInt< \ type_name##_safe_tag_, value_type, \ - ::mediapipe::intops::SafeIntStrongIntValidator> \ + mediapipe::intops::SafeIntStrongIntValidator> \ type_name; #endif // MEDIAPIPE_DEPS_SAFE_INT_H_ diff --git a/mediapipe/framework/deps/safe_int_test.cc b/mediapipe/framework/deps/safe_int_test.cc index 2619837f7..7f385848f 100644 --- a/mediapipe/framework/deps/safe_int_test.cc +++ b/mediapipe/framework/deps/safe_int_test.cc @@ -21,21 +21,21 @@ #include "mediapipe/framework/port/gtest.h" MEDIAPIPE_DEFINE_SAFE_INT_TYPE(SafeInt8, int8, - ::mediapipe::intops::LogFatalOnError); + mediapipe::intops::LogFatalOnError); MEDIAPIPE_DEFINE_SAFE_INT_TYPE(SafeUInt8, uint8, - ::mediapipe::intops::LogFatalOnError); + mediapipe::intops::LogFatalOnError); MEDIAPIPE_DEFINE_SAFE_INT_TYPE(SafeInt16, int16, - ::mediapipe::intops::LogFatalOnError); + mediapipe::intops::LogFatalOnError); MEDIAPIPE_DEFINE_SAFE_INT_TYPE(SafeUInt16, uint16, - ::mediapipe::intops::LogFatalOnError); + mediapipe::intops::LogFatalOnError); MEDIAPIPE_DEFINE_SAFE_INT_TYPE(SafeInt32, int32, - ::mediapipe::intops::LogFatalOnError); + mediapipe::intops::LogFatalOnError); MEDIAPIPE_DEFINE_SAFE_INT_TYPE(SafeInt64, int64, - ::mediapipe::intops::LogFatalOnError); + mediapipe::intops::LogFatalOnError); MEDIAPIPE_DEFINE_SAFE_INT_TYPE(SafeUInt32, uint32, - ::mediapipe::intops::LogFatalOnError); + mediapipe::intops::LogFatalOnError); MEDIAPIPE_DEFINE_SAFE_INT_TYPE(SafeUInt64, uint64, - ::mediapipe::intops::LogFatalOnError); + mediapipe::intops::LogFatalOnError); namespace mediapipe { namespace intops { diff --git a/mediapipe/framework/deps/source_location.h b/mediapipe/framework/deps/source_location.h index 7f7af9f37..59218b236 100644 --- a/mediapipe/framework/deps/source_location.h +++ b/mediapipe/framework/deps/source_location.h @@ -59,6 +59,6 @@ class source_location { // If a function takes a source_location parameter, pass this as the argument. 
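// Illustrative sketch (not part of the patch): defining and using a checked
// integer type with the policy spelled without the leading "::".
// SafeFrameIndex is a hypothetical name; arithmetic is validated per the
// chosen policy (here, LOG(FATAL) on overflow).
MEDIAPIPE_DEFINE_SAFE_INT_TYPE(SafeFrameIndex, int32,
                               mediapipe::intops::LogFatalOnError);

void UseSafeFrameIndex() {
  SafeFrameIndex index(41);
  index += SafeFrameIndex(1);        // Checked addition.
  const int32 raw = index.value();   // raw == 42.
  (void)raw;
}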
#define MEDIAPIPE_LOC \ - ::mediapipe::source_location::DoNotInvokeDirectly(__LINE__, __FILE__) + mediapipe::source_location::DoNotInvokeDirectly(__LINE__, __FILE__) #endif // MEDIAPIPE_DEPS_SOURCE_LOCATION_H_ diff --git a/mediapipe/framework/deps/status.cc b/mediapipe/framework/deps/status.cc index 43f4f03ea..da7f7718e 100644 --- a/mediapipe/framework/deps/status.cc +++ b/mediapipe/framework/deps/status.cc @@ -23,7 +23,7 @@ std::ostream& operator<<(std::ostream& os, const Status& x) { return os; } -std::string* MediaPipeCheckOpHelperOutOfLine(const ::mediapipe::Status& v, +std::string* MediaPipeCheckOpHelperOutOfLine(const mediapipe::Status& v, const char* msg) { std::string r("Non-OK-status: "); r += msg; diff --git a/mediapipe/framework/deps/status.h b/mediapipe/framework/deps/status.h index 9d878779b..c1d21ebc9 100644 --- a/mediapipe/framework/deps/status.h +++ b/mediapipe/framework/deps/status.h @@ -29,24 +29,21 @@ namespace mediapipe { using Status = absl::Status; using StatusCode = absl::StatusCode; -inline ::mediapipe::Status OkStatus() { return absl::OkStatus(); } +inline mediapipe::Status OkStatus() { return absl::OkStatus(); } -extern std::string* MediaPipeCheckOpHelperOutOfLine( - const ::mediapipe::Status& v, const char* msg); +extern std::string* MediaPipeCheckOpHelperOutOfLine(const mediapipe::Status& v, + const char* msg); -inline std::string* MediaPipeCheckOpHelper(::mediapipe::Status v, +inline std::string* MediaPipeCheckOpHelper(mediapipe::Status v, const char* msg) { if (v.ok()) return nullptr; return MediaPipeCheckOpHelperOutOfLine(v, msg); } -#define MEDIAPIPE_DO_CHECK_OK(val, level) \ - while (auto _result = ::mediapipe::MediaPipeCheckOpHelper(val, #val)) \ +#define MEDIAPIPE_DO_CHECK_OK(val, level) \ + while (auto _result = mediapipe::MediaPipeCheckOpHelper(val, #val)) \ LOG(level) << *(_result) -// To be consistent with MP_EXPECT_OK, we add prefix MEDIAPIPE_ to -// CHECK_OK, QCHECK_OK, and DCHECK_OK. We prefer to use the marcos with -// MEDIAPIPE_ prefix in mediapipe's codebase. #define MEDIAPIPE_CHECK_OK(val) MEDIAPIPE_DO_CHECK_OK(val, FATAL) #define MEDIAPIPE_QCHECK_OK(val) MEDIAPIPE_DO_CHECK_OK(val, QFATAL) @@ -54,9 +51,13 @@ inline std::string* MediaPipeCheckOpHelper(::mediapipe::Status v, #define MEDIAPIPE_DCHECK_OK(val) MEDIAPIPE_CHECK_OK(val) #else #define MEDIAPIPE_DCHECK_OK(val) \ - while (false && (::mediapipe::OkStatus() == (val))) LOG(FATAL) + while (false && (mediapipe::OkStatus() == (val))) LOG(FATAL) #endif +#define CHECK_OK MEDIAPIPE_CHECK_OK +#define QCHECK_OK MEDIAPIPE_QCHECK_OK +#define DCHECK_OK MEDIAPIPE_DCHECK_OK + } // namespace mediapipe #endif // MEDIAPIPE_DEPS_STATUS_H_ diff --git a/mediapipe/framework/deps/status_builder.cc b/mediapipe/framework/deps/status_builder.cc index 21b80638e..fef0e4b2e 100644 --- a/mediapipe/framework/deps/status_builder.cc +++ b/mediapipe/framework/deps/status_builder.cc @@ -68,7 +68,7 @@ StatusBuilder::operator Status() && { return JoinMessageToStatus(); } -::mediapipe::Status StatusBuilder::JoinMessageToStatus() { +mediapipe::Status StatusBuilder::JoinMessageToStatus() { std::string message; if (join_style_ == MessageJoinStyle::kAnnotate) { if (!status_.ok()) { diff --git a/mediapipe/framework/deps/status_builder.h b/mediapipe/framework/deps/status_builder.h index c89a4d4c7..a42bfa939 100644 --- a/mediapipe/framework/deps/status_builder.h +++ b/mediapipe/framework/deps/status_builder.h @@ -30,15 +30,15 @@ class ABSL_MUST_USE_RESULT StatusBuilder { // Creates a `StatusBuilder` based on an original status. 
If logging is // enabled, it will use `location` as the location from which the log message // occurs. A typical user will call this with `MEDIAPIPE_LOC`. - StatusBuilder(const ::mediapipe::Status& original_status, - ::mediapipe::source_location location) + StatusBuilder(const mediapipe::Status& original_status, + mediapipe::source_location location) : status_(original_status), line_(location.line()), file_(location.file_name()), stream_(new std::ostringstream) {} - StatusBuilder(::mediapipe::Status&& original_status, - ::mediapipe::source_location location) + StatusBuilder(mediapipe::Status&& original_status, + mediapipe::source_location location) : status_(std::move(original_status)), line_(location.line()), file_(location.file_name()), @@ -47,14 +47,13 @@ class ABSL_MUST_USE_RESULT StatusBuilder { // Creates a `StatusBuilder` from a mediapipe status code. If logging is // enabled, it will use `location` as the location from which the log message // occurs. A typical user will call this with `MEDIAPIPE_LOC`. - StatusBuilder(::mediapipe::StatusCode code, - ::mediapipe::source_location location) + StatusBuilder(mediapipe::StatusCode code, mediapipe::source_location location) : status_(code, ""), line_(location.line()), file_(location.file_name()), stream_(new std::ostringstream) {} - StatusBuilder(const ::mediapipe::Status& original_status, const char* file, + StatusBuilder(const mediapipe::Status& original_status, const char* file, int line) : status_(original_status), line_(line), @@ -79,7 +78,7 @@ class ABSL_MUST_USE_RESULT StatusBuilder { operator Status() const&; operator Status() &&; - ::mediapipe::Status JoinMessageToStatus(); + mediapipe::Status JoinMessageToStatus(); private: // Specifies how to join the error message in the original status and any @@ -91,7 +90,7 @@ class ABSL_MUST_USE_RESULT StatusBuilder { }; // The status that the result will be based on. - ::mediapipe::Status status_; + mediapipe::Status status_; // The line to record if this file is logged. int line_; // Not-owned: The file to record if this status is logged. 
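Taken together, the constructors above support the usual annotate-and-return pattern; a sketch with hypothetical helpers LoadModel and ReadModelFile:

    mediapipe::Status LoadModel(const std::string& path) {
      mediapipe::Status s = ReadModelFile(path);  // hypothetical
      if (!s.ok()) {
        // Default join style annotates: "<original>; while loading <path>".
        return mediapipe::StatusBuilder(s, MEDIAPIPE_LOC)
               << "while loading " << path;
      }
      return mediapipe::OkStatus();
    }

The AnnotateMode test later in this diff pins down exactly that message-joining behavior.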
@@ -104,43 +103,40 @@ class ABSL_MUST_USE_RESULT StatusBuilder { }; inline StatusBuilder AlreadyExistsErrorBuilder( - ::mediapipe::source_location location) { - return StatusBuilder(::mediapipe::StatusCode::kAlreadyExists, location); + mediapipe::source_location location) { + return StatusBuilder(mediapipe::StatusCode::kAlreadyExists, location); } inline StatusBuilder FailedPreconditionErrorBuilder( - ::mediapipe::source_location location) { - return StatusBuilder(::mediapipe::StatusCode::kFailedPrecondition, location); + mediapipe::source_location location) { + return StatusBuilder(mediapipe::StatusCode::kFailedPrecondition, location); } -inline StatusBuilder InternalErrorBuilder( - ::mediapipe::source_location location) { - return StatusBuilder(::mediapipe::StatusCode::kInternal, location); +inline StatusBuilder InternalErrorBuilder(mediapipe::source_location location) { + return StatusBuilder(mediapipe::StatusCode::kInternal, location); } inline StatusBuilder InvalidArgumentErrorBuilder( - ::mediapipe::source_location location) { - return StatusBuilder(::mediapipe::StatusCode::kInvalidArgument, location); + mediapipe::source_location location) { + return StatusBuilder(mediapipe::StatusCode::kInvalidArgument, location); } -inline StatusBuilder NotFoundErrorBuilder( - ::mediapipe::source_location location) { - return StatusBuilder(::mediapipe::StatusCode::kNotFound, location); +inline StatusBuilder NotFoundErrorBuilder(mediapipe::source_location location) { + return StatusBuilder(mediapipe::StatusCode::kNotFound, location); } inline StatusBuilder UnavailableErrorBuilder( - ::mediapipe::source_location location) { - return StatusBuilder(::mediapipe::StatusCode::kUnavailable, location); + mediapipe::source_location location) { + return StatusBuilder(mediapipe::StatusCode::kUnavailable, location); } inline StatusBuilder UnimplementedErrorBuilder( - ::mediapipe::source_location location) { - return StatusBuilder(::mediapipe::StatusCode::kUnimplemented, location); + mediapipe::source_location location) { + return StatusBuilder(mediapipe::StatusCode::kUnimplemented, location); } -inline StatusBuilder UnknownErrorBuilder( - ::mediapipe::source_location location) { - return StatusBuilder(::mediapipe::StatusCode::kUnknown, location); +inline StatusBuilder UnknownErrorBuilder(mediapipe::source_location location) { + return StatusBuilder(mediapipe::StatusCode::kUnknown, location); } } // namespace mediapipe diff --git a/mediapipe/framework/deps/status_builder_test.cc b/mediapipe/framework/deps/status_builder_test.cc index fbb59fc44..b7e6a978d 100644 --- a/mediapipe/framework/deps/status_builder_test.cc +++ b/mediapipe/framework/deps/status_builder_test.cc @@ -19,55 +19,54 @@ namespace mediapipe { TEST(StatusBuilder, AnnotateMode) { - ::mediapipe::Status status = - StatusBuilder(::mediapipe::Status(::mediapipe::StatusCode::kNotFound, - "original message"), + mediapipe::Status status = + StatusBuilder(mediapipe::Status(mediapipe::StatusCode::kNotFound, + "original message"), MEDIAPIPE_LOC) << "annotated message1 " << "annotated message2"; ASSERT_FALSE(status.ok()); - EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kNotFound); + EXPECT_EQ(status.code(), mediapipe::StatusCode::kNotFound); EXPECT_EQ(status.message(), "original message; annotated message1 annotated message2"); } TEST(StatusBuilder, PrependMode) { - ::mediapipe::Status status = - StatusBuilder( - ::mediapipe::Status(::mediapipe::StatusCode::kInvalidArgument, - "original message"), - MEDIAPIPE_LOC) + mediapipe::Status status = + 
StatusBuilder(mediapipe::Status(mediapipe::StatusCode::kInvalidArgument, + "original message"), + MEDIAPIPE_LOC) .SetPrepend() << "prepended message1 " << "prepended message2 "; ASSERT_FALSE(status.ok()); - EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kInvalidArgument); + EXPECT_EQ(status.code(), mediapipe::StatusCode::kInvalidArgument); EXPECT_EQ(status.message(), "prepended message1 prepended message2 original message"); } TEST(StatusBuilder, AppendMode) { - ::mediapipe::Status status = - StatusBuilder(::mediapipe::Status(::mediapipe::StatusCode::kInternal, - "original message"), + mediapipe::Status status = + StatusBuilder(mediapipe::Status(mediapipe::StatusCode::kInternal, + "original message"), MEDIAPIPE_LOC) .SetAppend() << " extra message1" << " extra message2"; ASSERT_FALSE(status.ok()); - EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kInternal); + EXPECT_EQ(status.code(), mediapipe::StatusCode::kInternal); EXPECT_EQ(status.message(), "original message extra message1 extra message2"); } TEST(StatusBuilder, NoLoggingMode) { - ::mediapipe::Status status = - StatusBuilder(::mediapipe::Status(::mediapipe::StatusCode::kUnavailable, - "original message"), + mediapipe::Status status = + StatusBuilder(mediapipe::Status(mediapipe::StatusCode::kUnavailable, + "original message"), MEDIAPIPE_LOC) .SetNoLogging() << " extra message"; ASSERT_FALSE(status.ok()); - EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kUnavailable); + EXPECT_EQ(status.code(), mediapipe::StatusCode::kUnavailable); EXPECT_EQ(status.message(), "original message"); } diff --git a/mediapipe/framework/deps/status_macros.h b/mediapipe/framework/deps/status_macros.h index 8e3ddf2c6..229f61b48 100644 --- a/mediapipe/framework/deps/status_macros.h +++ b/mediapipe/framework/deps/status_macros.h @@ -13,7 +13,7 @@ // limitations under the License. // // Helper macros and methods to return and propagate errors with -// `::mediapipe::Status`. +// `mediapipe::Status`. // // The owners of mediapipe do not endorse use of these macros as a good // programming practice, and would prefer that you write the equivalent C++ @@ -26,29 +26,29 @@ #include "mediapipe/framework/deps/status.h" #include "mediapipe/framework/deps/status_builder.h" -// Evaluates an expression that produces a `::mediapipe::Status`. If the status +// Evaluates an expression that produces a `mediapipe::Status`. If the status // is not ok, returns it from the current function. // // For example: -// ::mediapipe::Status MultiStepFunction() { +// mediapipe::Status MultiStepFunction() { // MP_RETURN_IF_ERROR(Function(args...)); // MP_RETURN_IF_ERROR(foo.Method(args...)); -// return ::mediapipe::OkStatus(); +// return mediapipe::OkStatus(); // } // -// The macro ends with a `::mediapipe::StatusBuilder` which allows the returned +// The macro ends with a `mediapipe::StatusBuilder` which allows the returned // status to be extended with more details. Any chained expressions after the // macro will not be evaluated unless there is an error. 
// // For example: -// ::mediapipe::Status MultiStepFunction() { +// mediapipe::Status MultiStepFunction() { // MP_RETURN_IF_ERROR(Function(args...)) << "in MultiStepFunction"; // MP_RETURN_IF_ERROR(foo.Method(args...)).Log(base_logging::ERROR) // << "while processing query: " << query.DebugString(); -// return ::mediapipe::OkStatus(); +// return mediapipe::OkStatus(); // } // -// `::mediapipe::StatusBuilder` supports adapting the builder chain using a +// `mediapipe::StatusBuilder` supports adapting the builder chain using a // `With` method and a functor. This allows for powerful extensions to the // macro. // @@ -62,10 +62,10 @@ // MP_RETURN_IF_ERROR(bar()).With(TeamPolicy); // // Changing the return type allows the macro to be used with Task and Rpc -// interfaces. See `::mediapipe::TaskReturn` and `rpc::RpcSetStatus` for +// interfaces. See `mediapipe::TaskReturn` and `rpc::RpcSetStatus` for // details. // -// void Read(StringPiece name, ::mediapipe::Task* task) { +// void Read(StringPiece name, mediapipe::Task* task) { // int64 id; // MP_RETURN_IF_ERROR(GetIdForName(name, &id)).With(TaskReturn(task)); // MP_RETURN_IF_ERROR(ReadForId(id)).With(TaskReturn(task)); @@ -73,22 +73,22 @@ // } // // If using this macro inside a lambda, you need to annotate the return type -// to avoid confusion between a `::mediapipe::StatusBuilder` and a -// `::mediapipe::Status` type. E.g. +// to avoid confusion between a `mediapipe::StatusBuilder` and a +// `mediapipe::Status` type. E.g. // -// []() -> ::mediapipe::Status { +// []() -> mediapipe::Status { // MP_RETURN_IF_ERROR(Function(args...)); // MP_RETURN_IF_ERROR(foo.Method(args...)); -// return ::mediapipe::OkStatus(); +// return mediapipe::OkStatus(); // } #define MP_RETURN_IF_ERROR(expr) \ STATUS_MACROS_IMPL_ELSE_BLOCKER_ \ - if (::mediapipe::status_macro_internal::StatusAdaptorForMacros \ + if (mediapipe::status_macro_internal::StatusAdaptorForMacros \ status_macro_internal_adaptor = {(expr), __FILE__, __LINE__}) { \ } else /* NOLINT */ \ return status_macro_internal_adaptor.Consume() -// Executes an expression `rexpr` that returns a `::mediapipe::StatusOr`. On +// Executes an expression `rexpr` that returns a `mediapipe::StatusOr`. On // OK, extracts its value into the variable defined by `lhs`, otherwise returns // from the current function. By default the error status is returned // unchanged, but it may be modified by an `error_expression`. If there is an @@ -121,7 +121,7 @@ // // If passed, the `error_expression` is evaluated to produce the return // value. The expression may reference any variable visible in scope, as -// well as a `::mediapipe::StatusBuilder` object populated with the error and +// well as a `mediapipe::StatusBuilder` object populated with the error and // named by a single underscore `_`. The expression typically uses the // builder to modify the status and is returned directly in manner similar // to MP_RETURN_IF_ERROR. 
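Concretely, the surrounding paragraphs describe usage like the following sketch. ComputeValue is hypothetical, and the public macro name is assumed to follow this file's ASSIGN_OR_RETURN convention, which is not itself shown in these hunks:

    mediapipe::Status Consume() {
      // Plain form: bind the value or return the error from this function.
      ASSIGN_OR_RETURN(int value, ComputeValue());
      // With an error_expression: `_` is the populated StatusBuilder.
      ASSIGN_OR_RETURN(int other, ComputeValue(), _ << "while consuming");
      return mediapipe::OkStatus();
    }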
The expression may, however, evaluate to any type @@ -156,15 +156,15 @@ STATUS_MACROS_IMPL_ASSIGN_OR_RETURN_( \ STATUS_MACROS_IMPL_CONCAT_(_status_or_value, __LINE__), lhs, rexpr, \ error_expression) -#define STATUS_MACROS_IMPL_ASSIGN_OR_RETURN_(statusor, lhs, rexpr, \ - error_expression) \ - auto statusor = (rexpr); \ - if (ABSL_PREDICT_FALSE(!statusor.ok())) { \ - ::mediapipe::StatusBuilder _(std::move(statusor).status(), __FILE__, \ - __LINE__); \ - (void)_; /* error_expression is allowed to not use this variable */ \ - return (error_expression); \ - } \ +#define STATUS_MACROS_IMPL_ASSIGN_OR_RETURN_(statusor, lhs, rexpr, \ + error_expression) \ + auto statusor = (rexpr); \ + if (ABSL_PREDICT_FALSE(!statusor.ok())) { \ + mediapipe::StatusBuilder _(std::move(statusor).status(), __FILE__, \ + __LINE__); \ + (void)_; /* error_expression is allowed to not use this variable */ \ + return (error_expression); \ + } \ lhs = std::move(statusor).ValueOrDie() // Internal helper for concatenating macro values. diff --git a/mediapipe/framework/deps/status_test.cc b/mediapipe/framework/deps/status_test.cc index 10a70c0f7..ea5ad3629 100644 --- a/mediapipe/framework/deps/status_test.cc +++ b/mediapipe/framework/deps/status_test.cc @@ -20,7 +20,7 @@ namespace mediapipe { TEST(Status, OK) { - EXPECT_EQ(OkStatus().code(), ::mediapipe::StatusCode::kOk); + EXPECT_EQ(OkStatus().code(), mediapipe::StatusCode::kOk); EXPECT_EQ(OkStatus().message(), ""); MP_EXPECT_OK(OkStatus()); MP_ASSERT_OK(OkStatus()); @@ -30,25 +30,25 @@ TEST(Status, OK) { } TEST(DeathStatus, CheckOK) { - Status status(::mediapipe::StatusCode::kInvalidArgument, "Invalid"); + Status status(mediapipe::StatusCode::kInvalidArgument, "Invalid"); ASSERT_DEATH(MEDIAPIPE_CHECK_OK(status), "Invalid"); } TEST(Status, Set) { Status status; - status = Status(::mediapipe::StatusCode::kCancelled, "Error message"); - EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kCancelled); + status = Status(mediapipe::StatusCode::kCancelled, "Error message"); + EXPECT_EQ(status.code(), mediapipe::StatusCode::kCancelled); EXPECT_EQ(status.message(), "Error message"); } TEST(Status, Copy) { - Status a(::mediapipe::StatusCode::kInvalidArgument, "Invalid"); + Status a(mediapipe::StatusCode::kInvalidArgument, "Invalid"); Status b(a); ASSERT_EQ(a.ToString(), b.ToString()); } TEST(Status, Assign) { - Status a(::mediapipe::StatusCode::kInvalidArgument, "Invalid"); + Status a(mediapipe::StatusCode::kInvalidArgument, "Invalid"); Status b; b = a; ASSERT_EQ(a.ToString(), b.ToString()); @@ -58,10 +58,10 @@ TEST(Status, Update) { Status s; s.Update(OkStatus()); ASSERT_TRUE(s.ok()); - Status a(::mediapipe::StatusCode::kInvalidArgument, "Invalid"); + Status a(mediapipe::StatusCode::kInvalidArgument, "Invalid"); s.Update(a); ASSERT_EQ(s.ToString(), a.ToString()); - Status b(::mediapipe::StatusCode::kInternal, "Invalid"); + Status b(mediapipe::StatusCode::kInternal, "Invalid"); s.Update(b); ASSERT_EQ(s.ToString(), a.ToString()); s.Update(OkStatus()); @@ -72,26 +72,26 @@ TEST(Status, Update) { TEST(Status, EqualsOK) { ASSERT_EQ(OkStatus(), Status()); } TEST(Status, EqualsSame) { - Status a(::mediapipe::StatusCode::kInvalidArgument, "Invalid"); - Status b(::mediapipe::StatusCode::kInvalidArgument, "Invalid"); + Status a(mediapipe::StatusCode::kInvalidArgument, "Invalid"); + Status b(mediapipe::StatusCode::kInvalidArgument, "Invalid"); ASSERT_EQ(a, b); } TEST(Status, EqualsCopy) { - const Status a(::mediapipe::StatusCode::kInvalidArgument, "Invalid"); + const Status 
a(mediapipe::StatusCode::kInvalidArgument, "Invalid");
   const Status b = a;
   ASSERT_EQ(a, b);
 }
 
 TEST(Status, EqualsDifferentCode) {
-  const Status a(::mediapipe::StatusCode::kInvalidArgument, "Invalid");
-  const Status b(::mediapipe::StatusCode::kInternal, "Internal");
+  const Status a(mediapipe::StatusCode::kInvalidArgument, "Invalid");
+  const Status b(mediapipe::StatusCode::kInternal, "Internal");
   ASSERT_NE(a, b);
 }
 
 TEST(Status, EqualsDifferentMessage) {
-  const Status a(::mediapipe::StatusCode::kInvalidArgument, "message");
-  const Status b(::mediapipe::StatusCode::kInvalidArgument, "another");
+  const Status a(mediapipe::StatusCode::kInvalidArgument, "message");
+  const Status b(mediapipe::StatusCode::kInvalidArgument, "another");
   ASSERT_NE(a, b);
 }
diff --git a/mediapipe/framework/deps/statusor.cc b/mediapipe/framework/deps/statusor.cc
index fe63c133f..5d7eddec2 100644
--- a/mediapipe/framework/deps/statusor.cc
+++ b/mediapipe/framework/deps/statusor.cc
@@ -22,14 +22,14 @@ namespace mediapipe {
 namespace internal_statusor {
 
-void Helper::HandleInvalidStatusCtorArg(::mediapipe::Status* status) {
+void Helper::HandleInvalidStatusCtorArg(mediapipe::Status* status) {
   const char* kMessage =
       "An OK status is not a valid constructor argument to StatusOr";
   LOG(ERROR) << kMessage;
-  *status = ::mediapipe::InternalError(kMessage);
+  *status = mediapipe::InternalError(kMessage);
 }
 
-void Helper::Crash(const ::mediapipe::Status& status) {
+void Helper::Crash(const mediapipe::Status& status) {
   LOG(FATAL) << "Attempting to fetch value instead of handling error "
              << status;
 }
diff --git a/mediapipe/framework/deps/statusor.h b/mediapipe/framework/deps/statusor.h
index a33c9382c..9dc7fc332 100644
--- a/mediapipe/framework/deps/statusor.h
+++ b/mediapipe/framework/deps/statusor.h
@@ -25,7 +25,7 @@
 //
 // Example client usage for a StatusOr<T>, where T is not a pointer:
 //
-//  ::mediapipe::StatusOr<float> result = DoBigCalculationThatCouldFail();
+//  mediapipe::StatusOr<float> result = DoBigCalculationThatCouldFail();
 //  if (result.ok()) {
 //    float answer = result.ValueOrDie();
 //    printf("Big calculation yielded: %f", answer);
@@ -35,7 +35,7 @@
 //
 // Example client usage for a StatusOr<T*>:
 //
-//  ::mediapipe::StatusOr<Foo*> result = FooFactory::MakeNewFoo(arg);
+//  mediapipe::StatusOr<Foo*> result = FooFactory::MakeNewFoo(arg);
 //  if (result.ok()) {
 //    std::unique_ptr<Foo> foo(result.ValueOrDie());
 //    foo->DoSomethingCool();
@@ -45,7 +45,7 @@
 //
 // Example client usage for a StatusOr<std::unique_ptr<T>>:
 //
-//  ::mediapipe::StatusOr<std::unique_ptr<Foo>> result =
+//  mediapipe::StatusOr<std::unique_ptr<Foo>> result =
 //      FooFactory::MakeNewFoo(arg);
 //  if (result.ok()) {
 //    std::unique_ptr<Foo> foo = std::move(result.ValueOrDie());
@@ -56,9 +56,9 @@
 //
 // Example factory implementation returning StatusOr<T*>:
 //
-//  ::mediapipe::StatusOr<Foo*> FooFactory::MakeNewFoo(int arg) {
+//  mediapipe::StatusOr<Foo*> FooFactory::MakeNewFoo(int arg) {
 //    if (arg <= 0) {
-//      return ::mediapipe::InvalidArgumentError("Arg must be positive");
+//      return mediapipe::InvalidArgumentError("Arg must be positive");
 //    } else {
 //      return new Foo(arg);
 //    }
@@ -148,11 +148,11 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
   //
   // REQUIRES: !status.ok(). This requirement is DCHECKed.
   // In optimized builds, passing Status::OK() here will have the effect
-  // of passing ::mediapipe::StatusCode::kInternal as a fallback.
-  StatusOr(const ::mediapipe::Status& status);
-  StatusOr& operator=(const ::mediapipe::Status& status);
-  StatusOr(const ::mediapipe::StatusBuilder& builder);
-  StatusOr& operator=(const ::mediapipe::StatusBuilder& builder);
+  // of passing mediapipe::StatusCode::kInternal as a fallback.
+  StatusOr(const mediapipe::Status& status);
+  StatusOr& operator=(const mediapipe::Status& status);
+  StatusOr(const mediapipe::StatusBuilder& builder);
+  StatusOr& operator=(const mediapipe::StatusBuilder& builder);
 
   // TODO: Add operator=(T) overloads.
 
@@ -162,18 +162,18 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
   StatusOr(T&& value);
 
   // RValue versions of the operations declared above.
-  StatusOr(::mediapipe::Status&& status);
-  StatusOr& operator=(::mediapipe::Status&& status);
-  StatusOr(::mediapipe::StatusBuilder&& builder);
-  StatusOr& operator=(::mediapipe::StatusBuilder&& builder);
+  StatusOr(mediapipe::Status&& status);
+  StatusOr& operator=(mediapipe::Status&& status);
+  StatusOr(mediapipe::StatusBuilder&& builder);
+  StatusOr& operator=(mediapipe::StatusBuilder&& builder);
 
   // Returns this->status().ok()
   bool ok() const { return this->status_.ok(); }
 
   // Returns a reference to mediapipe status. If this contains a T, then
   // returns Status::OK().
-  const ::mediapipe::Status& status() const&;
-  ::mediapipe::Status status() &&;
+  const mediapipe::Status& status() const&;
+  mediapipe::Status status() &&;
 
   // Returns a reference to our current value, or CHECK-fails if !this->ok().
   //
@@ -213,48 +213,48 @@ class StatusOr : private internal_statusor::StatusOrData<T>,
 
 template <typename T>
 StatusOr<T>::StatusOr()
-    : Base(::mediapipe::Status(::mediapipe::StatusCode::kUnknown, "")) {}
+    : Base(mediapipe::Status(mediapipe::StatusCode::kUnknown, "")) {}
 
 template <typename T>
 StatusOr<T>::StatusOr(const T& value) : Base(value) {}
 
 template <typename T>
-StatusOr<T>::StatusOr(const ::mediapipe::Status& status) : Base(status) {}
+StatusOr<T>::StatusOr(const mediapipe::Status& status) : Base(status) {}
 
 template <typename T>
-StatusOr<T>::StatusOr(const ::mediapipe::StatusBuilder& builder)
+StatusOr<T>::StatusOr(const mediapipe::StatusBuilder& builder)
     : Base(builder) {}
 
 template <typename T>
-StatusOr<T>& StatusOr<T>::operator=(const ::mediapipe::Status& status) {
+StatusOr<T>& StatusOr<T>::operator=(const mediapipe::Status& status) {
   this->Assign(status);
   return *this;
 }
 
 template <typename T>
-StatusOr<T>& StatusOr<T>::operator=(const ::mediapipe::StatusBuilder& builder) {
-  return *this = static_cast<::mediapipe::Status>(builder);
+StatusOr<T>& StatusOr<T>::operator=(const mediapipe::StatusBuilder& builder) {
+  return *this = static_cast<mediapipe::Status>(builder);
 }
 
 template <typename T>
 StatusOr<T>::StatusOr(T&& value) : Base(std::move(value)) {}
 
 template <typename T>
-StatusOr<T>::StatusOr(::mediapipe::Status&& status) : Base(std::move(status)) {}
+StatusOr<T>::StatusOr(mediapipe::Status&& status) : Base(std::move(status)) {}
 
 template <typename T>
-StatusOr<T>::StatusOr(::mediapipe::StatusBuilder&& builder)
+StatusOr<T>::StatusOr(mediapipe::StatusBuilder&& builder)
     : Base(std::move(builder)) {}
 
 template <typename T>
-StatusOr<T>& StatusOr<T>::operator=(::mediapipe::Status&& status) {
+StatusOr<T>& StatusOr<T>::operator=(mediapipe::Status&& status) {
   this->Assign(std::move(status));
   return *this;
 }
 
 template <typename T>
-StatusOr<T>& StatusOr<T>::operator=(::mediapipe::StatusBuilder&& builder) {
-  return *this = static_cast<::mediapipe::Status>(std::move(builder));
+StatusOr<T>& StatusOr<T>::operator=(mediapipe::StatusBuilder&& builder) {
+  return *this = static_cast<mediapipe::Status>(std::move(builder));
}
 
 template <typename T>
@@ -289,12 +289,12 @@ inline StatusOr<T>& StatusOr<T>::operator=(StatusOr&& other) {
 }
 
 template <typename T>
-const ::mediapipe::Status& StatusOr<T>::status() const& {
+const mediapipe::Status& StatusOr<T>::status() const& {
   return this->status_;
 }
 
 template <typename T>
-::mediapipe::Status StatusOr<T>::status() && {
-  return ok() ? ::mediapipe::OkStatus() : std::move(this->status_);
+mediapipe::Status StatusOr<T>::status() && {
+  return ok() ? mediapipe::OkStatus() : std::move(this->status_);
 }
 
 template <typename T>
diff --git a/mediapipe/framework/deps/statusor_internals.h b/mediapipe/framework/deps/statusor_internals.h
index b42d206a2..8a419ba3b 100644
--- a/mediapipe/framework/deps/statusor_internals.h
+++ b/mediapipe/framework/deps/statusor_internals.h
@@ -24,8 +24,8 @@ namespace internal_statusor {
 class Helper {
  public:
   // Move type-agnostic error handling to the .cc.
-  static void HandleInvalidStatusCtorArg(::mediapipe::Status*);
-  ABSL_ATTRIBUTE_NORETURN static void Crash(const ::mediapipe::Status& status);
+  static void HandleInvalidStatusCtorArg(mediapipe::Status*);
+  ABSL_ATTRIBUTE_NORETURN static void Crash(const mediapipe::Status& status);
 };
 
 // Construct an instance of T in `p` through placement new, passing Args... to
@@ -92,10 +92,10 @@ class StatusOrData {
   explicit StatusOrData(const T& value) : data_(value) { MakeStatus(); }
   explicit StatusOrData(T&& value) : data_(std::move(value)) { MakeStatus(); }
 
-  explicit StatusOrData(const ::mediapipe::Status& status) : status_(status) {
+  explicit StatusOrData(const mediapipe::Status& status) : status_(status) {
     EnsureNotOk();
   }
-  explicit StatusOrData(::mediapipe::Status&& status)
+  explicit StatusOrData(mediapipe::Status&& status)
       : status_(std::move(status)) {
     EnsureNotOk();
   }
@@ -133,7 +133,7 @@ class StatusOrData {
       MakeValue(value);
     } else {
       MakeValue(value);
-      status_ = ::mediapipe::OkStatus();
+      status_ = mediapipe::OkStatus();
     }
   }
 
@@ -143,17 +143,17 @@ class StatusOrData {
       MakeValue(std::move(value));
     } else {
       MakeValue(std::move(value));
-      status_ = ::mediapipe::OkStatus();
+      status_ = mediapipe::OkStatus();
     }
   }
 
-  void Assign(const ::mediapipe::Status& status) {
+  void Assign(const mediapipe::Status& status) {
     Clear();
     status_ = status;
     EnsureNotOk();
   }
 
-  void Assign(::mediapipe::Status&& status) {
+  void Assign(mediapipe::Status&& status) {
     Clear();
     status_ = std::move(status);
     EnsureNotOk();
@@ -168,7 +168,7 @@ class StatusOrData {
   // Eg. in the copy constructor we use the default constructor of Status in
   // the ok() path to avoid an extra Ref call.
   union {
-    ::mediapipe::Status status_;
+    mediapipe::Status status_;
   };
 
   // data_ is active iff status_.ok()==true
@@ -203,7 +203,7 @@ class StatusOrData {
   // argument.
   template <typename... Args>
   void MakeStatus(Args&&... args) {
-    internal_statusor::PlacementNew<::mediapipe::Status>(
+    internal_statusor::PlacementNew<mediapipe::Status>(
         &status_, std::forward<Args>(args)...);
   }
 };
diff --git a/mediapipe/framework/deps/statusor_test.cc b/mediapipe/framework/deps/statusor_test.cc
index dae6390d7..bf2b436d1 100644
--- a/mediapipe/framework/deps/statusor_test.cc
+++ b/mediapipe/framework/deps/statusor_test.cc
@@ -74,19 +74,19 @@ TEST(StatusOr, ElementType) {
 
 TEST(StatusOr, TestNoDefaultConstructorInitialization) {
   // Explicitly initialize it with an error code.
-  ::mediapipe::StatusOr<int> statusor(
-      ::mediapipe::CancelledError(""));
+  mediapipe::StatusOr<int> statusor(
+      mediapipe::CancelledError(""));
   EXPECT_FALSE(statusor.ok());
-  EXPECT_EQ(statusor.status().code(), ::mediapipe::StatusCode::kCancelled);
+  EXPECT_EQ(statusor.status().code(), mediapipe::StatusCode::kCancelled);
 
   // Default construction of StatusOr initializes it with an UNKNOWN error code.
- ::mediapipe::StatusOr statusor2; + mediapipe::StatusOr statusor2; EXPECT_FALSE(statusor2.ok()); - EXPECT_EQ(statusor2.status().code(), ::mediapipe::StatusCode::kUnknown); + EXPECT_EQ(statusor2.status().code(), mediapipe::StatusCode::kUnknown); } TEST(StatusOr, TestMoveOnlyInitialization) { - ::mediapipe::StatusOr> thing(ReturnUniquePtr()); + mediapipe::StatusOr> thing(ReturnUniquePtr()); ASSERT_TRUE(thing.ok()); EXPECT_EQ(0, *thing.ValueOrDie()); int* previous = thing.ValueOrDie().get(); @@ -98,13 +98,13 @@ TEST(StatusOr, TestMoveOnlyInitialization) { } TEST(StatusOr, TestMoveOnlyStatusCtr) { - ::mediapipe::StatusOr> thing( - ::mediapipe::CancelledError("")); + mediapipe::StatusOr> thing( + mediapipe::CancelledError("")); ASSERT_FALSE(thing.ok()); } TEST(StatusOr, TestMoveOnlyValueExtraction) { - ::mediapipe::StatusOr> thing(ReturnUniquePtr()); + mediapipe::StatusOr> thing(ReturnUniquePtr()); ASSERT_TRUE(thing.ok()); std::unique_ptr ptr = thing.ConsumeValueOrDie(); EXPECT_EQ(0, *ptr); @@ -115,7 +115,7 @@ TEST(StatusOr, TestMoveOnlyValueExtraction) { } TEST(StatusOr, TestMoveOnlyConversion) { - ::mediapipe::StatusOr> const_thing( + mediapipe::StatusOr> const_thing( ReturnUniquePtr()); EXPECT_TRUE(const_thing.ok()); EXPECT_EQ(0, *const_thing.ValueOrDie()); @@ -129,23 +129,23 @@ TEST(StatusOr, TestMoveOnlyConversion) { } TEST(StatusOr, TestMoveOnlyVector) { - // Sanity check that ::mediapipe::StatusOr works in vector. - std::vector<::mediapipe::StatusOr>> vec; + // Sanity check that mediapipe::StatusOr works in vector. + std::vector>> vec; vec.push_back(ReturnUniquePtr()); vec.resize(2); auto another_vec = std::move(vec); EXPECT_EQ(0, *another_vec[0].ValueOrDie()); - EXPECT_EQ(::mediapipe::StatusCode::kUnknown, another_vec[1].status().code()); + EXPECT_EQ(mediapipe::StatusCode::kUnknown, another_vec[1].status().code()); } TEST(StatusOr, TestMoveWithValuesAndErrors) { - ::mediapipe::StatusOr status_or(std::string(1000, '0')); - ::mediapipe::StatusOr value1(std::string(1000, '1')); - ::mediapipe::StatusOr value2(std::string(1000, '2')); - ::mediapipe::StatusOr error1( - Status(::mediapipe::StatusCode::kUnknown, "error1")); - ::mediapipe::StatusOr error2( - Status(::mediapipe::StatusCode::kUnknown, "error2")); + mediapipe::StatusOr status_or(std::string(1000, '0')); + mediapipe::StatusOr value1(std::string(1000, '1')); + mediapipe::StatusOr value2(std::string(1000, '2')); + mediapipe::StatusOr error1( + Status(mediapipe::StatusCode::kUnknown, "error1")); + mediapipe::StatusOr error2( + Status(mediapipe::StatusCode::kUnknown, "error2")); ASSERT_TRUE(status_or.ok()); EXPECT_EQ(std::string(1000, '0'), status_or.ValueOrDie()); @@ -172,13 +172,13 @@ TEST(StatusOr, TestMoveWithValuesAndErrors) { } TEST(StatusOr, TestCopyWithValuesAndErrors) { - ::mediapipe::StatusOr status_or(std::string(1000, '0')); - ::mediapipe::StatusOr value1(std::string(1000, '1')); - ::mediapipe::StatusOr value2(std::string(1000, '2')); - ::mediapipe::StatusOr error1( - Status(::mediapipe::StatusCode::kUnknown, "error1")); - ::mediapipe::StatusOr error2( - Status(::mediapipe::StatusCode::kUnknown, "error2")); + mediapipe::StatusOr status_or(std::string(1000, '0')); + mediapipe::StatusOr value1(std::string(1000, '1')); + mediapipe::StatusOr value2(std::string(1000, '2')); + mediapipe::StatusOr error1( + Status(mediapipe::StatusCode::kUnknown, "error1")); + mediapipe::StatusOr error2( + Status(mediapipe::StatusCode::kUnknown, "error2")); ASSERT_TRUE(status_or.ok()); EXPECT_EQ(std::string(1000, '0'), status_or.ValueOrDie()); @@ 
-211,226 +211,225 @@ TEST(StatusOr, TestCopyWithValuesAndErrors) { } TEST(StatusOr, TestDefaultCtor) { - ::mediapipe::StatusOr thing; + mediapipe::StatusOr thing; EXPECT_FALSE(thing.ok()); - EXPECT_EQ(thing.status().code(), ::mediapipe::StatusCode::kUnknown); + EXPECT_EQ(thing.status().code(), mediapipe::StatusCode::kUnknown); } TEST(StatusOrDeathTest, TestDefaultCtorValue) { - ::mediapipe::StatusOr thing; + mediapipe::StatusOr thing; EXPECT_DEATH(thing.ValueOrDie(), ""); - const ::mediapipe::StatusOr thing2; + const mediapipe::StatusOr thing2; EXPECT_DEATH(thing.ValueOrDie(), ""); } TEST(StatusOr, TestStatusCtor) { - ::mediapipe::StatusOr thing( - ::mediapipe::Status(::mediapipe::StatusCode::kCancelled, "")); + mediapipe::StatusOr thing( + mediapipe::Status(mediapipe::StatusCode::kCancelled, "")); EXPECT_FALSE(thing.ok()); - EXPECT_EQ(thing.status().code(), ::mediapipe::StatusCode::kCancelled); + EXPECT_EQ(thing.status().code(), mediapipe::StatusCode::kCancelled); } TEST(StatusOr, TestValueCtor) { const int kI = 4; - const ::mediapipe::StatusOr thing(kI); + const mediapipe::StatusOr thing(kI); EXPECT_TRUE(thing.ok()); EXPECT_EQ(kI, thing.ValueOrDie()); } TEST(StatusOr, TestCopyCtorStatusOk) { const int kI = 4; - const ::mediapipe::StatusOr original(kI); - const ::mediapipe::StatusOr copy(original); + const mediapipe::StatusOr original(kI); + const mediapipe::StatusOr copy(original); EXPECT_EQ(copy.status(), original.status()); EXPECT_EQ(original.ValueOrDie(), copy.ValueOrDie()); } TEST(StatusOr, TestCopyCtorStatusNotOk) { - ::mediapipe::StatusOr original( - Status(::mediapipe::StatusCode::kCancelled, "")); - ::mediapipe::StatusOr copy(original); + mediapipe::StatusOr original( + Status(mediapipe::StatusCode::kCancelled, "")); + mediapipe::StatusOr copy(original); EXPECT_EQ(copy.status(), original.status()); } TEST(StatusOr, TestCopyCtorNonAssignable) { const int kI = 4; CopyNoAssign value(kI); - ::mediapipe::StatusOr original(value); - ::mediapipe::StatusOr copy(original); + mediapipe::StatusOr original(value); + mediapipe::StatusOr copy(original); EXPECT_EQ(copy.status(), original.status()); EXPECT_EQ(original.ValueOrDie().foo_, copy.ValueOrDie().foo_); } TEST(StatusOr, TestCopyCtorStatusOKConverting) { const int kI = 4; - ::mediapipe::StatusOr original(kI); - ::mediapipe::StatusOr copy(original); + mediapipe::StatusOr original(kI); + mediapipe::StatusOr copy(original); EXPECT_EQ(copy.status(), original.status()); EXPECT_DOUBLE_EQ(original.ValueOrDie(), copy.ValueOrDie()); } TEST(StatusOr, TestCopyCtorStatusNotOkConverting) { - ::mediapipe::StatusOr original( - Status(::mediapipe::StatusCode::kCancelled, "")); - ::mediapipe::StatusOr copy(original); + mediapipe::StatusOr original( + Status(mediapipe::StatusCode::kCancelled, "")); + mediapipe::StatusOr copy(original); EXPECT_EQ(copy.status(), original.status()); } TEST(StatusOr, TestAssignmentStatusOk) { const int kI = 4; - ::mediapipe::StatusOr source(kI); - ::mediapipe::StatusOr target; + mediapipe::StatusOr source(kI); + mediapipe::StatusOr target; target = source; EXPECT_EQ(target.status(), source.status()); EXPECT_EQ(source.ValueOrDie(), target.ValueOrDie()); } TEST(StatusOr, TestAssignmentStatusNotOk) { - ::mediapipe::StatusOr source( - Status(::mediapipe::StatusCode::kCancelled, "")); - ::mediapipe::StatusOr target; + mediapipe::StatusOr source( + Status(mediapipe::StatusCode::kCancelled, "")); + mediapipe::StatusOr target; target = source; EXPECT_EQ(target.status(), source.status()); } TEST(StatusOr, TestStatus) { - 
::mediapipe::StatusOr good(4); + mediapipe::StatusOr good(4); EXPECT_TRUE(good.ok()); - ::mediapipe::StatusOr bad( - Status(::mediapipe::StatusCode::kCancelled, "")); + mediapipe::StatusOr bad(Status(mediapipe::StatusCode::kCancelled, "")); EXPECT_FALSE(bad.ok()); - EXPECT_EQ(bad.status(), Status(::mediapipe::StatusCode::kCancelled, "")); + EXPECT_EQ(bad.status(), Status(mediapipe::StatusCode::kCancelled, "")); } TEST(StatusOr, TestValue) { const int kI = 4; - ::mediapipe::StatusOr thing(kI); + mediapipe::StatusOr thing(kI); EXPECT_EQ(kI, thing.ValueOrDie()); } TEST(StatusOr, TestValueConst) { const int kI = 4; - const ::mediapipe::StatusOr thing(kI); + const mediapipe::StatusOr thing(kI); EXPECT_EQ(kI, thing.ValueOrDie()); } TEST(StatusOrDeathTest, TestValueNotOk) { - ::mediapipe::StatusOr thing( - ::mediapipe::Status(::mediapipe::StatusCode::kCancelled, "cancelled")); + mediapipe::StatusOr thing( + mediapipe::Status(mediapipe::StatusCode::kCancelled, "cancelled")); EXPECT_DEATH(thing.ValueOrDie(), "cancelled"); } TEST(StatusOrDeathTest, TestValueNotOkConst) { - const ::mediapipe::StatusOr thing( - ::mediapipe::Status(::mediapipe::StatusCode::kUnknown, "")); + const mediapipe::StatusOr thing( + mediapipe::Status(mediapipe::StatusCode::kUnknown, "")); EXPECT_DEATH(thing.ValueOrDie(), ""); } TEST(StatusOr, TestPointerDefaultCtor) { - ::mediapipe::StatusOr thing; + mediapipe::StatusOr thing; EXPECT_FALSE(thing.ok()); - EXPECT_EQ(thing.status().code(), ::mediapipe::StatusCode::kUnknown); + EXPECT_EQ(thing.status().code(), mediapipe::StatusCode::kUnknown); } TEST(StatusOrDeathTest, TestPointerDefaultCtorValue) { - ::mediapipe::StatusOr thing; + mediapipe::StatusOr thing; EXPECT_DEATH(thing.ValueOrDie(), ""); } TEST(StatusOr, TestPointerStatusCtor) { - ::mediapipe::StatusOr thing( - Status(::mediapipe::StatusCode::kCancelled, "")); + mediapipe::StatusOr thing( + Status(mediapipe::StatusCode::kCancelled, "")); EXPECT_FALSE(thing.ok()); - EXPECT_EQ(thing.status(), Status(::mediapipe::StatusCode::kCancelled, "")); + EXPECT_EQ(thing.status(), Status(mediapipe::StatusCode::kCancelled, "")); } TEST(StatusOr, TestPointerValueCtor) { const int kI = 4; - ::mediapipe::StatusOr thing(&kI); + mediapipe::StatusOr thing(&kI); EXPECT_TRUE(thing.ok()); EXPECT_EQ(&kI, thing.ValueOrDie()); } TEST(StatusOr, TestPointerCopyCtorStatusOk) { const int kI = 0; - ::mediapipe::StatusOr original(&kI); - ::mediapipe::StatusOr copy(original); + mediapipe::StatusOr original(&kI); + mediapipe::StatusOr copy(original); EXPECT_EQ(copy.status(), original.status()); EXPECT_EQ(original.ValueOrDie(), copy.ValueOrDie()); } TEST(StatusOr, TestPointerCopyCtorStatusNotOk) { - ::mediapipe::StatusOr original( - Status(::mediapipe::StatusCode::kCancelled, "")); - ::mediapipe::StatusOr copy(original); + mediapipe::StatusOr original( + Status(mediapipe::StatusCode::kCancelled, "")); + mediapipe::StatusOr copy(original); EXPECT_EQ(copy.status(), original.status()); } TEST(StatusOr, TestPointerCopyCtorStatusOKConverting) { Derived derived; - ::mediapipe::StatusOr original(&derived); - ::mediapipe::StatusOr copy(original); + mediapipe::StatusOr original(&derived); + mediapipe::StatusOr copy(original); EXPECT_EQ(copy.status(), original.status()); EXPECT_EQ(static_cast(original.ValueOrDie()), copy.ValueOrDie()); } TEST(StatusOr, TestPointerCopyCtorStatusNotOkConverting) { - ::mediapipe::StatusOr original( - ::mediapipe::Status(::mediapipe::StatusCode::kCancelled, "")); - ::mediapipe::StatusOr copy(original); + mediapipe::StatusOr original( + 
mediapipe::Status(mediapipe::StatusCode::kCancelled, "")); + mediapipe::StatusOr copy(original); EXPECT_EQ(copy.status(), original.status()); } TEST(StatusOr, TestPointerAssignmentStatusOk) { const int kI = 0; - ::mediapipe::StatusOr source(&kI); - ::mediapipe::StatusOr target; + mediapipe::StatusOr source(&kI); + mediapipe::StatusOr target; target = source; EXPECT_EQ(target.status(), source.status()); EXPECT_EQ(source.ValueOrDie(), target.ValueOrDie()); } TEST(StatusOr, TestPointerAssignmentStatusNotOk) { - ::mediapipe::StatusOr source( - ::mediapipe::Status(::mediapipe::StatusCode::kCancelled, "")); - ::mediapipe::StatusOr target; + mediapipe::StatusOr source( + mediapipe::Status(mediapipe::StatusCode::kCancelled, "")); + mediapipe::StatusOr target; target = source; EXPECT_EQ(target.status(), source.status()); } TEST(StatusOr, TestPointerStatus) { const int kI = 0; - ::mediapipe::StatusOr good(&kI); + mediapipe::StatusOr good(&kI); EXPECT_TRUE(good.ok()); - ::mediapipe::StatusOr bad( - ::mediapipe::Status(::mediapipe::StatusCode::kCancelled, "")); + mediapipe::StatusOr bad( + mediapipe::Status(mediapipe::StatusCode::kCancelled, "")); EXPECT_EQ(bad.status(), - ::mediapipe::Status(::mediapipe::StatusCode::kCancelled, "")); + mediapipe::Status(mediapipe::StatusCode::kCancelled, "")); } TEST(StatusOr, TestPointerValue) { const int kI = 0; - ::mediapipe::StatusOr thing(&kI); + mediapipe::StatusOr thing(&kI); EXPECT_EQ(&kI, thing.ValueOrDie()); } TEST(StatusOr, TestPointerValueConst) { const int kI = 0; - const ::mediapipe::StatusOr thing(&kI); + const mediapipe::StatusOr thing(&kI); EXPECT_EQ(&kI, thing.ValueOrDie()); } TEST(StatusOrDeathTest, TestPointerValueNotOk) { - ::mediapipe::StatusOr thing( - ::mediapipe::Status(::mediapipe::StatusCode::kCancelled, "cancelled")); + mediapipe::StatusOr thing( + mediapipe::Status(mediapipe::StatusCode::kCancelled, "cancelled")); EXPECT_DEATH(thing.ValueOrDie(), "cancelled"); } TEST(StatusOrDeathTest, TestPointerValueNotOkConst) { - const ::mediapipe::StatusOr thing( - ::mediapipe::Status(::mediapipe::StatusCode::kCancelled, "cancelled")); + const mediapipe::StatusOr thing( + mediapipe::Status(mediapipe::StatusCode::kCancelled, "cancelled")); EXPECT_DEATH(thing.ValueOrDie(), "cancelled"); } diff --git a/mediapipe/framework/deps/vector.h b/mediapipe/framework/deps/vector.h index 0ecf3f2dd..ae6fa0068 100644 --- a/mediapipe/framework/deps/vector.h +++ b/mediapipe/framework/deps/vector.h @@ -313,9 +313,9 @@ VT2 operator/(const K& k, const BasicVector& a) { // ====================================================================== template class Vector2 - : public ::mediapipe::deps::internal_vector::BasicVector { + : public mediapipe::deps::internal_vector::BasicVector { private: - using Base = ::mediapipe::deps::internal_vector::BasicVector<::Vector2, T, 2>; + using Base = mediapipe::deps::internal_vector::BasicVector<::Vector2, T, 2>; using VType = T; public: @@ -383,9 +383,9 @@ class Vector2 template class Vector3 - : public ::mediapipe::deps::internal_vector::BasicVector { + : public mediapipe::deps::internal_vector::BasicVector { private: - using Base = ::mediapipe::deps::internal_vector::BasicVector<::Vector3, T, 3>; + using Base = mediapipe::deps::internal_vector::BasicVector<::Vector3, T, 3>; using VType = T; public: @@ -479,9 +479,9 @@ class Vector3 template class Vector4 - : public ::mediapipe::deps::internal_vector::BasicVector { + : public mediapipe::deps::internal_vector::BasicVector { private: - using Base = 
::mediapipe::deps::internal_vector::BasicVector<::Vector4, T, 4>;
+  using Base = mediapipe::deps::internal_vector::BasicVector<::Vector4, T, 4>;
   using VType = T;
 
  public:
diff --git a/mediapipe/framework/executor.h b/mediapipe/framework/executor.h
index 1ecc88640..50eb9854a 100644
--- a/mediapipe/framework/executor.h
+++ b/mediapipe/framework/executor.h
@@ -48,7 +48,7 @@ class Executor {
   // A registered Executor subclass must implement the static factory method
   // Create.  The Executor subclass cannot be registered without it.
   //
-  //   static ::mediapipe::StatusOr<Executor*> Create(
+  //   static mediapipe::StatusOr<Executor*> Create(
   //       const MediaPipeOptions& extendable_options);
   //
   // Create validates extendable_options, then calls the constructor, and
@@ -65,14 +65,13 @@ class Executor {
   virtual void Schedule(std::function<void()> task) = 0;
 };
 
-using ExecutorRegistry = GlobalFactoryRegistry<::mediapipe::StatusOr<Executor*>,
+using ExecutorRegistry = GlobalFactoryRegistry<mediapipe::StatusOr<Executor*>,
                                                const MediaPipeOptions&>;
 
 // Macro for registering the executor.
-#define REGISTER_EXECUTOR(name)                                      \
-  REGISTER_FACTORY_FUNCTION_QUALIFIED(::mediapipe::ExecutorRegistry, \
-                                      executor_registration, name,   \
-                                      name::Create)
+#define REGISTER_EXECUTOR(name)                                              \
+  REGISTER_FACTORY_FUNCTION_QUALIFIED(                                       \
+      mediapipe::ExecutorRegistry, executor_registration, name, name::Create)
 
 }  // namespace mediapipe
diff --git a/mediapipe/framework/executor_external_build_test.cc b/mediapipe/framework/executor_external_build_test.cc
index 12eb6d377..bf62a7b27 100644
--- a/mediapipe/framework/executor_external_build_test.cc
+++ b/mediapipe/framework/executor_external_build_test.cc
@@ -29,7 +29,7 @@ namespace {
 // NOTE: If we need to update this class, that means there is a
 // backward-incompatible change in the MediaPipe API and MediaPipe clients also
 // need to update their mediapipe::Executor subclasses.
-class MyExecutor : public ::mediapipe::Executor {
+class MyExecutor : public mediapipe::Executor {
  public:
   MyExecutor();
   ~MyExecutor() override;
@@ -37,21 +37,21 @@
   // To verify a mediapipe::Executor subclass outside the mediapipe namespace
   // can override any method, override every method in the mediapipe::Executor
   // interface.
-  void AddTask(::mediapipe::TaskQueue* task_queue) override;
+  void AddTask(mediapipe::TaskQueue* task_queue) override;
   void Schedule(std::function<void()> task) override;
 
  private:
-  std::unique_ptr<::mediapipe::ThreadPool> thread_pool_;
+  std::unique_ptr<mediapipe::ThreadPool> thread_pool_;
 };
 
 MyExecutor::MyExecutor() {
-  thread_pool_ = absl::make_unique<::mediapipe::ThreadPool>("my_executor", 1);
+  thread_pool_ = absl::make_unique<mediapipe::ThreadPool>("my_executor", 1);
   thread_pool_->StartWorkers();
 }
 
 MyExecutor::~MyExecutor() { thread_pool_.reset(nullptr); }
 
-void MyExecutor::AddTask(::mediapipe::TaskQueue* task_queue) {
+void MyExecutor::AddTask(mediapipe::TaskQueue* task_queue) {
   thread_pool_->Schedule([task_queue] { task_queue->RunNextTask(); });
 }
 
@@ -59,7 +59,7 @@ void MyExecutor::Schedule(std::function<void()> task) {
   thread_pool_->Schedule(std::move(task));
 }
 
-class NoOpTaskQueue : public ::mediapipe::TaskQueue {
+class NoOpTaskQueue : public mediapipe::TaskQueue {
  public:
   // Returns the number of times RunNextTask() was called.
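For reference, a subclass plugs into the registry rewritten above via REGISTER_EXECUTOR; the Create signature follows the comment in executor.h. InlineExecutor is a hypothetical sketch, not part of this change:

    class InlineExecutor : public mediapipe::Executor {
     public:
      static mediapipe::StatusOr<mediapipe::Executor*> Create(
          const mediapipe::MediaPipeOptions& extendable_options) {
        return new InlineExecutor();
      }
      // Runs every task immediately on the calling thread.
      void Schedule(std::function<void()> task) override { task(); }
    };
    REGISTER_EXECUTOR(InlineExecutor);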
int call_count() const { return call_count_; } diff --git a/mediapipe/framework/formats/classification.proto b/mediapipe/framework/formats/classification.proto index dea6c5d3c..328d9ecb5 100644 --- a/mediapipe/framework/formats/classification.proto +++ b/mediapipe/framework/formats/classification.proto @@ -20,6 +20,8 @@ syntax = "proto2"; package mediapipe; +option objc_class_prefix = "MediaPipe"; + message Classification { // The index of the class in the corresponding label map. optional int32 index = 1; diff --git a/mediapipe/framework/formats/landmark.proto b/mediapipe/framework/formats/landmark.proto index 0b4106ef8..3cb77e148 100644 --- a/mediapipe/framework/formats/landmark.proto +++ b/mediapipe/framework/formats/landmark.proto @@ -27,17 +27,18 @@ message Landmark { optional float y = 2; optional float z = 3; - // Landmark visibility. Float score of whether landmark is visible or occluded - // by other objects. Landmark considered as invisible also if it is - // not present on the screen (out of scene bounds). - // Depending on the model, visibility value is either a + // Landmark visibility. Should stay unset if not supported. + // Float score of whether landmark is visible or occluded by other objects. + // Landmark considered as invisible also if it is not present on the screen + // (out of scene bounds). Depending on the model, visibility value is either a // sigmoid or an argument of sigmoid. optional float visibility = 4; - // Landmark presence. Float score of whether landmark is present on the scene - // (located within scene bounds). - // Depending on the model, presence value is either a result of sigmoid - // or an argument of sigmoid function to get landmark presence probability. + // Landmark presence. Should stay unset if not supported. + // Float score of whether landmark is present on the scene (located within + // scene bounds). Depending on the model, presence value is either a result of + // sigmoid or an argument of sigmoid function to get landmark presence + // probability. optional float presence = 5; } diff --git a/mediapipe/framework/formats/location.cc b/mediapipe/framework/formats/location.cc index 2ce5aed00..9a9b83233 100644 --- a/mediapipe/framework/formats/location.cc +++ b/mediapipe/framework/formats/location.cc @@ -72,13 +72,13 @@ std::unique_ptr MaskToMat(const LocationData::BinaryMask& mask) { } return image; } -::mediapipe::StatusOr> RectangleToMat( +mediapipe::StatusOr> RectangleToMat( int image_width, int image_height, const Rectangle_i& rect) { // These checks prevent undefined behavior caused when setting memory for // rectangles whose edges lie outside image edges. 
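Circling back to the landmark.proto change above: visibility and presence now stay unset when the producing model does not emit them, so consumers should distinguish "unset" from a low score. A sketch of that check; the 0.5f cutoff is an arbitrary illustration:

    // Unset means "no information", not "invisible"; any fixed threshold
    // must match the producing model (sigmoid output vs. raw logit).
    bool LikelyVisible(const mediapipe::Landmark& landmark) {
      return !landmark.has_visibility() || landmark.visibility() > 0.5f;
    }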
if (rect.ymin() < 0 || rect.xmin() < 0 || rect.xmax() > image_width || rect.ymax() > image_height) { - return ::mediapipe::InvalidArgumentError(absl::Substitute( + return mediapipe::InvalidArgumentError(absl::Substitute( "Rectangle must be bounded by image boundaries.\nImage Width: " "$0\nImage Height: $1\nRectangle: [($2, $3), ($4, $5)]", image_width, image_height, rect.xmin(), rect.ymin(), rect.xmax(), @@ -564,7 +564,6 @@ Rectangle_f Location::ConvertToRelativeBBox(int image_width, switch (location_data_.format()) { case LocationData::GLOBAL: { return Rectangle_f(0.0f, 0.0f, 1.0f, 1.0f); - break; } case LocationData::BOUNDING_BOX: { const auto& box = location_data_.bounding_box(); diff --git a/mediapipe/framework/formats/tensor.cc b/mediapipe/framework/formats/tensor.cc index 985ae07b9..9c66c242a 100644 --- a/mediapipe/framework/formats/tensor.cc +++ b/mediapipe/framework/formats/tensor.cc @@ -132,9 +132,6 @@ void Tensor::AllocateMtlBuffer(id device) const { #if MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_30 Tensor::OpenGlTexture2dView Tensor::GetOpenGlTexture2dReadView() const { - LOG_IF(FATAL, BhwcDepthFromShape(shape_) > 4) - << "OpenGlTexture2d supports depth <= 4. Current depth is " - << BhwcDepthFromShape(shape_); LOG_IF(FATAL, valid_ == kValidNone) << "Tensor must be written prior to read from."; LOG_IF(FATAL, !(valid_ & (kValidCpu | kValidOpenGlTexture2d))) @@ -145,10 +142,11 @@ Tensor::OpenGlTexture2dView Tensor::GetOpenGlTexture2dReadView() const { if (!(valid_ & kValidOpenGlTexture2d)) { uint8_t* buffer; std::unique_ptr temp_buffer; - if (BhwcDepthFromShape(shape_) == 4) { + if (BhwcDepthFromShape(shape_) % 4 == 0) { + // No padding exists because number of channels are multiple of 4. buffer = reinterpret_cast(cpu_buffer_); } else { - const int padded_depth = 4; + const int padded_depth = (BhwcDepthFromShape(shape_) + 3) / 4 * 4; const int padded_depth_size = padded_depth * element_size(); const int padded_size = BhwcBatchFromShape(shape_) * BhwcHeightFromShape(shape_) * @@ -194,9 +192,14 @@ void Tensor::AllocateOpenGlTexture2d() const { // supported from floating point textures. 
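The depth padding introduced in the tensor.cc hunk above generalizes the old depth <= 4 special case: channels pack four per RGBA texel, so only depths that are not multiples of 4 need a padded staging buffer. The arithmetic in isolation, with hypothetical helper names:

    // Round a channel count up to whole RGBA pixels.
    int PixelsPerDepth(int depth) { return (depth + 3) / 4; }
    int PaddedDepth(int depth) { return PixelsPerDepth(depth) * 4; }
    // PaddedDepth(3) == 4, PaddedDepth(4) == 4, PaddedDepth(6) == 8.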
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST); glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST); - glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA32F, BhwcWidthFromShape(shape_), + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_BASE_LEVEL, 0); + glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAX_LEVEL, 0); + const int pixels_per_depth = (BhwcDepthFromShape(shape_) + 3) / 4; + const int width = BhwcWidthFromShape(shape_) * pixels_per_depth; + glTexStorage2D(GL_TEXTURE_2D, 1, GL_RGBA32F, width, BhwcHeightFromShape(shape_)); glBindTexture(GL_TEXTURE_2D, 0); + glGenFramebuffers(1, &frame_buffer_); } } #endif // MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_30 @@ -265,6 +268,8 @@ void Tensor::Move(Tensor* src) { #if MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_30 gl_context_ = std::move(src->gl_context_); + frame_buffer_ = src->frame_buffer_; + src->frame_buffer_ = GL_INVALID_INDEX; opengl_texture2d_ = src->opengl_texture2d_; src->opengl_texture2d_ = GL_INVALID_INDEX; #if MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_31 @@ -298,9 +303,13 @@ void Tensor::Invalidate() { #if MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_30 if (opengl_texture2d_ != GL_INVALID_INDEX) { GLuint opengl_texture2d = opengl_texture2d_; - gl_context_->RunWithoutWaiting( - [opengl_texture2d]() { glDeleteTextures(1, &opengl_texture2d); }); + GLuint frame_buffer = frame_buffer_; + gl_context_->RunWithoutWaiting([opengl_texture2d, frame_buffer]() { + glDeleteTextures(1, &opengl_texture2d); + glDeleteFramebuffers(1, &frame_buffer); + }); opengl_texture2d_ = GL_INVALID_INDEX; + frame_buffer_ = GL_INVALID_INDEX; } #if MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_31 if (opengl_buffer_ != GL_INVALID_INDEX) { @@ -347,53 +356,34 @@ Tensor::CpuReadView Tensor::GetCpuReadView() const { // yet. if (valid_ & kValidOpenGlTexture2d) { gl_context_->Run([this]() { - GLint current_fbo; - glGetIntegerv(GL_FRAMEBUFFER_BINDING, ¤t_fbo); + const int pixels_per_depth = (BhwcDepthFromShape(shape_) + 3) / 4; + const int width = BhwcWidthFromShape(shape_) * pixels_per_depth; uint8_t* buffer; std::unique_ptr temp_buffer; - if (BhwcDepthFromShape(shape_) == 4) { + if (BhwcDepthFromShape(shape_) % 4 == 0) { buffer = reinterpret_cast(cpu_buffer_); } else { - const int padded_depth = (BhwcDepthFromShape(shape_) + 3) / 4 * 4; - const int padded_size = - BhwcBatchFromShape(shape_) * BhwcHeightFromShape(shape_) * - BhwcWidthFromShape(shape_) * padded_depth * element_size(); + const int padded_size = BhwcBatchFromShape(shape_) * + BhwcHeightFromShape(shape_) * width * + pixels_per_depth * 4 * element_size(); temp_buffer = absl::make_unique(padded_size); buffer = temp_buffer.get(); } - GLint color_attachment_name; - glGetFramebufferAttachmentParameteriv( - GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, - GL_FRAMEBUFFER_ATTACHMENT_OBJECT_NAME, &color_attachment_name); - if (color_attachment_name != opengl_texture2d_) { - // Save the viewport. Note that we assume that the color attachment is - // a GL_TEXTURE_2D texture. - GLint viewport[4]; - glGetIntegerv(GL_VIEWPORT, viewport); + glBindFramebuffer(GL_FRAMEBUFFER, frame_buffer_); + glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, + GL_TEXTURE_2D, opengl_texture2d_, 0); + glPixelStorei(GL_PACK_ROW_LENGTH, width); + glPixelStorei(GL_PACK_ALIGNMENT, 1); + glReadPixels(0, 0, width, BhwcHeightFromShape(shape_), GL_RGBA, + GL_FLOAT, buffer); - // Set the data from GLTexture object. 
- glViewport(0, 0, BhwcWidthFromShape(shape_), - BhwcHeightFromShape(shape_)); - glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, - GL_TEXTURE_2D, opengl_texture2d_, 0); - glReadPixels(0, 0, BhwcWidthFromShape(shape_), - BhwcHeightFromShape(shape_), GL_RGBA, GL_FLOAT, buffer); - - // Restore from the saved viewport and color attachment name. - glViewport(viewport[0], viewport[1], viewport[2], viewport[3]); - glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, - GL_TEXTURE_2D, color_attachment_name, 0); - } else { - glReadPixels(0, 0, BhwcWidthFromShape(shape_), - BhwcHeightFromShape(shape_), GL_RGBA, GL_FLOAT, buffer); - } - if (BhwcDepthFromShape(shape_) < 4) { + if (BhwcDepthFromShape(shape_) % 4) { uint8_t* dest_buffer = reinterpret_cast(cpu_buffer_); const int actual_depth_size = BhwcDepthFromShape(shape_) * element_size(); - const int padded_depth_size = 4 * element_size(); + const int padded_depth_size = pixels_per_depth * 4 * element_size(); for (int e = 0; e < BhwcBatchFromShape(shape_) * BhwcHeightFromShape(shape_) * BhwcWidthFromShape(shape_); diff --git a/mediapipe/framework/formats/tensor.h b/mediapipe/framework/formats/tensor.h index b6a55145d..b3cfa5de3 100644 --- a/mediapipe/framework/formats/tensor.h +++ b/mediapipe/framework/formats/tensor.h @@ -253,6 +253,7 @@ class Tensor { #if MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_30 mutable std::shared_ptr gl_context_; mutable GLuint opengl_texture2d_ = GL_INVALID_INDEX; + mutable GLuint frame_buffer_ = GL_INVALID_INDEX; void AllocateOpenGlTexture2d() const; #if MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_31 mutable GLuint opengl_buffer_ = GL_INVALID_INDEX; diff --git a/mediapipe/framework/graph_output_stream.cc b/mediapipe/framework/graph_output_stream.cc index c50ed2ff6..8e99a050f 100644 --- a/mediapipe/framework/graph_output_stream.cc +++ b/mediapipe/framework/graph_output_stream.cc @@ -18,7 +18,7 @@ namespace mediapipe { namespace internal { -::mediapipe::Status GraphOutputStream::Initialize( +mediapipe::Status GraphOutputStream::Initialize( const std::string& stream_name, const PacketType* packet_type, OutputStreamManager* output_stream_manager) { RET_CHECK(output_stream_manager); @@ -38,20 +38,20 @@ namespace internal { MP_RETURN_IF_ERROR(input_stream_handler_->InitializeInputStreamManagers( input_stream_.get())); output_stream_manager->AddMirror(input_stream_handler_.get(), id); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void GraphOutputStream::PrepareForRun( std::function notification_callback, - std::function error_callback) { + std::function error_callback) { input_stream_handler_->PrepareForRun( /*headers_ready_callback=*/[] {}, std::move(notification_callback), /*schedule_callback=*/nullptr, std::move(error_callback)); } -::mediapipe::Status OutputStreamObserver::Initialize( +mediapipe::Status OutputStreamObserver::Initialize( const std::string& stream_name, const PacketType* packet_type, - std::function<::mediapipe::Status(const Packet&)> packet_callback, + std::function packet_callback, OutputStreamManager* output_stream_manager) { RET_CHECK(output_stream_manager); @@ -60,7 +60,7 @@ void GraphOutputStream::PrepareForRun( output_stream_manager); } -::mediapipe::Status OutputStreamObserver::Notify() { +mediapipe::Status OutputStreamObserver::Notify() { while (true) { bool empty; Timestamp min_timestamp = input_stream_->MinTimestampOrBound(&empty); @@ -76,10 +76,10 @@ void GraphOutputStream::PrepareForRun( num_packets_dropped, input_stream_->Name()); 
MP_RETURN_IF_ERROR(packet_callback_(packet)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status OutputStreamPollerImpl::Initialize( +mediapipe::Status OutputStreamPollerImpl::Initialize( const std::string& stream_name, const PacketType* packet_type, std::function queue_size_callback, OutputStreamManager* output_stream_manager) { @@ -87,12 +87,12 @@ void GraphOutputStream::PrepareForRun( output_stream_manager)); input_stream_handler_->SetQueueSizeCallbacks(queue_size_callback, queue_size_callback); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void OutputStreamPollerImpl::PrepareForRun( std::function notification_callback, - std::function error_callback) { + std::function error_callback) { input_stream_handler_->PrepareForRun( /*headers_ready_callback=*/[] {}, std::move(notification_callback), /*schedule_callback=*/nullptr, std::move(error_callback)); @@ -116,11 +116,11 @@ void OutputStreamPollerImpl::SetMaxQueueSize(int queue_size) { int OutputStreamPollerImpl::QueueSize() { return input_stream_->QueueSize(); } -::mediapipe::Status OutputStreamPollerImpl::Notify() { +mediapipe::Status OutputStreamPollerImpl::Notify() { mutex_.Lock(); handler_condvar_.Signal(); mutex_.Unlock(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void OutputStreamPollerImpl::NotifyError() { diff --git a/mediapipe/framework/graph_output_stream.h b/mediapipe/framework/graph_output_stream.h index 15f38c0ec..06f26fe7b 100644 --- a/mediapipe/framework/graph_output_stream.h +++ b/mediapipe/framework/graph_output_stream.h @@ -50,18 +50,18 @@ class GraphOutputStream { // input stream and attaches the input stream to an output stream as // the mirror for observation/polling. Ownership of output_stream_manager // is not transferred to the graph output stream object. - ::mediapipe::Status Initialize(const std::string& stream_name, - const PacketType* packet_type, - OutputStreamManager* output_stream_manager); + mediapipe::Status Initialize(const std::string& stream_name, + const PacketType* packet_type, + OutputStreamManager* output_stream_manager); // Installs callbacks into its GraphOutputStreamHandler. virtual void PrepareForRun( std::function notification_callback, - std::function error_callback); + std::function error_callback); // Notifies the graph output stream of new packets emitted by the output // stream. - virtual ::mediapipe::Status Notify() = 0; + virtual mediapipe::Status Notify() = 0; // Notifies the graph output stream of the errors in the calculator graph. virtual void NotifyError() = 0; @@ -110,21 +110,21 @@ class OutputStreamObserver : public GraphOutputStream { public: virtual ~OutputStreamObserver() {} - ::mediapipe::Status Initialize( + mediapipe::Status Initialize( const std::string& stream_name, const PacketType* packet_type, - std::function<::mediapipe::Status(const Packet&)> packet_callback, + std::function packet_callback, OutputStreamManager* output_stream_manager); // Notifies the observer of new packets emitted by the observed // output stream. - ::mediapipe::Status Notify() override; + mediapipe::Status Notify() override; // Notifies the observer of the errors in the calculator graph. void NotifyError() override {} private: // Invoked on every packet emitted by the observed output stream. 
- std::function<::mediapipe::Status(const Packet&)> packet_callback_; + std::function packet_callback_; }; // OutputStreamPollerImpl that returns packets to the caller via @@ -134,14 +134,14 @@ class OutputStreamPollerImpl : public GraphOutputStream { virtual ~OutputStreamPollerImpl() {} // Initializes an OutputStreamPollerImpl. - ::mediapipe::Status Initialize( + mediapipe::Status Initialize( const std::string& stream_name, const PacketType* packet_type, std::function queue_size_callback, OutputStreamManager* output_stream_manager); void PrepareForRun( std::function notification_callback, - std::function error_callback) override; + std::function error_callback) override; // Resets graph_has_error_ and cleans the internal packet queue. void Reset(); @@ -152,7 +152,7 @@ class OutputStreamPollerImpl : public GraphOutputStream { int QueueSize(); // Notifies the poller of new packets emitted by the output stream. - ::mediapipe::Status Notify() override; + mediapipe::Status Notify() override; // Notifies the poller of the errors in the calculator graph. void NotifyError() override; diff --git a/mediapipe/framework/graph_service_test.cc b/mediapipe/framework/graph_service_test.cc index 0cb79e933..31cd2aa77 100644 --- a/mediapipe/framework/graph_service_test.cc +++ b/mediapipe/framework/graph_service_test.cc @@ -48,7 +48,7 @@ class GraphServiceTest : public ::testing::Test { protected: void SetUp() override { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "in" node { calculator: "TestServiceCalculator" @@ -60,7 +60,7 @@ class GraphServiceTest : public ::testing::Test { MP_ASSERT_OK( graph_.ObserveOutputStream("out", [this](const Packet& packet) { output_packets_.push_back(packet); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); } @@ -98,7 +98,7 @@ TEST_F(GraphServiceTest, UseInCalculator) { TEST_F(GraphServiceTest, Contract) { const CalculatorGraphConfig::Node node = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( calculator: "TestServiceCalculator" input_stream: "in" output_stream: "out" diff --git a/mediapipe/framework/graph_validation.h b/mediapipe/framework/graph_validation.h index a3332a16e..63ab02bd0 100644 --- a/mediapipe/framework/graph_validation.h +++ b/mediapipe/framework/graph_validation.h @@ -28,7 +28,7 @@ namespace mediapipe { class GraphValidation { public: // Validates the specified CalculatorGraphConfig. - ::mediapipe::Status Validate( + mediapipe::Status Validate( const CalculatorGraphConfig& config, const std::map& side_packets = {}) { return graph_.Initialize(config, side_packets); @@ -40,7 +40,7 @@ class GraphValidation { // CalclatorGraphConfig.type. A subgraph can be validated directly by // specifying its type in |graph_type|. A template graph can be validated // directly by specifying its template arguments in |arguments|. 
- ::mediapipe::Status Validate( + mediapipe::Status Validate( const std::vector& configs, const std::vector& templates, const std::map& side_packets = {}, diff --git a/mediapipe/framework/graph_validation_test.cc b/mediapipe/framework/graph_validation_test.cc index 632ecac4c..57af7cdf3 100644 --- a/mediapipe/framework/graph_validation_test.cc +++ b/mediapipe/framework/graph_validation_test.cc @@ -66,7 +66,7 @@ TEST(GraphValidationTest, InitializeGraphFromProtos) { graph_1.Initialize({config_1, config_2}, {}, {}, "PassThroughGraph")); EXPECT_THAT( graph_1.Config(), - EqualsProto(::mediapipe::ParseTextProtoOrDie(R"( + EqualsProto(mediapipe::ParseTextProtoOrDie(R"( type: "PassThroughGraph" input_stream: "INPUT:stream_1" output_stream: "OUTPUT:stream_2" @@ -84,7 +84,7 @@ TEST(GraphValidationTest, InitializeGraphFromProtos) { MP_EXPECT_OK(graph_2.Initialize({config_1, config_2}, {})); EXPECT_THAT( graph_2.Config(), - EqualsProto(::mediapipe::ParseTextProtoOrDie(R"( + EqualsProto(mediapipe::ParseTextProtoOrDie(R"( input_stream: "INPUT:stream_1" output_stream: "OUTPUT:stream_2" node { @@ -106,9 +106,9 @@ TEST(GraphValidationTest, InitializeGraphFromProtos) { TEST(GraphValidationTest, InitializeGraphFromLinker) { EXPECT_FALSE(SubgraphRegistry::IsRegistered("DubQuadTestSubgraph")); ValidatedGraphConfig builder_1; - ::mediapipe::Status status_1 = + mediapipe::Status status_1 = builder_1.Initialize({}, {}, "DubQuadTestSubgraph"); - EXPECT_EQ(status_1.code(), ::mediapipe::StatusCode::kNotFound); + EXPECT_EQ(status_1.code(), mediapipe::StatusCode::kNotFound); EXPECT_THAT(status_1.message(), testing::HasSubstr( R"(No registered object with name: DubQuadTestSubgraph)")); @@ -174,7 +174,7 @@ TEST(GraphValidationTest, InitializeTemplateFromProtos) { "PassThroughGraph", &options)); EXPECT_THAT( graph_1.Config(), - EqualsProto(::mediapipe::ParseTextProtoOrDie(R"( + EqualsProto(mediapipe::ParseTextProtoOrDie(R"( type: "PassThroughGraph" input_stream: "INPUT:stream_9" output_stream: "OUTPUT:stream_2" @@ -193,7 +193,7 @@ TEST(GraphValidationTest, InitializeTemplateFromProtos) { MP_EXPECT_OK(graph_2.Initialize({config_2}, {config_1})); EXPECT_THAT( graph_2.Config(), - EqualsProto(::mediapipe::ParseTextProtoOrDie(R"( + EqualsProto(mediapipe::ParseTextProtoOrDie(R"( input_stream: "INPUT:stream_1" output_stream: "OUTPUT:stream_2" node { @@ -254,7 +254,7 @@ TEST(GraphValidationTest, OptionalSubgraphStreams) { graph_1.Config(), // The result includes only the requested input and output streams. - EqualsProto(::mediapipe::ParseTextProtoOrDie(R"( + EqualsProto(mediapipe::ParseTextProtoOrDie(R"( input_stream: "INPUT:foo_in" output_stream: "OUTPUT:foo_out" node { @@ -314,7 +314,7 @@ TEST(GraphValidationTest, OptionalSubgraphStreamsMismatched) { GraphValidation validation_1; mediapipe::Status status = validation_1.Validate({config_1, config_2}, {}); - ASSERT_EQ(status.code(), ::mediapipe::StatusCode::kInvalidArgument); + ASSERT_EQ(status.code(), mediapipe::StatusCode::kInvalidArgument); ASSERT_THAT(status.ToString(), testing::HasSubstr( "PassThroughCalculator must use matching tags and indexes")); @@ -323,22 +323,22 @@ TEST(GraphValidationTest, OptionalSubgraphStreamsMismatched) { // A calculator that optionally accepts an input-side-packet. 
class OptionalSideInputTestCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->InputSidePackets().Tag("SIDEINPUT").Set().Optional(); cc->Inputs().Tag("SELECT").Set().Optional(); cc->Inputs().Tag("ENABLE").Set().Optional(); cc->Outputs().Tag("OUTPUT").Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { std::string value("default"); if (cc->InputSidePackets().HasTag("SIDEINPUT")) { value = cc->InputSidePackets().Tag("SIDEINPUT").Get(); } cc->Outputs().Tag("OUTPUT").Add(new std::string(value), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(OptionalSideInputTestCalculator); @@ -374,7 +374,7 @@ TEST(GraphValidationTest, OptionalInputNotProvidedForSubgraphCalculator) { graph_1.Config(), // The expanded graph omits the optional input-side-packet. - EqualsProto(::mediapipe::ParseTextProtoOrDie(R"( + EqualsProto(mediapipe::ParseTextProtoOrDie(R"( input_side_packet: "INPUT:foo_in" output_stream: "OUTPUT:foo_out" node { @@ -431,7 +431,7 @@ TEST(GraphValidationTest, MultipleOptionalInputsForSubgraph) { // The expanded graph includes only the specified input, "SELECT". // Without the fix to RemoveIgnoredStreams(), the expanded graph // includes the wrong input. - EqualsProto(::mediapipe::ParseTextProtoOrDie(R"( + EqualsProto(mediapipe::ParseTextProtoOrDie(R"( input_side_packet: "INPUT:foo_in" input_stream: "SELECT:foo_select" output_stream: "OUTPUT:foo_out" diff --git a/mediapipe/framework/input_side_packet_handler.cc b/mediapipe/framework/input_side_packet_handler.cc index ce43508d2..87517ee5d 100644 --- a/mediapipe/framework/input_side_packet_handler.cc +++ b/mediapipe/framework/input_side_packet_handler.cc @@ -21,11 +21,11 @@ namespace mediapipe { -::mediapipe::Status InputSidePacketHandler::PrepareForRun( +mediapipe::Status InputSidePacketHandler::PrepareForRun( const PacketTypeSet* input_side_packet_types, const std::map& all_side_packets, std::function input_side_packets_ready_callback, - std::function error_callback) { + std::function error_callback) { int missing_input_side_packet_count; prev_input_side_packets_ = std::move(input_side_packets_); ASSIGN_OR_RETURN( @@ -39,7 +39,7 @@ namespace mediapipe { input_side_packets_ready_callback_ = std::move(input_side_packets_ready_callback); error_callback_ = std::move(error_callback); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } bool InputSidePacketHandler::InputSidePacketsChanged() { @@ -49,25 +49,24 @@ bool InputSidePacketHandler::InputSidePacketsChanged() { } void InputSidePacketHandler::Set(CollectionItemId id, const Packet& packet) { - ::mediapipe::Status status = SetInternal(id, packet); + mediapipe::Status status = SetInternal(id, packet); if (!status.ok()) { TriggerErrorCallback(status); } } -::mediapipe::Status InputSidePacketHandler::SetInternal(CollectionItemId id, - const Packet& packet) { +mediapipe::Status InputSidePacketHandler::SetInternal(CollectionItemId id, + const Packet& packet) { RET_CHECK_GT(missing_input_side_packet_count_, 0); Packet& side_packet = input_side_packets_->Get(id); if (!side_packet.IsEmpty()) { - return ::mediapipe::AlreadyExistsErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::AlreadyExistsErrorBuilder(MEDIAPIPE_LOC) << "Input side packet with id " << id << " 
was already set."; } - ::mediapipe::Status result = - input_side_packet_types_->Get(id).Validate(packet); + mediapipe::Status result = input_side_packet_types_->Get(id).Validate(packet); if (!result.ok()) { - return ::mediapipe::StatusBuilder(result, MEDIAPIPE_LOC).SetPrepend() + return mediapipe::StatusBuilder(result, MEDIAPIPE_LOC).SetPrepend() << absl::StrCat( "Packet type mismatch on calculator input side packet with " "id ", @@ -78,11 +77,11 @@ void InputSidePacketHandler::Set(CollectionItemId id, const Packet& packet) { 1, std::memory_order_acq_rel) == 1) { input_side_packets_ready_callback_(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void InputSidePacketHandler::TriggerErrorCallback( - const ::mediapipe::Status& status) const { + const mediapipe::Status& status) const { CHECK(error_callback_); error_callback_(status); } diff --git a/mediapipe/framework/input_side_packet_handler.h b/mediapipe/framework/input_side_packet_handler.h index ecfa2239e..022432414 100644 --- a/mediapipe/framework/input_side_packet_handler.h +++ b/mediapipe/framework/input_side_packet_handler.h @@ -41,11 +41,11 @@ class InputSidePacketHandler { // Resets the input side packet handler and its underlying input side packets // for another run of the graph. - ::mediapipe::Status PrepareForRun( + mediapipe::Status PrepareForRun( const PacketTypeSet* input_side_packet_types, const std::map& all_side_packets, std::function input_side_packets_ready_callback, - std::function error_callback); + std::function error_callback); // Sets a particular input side packet. void Set(CollectionItemId id, const Packet& packet); @@ -63,11 +63,11 @@ class InputSidePacketHandler { private: // Called by Set(). - ::mediapipe::Status SetInternal(CollectionItemId id, const Packet& packet); + mediapipe::Status SetInternal(CollectionItemId id, const Packet& packet); - // Triggers the error callback with ::mediapipe::Status info when an error + // Triggers the error callback with mediapipe::Status info when an error // occurs. 
- void TriggerErrorCallback(const ::mediapipe::Status& status) const; + void TriggerErrorCallback(const mediapipe::Status& status) const; const PacketTypeSet* input_side_packet_types_; @@ -77,7 +77,7 @@ class InputSidePacketHandler { std::atomic missing_input_side_packet_count_{0}; std::function input_side_packets_ready_callback_; - std::function error_callback_; + std::function error_callback_; }; } // namespace mediapipe diff --git a/mediapipe/framework/input_stream_handler.cc b/mediapipe/framework/input_stream_handler.cc index 13a2adc8a..541001d52 100644 --- a/mediapipe/framework/input_stream_handler.cc +++ b/mediapipe/framework/input_stream_handler.cc @@ -24,13 +24,13 @@ namespace mediapipe { using SyncSet = InputStreamHandler::SyncSet; -::mediapipe::Status InputStreamHandler::InitializeInputStreamManagers( +mediapipe::Status InputStreamHandler::InitializeInputStreamManagers( InputStreamManager* flat_input_stream_managers) { for (CollectionItemId id = input_stream_managers_.BeginId(); id < input_stream_managers_.EndId(); ++id) { input_stream_managers_.Get(id) = &flat_input_stream_managers[id.value()]; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } InputStreamManager* InputStreamHandler::GetInputStreamManager( @@ -38,7 +38,7 @@ InputStreamManager* InputStreamHandler::GetInputStreamManager( return input_stream_managers_.Get(id); } -::mediapipe::Status InputStreamHandler::SetupInputShards( +mediapipe::Status InputStreamHandler::SetupInputShards( InputStreamShardSet* input_shards) { RET_CHECK(input_shards); for (CollectionItemId id = input_stream_managers_.BeginId(); @@ -48,7 +48,7 @@ InputStreamManager* InputStreamHandler::GetInputStreamManager( input_shards->Get(id).SetName(&manager->Name()); input_shards->Get(id).SetHeader(manager->Header()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } std::vector> @@ -68,7 +68,7 @@ void InputStreamHandler::PrepareForRun( std::function headers_ready_callback, std::function notification_callback, std::function schedule_callback, - std::function error_callback) { + std::function error_callback) { headers_ready_callback_ = std::move(headers_ready_callback); notification_ = std::move(notification_callback); schedule_callback_ = std::move(schedule_callback); @@ -94,8 +94,7 @@ void InputStreamHandler::SetQueueSizeCallbacks( } void InputStreamHandler::SetHeader(CollectionItemId id, const Packet& header) { - ::mediapipe::Status result = - input_stream_managers_.Get(id)->SetHeader(header); + mediapipe::Status result = input_stream_managers_.Get(id)->SetHeader(header); if (!result.ok()) { error_callback_(result); return; @@ -175,9 +174,9 @@ bool InputStreamHandler::ScheduleInvocations(int max_allowance, } CalculatorContext* default_context = calculator_context_manager_->GetDefaultCalculatorContext(); - ::mediapipe::LogEvent(default_context->GetProfilingContext(), - TraceEvent(TraceEvent::NOT_READY) - .set_node_id(default_context->NodeId())); + mediapipe::LogEvent(default_context->GetProfilingContext(), + TraceEvent(TraceEvent::NOT_READY) + .set_node_id(default_context->NodeId())); break; } else if (node_readiness == NodeReadiness::kReadyForProcess) { CalculatorContext* calculator_context = @@ -193,9 +192,9 @@ bool InputStreamHandler::ScheduleInvocations(int max_allowance, schedule_callback_(calculator_context); ++invocations_scheduled; } - ::mediapipe::LogEvent(calculator_context->GetProfilingContext(), - TraceEvent(TraceEvent::READY_FOR_PROCESS) - .set_node_id(calculator_context->NodeId())); + 
mediapipe::LogEvent(calculator_context->GetProfilingContext(), + TraceEvent(TraceEvent::READY_FOR_PROCESS) + .set_node_id(calculator_context->NodeId())); } else { CHECK(node_readiness == NodeReadiness::kReadyForClose); // If any parallel invocations are in progress or a calculator context has @@ -214,9 +213,9 @@ bool InputStreamHandler::ScheduleInvocations(int max_allowance, schedule_callback_(default_context); ++invocations_scheduled; prepared_context_for_close_ = true; - ::mediapipe::LogEvent(default_context->GetProfilingContext(), - TraceEvent(TraceEvent::READY_FOR_CLOSE) - .set_node_id(default_context->NodeId())); + mediapipe::LogEvent(default_context->GetProfilingContext(), + TraceEvent(TraceEvent::READY_FOR_CLOSE) + .set_node_id(default_context->NodeId())); break; } } @@ -246,12 +245,12 @@ void LogQueuedPackets(CalculatorContext* context, InputStreamManager* stream, .set_input_ts(queue_tail.Timestamp()) .set_stream_id(&stream->Name()) .set_event_data(stream->QueueSize() + 1); - ::mediapipe::LogEvent(context->GetProfilingContext(), - event.set_packet_ts(queue_tail.Timestamp())); + mediapipe::LogEvent(context->GetProfilingContext(), + event.set_packet_ts(queue_tail.Timestamp())); Packet queue_head = stream->QueueHead(); if (!queue_head.IsEmpty()) { - ::mediapipe::LogEvent(context->GetProfilingContext(), - event.set_packet_ts(queue_head.Timestamp())); + mediapipe::LogEvent(context->GetProfilingContext(), + event.set_packet_ts(queue_head.Timestamp())); } } } @@ -261,7 +260,7 @@ void InputStreamHandler::AddPackets(CollectionItemId id, LogQueuedPackets(GetCalculatorContext(calculator_context_manager_), input_stream_managers_.Get(id), packets.back()); bool notify = false; - ::mediapipe::Status result = + mediapipe::Status result = input_stream_managers_.Get(id)->AddPackets(packets, ¬ify); if (!result.ok()) { error_callback_(result); @@ -276,7 +275,7 @@ void InputStreamHandler::MovePackets(CollectionItemId id, LogQueuedPackets(GetCalculatorContext(calculator_context_manager_), input_stream_managers_.Get(id), packets->back()); bool notify = false; - ::mediapipe::Status result = + mediapipe::Status result = input_stream_managers_.Get(id)->MovePackets(packets, ¬ify); if (!result.ok()) { error_callback_(result); @@ -289,7 +288,7 @@ void InputStreamHandler::MovePackets(CollectionItemId id, void InputStreamHandler::SetNextTimestampBound(CollectionItemId id, Timestamp bound) { bool notify = false; - ::mediapipe::Status result = + mediapipe::Status result = input_stream_managers_.Get(id)->SetNextTimestampBound(bound, ¬ify); if (!result.ok()) { error_callback_(result); diff --git a/mediapipe/framework/input_stream_handler.h b/mediapipe/framework/input_stream_handler.h index 115b6d626..8e7f44b5f 100644 --- a/mediapipe/framework/input_stream_handler.h +++ b/mediapipe/framework/input_stream_handler.h @@ -84,13 +84,13 @@ class InputStreamHandler { // InputStreamHandler::input_stream_managers_ (meaning it should point // to somewhere in the middle of the master flat array of all input // stream managers). - ::mediapipe::Status InitializeInputStreamManagers( + mediapipe::Status InitializeInputStreamManagers( InputStreamManager* flat_input_stream_managers); InputStreamManager* GetInputStreamManager(CollectionItemId id); // Sets up the InputStreamShardSet by propagating data from the managers. 
- ::mediapipe::Status SetupInputShards(InputStreamShardSet* input_shards); + mediapipe::Status SetupInputShards(InputStreamShardSet* input_shards); // Returns a vector of pairs of stream name and queue size for monitoring // purpose. @@ -106,7 +106,7 @@ class InputStreamHandler { std::function headers_ready_callback, std::function notification_callback, std::function schedule_callback, - std::function error_callback); + std::function error_callback); int NumInputStreams() const { return input_stream_managers_.NumEntries(); } @@ -286,7 +286,7 @@ class InputStreamHandler { std::function notification_; // A callback to schedule the node with the prepared calculator context. std::function schedule_callback_; - std::function error_callback_; + std::function error_callback_; private: // Indicates when to fill the input set. If true, every input set will be @@ -318,12 +318,11 @@ using InputStreamHandlerRegistry = GlobalFactoryRegistry< } // namespace mediapipe // Macro for registering the input stream handler. -#define REGISTER_INPUT_STREAM_HANDLER(name) \ - REGISTER_FACTORY_FUNCTION_QUALIFIED( \ - ::mediapipe::InputStreamHandlerRegistry, input_handler_registration, \ - name, \ - absl::make_unique, \ - CalculatorContextManager*, const MediaPipeOptions&, \ +#define REGISTER_INPUT_STREAM_HANDLER(name) \ + REGISTER_FACTORY_FUNCTION_QUALIFIED( \ + mediapipe::InputStreamHandlerRegistry, input_handler_registration, name, \ + absl::make_unique, \ + CalculatorContextManager*, const MediaPipeOptions&, \ bool>) #endif // MEDIAPIPE_FRAMEWORK_INPUT_STREAM_HANDLER_H_ diff --git a/mediapipe/framework/input_stream_manager.cc b/mediapipe/framework/input_stream_manager.cc index 2f2f124d7..f5ce10d70 100644 --- a/mediapipe/framework/input_stream_manager.cc +++ b/mediapipe/framework/input_stream_manager.cc @@ -27,13 +27,14 @@ namespace mediapipe { -::mediapipe::Status InputStreamManager::Initialize( - const std::string& name, const PacketType* packet_type, bool back_edge) { +mediapipe::Status InputStreamManager::Initialize(const std::string& name, + const PacketType* packet_type, + bool back_edge) { name_ = name; packet_type_ = packet_type; back_edge_ = back_edge; PrepareForRun(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } const std::string& InputStreamManager::Name() const { return name_; } @@ -69,28 +70,28 @@ Packet InputStreamManager::QueueHead() const { return queue_.front(); } -::mediapipe::Status InputStreamManager::SetHeader(const Packet& header) { +mediapipe::Status InputStreamManager::SetHeader(const Packet& header) { if (header.Timestamp() != Timestamp::Unset()) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Headers must not have a timestamp. 
Stream: \"" << name_ << "\"."; } header_ = header; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status InputStreamManager::AddPackets( +mediapipe::Status InputStreamManager::AddPackets( const std::list& container, bool* notify) { return AddOrMovePacketsInternal&>(container, notify); } -::mediapipe::Status InputStreamManager::MovePackets( - std::list* container, bool* notify) { +mediapipe::Status InputStreamManager::MovePackets(std::list* container, + bool* notify) { return AddOrMovePacketsInternal&>(*container, notify); } template -::mediapipe::Status InputStreamManager::AddOrMovePacketsInternal( +mediapipe::Status InputStreamManager::AddOrMovePacketsInternal( Container container, bool* notify) { *notify = false; bool queue_became_non_empty = false; @@ -99,7 +100,7 @@ template // Scope to prevent locking the stream when notification is called. absl::MutexLock stream_lock(&stream_mutex_); if (closed_) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Check if the queue was full before packets came in. bool was_queue_full = @@ -107,7 +108,7 @@ template // Check if the queue becomes non-empty. queue_became_non_empty = queue_.empty() && !container.empty(); for (auto& packet : container) { - ::mediapipe::Status result = packet_type_->Validate(packet); + mediapipe::Status result = packet_type_->Validate(packet); if (!result.ok()) { return tool::AddStatusPrefix( absl::StrCat( @@ -118,7 +119,7 @@ template const Timestamp timestamp = packet.Timestamp(); if (!timestamp.IsAllowedInStream()) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "In stream \"" << name_ << "\", timestamp not specified or set to illegal value: " << timestamp.DebugString(); @@ -129,13 +130,13 @@ template // Timestamp::PreStream().NextAllowedInStream() is // Timestamp::OneOverPostStream(). if (timestamp == Timestamp::PostStream() && num_packets_added_ > 0) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "In stream \"" << name_ << "\", a packet at Timestamp::PostStream() must be the only " "Packet in an InputStream."; } if (timestamp < next_timestamp_bound_) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Packet timestamp mismatch on a calculator receiving from " "stream \"" << name_ << "\". Current minimum expected timestamp is " @@ -176,21 +177,21 @@ template becomes_full_callback_(this, &last_reported_stream_full_); } *notify = queue_became_non_empty; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status InputStreamManager::SetNextTimestampBound( +mediapipe::Status InputStreamManager::SetNextTimestampBound( const Timestamp bound, bool* notify) { *notify = false; { // Scope to prevent locking the stream when notification is called. absl::MutexLock stream_lock(&stream_mutex_); if (closed_) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } if (enable_timestamps_ && bound < next_timestamp_bound_) { - return ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "SetNextTimestampBound must be called with a timestamp greater " "than or equal to the current bound. In stream \"" << name_ << "\". 
Current minimum expected timestamp is " @@ -210,7 +211,7 @@ template } } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void InputStreamManager::DisableTimestamps() { enable_timestamps_ = false; } diff --git a/mediapipe/framework/input_stream_manager.h b/mediapipe/framework/input_stream_manager.h index a110ec383..0541247ff 100644 --- a/mediapipe/framework/input_stream_manager.h +++ b/mediapipe/framework/input_stream_manager.h @@ -57,8 +57,8 @@ class InputStreamManager { InputStreamManager() = default; // Initializes the InputStreamManager. - ::mediapipe::Status Initialize(const std::string& name, - const PacketType* packet_type, bool back_edge); + mediapipe::Status Initialize(const std::string& name, + const PacketType* packet_type, bool back_edge); // Returns the stream name. const std::string& Name() const; @@ -67,7 +67,7 @@ class InputStreamManager { bool BackEdge() const { return back_edge_; } // Sets the header Packet. - ::mediapipe::Status SetHeader(const Packet& header); + mediapipe::Status SetHeader(const Packet& header); const Packet& Header() const { return header_; } @@ -87,13 +87,13 @@ class InputStreamManager { // Timestamp::PostStream(), the packet must be the only packet in the // stream. // Violation of any of these conditions causes an error status. - ::mediapipe::Status AddPackets(const std::list& container, - bool* notify); + mediapipe::Status AddPackets(const std::list& container, + bool* notify); // Move a list of timestamped packets. Sets "notify" to true if the queue // becomes non-empty. Does nothing if the input stream is closed. After the // move, all packets in the container must be empty. - ::mediapipe::Status MovePackets(std::list* container, bool* notify); + mediapipe::Status MovePackets(std::list* container, bool* notify); // Closes the input stream. This function can be called multiple times. void Close() ABSL_LOCKS_EXCLUDED(stream_mutex_); @@ -103,7 +103,7 @@ class InputStreamManager { // empty. Returns an error status if this decreases the bound, unless // DisableTimestamps() is called. Does nothing if the input stream is // closed. - ::mediapipe::Status SetNextTimestampBound(Timestamp bound, bool* notify) + mediapipe::Status SetNextTimestampBound(Timestamp bound, bool* notify) ABSL_LOCKS_EXCLUDED(stream_mutex_); // Returns the smallest timestamp at which we might see an input in @@ -182,8 +182,7 @@ class InputStreamManager { // Otherwise, the caller must be MovePackets() and Container should be // non-const reference. template - ::mediapipe::Status AddOrMovePacketsInternal(Container container, - bool* notify) + mediapipe::Status AddOrMovePacketsInternal(Container container, bool* notify) ABSL_LOCKS_EXCLUDED(stream_mutex_); // Returns true if the next timestamp bound reaches Timestamp::Done(). 
diff --git a/mediapipe/framework/input_stream_manager_test.cc b/mediapipe/framework/input_stream_manager_test.cc index 141619a44..6b158928b 100644 --- a/mediapipe/framework/input_stream_manager_test.cc +++ b/mediapipe/framework/input_stream_manager_test.cc @@ -133,7 +133,7 @@ TEST_F(InputStreamManagerTest, AddPacketUnset) { packets.push_back(MakePacket("packet 1").At(Timestamp::Unset())); EXPECT_TRUE(input_stream_manager_->IsEmpty()); - ::mediapipe::Status result = + mediapipe::Status result = input_stream_manager_->AddPackets(packets, ¬ify_); // No notification ASSERT_THAT(result.message(), testing::HasSubstr("Timestamp::Unset()")); EXPECT_FALSE(notify_); @@ -145,7 +145,7 @@ TEST_F(InputStreamManagerTest, AddPacketUnstarted) { MakePacket("packet 1").At(Timestamp::Unstarted())); EXPECT_TRUE(input_stream_manager_->IsEmpty()); - ::mediapipe::Status result = + mediapipe::Status result = input_stream_manager_->AddPackets(packets, ¬ify_); // No notification ASSERT_THAT(result.message(), testing::HasSubstr("Timestamp::Unstarted()")); EXPECT_FALSE(notify_); @@ -157,7 +157,7 @@ TEST_F(InputStreamManagerTest, AddPacketOneOverPostStream) { MakePacket("packet 1").At(Timestamp::OneOverPostStream())); EXPECT_TRUE(input_stream_manager_->IsEmpty()); - ::mediapipe::Status result = + mediapipe::Status result = input_stream_manager_->AddPackets(packets, ¬ify_); // No notification ASSERT_THAT(result.message(), testing::HasSubstr("Timestamp::OneOverPostStream()")); @@ -169,7 +169,7 @@ TEST_F(InputStreamManagerTest, AddPacketDone) { packets.push_back(MakePacket("packet 1").At(Timestamp::Done())); EXPECT_TRUE(input_stream_manager_->IsEmpty()); - ::mediapipe::Status result = + mediapipe::Status result = input_stream_manager_->AddPackets(packets, ¬ify_); // No notification ASSERT_THAT(result.message(), testing::HasSubstr("Timestamp::Done()")); EXPECT_FALSE(notify_); @@ -196,7 +196,7 @@ TEST_F(InputStreamManagerTest, AddPacketsAfterPreStream) { packets.push_back(MakePacket("packet 2").At(Timestamp(10))); EXPECT_TRUE(input_stream_manager_->IsEmpty()); - ::mediapipe::Status result = + mediapipe::Status result = input_stream_manager_->AddPackets(packets, ¬ify_); // No notification ASSERT_THAT(result.message(), testing::HasSubstr("Timestamp::OneOverPostStream()")); @@ -224,7 +224,7 @@ TEST_F(InputStreamManagerTest, AddPacketsBeforePostStream) { MakePacket("packet 2").At(Timestamp::PostStream())); EXPECT_TRUE(input_stream_manager_->IsEmpty()); - ::mediapipe::Status result = + mediapipe::Status result = input_stream_manager_->AddPackets(packets, ¬ify_); // No notification ASSERT_THAT(result.message(), testing::HasSubstr("Timestamp::PostStream()")); EXPECT_FALSE(notify_); @@ -237,7 +237,7 @@ TEST_F(InputStreamManagerTest, AddPacketsReverseTimestamps) { packets.push_back(MakePacket("packet 3").At(Timestamp(30))); EXPECT_TRUE(input_stream_manager_->IsEmpty()); - ::mediapipe::Status result = + mediapipe::Status result = input_stream_manager_->AddPackets(packets, ¬ify_); // No notification ASSERT_THAT(result.message(), testing::HasSubstr( @@ -398,7 +398,7 @@ TEST_F(InputStreamManagerTest, BadPacketType) { packets.push_back(MakePacket(10).At(Timestamp(10))); EXPECT_TRUE(input_stream_manager_->IsEmpty()); - ::mediapipe::Status result = + mediapipe::Status result = input_stream_manager_->AddPackets(packets, ¬ify_); // No notification ASSERT_THAT(result.message(), testing::HasSubstr("Packet type mismatch")); EXPECT_FALSE(notify_); @@ -543,7 +543,7 @@ TEST_F(InputStreamManagerTest, BackwardsInTime) { EXPECT_FALSE(notify_); 
notify_ = false; - ::mediapipe::Status result = input_stream_manager_->SetNextTimestampBound( + mediapipe::Status result = input_stream_manager_->SetNextTimestampBound( Timestamp(40), ¬ify_); // Set Timestamp bound backwards in time. ASSERT_THAT(result.message(), testing::HasSubstr("40")); ASSERT_THAT(result.message(), testing::HasSubstr("50")); @@ -554,7 +554,7 @@ TEST_F(InputStreamManagerTest, BackwardsInTime) { packets.clear(); packets.push_back(MakePacket("packet 3") .At(Timestamp(30))); // Backwards in time - ::mediapipe::Status result2 = + mediapipe::Status result2 = input_stream_manager_->AddPackets(packets, ¬ify_); // No notification ASSERT_THAT(result2.message(), testing::HasSubstr("50")); ASSERT_THAT(result2.message(), testing::HasSubstr("30")); @@ -585,7 +585,7 @@ TEST_F(InputStreamManagerTest, BackwardsInTime) { packets.clear(); packets.push_back(MakePacket("packet 5") .At(Timestamp(130))); // Backwards in time. - ::mediapipe::Status result3 = + mediapipe::Status result3 = input_stream_manager_->AddPackets(packets, ¬ify_); // No notification ASSERT_THAT(result3.message(), testing::HasSubstr("151")); ASSERT_THAT(result3.message(), testing::HasSubstr("130")); diff --git a/mediapipe/framework/output_side_packet_impl.cc b/mediapipe/framework/output_side_packet_impl.cc index f2771da5d..0cb8be047 100644 --- a/mediapipe/framework/output_side_packet_impl.cc +++ b/mediapipe/framework/output_side_packet_impl.cc @@ -20,21 +20,21 @@ namespace mediapipe { -::mediapipe::Status OutputSidePacketImpl::Initialize( +mediapipe::Status OutputSidePacketImpl::Initialize( const std::string& name, const PacketType* packet_type) { name_ = name; packet_type_ = packet_type; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void OutputSidePacketImpl::PrepareForRun( - std::function error_callback) { + std::function error_callback) { error_callback_ = std::move(error_callback); initialized_ = false; } void OutputSidePacketImpl::Set(const Packet& packet) { - ::mediapipe::Status status = SetInternal(packet); + mediapipe::Status status = SetInternal(packet); if (!status.ok()) { TriggerErrorCallback(status); } @@ -46,26 +46,26 @@ void OutputSidePacketImpl::AddMirror( mirrors_.emplace_back(input_side_packet_handler, id); } -::mediapipe::Status OutputSidePacketImpl::SetInternal(const Packet& packet) { +mediapipe::Status OutputSidePacketImpl::SetInternal(const Packet& packet) { if (initialized_) { - return ::mediapipe::AlreadyExistsErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::AlreadyExistsErrorBuilder(MEDIAPIPE_LOC) << "Output side packet \"" << name_ << "\" was already set."; } if (packet.IsEmpty()) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Empty packet set on output side packet \"" << name_ << "\"."; } if (packet.Timestamp() != Timestamp::Unset()) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Output side packet \"" << name_ << "\" has a timestamp " << packet.Timestamp().DebugString() << "."; } - ::mediapipe::Status result = packet_type_->Validate(packet); + mediapipe::Status result = packet_type_->Validate(packet); if (!result.ok()) { - return ::mediapipe::StatusBuilder(result, MEDIAPIPE_LOC).SetPrepend() + return mediapipe::StatusBuilder(result, MEDIAPIPE_LOC).SetPrepend() << absl::StrCat( "Packet type mismatch on calculator output side packet \"", name_, "\": "); @@ -76,11 +76,11 @@ void 
OutputSidePacketImpl::AddMirror( for (const auto& mirror : mirrors_) { mirror.input_side_packet_handler->Set(mirror.id, packet_); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void OutputSidePacketImpl::TriggerErrorCallback( - const ::mediapipe::Status& status) const { + const mediapipe::Status& status) const { CHECK(error_callback_); error_callback_(status); } diff --git a/mediapipe/framework/output_side_packet_impl.h b/mediapipe/framework/output_side_packet_impl.h index df9ac4082..c38c65912 100644 --- a/mediapipe/framework/output_side_packet_impl.h +++ b/mediapipe/framework/output_side_packet_impl.h @@ -35,13 +35,13 @@ class OutputSidePacketImpl : public OutputSidePacket { ~OutputSidePacketImpl() override = default; // Initializes the OutputSidePacketImpl. - ::mediapipe::Status Initialize(const std::string& name, - const PacketType* packet_type); + mediapipe::Status Initialize(const std::string& name, + const PacketType* packet_type); // Prepares this for processing. If an error occurs in a user called function // (such as Set()) then error_callback will be called before returning // control to the user. - void PrepareForRun(std::function error_callback); + void PrepareForRun(std::function error_callback); // Gets the output side packet. Packet GetPacket() const { return packet_; } @@ -70,15 +70,15 @@ class OutputSidePacketImpl : public OutputSidePacket { }; // Called by Set(). - ::mediapipe::Status SetInternal(const Packet& packet); + mediapipe::Status SetInternal(const Packet& packet); - // Triggers the error callback with ::mediapipe::Status info when an error + // Triggers the error callback with mediapipe::Status info when an error // occurs. - void TriggerErrorCallback(const ::mediapipe::Status& status) const; + void TriggerErrorCallback(const mediapipe::Status& status) const; std::string name_; const PacketType* packet_type_; - std::function error_callback_; + std::function error_callback_; Packet packet_; bool initialized_ = false; diff --git a/mediapipe/framework/output_stream_handler.cc b/mediapipe/framework/output_stream_handler.cc index 678d5dd22..86990d343 100644 --- a/mediapipe/framework/output_stream_handler.cc +++ b/mediapipe/framework/output_stream_handler.cc @@ -20,16 +20,16 @@ namespace mediapipe { -::mediapipe::Status OutputStreamHandler::InitializeOutputStreamManagers( +mediapipe::Status OutputStreamHandler::InitializeOutputStreamManagers( OutputStreamManager* flat_output_stream_managers) { for (CollectionItemId id = output_stream_managers_.BeginId(); id < output_stream_managers_.EndId(); ++id) { output_stream_managers_.Get(id) = &flat_output_stream_managers[id.value()]; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status OutputStreamHandler::SetupOutputShards( +mediapipe::Status OutputStreamHandler::SetupOutputShards( OutputStreamShardSet* output_shards) { CHECK(output_shards); for (CollectionItemId id = output_stream_managers_.BeginId(); @@ -37,11 +37,11 @@ namespace mediapipe { OutputStreamManager* manager = output_stream_managers_.Get(id); output_shards->Get(id).SetSpec(manager->Spec()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void OutputStreamHandler::PrepareForRun( - const std::function& error_callback) { + const std::function& error_callback) { for (auto& manager : output_stream_managers_) { manager->PrepareForRun(error_callback); } diff --git a/mediapipe/framework/output_stream_handler.h b/mediapipe/framework/output_stream_handler.h index 092fa78b2..994f356e8 100644 
--- a/mediapipe/framework/output_stream_handler.h +++ b/mediapipe/framework/output_stream_handler.h @@ -76,11 +76,11 @@ class OutputStreamHandler { // OutputStreamHandler::output_stream_managers_ (meaning it should // point to somewhere in the middle of the master flat array of all // output stream managers). - ::mediapipe::Status InitializeOutputStreamManagers( + mediapipe::Status InitializeOutputStreamManagers( OutputStreamManager* flat_output_stream_managers); // Sets up output shards by connecting to the managers. - ::mediapipe::Status SetupOutputShards(OutputStreamShardSet* output_shards); + mediapipe::Status SetupOutputShards(OutputStreamShardSet* output_shards); int NumOutputStreams() const { return output_stream_managers_.NumEntries(); } @@ -92,7 +92,7 @@ class OutputStreamHandler { // Calls OutputStreamManager::PrepareForRun(error_callback) per stream, and // resets data memebers. void PrepareForRun( - const std::function& error_callback) + const std::function& error_callback) ABSL_LOCKS_EXCLUDED(timestamp_mutex_); // Marks the output streams as started and propagates any changes made in @@ -203,12 +203,12 @@ using OutputStreamHandlerRegistry = GlobalFactoryRegistry< } // namespace mediapipe // Macro for registering the output stream handler. -#define REGISTER_OUTPUT_STREAM_HANDLER(name) \ - REGISTER_FACTORY_FUNCTION_QUALIFIED( \ - ::mediapipe::OutputStreamHandlerRegistry, output_handler_registration, \ - name, \ - absl::make_unique, \ - CalculatorContextManager*, const MediaPipeOptions&, \ +#define REGISTER_OUTPUT_STREAM_HANDLER(name) \ + REGISTER_FACTORY_FUNCTION_QUALIFIED( \ + mediapipe::OutputStreamHandlerRegistry, output_handler_registration, \ + name, \ + absl::make_unique, \ + CalculatorContextManager*, const MediaPipeOptions&, \ bool>) #endif // MEDIAPIPE_FRAMEWORK_OUTPUT_STREAM_HANDLER_H_ diff --git a/mediapipe/framework/output_stream_manager.cc b/mediapipe/framework/output_stream_manager.cc index fba5ee5a4..572245bf6 100644 --- a/mediapipe/framework/output_stream_manager.cc +++ b/mediapipe/framework/output_stream_manager.cc @@ -20,17 +20,17 @@ namespace mediapipe { -::mediapipe::Status OutputStreamManager::Initialize( +mediapipe::Status OutputStreamManager::Initialize( const std::string& name, const PacketType* packet_type) { output_stream_spec_.name = name; output_stream_spec_.packet_type = packet_type; output_stream_spec_.offset_enabled = false; PrepareForRun(nullptr); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void OutputStreamManager::PrepareForRun( - std::function error_callback) { + std::function error_callback) { output_stream_spec_.error_callback = std::move(error_callback); output_stream_spec_.locked_intro_data = false; @@ -66,7 +66,7 @@ bool OutputStreamManager::IsClosed() const { void OutputStreamManager::PropagateHeader() { if (output_stream_spec_.locked_intro_data) { output_stream_spec_.TriggerErrorCallback( - ::mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) + mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) << "PropagateHeader must be called in CalculatorNode::OpenNode(). " "Stream: \"" << output_stream_spec_.name << "\"."); @@ -106,7 +106,7 @@ Timestamp OutputStreamManager::ComputeOutputTimestampBound( if (input_timestamp != Timestamp::Unstarted() && !input_timestamp.IsAllowedInStream()) { output_stream_spec_.TriggerErrorCallback( - ::mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) + mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) << "Invalid input timestamp to compute the output timestamp bound. 
" "Stream: \"" << output_stream_spec_.name diff --git a/mediapipe/framework/output_stream_manager.h b/mediapipe/framework/output_stream_manager.h index ec9d6c1ef..6fec67d08 100644 --- a/mediapipe/framework/output_stream_manager.h +++ b/mediapipe/framework/output_stream_manager.h @@ -40,13 +40,13 @@ class OutputStreamManager { OutputStreamManager() = default; // Initializes the OutputStreamManager. - ::mediapipe::Status Initialize(const std::string& name, - const PacketType* packet_type); + mediapipe::Status Initialize(const std::string& name, + const PacketType* packet_type); // Prepares this for processing. If an error occurs in a user called function // (such as AddPacket()) then error_callback will be called before returning // control to the user. - void PrepareForRun(std::function error_callback); + void PrepareForRun(std::function error_callback); // Gets the stream name. const std::string& Name() const { return output_stream_spec_.name; } diff --git a/mediapipe/framework/output_stream_manager_test.cc b/mediapipe/framework/output_stream_manager_test.cc index 4428790c6..77b15e341 100644 --- a/mediapipe/framework/output_stream_manager_test.cc +++ b/mediapipe/framework/output_stream_manager_test.cc @@ -85,9 +85,7 @@ class OutputStreamManagerTest : public ::testing::Test { void ScheduleNoOp(CalculatorContext* cc) {} - void RecordError(const ::mediapipe::Status& error) { - errors_.push_back(error); - } + void RecordError(const mediapipe::Status& error) { errors_.push_back(error); } void ReportQueueNoOp(InputStreamManager* stream, bool* stream_was_full) {} @@ -106,7 +104,7 @@ class OutputStreamManagerTest : public ::testing::Test { std::function headers_ready_callback_; std::function notification_callback_; std::function schedule_callback_; - std::function error_callback_; + std::function error_callback_; InputStreamManager::QueueSizeCallback queue_full_callback_; InputStreamManager::QueueSizeCallback queue_not_full_callback_; @@ -116,7 +114,7 @@ class OutputStreamManagerTest : public ::testing::Test { InputStreamManager input_stream_manager_; // Vector of errors encountered while using the stream. - std::vector<::mediapipe::Status> errors_; + std::vector errors_; }; TEST_F(OutputStreamManagerTest, Init) {} diff --git a/mediapipe/framework/output_stream_shard.cc b/mediapipe/framework/output_stream_shard.cc index 825dc67fa..0a600be86 100644 --- a/mediapipe/framework/output_stream_shard.cc +++ b/mediapipe/framework/output_stream_shard.cc @@ -34,7 +34,7 @@ const std::string& OutputStreamShard::Name() const { void OutputStreamShard::SetNextTimestampBound(Timestamp bound) { if (!bound.IsAllowedInStream() && bound != Timestamp::OneOverPostStream()) { output_stream_spec_->TriggerErrorCallback( - ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "In stream \"" << Name() << "\", timestamp bound set to illegal value: " << bound.DebugString()); return; @@ -54,7 +54,7 @@ bool OutputStreamShard::IsClosed() const { return closed_; } void OutputStreamShard::SetOffset(TimestampDiff offset) { if (output_stream_spec_->locked_intro_data) { output_stream_spec_->TriggerErrorCallback( - ::mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) + mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) << "SetOffset must be called from Calculator::Open(). 
Stream: \"" << output_stream_spec_->name << "\"."); return; @@ -66,7 +66,7 @@ void OutputStreamShard::SetOffset(TimestampDiff offset) { void OutputStreamShard::SetHeader(const Packet& header) { if (closed_) { output_stream_spec_->TriggerErrorCallback( - ::mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) + mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) << "SetHeader must be called before the stream is closed. Stream: \"" << output_stream_spec_->name << "\"."); return; @@ -74,7 +74,7 @@ void OutputStreamShard::SetHeader(const Packet& header) { if (output_stream_spec_->locked_intro_data) { output_stream_spec_->TriggerErrorCallback( - ::mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) + mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) << "SetHeader must be called from Calculator::Open(). Stream: \"" << output_stream_spec_->name << "\"."); return; @@ -96,18 +96,18 @@ const Packet& OutputStreamShard::Header() const { template Status OutputStreamShard::AddPacketInternal(T&& packet) { if (IsClosed()) { - return ::mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) << "Packet sent to closed stream \"" << Name() << "\"."; } if (packet.IsEmpty()) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Empty packet sent to stream \"" << Name() << "\"."; } const Timestamp timestamp = packet.Timestamp(); if (!timestamp.IsAllowedInStream()) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "In stream \"" << Name() << "\", timestamp not specified or set to illegal value: " << timestamp.DebugString(); @@ -128,7 +128,7 @@ Status OutputStreamShard::AddPacketInternal(T&& packet) { // TODO debug log? - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void OutputStreamShard::AddPacket(const Packet& packet) { diff --git a/mediapipe/framework/output_stream_shard.h b/mediapipe/framework/output_stream_shard.h index 009fc2f3c..ad8ac5995 100644 --- a/mediapipe/framework/output_stream_shard.h +++ b/mediapipe/framework/output_stream_shard.h @@ -31,16 +31,16 @@ class OutputStreamManager; // The output stream spec shared across all output stream shards and their // output stream manager. struct OutputStreamSpec { - // Triggers the error callback with ::mediapipe::Status info when an error + // Triggers the error callback with mediapipe::Status info when an error // occurs. - void TriggerErrorCallback(const ::mediapipe::Status& status) const { + void TriggerErrorCallback(const mediapipe::Status& status) const { CHECK(error_callback); error_callback(status); } std::string name; const PacketType* packet_type; - std::function error_callback; + std::function error_callback; bool locked_intro_data; // Those three variables are the intro data protected by locked_intro_data. bool offset_enabled; @@ -102,7 +102,7 @@ class OutputStreamShard : public OutputStream { // AddPacketInternal template is called by either AddPacket(Packet&& packet) // or AddPacket(const Packet& packet). template - ::mediapipe::Status AddPacketInternal(T&& packet); + mediapipe::Status AddPacketInternal(T&& packet); // Returns a pointer to the output queue. 
std::list* OutputQueue() { return &output_queue_; } diff --git a/mediapipe/framework/packet.cc b/mediapipe/framework/packet.cc index 8d9914835..3d25a4d0f 100644 --- a/mediapipe/framework/packet.cc +++ b/mediapipe/framework/packet.cc @@ -51,7 +51,7 @@ const HolderBase* GetHolder(const Packet& packet) { return packet.holder_.get(); } -::mediapipe::StatusOr PacketFromDynamicProto( +mediapipe::StatusOr PacketFromDynamicProto( const std::string& type_name, const std::string& serialized) { ASSIGN_OR_RETURN( auto message_holder, @@ -105,16 +105,16 @@ std::string Packet::DebugString() const { return result; } -::mediapipe::Status Packet::ValidateAsProtoMessageLite() const { +mediapipe::Status Packet::ValidateAsProtoMessageLite() const { if (ABSL_PREDICT_FALSE(IsEmpty())) { - return ::mediapipe::InternalError("Packet is empty."); + return mediapipe::InternalError("Packet is empty."); } if (ABSL_PREDICT_FALSE(holder_->GetProtoMessageLite() == nullptr)) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( absl::StrCat("The Packet stores \"", holder_->DebugTypeName(), "\"", "which is not convertible to proto_ns::MessageLite.")); } else { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } @@ -129,7 +129,7 @@ const proto_ns::MessageLite& Packet::GetProtoMessageLite() const { StatusOr> Packet::GetVectorOfProtoMessageLitePtrs() { if (holder_ == nullptr) { - return ::mediapipe::InternalError("Packet is empty."); + return mediapipe::InternalError("Packet is empty."); } return holder_->GetVectorOfProtoMessageLite(); } diff --git a/mediapipe/framework/packet.h b/mediapipe/framework/packet.h index 5221f5553..f079afb89 100644 --- a/mediapipe/framework/packet.h +++ b/mediapipe/framework/packet.h @@ -53,7 +53,7 @@ Packet Create(HolderBase* holder, Timestamp timestamp); Packet Create(std::shared_ptr holder, Timestamp timestamp); const HolderBase* GetHolder(const Packet& packet); const std::shared_ptr& GetHolderShared(const Packet& packet); -::mediapipe::StatusOr PacketFromDynamicProto( +mediapipe::StatusOr PacketFromDynamicProto( const std::string& type_name, const std::string& serialized); } // namespace packet_internal @@ -111,7 +111,7 @@ class Packet { // holder. Otherwise, returns error when the packet can't be consumed. // See ConsumeOrCopy for threading requirements and example usage. template - ::mediapipe::StatusOr> Consume(); + mediapipe::StatusOr> Consume(); // Consumes the packet and transfers the ownership of the data to a // unique pointer if the packet is the sole owner of a non-foreign @@ -131,11 +131,11 @@ class Packet { // ASSIGN_OR_RETURN(auto detection, p.ConsumeOrCopy()); // If you would like to crash on failure (prefer ASSIGN_OR_RETURN): // auto detection = p.ConsumeOrCopy().ValueOrDie(); - // // In functions which do not return ::mediapipe::Status use an adaptor + // // In functions which do not return mediapipe::Status use an adaptor // // function as the third argument to ASSIGN_OR_RETURN. In tests, // // use an adaptor which returns void. // ASSIGN_OR_RETURN(auto detection, p.ConsumeOrCopy(), - // _.With([](const ::mediapipe::Status& status) { + // _.With([](const mediapipe::Status& status) { // MP_EXPECT_OK(status); // // Use CHECK_OK to crash and report a usable line // // number (which the ValueOrDie alternative does not). @@ -145,13 +145,13 @@ class Packet { // // Version for non-arrays. 
@@ -145,13 +145,13 @@ class Packet {
   //
   // Version for non-arrays.
   template <typename T>
-  ::mediapipe::StatusOr<std::unique_ptr<T>> ConsumeOrCopy(
+  mediapipe::StatusOr<std::unique_ptr<T>> ConsumeOrCopy(
       bool* was_copied = nullptr,
       typename std::enable_if<!std::is_array<T>::value>::type* = nullptr);
 
   // Version for bounded array.
   template <typename T>
-  ::mediapipe::StatusOr<std::unique_ptr<T>> ConsumeOrCopy(
+  mediapipe::StatusOr<std::unique_ptr<T>> ConsumeOrCopy(
       bool* was_copied = nullptr,
       typename std::enable_if<std::is_array<T>::value &&
                               std::extent<T>::value != 0>::type* = nullptr);
@@ -160,7 +160,7 @@ class Packet {
   // delete helper.
   // Version for unbounded array.
   template <typename T>
-  ::mediapipe::StatusOr<std::unique_ptr<T>> ConsumeOrCopy(
+  mediapipe::StatusOr<std::unique_ptr<T>> ConsumeOrCopy(
       bool* was_copied = nullptr,
       typename std::enable_if<std::is_array<T>::value &&
                               std::extent<T>::value == 0>::type* = nullptr);
@@ -178,11 +178,11 @@ class Packet {
 
   // Returns an error if the packet does not contain data of type T.
   template <typename T>
-  ::mediapipe::Status ValidateAsType() const;
+  mediapipe::Status ValidateAsType() const;
 
   // Returns an error if the packet is not an instance of
   // a protocol buffer message.
-  ::mediapipe::Status ValidateAsProtoMessageLite() const;
+  mediapipe::Status ValidateAsProtoMessageLite() const;
 
   // Get the type id for the underlying type stored in the Packet.
   // Crashes if IsEmpty() == true.
@@ -406,7 +406,7 @@ template <typename T>
 StatusOr<std::vector<const proto_ns::MessageLite*>>
 ConvertToVectorOfProtoMessageLitePtrs(const T* data,
                                       /*is_proto_vector=*/std::false_type) {
-  return ::mediapipe::InvalidArgumentError(absl::StrCat(
+  return mediapipe::InvalidArgumentError(absl::StrCat(
       "The Packet stores \"", tool::TypeId<T>().name(), "\"",
       "which is not convertible to vector<proto_ns::MessageLite*>."));
 }
@@ -496,7 +496,7 @@ class Holder : public HolderBase {
   // This method is dangerous and is only used by Packet::Consume() if the
   // packet is the only owner of the holder.
   template <typename U = T>
-  ::mediapipe::StatusOr<std::unique_ptr<T>> Release(
+  mediapipe::StatusOr<std::unique_ptr<T>> Release(
       typename std::enable_if<!std::is_array<U>::value ||
                               std::extent<U>::value != 0>::type* = 0) {
     // Since C++ doesn't allow virtual, templated functions, check holder
@@ -513,10 +513,10 @@ class Holder : public HolderBase {
   // TODO: support unbounded array after fixing the bug in holder's
   // delete helper.
   template <typename U = T>
-  ::mediapipe::StatusOr<std::unique_ptr<T>> Release(
+  mediapipe::StatusOr<std::unique_ptr<T>> Release(
       typename std::enable_if<std::is_array<U>::value &&
                               std::extent<U>::value == 0>::type* = 0) {
-    return ::mediapipe::InternalError("Release T[] isn't supported.");
+    return mediapipe::InternalError("Release T[] isn't supported.");
   }
   const std::string DebugTypeName() const final {
     return MediaPipeTypeStringOrDemangled<T>();
@@ -580,8 +580,8 @@ class ForeignHolder : public Holder<T> {
     this->ptr_ = nullptr;
   }
   // Foreign holder can't release data pointer without ownership.
-  ::mediapipe::StatusOr<std::unique_ptr<T>> Release() {
-    return ::mediapipe::InternalError(
+  mediapipe::StatusOr<std::unique_ptr<T>> Release() {
+    return mediapipe::InternalError(
         "Foreign holder can't release data ptr without ownership.");
   }
 };
@@ -621,14 +621,14 @@ inline Packet& Packet::operator=(const Packet& packet) {
 }
 
 template <typename T>
-inline ::mediapipe::StatusOr<std::unique_ptr<T>> Packet::Consume() {
+inline mediapipe::StatusOr<std::unique_ptr<T>> Packet::Consume() {
   // If type validation fails, returns error.
   MP_RETURN_IF_ERROR(ValidateAsType<T>());
   // Clients who use this function are responsible for ensuring that no
   // other thread is doing anything with this Packet.
   if (holder_.unique()) {
     VLOG(2) << "Consuming the data of " << DebugString();
-    ::mediapipe::StatusOr<std::unique_ptr<T>> release_result =
+    mediapipe::StatusOr<std::unique_ptr<T>> release_result =
         holder_->As<T>()->Release();
     if (release_result.ok()) {
       VLOG(2) << "Setting " << DebugString() << " to empty.";
@@ -638,12 +638,12 @@ inline ::mediapipe::StatusOr<std::unique_ptr<T>> Packet::Consume() {
   }
   // If packet isn't the sole owner of the holder, returns kFailedPrecondition
   // error with message.
-  return ::mediapipe::Status(::mediapipe::StatusCode::kFailedPrecondition,
-                             "Packet isn't the sole owner of the holder.");
+  return mediapipe::Status(mediapipe::StatusCode::kFailedPrecondition,
+                           "Packet isn't the sole owner of the holder.");
 }
 
 template <typename T>
-inline ::mediapipe::StatusOr<std::unique_ptr<T>> Packet::ConsumeOrCopy(
+inline mediapipe::StatusOr<std::unique_ptr<T>> Packet::ConsumeOrCopy(
     bool* was_copied,
     typename std::enable_if<!std::is_array<T>::value>::type*) {
   MP_RETURN_IF_ERROR(ValidateAsType<T>());
@@ -651,7 +651,7 @@ inline ::mediapipe::StatusOr<std::unique_ptr<T>> Packet::ConsumeOrCopy(
   if (!holder_->HolderIsOfType<packet_internal::ForeignHolder<T>>() &&
       holder_.unique()) {
     VLOG(2) << "Consuming the data of " << DebugString();
-    ::mediapipe::StatusOr<std::unique_ptr<T>> release_result =
+    mediapipe::StatusOr<std::unique_ptr<T>> release_result =
         holder_->As<T>()->Release();
     if (release_result.ok()) {
       VLOG(2) << "Setting " << DebugString() << " to empty.";
@@ -673,7 +673,7 @@ inline ::mediapipe::StatusOr<std::unique_ptr<T>> Packet::ConsumeOrCopy(
 }
 
 template <typename T>
-inline ::mediapipe::StatusOr<std::unique_ptr<T>> Packet::ConsumeOrCopy(
+inline mediapipe::StatusOr<std::unique_ptr<T>> Packet::ConsumeOrCopy(
     bool* was_copied,
     typename std::enable_if<std::is_array<T>::value &&
                             std::extent<T>::value != 0>::type*) {
@@ -682,7 +682,7 @@ inline ::mediapipe::StatusOr<std::unique_ptr<T>> Packet::ConsumeOrCopy(
   if (!holder_->HolderIsOfType<packet_internal::ForeignHolder<T>>() &&
       holder_.unique()) {
     VLOG(2) << "Consuming the data of " << DebugString();
-    ::mediapipe::StatusOr<std::unique_ptr<T>> release_result =
+    mediapipe::StatusOr<std::unique_ptr<T>> release_result =
         holder_->As<T>()->Release();
     if (release_result.ok()) {
       VLOG(2) << "Setting " << DebugString() << " to empty.";
@@ -710,11 +710,11 @@ inline ::mediapipe::StatusOr<std::unique_ptr<T>> Packet::ConsumeOrCopy(
 }
 
 template <typename T>
-inline ::mediapipe::StatusOr<std::unique_ptr<T>> Packet::ConsumeOrCopy(
+inline mediapipe::StatusOr<std::unique_ptr<T>> Packet::ConsumeOrCopy(
     bool* was_copied,
     typename std::enable_if<std::is_array<T>::value &&
                             std::extent<T>::value == 0>::type*) {
-  return ::mediapipe::InternalError("Unbounded array isn't supported.");
+  return mediapipe::InternalError("Unbounded array isn't supported.");
 }
 
 inline Packet::Packet(Packet&& packet) {
@@ -746,25 +746,25 @@ inline const T& Packet::Get() const {
   packet_internal::Holder<T>* holder = IsEmpty() ? nullptr : holder_->As<T>();
   if (holder == nullptr) {
     // Produce a good error message.
-    ::mediapipe::Status status = ValidateAsType<T>();
+    mediapipe::Status status = ValidateAsType<T>();
     LOG(FATAL) << "Packet::Get() failed: " << status.message();
   }
   return holder->data();
 }
 
 template <typename T>
-::mediapipe::Status Packet::ValidateAsType() const {
+mediapipe::Status Packet::ValidateAsType() const {
   if (ABSL_PREDICT_FALSE(IsEmpty())) {
-    return ::mediapipe::InternalError(absl::StrCat(
+    return mediapipe::InternalError(absl::StrCat(
         "Expected a Packet of type: ", MediaPipeTypeStringOrDemangled<T>(),
         ", but received an empty Packet."));
   }
   if (ABSL_PREDICT_FALSE(holder_->As<T>() == nullptr)) {
-    return ::mediapipe::InvalidArgumentError(absl::StrCat(
+    return mediapipe::InvalidArgumentError(absl::StrCat(
         "The Packet stores \"", holder_->DebugTypeName(), "\", but \"",
         MediaPipeTypeStringOrDemangled<T>(), "\" was requested."));
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
 inline Timestamp Packet::Timestamp() const { return timestamp_; }
diff --git a/mediapipe/framework/packet_generator.h b/mediapipe/framework/packet_generator.h
index dd2e13390..8c8c1185a 100644
--- a/mediapipe/framework/packet_generator.h
+++ b/mediapipe/framework/packet_generator.h
@@ -49,12 +49,12 @@ class PacketGenerator {
   // and
   //   produce output side packets.
   //
-  //   static ::mediapipe::Status FillExpectations(
+  //   static mediapipe::Status FillExpectations(
   //       const PacketGeneratorOptions& extendable_options,
   //       PacketTypeSet* input_side_packets,
   //       PacketTypeSet* output_side_packets);
   //
-  //   static ::mediapipe::Status Generate(
+  //   static mediapipe::Status Generate(
   //       const PacketGeneratorOptions& extendable_options,
   //       const PacketSet& input_side_packets,
   //       PacketSet* output_side_packets);
@@ -69,11 +69,11 @@ namespace internal {
 class StaticAccessToGenerator {
  public:
   virtual ~StaticAccessToGenerator() {}
-  virtual ::mediapipe::Status FillExpectations(
+  virtual mediapipe::Status FillExpectations(
       const PacketGeneratorOptions& extendable_options,  //
       PacketTypeSet* input_side_packets,                 //
       PacketTypeSet* output_side_packets) = 0;
-  virtual ::mediapipe::Status Generate(
+  virtual mediapipe::Status Generate(
       const PacketGeneratorOptions& extendable_options,  //
       const PacketSet& input_side_packets,               //
       PacketSet* output_side_packets) = 0;
@@ -87,7 +87,7 @@ using StaticAccessToGeneratorRegistry =
 template <typename T>
 constexpr bool PacketGeneratorHasFillExpectations(
     decltype(&T::FillExpectations) /*unused*/) {
-  typedef ::mediapipe::Status (*FillExpectationsType)(
+  typedef mediapipe::Status (*FillExpectationsType)(
       const PacketGeneratorOptions& extendable_options,  //
       PacketTypeSet* input_side_packets,                 //
       PacketTypeSet* output_side_packets);
@@ -100,7 +100,7 @@ constexpr bool PacketGeneratorHasFillExpectations(...) {
 }
 template <typename T>
 constexpr bool PacketGeneratorHasGenerate(decltype(&T::Generate) /*unused*/) {
-  typedef ::mediapipe::Status (*GenerateType)(
+  typedef mediapipe::Status (*GenerateType)(
       const PacketGeneratorOptions& extendable_options,  //
       const PacketSet& input_side_packets,               //
       PacketSet* output_side_packets);
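To make the `FillExpectations()`/`Generate()` contract above concrete, here is a minimal sketch of a generator under the new namespace spelling; `FixedStringGenerator` and its single output side packet are illustrative, not part of this diff:

```cpp
#include "mediapipe/framework/packet_generator.h"

namespace mediapipe {

// Emits one string side packet; mirrors the documented static contract.
class FixedStringGenerator : public PacketGenerator {
 public:
  static mediapipe::Status FillExpectations(
      const PacketGeneratorOptions& extendable_options,
      PacketTypeSet* input_side_packets, PacketTypeSet* output_side_packets) {
    output_side_packets->Index(0).Set<std::string>();
    return mediapipe::OkStatus();
  }

  static mediapipe::Status Generate(
      const PacketGeneratorOptions& extendable_options,
      const PacketSet& input_side_packets, PacketSet* output_side_packets) {
    output_side_packets->Index(0) = MakePacket<std::string>("fixed");
    return mediapipe::OkStatus();
  }
};
REGISTER_PACKET_GENERATOR(FixedStringGenerator);

}  // namespace mediapipe
```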
@@ -117,10 +117,10 @@ constexpr bool PacketGeneratorHasGenerate(...) {
 template <typename GeneratorT>
 class StaticAccessToGeneratorTyped : public StaticAccessToGenerator {
  public:
-  static_assert(std::is_base_of<::mediapipe::PacketGenerator,
-                                GeneratorT>::value,
+  static_assert(std::is_base_of<mediapipe::PacketGenerator,
+                                GeneratorT>::value,
                 "Classes registered with REGISTER_PACKET_GENERATOR must be "
-                "subclasses of ::mediapipe::PacketGenerator.");
+                "subclasses of mediapipe::PacketGenerator.");
   static_assert(
       PacketGeneratorHasFillExpectations<GeneratorT>(nullptr),
       "FillExpectations() must be defined with the correct signature in "
@@ -129,7 +129,7 @@ class StaticAccessToGeneratorTyped : public StaticAccessToGenerator {
       "Generate() must be defined with the correct signature in "
       "every PacketGenerator.");
 
-  ::mediapipe::Status FillExpectations(
+  mediapipe::Status FillExpectations(
       const PacketGeneratorOptions& extendable_options,  //
       PacketTypeSet* input_side_packets,                 //
       PacketTypeSet* output_side_packets) final {
@@ -137,7 +137,7 @@ class StaticAccessToGeneratorTyped : public StaticAccessToGenerator {
         extendable_options, input_side_packets, output_side_packets);
   }
 
-  ::mediapipe::Status Generate(
+  mediapipe::Status Generate(
       const PacketGeneratorOptions& extendable_options,  //
       const PacketSet& input_side_packets,               //
       PacketSet* output_side_packets) final {
@@ -150,12 +150,12 @@ class StaticAccessToGeneratorTyped : public StaticAccessToGenerator {
 
 // Macro for registering PacketGenerators. It actually just registers
 // the StaticAccessToGeneratorTyped class.
-#define REGISTER_PACKET_GENERATOR(name)                                   \
-  REGISTER_FACTORY_FUNCTION_QUALIFIED(                                    \
-      ::mediapipe::internal::StaticAccessToGeneratorRegistry,             \
-      generator_registration, name,                                       \
-      absl::make_unique<                                                  \
-          ::mediapipe::internal::StaticAccessToGeneratorTyped<name>>)
+#define REGISTER_PACKET_GENERATOR(name)                                 \
+  REGISTER_FACTORY_FUNCTION_QUALIFIED(                                  \
+      mediapipe::internal::StaticAccessToGeneratorRegistry,             \
+      generator_registration, name,                                     \
+      absl::make_unique<                                                \
+          mediapipe::internal::StaticAccessToGeneratorTyped<name>>)
 
 }  // namespace mediapipe
diff --git a/mediapipe/framework/packet_generator_graph.cc b/mediapipe/framework/packet_generator_graph.cc
index f19da5687..ee7472d7b 100644
--- a/mediapipe/framework/packet_generator_graph.cc
+++ b/mediapipe/framework/packet_generator_graph.cc
@@ -44,7 +44,7 @@ namespace {
 // generator cannot be run given the currently available side packets
 // (and false otherwise).  If an error occurs then unrunnable and
 // input_side_packet_set are undefined.
-::mediapipe::Status CreateInputsForGenerator(
+mediapipe::Status CreateInputsForGenerator(
     const ValidatedGraphConfig& validated_graph, int generator_index,
     const std::map<std::string, Packet>& side_packets,
     PacketSet* input_side_packet_set, bool* unrunnable) {
@@ -55,7 +55,7 @@ namespace {
           .packet_generator();
   // Fill the PacketSet (if possible).
   *unrunnable = false;
-  std::vector<::mediapipe::Status> statuses;
+  std::vector<mediapipe::Status> statuses;
   for (CollectionItemId id = node_type_info.InputSidePacketTypes().BeginId();
        id < node_type_info.InputSidePacketTypes().EndId(); ++id) {
     const std::string& name =
@@ -67,7 +67,7 @@ namespace {
       continue;
     }
     input_side_packet_set->Get(id) = it->second;
-    ::mediapipe::Status status =
+    mediapipe::Status status =
         node_type_info.InputSidePacketTypes().Get(id).Validate(
             input_side_packet_set->Get(id));
     if (!status.ok()) {
@@ -82,15 +82,15 @@ namespace {
     return tool::CombinedStatus(
         absl::StrCat(generator_name, " had invalid configuration."), statuses);
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
 // Generate the packets from a PacketGenerator, place them in
 // output_side_packet_set, and validate their types.
-::mediapipe::Status Generate(const ValidatedGraphConfig& validated_graph,
-                             int generator_index,
-                             const PacketSet& input_side_packet_set,
-                             PacketSet* output_side_packet_set) {
+mediapipe::Status Generate(const ValidatedGraphConfig& validated_graph,
+                           int generator_index,
+                           const PacketSet& input_side_packet_set,
+                           PacketSet* output_side_packet_set) {
   const NodeTypeInfo& node_type_info =
       validated_graph.GeneratorInfos()[generator_index];
   const PacketGeneratorConfig& generator_config =
@@ -113,7 +113,7 @@ namespace {
           .SetPrepend()
       << generator_name
      << "::Generate() output packets were of incorrect type: ";
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
 // GeneratorScheduler schedules the packet generators in a validated graph for
@@ -126,7 +126,7 @@ class GeneratorScheduler {
   // PacketGenerators (those not run at initialize time due to missing
   // dependencies).
   GeneratorScheduler(const ValidatedGraphConfig* validated_graph,
-                     ::mediapipe::Executor* executor,
+                     mediapipe::Executor* executor,
                      const std::vector<int>& non_base_generators, bool initial);
 
   // Run a PacketGenerator on a given executor on the provided input
@@ -149,7 +149,7 @@ class GeneratorScheduler {
   // rather, not executed) in non_scheduled_generators.  Returns the combined
   // error status if there were errors while running the packet generators.
   // NOTE: This method should only be called when there are no pending tasks.
-  ::mediapipe::Status GetNonScheduledGenerators(
+  mediapipe::Status GetNonScheduledGenerators(
       std::vector<int>* non_scheduled_generators) const;
 
  private:
@@ -161,7 +161,7 @@ class GeneratorScheduler {
   void RunApplicationThreadTasks() ABSL_LOCKS_EXCLUDED(app_thread_mutex_);
 
   const ValidatedGraphConfig* const validated_graph_;
-  ::mediapipe::Executor* executor_;
+  mediapipe::Executor* executor_;
 
   mutable absl::Mutex mutex_;
   // The number of pending tasks.
@@ -169,7 +169,7 @@ class GeneratorScheduler {
   // This condition variable is signaled when num_tasks_ becomes 0.
   absl::CondVar idle_condvar_;
   // Accumulates the error statuses while running the packet generators.
-  std::vector<::mediapipe::Status> statuses_ ABSL_GUARDED_BY(mutex_);
+  std::vector<mediapipe::Status> statuses_ ABSL_GUARDED_BY(mutex_);
   // scheduled_generators_[i] is true if the packet generator with index i was
   // scheduled (or rather, executed).
   std::vector<bool> scheduled_generators_ ABSL_GUARDED_BY(mutex_);
@@ -182,8 +182,7 @@ class GeneratorScheduler {
 };
 
 GeneratorScheduler::GeneratorScheduler(
-    const ValidatedGraphConfig* validated_graph,
-    ::mediapipe::Executor* executor,
+    const ValidatedGraphConfig* validated_graph, mediapipe::Executor* executor,
     const std::vector<int>& non_base_generators, bool initial)
     : validated_graph_(validated_graph),
       executor_(executor),
@@ -220,7 +219,7 @@ void GeneratorScheduler::GenerateAndScheduleNext(
                                        .OutputSidePacketTypes()
                                        .TagMap());
   VLOG(1) << "Running generator " << generator_index;
-  ::mediapipe::Status status =
+  mediapipe::Status status =
       Generate(*validated_graph_, generator_index, *input_side_packet_set,
                &output_side_packet_set);
 
@@ -236,7 +235,7 @@ void GeneratorScheduler::GenerateAndScheduleNext(
       const auto& name = output_side_packet_set.TagMap()->Names()[id.value()];
       auto item = side_packets->emplace(name, output_side_packet_set.Get(id));
       if (!item.second) {
-        statuses_.push_back(::mediapipe::AlreadyExistsError(
+        statuses_.push_back(mediapipe::AlreadyExistsError(
             absl::StrCat("Side packet \"", name, "\" was defined twice.")));
       }
     }
@@ -267,7 +266,7 @@ void GeneratorScheduler::ScheduleAllRunnableGenerators(
                                          .InputSidePacketTypes()
                                          .TagMap());
 
-    ::mediapipe::Status status =
+    mediapipe::Status status =
         CreateInputsForGenerator(*validated_graph_, index, *side_packets,
                                  input_side_packet_set.get(), &is_unrunnable);
     if (!status.ok()) {
@@ -314,7 +313,7 @@ void GeneratorScheduler::WaitUntilIdle() {
   }
 }
 
-::mediapipe::Status GeneratorScheduler::GetNonScheduledGenerators(
+mediapipe::Status GeneratorScheduler::GetNonScheduledGenerators(
     std::vector<int>* non_scheduled_generators) const {
   non_scheduled_generators->clear();
 
@@ -327,7 +326,7 @@ void GeneratorScheduler::WaitUntilIdle() {
       non_scheduled_generators->push_back(i);
     }
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
 void GeneratorScheduler::AddApplicationThreadTask(std::function<void()> task) {
@@ -357,9 +356,8 @@ void GeneratorScheduler::RunApplicationThreadTasks() {
 
 PacketGeneratorGraph::~PacketGeneratorGraph() {}
 
-::mediapipe::Status PacketGeneratorGraph::Initialize(
-    const ValidatedGraphConfig* validated_graph,
-    ::mediapipe::Executor* executor,
+mediapipe::Status PacketGeneratorGraph::Initialize(
+    const ValidatedGraphConfig* validated_graph, mediapipe::Executor* executor,
     const std::map<std::string, Packet>& input_side_packets) {
   validated_graph_ = validated_graph;
   executor_ = executor;
@@ -370,14 +368,14 @@ PacketGeneratorGraph::~PacketGeneratorGraph() {}
                            /*initial=*/true);
 }
 
-::mediapipe::Status PacketGeneratorGraph::RunGraphSetup(
+mediapipe::Status PacketGeneratorGraph::RunGraphSetup(
     const std::map<std::string, Packet>& input_side_packets,
     std::map<std::string, Packet>* output_side_packets) const {
   *output_side_packets = base_packets_;
   for (const std::pair<const std::string, Packet>& item : input_side_packets) {
     auto iter = output_side_packets->find(item.first);
     if (iter != output_side_packets->end()) {
-      return ::mediapipe::AlreadyExistsError(
+      return mediapipe::AlreadyExistsError(
           absl::StrCat("Side packet \"", iter->first, "\" was defined twice."));
     }
     output_side_packets->insert(iter, item);
@@ -396,10 +394,10 @@ PacketGeneratorGraph::~PacketGeneratorGraph() {}
       << "Some Generators were unrunnable (validation should have failed).\n"
          "Generator indexes: "
       << absl::StrJoin(non_scheduled_generators, ", ");
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
-::mediapipe::Status PacketGeneratorGraph::ExecuteGenerators(
+mediapipe::Status PacketGeneratorGraph::ExecuteGenerators(
     std::map<std::string, Packet>* output_side_packets,
     std::vector<int>* non_scheduled_generators, bool initial) const {
   VLOG(1) << "ExecuteGenerators initial == " << initial;
diff --git a/mediapipe/framework/packet_generator_graph.h b/mediapipe/framework/packet_generator_graph.h
index 7d77fe6d0..a1c493ca9 100644
--- a/mediapipe/framework/packet_generator_graph.h
+++ b/mediapipe/framework/packet_generator_graph.h
@@ -67,14 +67,14 @@ class PacketGeneratorGraph {
   // stage and will be common to all calls to CalculatorGraph::Run().
   // Any generators which are runnable at this stage (that only depend on
   // things in the graph or input_side_packets) will be run at this time.
-  virtual ::mediapipe::Status Initialize(
+  virtual mediapipe::Status Initialize(
       const ValidatedGraphConfig* validated_graph,
-      ::mediapipe::Executor* executor,
+      mediapipe::Executor* executor,
       const std::map<std::string, Packet>& input_side_packets);
 
   // Add the input_side_packets and run any remaining generators (which
   // must now be runnable) to produce output_side_packets.
-  virtual ::mediapipe::Status RunGraphSetup(
+  virtual mediapipe::Status RunGraphSetup(
       const std::map<std::string, Packet>& input_side_packets,
       std::map<std::string, Packet>* output_side_packets) const;
 
@@ -96,7 +96,7 @@ class PacketGeneratorGraph {
   // packets and unrunnable generators.  "initial" must be set to true for
   // the first pass and false for subsequent passes.  output_side_packets
   // must be set to include the input side packets before calling.
-  ::mediapipe::Status ExecuteGenerators(
+  mediapipe::Status ExecuteGenerators(
       std::map<std::string, Packet>* output_side_packets,
       std::vector<int>* non_scheduled_generators, bool initial) const;
 
@@ -109,9 +109,9 @@ class PacketGeneratorGraph {
 
   // The executor to use for running the generators.  We do not own the
   // executor but it must outlive this object.
-  ::mediapipe::Executor* executor_ = nullptr;
+  mediapipe::Executor* executor_ = nullptr;
   // An object to own the executor if it needs to be deleted.
-  std::unique_ptr<::mediapipe::Executor> executor_owner_;
+  std::unique_ptr<mediapipe::Executor> executor_owner_;
 
   // The base level packets available after initialization.
   std::map<std::string, Packet> base_packets_;
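A hypothetical call sequence for the two-phase API declared above (a sketch, not part of this diff; side-packet names are invented, and a null executor is assumed to select the default):

```cpp
#include "mediapipe/framework/packet_generator_graph.h"

// Base generators run once in Initialize(); the remaining ones run on each
// RunGraphSetup() call with the per-run side packets added.
mediapipe::Status SetUpSidePackets(
    const mediapipe::ValidatedGraphConfig* validated_graph) {
  mediapipe::PacketGeneratorGraph generator_graph;
  MP_RETURN_IF_ERROR(generator_graph.Initialize(
      validated_graph, /*executor=*/nullptr,
      {{"base_side_packet", mediapipe::MakePacket<int>(1)}}));
  std::map<std::string, mediapipe::Packet> output_side_packets;
  return generator_graph.RunGraphSetup(
      {{"run_side_packet", mediapipe::MakePacket<int>(2)}},
      &output_side_packets);
}
```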
diff --git a/mediapipe/framework/packet_generator_test.cc b/mediapipe/framework/packet_generator_test.cc
index f56e892d3..05197bc9e 100644
--- a/mediapipe/framework/packet_generator_test.cc
+++ b/mediapipe/framework/packet_generator_test.cc
@@ -27,7 +27,7 @@ namespace mediapipe {
 namespace {
 
 class DoNothingGenerator : public PacketGenerator {
  public:
-  static ::mediapipe::Status FillExpectations(
+  static mediapipe::Status FillExpectations(
       const PacketGeneratorOptions& extendable_options,
       PacketTypeSet* input_side_packets, PacketTypeSet* output_side_packets) {
     for (CollectionItemId id = input_side_packets->BeginId();
@@ -38,17 +38,17 @@ class DoNothingGenerator : public PacketGenerator {
          id < output_side_packets->EndId(); ++id) {
       output_side_packets->Get(id).Set<bool>();
     }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
-  static ::mediapipe::Status Generate(
+  static mediapipe::Status Generate(
       const PacketGeneratorOptions& extendable_options,
       const PacketSet& input_side_packets, PacketSet* output_side_packets) {
     for (CollectionItemId id = output_side_packets->BeginId();
          id < output_side_packets->EndId(); ++id) {
       output_side_packets->Get(id) = MakePacket<bool>(true);
     }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 };
diff --git a/mediapipe/framework/packet_registration_test.cc b/mediapipe/framework/packet_registration_test.cc
index 25acc264c..0860adb1d 100644
--- a/mediapipe/framework/packet_registration_test.cc
+++ b/mediapipe/framework/packet_registration_test.cc
@@ -28,20 +28,20 @@ namespace test_ns {
 
 class TestSinkCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     cc->Inputs().Tag("IN").Set();
     cc->Outputs().Tag("OUT").Set<int>();
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
-  ::mediapipe::Status Process(CalculatorContext* cc) override {
+  mediapipe::Status Process(CalculatorContext* cc) override {
     int x = cc->Inputs().Tag("IN").Get().x();
     cc->Outputs().Tag("OUT").AddPacket(
         MakePacket<int>(x).At(cc->InputTimestamp()));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 };
-REGISTER_CALCULATOR(::mediapipe::test_ns::TestSinkCalculator);
+REGISTER_CALCULATOR(TestSinkCalculator);
 
 }  // namespace test_ns
diff --git a/mediapipe/framework/packet_test.cc b/mediapipe/framework/packet_test.cc
index d63578668..db863248d 100644
--- a/mediapipe/framework/packet_test.cc
+++ b/mediapipe/framework/packet_test.cc
@@ -145,7 +145,7 @@ struct UnregisteredPairStruct {
   std::string first;
   bool second;
 };
-MEDIAPIPE_REGISTER_TYPE(::mediapipe::RegisteredPairStruct,
+MEDIAPIPE_REGISTER_TYPE(mediapipe::RegisteredPairStruct,
                         "::mediapipe::RegisteredPairStruct", nullptr, nullptr);
 MEDIAPIPE_REGISTER_TYPE(int, "int", nullptr, nullptr);
 MEDIAPIPE_REGISTER_TYPE(float, "float", nullptr, nullptr);
@@ -210,8 +210,8 @@ TEST(PacketTest, ValidateAsProtoMessageLite) {
   Packet packet = Adopt(proto_ptr.release());
   MP_EXPECT_OK(packet.ValidateAsProtoMessageLite());
   Packet packet2 = MakePacket<int>(3);
-  ::mediapipe::Status status = packet2.ValidateAsProtoMessageLite();
-  EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kInvalidArgument);
+  mediapipe::Status status = packet2.ValidateAsProtoMessageLite();
+  EXPECT_EQ(status.code(), mediapipe::StatusCode::kInvalidArgument);
 }
 
 TEST(PacketTest, SyncedPacket) {
@@ -283,11 +283,11 @@ TEST(PacketTest, TestPacketMoveConstructor) {
 
 TEST(PacketTest, TestPacketConsume) {
   Packet packet1 = MakePacket<int>(33);
   Packet packet_copy = packet1;
-  ::mediapipe::StatusOr<std::unique_ptr<int>> result1 =
+  mediapipe::StatusOr<std::unique_ptr<int>> result1 =
       packet_copy.Consume<int>();
   // Both packet1 and packet_copy own the data, Consume() should return error.
-  ::mediapipe::Status status1 = result1.status();
-  EXPECT_EQ(status1.code(), ::mediapipe::StatusCode::kFailedPrecondition);
+  mediapipe::Status status1 = result1.status();
+  EXPECT_EQ(status1.code(), mediapipe::StatusCode::kFailedPrecondition);
   EXPECT_THAT(status1.message(),
               testing::HasSubstr("isn't the sole owner of the holder"));
   ASSERT_FALSE(packet1.IsEmpty());
@@ -297,7 +297,7 @@ TEST(PacketTest, TestPacketConsume) {
 
   Packet packet2 = MakePacket<float>(33);
   // Types don't match (int vs float).
-  ::mediapipe::StatusOr<std::unique_ptr<int>> result2 =
+  mediapipe::StatusOr<std::unique_ptr<int>> result2 =
       packet2.Consume<int>();
   EXPECT_THAT(
       result2.status().message(),
@@ -307,7 +307,7 @@ TEST(PacketTest, TestPacketConsume) {
 
   // packet3 is the sole owner of the data.
   Packet packet3 = MakePacket<int>(42);
-  ::mediapipe::StatusOr<std::unique_ptr<int>> result3 = packet3.Consume<int>();
+  mediapipe::StatusOr<std::unique_ptr<int>> result3 = packet3.Consume<int>();
   // After Consume(), packet3 should be empty and result3 owns the data.
   EXPECT_TRUE(result3.ok());
   ASSERT_NE(nullptr, result3.ValueOrDie());
@@ -319,7 +319,7 @@ TEST(PacketTest, TestPacketConsumeOrCopy) {
   Packet packet1 = MakePacket<int>(33);
   Packet packet_copy = packet1;
   bool was_copied1 = false;
-  ::mediapipe::StatusOr<std::unique_ptr<int>> result1 =
+  mediapipe::StatusOr<std::unique_ptr<int>> result1 =
       packet_copy.ConsumeOrCopy<int>(&was_copied1);
   // Both packet1 and packet_copy own the data, ConsumeOrCopy() returns a copy
   // of the data and sets packet_copy to empty.
@@ -334,7 +334,7 @@ TEST(PacketTest, TestPacketConsumeOrCopy) {
 
   Packet packet2 = MakePacket<float>(33);
   // Types don't match (int vs float).
-  ::mediapipe::StatusOr<std::unique_ptr<int>> result2 =
+  mediapipe::StatusOr<std::unique_ptr<int>> result2 =
       packet2.ConsumeOrCopy<int>();
   EXPECT_THAT(
       result2.status().message(),
@@ -346,7 +346,7 @@ TEST(PacketTest, TestPacketConsumeOrCopy) {
   bool was_copied3 = false;
   // packet3 is the sole owner of the data. ConsumeOrCopy() transfers the
   // ownership to result3 and makes packet3 empty.
-  ::mediapipe::StatusOr<std::unique_ptr<int>> result3 =
+  mediapipe::StatusOr<std::unique_ptr<int>> result3 =
       packet3.ConsumeOrCopy<int>(&was_copied3);
   EXPECT_FALSE(was_copied3);
   EXPECT_TRUE(result3.ok());
@@ -358,9 +358,9 @@ TEST(PacketTest, TestPacketConsumeOrCopy) {
 TEST(PacketTest, TestConsumeForeignHolder) {
   std::unique_ptr<int> data(new int(33));
   Packet packet = PointToForeign(data.get());
-  ::mediapipe::StatusOr<std::unique_ptr<int>> result = packet.Consume<int>();
+  mediapipe::StatusOr<std::unique_ptr<int>> result = packet.Consume<int>();
   EXPECT_FALSE(result.ok());
-  EXPECT_EQ(result.status().code(), ::mediapipe::StatusCode::kInternal);
+  EXPECT_EQ(result.status().code(), mediapipe::StatusCode::kInternal);
   EXPECT_EQ(result.status().message(),
             "Foreign holder can't release data ptr without ownership.");
   ASSERT_FALSE(packet.IsEmpty());
@@ -372,7 +372,7 @@ TEST(PacketTest, TestForeignHolderConsumeOrCopy) {
   Packet packet1 = PointToForeign(data1.get());
   Packet packet_copy = packet1;
   bool was_copied1 = false;
-  ::mediapipe::StatusOr<std::unique_ptr<int>> result1 =
+  mediapipe::StatusOr<std::unique_ptr<int>> result1 =
       packet_copy.ConsumeOrCopy<int>(&was_copied1);
   // After ConsumeOrCopy(), result1 gets the copy of packet_copy's data and
   // packet_copy is set to empty.
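A sketch of the foreign-holder behavior these tests pin down (illustrative only): a packet built with `PointToForeign()` never owns its payload, so `Consume<T>()` fails with `kInternal`, while `ConsumeOrCopy<T>()` degrades to a copy and reports it.

```cpp
#include "mediapipe/framework/packet.h"

void ForeignHolderDemo() {
  int value = 33;
  mediapipe::Packet p = mediapipe::PointToForeign(&value);
  auto consumed = p.Consume<int>();  // !ok(): cannot release ownership.
  bool was_copied = false;
  auto copied = p.ConsumeOrCopy<int>(&was_copied);  // ok(); was_copied==true.
}
```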
@@ -388,7 +388,7 @@ TEST(PacketTest, TestForeignHolderConsumeOrCopy) {
   std::unique_ptr<int> data2(new int(33));
   Packet packet2 = PointToForeign(data2.get());
   bool was_copied2 = false;
-  ::mediapipe::StatusOr<std::unique_ptr<int>> result2 =
+  mediapipe::StatusOr<std::unique_ptr<int>> result2 =
       packet2.ConsumeOrCopy<int>(&was_copied2);
   // After ConsumeOrCopy(), result2 gets the copy of packet2's data and packet2
   // is set to empty.
@@ -402,11 +402,11 @@ TEST(PacketTest, TestForeignHolderConsumeOrCopy) {
 TEST(PacketTest, TestConsumeBoundedArray) {
   Packet packet1 = MakePacket<int[3]>(10, 20, 30);
   Packet packet_copy = packet1;
-  ::mediapipe::StatusOr<std::unique_ptr<int[3]>> result1 =
+  mediapipe::StatusOr<std::unique_ptr<int[3]>> result1 =
       packet_copy.Consume<int[3]>();
   // Both packet1 and packet_copy own the data, Consume() should return error.
-  ::mediapipe::Status status1 = result1.status();
-  EXPECT_EQ(status1.code(), ::mediapipe::StatusCode::kFailedPrecondition);
+  mediapipe::Status status1 = result1.status();
+  EXPECT_EQ(status1.code(), mediapipe::StatusCode::kFailedPrecondition);
   EXPECT_THAT(status1.message(),
               testing::HasSubstr("isn't the sole owner of the holder"));
   ASSERT_FALSE(packet1.IsEmpty());
@@ -422,7 +422,7 @@ TEST(PacketTest, TestConsumeBoundedArray) {
 
   Packet packet2 = MakePacket<int[3]>(40, 50, 60);
   // After Consume(), packet2 should be empty and result2 owns the data.
-  ::mediapipe::StatusOr<std::unique_ptr<int[3]>> result2 =
+  mediapipe::StatusOr<std::unique_ptr<int[3]>> result2 =
       packet2.Consume<int[3]>();
   ASSERT_NE(nullptr, result2.ValueOrDie());
   auto value3 = result2.ValueOrDie().get();
@@ -436,7 +436,7 @@ TEST(PacketTest, TestConsumeOrCopyBoundedArray) {
   Packet packet1 = MakePacket<int[3]>(10, 20, 30);
   Packet packet_copy = packet1;
   bool was_copied1 = false;
-  ::mediapipe::StatusOr<std::unique_ptr<int[3]>> result1 =
+  mediapipe::StatusOr<std::unique_ptr<int[3]>> result1 =
       packet_copy.ConsumeOrCopy<int[3]>(&was_copied1);
   // Both packet1 and packet_copy own the data, ConsumeOrCopy() returns a copy
   // of the data and sets packet_copy to empty.
@@ -459,7 +459,7 @@ TEST(PacketTest, TestConsumeOrCopyBoundedArray) {
   bool was_copied2 = false;
   // packet2 is the sole owner of the data. ConsumeOrCopy() transfers the
   // ownership to result2 and makes packet2 empty.
-  ::mediapipe::StatusOr<std::unique_ptr<int[3]>> result2 =
+  mediapipe::StatusOr<std::unique_ptr<int[3]>> result2 =
       packet2.ConsumeOrCopy<int[3]>(&was_copied2);
   EXPECT_TRUE(result2.ok());
   EXPECT_FALSE(was_copied2);
diff --git a/mediapipe/framework/packet_type.cc b/mediapipe/framework/packet_type.cc
index 83b3b8b49..ecf9f8041 100644
--- a/mediapipe/framework/packet_type.cc
+++ b/mediapipe/framework/packet_type.cc
@@ -125,9 +125,9 @@ const std::string PacketType::DebugTypeName() const {
   return type_name_;
 }
 
-::mediapipe::Status PacketType::Validate(const Packet& packet) const {
+mediapipe::Status PacketType::Validate(const Packet& packet) const {
   if (!initialized_) {
-    return ::mediapipe::InvalidArgumentError(
+    return mediapipe::InvalidArgumentError(
         "Uninitialized PacketType was used for validation.");
   }
   if (same_as_) {
@@ -136,7 +136,7 @@ const std::string PacketType::DebugTypeName() const {
     return GetSameAs()->Validate(packet);
   }
   if (no_packets_allowed_) {
-    return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC)
+    return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC)
            << "No packets are allowed for type: " << type_name_;
   }
   if (validate_method_ != nullptr) {
@@ -144,10 +144,10 @@ const std::string PacketType::DebugTypeName() const {
   }
   // The PacketType is the Any Type.
   if (packet.IsEmpty()) {
-    return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC)
+    return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC)
            << "Empty packets are not allowed for type: " << type_name_;
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
 bool PacketType::IsConsistentWith(const PacketType& other) const {
@@ -176,8 +176,7 @@ bool PacketType::IsConsistentWith(const PacketType& other) const {
   return type1->validate_method_ == type2->validate_method_;
 }
 
-::mediapipe::Status ValidatePacketTypeSet(
-    const PacketTypeSet& packet_type_set) {
+mediapipe::Status ValidatePacketTypeSet(const PacketTypeSet& packet_type_set) {
   std::vector<std::string> errors;
   if (packet_type_set.GetErrorHandler().HasError()) {
     errors = packet_type_set.GetErrorHandler().ErrorMessages();
@@ -191,30 +190,30 @@ bool PacketType::IsConsistentWith(const PacketType& other) const {
     }
   }
   if (!errors.empty()) {
-    return ::mediapipe::InvalidArgumentError(absl::StrCat(
+    return mediapipe::InvalidArgumentError(absl::StrCat(
         "ValidatePacketTypeSet failed:\n", absl::StrJoin(errors, "\n")));
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
-::mediapipe::Status ValidatePacketSet(const PacketTypeSet& packet_type_set,
-                                      const PacketSet& packet_set) {
-  std::vector<::mediapipe::Status> errors;
+mediapipe::Status ValidatePacketSet(const PacketTypeSet& packet_type_set,
+                                    const PacketSet& packet_set) {
+  std::vector<mediapipe::Status> errors;
   if (!packet_type_set.TagMap()->SameAs(*packet_set.TagMap())) {
-    return ::mediapipe::InvalidArgumentError(absl::StrCat(
+    return mediapipe::InvalidArgumentError(absl::StrCat(
         "TagMaps do not match.  PacketTypeSet TagMap:\n",
         packet_type_set.TagMap()->DebugString(), "\n\nPacketSet TagMap:\n",
         packet_set.TagMap()->DebugString()));
   }
   for (CollectionItemId id = packet_type_set.BeginId();
        id < packet_type_set.EndId(); ++id) {
-    ::mediapipe::Status status =
+    mediapipe::Status status =
         packet_type_set.Get(id).Validate(packet_set.Get(id));
     if (!status.ok()) {
       std::pair<std::string, int> tag_index =
           packet_type_set.TagAndIndexFromId(id);
       errors.push_back(
-          ::mediapipe::StatusBuilder(status, MEDIAPIPE_LOC).SetPrepend()
+          mediapipe::StatusBuilder(status, MEDIAPIPE_LOC).SetPrepend()
           << "Packet \"" << packet_type_set.TagMap()->Names()[id.value()]
           << "\" with tag \"" << tag_index.first << "\" and index "
          << tag_index.second << " failed validation.  ");
    }
  }
@@ -223,7 +222,7 @@ bool PacketType::IsConsistentWith(const PacketType& other) const {
   if (!errors.empty()) {
     return tool::CombinedStatus("ValidatePacketSet failed:", errors);
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
 }  // namespace mediapipe
diff --git a/mediapipe/framework/packet_type.h b/mediapipe/framework/packet_type.h
index 003e94ffd..1a3ee81e1 100644
--- a/mediapipe/framework/packet_type.h
+++ b/mediapipe/framework/packet_type.h
@@ -85,7 +85,7 @@ class PacketType {
   bool IsConsistentWith(const PacketType& other) const;
 
   // Returns OK if the packet contains an object of the appropriate type.
-  ::mediapipe::Status Validate(const Packet& packet) const;
+  mediapipe::Status Validate(const Packet& packet) const;
 
   // Returns a pointer to the Registered type name, or nullptr if the type
   // is not registered.  Do not use this for validation, use Validate()
@@ -98,7 +98,7 @@ class PacketType {
  private:
   // Typedef for the ValidateAsType() method in Packet that is used for
   // type validation and identification.
-  typedef ::mediapipe::Status (Packet::*ValidateMethodType)() const;
+  typedef mediapipe::Status (Packet::*ValidateMethodType)() const;
 
   // Records whether the packet type was set in any way.
   bool initialized_;
@@ -213,15 +213,15 @@ using PacketTypeSet =
 // Returns OK if the packets in the PacketSet are of the appropriate type.
 // packet_type_set must be valid before this is called (but packet_set
 // may be in any state).
-::mediapipe::Status ValidatePacketSet(const PacketTypeSet& packet_type_set,
-                                      const PacketSet& packet_set);
+mediapipe::Status ValidatePacketSet(const PacketTypeSet& packet_type_set,
+                                    const PacketSet& packet_set);
 
 // Validates that the PacketTypeSet was initialized properly.
 // An error is returned if
 // 1) Tag() or Index() is called with an invalid argument (however,
 //    a valid PacketType is still returned by the function).
 // 2) Any PacketType is not initialized.
-::mediapipe::Status ValidatePacketTypeSet(const PacketTypeSet& packet_type_set);
+mediapipe::Status ValidatePacketTypeSet(const PacketTypeSet& packet_type_set);
 
 // Templated function definitions.
diff --git a/mediapipe/framework/platform_specific_profiling.h b/mediapipe/framework/platform_specific_profiling.h
index 5b16f3c3e..cbdc653e0 100644
--- a/mediapipe/framework/platform_specific_profiling.h
+++ b/mediapipe/framework/platform_specific_profiling.h
@@ -81,16 +81,16 @@ class PlatformSpecificProfilingScope {
 }  // namespace mediapipe
 
 // General profiling macro.
-#define PLATFORM_SPECIFIC_PROFILER(name, id, packet_timestamp)           \
-  ::mediapipe::PlatformSpecificProfilingScope platform_specific_scope(   \
+#define PLATFORM_SPECIFIC_PROFILER(name, id, packet_timestamp)         \
+  mediapipe::PlatformSpecificProfilingScope platform_specific_scope(   \
       name, id, packet_timestamp);
 
 // Automated profiling macro.
 // Filters out all methods except Calculator::Process().
-#define PLATFORM_SPECIFIC_PROCESS_PROFILER(name, id, method_name,        \
-                                           packet_timestamp)             \
-  ::mediapipe::PlatformSpecificProfilingScope platform_specific_scope(   \
-      name, id, packet_timestamp, ::mediapipe::TraceEvent::method_name);
+#define PLATFORM_SPECIFIC_PROCESS_PROFILER(name, id, method_name,      \
+                                           packet_timestamp)           \
+  mediapipe::PlatformSpecificProfilingScope platform_specific_scope(   \
+      name, id, packet_timestamp, mediapipe::TraceEvent::method_name);
 
 #else
 #define PLATFORM_SPECIFIC_PROFILER(name, id, packet_timestamp)
diff --git a/mediapipe/framework/profiler/circular_buffer_test.cc b/mediapipe/framework/profiler/circular_buffer_test.cc
index 165d7974a..37456f518 100644
--- a/mediapipe/framework/profiler/circular_buffer_test.cc
+++ b/mediapipe/framework/profiler/circular_buffer_test.cc
@@ -53,7 +53,7 @@ TEST_F(CircularBufferTest, ParallelWriteAndRead) {
   std::atomic_int read_sum(0);
   std::atomic_int read_count(0);
   {
-    ::mediapipe::ThreadPool pool(12);
+    mediapipe::ThreadPool pool(12);
     pool.StartWorkers();
 
     // Start 6 writers.
diff --git a/mediapipe/framework/profiler/graph_profiler.cc b/mediapipe/framework/profiler/graph_profiler.cc
index 55a7b70b1..10a2e742e 100644
--- a/mediapipe/framework/profiler/graph_profiler.cc
+++ b/mediapipe/framework/profiler/graph_profiler.cc
@@ -199,7 +199,7 @@ void GraphProfiler::Reset() {
 }
 
 // Begins profiling for a single graph run.
-::mediapipe::Status GraphProfiler::Start(::mediapipe::Executor* executor) {
+mediapipe::Status GraphProfiler::Start(mediapipe::Executor* executor) {
   // If specified, start periodic profile output while the graph runs.
   Resume();
   if (is_tracing_ && IsTraceIntervalEnabled(profiler_config_, tracer()) &&
@@ -220,18 +220,18 @@ void GraphProfiler::Reset() {
       }
     });
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
 // Ends profiling for a single graph run.
-::mediapipe::Status GraphProfiler::Stop() {
+mediapipe::Status GraphProfiler::Stop() {
   is_running_ = false;
   Pause();
   // If specified, write a final profile.
   if (IsTraceLogEnabled(profiler_config_)) {
     MP_RETURN_IF_ERROR(WriteProfile());
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
 void GraphProfiler::LogEvent(const TraceEvent& event) {
@@ -281,7 +281,7 @@ void GraphProfiler::AddPacketInfo(const TraceEvent& packet_info) {
                     production_time_usec, production_time_usec);
 }
 
-::mediapipe::Status GraphProfiler::GetCalculatorProfiles(
+mediapipe::Status GraphProfiler::GetCalculatorProfiles(
     std::vector<CalculatorProfile>* profiles) const {
   absl::ReaderMutexLock lock(&profiler_mutex_);
   RET_CHECK(is_initialized_)
@@ -289,7 +289,7 @@ void GraphProfiler::AddPacketInfo(const TraceEvent& packet_info) {
   for (auto& entry : calculator_profiles_) {
     profiles->push_back(entry.second);
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
 void GraphProfiler::InitializeTimeHistogram(int64 interval_size_usec,
@@ -566,9 +566,9 @@ void AssignNodeNames(GraphProfile* profile) {
   }
 }
 
-::mediapipe::StatusOr<std::string> GraphProfiler::GetTraceLogPath() {
+mediapipe::StatusOr<std::string> GraphProfiler::GetTraceLogPath() {
   if (!IsTraceLogEnabled(profiler_config_)) {
-    return ::mediapipe::InternalError(
+    return mediapipe::InternalError(
         "Trace log writing is disabled, unable to get trace_log_path.");
   }
   if (profiler_config_.trace_log_path().empty()) {
@@ -581,10 +581,10 @@ void AssignNodeNames(GraphProfile* profile) {
   }
 }
 
-::mediapipe::Status GraphProfiler::WriteProfile() {
+mediapipe::Status GraphProfiler::WriteProfile() {
   if (profiler_config_.trace_log_disabled()) {
     // Logging is disabled, so we can exit writing without error.
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
   ASSIGN_OR_RETURN(std::string trace_log_path, GetTraceLogPath());
   int log_interval_count = GetLogIntervalCount(profiler_config_);
@@ -606,7 +606,7 @@ void AssignNodeNames(GraphProfile* profile) {
   previous_log_end_time_ = end_time;
   // If there are no trace events, skip log writing.
   if (is_tracing_ && trace->calculator_trace().empty()) {
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
   // Record the latest CalculatorProfiles.
diff --git a/mediapipe/framework/profiler/graph_profiler.h b/mediapipe/framework/profiler/graph_profiler.h
index 48b486593..94fc9fcc1 100644
--- a/mediapipe/framework/profiler/graph_profiler.h
+++ b/mediapipe/framework/profiler/graph_profiler.h
@@ -140,9 +140,9 @@ class GraphProfiler : public std::enable_shared_from_this<GraphProfiler> {
   // Process() and does NOT affect information for Open() and Close() methods.
   void Reset() ABSL_LOCKS_EXCLUDED(profiler_mutex_);
   // Begins profiling for a single graph run.
-  ::mediapipe::Status Start(::mediapipe::Executor* executor);
+  mediapipe::Status Start(mediapipe::Executor* executor);
   // Ends profiling for a single graph run.
-  ::mediapipe::Status Stop();
+  mediapipe::Status Stop();
 
   // Record a tracing event.
   void LogEvent(const TraceEvent& event);
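An illustration of the accessor declared just below (it mirrors its use in `graph_profiler_test.cc` later in this diff); `DumpProfiles` is a hypothetical helper:

```cpp
#include "mediapipe/framework/calculator_framework.h"

// Pull the accumulated per-calculator profiles from a graph's profiler
// after (or during) a run.
mediapipe::Status DumpProfiles(mediapipe::CalculatorGraph* graph) {
  std::vector<mediapipe::CalculatorProfile> profiles;
  MP_RETURN_IF_ERROR(graph->profiler()->GetCalculatorProfiles(&profiles));
  for (const auto& p : profiles) {
    LOG(INFO) << p.name() << ": open_runtime=" << p.open_runtime();
  }
  return mediapipe::OkStatus();
}
```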
@@ -150,12 +150,12 @@ class GraphProfiler : public std::enable_shared_from_this<GraphProfiler> {
   // Collects the runtime profile for Open(), Process(), and Close() of each
   // calculator in the graph. May be called at any time after the graph has
   // been initialized.
-  ::mediapipe::Status GetCalculatorProfiles(std::vector<CalculatorProfile>*)
-      const ABSL_LOCKS_EXCLUDED(profiler_mutex_);
+  mediapipe::Status GetCalculatorProfiles(std::vector<CalculatorProfile>*) const
+      ABSL_LOCKS_EXCLUDED(profiler_mutex_);
 
   // Writes recent profiling and tracing data to a file specified in the
   // ProfilerConfig. Includes events since the previous call to WriteProfile.
-  ::mediapipe::Status WriteProfile();
+  mediapipe::Status WriteProfile();
 
   // Returns the trace event buffer.
   GraphTracer* tracer() { return packet_tracer_.get(); }
@@ -294,7 +294,7 @@ class GraphProfiler : public std::enable_shared_from_this<GraphProfiler> {
   // Helper method to get trace_log_path. If the trace_log_path is empty and
   // tracing is enabled, this function returns a default platform dependent
   // trace_log_path.
-  ::mediapipe::StatusOr<std::string> GetTraceLogPath();
+  mediapipe::StatusOr<std::string> GetTraceLogPath();
 
   // Helper method to get the clock time in microsecond.
   int64 TimeNowUsec() { return ToUnixMicros(clock_->TimeNow()); }
diff --git a/mediapipe/framework/profiler/graph_profiler_stub.h b/mediapipe/framework/profiler/graph_profiler_stub.h
index a21ca0ed2..16a12abf0 100644
--- a/mediapipe/framework/profiler/graph_profiler_stub.h
+++ b/mediapipe/framework/profiler/graph_profiler_stub.h
@@ -81,17 +81,17 @@ class GraphProfilerStub {
   inline void Initialize(const ValidatedGraphConfig& validated_graph_config) {}
   inline void SetClock(const std::shared_ptr<mediapipe::Clock>& clock) {}
   inline void LogEvent(const TraceEvent& event) {}
-  inline ::mediapipe::Status GetCalculatorProfiles(
+  inline mediapipe::Status GetCalculatorProfiles(
       std::vector<CalculatorProfile>*) const {
     return mediapipe::OkStatus();
   }
   inline void Pause() {}
   inline void Resume() {}
   inline void Reset() {}
-  inline ::mediapipe::Status Start(::mediapipe::Executor* executor) {
+  inline mediapipe::Status Start(mediapipe::Executor* executor) {
     return mediapipe::OkStatus();
   }
-  inline ::mediapipe::Status Stop() { return mediapipe::OkStatus(); }
+  inline mediapipe::Status Stop() { return mediapipe::OkStatus(); }
   inline GraphTracer* tracer() { return nullptr; }
   inline std::unique_ptr<GlProfilingHelper> CreateGlProfilingHelper() {
     return nullptr;
diff --git a/mediapipe/framework/profiler/graph_profiler_test.cc b/mediapipe/framework/profiler/graph_profiler_test.cc
index b5eb953ff..d06b0eb6c 100644
--- a/mediapipe/framework/profiler/graph_profiler_test.cc
+++ b/mediapipe/framework/profiler/graph_profiler_test.cc
@@ -1176,7 +1176,7 @@ TEST(GraphProfilerTest, ParallelReads) {
   MP_ASSERT_OK(graph.ObserveOutputStream("out_1", [&](const Packet& packet) {
     absl::MutexLock lock(&out_1_mutex);
     out_1_packets.push_back(packet);
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }));
   MP_EXPECT_OK(graph.StartRun(
       {{"range_step", MakePacket<std::pair<uint32, uint32>>(1000, 1)}}));
diff --git a/mediapipe/framework/profiler/graph_tracer_test.cc b/mediapipe/framework/profiler/graph_tracer_test.cc
index 13b5522bb..1d91b0ab1 100644
--- a/mediapipe/framework/profiler/graph_tracer_test.cc
+++ b/mediapipe/framework/profiler/graph_tracer_test.cc
@@ -121,7 +121,7 @@ TEST_F(GraphTracerTest, EmptyTrace) {
   // Validate the GraphTrace data.
   EXPECT_THAT(GetTrace(),
-              EqualsProto(::mediapipe::ParseTextProtoOrDie<GraphTrace>(R"(
+              EqualsProto(mediapipe::ParseTextProtoOrDie<GraphTrace>(R"(
                 base_time: 0
                 base_timestamp: 0
                 stream_name: ""
@@ -144,7 +144,7 @@ TEST_F(GraphTracerTest, CalculatorTrace) {
   // Validate the GraphTrace data.
   EXPECT_THAT(
-      GetTrace(), EqualsProto(::mediapipe::ParseTextProtoOrDie<GraphTrace>(R"(
+      GetTrace(), EqualsProto(mediapipe::ParseTextProtoOrDie<GraphTrace>(R"(
        base_time: 1608911100000000
        base_timestamp: 1608911100000000
        stream_name: ""
@@ -225,7 +225,7 @@ TEST_F(GraphTracerTest, GraphTrace) {
   // Validate the GraphTrace data.
   EXPECT_THAT(
-      GetTrace(), EqualsProto(::mediapipe::ParseTextProtoOrDie<GraphTrace>(R"(
+      GetTrace(), EqualsProto(mediapipe::ParseTextProtoOrDie<GraphTrace>(R"(
        base_time: 1608911100000000
        base_timestamp: 1608911100000000
        stream_name: ""
@@ -425,7 +425,7 @@ class GraphTracerE2ETest : public ::testing::Test {
     MP_ASSERT_OK(graph_.SetExecutor("", executor));
   }
 
-  void SetUpRealClock() { clock_ = ::mediapipe::Clock::RealClock(); }
+  void SetUpRealClock() { clock_ = mediapipe::Clock::RealClock(); }
 
   static Packet PacketAt(int64 ts) {
     return Adopt(new int64(999)).At(Timestamp(ts));
@@ -475,19 +475,19 @@ class GraphTracerE2ETest : public ::testing::Test {
   }
 
   // A Calculator::Process callback function.
-  typedef std::function<::mediapipe::Status(const InputStreamShardSet&,
-                                            OutputStreamShardSet*)>
+  typedef std::function<mediapipe::Status(const InputStreamShardSet&,
+                                          OutputStreamShardSet*)>
       ProcessFunction;
 
   // A testing callback function that passes through all packets.
-  ::mediapipe::Status PassThrough(const InputStreamShardSet& inputs,
-                                  OutputStreamShardSet* outputs) {
+  mediapipe::Status PassThrough(const InputStreamShardSet& inputs,
+                                OutputStreamShardSet* outputs) {
     for (int i = 0; i < inputs.NumEntries(); ++i) {
       if (!inputs.Index(i).Value().IsEmpty()) {
         outputs->Index(i).AddPacket(inputs.Index(i).Value());
       }
     }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
   void RunPassThroughGraph() {
@@ -511,7 +511,7 @@ class GraphTracerE2ETest : public ::testing::Test {
     MP_ASSERT_OK(
         graph_.ObserveOutputStream("output_0", [&](const Packet& packet) {
          out_packets.push_back(packet);
-          return ::mediapipe::OkStatus();
+          return mediapipe::OkStatus();
        }));
    simulation_clock_->ThreadStart();
    MP_ASSERT_OK(graph_.StartRun({}));
@@ -557,7 +557,7 @@ class GraphTracerE2ETest : public ::testing::Test {
        clock_->Sleep(absl::Microseconds(packets.front().first));
        outputs->Index(0).AddPacket(packets.front().second);
        packets.erase(packets.begin());
-        return ::mediapipe::OkStatus();
+        return mediapipe::OkStatus();
      }
      return tool::StatusStop();
    };
@@ -580,7 +580,7 @@ class GraphTracerE2ETest : public ::testing::Test {
    MP_ASSERT_OK(graph_.ObserveOutputStream("output_packets_0",
                                            [&](const Packet& packet) {
                                              out_packets.push_back(packet);
-                                              return ::mediapipe::OkStatus();
+                                              return mediapipe::OkStatus();
                                            }));
    simulation_clock_->ThreadStart();
    MP_ASSERT_OK(graph_.StartRun({}));
@@ -597,7 +597,7 @@ class GraphTracerE2ETest : public ::testing::Test {
 
   CalculatorGraphConfig graph_config_;
   CalculatorGraph graph_;
-  ::mediapipe::Clock* clock_;
+  mediapipe::Clock* clock_;
   std::shared_ptr<SimulationClock> simulation_clock_;
 };
 
@@ -628,7 +628,7 @@ TEST_F(GraphTracerE2ETest, PassThroughGraphProfile) {
   MP_EXPECT_OK(graph_.profiler()->GetCalculatorProfiles(&profiles));
   EXPECT_EQ(1, profiles.size());
   CalculatorProfile expected =
-      ::mediapipe::ParseTextProtoOrDie<CalculatorProfile>(R"(
+      mediapipe::ParseTextProtoOrDie<CalculatorProfile>(R"(
        name: "LambdaCalculator"
        open_runtime: 0
        close_runtime: 0
@@ -658,7 +658,7 @@ TEST_F(GraphTracerE2ETest, DemuxGraphLog) {
                              absl::InfiniteFuture(), &trace);
   GraphTrace node_timestamps = NodeTimestamps(trace);
   EXPECT_THAT(node_timestamps,
-              EqualsProto(::mediapipe::ParseTextProtoOrDie<GraphTrace>(R"(
+              EqualsProto(mediapipe::ParseTextProtoOrDie<GraphTrace>(R"(
                calculator_trace { node_id: 1 input_timestamp: 10000 }
                calculator_trace { node_id: 2 input_timestamp: 10000 }
                calculator_trace { node_id: 3 input_timestamp: 10000 }
@@ -801,7 +801,9 @@ TEST_F(GraphTracerE2ETest, DemuxGraphLog) {
                calculator_trace { node_id: 5 input_timestamp: 10000 }
                calculator_trace { node_id: 5 input_timestamp: 10000 }
                calculator_trace { node_id: 1 input_timestamp: 50001 }
-                calculator_trace { node_id: 1 input_timestamp: 10000 })")));
+                calculator_trace { node_id: 1 input_timestamp: 10000 }
+                calculator_trace { node_id: 1 input_timestamp: 10000 }
+              )")));
 
   // Validate a one-timestamp slice of the event trace.
   GraphTrace trace_2;
@@ -812,7 +814,7 @@ TEST_F(GraphTracerE2ETest, DemuxGraphLog) {
   StripDataIds(&trace_2);
   EXPECT_THAT(
       trace_2,
-      EqualsProto(::mediapipe::ParseTextProtoOrDie<GraphTrace>(
+      EqualsProto(mediapipe::ParseTextProtoOrDie<GraphTrace>(
          R"(
            base_time: 1544086800000000
            base_timestamp: 10000
@@ -986,14 +988,14 @@ TEST_F(GraphTracerE2ETest, DemuxGraphLog) {
 }
 
 // Read a GraphProfile from a file path.
-::mediapipe::Status ReadGraphProfile(const std::string& path,
-                                     GraphProfile* profile) {
+mediapipe::Status ReadGraphProfile(const std::string& path,
+                                   GraphProfile* profile) {
   std::ifstream ifs;
   ifs.open(path);
   proto_ns::io::IstreamInputStream in_stream(&ifs);
   profile->ParseFromZeroCopyStream(&in_stream);
-  return ifs.is_open() ? ::mediapipe::OkStatus()
-                       : ::mediapipe::UnavailableError("Cannot open");
+  return ifs.is_open() ? mediapipe::OkStatus()
+                       : mediapipe::UnavailableError("Cannot open");
 }
 
 TEST_F(GraphTracerE2ETest, DemuxGraphLogFile) {
@@ -1005,7 +1007,7 @@ TEST_F(GraphTracerE2ETest, DemuxGraphLogFile) {
   GraphProfile profile;
   MP_EXPECT_OK(
       ReadGraphProfile(absl::StrCat(log_path, 0, ".binarypb"), &profile));
-  EXPECT_EQ(111, profile.graph_trace(0).calculator_trace().size());
+  EXPECT_EQ(112, profile.graph_trace(0).calculator_trace().size());
 }
 
 TEST_F(GraphTracerE2ETest, DemuxGraphLogFiles) {
@@ -1034,7 +1036,7 @@ TEST_F(GraphTracerE2ETest, DemuxGraphLogFiles) {
   // The expected counts of calculator_trace records in each of the log files.
   // The processing spans three 12.5ms log files, because
   // RunDemuxInFlightGraph adds packets over 30ms.
-  std::vector<int> expected = {49, 64, 11};
+  std::vector<int> expected = {49, 64, 12};
   EXPECT_EQ(event_counts, expected);
   GraphProfile& profile_2 = graph_profiles[2];
   profile_2.clear_calculator_profiles();
@@ -1044,7 +1046,7 @@ TEST_F(GraphTracerE2ETest, DemuxGraphLogFiles) {
     StripDataIds(&trace);
   }
   EXPECT_THAT(profile_2,
-              EqualsProto(::mediapipe::ParseTextProtoOrDie<GraphProfile>(R"(
+              EqualsProto(mediapipe::ParseTextProtoOrDie<GraphProfile>(R"(
                graph_trace {
                  base_time: 1544086800000000
                  base_timestamp: 0
@@ -1145,6 +1147,11 @@ TEST_F(GraphTracerE2ETest, DemuxGraphLogFiles) {
                      stream_id: 7
                    }
                  }
+                  calculator_trace {
+                    node_id: 1
+                    event_type: READY_FOR_PROCESS
+                    start_time: 70004
+                  }
                  calculator_trace {
                    node_id: 1
                    event_type: READY_FOR_CLOSE
@@ -1286,7 +1293,7 @@ TEST_F(GraphTracerE2ETest, GpuTaskTrace) {
                              &trace_1);
   EXPECT_THAT(
       trace_1,
-      EqualsProto(::mediapipe::ParseTextProtoOrDie<GraphTrace>(
+      EqualsProto(mediapipe::ParseTextProtoOrDie<GraphTrace>(
          R"(
            base_time: 1100
            base_timestamp: 1000
@@ -1323,7 +1330,7 @@ TEST_F(GraphTracerE2ETest, GpuTaskTrace) {
                              &trace_2);
   EXPECT_THAT(
       trace_2,
-      EqualsProto(::mediapipe::ParseTextProtoOrDie<GraphTrace>(
+      EqualsProto(mediapipe::ParseTextProtoOrDie<GraphTrace>(
          R"(
            base_time: 1100
            base_timestamp: 1000
diff --git a/mediapipe/framework/profiler/profiler_resource_util_android.cc b/mediapipe/framework/profiler/profiler_resource_util_android.cc
index b092a01ac..0bdf51db7 100644
--- a/mediapipe/framework/profiler/profiler_resource_util_android.cc
+++ b/mediapipe/framework/profiler/profiler_resource_util_android.cc
@@ -27,13 +27,13 @@ StatusOr<std::string> GetDefaultTraceLogDirectory() {
   StatusOr<std::string>* result = new StatusOr<std::string>();
   bool has_jvm = java::HasJavaVM();
   if (!has_jvm) {
-    *result = ::mediapipe::InternalError("JavaVM not available.");
+    *result = mediapipe::InternalError("JavaVM not available.");
     return result;
   }
 
   JNIEnv* env = java::GetJNIEnv();
   if (!env) {
-    *result = ::mediapipe::InternalError("JNIEnv not available.");
+    *result = mediapipe::InternalError("JNIEnv not available.");
     return result;
   }
 
diff --git a/mediapipe/framework/profiler/profiler_resource_util_common.cc b/mediapipe/framework/profiler/profiler_resource_util_common.cc
index f4da002a7..bd205f8b7 100644
--- a/mediapipe/framework/profiler/profiler_resource_util_common.cc
+++ b/mediapipe/framework/profiler/profiler_resource_util_common.cc
@@ -33,7 +33,7 @@ ABSL_FLAG(std::string, log_root_dir, "",
 #ifdef __ANDROID__
 namespace mediapipe {
 namespace file {
-::mediapipe::Status RecursivelyCreateDir(absl::string_view path) {
+mediapipe::Status RecursivelyCreateDir(absl::string_view path) {
   return RecursivelyCreateDir(path, file::Options());
 }
 }  // namespace file
@@ -42,18 +42,18 @@ namespace file {
 
 namespace mediapipe {
 
-::mediapipe::StatusOr<std::string> GetLogDirectory() {
+mediapipe::StatusOr<std::string> GetLogDirectory() {
   if (!FLAGS_log_root_dir.CurrentValue().empty()) {
     return FLAGS_log_root_dir.CurrentValue();
   }
   return GetDefaultTraceLogDirectory();
 }
 
-::mediapipe::StatusOr<std::string> PathToLogFile(const std::string& path) {
+mediapipe::StatusOr<std::string> PathToLogFile(const std::string& path) {
   ASSIGN_OR_RETURN(std::string log_dir, GetLogDirectory());
   std::string result = file::JoinPath(log_dir, path);
   MP_RETURN_IF_ERROR(
-      ::mediapipe::file::RecursivelyCreateDir(file::Dirname(result)));
+      mediapipe::file::RecursivelyCreateDir(file::Dirname(result)));
   return result;
 }
 
diff --git a/mediapipe/framework/profiler/profiler_resource_util_ios.cc b/mediapipe/framework/profiler/profiler_resource_util_ios.cc
index 1ce19ff99..b0f72f9db 100644
--- a/mediapipe/framework/profiler/profiler_resource_util_ios.cc
+++ b/mediapipe/framework/profiler/profiler_resource_util_ios.cc
@@ -37,8 +37,7 @@ StatusOr<std::string> GetDefaultTraceLogDirectory() {
                        error:&error];
   if (!success) {
     // TODO: Use NSError+util_status to get status from NSError.
-    return ::mediapipe::InternalError(
-        [[error localizedDescription] UTF8String]);
+    return mediapipe::InternalError([[error localizedDescription] UTF8String]);
   }
 
   std::string trace_log_directory = [ns_documents_directory UTF8String];
diff --git a/mediapipe/framework/profiler/reporter/print_profile.cc b/mediapipe/framework/profiler/reporter/print_profile.cc
index 86144df7b..a15403872 100644
--- a/mediapipe/framework/profiler/reporter/print_profile.cc
+++ b/mediapipe/framework/profiler/reporter/print_profile.cc
@@ -37,7 +37,7 @@ ABSL_FLAG(std::vector<std::string>, cols, {"*"},
 ABSL_FLAG(bool, compact, false,
           "if true, then don't print unnecessary whitespace.");
 
-using ::mediapipe::reporter::Reporter;
+using mediapipe::reporter::Reporter;
 
 // The command line utility to mine trace files of useful statistics to
 // determine bottlenecks and performance of a graph.
diff --git a/mediapipe/framework/profiler/reporter/reporter.cc b/mediapipe/framework/profiler/reporter/reporter.cc
index e9f5db6a0..8a754ddc5 100644
--- a/mediapipe/framework/profiler/reporter/reporter.cc
+++ b/mediapipe/framework/profiler/reporter/reporter.cc
@@ -297,7 +297,7 @@ void Reporter::Accumulate(const mediapipe::GraphProfile& profile) {
   }
 }
 
-::mediapipe::Status Reporter::set_columns(
+mediapipe::Status Reporter::set_columns(
     const std::vector<std::string>& columns) {
   bool error = false;
   std::stringstream warnings;
@@ -337,9 +337,9 @@ void Reporter::Accumulate(const mediapipe::GraphProfile& profile) {
     columns_.swap(new_columns);
   }
   if (!error) {
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
-  return ::mediapipe::InvalidArgumentError(warnings.str());
+  return mediapipe::InvalidArgumentError(warnings.str());
 }
 
 class ReportImpl : public Report {
diff --git a/mediapipe/framework/profiler/reporter/reporter.h b/mediapipe/framework/profiler/reporter/reporter.h
index 8652fdbdb..07f50e227 100644
--- a/mediapipe/framework/profiler/reporter/reporter.h
+++ b/mediapipe/framework/profiler/reporter/reporter.h
@@ -106,7 +106,7 @@ class Reporter {
   // Accepts names of columns or wildcard patterns (* or ?) to
   // select which statistics columns will be included in a generated
   // report.
-  ::mediapipe::Status set_columns(const std::vector<std::string>& columns);
+  mediapipe::Status set_columns(const std::vector<std::string>& columns);
 
   // Generates a report based on the current accumulated statistics.
   std::unique_ptr<Report> Report();
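A sketch of the Reporter workflow behind these declarations (a hypothetical helper; the column pattern is an assumption, but `set_columns()` accepts wildcards and returns `InvalidArgumentError` for unknown names, per the code above):

```cpp
#include "mediapipe/framework/profiler/reporter/reporter.h"

mediapipe::Status SummarizeProfile(const mediapipe::GraphProfile& profile) {
  mediapipe::reporter::Reporter reporter;
  reporter.Accumulate(profile);  // One or more GraphProfiles from trace logs.
  MP_RETURN_IF_ERROR(reporter.set_columns({"*"}));  // Select all columns.
  auto report = reporter.Report();  // Generated from accumulated statistics.
  return mediapipe::OkStatus();
}
```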
std::unique_ptr Report(); diff --git a/mediapipe/framework/profiler/reporter_test.cc b/mediapipe/framework/profiler/reporter_test.cc index 9bdb1557b..e5bc541a7 100644 --- a/mediapipe/framework/profiler/reporter_test.cc +++ b/mediapipe/framework/profiler/reporter_test.cc @@ -35,7 +35,7 @@ namespace mediapipe { -using ::mediapipe::reporter::Reporter; +using mediapipe::reporter::Reporter; using ::testing::ElementsAre; using ::testing::HasSubstr; using ::testing::IsSupersetOf; diff --git a/mediapipe/framework/profiler/sharded_map_test.cc b/mediapipe/framework/profiler/sharded_map_test.cc index a589ac42a..b9981a80b 100644 --- a/mediapipe/framework/profiler/sharded_map_test.cc +++ b/mediapipe/framework/profiler/sharded_map_test.cc @@ -79,7 +79,7 @@ void TestParallelAccess(Map& time_map, int num_threads) { int64 kNumWrites = 1000; int64 kNumReads = 10; - ::mediapipe::ThreadPool pool(num_threads); + mediapipe::ThreadPool pool(num_threads); pool.StartWorkers(); for (int i = 0; i < kNumTasks; ++i) { pool.Schedule([=, &time_map]() { diff --git a/mediapipe/framework/profiler/test_context_builder.h b/mediapipe/framework/profiler/test_context_builder.h index fdeb3d46f..986a1ad8f 100644 --- a/mediapipe/framework/profiler/test_context_builder.h +++ b/mediapipe/framework/profiler/test_context_builder.h @@ -91,7 +91,7 @@ class TestContextBuilder { OutputStreamSpec spec; spec.name = output_map_->Names()[id.value()]; spec.packet_type = packet_type; - spec.error_callback = [](const ::mediapipe::Status& status) { + spec.error_callback = [](const mediapipe::Status& status) { LOG(ERROR) << status; }; output_specs_[spec.name] = spec; diff --git a/mediapipe/framework/profiler/testing/simple_calculator.cc b/mediapipe/framework/profiler/testing/simple_calculator.cc index 8931f2379..2126b052d 100644 --- a/mediapipe/framework/profiler/testing/simple_calculator.cc +++ b/mediapipe/framework/profiler/testing/simple_calculator.cc @@ -19,15 +19,15 @@ namespace mediapipe { class SimpleCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Outputs().Index(0).Set(); if (cc->InputSidePackets().HasTag("MAX_COUNT")) { cc->InputSidePackets().Tag("MAX_COUNT").Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { LOG(WARNING) << "Simple Calculator Process called, count_: " << count_; int max_count = 1; if (cc->InputSidePackets().HasTag("MAX_COUNT")) { @@ -38,7 +38,7 @@ class SimpleCalculator : public CalculatorBase { } cc->Outputs().Index(0).Add(new int(count_), Timestamp(count_)); ++count_; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: diff --git a/mediapipe/framework/scheduler.cc b/mediapipe/framework/scheduler.cc index dedb4e120..30d0b355d 100644 --- a/mediapipe/framework/scheduler.cc +++ b/mediapipe/framework/scheduler.cc @@ -83,8 +83,8 @@ void Scheduler::SetExecutor(Executor* executor) { } // TODO: Consider renaming this method CreateNonDefaultQueue. 
-::mediapipe::Status Scheduler::SetNonDefaultExecutor(const std::string& name, - Executor* executor) { +mediapipe::Status Scheduler::SetNonDefaultExecutor(const std::string& name, + Executor* executor) { RET_CHECK_EQ(state_, STATE_NOT_STARTED) << "SetNonDefaultExecutor must not " "be called after the scheduler " "has started"; @@ -99,7 +99,7 @@ void Scheduler::SetExecutor(Executor* executor) { std::placeholders::_1)); queue->SetExecutor(executor); scheduler_queues_.push_back(queue); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void Scheduler::SetQueuesRunning(bool running) { @@ -252,7 +252,7 @@ void Scheduler::EmittedObservedOutput() { } } -::mediapipe::Status Scheduler::WaitForObservedOutput() { +mediapipe::Status Scheduler::WaitForObservedOutput() { bool observed = false; ApplicationThreadAwait( [this, &observed]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(state_mutex_) { @@ -262,8 +262,8 @@ void Scheduler::EmittedObservedOutput() { // Wait until the member waiting_for_observed_output_ becomes false. return !waiting_for_observed_output_; }); - return observed ? ::mediapipe::OkStatus() - : ::mediapipe::OutOfRangeError("Graph is done."); + return observed ? mediapipe::OkStatus() + : mediapipe::OutOfRangeError("Graph is done."); } // Idleness requires: @@ -273,18 +273,18 @@ void Scheduler::EmittedObservedOutput() { // no source nodes. (This is enforced by CalculatorGraph::WaitUntilIdle().) // The application must ensure no other threads are adding packets to graph // input streams while a WaitUntilIdle() call is in progress. -::mediapipe::Status Scheduler::WaitUntilIdle() { +mediapipe::Status Scheduler::WaitUntilIdle() { RET_CHECK_NE(state_, STATE_NOT_STARTED); ApplicationThreadAwait(std::bind(&Scheduler::IsIdle, this)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status Scheduler::WaitUntilDone() { +mediapipe::Status Scheduler::WaitUntilDone() { RET_CHECK_NE(state_, STATE_NOT_STARTED); ApplicationThreadAwait([this]() ABSL_EXCLUSIVE_LOCKS_REQUIRED(state_mutex_) { return state_ == STATE_TERMINATED; }); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void Scheduler::ApplicationThreadAwait( @@ -379,7 +379,7 @@ bool Scheduler::TryToScheduleNextSourceLayer() { // If no graph input streams are open, then there are no packet sources in // the graph. It's a deadlock. if (graph_input_streams_closed_) { - graph_->RecordError(::mediapipe::UnknownError( + graph_->RecordError(mediapipe::UnknownError( "Detected a deadlock because source nodes cannot be activated when a " "source node at a lower layer is still not opened.")); } @@ -496,7 +496,7 @@ void Scheduler::Cancel() { if (state_ != STATE_RUNNING && state_ != STATE_PAUSED) { return; } - graph_->RecordError(::mediapipe::CancelledError()); + graph_->RecordError(mediapipe::CancelledError()); if (state_ == STATE_PAUSED) { // Keep the scheduler queue running, since we need to exhaust it. SetQueuesRunning(true); diff --git a/mediapipe/framework/scheduler.h b/mediapipe/framework/scheduler.h index 7bd7823fe..6c3050a11 100644 --- a/mediapipe/framework/scheduler.h +++ b/mediapipe/framework/scheduler.h @@ -56,8 +56,8 @@ class Scheduler { // Sets the executor that will run the nodes assigned to the executor // named |name|. Must be called before the scheduler is started. 
- ::mediapipe::Status SetNonDefaultExecutor(const std::string& name, - Executor* executor); + mediapipe::Status SetNonDefaultExecutor(const std::string& name, + Executor* executor); // Resets the data members at the beginning of each graph run. void Reset(); @@ -70,13 +70,13 @@ class Scheduler { // have been closed, and no more calculators can be run). // This function can be called only after Start(). // Runs application thread tasks while waiting. - ::mediapipe::Status WaitUntilDone() ABSL_LOCKS_EXCLUDED(state_mutex_); + mediapipe::Status WaitUntilDone() ABSL_LOCKS_EXCLUDED(state_mutex_); // Wait until the running graph is in the idle mode, which is when nothing can // be scheduled and nothing is running in the worker threads. This function // can be called only after Start(). // Runs application thread tasks while waiting. - ::mediapipe::Status WaitUntilIdle() ABSL_LOCKS_EXCLUDED(state_mutex_); + mediapipe::Status WaitUntilIdle() ABSL_LOCKS_EXCLUDED(state_mutex_); // Wait until any graph input stream has been unthrottled. // This is meant to be used by CalculatorGraph::AddPacketToInputStream, which @@ -93,8 +93,8 @@ class Scheduler { // this function returns immediately if an observed packet has already been // emitted since the previous call. This relies on the fact that the calls are // in sequence. Runs application thread tasks while waiting. - // Returns ::mediapipe::OutOfRangeError if the graph terminated. - ::mediapipe::Status WaitForObservedOutput() ABSL_LOCKS_EXCLUDED(state_mutex_); + // Returns mediapipe::OutOfRangeError if the graph terminated. + mediapipe::Status WaitForObservedOutput() ABSL_LOCKS_EXCLUDED(state_mutex_); // Callback that is invoked by a node when it wants to be scheduled. // If the node is throttled, the call is ignored. diff --git a/mediapipe/framework/scheduler_queue.cc b/mediapipe/framework/scheduler_queue.cc index 1b491cc36..f6f92abd8 100644 --- a/mediapipe/framework/scheduler_queue.cc +++ b/mediapipe/framework/scheduler_queue.cc @@ -245,8 +245,8 @@ void SchedulerQueue::RunCalculatorNode(CalculatorNode* node, // source node always reuses the same CalculatorContext and Close() doesn't // access any inputs. // TODO: Should we pass tool::StatusStop() in this case? - const ::mediapipe::Status result = - node->CloseNode(::mediapipe::OkStatus(), /*graph_run_ended=*/false); + const mediapipe::Status result = + node->CloseNode(mediapipe::OkStatus(), /*graph_run_ended=*/false); shared_->timer.EndNode(start_time); if (!result.ok()) { VLOG(3) << node->DebugName() @@ -257,7 +257,7 @@ void SchedulerQueue::RunCalculatorNode(CalculatorNode* node, // Note that we don't need a lock because only one thread can execute this // due to the lock on running_nodes. 
int64 start_time = shared_->timer.StartNode(); - const ::mediapipe::Status result = node->ProcessNode(cc); + const mediapipe::Status result = node->ProcessNode(cc); shared_->timer.EndNode(start_time); if (!result.ok()) { @@ -284,7 +284,7 @@ void SchedulerQueue::RunCalculatorNode(CalculatorNode* node, void SchedulerQueue::OpenCalculatorNode(CalculatorNode* node) { VLOG(3) << "Opening " << node->DebugName(); int64 start_time = shared_->timer.StartNode(); - const ::mediapipe::Status result = node->OpenNode(); + const mediapipe::Status result = node->OpenNode(); shared_->timer.EndNode(start_time); if (!result.ok()) { VLOG(3) << node->DebugName() << " had an error!"; diff --git a/mediapipe/framework/scheduler_shared.h b/mediapipe/framework/scheduler_shared.h index 6cbe20a33..adbd6801a 100644 --- a/mediapipe/framework/scheduler_shared.h +++ b/mediapipe/framework/scheduler_shared.h @@ -105,7 +105,7 @@ struct SchedulerShared { // flag indicates that the graph is in that mode. std::atomic<bool> stopping; std::atomic<bool> has_error; - std::function<void(const ::mediapipe::Status& error)> error_callback; + std::function<void(const mediapipe::Status& error)> error_callback; // Collects timing information for measuring overhead. internal::SchedulerTimer timer; }; diff --git a/mediapipe/framework/status_handler.h b/mediapipe/framework/status_handler.h index b89eb7725..225561db2 100644 --- a/mediapipe/framework/status_handler.h +++ b/mediapipe/framework/status_handler.h @@ -48,19 +48,19 @@ class StatusHandler { // All subclasses of StatusHandler must implement these static functions with // the following signatures: // - // static ::mediapipe::Status FillExpectations( + // static mediapipe::Status FillExpectations( // const MediaPipeOptions& extendable_options, // PacketTypeSet* input_side_packets); // - // static ::mediapipe::Status HandlePreRunStatus( + // static mediapipe::Status HandlePreRunStatus( // const MediaPipeOptions& extendable_options, // const PacketSet& input_side_packets, - // const ::mediapipe::Status& pre_run_status); + // const mediapipe::Status& pre_run_status); // - // static ::mediapipe::Status HandleStatus( + // static mediapipe::Status HandleStatus( // const MediaPipeOptions& extendable_options, // const PacketSet& input_side_packets, - // const ::mediapipe::Status& run_status); + // const mediapipe::Status& run_status); // // FillExpectations() is used to validate the graph and it is analogous to the // function in calculator.h, packet_generator.h, and packet_factory.h.
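For reference, the three static functions quoted above are the whole contract: a status handler is just a class exposing them with these signatures. A minimal sketch under the new unqualified spelling (the LoggingStatusHandler name and its logging-only behavior are hypothetical, not part of this change):

    // Hypothetical handler that only logs graph statuses. Assumes
    // #include "mediapipe/framework/status_handler.h".
    class LoggingStatusHandler : public mediapipe::StatusHandler {
     public:
      static mediapipe::Status FillExpectations(
          const MediaPipeOptions& extendable_options,
          PacketTypeSet* input_side_packets) {
        return mediapipe::OkStatus();  // Requires no input side packets.
      }
      static mediapipe::Status HandlePreRunStatus(
          const MediaPipeOptions& extendable_options,
          const PacketSet& input_side_packets,
          const mediapipe::Status& pre_run_status) {
        LOG(INFO) << "Pre-run status: " << pre_run_status;
        return mediapipe::OkStatus();
      }
      static mediapipe::Status HandleStatus(
          const MediaPipeOptions& extendable_options,
          const PacketSet& input_side_packets,
          const mediapipe::Status& run_status) {
        LOG(INFO) << "Run status: " << run_status;
        return mediapipe::OkStatus();
      }
    };
    REGISTER_STATUS_HANDLER(LoggingStatusHandler);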
@@ -90,17 +90,17 @@ namespace internal { class StaticAccessToStatusHandler { public: virtual ~StaticAccessToStatusHandler() {} - virtual ::mediapipe::Status FillExpectations( + virtual mediapipe::Status FillExpectations( const MediaPipeOptions& extendable_options, PacketTypeSet* input_side_packets) = 0; - virtual ::mediapipe::Status HandlePreRunStatus( + virtual mediapipe::Status HandlePreRunStatus( const MediaPipeOptions& extendable_options, const PacketSet& input_side_packets, - const ::mediapipe::Status& pre_run_status) = 0; - virtual ::mediapipe::Status HandleStatus( + const mediapipe::Status& pre_run_status) = 0; + virtual mediapipe::Status HandleStatus( const MediaPipeOptions& extendable_options, const PacketSet& input_side_packets, // - const ::mediapipe::Status& run_status) = 0; + const mediapipe::Status& run_status) = 0; }; using StaticAccessToStatusHandlerRegistry = @@ -111,7 +111,7 @@ using StaticAccessToStatusHandlerRegistry = template <typename T> constexpr bool StatusHandlerHasFillExpectations( decltype(&T::FillExpectations) /* unused */) { - typedef ::mediapipe::Status (*FillExpectationsType)( + typedef mediapipe::Status (*FillExpectationsType)( const MediaPipeOptions& extendable_options, PacketTypeSet* input_side_packets); return std::is_same<decltype(&T::FillExpectations), FillExpectationsType>::value; } template <typename T> constexpr bool StatusHandlerHasHandlePreRunStatus( decltype(&T::HandlePreRunStatus) /* unused */) { - typedef ::mediapipe::Status (*HandlePreRunStatusType)( + typedef mediapipe::Status (*HandlePreRunStatusType)( const MediaPipeOptions& extendable_options, const PacketSet& input_side_packets, - const ::mediapipe::Status& pre_run_status); + const mediapipe::Status& pre_run_status); return std::is_same<decltype(&T::HandlePreRunStatus), HandlePreRunStatusType>::value; } template <typename T> constexpr bool StatusHandlerHasHandleStatus( decltype(&T::HandleStatus) /* unused */) { - typedef ::mediapipe::Status (*HandleStatusType)( + typedef mediapipe::Status (*HandleStatusType)( const MediaPipeOptions& extendable_options, - const PacketSet& input_side_packets, - const ::mediapipe::Status& run_status); + const PacketSet& input_side_packets, const mediapipe::Status& run_status); return std::is_same<decltype(&T::HandleStatus), HandleStatusType>::value; } template <typename T> @@ -154,7 +153,7 @@ class StaticAccessToStatusHandlerTyped : public StaticAccessToStatusHandler { static_assert( std::is_base_of<StatusHandler, StatusHandlerSubclass>::value, "Classes registered with REGISTER_STATUS_HANDLER must be subclasses of " - "::mediapipe::StatusHandler."); + "mediapipe::StatusHandler."); static_assert( StatusHandlerHasFillExpectations(nullptr), "FillExpectations() must be defined with the correct signature in every " @@ -167,25 +166,23 @@ class StaticAccessToStatusHandlerTyped : public StaticAccessToStatusHandler { "HandleStatus() must be defined with the correct signature in " "every StatusHandler."); - ::mediapipe::Status FillExpectations( - const MediaPipeOptions& extendable_options, - PacketTypeSet* input_side_packets) final { + mediapipe::Status FillExpectations(const MediaPipeOptions& extendable_options, + PacketTypeSet* input_side_packets) final { return StatusHandlerSubclass::FillExpectations(extendable_options, input_side_packets); } - ::mediapipe::Status HandlePreRunStatus( + mediapipe::Status HandlePreRunStatus( const MediaPipeOptions& extendable_options, const PacketSet& input_side_packets, - const ::mediapipe::Status& pre_run_status) final { + const mediapipe::Status& pre_run_status) final { return StatusHandlerSubclass::HandlePreRunStatus( extendable_options, input_side_packets, pre_run_status); } - ::mediapipe::Status HandleStatus( - const MediaPipeOptions& extendable_options, - const PacketSet&
input_side_packets, - const ::mediapipe::Status& run_status) final { + mediapipe::Status HandleStatus(const MediaPipeOptions& extendable_options, + const PacketSet& input_side_packets, + const mediapipe::Status& run_status) final { return StatusHandlerSubclass::HandleStatus(extendable_options, input_side_packets, run_status); } @@ -195,12 +192,12 @@ class StaticAccessToStatusHandlerTyped : public StaticAccessToStatusHandler { // Macro for registering StatusHandlers. It actually just registers the // StaticAccessToStatusHandlerTyped class. -#define REGISTER_STATUS_HANDLER(name) \ - REGISTER_FACTORY_FUNCTION_QUALIFIED( \ - ::mediapipe::internal::StaticAccessToStatusHandlerRegistry, \ - status_handler_registration, name, \ - absl::make_unique< \ - ::mediapipe::internal::StaticAccessToStatusHandlerTyped>) +#define REGISTER_STATUS_HANDLER(name) \ + REGISTER_FACTORY_FUNCTION_QUALIFIED( \ + mediapipe::internal::StaticAccessToStatusHandlerRegistry, \ + status_handler_registration, name, \ + absl::make_unique< \ + mediapipe::internal::StaticAccessToStatusHandlerTyped>) } // namespace mediapipe diff --git a/mediapipe/framework/stream_handler/barrier_input_stream_handler.cc b/mediapipe/framework/stream_handler/barrier_input_stream_handler.cc index 1cfbf1176..5a2f30296 100644 --- a/mediapipe/framework/stream_handler/barrier_input_stream_handler.cc +++ b/mediapipe/framework/stream_handler/barrier_input_stream_handler.cc @@ -37,7 +37,7 @@ class BarrierInputStreamHandler : public InputStreamHandler { std::function headers_ready_callback, std::function notification_callback, std::function schedule_callback, - std::function error_callback) override { + std::function error_callback) override { InputStreamHandler::PrepareForRun( std::move(headers_ready_callback), std::move(notification_callback), std::move(schedule_callback), std::move(error_callback)); diff --git a/mediapipe/framework/stream_handler/barrier_input_stream_handler_test.cc b/mediapipe/framework/stream_handler/barrier_input_stream_handler_test.cc index a0881b04f..e60ba9133 100644 --- a/mediapipe/framework/stream_handler/barrier_input_stream_handler_test.cc +++ b/mediapipe/framework/stream_handler/barrier_input_stream_handler_test.cc @@ -109,12 +109,10 @@ class BarrierInputStreamHandlerTest : public ::testing::Test { calculator_context_ = calculator_context; } - void RecordError(const ::mediapipe::Status& error) { - errors_.push_back(error); - } + void RecordError(const mediapipe::Status& error) { errors_.push_back(error); } - ::mediapipe::Status SetupShardsNoOp(CalculatorContext* calculator_context) { - return ::mediapipe::OkStatus(); + mediapipe::Status SetupShardsNoOp(CalculatorContext* calculator_context) { + return mediapipe::OkStatus(); } void ReportQueueNoOp(InputStreamManager* stream, bool* stream_was_full) {} @@ -123,13 +121,13 @@ class BarrierInputStreamHandlerTest : public ::testing::Test { std::function headers_ready_callback_; std::function notification_callback_; std::function schedule_callback_; - std::function error_callback_; - std::function<::mediapipe::Status(CalculatorContext*)> setup_shards_callback_; + std::function error_callback_; + std::function setup_shards_callback_; InputStreamManager::QueueSizeCallback queue_full_callback_; InputStreamManager::QueueSizeCallback queue_not_full_callback_; // Vector of errors encountered while using the stream. 
- std::vector<::mediapipe::Status> errors_; + std::vector errors_; std::unique_ptr calculator_state_; CalculatorContextManager calculator_context_manager_; diff --git a/mediapipe/framework/stream_handler/default_input_stream_handler.cc b/mediapipe/framework/stream_handler/default_input_stream_handler.cc index 354ee08e0..4c95241a9 100644 --- a/mediapipe/framework/stream_handler/default_input_stream_handler.cc +++ b/mediapipe/framework/stream_handler/default_input_stream_handler.cc @@ -49,7 +49,7 @@ void DefaultInputStreamHandler::PrepareForRun( std::function headers_ready_callback, std::function notification_callback, std::function schedule_callback, - std::function error_callback) { + std::function error_callback) { sync_set_.PrepareForRun(); InputStreamHandler::PrepareForRun( std::move(headers_ready_callback), std::move(notification_callback), diff --git a/mediapipe/framework/stream_handler/default_input_stream_handler.h b/mediapipe/framework/stream_handler/default_input_stream_handler.h index 0ff9d9a10..edf41fbea 100644 --- a/mediapipe/framework/stream_handler/default_input_stream_handler.h +++ b/mediapipe/framework/stream_handler/default_input_stream_handler.h @@ -40,7 +40,7 @@ class DefaultInputStreamHandler : public InputStreamHandler { std::function headers_ready_callback, std::function notification_callback, std::function schedule_callback, - std::function error_callback) override; + std::function error_callback) override; // In DefaultInputStreamHandler, a node is "ready" if: // - all streams are done (need to call Close() in this case), or diff --git a/mediapipe/framework/stream_handler/default_input_stream_handler_test.cc b/mediapipe/framework/stream_handler/default_input_stream_handler_test.cc index b41ccb356..b174a210e 100644 --- a/mediapipe/framework/stream_handler/default_input_stream_handler_test.cc +++ b/mediapipe/framework/stream_handler/default_input_stream_handler_test.cc @@ -30,7 +30,7 @@ TEST(DefaultInputStreamHandlerTest, NoBatchingWorks) { // A single calculator with two input streams, and two output streams. This // calculator passes all the input packets along. CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "input0" input_stream: "input1" node { @@ -88,7 +88,7 @@ TEST(DefaultInputStreamHandlerTest, Batches) { // A single batching calculator with one input stream and one output stream. // This calculator passes all the input packets onto the output streams. CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "input0" node { calculator: "PassThroughCalculator" @@ -152,7 +152,7 @@ TEST(DefaultInputStreamHandlerTest, BatchIsFlushedWhenClosing) { // A single batching calculator with one input stream and one output stream. // This calculator passes all the input packets onto the output streams. CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "input0" node { calculator: "PassThroughCalculator" @@ -211,7 +211,7 @@ TEST(DefaultInputStreamHandlerTest, BatchIsFlushedWhenClosing) { // batching except for the first timestamp of the batch. 
TEST(DefaultInputStreamHandlerTest, DoesntPropagateTimestampWhenBatching) { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "input0" input_stream: "input1" node { diff --git a/mediapipe/framework/stream_handler/fixed_size_input_stream_handler_test.cc b/mediapipe/framework/stream_handler/fixed_size_input_stream_handler_test.cc index 947e2e0d3..075bbef31 100644 --- a/mediapipe/framework/stream_handler/fixed_size_input_stream_handler_test.cc +++ b/mediapipe/framework/stream_handler/fixed_size_input_stream_handler_test.cc @@ -46,17 +46,17 @@ bool g_source_done ABSL_GUARDED_BY(g_source_mutex); class TestSourceCalculator : public CalculatorBase { public: TestSourceCalculator() : current_packet_id_(0) {} - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { absl::MutexLock lock(&g_source_mutex); g_source_counter = 0; g_source_done = false; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { if (current_packet_id_ == kMaxPacketId) { absl::MutexLock lock(&g_source_mutex); g_source_done = true; @@ -70,7 +70,7 @@ class TestSourceCalculator : public CalculatorBase { g_source_mutex.Await( absl::Condition(this, &TestSourceCalculator::CanProceed)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: @@ -86,17 +86,17 @@ REGISTER_CALCULATOR(TestSourceCalculator); class TestSlowCalculator : public CalculatorBase { public: TestSlowCalculator() = default; - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set(); cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { absl::MutexLock lock(&g_source_mutex); g_slow_counter = 0; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { cc->Outputs().Index(0).Add(new int64(0), cc->Inputs().Index(0).Value().Timestamp()); { @@ -105,7 +105,7 @@ class TestSlowCalculator : public CalculatorBase { g_source_mutex.Await( absl::Condition(this, &TestSlowCalculator::CanProceed)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: @@ -253,7 +253,7 @@ TEST_P(FixedSizeInputStreamHandlerTest, ParallelWriteAndRead) { MP_ASSERT_OK(graph.StartRun({})); { - ::mediapipe::ThreadPool pool(3); + mediapipe::ThreadPool pool(3); pool.StartWorkers(); // Start 3 writers. 
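The stream-handler tests above all share one driving pattern, which also shows how caller-side code reads after this rename. A condensed sketch of that pattern (not a new test in this change; it assumes the PassThroughCalculator graph used throughout these files and a gtest context for MP_ASSERT_OK):

    // Parse a graph config, observe its output, push a packet, and drain.
    CalculatorGraphConfig config =
        mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
          input_stream: "input0"
          node {
            calculator: "PassThroughCalculator"
            input_stream: "input0"
            output_stream: "output0"
          }
        )");
    CalculatorGraph graph;
    MP_ASSERT_OK(graph.Initialize(config));
    std::vector<Packet> outputs;
    MP_ASSERT_OK(graph.ObserveOutputStream(
        "output0", [&outputs](const Packet& packet) {
          outputs.push_back(packet);
          return mediapipe::OkStatus();  // Unqualified, per this change.
        }));
    MP_ASSERT_OK(graph.StartRun({}));
    MP_ASSERT_OK(graph.AddPacketToInputStream(
        "input0", MakePacket<int>(7).At(Timestamp(0))));
    MP_ASSERT_OK(graph.CloseInputStream("input0"));
    MP_ASSERT_OK(graph.WaitUntilDone());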
diff --git a/mediapipe/framework/stream_handler/immediate_input_stream_handler.cc b/mediapipe/framework/stream_handler/immediate_input_stream_handler.cc index 5c368781c..4bcaff59c 100644 --- a/mediapipe/framework/stream_handler/immediate_input_stream_handler.cc +++ b/mediapipe/framework/stream_handler/immediate_input_stream_handler.cc @@ -45,7 +45,7 @@ class ImmediateInputStreamHandler : public InputStreamHandler { std::function headers_ready_callback, std::function notification_callback, std::function schedule_callback, - std::function error_callback) override; + std::function error_callback) override; // Returns kReadyForProcess whenever a Packet is available at any of // the input streams, or any input stream becomes done. @@ -83,7 +83,7 @@ void ImmediateInputStreamHandler::PrepareForRun( std::function headers_ready_callback, std::function notification_callback, std::function schedule_callback, - std::function error_callback) { + std::function error_callback) { { absl::MutexLock lock(&mutex_); for (int i = 0; i < sync_sets_.size(); ++i) { diff --git a/mediapipe/framework/stream_handler/immediate_input_stream_handler_test.cc b/mediapipe/framework/stream_handler/immediate_input_stream_handler_test.cc index 0c14fcdbe..3bcc1358d 100644 --- a/mediapipe/framework/stream_handler/immediate_input_stream_handler_test.cc +++ b/mediapipe/framework/stream_handler/immediate_input_stream_handler_test.cc @@ -108,12 +108,10 @@ class ImmediateInputStreamHandlerTest : public ::testing::Test { cc_ = cc; } - void RecordError(const ::mediapipe::Status& error) { - errors_.push_back(error); - } + void RecordError(const mediapipe::Status& error) { errors_.push_back(error); } - ::mediapipe::Status SetupShardsNoOp(CalculatorContext* calculator_context) { - return ::mediapipe::OkStatus(); + mediapipe::Status SetupShardsNoOp(CalculatorContext* calculator_context) { + return mediapipe::OkStatus(); } void ReportQueueNoOp(InputStreamManager* stream, bool* stream_was_full) {} @@ -123,10 +121,10 @@ class ImmediateInputStreamHandlerTest : public ::testing::Test { const std::map& expected_values) { for (const auto& name_and_id : name_to_id_) { const InputStream& input_stream = input_set.Get(name_and_id.second); - if (::mediapipe::ContainsKey(expected_values, name_and_id.first)) { + if (mediapipe::ContainsKey(expected_values, name_and_id.first)) { ASSERT_FALSE(input_stream.Value().IsEmpty()); EXPECT_EQ(input_stream.Value().Get(), - ::mediapipe::FindOrDie(expected_values, name_and_id.first)); + mediapipe::FindOrDie(expected_values, name_and_id.first)); } else { EXPECT_TRUE(input_stream.Value().IsEmpty()); } @@ -142,13 +140,13 @@ class ImmediateInputStreamHandlerTest : public ::testing::Test { std::function headers_ready_callback_; std::function notification_callback_; std::function schedule_callback_; - std::function error_callback_; - std::function<::mediapipe::Status(CalculatorContext*)> setup_shards_callback_; + std::function error_callback_; + std::function setup_shards_callback_; InputStreamManager::QueueSizeCallback queue_full_callback_; InputStreamManager::QueueSizeCallback queue_not_full_callback_; // Vector of errors encountered while using the stream. 
- std::vector<::mediapipe::Status> errors_; + std::vector errors_; std::unique_ptr calculator_state_; CalculatorContextManager cc_manager_; diff --git a/mediapipe/framework/stream_handler/mux_input_stream_handler_test.cc b/mediapipe/framework/stream_handler/mux_input_stream_handler_test.cc index dbf85401f..f7ecfca3d 100644 --- a/mediapipe/framework/stream_handler/mux_input_stream_handler_test.cc +++ b/mediapipe/framework/stream_handler/mux_input_stream_handler_test.cc @@ -28,7 +28,7 @@ namespace { // MuxInputStreamHandler should fail when running this test. TEST(MuxInputStreamHandlerTest, AtomicAccessToControlAndDataStreams) { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "input" node { calculator: "RoundRobinDemuxCalculator" diff --git a/mediapipe/framework/stream_handler/set_input_stream_handler_test.cc b/mediapipe/framework/stream_handler/set_input_stream_handler_test.cc index c31fd1631..ff01bc92e 100644 --- a/mediapipe/framework/stream_handler/set_input_stream_handler_test.cc +++ b/mediapipe/framework/stream_handler/set_input_stream_handler_test.cc @@ -35,7 +35,7 @@ namespace { // MuxInputStreamHandler should fail when running this test. TEST(MuxInputStreamHandlerTest, AtomicAccessToControlAndDataStreams) { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "input" node { calculator: "RoundRobinDemuxCalculator" @@ -108,9 +108,9 @@ TEST(MuxInputStreamHandlerTest, AtomicAccessToControlAndDataStreams) { // ignored. class FixedPassThroughCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { if (!cc->Inputs().TagMap()->SameAs(*cc->Outputs().TagMap())) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "Input and output streams to PassThroughCalculator must use " "matching tags and indexes."); } @@ -126,7 +126,7 @@ class FixedPassThroughCalculator : public CalculatorBase { if (cc->OutputSidePackets().NumEntries() != 0) { if (!cc->InputSidePackets().TagMap()->SameAs( *cc->OutputSidePackets().TagMap())) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "Input and output side packets to PassThroughCalculator must use " "matching tags and indexes."); } @@ -148,10 +148,10 @@ class FixedPassThroughCalculator : public CalculatorBase { ->set_target_queue_size(2); cc->SetInputStreamHandlerOptions(options); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { for (CollectionItemId id = cc->Inputs().BeginId(); id < cc->Inputs().EndId(); ++id) { if (!cc->Inputs().Get(id).Header().IsEmpty()) { @@ -165,10 +165,10 @@ class FixedPassThroughCalculator : public CalculatorBase { } } cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { cc->GetCounter("PassThrough")->Increment(); if (cc->Inputs().NumEntries() == 0) { return tool::StatusStop(); @@ -182,7 +182,7 @@ class FixedPassThroughCalculator : public CalculatorBase { cc->Outputs().Get(id).AddPacket(cc->Inputs().Get(id).Value()); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; 
REGISTER_CALCULATOR(FixedPassThroughCalculator); @@ -194,7 +194,7 @@ REGISTER_CALCULATOR(FixedPassThroughCalculator); TEST(FixedSizeInputStreamHandlerTest, ParallelWriteAndRead) { #define NUM_STREAMS 4 CalculatorGraphConfig graph_config = - ::mediapipe::ParseTextProtoOrDie( + mediapipe::ParseTextProtoOrDie( R"( input_stream: "in_0" input_stream: "in_1" @@ -222,7 +222,7 @@ TEST(FixedSizeInputStreamHandlerTest, ParallelWriteAndRead) { MP_ASSERT_OK(graph.StartRun({})); { - ::mediapipe::ThreadPool pool(NUM_STREAMS); + mediapipe::ThreadPool pool(NUM_STREAMS); pool.StartWorkers(); // Start writers. diff --git a/mediapipe/framework/stream_handler/sync_set_input_stream_handler.cc b/mediapipe/framework/stream_handler/sync_set_input_stream_handler.cc index 6c2bd78e7..d597ab5db 100644 --- a/mediapipe/framework/stream_handler/sync_set_input_stream_handler.cc +++ b/mediapipe/framework/stream_handler/sync_set_input_stream_handler.cc @@ -48,7 +48,7 @@ class SyncSetInputStreamHandler : public InputStreamHandler { std::function headers_ready_callback, std::function notification_callback, std::function schedule_callback, - std::function error_callback) override; + std::function error_callback) override; protected: // In SyncSetInputStreamHandler, a node is "ready" if any @@ -94,7 +94,7 @@ void SyncSetInputStreamHandler::PrepareForRun( std::function headers_ready_callback, std::function notification_callback, std::function schedule_callback, - std::function error_callback) { + std::function error_callback) { const auto& handler_options = options_.GetExtension(SyncSetInputStreamHandlerOptions::ext); { @@ -110,7 +110,7 @@ void SyncSetInputStreamHandler::PrepareForRun( MEDIAPIPE_CHECK_OK(tool::ParseTagIndex(tag_index, &tag, &index)); CollectionItemId id = input_stream_managers_.GetId(tag, index); CHECK(id.IsValid()) << "stream \"" << tag_index << "\" is not found."; - CHECK(!::mediapipe::ContainsKey(used_ids, id)) + CHECK(!mediapipe::ContainsKey(used_ids, id)) << "stream \"" << tag_index << "\" is in more than one sync set."; used_ids.insert(id); stream_ids.push_back(id); @@ -120,7 +120,7 @@ void SyncSetInputStreamHandler::PrepareForRun( std::vector remaining_ids; for (CollectionItemId id = input_stream_managers_.BeginId(); id < input_stream_managers_.EndId(); ++id) { - if (!::mediapipe::ContainsKey(used_ids, id)) { + if (!mediapipe::ContainsKey(used_ids, id)) { remaining_ids.push_back(id); } } diff --git a/mediapipe/framework/stream_handler/sync_set_input_stream_handler_test.cc b/mediapipe/framework/stream_handler/sync_set_input_stream_handler_test.cc index f6716a5a6..cd3379e6a 100644 --- a/mediapipe/framework/stream_handler/sync_set_input_stream_handler_test.cc +++ b/mediapipe/framework/stream_handler/sync_set_input_stream_handler_test.cc @@ -36,8 +36,8 @@ namespace mediapipe { namespace { // The type LambdaCalculator takes. -typedef std::function<::mediapipe::Status(const InputStreamShardSet&, - OutputStreamShardSet*)> +typedef std::function ProcessFunction; // Helper function to create a tuple (inside an initializer list). @@ -50,8 +50,8 @@ std::tuple> CommandTuple( // Function to take the inputs and produce a diagnostic output std::string // and output a packet with a diagnostic output std::string which includes // the input timestamp and the ids of each input which is present. 
-::mediapipe::Status InputsToDebugString(const InputStreamShardSet& inputs, - OutputStreamShardSet* outputs) { +mediapipe::Status InputsToDebugString(const InputStreamShardSet& inputs, + OutputStreamShardSet* outputs) { std::string output; Timestamp output_timestamp; for (CollectionItemId id = inputs.BeginId(); id < inputs.EndId(); ++id) { @@ -79,7 +79,7 @@ std::tuple> CommandTuple( // TODO Output at output_timestamp once unordered output stream // handlers are allowed. outputs->Index(0).AddPacket(output_packet); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } TEST(SyncSetInputStreamHandlerTest, OrdinaryOperation) { @@ -273,7 +273,7 @@ TEST(SyncSetInputStreamHandlerTest, OrdinaryOperation) { MP_ASSERT_OK( graph.ObserveOutputStream("output", [&outputs](const Packet& packet) { outputs.push_back(packet); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); MP_ASSERT_OK(graph.StartRun({})); for (int command_index = 0; command_index < shuffled_commands.size(); diff --git a/mediapipe/framework/stream_handler/timestamp_align_input_stream_handler.cc b/mediapipe/framework/stream_handler/timestamp_align_input_stream_handler.cc index 0c63618e2..bcc180d6c 100644 --- a/mediapipe/framework/stream_handler/timestamp_align_input_stream_handler.cc +++ b/mediapipe/framework/stream_handler/timestamp_align_input_stream_handler.cc @@ -56,7 +56,7 @@ class TimestampAlignInputStreamHandler : public InputStreamHandler { std::function headers_ready_callback, std::function notification_callback, std::function schedule_callback, - std::function error_callback) override; + std::function error_callback) override; protected: // In TimestampAlignInputStreamHandler, a node is "ready" if: @@ -107,7 +107,7 @@ void TimestampAlignInputStreamHandler::PrepareForRun( std::function headers_ready_callback, std::function notification_callback, std::function schedule_callback, - std::function error_callback) { + std::function error_callback) { { absl::MutexLock lock(&mutex_); offsets_initialized_ = (input_stream_managers_.NumEntries() == 1); diff --git a/mediapipe/framework/stream_handler/timestamp_align_input_stream_handler_test.cc b/mediapipe/framework/stream_handler/timestamp_align_input_stream_handler_test.cc index 1be620189..9221a65f3 100644 --- a/mediapipe/framework/stream_handler/timestamp_align_input_stream_handler_test.cc +++ b/mediapipe/framework/stream_handler/timestamp_align_input_stream_handler_test.cc @@ -26,7 +26,7 @@ namespace { TEST(TimestampAlignInputStreamHandlerTest, Initialization) { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "input_video" input_stream: "input_camera" node { @@ -121,7 +121,7 @@ TEST(TimestampAlignInputStreamHandlerTest, Initialization) { TEST(TimestampAlignInputStreamHandlerTest, TickRate) { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "input_video" input_stream: "input_camera" node { diff --git a/mediapipe/framework/subgraph.cc b/mediapipe/framework/subgraph.cc index 1fc69b587..231aeb6bc 100644 --- a/mediapipe/framework/subgraph.cc +++ b/mediapipe/framework/subgraph.cc @@ -32,7 +32,7 @@ ProtoSubgraph::ProtoSubgraph(const CalculatorGraphConfig& config) ProtoSubgraph::~ProtoSubgraph() {} -::mediapipe::StatusOr ProtoSubgraph::GetConfig( +mediapipe::StatusOr ProtoSubgraph::GetConfig( const Subgraph::SubgraphOptions& options) { return config_; } @@ -42,7 +42,7 @@ TemplateSubgraph::TemplateSubgraph(const 
CalculatorGraphTemplate& templ) TemplateSubgraph::~TemplateSubgraph() {} -::mediapipe::StatusOr TemplateSubgraph::GetConfig( +mediapipe::StatusOr TemplateSubgraph::GetConfig( const Subgraph::SubgraphOptions& options) { TemplateDict arguments = Subgraph::GetOptions(options).dict(); @@ -91,14 +91,14 @@ bool GraphRegistry::IsRegistered(const std::string& ns, global_factories_->IsRegistered(ns, type_name); } -::mediapipe::StatusOr GraphRegistry::CreateByName( +mediapipe::StatusOr GraphRegistry::CreateByName( const std::string& ns, const std::string& type_name, const Subgraph::SubgraphOptions* options) const { Subgraph::SubgraphOptions graph_options; if (options) { graph_options = *options; } - ::mediapipe::StatusOr> maker = + mediapipe::StatusOr> maker = local_factories_.IsRegistered(ns, type_name) ? local_factories_.Invoke(ns, type_name) : global_factories_->Invoke(ns, type_name); diff --git a/mediapipe/framework/subgraph.h b/mediapipe/framework/subgraph.h index 1791e027f..8529cbad3 100644 --- a/mediapipe/framework/subgraph.h +++ b/mediapipe/framework/subgraph.h @@ -41,7 +41,7 @@ class Subgraph { // the parent graph. // Subclasses may use the options argument to parameterize the config. // TODO: make this static? - virtual ::mediapipe::StatusOr GetConfig( + virtual mediapipe::StatusOr GetConfig( const SubgraphOptions& options) = 0; // Returns options of a specific type. @@ -61,9 +61,9 @@ class Subgraph { using SubgraphRegistry = GlobalFactoryRegistry>; -#define REGISTER_MEDIAPIPE_GRAPH(name) \ - REGISTER_FACTORY_FUNCTION_QUALIFIED(::mediapipe::SubgraphRegistry, \ - subgraph_registration, name, \ +#define REGISTER_MEDIAPIPE_GRAPH(name) \ + REGISTER_FACTORY_FUNCTION_QUALIFIED(mediapipe::SubgraphRegistry, \ + subgraph_registration, name, \ absl::make_unique) // A graph factory holding a literal CalculatorGraphConfig. @@ -71,7 +71,7 @@ class ProtoSubgraph : public Subgraph { public: ProtoSubgraph(const CalculatorGraphConfig& config); virtual ~ProtoSubgraph(); - virtual ::mediapipe::StatusOr GetConfig( + virtual mediapipe::StatusOr GetConfig( const Subgraph::SubgraphOptions& options); private: @@ -83,7 +83,7 @@ class TemplateSubgraph : public Subgraph { public: TemplateSubgraph(const CalculatorGraphTemplate& templ); virtual ~TemplateSubgraph(); - virtual ::mediapipe::StatusOr GetConfig( + virtual mediapipe::StatusOr GetConfig( const Subgraph::SubgraphOptions& options); private: @@ -118,7 +118,7 @@ class GraphRegistry { bool IsRegistered(const std::string& ns, const std::string& type_name) const; // Returns the specified graph config. - ::mediapipe::StatusOr CreateByName( + mediapipe::StatusOr CreateByName( const std::string& ns, const std::string& type_name, const Subgraph::SubgraphOptions* options = nullptr) const; diff --git a/mediapipe/framework/test_calculators.cc b/mediapipe/framework/test_calculators.cc index 000a6301f..4addce93b 100644 --- a/mediapipe/framework/test_calculators.cc +++ b/mediapipe/framework/test_calculators.cc @@ -39,21 +39,21 @@ using RandomEngine = std::mt19937_64; // A Calculator that outputs twice the value of its input packet (an int). 
class DoubleIntCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set(); cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { int value = cc->Inputs().Index(0).Value().Get(); cc->Outputs().Index(0).Add(new int(2 * value), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(DoubleIntCalculator); @@ -62,16 +62,16 @@ REGISTER_CALCULATOR(DoubleIntCalculator); // holds the high order bits and the second the low order ones. class IntSplitterPacketGenerator : public PacketGenerator { public: - static ::mediapipe::Status FillExpectations( + static mediapipe::Status FillExpectations( const PacketGeneratorOptions& extendable_options, // PacketTypeSet* input_side_packets, // PacketTypeSet* output_side_packets) { input_side_packets->Index(0).Set(); output_side_packets->Index(0).Set>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - static ::mediapipe::Status Generate( + static mediapipe::Status Generate( const PacketGeneratorOptions& extendable_options, // const PacketSet& input_side_packets, // PacketSet* output_side_packets) { @@ -80,7 +80,7 @@ class IntSplitterPacketGenerator : public PacketGenerator { uint32 low = value & 0xFFFFFFFF; output_side_packets->Index(0) = Adopt(new std::pair(high, low)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_PACKET_GENERATOR(IntSplitterPacketGenerator); @@ -90,7 +90,7 @@ REGISTER_PACKET_GENERATOR(IntSplitterPacketGenerator); // with both the high and low order bits. 
class TaggedIntSplitterPacketGenerator : public PacketGenerator { public: - static ::mediapipe::Status FillExpectations( + static mediapipe::Status FillExpectations( const PacketGeneratorOptions& extendable_options, // PacketTypeSet* input_side_packets, // PacketTypeSet* output_side_packets) { @@ -98,10 +98,10 @@ class TaggedIntSplitterPacketGenerator : public PacketGenerator { output_side_packets->Tag("HIGH").Set(); output_side_packets->Tag("LOW").Set(); output_side_packets->Tag("PAIR").Set>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - static ::mediapipe::Status Generate( + static mediapipe::Status Generate( const PacketGeneratorOptions& extendable_options, // const PacketSet& input_side_packets, // PacketSet* output_side_packets) { @@ -112,7 +112,7 @@ class TaggedIntSplitterPacketGenerator : public PacketGenerator { output_side_packets->Tag("LOW") = Adopt(new uint32(low)); output_side_packets->Tag("PAIR") = Adopt(new std::pair(high, low)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_PACKET_GENERATOR(TaggedIntSplitterPacketGenerator); @@ -129,23 +129,22 @@ class RangeCalculator : public CalculatorBase { public: RangeCalculator() : initialized_(false) {} - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Outputs().Index(0).Set(); cc->Outputs().Index(1).Set(); cc->Outputs().Index(2).Set(); cc->InputSidePackets().Index(0).Set>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { Initialize(cc); // Fail if requested, without setting any stream headers. This tests that // the downstream Calculators will not try to access the headers in case // this one failed. if (k_ == 0) { - return ::mediapipe::Status(::mediapipe::StatusCode::kCancelled, - "k_ == 0"); + return mediapipe::Status(mediapipe::StatusCode::kCancelled, "k_ == 0"); } cc->Outputs().Index(0).SetHeader( Adopt(new std::string(absl::StrCat(cc->CalculatorType(), k_)))); @@ -155,21 +154,21 @@ class RangeCalculator : public CalculatorBase { cc->Outputs().Index(1).SetNextTimestampBound(Timestamp::PostStream()); cc->Outputs().Index(2).SetNextTimestampBound(Timestamp::PreStream()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { // Output at timestamps 1:N-1 that are divisible by K. index_ += k_; if (index_ < n_) { cc->Outputs().Index(0).AddPacket(GetNextPacket().At(Timestamp(index_))); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } else { return tool::StatusStop(); } } - ::mediapipe::Status Close(CalculatorContext* cc) final { + mediapipe::Status Close(CalculatorContext* cc) final { // Output at timestamp N. cc->Outputs().Index(0).AddPacket(GetNextPacket().At(Timestamp(n_))); // Output: ints from a range specified in the input side packet. 
@@ -178,7 +177,7 @@ class RangeCalculator : public CalculatorBase { new double(static_cast(total_) / static_cast(count_)), Timestamp::PreStream()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: @@ -221,19 +220,19 @@ class StdDevCalculator : public CalculatorBase { public: StdDevCalculator() {} - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Tag("DATA").Set(); cc->Inputs().Tag("MEAN").Set(); cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { cc->Outputs().Index(0).SetNextTimestampBound(Timestamp::PostStream()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { if (cc->InputTimestamp() == Timestamp::PreStream()) { RET_CHECK(cc->Inputs().Tag("DATA").Value().IsEmpty()); RET_CHECK(!cc->Inputs().Tag("MEAN").Value().IsEmpty()); @@ -247,15 +246,15 @@ class StdDevCalculator : public CalculatorBase { cummulative_variance_ += diff * diff; ++count_; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Close(CalculatorContext* cc) final { + mediapipe::Status Close(CalculatorContext* cc) final { cc->Outputs().Index(0).Add( - new int(::mediapipe::MathUtil::SafeRound( + new int(mediapipe::MathUtil::SafeRound( sqrt(cummulative_variance_ / count_) * 100.0)), Timestamp::PostStream()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: @@ -273,15 +272,15 @@ REGISTER_CALCULATOR(StdDevCalculator); // concatenation of the input stream headers. 
class MergeCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { cc->Inputs().Index(i).Set(); } cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { auto header = absl::make_unique(); for (auto& input : cc->Inputs()) { if (!input.Header().IsEmpty()) { @@ -292,10 +291,10 @@ class MergeCalculator : public CalculatorBase { } } cc->Outputs().Index(0).SetHeader(Adopt(header.release())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { std::string result; if (cc->InputTimestamp().IsSpecialValue()) { absl::StrAppend(&result, cc->InputTimestamp().DebugString()); @@ -313,7 +312,7 @@ class MergeCalculator : public CalculatorBase { } } cc->Outputs().Index(0).Add(new std::string(result), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(MergeCalculator); @@ -325,28 +324,28 @@ class SaverCalculator : public CalculatorBase { public: SaverCalculator() : result_(new std::string) {} - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set(); cc->Outputs().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { cc->Outputs().Index(0).SetNextTimestampBound(Timestamp::PostStream()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { if (!result_->empty()) { result_->append("/"); } result_->append(cc->Inputs().Index(0).Get()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Close(CalculatorContext* cc) final { + mediapipe::Status Close(CalculatorContext* cc) final { cc->Outputs().Index(0).Add(result_.release(), Timestamp::PostStream()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: @@ -360,13 +359,13 @@ REGISTER_CALCULATOR(SaverCalculator); // as an input side packet. 
class RandomMatrixCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Outputs().Index(0).Set(); cc->InputSidePackets().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { auto& options = cc->Options(); CHECK_LT(0, options.timestamp_step()); CHECK_LT(0, options.rows()); @@ -381,10 +380,10 @@ class RandomMatrixCalculator : public CalculatorBase { std::vector seed(1); seq.generate(seed.begin(), seed.end()); random_ = absl::make_unique(seed[0]); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { auto& options = cc->Options(); Matrix* matrix = new Matrix(options.rows(), options.cols()); @@ -399,7 +398,7 @@ class RandomMatrixCalculator : public CalculatorBase { if (current_timestamp_ >= Timestamp(options.limit_timestamp())) { return tool::StatusStop(); } else { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } @@ -418,21 +417,21 @@ REGISTER_CALCULATOR(RandomMatrixCalculator); // effect of round off error). class MeanAndCovarianceCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set(); cc->Outputs().Index(0).Set>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->Outputs().Index(0).SetNextTimestampBound(Timestamp::PostStream()); rows_ = -1; num_samples_ = 0; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { const Eigen::MatrixXd sample = cc->Inputs().Index(0).Get().cast(); CHECK_EQ(1, sample.cols()); @@ -447,10 +446,10 @@ class MeanAndCovarianceCalculator : public CalculatorBase { outer_product_sum_ += sample * sample.transpose(); ++num_samples_; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Close(CalculatorContext* cc) override { + mediapipe::Status Close(CalculatorContext* cc) override { Eigen::VectorXd mean_vector = sum_vector_ / num_samples_; Eigen::MatrixXd covariance_matrix(rows_, rows_); @@ -470,7 +469,7 @@ class MeanAndCovarianceCalculator : public CalculatorBase { new std::pair( mean_vector.cast(), covariance_matrix.cast()), Timestamp::PostStream()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: @@ -486,27 +485,27 @@ REGISTER_CALCULATOR(MeanAndCovarianceCalculator); // increases by 1 for each packet. 
class SidePacketToOutputPacketCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->InputSidePackets().Index(0).SetAny(); for (int i = 1; i < cc->InputSidePackets().NumEntries(); ++i) { cc->InputSidePackets().Index(i).SetSameAs( &cc->InputSidePackets().Index(0)); } cc->Outputs().Index(0).SetSameAs(&cc->InputSidePackets().Index(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { int current_timestamp = 0; for (const Packet& packet : cc->InputSidePackets()) { cc->Outputs().Index(0).AddPacket(packet.At(Timestamp(current_timestamp))); ++current_timestamp; } cc->Outputs().Index(0).Close(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { return tool::StatusStop(); } }; @@ -517,46 +516,46 @@ REGISTER_CALCULATOR(SidePacketToOutputPacketCalculator); class ABSL_DEPRECATED("Use SidePacketToOutputPacketCalculator instead") ExternalInputToOutputPacketCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->InputSidePackets().Index(0).SetAny(); for (int i = 1; i < cc->InputSidePackets().NumEntries(); ++i) { cc->InputSidePackets().Index(i).SetSameAs( &cc->InputSidePackets().Index(0)); } cc->Outputs().Index(0).SetSameAs(&cc->InputSidePackets().Index(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { int current_timestamp = 0; for (const Packet& packet : cc->InputSidePackets()) { cc->Outputs().Index(0).AddPacket(packet.At(Timestamp(current_timestamp))); ++current_timestamp; } cc->Outputs().Index(0).Close(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { return tool::StatusStop(); } }; REGISTER_CALCULATOR(ExternalInputToOutputPacketCalculator); // A Calculator::Process callback function. -typedef std::function<::mediapipe::Status(const InputStreamShardSet&, - OutputStreamShardSet*)> +typedef std::function ProcessFunction; // A callback function for Calculator::Open, Process, or Close. -typedef std::function<::mediapipe::Status(CalculatorContext* cc)> +typedef std::function CalculatorContextFunction; // A Calculator that runs a testing callback function in Process, // Open, or Close, which is specified as an input side packet. 
class LambdaCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { for (CollectionItemId id = cc->Inputs().BeginId(); id < cc->Inputs().EndId(); ++id) { cc->Inputs().Get(id).SetAny(); @@ -573,31 +572,31 @@ class LambdaCalculator : public CalculatorBase { cc->InputSidePackets().Tag(tag).Set(); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { if (cc->InputSidePackets().HasTag("OPEN")) { return GetContextFn(cc, "OPEN")(cc); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { if (cc->InputSidePackets().HasTag("PROCESS")) { return GetContextFn(cc, "PROCESS")(cc); } if (cc->InputSidePackets().HasTag("") > 0) { return GetProcessFn(cc, "")(cc->Inputs(), &cc->Outputs()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Close(CalculatorContext* cc) final { + mediapipe::Status Close(CalculatorContext* cc) final { if (cc->InputSidePackets().HasTag("CLOSE")) { return GetContextFn(cc, "CLOSE")(cc); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: @@ -619,7 +618,7 @@ REGISTER_CALCULATOR(LambdaCalculator); // stream connections. class DummyTestCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { for (CollectionItemId id = cc->Inputs().BeginId(); id < cc->Inputs().EndId(); ++id) { cc->Inputs().Get(id).SetAny(); @@ -632,11 +631,11 @@ class DummyTestCalculator : public CalculatorBase { id < cc->InputSidePackets().EndId(); ++id) { cc->InputSidePackets().Get(id).SetAny(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { - return ::mediapipe::OkStatus(); + mediapipe::Status Process(CalculatorContext* cc) final { + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(DummyTestCalculator); @@ -645,27 +644,27 @@ REGISTER_CALCULATOR(DummyTestCalculator); // a set number of microseconds. 
class PassThroughWithSleepCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set<int>(); cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0)); cc->InputSidePackets().Tag("SLEEP_MICROS").Set<int64>(); cc->InputSidePackets().Tag("CLOCK").Set<std::shared_ptr<::mediapipe::Clock>>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { cc->SetOffset(TimestampDiff(0)); sleep_micros_ = cc->InputSidePackets().Tag("SLEEP_MICROS").Get<int64>(); if (sleep_micros_ < 0) { - return ::mediapipe::InternalError("SLEEP_MICROS should be >= 0"); + return mediapipe::InternalError("SLEEP_MICROS should be >= 0"); } clock_ = cc->InputSidePackets().Tag("CLOCK").Get<std::shared_ptr<::mediapipe::Clock>>(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { clock_->Sleep(absl::Microseconds(sleep_micros_)); int value = cc->Inputs().Index(0).Value().Get<int>(); cc->Outputs().Index(0).Add(new int(value), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } private: @@ -677,23 +676,23 @@ REGISTER_CALCULATOR(PassThroughWithSleepCalculator); // A Calculator that multiplies two input values. class MultiplyIntCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set<int>(); cc->Inputs().Index(1).SetSameAs(&cc->Inputs().Index(0)); // cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0)); RET_CHECK(cc->Outputs().HasTag("OUT")); cc->Outputs().Tag("OUT").SetSameAs(&cc->Inputs().Index(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { int x = cc->Inputs().Index(0).Value().Get<int>(); int y = cc->Inputs().Index(1).Value().Get<int>(); cc->Outputs().Tag("OUT").Add(new int(x * y), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(MultiplyIntCalculator); diff --git a/mediapipe/framework/test_service.cc b/mediapipe/framework/test_service.cc index e258889ea..a5139e8b2 100644 --- a/mediapipe/framework/test_service.cc +++ b/mediapipe/framework/test_service.cc @@ -19,26 +19,26 @@ namespace mediapipe { const GraphService<TestServiceObject> kTestService("test_service"); const GraphService<int> kAnotherService("another_service"); -::mediapipe::Status TestServiceCalculator::GetContract(CalculatorContract* cc) { +mediapipe::Status TestServiceCalculator::GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).Set<int>(); cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0)); // This service will be required. The graph won't start without it. cc->UseService(kTestService); // This service is optional for this calculator.
cc->UseService(kAnotherService).Optional(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TestServiceCalculator::Open(CalculatorContext* cc) { +mediapipe::Status TestServiceCalculator::Open(CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); // For an optional service, check whether it's available. if (cc->Service(kAnotherService).IsAvailable()) { optional_bias_ = cc->Service(kAnotherService).GetObject(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TestServiceCalculator::Process(CalculatorContext* cc) { +mediapipe::Status TestServiceCalculator::Process(CalculatorContext* cc) { int value = cc->Inputs().Index(0).Value().Get(); // A required service is sure to be available, so we can just GetObject. TestServiceObject& service_object = cc->Service(kTestService).GetObject(); @@ -46,7 +46,7 @@ const GraphService kAnotherService("another_service"); service_object["count"] += 1; int x = value + delta + optional_bias_; cc->Outputs().Index(0).Add(new int(x), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } REGISTER_CALCULATOR(TestServiceCalculator); diff --git a/mediapipe/framework/test_service.h b/mediapipe/framework/test_service.h index c348c887c..4d76f84e5 100644 --- a/mediapipe/framework/test_service.h +++ b/mediapipe/framework/test_service.h @@ -27,9 +27,9 @@ extern const GraphService kAnotherService; // Use a service. class TestServiceCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) final; - ::mediapipe::Status Process(CalculatorContext* cc) final; + static mediapipe::Status GetContract(CalculatorContract* cc); + mediapipe::Status Open(CalculatorContext* cc) final; + mediapipe::Status Process(CalculatorContext* cc) final; private: int optional_bias_ = 0; diff --git a/mediapipe/framework/thread_pool_executor.cc b/mediapipe/framework/thread_pool_executor.cc index c52dd7ed0..d7c3a7886 100644 --- a/mediapipe/framework/thread_pool_executor.cc +++ b/mediapipe/framework/thread_pool_executor.cc @@ -25,16 +25,16 @@ namespace mediapipe { // static -::mediapipe::StatusOr ThreadPoolExecutor::Create( +mediapipe::StatusOr ThreadPoolExecutor::Create( const MediaPipeOptions& extendable_options) { auto& options = extendable_options.GetExtension(ThreadPoolExecutorOptions::ext); if (!options.has_num_threads()) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "num_threads is not specified in ThreadPoolExecutorOptions."); } if (options.num_threads() <= 0) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "The num_threads field in ThreadPoolExecutorOptions should be " "positive but is " << options.num_threads(); @@ -46,7 +46,7 @@ namespace mediapipe { // pass a negative value. 0 has a special meaning (the default thread // stack size for the system), so we also avoid that. 
if (options.stack_size() <= 0) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "The stack_size field in ThreadPoolExecutorOptions should be " "positive but is " << options.stack_size(); diff --git a/mediapipe/framework/thread_pool_executor.h b/mediapipe/framework/thread_pool_executor.h index 92c16bcc7..a7700c4bb 100644 --- a/mediapipe/framework/thread_pool_executor.h +++ b/mediapipe/framework/thread_pool_executor.h @@ -25,7 +25,7 @@ namespace mediapipe { // A multithreaded executor based on a thread pool. class ThreadPoolExecutor : public Executor { public: - static ::mediapipe::StatusOr Create( + static mediapipe::StatusOr Create( const MediaPipeOptions& extendable_options); explicit ThreadPoolExecutor(int num_threads); @@ -43,7 +43,7 @@ class ThreadPoolExecutor : public Executor { // Saves the value of the stack size option and starts the thread pool. void Start(); - ::mediapipe::ThreadPool thread_pool_; + mediapipe::ThreadPool thread_pool_; // Records the stack size in ThreadOptions right before we call // thread_pool_.StartWorkers(). diff --git a/mediapipe/framework/timestamp.h b/mediapipe/framework/timestamp.h index 179388942..03f41597f 100644 --- a/mediapipe/framework/timestamp.h +++ b/mediapipe/framework/timestamp.h @@ -57,7 +57,7 @@ namespace mediapipe { // have underflow/overflow etc. This type is used internally by Timestamp // and TimestampDiff. MEDIAPIPE_DEFINE_SAFE_INT_TYPE(TimestampBaseType, int64, - ::mediapipe::intops::LogFatalOnError); + mediapipe::intops::LogFatalOnError); class TimestampDiff; diff --git a/mediapipe/framework/tool/BUILD b/mediapipe/framework/tool/BUILD index 1e4624c82..fa470883f 100644 --- a/mediapipe/framework/tool/BUILD +++ b/mediapipe/framework/tool/BUILD @@ -600,6 +600,123 @@ cc_library( }), ) +cc_library( + name = "container_util", + srcs = ["container_util.cc"], + hdrs = ["container_util.h"], + visibility = ["//visibility:public"], + deps = [ + ":name_util", + "//mediapipe/framework:calculator_framework", + "//mediapipe/framework/port:ret_check", + "//mediapipe/framework/port:status", + "//mediapipe/framework/tool:switch_container_cc_proto", + ], +) + +cc_library( + name = "switch_demux_calculator", + srcs = ["switch_demux_calculator.cc"], + visibility = [ + "//visibility:public", + ], + deps = [ + ":container_util", + ":options_util", + "//mediapipe/framework:calculator_framework", + "//mediapipe/framework:collection_item_id", + "//mediapipe/framework/deps:mathutil", + "//mediapipe/framework/formats:video_stream_header", + "//mediapipe/framework/port:integral_types", + "//mediapipe/framework/port:logging", + "//mediapipe/framework/port:ret_check", + "//mediapipe/framework/port:status", + "@com_google_absl//absl/strings", + ], + alwayslink = 1, +) + +cc_library( + name = "switch_mux_calculator", + srcs = ["switch_mux_calculator.cc"], + visibility = [ + "//visibility:public", + ], + deps = [ + ":container_util", + ":options_util", + "//mediapipe/framework:calculator_framework", + "//mediapipe/framework:collection_item_id", + "//mediapipe/framework:input_stream_shard", + "//mediapipe/framework:output_stream_shard", + "//mediapipe/framework/deps:mathutil", + "//mediapipe/framework/formats:video_stream_header", + "//mediapipe/framework/port:integral_types", + "//mediapipe/framework/port:logging", + "//mediapipe/framework/port:ret_check", + "//mediapipe/framework/port:status", + "@com_google_absl//absl/strings", + ], + alwayslink = 1, +) + +mediapipe_proto_library( + 
name = "switch_container_proto", + srcs = ["switch_container.proto"], + visibility = ["//visibility:public"], + deps = [ + "//mediapipe/framework:calculator_options_proto", + "//mediapipe/framework:calculator_proto", + ], +) + +cc_library( + name = "switch_container", + srcs = ["switch_container.cc"], + visibility = ["//visibility:public"], + deps = [ + ":container_util", + ":name_util", + ":subgraph_expansion", + ":switch_demux_calculator", + ":switch_mux_calculator", + "//mediapipe/framework:calculator_cc_proto", + "//mediapipe/framework:calculator_framework", + "//mediapipe/framework:mediapipe_options_cc_proto", + "//mediapipe/framework:stream_handler_cc_proto", + "//mediapipe/framework:subgraph", + "//mediapipe/framework/port:ret_check", + "//mediapipe/framework/port:status", + "//mediapipe/framework/stream_handler:sync_set_input_stream_handler_cc_proto", + "//mediapipe/framework/tool:switch_container_cc_proto", + ], + alwayslink = 1, +) + +cc_test( + name = "switch_container_test", + size = "small", + srcs = ["switch_container_test.cc"], + visibility = ["//visibility:public"], + deps = [ + ":node_chain_subgraph_cc_proto", + ":subgraph_expansion", + ":switch_container", + "//mediapipe/calculators/core:pass_through_calculator", + "//mediapipe/framework:calculator_cc_proto", + "//mediapipe/framework:calculator_framework", + "//mediapipe/framework:subgraph", + "//mediapipe/framework:test_calculators", + "//mediapipe/framework/deps:message_matchers", + "//mediapipe/framework/port:gtest_main", + "//mediapipe/framework/port:logging", + "//mediapipe/framework/port:parse_text_proto", + "//mediapipe/framework/port:ret_check", + "//mediapipe/framework/port:status", + "//mediapipe/framework/stream_handler:immediate_input_stream_handler", + ], +) + exports_files( ["build_defs.bzl"], visibility = ["//mediapipe/framework:__subpackages__"], diff --git a/mediapipe/framework/tool/container_util.cc b/mediapipe/framework/tool/container_util.cc new file mode 100644 index 000000000..9939ef277 --- /dev/null +++ b/mediapipe/framework/tool/container_util.cc @@ -0,0 +1,97 @@ +#include "mediapipe/framework/tool/container_util.h" + +#include "mediapipe/framework/tool/switch_container.pb.h" + +namespace mediapipe { +namespace tool { + +std::string ChannelTag(const std::string& tag, int channel) { + return absl::StrCat("C", channel, "__", tag); +} + +// Parses a tag name starting with a channel prefix, like "C2__". 
+bool ParseChannelTag(const std::string& channel_name, std::string* name, + std::string* num) { + int pos = channel_name.find("C"); + int sep = channel_name.find("__"); + if (pos != 0 || sep == std::string::npos) { + return false; + } + *num = channel_name.substr(pos + 1, sep - (pos + 1)); + *name = channel_name.substr(sep + 2); + return true; +} + +std::set ChannelTags(const std::shared_ptr& map) { + std::set result; + for (const std::string& tag : map->GetTags()) { + std::string name, num; + if (ParseChannelTag(tag, &name, &num)) { + result.insert(name); + } + } + return result; +} + +int ChannelCount(const std::shared_ptr& map) { + int count = 0; + for (const std::string& tag : map->GetTags()) { + std::string name, num; + int channel = -1; + if (ParseChannelTag(tag, &name, &num)) { + if (absl::SimpleAtoi(num, &channel)) { + count = std::max(count, channel + 1); + } + } + } + return count; +} + +void Relay(const InputStreamShard& input, OutputStreamShard* output) { + if (input.IsEmpty()) { + Timestamp input_bound = input.Value().Timestamp().NextAllowedInStream(); + if (output->NextTimestampBound() < input_bound) { + output->SetNextTimestampBound(input_bound); + } + } else { + output->AddPacket(input.Value()); + } +} + +int GetChannelIndex(const CalculatorContext& cc, int previous_index) { + int result = previous_index; + Packet select_packet; + Packet enable_packet; + if (cc.InputTimestamp() == Timestamp::Unstarted()) { + auto& options = cc.Options(); + if (options.has_enable()) { + result = options.enable() ? 1 : 0; + } + if (options.has_select()) { + result = options.select(); + } + if (cc.InputSidePackets().HasTag("ENABLE")) { + enable_packet = cc.InputSidePackets().Tag("ENABLE"); + } + if (cc.InputSidePackets().HasTag("SELECT")) { + select_packet = cc.InputSidePackets().Tag("SELECT"); + } + } else { + if (cc.Inputs().HasTag("ENABLE")) { + enable_packet = cc.Inputs().Tag("ENABLE").Value(); + } + if (cc.Inputs().HasTag("SELECT")) { + select_packet = cc.Inputs().Tag("SELECT").Value(); + } + } + if (!enable_packet.IsEmpty()) { + result = enable_packet.Get() ? 1 : 0; + } + if (!select_packet.IsEmpty()) { + result = select_packet.Get(); + } + return result; +} + +} // namespace tool +} // namespace mediapipe diff --git a/mediapipe/framework/tool/container_util.h b/mediapipe/framework/tool/container_util.h new file mode 100644 index 000000000..feffb0c28 --- /dev/null +++ b/mediapipe/framework/tool/container_util.h @@ -0,0 +1,31 @@ +#ifndef MEDIAPIPE_FRAMEWORK_TOOL_CONTAINER_UTIL_H_ +#define MEDIAPIPE_FRAMEWORK_TOOL_CONTAINER_UTIL_H_ + +#include "mediapipe/framework/calculator_framework.h" + +namespace mediapipe { +namespace tool { + +// Returns a tag name for one of the demux output channels. +// This is the channel number followed by the stream name separated by "__". +// For example, the channel-name for stream "FRAME" on channel 1 is "C1__FRAME". +std::string ChannelTag(const std::string& tag, int channel); + +// Returns the set of tags directed to demux output channels. +// Each demux output tag is named using function ChannelTag(). +// This function returns the demux input tags without the channel numbers. +std::set ChannelTags(const std::shared_ptr& map); + +// Returns the number of demux output channels. +int ChannelCount(const std::shared_ptr& map); + +// Copies packet or timestamp bound from input to output stream. +void Relay(const InputStreamShard& input, OutputStreamShard* output); + +// Returns the most recent specified channel index. 
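// The index is resolved in the implementation above as follows: at Open
// (Timestamp::Unstarted), the calculator options are consulted first and
// then the "ENABLE"/"SELECT" side packets; on later timestamps, the
// "ENABLE"/"SELECT" input streams. In each round, a non-empty SELECT
// value takes precedence over ENABLE.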
+int GetChannelIndex(const CalculatorContext& cc, int previous_index); + +} // namespace tool +} // namespace mediapipe + +#endif // MEDIAPIPE_FRAMEWORK_TOOL_CONTAINER_UTIL_H_ diff --git a/mediapipe/framework/tool/fill_packet_set.cc b/mediapipe/framework/tool/fill_packet_set.cc index 30bbfa6db..365f238c4 100644 --- a/mediapipe/framework/tool/fill_packet_set.cc +++ b/mediapipe/framework/tool/fill_packet_set.cc @@ -25,14 +25,14 @@ namespace mediapipe { namespace tool { -::mediapipe::StatusOr> FillPacketSet( +mediapipe::StatusOr> FillPacketSet( const PacketTypeSet& input_side_packet_types, const std::map& input_side_packets, int* missing_packet_count_ptr) { if (missing_packet_count_ptr != nullptr) { *missing_packet_count_ptr = 0; } - std::vector<::mediapipe::Status> errors; + std::vector errors; auto packet_set = absl::make_unique(input_side_packet_types.TagMap()); const auto& names = input_side_packet_types.TagMap()->Names(); @@ -44,20 +44,20 @@ namespace tool { if (missing_packet_count_ptr != nullptr) { ++(*missing_packet_count_ptr); } else { - errors.push_back(::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + errors.push_back(mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Missing input side packet: " << name); } continue; } packet_set->Get(id) = iter->second; // Check the type. - ::mediapipe::Status status = + mediapipe::Status status = input_side_packet_types.Get(id).Validate(iter->second); if (!status.ok()) { std::pair tag_index = input_side_packet_types.TagAndIndexFromId(id); errors.push_back( - ::mediapipe::StatusBuilder(status, MEDIAPIPE_LOC).SetPrepend() + mediapipe::StatusBuilder(status, MEDIAPIPE_LOC).SetPrepend() << "Packet \"" << input_side_packet_types.TagMap()->Names()[id.value()] << "\" with tag \"" << tag_index.first << "\" and index " diff --git a/mediapipe/framework/tool/fill_packet_set.h b/mediapipe/framework/tool/fill_packet_set.h index 2d9fad30e..a2cd72130 100644 --- a/mediapipe/framework/tool/fill_packet_set.h +++ b/mediapipe/framework/tool/fill_packet_set.h @@ -32,7 +32,7 @@ namespace tool { // missing_packet_count_ptr is not null, the number of missing packets // is returned in *missing_packet_count_ptr. Otherwise, an error is // returned if any packets are missing. -::mediapipe::StatusOr> FillPacketSet( +mediapipe::StatusOr> FillPacketSet( const PacketTypeSet& input_side_packet_types, const std::map& input_side_packets, int* missing_packet_count_ptr); diff --git a/mediapipe/framework/tool/gate_subgraph.proto b/mediapipe/framework/tool/gate_subgraph.proto deleted file mode 100644 index 8dd124270..000000000 --- a/mediapipe/framework/tool/gate_subgraph.proto +++ /dev/null @@ -1,22 +0,0 @@ -syntax = "proto2"; - -package mediapipe; - -import "mediapipe/framework/calculator.proto"; - -option java_package = "com.google.mediapipe.proto"; -option java_outer_classname = "GateSubgraphProto"; - -// Options for a gate-subgraph directing traffic to one of several contained -// CalculatorGraphConfig's. -message GateSubgraphOptions { - extend mediapipe.CalculatorOptions { - optional GateSubgraphOptions ext = 297196839; - } - - // The contained literal subgraph configuration(s). - repeated CalculatorGraphConfig contained_graph = 1; - - // The contained registered subgraphs or calculators. 
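// For reference, a container node using the replacement options might
// look like the following sketch (the SwitchContainerOptions field name
// is assumed to mirror the contained_node field deleted here):
//   node {
//     calculator: "SwitchContainer"
//     input_stream: "SELECT:select"
//     input_stream: "IN:input"
//     output_stream: "OUT:output"
//     options {
//       [mediapipe.SwitchContainerOptions.ext] {
//         contained_node: { calculator: "CalculatorA" }  # channel 0
//         contained_node: { calculator: "CalculatorB" }  # channel 1
//       }
//     }
//   }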
- repeated CalculatorGraphConfig.Node contained_node = 2; -} diff --git a/mediapipe/framework/tool/name_util.cc b/mediapipe/framework/tool/name_util.cc index 7aa4965ee..695b5b2e4 100644 --- a/mediapipe/framework/tool/name_util.cc +++ b/mediapipe/framework/tool/name_util.cc @@ -61,7 +61,7 @@ std::string GetUnusedSidePacketName( } std::string candidate = input_side_packet_name_base; int iter = 2; - while (::mediapipe::ContainsKey(input_side_packets, candidate)) { + while (mediapipe::ContainsKey(input_side_packets, candidate)) { candidate = absl::StrCat(input_side_packet_name_base, "_", absl::StrFormat("%02d", iter)); ++iter; @@ -116,7 +116,9 @@ std::pair ParseTagIndexFromStream(const std::string& stream) { } std::string CatTag(const std::string& tag, int index) { - return absl::StrCat(tag, index <= 0 ? "" : absl::StrCat(":", index)); + std::string colon_index = + (index <= 0 || tag.empty()) ? "" : absl::StrCat(":", index); + return absl::StrCat(tag, colon_index); } std::string CatStream(const std::pair& tag_index, diff --git a/mediapipe/framework/tool/name_util.h b/mediapipe/framework/tool/name_util.h index 69885ae47..207fe162c 100644 --- a/mediapipe/framework/tool/name_util.h +++ b/mediapipe/framework/tool/name_util.h @@ -93,7 +93,7 @@ std::string CatStream(const std::pair& tag_index, } // namespace mediapipe namespace mediapipe { -using ::mediapipe::tool::CanonicalNodeName; +using mediapipe::tool::CanonicalNodeName; } // namespace mediapipe #endif // MEDIAPIPE_FRAMEWORK_TOOL_NAME_UTIL_H_ diff --git a/mediapipe/framework/tool/options_util.h b/mediapipe/framework/tool/options_util.h index ee946ff23..51d5bed4b 100644 --- a/mediapipe/framework/tool/options_util.h +++ b/mediapipe/framework/tool/options_util.h @@ -85,7 +85,7 @@ void GetNodeOptions(const CalculatorGraphConfig::Node& node_config, T* result) { #if defined(MEDIAPIPE_PROTO_LITE) && defined(MEDIAPIPE_PROTO_THIRD_PARTY) // protobuf::Any is unavailable with third_party/protobuf:protobuf-lite. #else - for (const ::mediapipe::protobuf::Any& options : node_config.node_options()) { + for (const mediapipe::protobuf::Any& options : node_config.node_options()) { if (options.Is()) { options.UnpackTo(result); } diff --git a/mediapipe/framework/tool/proto_util_lite.cc b/mediapipe/framework/tool/proto_util_lite.cc index 263a7c3a7..1eb2c812d 100644 --- a/mediapipe/framework/tool/proto_util_lite.cc +++ b/mediapipe/framework/tool/proto_util_lite.cc @@ -42,8 +42,8 @@ bool IsLengthDelimited(WireFormatLite::WireType wire_type) { } // Reads a single data value for a wire type. -::mediapipe::Status ReadFieldValue(uint32 tag, CodedInputStream* in, - std::string* result) { +mediapipe::Status ReadFieldValue(uint32 tag, CodedInputStream* in, + std::string* result) { WireFormatLite::WireType wire_type = WireFormatLite::GetTagWireType(tag); if (IsLengthDelimited(wire_type)) { uint32 length; @@ -59,13 +59,13 @@ bool IsLengthDelimited(WireFormatLite::WireType wire_type) { cos.Trim(); result->assign(field_data, tag_size, std::string::npos); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Reads the packed sequence of data values for a wire type. -::mediapipe::Status ReadPackedValues(WireFormatLite::WireType wire_type, - CodedInputStream* in, - std::vector* field_values) { +mediapipe::Status ReadPackedValues(WireFormatLite::WireType wire_type, + CodedInputStream* in, + std::vector* field_values) { uint32 data_size; RET_CHECK(in->ReadVarint32(&data_size)); // fake_tag encodes the wire-type for calls to WireFormatLite::SkipField. 
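// Effect of the CatTag change in name_util.cc above: an empty tag no
// longer picks up a spurious index suffix.
//   CatTag("VIDEO", 0) -> "VIDEO"
//   CatTag("VIDEO", 2) -> "VIDEO:2"
//   CatTag("", 2)      -> ""    (previously ":2")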
@@ -77,15 +77,15 @@ bool IsLengthDelimited(WireFormatLite::WireType wire_type) { field_values->push_back(number); data_size -= number.size(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Extracts the data value(s) for one field from a serialized message. // The message with these field values removed is written to |out|. -::mediapipe::Status GetFieldValues(uint32 field_id, - WireFormatLite::WireType wire_type, - CodedInputStream* in, CodedOutputStream* out, - std::vector* field_values) { +mediapipe::Status GetFieldValues(uint32 field_id, + WireFormatLite::WireType wire_type, + CodedInputStream* in, CodedOutputStream* out, + std::vector* field_values) { uint32 tag; while ((tag = in->ReadTag()) != 0) { int field_number = WireFormatLite::GetTagFieldNumber(tag); @@ -102,7 +102,7 @@ bool IsLengthDelimited(WireFormatLite::WireType wire_type) { RET_CHECK(WireFormatLite::SkipField(in, tag, out)); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Injects the data value(s) for one field into a serialized message. @@ -122,7 +122,7 @@ void SetFieldValues(uint32 field_id, WireFormatLite::WireType wire_type, FieldAccess::FieldAccess(uint32 field_id, FieldType field_type) : field_id_(field_id), field_type_(field_type) {} -::mediapipe::Status FieldAccess::SetMessage(const std::string& message) { +mediapipe::Status FieldAccess::SetMessage(const std::string& message) { ArrayInputStream ais(message.data(), message.size()); CodedInputStream in(&ais); StringOutputStream sos(&message_); @@ -146,7 +146,7 @@ std::vector* FieldAccess::mutable_field_values() { } // Replaces a range of field values for one field nested within a protobuf. -::mediapipe::Status ProtoUtilLite::ReplaceFieldRange( +mediapipe::Status ProtoUtilLite::ReplaceFieldRange( FieldValue* message, ProtoPath proto_path, int length, FieldType field_type, const std::vector& field_values) { int field_id, index; @@ -169,11 +169,11 @@ std::vector* FieldAccess::mutable_field_values() { } message->clear(); access.GetMessage(message); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Returns a range of field values from one field nested within a protobuf. -::mediapipe::Status ProtoUtilLite::GetFieldRange( +mediapipe::Status ProtoUtilLite::GetFieldRange( const FieldValue& message, ProtoPath proto_path, int length, FieldType field_type, std::vector* field_values) { int field_id, index; @@ -194,41 +194,40 @@ std::vector* FieldAccess::mutable_field_values() { field_values->insert(field_values->begin(), v.begin() + index, v.begin() + index + length); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // If ok, returns OkStatus, otherwise returns InvalidArgumentError. template -::mediapipe::Status SyntaxStatus(bool ok, const std::string& text, T* result) { - return ok ? ::mediapipe::OkStatus() - : ::mediapipe::InvalidArgumentError(absl::StrCat( +mediapipe::Status SyntaxStatus(bool ok, const std::string& text, T* result) { + return ok ? mediapipe::OkStatus() + : mediapipe::InvalidArgumentError(absl::StrCat( "Syntax error: \"", text, "\"", " for type: ", MediaPipeTypeStringOrDemangled(), ".")); } // Templated parsing of a std::string value. 
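// Usage sketch for the accessors above: a ProtoPath is a vector of
// (field_id, index) pairs, and the field number 7 here is hypothetical.
//   ProtoUtilLite::FieldValue message = config.SerializeAsString();
//   std::vector<ProtoUtilLite::FieldValue> args;
//   MP_RETURN_IF_ERROR(ProtoUtilLite::Serialize(
//       {"42"}, WireFormatLite::TYPE_INT32, &args));
//   MP_RETURN_IF_ERROR(ProtoUtilLite::ReplaceFieldRange(
//       &message, {{7, 0}}, 1, WireFormatLite::TYPE_INT32, args));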
template -::mediapipe::Status ParseValue(const std::string& text, T* result) { +mediapipe::Status ParseValue(const std::string& text, T* result) { return SyntaxStatus(absl::SimpleAtoi(text, result), text, result); } template <> -::mediapipe::Status ParseValue(const std::string& text, - double* result) { +mediapipe::Status ParseValue(const std::string& text, double* result) { return SyntaxStatus(absl::SimpleAtod(text, result), text, result); } template <> -::mediapipe::Status ParseValue(const std::string& text, float* result) { +mediapipe::Status ParseValue(const std::string& text, float* result) { return SyntaxStatus(absl::SimpleAtof(text, result), text, result); } template <> -::mediapipe::Status ParseValue(const std::string& text, bool* result) { +mediapipe::Status ParseValue(const std::string& text, bool* result) { return SyntaxStatus(absl::SimpleAtob(text, result), text, result); } template <> -::mediapipe::Status ParseValue(const std::string& text, - std::string* result) { +mediapipe::Status ParseValue(const std::string& text, + std::string* result) { *result = text; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Templated formatting of a primitive value. @@ -239,20 +238,20 @@ std::string FormatValue(T v) { // A helper function to parse and serialize one primtive value. template -::mediapipe::Status WritePrimitive( +mediapipe::Status WritePrimitive( void (*writer)(T, proto_ns::io::CodedOutputStream*), const std::string& text, CodedOutputStream* out) { T value; MP_RETURN_IF_ERROR(ParseValue(text, &value)); (*writer)(value, out); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Serializes a protobuf FieldValue. -static ::mediapipe::Status SerializeValue(const std::string& text, - FieldType field_type, - FieldValue* field_value) { - ::mediapipe::Status status; +static mediapipe::Status SerializeValue(const std::string& text, + FieldType field_type, + FieldValue* field_value) { + mediapipe::Status status; StringOutputStream sos(field_value); CodedOutputStream out(&sos); @@ -278,11 +277,11 @@ static ::mediapipe::Status SerializeValue(const std::string& text, case W::TYPE_BYTES: case W::TYPE_STRING: { out.WriteRaw(text.data(), text.size()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } case W::TYPE_GROUP: case W::TYPE_MESSAGE: - return ::mediapipe::UnimplementedError( + return mediapipe::UnimplementedError( "SerializeValue cannot serialize a Message."); case W::TYPE_UINT32: return WritePrimitive(W::WriteUInt32NoTag, text, &out); @@ -297,27 +296,27 @@ static ::mediapipe::Status SerializeValue(const std::string& text, case W::TYPE_SINT64: return WritePrimitive(W::WriteSInt64NoTag, text, &out); } - return ::mediapipe::UnimplementedError("SerializeValue unimplemented type."); + return mediapipe::UnimplementedError("SerializeValue unimplemented type."); } // A helper function for deserializing one text value. template -static ::mediapipe::Status ReadPrimitive(CodedInputStream* input, - std::string* result) { +static mediapipe::Status ReadPrimitive(CodedInputStream* input, + std::string* result) { CType value; if (!WireFormatLite::ReadPrimitive(input, &value)) { - return ::mediapipe::InvalidArgumentError(absl::StrCat( + return mediapipe::InvalidArgumentError(absl::StrCat( "Bad serialized value: ", MediaPipeTypeStringOrDemangled(), ".")); } *result = FormatValue(value); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Deserializes a protobuf FieldValue. 
-static ::mediapipe::Status DeserializeValue(const FieldValue& bytes, - FieldType field_type, - std::string* result) { +static mediapipe::Status DeserializeValue(const FieldValue& bytes, + FieldType field_type, + std::string* result) { ArrayInputStream ais(bytes.data(), bytes.size()); CodedInputStream input(&ais); typedef WireFormatLite W; @@ -341,7 +340,7 @@ static ::mediapipe::Status DeserializeValue(const FieldValue& bytes, case W::TYPE_BYTES: case W::TYPE_STRING: { *result = bytes; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } case W::TYPE_GROUP: case W::TYPE_MESSAGE: @@ -359,11 +358,10 @@ static ::mediapipe::Status DeserializeValue(const FieldValue& bytes, case W::TYPE_SINT64: return ReadPrimitive(&input, result); } - return ::mediapipe::UnimplementedError( - "DeserializeValue unimplemented type."); + return mediapipe::UnimplementedError("DeserializeValue unimplemented type."); } -::mediapipe::Status ProtoUtilLite::Serialize( +mediapipe::Status ProtoUtilLite::Serialize( const std::vector& text_values, FieldType field_type, std::vector* result) { result->clear(); @@ -373,10 +371,10 @@ static ::mediapipe::Status DeserializeValue(const FieldValue& bytes, MP_RETURN_IF_ERROR(SerializeValue(text_value, field_type, &field_value)); result->push_back(field_value); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ProtoUtilLite::Deserialize( +mediapipe::Status ProtoUtilLite::Deserialize( const std::vector& field_values, FieldType field_type, std::vector* result) { result->clear(); @@ -386,7 +384,7 @@ static ::mediapipe::Status DeserializeValue(const FieldValue& bytes, MP_RETURN_IF_ERROR(DeserializeValue(field_value, field_type, &text_value)); result->push_back(text_value); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace tool diff --git a/mediapipe/framework/tool/proto_util_lite.h b/mediapipe/framework/tool/proto_util_lite.h index 9b2d96324..f39fbd963 100644 --- a/mediapipe/framework/tool/proto_util_lite.h +++ b/mediapipe/framework/tool/proto_util_lite.h @@ -47,7 +47,7 @@ class ProtoUtilLite { FieldAccess(uint32 field_id, FieldType field_type); // Specifies the original serialized protobuf message. - ::mediapipe::Status SetMessage(const FieldValue& message); + mediapipe::Status SetMessage(const FieldValue& message); // Returns the serialized protobuf message with updated field values. void GetMessage(FieldValue* result); @@ -64,23 +64,24 @@ class ProtoUtilLite { // Replace a range of field values nested within a protobuf. // Starting at the proto_path index, "length" values are replaced. - static ::mediapipe::Status ReplaceFieldRange( + static mediapipe::Status ReplaceFieldRange( FieldValue* message, ProtoPath proto_path, int length, FieldType field_type, const std::vector& field_values); // Retrieve a range of field values nested within a protobuf. // Starting at the proto_path index, "length" values are retrieved. - static ::mediapipe::Status GetFieldRange( - const FieldValue& message, ProtoPath proto_path, int length, - FieldType field_type, std::vector* field_values); + static mediapipe::Status GetFieldRange(const FieldValue& message, + ProtoPath proto_path, int length, + FieldType field_type, + std::vector* field_values); // Serialize one or more protobuf field values from text. - static ::mediapipe::Status Serialize( + static mediapipe::Status Serialize( const std::vector& text_values, FieldType field_type, std::vector* result); // Deserialize one or more protobuf field values to text. 
- static ::mediapipe::Status Deserialize( + static mediapipe::Status Deserialize( const std::vector& field_values, FieldType field_type, std::vector* result); }; diff --git a/mediapipe/framework/tool/simulation_clock_test.cc b/mediapipe/framework/tool/simulation_clock_test.cc index 95996f3b9..fca9ebbd0 100644 --- a/mediapipe/framework/tool/simulation_clock_test.cc +++ b/mediapipe/framework/tool/simulation_clock_test.cc @@ -96,7 +96,7 @@ class SimulationClockTest : public ::testing::Test { } // Initialize the test clock as a RealClock. - void SetupRealClock() { clock_ = ::mediapipe::Clock::RealClock(); } + void SetupRealClock() { clock_ = mediapipe::Clock::RealClock(); } // Return the values of the timestamps of a vector of Packets. static std::vector TimestampValues( @@ -119,7 +119,7 @@ class SimulationClockTest : public ::testing::Test { std::shared_ptr simulation_clock_; CalculatorGraphConfig graph_config_; CalculatorGraph graph_; - ::mediapipe::Clock* clock_; + mediapipe::Clock* clock_; }; // Just directly calls SimulationClock::Sleep on several threads. @@ -177,19 +177,19 @@ TEST_F(SimulationClockTest, DuplicateWakeTimes) { } // A Calculator::Process callback function. -typedef std::function<::mediapipe::Status(const InputStreamShardSet&, - OutputStreamShardSet*)> +typedef std::function ProcessFunction; // A testing callback function that passes through all packets. -::mediapipe::Status PassThrough(const InputStreamShardSet& inputs, - OutputStreamShardSet* outputs) { +mediapipe::Status PassThrough(const InputStreamShardSet& inputs, + OutputStreamShardSet* outputs) { for (int i = 0; i < inputs.NumEntries(); ++i) { if (!inputs.Index(i).Value().IsEmpty()) { outputs->Index(i).AddPacket(inputs.Index(i).Value()); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // This test shows sim clock synchronizing a bunch of parallel tasks. diff --git a/mediapipe/framework/tool/sink.cc b/mediapipe/framework/tool/sink.cc index c66413f3b..5382975b7 100644 --- a/mediapipe/framework/tool/sink.cc +++ b/mediapipe/framework/tool/sink.cc @@ -45,22 +45,22 @@ namespace { class MediaPipeInternalSidePacketToPacketStreamCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->InputSidePackets().Index(0).SetAny(); cc->Outputs().Index(0).SetSameAs(&cc->InputSidePackets().Index(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) final { + mediapipe::Status Open(CalculatorContext* cc) final { cc->Outputs().Index(0).AddPacket( cc->InputSidePackets().Index(0).At(Timestamp::PostStream())); cc->Outputs().Index(0).Close(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { // The framework treats this calculator as a source calculator. 
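// (Returning tool::StatusStop(), as below, is how a source calculator
// signals that it has produced all of its packets; see status_util.h
// later in this patch.)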
- return ::mediapipe::tool::StatusStop(); + return mediapipe::tool::StatusStop(); } }; REGISTER_CALCULATOR(MediaPipeInternalSidePacketToPacketStreamCalculator); @@ -222,7 +222,7 @@ void AddCallbackWithHeaderCalculator(const std::string& stream_name, // CallbackCalculator // static -::mediapipe::Status CallbackCalculator::GetContract(CalculatorContract* cc) { +mediapipe::Status CallbackCalculator::GetContract(CalculatorContract* cc) { bool allow_multiple_streams = false; // If the input side packet is specified using tag "CALLBACK" it must contain // a std::function, which may be generated by CallbackPacketCalculator. @@ -237,7 +237,7 @@ void AddCallbackWithHeaderCalculator(const std::string& stream_name, .Set&)>>(); allow_multiple_streams = true; } else { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "InputSidePackets must use tags."; } @@ -246,10 +246,10 @@ void AddCallbackWithHeaderCalculator(const std::string& stream_name, cc->Inputs().Index(i).SetAny(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CallbackCalculator::Open(CalculatorContext* cc) { +mediapipe::Status CallbackCalculator::Open(CalculatorContext* cc) { if (cc->InputSidePackets().HasTag("CALLBACK")) { callback_ = cc->InputSidePackets() .Tag("CALLBACK") @@ -263,13 +263,13 @@ void AddCallbackWithHeaderCalculator(const std::string& stream_name, LOG(FATAL) << "InputSidePackets must use tags."; } if (callback_ == nullptr && vector_callback_ == nullptr) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "missing callback."; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CallbackCalculator::Process(CalculatorContext* cc) { +mediapipe::Status CallbackCalculator::Process(CalculatorContext* cc) { if (callback_) { callback_(cc->Inputs().Index(0).Value()); } else if (vector_callback_) { @@ -281,7 +281,7 @@ void AddCallbackWithHeaderCalculator(const std::string& stream_name, } vector_callback_(packets); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } REGISTER_CALCULATOR(CallbackCalculator); @@ -289,7 +289,7 @@ REGISTER_CALCULATOR(CallbackCalculator); // CallbackWithHeaderCalculator // static -::mediapipe::Status CallbackWithHeaderCalculator::GetContract( +mediapipe::Status CallbackWithHeaderCalculator::GetContract( CalculatorContract* cc) { cc->Inputs().Tag("INPUT").SetAny(); cc->Inputs().Tag("HEADER").SetAny(); @@ -300,13 +300,13 @@ REGISTER_CALCULATOR(CallbackCalculator); .Tag("CALLBACK") .Set>(); } else { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "InputSidePackets must use tags."; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CallbackWithHeaderCalculator::Open(CalculatorContext* cc) { +mediapipe::Status CallbackWithHeaderCalculator::Open(CalculatorContext* cc) { if (cc->InputSidePackets().UsesTags()) { callback_ = cc->InputSidePackets() .Tag("CALLBACK") @@ -315,17 +315,17 @@ REGISTER_CALCULATOR(CallbackCalculator); LOG(FATAL) << "InputSidePackets must use tags."; } if (callback_ == nullptr) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "callback is nullptr."; } if (!cc->Inputs().HasTag("INPUT")) { - return 
::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "No input stream connected."; } if (!cc->Inputs().HasTag("HEADER")) { // Note: for the current MediaPipe header implementation, we just need to // connect the output stream to both of the two inputs: INPUT and HEADER. - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "No header stream connected."; } // If the input stream has the header, just use it as the header. Otherwise, @@ -333,16 +333,15 @@ REGISTER_CALCULATOR(CallbackCalculator); if (!cc->Inputs().Tag("INPUT").Header().IsEmpty()) { header_packet_ = cc->Inputs().Tag("INPUT").Header(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CallbackWithHeaderCalculator::Process( - CalculatorContext* cc) { +mediapipe::Status CallbackWithHeaderCalculator::Process(CalculatorContext* cc) { if (!cc->Inputs().Tag("INPUT").Value().IsEmpty() && header_packet_.IsEmpty()) { // Header packet should be available before we receive any normal input // stream packet. - return ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "Header not available!"; } if (header_packet_.IsEmpty() && @@ -352,7 +351,7 @@ REGISTER_CALCULATOR(CallbackCalculator); if (!cc->Inputs().Tag("INPUT").Value().IsEmpty()) { callback_(cc->Inputs().Tag("INPUT").Value(), header_packet_); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } REGISTER_CALCULATOR(CallbackWithHeaderCalculator); diff --git a/mediapipe/framework/tool/sink.h b/mediapipe/framework/tool/sink.h index 2f244d9f1..f563f603f 100644 --- a/mediapipe/framework/tool/sink.h +++ b/mediapipe/framework/tool/sink.h @@ -166,10 +166,10 @@ class CallbackCalculator : public CalculatorBase { ~CallbackCalculator() override {} - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: std::function callback_; @@ -185,10 +185,10 @@ class CallbackWithHeaderCalculator : public CalculatorBase { ~CallbackWithHeaderCalculator() override {} - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: std::function callback_; diff --git a/mediapipe/framework/tool/sink_test.cc b/mediapipe/framework/tool/sink_test.cc index d5223041c..4e64a5e8d 100644 --- a/mediapipe/framework/tool/sink_test.cc +++ b/mediapipe/framework/tool/sink_test.cc @@ -31,21 +31,21 @@ namespace mediapipe { namespace { class CountAndOutputSummarySidePacketInCloseCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Index(0).SetAny(); cc->OutputSidePackets().Index(0).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + 
mediapipe::Status Process(CalculatorContext* cc) final { ++count_; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Close(CalculatorContext* cc) final { + mediapipe::Status Close(CalculatorContext* cc) final { cc->OutputSidePackets().Index(0).Set( MakePacket(count_).At(Timestamp::Unset())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } int count_ = 0; @@ -75,7 +75,7 @@ TEST(CallbackFromGeneratorTest, TestAddVectorSink) { TEST(CalculatorGraph, OutputSummarySidePacketInClose) { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "input_packets" node { calculator: "CountAndOutputSummarySidePacketInCloseCalculator" diff --git a/mediapipe/framework/tool/source.cc b/mediapipe/framework/tool/source.cc index 89b6e1841..3bdc0faa2 100644 --- a/mediapipe/framework/tool/source.cc +++ b/mediapipe/framework/tool/source.cc @@ -43,19 +43,19 @@ class SidePacketsToStreamsCalculator : public CalculatorBase { const SidePacketsToStreamsCalculator&) = delete; ~SidePacketsToStreamsCalculator() override {} - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { auto& options = cc->Options(); if (options.has_num_inputs() && (options.num_inputs() != cc->InputSidePackets().NumEntries() || options.num_inputs() != cc->Outputs().NumEntries())) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "If num_inputs is specified it must be equal to the number of " "input side packets and output streams."); } if (!options.vectors_of_packets() && options.set_timestamp() == SidePacketsToStreamsCalculatorOptions::NONE) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "If set_timestamp is NONE, vectors_of_packets must not be false."); } for (int i = 0; i < cc->InputSidePackets().NumEntries(); ++i) { @@ -72,10 +72,10 @@ class SidePacketsToStreamsCalculator : public CalculatorBase { cc->Outputs().Index(i).SetSameAs(&cc->InputSidePackets().Index(i)); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) final { + mediapipe::Status Process(CalculatorContext* cc) final { const auto& options = cc->Options(); // The i-th input side packet contains a vector of packets corresponding // to the values of this input for all batch elements. 
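The TestAddVectorSink case above exercises tool::AddVectorSink from sink.h, which appends a callback node that records a stream's packets into a vector. A minimal sketch, with an illustrative graph:

  CalculatorGraphConfig config =
      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
        input_stream: "in"
        node {
          calculator: "PassThroughCalculator"
          input_stream: "in"
          output_stream: "out"
        }
      )");
  std::vector<Packet> out_packets;
  tool::AddVectorSink("out", &config, &out_packets);
  CalculatorGraph graph;
  MP_ASSERT_OK(graph.Initialize(config));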
@@ -87,7 +87,7 @@ class SidePacketsToStreamsCalculator : public CalculatorBase { const auto& packets = input_side_packet.Get>(); if (batch_size >= 0) { if (packets.size() != batch_size) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "The specified input side packets contain vectors of different " "sizes."); } diff --git a/mediapipe/framework/tool/status_util.cc b/mediapipe/framework/tool/status_util.cc index 6f712c364..ad5a69a2c 100644 --- a/mediapipe/framework/tool/status_util.cc +++ b/mediapipe/framework/tool/status_util.cc @@ -22,47 +22,46 @@ namespace mediapipe { namespace tool { -::mediapipe::Status StatusInvalid(const std::string& message) { - return ::mediapipe::Status(::mediapipe::StatusCode::kInvalidArgument, - message); +mediapipe::Status StatusInvalid(const std::string& message) { + return mediapipe::Status(mediapipe::StatusCode::kInvalidArgument, message); } -::mediapipe::Status StatusFail(const std::string& message) { - return ::mediapipe::Status(::mediapipe::StatusCode::kUnknown, message); +mediapipe::Status StatusFail(const std::string& message) { + return mediapipe::Status(mediapipe::StatusCode::kUnknown, message); } -::mediapipe::Status StatusStop() { - return ::mediapipe::Status(::mediapipe::StatusCode::kOutOfRange, - "::mediapipe::tool::StatusStop()"); +mediapipe::Status StatusStop() { + return mediapipe::Status(mediapipe::StatusCode::kOutOfRange, + "mediapipe::tool::StatusStop()"); } -::mediapipe::Status AddStatusPrefix(const std::string& prefix, - const ::mediapipe::Status& status) { - return ::mediapipe::Status(status.code(), - absl::StrCat(prefix, status.message())); +mediapipe::Status AddStatusPrefix(const std::string& prefix, + const mediapipe::Status& status) { + return mediapipe::Status(status.code(), + absl::StrCat(prefix, status.message())); } -::mediapipe::Status CombinedStatus( +mediapipe::Status CombinedStatus( const std::string& general_comment, - const std::vector<::mediapipe::Status>& statuses) { - // The final error code is ::mediapipe::StatusCode::kUnknown if not all + const std::vector& statuses) { + // The final error code is mediapipe::StatusCode::kUnknown if not all // the error codes are the same. Otherwise it is the same error code // as all of the (non-OK) statuses. If statuses is empty or they are - // all OK, then ::mediapipe::OkStatus() is returned. - ::mediapipe::StatusCode error_code = ::mediapipe::StatusCode::kOk; + // all OK, then mediapipe::OkStatus() is returned. + mediapipe::StatusCode error_code = mediapipe::StatusCode::kOk; std::vector errors; - for (const ::mediapipe::Status& status : statuses) { + for (const mediapipe::Status& status : statuses) { if (!status.ok()) { errors.emplace_back(status.message()); - if (error_code == ::mediapipe::StatusCode::kOk) { + if (error_code == mediapipe::StatusCode::kOk) { error_code = status.code(); } else if (error_code != status.code()) { - error_code = ::mediapipe::StatusCode::kUnknown; + error_code = mediapipe::StatusCode::kUnknown; } } } if (error_code == StatusCode::kOk) return OkStatus(); - Status combined = ::mediapipe::Status( + Status combined = mediapipe::Status( error_code, absl::StrCat(general_comment, "\n", absl::StrJoin(errors, "\n"))); return combined; diff --git a/mediapipe/framework/tool/status_util.h b/mediapipe/framework/tool/status_util.h index b9c4235a1..92e0c9fab 100644 --- a/mediapipe/framework/tool/status_util.h +++ b/mediapipe/framework/tool/status_util.h @@ -29,31 +29,31 @@ namespace tool { // be called on it again). 
When returned from a non-source Calculator // it signals that the graph should be cancelled (which is handled by // closing all source Calculators and waiting for the graph to finish). -::mediapipe::Status StatusStop(); +mediapipe::Status StatusStop(); // Return a status which signals an invalid initial condition (for // example an InputSidePacket does not include all necessary fields). -ABSL_DEPRECATED("Use ::mediapipe::InvalidArgumentError(error_message) instead.") -::mediapipe::Status StatusInvalid(const std::string& error_message); +ABSL_DEPRECATED("Use mediapipe::InvalidArgumentError(error_message) instead.") +mediapipe::Status StatusInvalid(const std::string& error_message); // Return a status which signals that something unexpectedly failed. -ABSL_DEPRECATED("Use ::mediapipe::UnknownError(error_message) instead.") -::mediapipe::Status StatusFail(const std::string& error_message); +ABSL_DEPRECATED("Use mediapipe::UnknownError(error_message) instead.") +mediapipe::Status StatusFail(const std::string& error_message); // Prefixes the given std::string to the error message in status. // This function should be considered internal to the framework. // TODO Replace usage of AddStatusPrefix with util::Annotate(). -::mediapipe::Status AddStatusPrefix(const std::string& prefix, - const ::mediapipe::Status& status); +mediapipe::Status AddStatusPrefix(const std::string& prefix, + const mediapipe::Status& status); -// Combine a vector of ::mediapipe::Status into a single composite status. -// If statuses is empty or all statuses are OK then ::mediapipe::OkStatus() +// Combine a vector of mediapipe::Status into a single composite status. +// If statuses is empty or all statuses are OK then mediapipe::OkStatus() // will be returned. // This function should be considered internal to the framework. // TODO Move this function to somewhere with less visibility. 
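// Examples, per the implementation in status_util.cc above:
//   {}                             -> OkStatus()
//   {kOk, kInvalidArgument}        -> kInvalidArgument
//   {kNotFound, kInvalidArgument}  -> kUnknown (codes disagree)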
-::mediapipe::Status CombinedStatus( +mediapipe::Status CombinedStatus( const std::string& general_comment, - const std::vector<::mediapipe::Status>& statuses); + const std::vector<mediapipe::Status>& statuses); } // namespace tool } // namespace mediapipe diff --git a/mediapipe/framework/tool/status_util_test.cc b/mediapipe/framework/tool/status_util_test.cc index 27373d87a..711cb4148 100644 --- a/mediapipe/framework/tool/status_util_test.cc +++ b/mediapipe/framework/tool/status_util_test.cc @@ -36,24 +36,24 @@ TEST(StatusTest, StatusStopIsNotOk) { EXPECT_FALSE(tool::StatusStop().ok()); } TEST(StatusTest, Prefix) { const std::string base_error_message("error_with_this_string"); const std::string prefix_error_message("error_with_prefix: "); - ::mediapipe::Status base_status = ::mediapipe::Status( - ::mediapipe::StatusCode::kInvalidArgument, base_error_message); - ::mediapipe::Status status = + mediapipe::Status base_status = mediapipe::Status( + mediapipe::StatusCode::kInvalidArgument, base_error_message); + mediapipe::Status status = tool::AddStatusPrefix(prefix_error_message, base_status); EXPECT_THAT(status.ToString(), HasSubstr(base_error_message)); EXPECT_THAT(status.ToString(), HasSubstr(prefix_error_message)); - EXPECT_EQ(::mediapipe::StatusCode::kInvalidArgument, status.code()); + EXPECT_EQ(mediapipe::StatusCode::kInvalidArgument, status.code()); } TEST(StatusTest, CombinedStatus) { - std::vector<::mediapipe::Status> errors; + std::vector<mediapipe::Status> errors; const std::string prefix_error_message("error_with_prefix: "); - ::mediapipe::Status status; + mediapipe::Status status; errors.clear(); - errors.emplace_back(::mediapipe::StatusCode::kInvalidArgument, + errors.emplace_back(mediapipe::StatusCode::kInvalidArgument, "error_with_this_string"); - errors.emplace_back(::mediapipe::StatusCode::kInvalidArgument, + errors.emplace_back(mediapipe::StatusCode::kInvalidArgument, "error_with_that_string"); errors.back().SetPayload("test payload type", absl::Cord(absl::string_view("hello"))); @@ -61,30 +61,30 @@ TEST(StatusTest, CombinedStatus) { EXPECT_THAT(status.ToString(),
HasSubstr(std::string(errors[1].message()))); EXPECT_THAT(status.ToString(), HasSubstr(prefix_error_message)); - EXPECT_EQ(::mediapipe::StatusCode::kInvalidArgument, status.code()); + EXPECT_EQ(mediapipe::StatusCode::kInvalidArgument, status.code()); errors.clear(); - errors.emplace_back(::mediapipe::StatusCode::kOk, "error_with_this_string"); - errors.emplace_back(::mediapipe::StatusCode::kOk, "error_with_that_string"); + errors.emplace_back(mediapipe::StatusCode::kOk, "error_with_this_string"); + errors.emplace_back(mediapipe::StatusCode::kOk, "error_with_that_string"); MP_EXPECT_OK(tool::CombinedStatus(prefix_error_message, errors)); errors.clear(); @@ -93,13 +93,13 @@ TEST(StatusTest, CombinedStatus) { // Verify tool::StatusInvalid() and tool::StatusFail() and the alternatives // recommended by their ABSL_DEPRECATED messages return the same -// ::mediapipe::Status objects. +// mediapipe::Status objects. TEST(StatusTest, Deprecated) { const std::string error_message = "an error message"; EXPECT_EQ(tool::StatusInvalid(error_message), // NOLINT - ::mediapipe::InvalidArgumentError(error_message)); + mediapipe::InvalidArgumentError(error_message)); EXPECT_EQ(tool::StatusFail(error_message), // NOLINT - ::mediapipe::UnknownError(error_message)); + mediapipe::UnknownError(error_message)); } } // namespace diff --git a/mediapipe/framework/tool/subgraph_expansion.cc b/mediapipe/framework/tool/subgraph_expansion.cc index efa0d8e32..9483048fa 100644 --- a/mediapipe/framework/tool/subgraph_expansion.cc +++ b/mediapipe/framework/tool/subgraph_expansion.cc @@ -42,7 +42,7 @@ namespace mediapipe { namespace tool { -::mediapipe::Status TransformStreamNames( +mediapipe::Status TransformStreamNames( proto_ns::RepeatedPtrField* streams, const std::function& transform) { for (auto& stream : *streams) { @@ -53,11 +53,11 @@ namespace tool { absl::StrCat(port_and_name.substr(0, name_pos), transform(absl::ClippedSubstr(port_and_name, name_pos))); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Returns subgraph streams not requested by a subgraph-node. -::mediapipe::Status FindIgnoredStreams( +mediapipe::Status FindIgnoredStreams( const proto_ns::RepeatedPtrField& src_streams, const proto_ns::RepeatedPtrField& dst_streams, std::set* result) { @@ -69,11 +69,11 @@ namespace tool { result->insert(src_map->Names()[id.value()]); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Removes subgraph streams not requested by a subgraph-node. -::mediapipe::Status RemoveIgnoredStreams( +mediapipe::Status RemoveIgnoredStreams( proto_ns::RepeatedPtrField* streams, const std::set& missing_streams) { for (int i = streams->size() - 1; i >= 0; --i) { @@ -84,10 +84,10 @@ namespace tool { streams->DeleteSubrange(i, 1); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TransformNames( +mediapipe::Status TransformNames( CalculatorGraphConfig* config, const std::function& transform) { RET_CHECK_EQ(config->packet_factory().size(), 0); @@ -122,7 +122,7 @@ namespace tool { MP_RETURN_IF_ERROR(TransformStreamNames( status_handler.mutable_input_side_packet(), transform)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Adds a prefix to the name of each stream, side packet and node in the @@ -131,8 +131,8 @@ namespace tool { // 2, { foo, bar } --PrefixNames-> { rsg__foo, rsg__bar } // This means that two copies of the same subgraph will not interfere with // each other. 
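// Usage sketch for TransformStreamNames above (assuming the transform
// parameter is a std::function<std::string(absl::string_view)>):
//   MP_RETURN_IF_ERROR(tool::TransformStreamNames(
//       node.mutable_input_stream(),
//       [](absl::string_view s) { return absl::StrCat(s, "_foo"); }));
// Only the name part changes; any "TAG:" prefix is preserved, so
// "VIDEO:frames" becomes "VIDEO:frames_foo".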
-static ::mediapipe::Status PrefixNames(std::string prefix, - CalculatorGraphConfig* config) { +static mediapipe::Status PrefixNames(std::string prefix, + CalculatorGraphConfig* config) { std::transform(prefix.begin(), prefix.end(), prefix.begin(), ::tolower); std::replace(prefix.begin(), prefix.end(), '.', '_'); std::replace(prefix.begin(), prefix.end(), ' ', '_'); @@ -144,7 +144,7 @@ static ::mediapipe::Status PrefixNames(std::string prefix, return TransformNames(config, add_prefix); } -::mediapipe::Status FindCorrespondingStreams( +mediapipe::Status FindCorrespondingStreams( std::map* stream_map, const proto_ns::RepeatedPtrField& src_streams, const proto_ns::RepeatedPtrField& dst_streams) { @@ -153,16 +153,16 @@ static ::mediapipe::Status PrefixNames(std::string prefix, for (const auto& it : dst_map->Mapping()) { const std::string& tag = it.first; const TagMap::TagData* src_tag_data = - ::mediapipe::FindOrNull(src_map->Mapping(), tag); + mediapipe::FindOrNull(src_map->Mapping(), tag); if (!src_tag_data) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Tag \"" << tag << "\" does not exist in the subgraph config."; } const TagMap::TagData& dst_tag_data = it.second; CollectionItemId src_id = src_tag_data->id; CollectionItemId dst_id = dst_tag_data.id; if (dst_tag_data.count > src_tag_data->count) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Tag \"" << tag << "\" has " << dst_tag_data.count << " indexes in the subgraph node but has only " << src_tag_data->count << " indexes in the subgraph config."; @@ -175,28 +175,28 @@ static ::mediapipe::Status PrefixNames(std::string prefix, (*stream_map)[src_name] = dst_name; } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // The following fields can be used in a Node message for a subgraph: // name, calculator, input_stream, output_stream, input_side_packet, // output_side_packet, options. // All other fields are only applicable to calculators. -::mediapipe::Status ValidateSubgraphFields( +mediapipe::Status ValidateSubgraphFields( const CalculatorGraphConfig::Node& subgraph_node) { if (subgraph_node.source_layer() || subgraph_node.buffer_size_hint() || subgraph_node.has_input_stream_handler() || subgraph_node.has_output_stream_handler() || subgraph_node.input_stream_info_size() != 0 || !subgraph_node.executor().empty()) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Subgraph \"" << subgraph_node.name() << "\" has a field that is only applicable to calculators."; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ConnectSubgraphStreams( +mediapipe::Status ConnectSubgraphStreams( const CalculatorGraphConfig::Node& subgraph_node, CalculatorGraphConfig* subgraph_config) { std::map stream_map; @@ -237,7 +237,7 @@ static ::mediapipe::Status PrefixNames(std::string prefix, std::map* name_map; auto replace_names = [&name_map](absl::string_view s) { std::string original(s); - std::string* replacement = ::mediapipe::FindOrNull(*name_map, original); + std::string* replacement = mediapipe::FindOrNull(*name_map, original); return replacement ? 
*replacement : original; }; for (auto& node : *subgraph_config->mutable_node()) { @@ -269,11 +269,11 @@ static ::mediapipe::Status PrefixNames(std::string prefix, MP_RETURN_IF_ERROR(RemoveIgnoredStreams( generator.mutable_input_side_packet(), ignored_input_side_packets)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ExpandSubgraphs(CalculatorGraphConfig* config, - const GraphRegistry* graph_registry) { +mediapipe::Status ExpandSubgraphs(CalculatorGraphConfig* config, + const GraphRegistry* graph_registry) { graph_registry = graph_registry ? graph_registry : &GraphRegistry::global_graph_registry; RET_CHECK(config); @@ -313,7 +313,7 @@ static ::mediapipe::Status PrefixNames(std::string prefix, config->mutable_status_handler())); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } CalculatorGraphConfig MakeSingleNodeGraph(CalculatorGraphConfig::Node node) { diff --git a/mediapipe/framework/tool/subgraph_expansion.h b/mediapipe/framework/tool/subgraph_expansion.h index 2fc994d15..6c422a72a 100644 --- a/mediapipe/framework/tool/subgraph_expansion.h +++ b/mediapipe/framework/tool/subgraph_expansion.h @@ -29,13 +29,13 @@ namespace tool { // Apply the given transformation function to the names of streams and // side packets. -::mediapipe::Status TransformStreamNames( +mediapipe::Status TransformStreamNames( proto_ns::RepeatedPtrField* streams, const std::function& transform); // Apply the given transformation function to the names of streams, // side packets, and nodes. -::mediapipe::Status TransformNames( +mediapipe::Status TransformNames( CalculatorGraphConfig* config, const std::function& transform); @@ -48,7 +48,7 @@ namespace tool { // src: FOO:abc dst: FOO:bob // BAR:def // The entry 'abc' -> 'bob' is added to the map. -::mediapipe::Status FindCorrespondingStreams( +mediapipe::Status FindCorrespondingStreams( std::map* stream_map, const proto_ns::RepeatedPtrField& src_streams, const proto_ns::RepeatedPtrField& dst_streams); @@ -56,19 +56,19 @@ namespace tool { // Validates the fields in the given Node message that specifies a subgraph. // Returns an error status if the Node message contains any field that is only // applicable to calculators. -::mediapipe::Status ValidateSubgraphFields( +mediapipe::Status ValidateSubgraphFields( const CalculatorGraphConfig::Node& subgraph_node); // Renames the streams in a subgraph config to match the connections on the // wrapping node. -::mediapipe::Status ConnectSubgraphStreams( +mediapipe::Status ConnectSubgraphStreams( const CalculatorGraphConfig::Node& subgraph_node, CalculatorGraphConfig* subgraph_config); // Replaces subgraph nodes in the given config with the contents of the // corresponding subgraphs. Nested subgraphs are retrieved from the // graph registry and expanded recursively. 
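// Expansion is recursive: an expanded subgraph may itself contain
// subgraph nodes, as EnclosingSubgraph exercises in the tests below.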
-::mediapipe::Status ExpandSubgraphs( +mediapipe::Status ExpandSubgraphs( CalculatorGraphConfig* config, const GraphRegistry* graph_registry = nullptr); diff --git a/mediapipe/framework/tool/subgraph_expansion_test.cc b/mediapipe/framework/tool/subgraph_expansion_test.cc index 3eb38f39f..7de3f80f6 100644 --- a/mediapipe/framework/tool/subgraph_expansion_test.cc +++ b/mediapipe/framework/tool/subgraph_expansion_test.cc @@ -38,10 +38,10 @@ namespace { class SimpleTestCalculator : public CalculatorBase { public: - ::mediapipe::Status Process(CalculatorContext* cc) override { - return ::mediapipe::OkStatus(); + mediapipe::Status Process(CalculatorContext* cc) override { + return mediapipe::OkStatus(); } - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { for (PacketType& type : cc->Inputs()) { type.Set(); } @@ -51,7 +51,7 @@ class SimpleTestCalculator : public CalculatorBase { for (PacketType& type : cc->InputSidePackets()) { type.Set(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(SimpleTestCalculator); @@ -66,10 +66,10 @@ REGISTER_CALCULATOR(SomeAggregator); class TestSubgraph : public Subgraph { public: - ::mediapipe::StatusOr GetConfig( + mediapipe::StatusOr GetConfig( const SubgraphOptions& /*options*/) override { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "DATA:input_1" node { name: "regular_node" @@ -95,10 +95,10 @@ REGISTER_MEDIAPIPE_GRAPH(TestSubgraph); class PacketFactoryTestSubgraph : public Subgraph { public: - ::mediapipe::StatusOr GetConfig( + mediapipe::StatusOr GetConfig( const SubgraphOptions& /*options*/) override { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "DATA:input_1" node { name: "regular_node" @@ -126,7 +126,7 @@ REGISTER_MEDIAPIPE_GRAPH(PacketFactoryTestSubgraph); // and the number of copies of the node are specified in subgraph options. class NodeChainSubgraph : public Subgraph { public: - ::mediapipe::StatusOr GetConfig( + mediapipe::StatusOr GetConfig( const SubgraphOptions& options) override { auto opts = Subgraph::GetOptions(options); @@ -152,10 +152,10 @@ REGISTER_MEDIAPIPE_GRAPH(NodeChainSubgraph); // subgraph contains a node with the executor field "custom_thread_pool". class NodeWithExecutorSubgraph : public Subgraph { public: - ::mediapipe::StatusOr GetConfig( + mediapipe::StatusOr GetConfig( const SubgraphOptions& options) override { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "INPUT:foo" output_stream: "OUTPUT:bar" node { @@ -174,10 +174,10 @@ REGISTER_MEDIAPIPE_GRAPH(NodeWithExecutorSubgraph); // subgraph contains a NodeWithExecutorSubgraph. 
class EnclosingSubgraph : public Subgraph { public: - ::mediapipe::StatusOr GetConfig( + mediapipe::StatusOr GetConfig( const SubgraphOptions& options) override { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "IN:in" output_stream: "OUT:out" node { @@ -193,7 +193,7 @@ REGISTER_MEDIAPIPE_GRAPH(EnclosingSubgraph); TEST(SubgraphExpansionTest, TransformStreamNames) { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( node { calculator: "SomeSinkCalculator" input_stream: "input_1" @@ -203,7 +203,7 @@ TEST(SubgraphExpansionTest, TransformStreamNames) { } )"); CalculatorGraphConfig expected_config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( node { calculator: "SomeSinkCalculator" input_stream: "input_1_foo" @@ -220,7 +220,7 @@ TEST(SubgraphExpansionTest, TransformStreamNames) { TEST(SubgraphExpansionTest, TransformNames) { CalculatorGraphConfig config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "input_1" node { calculator: "SomeRegularCalculator" @@ -238,7 +238,7 @@ TEST(SubgraphExpansionTest, TransformNames) { } )"); CalculatorGraphConfig expected_config = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "__sg0_input_1" node { calculator: "SomeRegularCalculator" @@ -265,14 +265,14 @@ TEST(SubgraphExpansionTest, TransformNames) { TEST(SubgraphExpansionTest, FindCorrespondingStreams) { CalculatorGraphConfig config1 = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "input_1" input_stream: "VIDEO:input_2" input_stream: "AUDIO:0:input_3" input_stream: "AUDIO:1:input_4" )"); CalculatorGraphConfig config2 = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( node { calculator: "SomeSubgraph" input_stream: "foo" @@ -294,13 +294,13 @@ TEST(SubgraphExpansionTest, FindCorrespondingStreams) { TEST(SubgraphExpansionTest, FindCorrespondingStreamsNonexistentTag) { // The VIDEO tag does not exist in the subgraph. CalculatorGraphConfig config1 = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "input_1" input_stream: "AUDIO:0:input_3" input_stream: "AUDIO:1:input_4" )"); CalculatorGraphConfig config2 = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( node { calculator: "SomeSubgraph" input_stream: "foo" @@ -324,13 +324,13 @@ TEST(SubgraphExpansionTest, FindCorrespondingStreamsNonexistentTag) { TEST(SubgraphExpansionTest, FindCorrespondingStreamsTooFewIndexes) { // The AUDIO tag has too few indexes in the subgraph (1 vs. 2). 
CalculatorGraphConfig config1 = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "input_1" input_stream: "VIDEO:input_2" input_stream: "AUDIO:0:input_3" )"); CalculatorGraphConfig config2 = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( node { calculator: "SomeSubgraph" input_stream: "foo" @@ -353,7 +353,7 @@ TEST(SubgraphExpansionTest, FindCorrespondingStreamsTooFewIndexes) { TEST(SubgraphExpansionTest, ConnectSubgraphStreams) { CalculatorGraphConfig subgraph = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "A:input_1" input_stream: "B:input_2" output_stream: "O:output_2" @@ -379,7 +379,7 @@ TEST(SubgraphExpansionTest, ConnectSubgraphStreams) { } )"); CalculatorGraphConfig supergraph = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( node { calculator: "SomeSubgraph" input_stream: "A:foo" @@ -392,7 +392,7 @@ TEST(SubgraphExpansionTest, ConnectSubgraphStreams) { // Note: graph input streams, output streams, and side packets on the // subgraph are not changed because they are going to be discarded anyway. CalculatorGraphConfig expected_subgraph = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "A:input_1" input_stream: "B:input_2" output_stream: "O:output_2" @@ -423,7 +423,7 @@ TEST(SubgraphExpansionTest, ConnectSubgraphStreams) { TEST(SubgraphExpansionTest, ExpandSubgraphs) { CalculatorGraphConfig supergraph = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( node { name: "simple_source" calculator: "SomeSourceCalculator" @@ -432,7 +432,7 @@ TEST(SubgraphExpansionTest, ExpandSubgraphs) { node { calculator: "TestSubgraph" input_stream: "DATA:foo" } )"); CalculatorGraphConfig expected_graph = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( node { name: "simple_source" calculator: "SomeSourceCalculator" @@ -461,7 +461,7 @@ TEST(SubgraphExpansionTest, ExpandSubgraphs) { TEST(SubgraphExpansionTest, ValidateSubgraphFields) { CalculatorGraphConfig supergraph = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( node { name: "simple_source" calculator: "SomeSourceCalculator" @@ -474,12 +474,12 @@ TEST(SubgraphExpansionTest, ValidateSubgraphFields) { buffer_size_hint: -1 # This field is only applicable to calculators. } )"); - ::mediapipe::Status s1 = tool::ValidateSubgraphFields(supergraph.node(1)); - EXPECT_EQ(s1.code(), ::mediapipe::StatusCode::kInvalidArgument); + mediapipe::Status s1 = tool::ValidateSubgraphFields(supergraph.node(1)); + EXPECT_EQ(s1.code(), mediapipe::StatusCode::kInvalidArgument); EXPECT_THAT(s1.message(), testing::HasSubstr("foo_subgraph")); - ::mediapipe::Status s2 = tool::ExpandSubgraphs(&supergraph); - EXPECT_EQ(s2.code(), ::mediapipe::StatusCode::kInvalidArgument); + mediapipe::Status s2 = tool::ExpandSubgraphs(&supergraph); + EXPECT_EQ(s2.code(), mediapipe::StatusCode::kInvalidArgument); EXPECT_THAT(s2.message(), testing::HasSubstr("foo_subgraph")); } @@ -489,7 +489,7 @@ TEST(SubgraphExpansionTest, ValidateSubgraphFields) { // subgraph executor support in the future. 
TEST(SubgraphExpansionTest, ExecutorFieldOfNodeInSubgraphPreserved) { CalculatorGraphConfig supergraph = - ::mediapipe::ParseTextProtoOrDie(R"( + mediapipe::ParseTextProtoOrDie(R"( input_stream: "input" executor { name: "custom_thread_pool" @@ -504,7 +504,7 @@ TEST(SubgraphExpansionTest, ExecutorFieldOfNodeInSubgraphPreserved) { output_stream: "OUT:output" } )"); - CalculatorGraphConfig expected_graph = ::mediapipe::ParseTextProtoOrDie< + CalculatorGraphConfig expected_graph = mediapipe::ParseTextProtoOrDie< CalculatorGraphConfig>(R"( input_stream: "input" executor { diff --git a/mediapipe/framework/tool/switch_container.cc b/mediapipe/framework/tool/switch_container.cc new file mode 100644 index 000000000..b923460a3 --- /dev/null +++ b/mediapipe/framework/tool/switch_container.cc @@ -0,0 +1,308 @@ +// Copyright 2019 The MediaPipe Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include + +#include "mediapipe/framework/calculator.pb.h" +#include "mediapipe/framework/calculator_framework.h" +#include "mediapipe/framework/mediapipe_options.pb.h" +#include "mediapipe/framework/port/canonical_errors.h" +#include "mediapipe/framework/port/ret_check.h" +#include "mediapipe/framework/port/status.h" +#include "mediapipe/framework/stream_handler.pb.h" +#include "mediapipe/framework/stream_handler/sync_set_input_stream_handler.pb.h" +#include "mediapipe/framework/tool/container_util.h" +#include "mediapipe/framework/tool/name_util.h" +#include "mediapipe/framework/tool/subgraph_expansion.h" +#include "mediapipe/framework/tool/switch_container.pb.h" + +namespace mediapipe { +namespace tool { +using mediapipe::SwitchContainerOptions; + +// A graph factory producing a CalculatorGraphConfig routing packets to +// one of several contained CalculatorGraphConfigs. +// +// Usage example: +// +// node { +// calculator: "SwitchContainer" +// input_stream: "ENABLE:enable" +// input_stream: "INPUT_VIDEO:video_frames" +// output_stream: "OUTPUT_VIDEO:output_frames" +// options { +// [mediapipe.SwitchContainerOptions.ext] { +// contained_node: { calculator: "BasicSubgraph" } +// contained_node: { calculator: "AdvancedSubgraph" } +// } +// } +// } +// +// Note that the input and output stream tags supplied to the container node +// must match the input and output stream tags required by the contained nodes, +// such as "INPUT_VIDEO" and "OUTPUT_VIDEO" in the example above. +// +// Input stream "ENABLE" specifies routing of packets to either contained_node 0 +// or contained_node 1, given "ENABLE:false" or "ENABLE:true" respectively. +// Input-side-packet "ENABLE" and input-stream "SELECT" can also be used +// similarly to specify the active channel. +class SwitchContainer : public Subgraph { + public: + SwitchContainer() = default; + mediapipe::StatusOr GetConfig( + const Subgraph::SubgraphOptions& options) override; +}; +REGISTER_MEDIAPIPE_GRAPH(SwitchContainer); + +using TagIndex = std::pair; + +// Returns the stream name for one of the demux output channels. 
+// This is the channel number followed by the stream name, separated by "__".
+// For example, the channel name for stream "frame" on channel 1 is "c1__frame".
+std::string ChannelName(const std::string& name, int channel) {
+  return absl::StrCat("c", channel, "__", name);
+}
+
+// Returns a SwitchDemuxCalculator node.
+CalculatorGraphConfig::Node* BuildDemuxNode(
+    const std::map<TagIndex, std::string>& input_tags,
+    CalculatorGraphConfig* config) {
+  CalculatorGraphConfig::Node* result = config->add_node();
+  *result->mutable_calculator() = "SwitchDemuxCalculator";
+  return result;
+}
+
+// Returns a SwitchMuxCalculator node.
+CalculatorGraphConfig::Node* BuildMuxNode(
+    const std::map<TagIndex, std::string>& output_tags,
+    CalculatorGraphConfig* config) {
+  CalculatorGraphConfig::Node* result = config->add_node();
+  *result->mutable_calculator() = "SwitchMuxCalculator";
+  return result;
+}
+
+// Returns an unused name similar to a specified name.
+std::string UniqueName(std::string name, std::set<std::string>* names) {
+  CHECK(names != nullptr);
+  std::string result = name;
+  int suffix = 2;
+  while (names->count(result) > 0) {
+    result = absl::StrCat(name, "_", suffix++);
+  }
+  names->insert(result);
+  return result;
+}
+
+// Parses tag, index, and name from a list of stream identifiers.
+void ParseTags(const proto_ns::RepeatedPtrField<std::string>& streams,
+               std::map<TagIndex, std::string>* result) {
+  CHECK(result != nullptr);
+  std::set<std::string> used_names;
+  int used_index = -1;
+  for (const std::string& stream : streams) {
+    std::string name = UniqueName(ParseNameFromStream(stream), &used_names);
+    TagIndex tag_index = ParseTagIndexFromStream(stream);
+    if (tag_index.second == -1) {
+      tag_index.second = ++used_index;
+    }
+    result->insert({tag_index, name});
+  }
+}
+
+// Removes the entry for a tag and index from a map.
+void EraseTag(const std::string& stream,
+              std::map<TagIndex, std::string>* streams) {
+  CHECK(streams != nullptr);
+  streams->erase(ParseTagIndexFromStream(absl::StrCat(stream, ":u")));
+}
+
+// Removes the entry for a tag and index from a list.
+void EraseTag(const std::string& stream,
+              proto_ns::RepeatedPtrField<std::string>* streams) {
+  CHECK(streams != nullptr);
+  TagIndex stream_tag = ParseTagIndexFromStream(absl::StrCat(stream, ":u"));
+  for (int i = streams->size() - 1; i >= 0; --i) {
+    TagIndex tag = ParseTagIndexFromStream(streams->at(i));
+    if (tag == stream_tag) {
+      streams->erase(streams->begin() + i);
+    }
+  }
+}
+
+// Returns the stream names for the container node.
+void GetContainerNodeStreams(const CalculatorGraphConfig::Node& node,
+                             CalculatorGraphConfig::Node* result) {
+  CHECK(result != nullptr);
+  *result->mutable_input_stream() = node.input_stream();
+  *result->mutable_output_stream() = node.output_stream();
+  *result->mutable_input_side_packet() = node.input_side_packet();
+  *result->mutable_output_side_packet() = node.output_side_packet();
+  EraseTag("ENABLE", result->mutable_input_stream());
+  EraseTag("ENABLE", result->mutable_input_side_packet());
+  EraseTag("SELECT", result->mutable_input_stream());
+  EraseTag("SELECT", result->mutable_input_side_packet());
+}
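To make the naming scheme concrete, here is a standalone sketch (not part of the patch) that re-implements ChannelName and UniqueName with only the standard library; main() and the expected-output comments are illustrative only:

#include <iostream>
#include <set>
#include <string>

// Mirrors ChannelName above: "c", the channel number, "__", the stream name.
std::string ChannelNameSketch(const std::string& name, int channel) {
  return "c" + std::to_string(channel) + "__" + name;
}

// Mirrors UniqueName above: appends "_2", "_3", ... until the name is unused.
std::string UniqueNameSketch(std::string name, std::set<std::string>* names) {
  std::string result = name;
  int suffix = 2;
  while (names->count(result) > 0) {
    result = name + "_" + std::to_string(suffix++);
  }
  names->insert(result);
  return result;
}

int main() {
  std::set<std::string> used;
  std::cout << ChannelNameSketch("frame", 1) << "\n";     // c1__frame
  std::cout << UniqueNameSketch("frame", &used) << "\n";  // frame
  std::cout << UniqueNameSketch("frame", &used) << "\n";  // frame_2
  std::cout << UniqueNameSketch("frame", &used) << "\n";  // frame_3
}

+
+// Validate all subgraph inputs and outputs.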
+mediapipe::Status ValidateContract(
+    const CalculatorGraphConfig::Node& subgraph_node,
+    const Subgraph::SubgraphOptions& subgraph_options) {
+  auto options =
+      Subgraph::GetOptions<mediapipe::SwitchContainerOptions>(
+          subgraph_options);
+  std::map<TagIndex, std::string> input_tags, side_tags;
+  ParseTags(subgraph_node.input_stream(), &input_tags);
+  ParseTags(subgraph_node.input_side_packet(), &side_tags);
+  if (options.has_select() && options.has_enable()) {
+    return mediapipe::InvalidArgumentError(
+        "Only one of SwitchContainer options 'enable' and 'select' can be "
+        "specified");
+  }
+  if (side_tags.count({"SELECT", 0}) + side_tags.count({"ENABLE", 0}) > 1 ||
+      input_tags.count({"SELECT", 0}) + input_tags.count({"ENABLE", 0}) > 1) {
+    return mediapipe::InvalidArgumentError(
+        "Only one of SwitchContainer inputs 'ENABLE' and 'SELECT' can be "
+        "specified");
+  }
+  return mediapipe::OkStatus();
+}
+
+mediapipe::StatusOr<CalculatorGraphConfig> SwitchContainer::GetConfig(
+    const Subgraph::SubgraphOptions& options) {
+  CalculatorGraphConfig config;
+  std::vector<CalculatorGraphConfig::Node*> subnodes;
+  std::vector<CalculatorGraphConfig::Node> substreams;
+
+  // Parse all input and output tags from the container node.
+  auto container_node = Subgraph::GetNode(options);
+  MP_RETURN_IF_ERROR(ValidateContract(container_node, options));
+  CalculatorGraphConfig::Node container_streams;
+  GetContainerNodeStreams(container_node, &container_streams);
+  std::map<TagIndex, std::string> input_tags, output_tags;
+  std::map<TagIndex, std::string> side_input_tags, side_output_tags;
+  ParseTags(container_streams.input_stream(), &input_tags);
+  ParseTags(container_streams.output_stream(), &output_tags);
+  ParseTags(container_streams.input_side_packet(), &side_input_tags);
+  ParseTags(container_streams.output_side_packet(), &side_output_tags);
+
+  // Add graph nodes for the demux and the mux.
+  auto demux = BuildDemuxNode(input_tags, &config);
+  demux->add_input_stream("SELECT:gate_select");
+  demux->add_input_stream("ENABLE:gate_enable");
+  demux->add_input_side_packet("SELECT:gate_select");
+  demux->add_input_side_packet("ENABLE:gate_enable");
+
+  auto mux = BuildMuxNode(output_tags, &config);
+  mux->add_input_stream("SELECT:gate_select");
+  mux->add_input_stream("ENABLE:gate_enable");
+  mux->add_input_side_packet("SELECT:gate_select");
+  mux->add_input_side_packet("ENABLE:gate_enable");
+
+  // Add input streams for graph and demux.
+  config.add_input_stream("SELECT:gate_select");
+  config.add_input_stream("ENABLE:gate_enable");
+  config.add_input_side_packet("SELECT:gate_select");
+  config.add_input_side_packet("ENABLE:gate_enable");
+  for (const auto& p : input_tags) {
+    std::string stream = CatStream(p.first, p.second);
+    config.add_input_stream(stream);
+    demux->add_input_stream(stream);
+  }
+
+  // Add output streams for graph and mux.
+  for (const auto& p : output_tags) {
+    std::string stream = CatStream(p.first, p.second);
+    config.add_output_stream(stream);
+    mux->add_output_stream(stream);
+  }
+  for (const auto& p : side_input_tags) {
+    std::string side = CatStream(p.first, p.second);
+    config.add_input_side_packet(side);
+    demux->add_input_side_packet(side);
+  }
+  for (const auto& p : side_output_tags) {
+    std::string side = CatStream(p.first, p.second);
+    config.add_output_side_packet(side);
+    mux->add_output_side_packet(side);
+  }
+
+  // Add a subnode for each contained_node.
+ auto nodes = Subgraph::GetOptions(options) + .contained_node(); + std::vector contained_nodes(nodes.begin(), + nodes.end()); + for (int i = 0; i < contained_nodes.size(); ++i) { + auto subnode = config.add_node(); + *subnode = contained_nodes[i]; + subnodes.push_back(subnode); + substreams.push_back(container_streams); + } + + // Connect each contained graph node to demux and mux. + for (int channel = 0; channel < subnodes.size(); ++channel) { + CalculatorGraphConfig::Node& streams = substreams[channel]; + + // Connect each contained graph node input to a demux output. + std::map input_stream_tags; + ParseTags(streams.input_stream(), &input_stream_tags); + for (auto& it : input_stream_tags) { + TagIndex tag_index = it.first; + std::string tag = ChannelTag(tag_index.first, channel); + std::string name = ChannelName(input_tags[tag_index], channel); + std::string demux_stream = CatStream({tag, tag_index.second}, name); + demux->add_output_stream(demux_stream); + subnodes[channel]->add_input_stream(CatStream(tag_index, name)); + } + + // Connect each contained graph node output to a mux input. + std::map output_stream_tags; + ParseTags(streams.output_stream(), &output_stream_tags); + for (auto& it : output_stream_tags) { + TagIndex tag_index = it.first; + std::string tag = ChannelTag(tag_index.first, channel); + std::string name = ChannelName(output_tags[tag_index], channel); + subnodes[channel]->add_output_stream(CatStream(tag_index, name)); + mux->add_input_stream(CatStream({tag, tag_index.second}, name)); + } + + // Connect each contained graph node side-input to a demux side-output. + std::map input_side_tags; + ParseTags(streams.input_side_packet(), &input_side_tags); + for (auto& it : input_side_tags) { + TagIndex tag_index = it.first; + std::string tag = ChannelTag(tag_index.first, channel); + std::string name = ChannelName(side_input_tags[tag_index], channel); + std::string demux_stream = CatStream({tag, tag_index.second}, name); + demux->add_output_side_packet(demux_stream); + subnodes[channel]->add_input_side_packet(CatStream(tag_index, name)); + } + + // Connect each contained graph node side-output to a mux side-input. + std::map output_side_tags; + ParseTags(streams.output_side_packet(), &output_side_tags); + for (auto& it : output_side_tags) { + TagIndex tag_index = it.first; + std::string tag = ChannelTag(tag_index.first, channel); + std::string name = ChannelName(side_output_tags[tag_index], channel); + subnodes[channel]->add_output_side_packet(CatStream(tag_index, name)); + mux->add_input_side_packet(CatStream({tag, tag_index.second}, name)); + } + } + + return config; +} + +} // namespace tool +} // namespace mediapipe diff --git a/mediapipe/framework/tool/switch_container.proto b/mediapipe/framework/tool/switch_container.proto new file mode 100644 index 000000000..ac3995006 --- /dev/null +++ b/mediapipe/framework/tool/switch_container.proto @@ -0,0 +1,27 @@ +syntax = "proto2"; + +package mediapipe; + +import "mediapipe/framework/calculator.proto"; + +option java_package = "com.google.mediapipe.proto"; +option java_outer_classname = "SwitchContainerProto"; + +// Options for a switch-container directing traffic to one of several +// contained subgraph or calculator nodes. +message SwitchContainerOptions { + extend mediapipe.CalculatorOptions { + optional SwitchContainerOptions ext = 345967970; + } + + reserved 1; + + // The contained registered subgraphs or calculators. 
+  repeated CalculatorGraphConfig.Node contained_node = 2;
+
+  // Activates the specified channel to receive input packets.
+  optional int32 select = 3;
+
+  // Activates channel 1 for enable = true, channel 0 otherwise.
+  optional bool enable = 4;
+}
diff --git a/mediapipe/framework/tool/switch_container_test.cc b/mediapipe/framework/tool/switch_container_test.cc
new file mode 100644
index 000000000..b214dc105
--- /dev/null
+++ b/mediapipe/framework/tool/switch_container_test.cc
@@ -0,0 +1,368 @@
+// Copyright 2019 The MediaPipe Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "mediapipe/framework/calculator.pb.h"
+#include "mediapipe/framework/calculator_framework.h"
+#include "mediapipe/framework/deps/message_matchers.h"
+#include "mediapipe/framework/port/gmock.h"
+#include "mediapipe/framework/port/gtest.h"
+#include "mediapipe/framework/port/logging.h"
+#include "mediapipe/framework/port/parse_text_proto.h"
+#include "mediapipe/framework/port/proto_ns.h"
+#include "mediapipe/framework/port/ret_check.h"
+#include "mediapipe/framework/port/status.h"
+#include "mediapipe/framework/port/status_matchers.h"
+#include "mediapipe/framework/subgraph.h"
+#include "mediapipe/framework/tool/node_chain_subgraph.pb.h"
+#include "mediapipe/framework/tool/subgraph_expansion.h"
+
+namespace mediapipe {
+namespace {
+
+// A Calculator that outputs thrice the value of its input packet (an int).
+// It also accepts a side packet tagged "TIMEZONE", but doesn't use it.
+class TripleIntCalculator : public CalculatorBase {
+ public:
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
+    cc->Inputs().Index(0).Set<int>().Optional();
+    cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0)).Optional();
+    cc->InputSidePackets().Index(0).Set<int>().Optional();
+    cc->OutputSidePackets()
+        .Index(0)
+        .SetSameAs(&cc->InputSidePackets().Index(0))
+        .Optional();
+    cc->InputSidePackets().Tag("TIMEZONE").Set<int>().Optional();
+    return mediapipe::OkStatus();
+  }
+
+  mediapipe::Status Open(CalculatorContext* cc) final {
+    cc->SetOffset(TimestampDiff(0));
+    if (cc->OutputSidePackets().HasTag("")) {
+      cc->OutputSidePackets().Index(0).Set(
+          MakePacket<int>(cc->InputSidePackets().Index(0).Get<int>() * 3));
+    }
+    return mediapipe::OkStatus();
+  }
+
+  mediapipe::Status Process(CalculatorContext* cc) final {
+    int value = cc->Inputs().Index(0).Value().Get<int>();
+    cc->Outputs().Index(0).Add(new int(3 * value), cc->InputTimestamp());
+    return mediapipe::OkStatus();
+  }
+};
+REGISTER_CALCULATOR(TripleIntCalculator);
+
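The tests below drive the container through its ENABLE input. As a sketch of the SELECT alternative that switch_container.cc describes, a container is not limited to two channels; something like the following could select among three contained subgraphs through a side packet (the stream names and the three subgraph names are hypothetical, and this assumes SELECT accepts any valid channel index):

node {
  calculator: "SwitchContainer"
  input_side_packet: "SELECT:active_channel"  # int channel index: 0, 1, or 2
  input_stream: "INPUT_VIDEO:video_frames"
  output_stream: "OUTPUT_VIDEO:output_frames"
  options {
    [mediapipe.SwitchContainerOptions.ext] {
      contained_node: { calculator: "FastSubgraph" }      # channel 0
      contained_node: { calculator: "AccurateSubgraph" }  # channel 1
      contained_node: { calculator: "DebugSubgraph" }     # channel 2
    }
  }
}

+// A testing example of a SwitchContainer containing two subnodes.
+// Note that the input and output tags supplied to the container node
+// must match the input and output tags required by the subnodes.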
+CalculatorGraphConfig SubnodeContainerExample() {
+  return mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+    input_stream: "foo"
+    input_stream: "enable"
+    input_side_packet: "timezone"
+    node {
+      calculator: "SwitchContainer"
+      input_stream: "ENABLE:enable"
+      input_stream: "foo"
+      output_stream: "bar"
+      options {
+        [mediapipe.SwitchContainerOptions.ext] {
+          contained_node: { calculator: "TripleIntCalculator" }
+          contained_node: { calculator: "PassThroughCalculator" }
+        }
+      }
+    }
+    node {
+      calculator: "PassThroughCalculator"
+      input_stream: "foo"
+      input_stream: "bar"
+      output_stream: "output_foo"
+      output_stream: "output_bar"
+    }
+  )");
+}
+
+// A testing example of a SwitchContainer containing two subnodes.
+// Note that the side-input and side-output tags supplied to the container node
+// must match the side-input and side-output tags required by the subnodes.
+CalculatorGraphConfig SideSubnodeContainerExample() {
+  return mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+    input_side_packet: "foo"
+    input_side_packet: "enable"
+    output_side_packet: "output_bar"
+    node {
+      calculator: "SwitchContainer"
+      input_side_packet: "ENABLE:enable"
+      input_side_packet: "foo"
+      output_side_packet: "bar"
+      options {
+        [mediapipe.SwitchContainerOptions.ext] {
+          contained_node: { calculator: "TripleIntCalculator" }
+          contained_node: { calculator: "PassThroughCalculator" }
+        }
+      }
+    }
+    node {
+      calculator: "PassThroughCalculator"
+      input_side_packet: "foo"
+      input_side_packet: "bar"
+      output_side_packet: "output_foo"
+      output_side_packet: "output_bar"
+    }
+  )");
+}
+
+// Runs the test container graph with a few input packets.
+void RunTestContainer(CalculatorGraphConfig supergraph) {
+  CalculatorGraph graph;
+  std::vector<Packet> out_foo, out_bar;
+  tool::AddVectorSink("output_foo", &supergraph, &out_foo);
+  tool::AddVectorSink("output_bar", &supergraph, &out_bar);
+  MP_ASSERT_OK(graph.Initialize(supergraph, {}));
+  MP_ASSERT_OK(graph.StartRun({{"timezone", MakePacket<int>(3)}}));
+
+  // Send enable == true signal at 5000 us.
+  const int64 enable_ts = 5000;
+  MP_EXPECT_OK(graph.AddPacketToInputStream(
+      "enable", MakePacket<bool>(true).At(Timestamp(enable_ts))));
+  MP_ASSERT_OK(graph.WaitUntilIdle());
+
+  const int packet_count = 10;
+  // Send int value packets at {10K, 20K, 30K, ..., 100K}.
+  for (uint64 t = 1; t <= packet_count; ++t) {
+    MP_EXPECT_OK(graph.AddPacketToInputStream(
+        "foo", MakePacket<int>(t).At(Timestamp(t * 10000))));
+    MP_ASSERT_OK(graph.WaitUntilIdle());
+    // The inputs are sent to the input stream "foo" and should pass through.
+    EXPECT_EQ(out_foo.size(), t);
+    // Since "enable == true" for ts 10K...100K us, the second contained graph,
+    // i.e. the one containing the PassThroughCalculator, should output the
+    // input values without changing them.
+    EXPECT_EQ(out_bar.size(), t);
+    EXPECT_EQ(out_bar.back().Get<int>(), t);
+  }
+
+  // Send enable == false signal at 105K us.
+  MP_EXPECT_OK(graph.AddPacketToInputStream(
+      "enable", MakePacket<bool>(false).At(Timestamp(105000))));
+  MP_ASSERT_OK(graph.WaitUntilIdle());
+
+  // Send int value packets at {110K, 120K, ..., 200K}.
+  for (uint64 t = 11; t <= packet_count * 2; ++t) {
+    MP_EXPECT_OK(graph.AddPacketToInputStream(
+        "foo", MakePacket<int>(t).At(Timestamp(t * 10000))));
+    MP_ASSERT_OK(graph.WaitUntilIdle());
+    // The inputs are sent to the input stream "foo" and should pass through.
+    EXPECT_EQ(out_foo.size(), t);
+    // Since "enable == false" for ts 110K...200K us, the first contained
+    // graph, i.e. the one containing the TripleIntCalculator, should output
+    // the values after tripling them.
+    EXPECT_EQ(out_bar.size(), t);
+    EXPECT_EQ(out_bar.back().Get<int>(), t * 3);
+  }
+
+  MP_ASSERT_OK(graph.CloseAllInputStreams());
+  MP_ASSERT_OK(graph.WaitUntilDone());
+  EXPECT_EQ(out_foo.size(), packet_count * 2);
+  EXPECT_EQ(out_bar.size(), packet_count * 2);
+}
+
+// Runs the test side-packet container graph with input side-packets.
+void RunTestSideContainer(CalculatorGraphConfig supergraph) {
+  CalculatorGraph graph;
+  MP_ASSERT_OK(graph.Initialize(supergraph, {}));
+  MP_ASSERT_OK(graph.StartRun({
+      {"enable", MakePacket<bool>(false)},
+      {"foo", MakePacket<int>(4)},
+  }));
+  MP_ASSERT_OK(graph.CloseAllInputStreams());
+  MP_ASSERT_OK(graph.WaitUntilDone());
+  Packet side_output = graph.GetOutputSidePacket("output_bar").ValueOrDie();
+  EXPECT_EQ(side_output.Get<int>(), 12);
+
+  MP_ASSERT_OK(graph.StartRun({
+      {"enable", MakePacket<bool>(true)},
+      {"foo", MakePacket<int>(4)},
+  }));
+  MP_ASSERT_OK(graph.CloseAllInputStreams());
+  MP_ASSERT_OK(graph.WaitUntilDone());
+  side_output = graph.GetOutputSidePacket("output_bar").ValueOrDie();
+  EXPECT_EQ(side_output.Get<int>(), 4);
+}
+
+// Rearranges the Node messages within a CalculatorGraphConfig message.
+CalculatorGraphConfig OrderNodes(const CalculatorGraphConfig& config,
+                                 std::vector<int> order) {
+  auto result = config;
+  result.clear_node();
+  for (int i = 0; i < order.size(); ++i) {
+    *result.add_node() = config.node(order[i]);
+  }
+  return result;
+}
+
+// Shows the SwitchContainer applied to a pair of simple subnodes.
+TEST(SwitchContainerTest, ApplyToSubnodes) {
+  EXPECT_TRUE(SubgraphRegistry::IsRegistered("SwitchContainer"));
+  CalculatorGraphConfig supergraph = SubnodeContainerExample();
+  CalculatorGraphConfig expected_graph =
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+        node {
+          name: "switchcontainer__SwitchDemuxCalculator"
+          calculator: "SwitchDemuxCalculator"
+          input_stream: "ENABLE:enable"
+          input_stream: "foo"
+          output_stream: "C0__:switchcontainer__c0__foo"
+          output_stream: "C1__:switchcontainer__c1__foo"
+        }
+        node {
+          name: "switchcontainer__TripleIntCalculator"
+          calculator: "TripleIntCalculator"
+          input_stream: "switchcontainer__c0__foo"
+          output_stream: "switchcontainer__c0__bar"
+        }
+        node {
+          name: "switchcontainer__PassThroughCalculator"
+          calculator: "PassThroughCalculator"
+          input_stream: "switchcontainer__c1__foo"
+          output_stream: "switchcontainer__c1__bar"
+        }
+        node {
+          name: "switchcontainer__SwitchMuxCalculator"
+          calculator: "SwitchMuxCalculator"
+          input_stream: "ENABLE:enable"
+          input_stream: "C0__:switchcontainer__c0__bar"
+          input_stream: "C1__:switchcontainer__c1__bar"
+          output_stream: "bar"
+        }
+        node {
+          calculator: "PassThroughCalculator"
+          input_stream: "foo"
+          input_stream: "bar"
+          output_stream: "output_foo"
+          output_stream: "output_bar"
+        }
+        input_stream: "foo"
+        input_stream: "enable"
+        input_side_packet: "timezone"
+      )");
+  expected_graph = OrderNodes(expected_graph, {4, 0, 3, 1, 2});
+  MP_EXPECT_OK(tool::ExpandSubgraphs(&supergraph));
+  EXPECT_THAT(supergraph, mediapipe::EqualsProto(expected_graph));
+}
+
+// Shows that the SwitchContainer runs with a pair of simple subnodes.
+TEST(SwitchContainerTest, RunsWithSubnodes) {
+  EXPECT_TRUE(SubgraphRegistry::IsRegistered("SwitchContainer"));
+  CalculatorGraphConfig supergraph = SubnodeContainerExample();
+  MP_EXPECT_OK(tool::ExpandSubgraphs(&supergraph));
+  RunTestContainer(supergraph);
+}
+
+// Shows the SwitchContainer applied to a pair of side-packet subnodes.
+TEST(SwitchContainerTest, ApplyToSideSubnodes) {
+  EXPECT_TRUE(SubgraphRegistry::IsRegistered("SwitchContainer"));
+  CalculatorGraphConfig supergraph = SideSubnodeContainerExample();
+  CalculatorGraphConfig expected_graph =
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+        input_side_packet: "foo"
+        input_side_packet: "enable"
+        output_side_packet: "output_bar"
+        node {
+          name: "switchcontainer__SwitchDemuxCalculator"
+          calculator: "SwitchDemuxCalculator"
+          input_side_packet: "ENABLE:enable"
+          input_side_packet: "foo"
+          output_side_packet: "C0__:switchcontainer__c0__foo"
+          output_side_packet: "C1__:switchcontainer__c1__foo"
+        }
+        node {
+          name: "switchcontainer__TripleIntCalculator"
+          calculator: "TripleIntCalculator"
+          input_side_packet: "switchcontainer__c0__foo"
+          output_side_packet: "switchcontainer__c0__bar"
+        }
+        node {
+          name: "switchcontainer__PassThroughCalculator"
+          calculator: "PassThroughCalculator"
+          input_side_packet: "switchcontainer__c1__foo"
+          output_side_packet: "switchcontainer__c1__bar"
+        }
+        node {
+          name: "switchcontainer__SwitchMuxCalculator"
+          calculator: "SwitchMuxCalculator"
+          input_side_packet: "ENABLE:enable"
+          input_side_packet: "C0__:switchcontainer__c0__bar"
+          input_side_packet: "C1__:switchcontainer__c1__bar"
+          output_side_packet: "bar"
+        }
+        node {
+          calculator: "PassThroughCalculator"
+          input_side_packet: "foo"
+          input_side_packet: "bar"
+          output_side_packet: "output_foo"
+          output_side_packet: "output_bar"
+        }
+      )");
+  expected_graph = OrderNodes(expected_graph, {4, 0, 3, 1, 2});
+  MP_EXPECT_OK(tool::ExpandSubgraphs(&supergraph));
+  EXPECT_THAT(supergraph, mediapipe::EqualsProto(expected_graph));
+}
+
+// Shows that the SwitchContainer runs with a pair of side-packet subnodes.
+TEST(SwitchContainerTest, RunWithSideSubnodes) {
+  EXPECT_TRUE(SubgraphRegistry::IsRegistered("SwitchContainer"));
+  CalculatorGraphConfig supergraph = SideSubnodeContainerExample();
+  MP_EXPECT_OK(tool::ExpandSubgraphs(&supergraph));
+  RunTestSideContainer(supergraph);
+}
+
+// Shows validation of SwitchContainer side inputs.
+TEST(SwitchContainerTest, ValidateSideInputs) {
+  EXPECT_TRUE(SubgraphRegistry::IsRegistered("SwitchContainer"));
+  CalculatorGraphConfig supergraph =
+      mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
+        input_side_packet: "foo"
+        input_side_packet: "enable"
+        output_side_packet: "output_bar"
+        node {
+          calculator: "SwitchContainer"
+          input_side_packet: "ENABLE:enable"
+          input_side_packet: "SELECT:enable"
+          input_side_packet: "foo"
+          output_side_packet: "bar"
+          options {
+            [mediapipe.SwitchContainerOptions.ext] {
+              contained_node: { calculator: "TripleIntCalculator" }
+              contained_node: { calculator: "PassThroughCalculator" }
+            }
+          }
+        }
+        node {
+          calculator: "PassThroughCalculator"
+          input_side_packet: "foo"
+          input_side_packet: "bar"
+          output_side_packet: "output_foo"
+          output_side_packet: "output_bar"
+        }
+      )");
+  auto status = tool::ExpandSubgraphs(&supergraph);
+  EXPECT_EQ(std::pair(status.code(), std::string(status.message())),
+            std::pair(mediapipe::StatusCode::kInvalidArgument,
+                      std::string("Only one of SwitchContainer inputs "
+                                  "'ENABLE' and 'SELECT' can be specified")));
+}
+
+}  // namespace
+}  // namespace mediapipe
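Taken together, the streaming assertions above exercise the following routing timeline (a worked trace of RunTestContainer, not output captured from the tests):

  ts 10K..100K us    enable == true    channel 1 (PassThroughCalculator)   out_bar.back() == t
  ts 110K..200K us   enable == false   channel 0 (TripleIntCalculator)     out_bar.back() == 3 * t

diff --git a/mediapipe/framework/tool/switch_demux_calculator.cc b/mediapipe/framework/tool/switch_demux_calculator.cc
new file mode 100644
index 000000000..0a16b1f3e
--- /dev/null
+++ b/mediapipe/framework/tool/switch_demux_calculator.cc
@@ -0,0 +1,170 @@
+// Copyright 2019 The MediaPipe Authors.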
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include + +#include "absl/strings/str_cat.h" +#include "mediapipe/framework/calculator_framework.h" +#include "mediapipe/framework/collection_item_id.h" +#include "mediapipe/framework/port/integral_types.h" +#include "mediapipe/framework/port/logging.h" +#include "mediapipe/framework/port/ret_check.h" +#include "mediapipe/framework/port/status.h" +#include "mediapipe/framework/port/status_macros.h" +#include "mediapipe/framework/tool/container_util.h" + +namespace mediapipe { + +// A calculator to redirect a set of input streams to one of several output +// channels, each consisting of corresponding output streams. Each channel +// is distinguished by a tag-prefix such as "C1__". For example: +// +// node { +// calculator: "SwitchDemuxCalculator" +// input_stream: "ENABLE:enable" +// input_stream: "FUNC_INPUT:foo" +// input_stream: "FUNC_INPUT:bar" +// output_stream: "C0__FUNC_INPUT:foo_0" +// output_stream: "C0__FUNC_INPUT:bar_0" +// output_stream: "C1__FUNC_INPUT:foo_1" +// output_stream: "C1__FUNC_INPUT:bar_1" +// } +// +// Input stream "ENABLE" specifies routing of packets to either channel 0 +// or channel 1, given "ENABLE:false" or "ENABLE:true" respectively. +// Input-side-packet "ENABLE" and input-stream "SELECT" can also be used +// similarly to specify the active channel. +// +// SwitchDemuxCalculator is used by SwitchContainer to enable one of several +// contained subgraph or calculator nodes. +// +class SwitchDemuxCalculator : public CalculatorBase { + static constexpr char kSelectTag[] = "SELECT"; + static constexpr char kEnableTag[] = "ENABLE"; + + public: + static mediapipe::Status GetContract(CalculatorContract* cc); + + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + + private: + int channel_index_; + std::set channel_tags_; +}; +REGISTER_CALCULATOR(SwitchDemuxCalculator); + +mediapipe::Status SwitchDemuxCalculator::GetContract(CalculatorContract* cc) { + // Allow any one of kSelectTag, kEnableTag. + if (cc->Inputs().HasTag(kSelectTag)) { + cc->Inputs().Tag(kSelectTag).Set(); + } else if (cc->Inputs().HasTag(kEnableTag)) { + cc->Inputs().Tag(kEnableTag).Set(); + } + // Allow any one of kSelectTag, kEnableTag. + if (cc->InputSidePackets().HasTag(kSelectTag)) { + cc->InputSidePackets().Tag(kSelectTag).Set(); + } else if (cc->InputSidePackets().HasTag(kEnableTag)) { + cc->InputSidePackets().Tag(kEnableTag).Set(); + } + + // Set the types for all output channels to corresponding input types. 
+ std::set channel_tags = ChannelTags(cc->Outputs().TagMap()); + int channel_count = ChannelCount(cc->Outputs().TagMap()); + for (const std::string& tag : channel_tags) { + for (int index = 0; index < cc->Inputs().NumEntries(tag); ++index) { + auto input_id = cc->Inputs().GetId(tag, index); + if (input_id.IsValid()) { + cc->Inputs().Get(tag, index).SetAny(); + for (int channel = 0; channel < channel_count; ++channel) { + auto output_id = + cc->Outputs().GetId(tool::ChannelTag(tag, channel), index); + if (output_id.IsValid()) { + cc->Outputs().Get(output_id).SetSameAs(&cc->Inputs().Get(input_id)); + } + } + } + } + } + channel_tags = ChannelTags(cc->OutputSidePackets().TagMap()); + channel_count = ChannelCount(cc->OutputSidePackets().TagMap()); + for (const std::string& tag : channel_tags) { + int num_entries = cc->InputSidePackets().NumEntries(tag); + for (int index = 0; index < num_entries; ++index) { + auto input_id = cc->InputSidePackets().GetId(tag, index); + if (input_id.IsValid()) { + cc->InputSidePackets().Get(tag, index).SetAny(); + for (int channel = 0; channel < channel_count; ++channel) { + auto output_id = cc->OutputSidePackets().GetId( + tool::ChannelTag(tag, channel), index); + if (output_id.IsValid()) { + cc->OutputSidePackets().Get(output_id).SetSameAs( + &cc->InputSidePackets().Get(input_id)); + } + } + } + } + } + cc->SetInputStreamHandler("ImmediateInputStreamHandler"); + cc->SetProcessTimestampBounds(true); + return mediapipe::OkStatus(); +} + +mediapipe::Status SwitchDemuxCalculator::Open(CalculatorContext* cc) { + channel_index_ = tool::GetChannelIndex(*cc, channel_index_); + channel_tags_ = ChannelTags(cc->Outputs().TagMap()); + + // Relay side packets to all channels. + // Note: This is necessary because Calculator::Open only proceeds when every + // anticipated side-packet arrives. + int channel_count = tool::ChannelCount(cc->OutputSidePackets().TagMap()); + for (const std::string& tag : ChannelTags(cc->OutputSidePackets().TagMap())) { + int num_entries = cc->InputSidePackets().NumEntries(tag); + for (int index = 0; index < num_entries; ++index) { + Packet input = cc->InputSidePackets().Get(tag, index); + for (int channel = 0; channel < channel_count; ++channel) { + std::string output_tag = tool::ChannelTag(tag, channel); + auto output_id = cc->OutputSidePackets().GetId(output_tag, index); + if (output_id.IsValid()) { + cc->OutputSidePackets().Get(output_tag, index).Set(input); + } + } + } + } + return mediapipe::OkStatus(); +} + +mediapipe::Status SwitchDemuxCalculator::Process(CalculatorContext* cc) { + // Update the input channel index if specified. + channel_index_ = tool::GetChannelIndex(*cc, channel_index_); + + // Relay packets and timestamps only to channel_index_. + for (const std::string& tag : channel_tags_) { + for (int index = 0; index < cc->Inputs().NumEntries(tag); ++index) { + auto& input = cc->Inputs().Get(tag, index); + std::string output_tag = tool::ChannelTag(tag, channel_index_); + auto output_id = cc->Outputs().GetId(output_tag, index); + if (output_id.IsValid()) { + auto& output = cc->Outputs().Get(output_tag, index); + tool::Relay(input, &output); + } + } + } + return mediapipe::OkStatus(); +} + +} // namespace mediapipe diff --git a/mediapipe/framework/tool/switch_mux_calculator.cc b/mediapipe/framework/tool/switch_mux_calculator.cc new file mode 100644 index 000000000..aaa36ac04 --- /dev/null +++ b/mediapipe/framework/tool/switch_mux_calculator.cc @@ -0,0 +1,162 @@ +// Copyright 2019 The MediaPipe Authors. 
+// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include +#include +#include +#include + +#include "absl/strings/str_cat.h" +#include "mediapipe/framework/calculator_framework.h" +#include "mediapipe/framework/collection_item_id.h" +#include "mediapipe/framework/input_stream_shard.h" +#include "mediapipe/framework/output_stream_shard.h" +#include "mediapipe/framework/port/integral_types.h" +#include "mediapipe/framework/port/logging.h" +#include "mediapipe/framework/port/ret_check.h" +#include "mediapipe/framework/port/status.h" +#include "mediapipe/framework/port/status_macros.h" +#include "mediapipe/framework/tool/container_util.h" + +namespace mediapipe { + +// A calculator to join several sets of input streams into one +// output channel, consisting of corresponding output streams. +// Each channel is distinguished by a tag-prefix such as "C1__". +// For example: +// +// node { +// calculator: "SwitchMuxCalculator" +// input_stream: "ENABLE:enable" +// input_stream: "C0__FUNC_INPUT:foo_0" +// input_stream: "C0__FUNC_INPUT:bar_0" +// input_stream: "C1__FUNC_INPUT:foo_1" +// input_stream: "C1__FUNC_INPUT:bar_1" +// output_stream: "FUNC_INPUT:foo" +// output_stream: "FUNC_INPUT:bar" +// } +// +// Input stream "ENABLE" specifies routing of packets from either channel 0 +// or channel 1, given "ENABLE:false" or "ENABLE:true" respectively. +// Input-side-packet "ENABLE" and input-stream "SELECT" can also be used +// similarly to specify the active channel. +// +// SwitchMuxCalculator is used by SwitchContainer to enable one of several +// contained subgraph or calculator nodes. +// +class SwitchMuxCalculator : public CalculatorBase { + static constexpr char kSelectTag[] = "SELECT"; + static constexpr char kEnableTag[] = "ENABLE"; + + public: + static mediapipe::Status GetContract(CalculatorContract* cc); + + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + + private: + int channel_index_; + std::set channel_tags_; +}; +REGISTER_CALCULATOR(SwitchMuxCalculator); + +mediapipe::Status SwitchMuxCalculator::GetContract(CalculatorContract* cc) { + // Allow any one of kSelectTag, kEnableTag. + if (cc->Inputs().HasTag(kSelectTag)) { + cc->Inputs().Tag(kSelectTag).Set(); + } else if (cc->Inputs().HasTag(kEnableTag)) { + cc->Inputs().Tag(kEnableTag).Set(); + } + // Allow any one of kSelectTag, kEnableTag. + if (cc->InputSidePackets().HasTag(kSelectTag)) { + cc->InputSidePackets().Tag(kSelectTag).Set(); + } else if (cc->InputSidePackets().HasTag(kEnableTag)) { + cc->InputSidePackets().Tag(kEnableTag).Set(); + } + + // Set the types for all input channels to corresponding output types. 
+ std::set channel_tags = ChannelTags(cc->Inputs().TagMap()); + int channel_count = ChannelCount(cc->Inputs().TagMap()); + for (const std::string& tag : channel_tags) { + for (int index = 0; index < cc->Outputs().NumEntries(tag); ++index) { + cc->Outputs().Get(tag, index).SetAny(); + auto output_id = cc->Outputs().GetId(tag, index); + if (output_id.IsValid()) { + for (int channel = 0; channel < channel_count; ++channel) { + auto input_id = + cc->Inputs().GetId(tool::ChannelTag(tag, channel), index); + if (input_id.IsValid()) { + cc->Inputs().Get(input_id).SetSameAs(&cc->Outputs().Get(output_id)); + } + } + } + } + } + channel_tags = ChannelTags(cc->InputSidePackets().TagMap()); + channel_count = ChannelCount(cc->InputSidePackets().TagMap()); + for (const std::string& tag : channel_tags) { + int num_entries = cc->OutputSidePackets().NumEntries(tag); + for (int index = 0; index < num_entries; ++index) { + cc->OutputSidePackets().Get(tag, index).SetAny(); + auto output_id = cc->OutputSidePackets().GetId(tag, index); + if (output_id.IsValid()) { + for (int channel = 0; channel < channel_count; ++channel) { + auto input_id = cc->InputSidePackets().GetId( + tool::ChannelTag(tag, channel), index); + if (input_id.IsValid()) { + cc->InputSidePackets().Get(input_id).SetSameAs( + &cc->OutputSidePackets().Get(output_id)); + } + } + } + } + } + cc->SetInputStreamHandler("ImmediateInputStreamHandler"); + cc->SetProcessTimestampBounds(true); + return mediapipe::OkStatus(); +} + +mediapipe::Status SwitchMuxCalculator::Open(CalculatorContext* cc) { + channel_index_ = tool::GetChannelIndex(*cc, channel_index_); + channel_tags_ = ChannelTags(cc->Inputs().TagMap()); + + // Relay side packets only from channel_index_. + for (const std::string& tag : ChannelTags(cc->InputSidePackets().TagMap())) { + int num_outputs = cc->OutputSidePackets().NumEntries(tag); + for (int index = 0; index < num_outputs; ++index) { + std::string input_tag = tool::ChannelTag(tag, channel_index_); + Packet input = cc->InputSidePackets().Get(input_tag, index); + cc->OutputSidePackets().Get(tag, index).Set(input); + } + } + return mediapipe::OkStatus(); +} + +mediapipe::Status SwitchMuxCalculator::Process(CalculatorContext* cc) { + // Update the input channel index if specified. + channel_index_ = tool::GetChannelIndex(*cc, channel_index_); + + // Relay packets and timestamps only from channel_index_. 
+ for (const std::string& tag : channel_tags_) { + for (int index = 0; index < cc->Outputs().NumEntries(tag); ++index) { + auto& output = cc->Outputs().Get(tag, index); + std::string input_tag = tool::ChannelTag(tag, channel_index_); + auto& input = cc->Inputs().Get(input_tag, index); + tool::Relay(input, &output); + } + } + return mediapipe::OkStatus(); +} + +} // namespace mediapipe diff --git a/mediapipe/framework/tool/tag_map.cc b/mediapipe/framework/tool/tag_map.cc index fcc9a92fe..5a39faf81 100644 --- a/mediapipe/framework/tool/tag_map.cc +++ b/mediapipe/framework/tool/tag_map.cc @@ -37,7 +37,7 @@ void TagMap::InitializeNames( } } -::mediapipe::Status TagMap::Initialize( +mediapipe::Status TagMap::Initialize( const proto_ns::RepeatedPtrField& tag_index_names) { std::map> tag_to_names; for (const auto& tag_index_name : tag_index_names) { @@ -63,7 +63,7 @@ void TagMap::InitializeNames( names.resize(index + 1); } if (!names[index].empty()) { - return ::mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) << "tag \"" << tag << "\" index " << index << " already had a name \"" << names[index] << "\" but is being reassigned a name \"" << name << "\""; @@ -81,7 +81,7 @@ void TagMap::InitializeNames( // loop above), this means that all indexes were used exactly once. const std::vector& names = tag_to_names[item.first]; if (tag_data.count != names.size()) { - auto builder = ::mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) + auto builder = mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC) << "Not all indexes were assigned names. Tag \"" << item.first << "\" has the following:\n"; // Note, names.size() will always be larger than tag_data.count. @@ -100,10 +100,10 @@ void TagMap::InitializeNames( num_entries_ = current_index; InitializeNames(tag_to_names); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TagMap::Initialize(const TagAndNameInfo& info) { +mediapipe::Status TagMap::Initialize(const TagAndNameInfo& info) { if (info.tags.empty()) { if (!info.names.empty()) { mapping_.emplace( @@ -115,7 +115,7 @@ void TagMap::InitializeNames( } else { std::map> tag_to_names; if (info.tags.size() != info.names.size()) { - return ::mediapipe::FailedPreconditionError( + return mediapipe::FailedPreconditionError( "Expected info.tags.size() == info.names.size()"); } @@ -139,7 +139,7 @@ void TagMap::InitializeNames( // Now create the names_ array in the correctly sorted order. InitializeNames(tag_to_names); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } proto_ns::RepeatedPtrField TagMap::CanonicalEntries() const { diff --git a/mediapipe/framework/tool/tag_map.h b/mediapipe/framework/tool/tag_map.h index e2ec97599..a6b4a6b2a 100644 --- a/mediapipe/framework/tool/tag_map.h +++ b/mediapipe/framework/tool/tag_map.h @@ -53,7 +53,7 @@ class TagMap { // TAG::name. This is the most common usage: // ASSIGN_OR_RETURN(std::shared_ptr tag_map, // tool::TagMap::Create(node.input_streams())); - static ::mediapipe::StatusOr> Create( + static mediapipe::StatusOr> Create( const proto_ns::RepeatedPtrField& tag_index_names) { std::shared_ptr output(new TagMap()); MP_RETURN_IF_ERROR(output->Initialize(tag_index_names)); @@ -64,7 +64,7 @@ class TagMap { // TODO: Migrate callers and delete this method. 
ABSL_DEPRECATED( "Use mediapipe::tool::TagMap::Create(tag_index_names) instead.") - static ::mediapipe::StatusOr> Create( + static mediapipe::StatusOr> Create( const TagAndNameInfo& info) { std::shared_ptr output(new TagMap()); MP_RETURN_IF_ERROR(output->Initialize(info)); @@ -108,12 +108,12 @@ class TagMap { // Initialize the TagMap. Due to only having a factory function for // creation, there is no way for a user to have an uninitialized TagMap. - ::mediapipe::Status Initialize( + mediapipe::Status Initialize( const proto_ns::RepeatedPtrField& tag_index_names); // Initialize from a TagAndNameInfo. ABSL_DEPRECATED("Use Initialize(tag_index_names) instead.") - ::mediapipe::Status Initialize(const TagAndNameInfo& info); + mediapipe::Status Initialize(const TagAndNameInfo& info); // Initialize names_ using a map from tag to the names for that tag. void InitializeNames( diff --git a/mediapipe/framework/tool/tag_map_helper.cc b/mediapipe/framework/tool/tag_map_helper.cc index 3d0f4b2fb..8213a503f 100644 --- a/mediapipe/framework/tool/tag_map_helper.cc +++ b/mediapipe/framework/tool/tag_map_helper.cc @@ -31,7 +31,7 @@ namespace mediapipe { namespace tool { // Create using a vector of TAG::name. -::mediapipe::StatusOr> CreateTagMap( +mediapipe::StatusOr> CreateTagMap( const std::vector& tag_index_names) { proto_ns::RepeatedPtrField fields; for (const auto& tag_index_name : tag_index_names) { @@ -41,7 +41,7 @@ namespace tool { } // Create using an integer number of entries (for tag ""). -::mediapipe::StatusOr> CreateTagMap(int num_entries) { +mediapipe::StatusOr> CreateTagMap(int num_entries) { RET_CHECK_LE(0, num_entries); proto_ns::RepeatedPtrField fields; for (int i = 0; i < num_entries; ++i) { @@ -51,7 +51,7 @@ namespace tool { } // Create using a vector of just tag names. -::mediapipe::StatusOr> CreateTagMapFromTags( +mediapipe::StatusOr> CreateTagMapFromTags( const std::vector& tags) { proto_ns::RepeatedPtrField fields; for (int i = 0; i < tags.size(); ++i) { diff --git a/mediapipe/framework/tool/tag_map_helper.h b/mediapipe/framework/tool/tag_map_helper.h index 69d875c65..6d4f67db5 100644 --- a/mediapipe/framework/tool/tag_map_helper.h +++ b/mediapipe/framework/tool/tag_map_helper.h @@ -23,14 +23,14 @@ namespace mediapipe { namespace tool { // Create a TagMap using a vector of TAG::name. -::mediapipe::StatusOr> CreateTagMap( +mediapipe::StatusOr> CreateTagMap( const std::vector& tag_index_names); // Create a TagMap using an integer number of entries (for tag ""). -::mediapipe::StatusOr> CreateTagMap(int num_entries); +mediapipe::StatusOr> CreateTagMap(int num_entries); // Create a TagMap using a vector of just tag names. 
-::mediapipe::StatusOr> CreateTagMapFromTags( +mediapipe::StatusOr> CreateTagMapFromTags( const std::vector& tags); } // namespace tool diff --git a/mediapipe/framework/tool/tag_map_test.cc b/mediapipe/framework/tool/tag_map_test.cc index cac666988..759f4fe70 100644 --- a/mediapipe/framework/tool/tag_map_test.cc +++ b/mediapipe/framework/tool/tag_map_test.cc @@ -101,7 +101,7 @@ void TestSuccessTagMap(const std::vector& tag_index_names, EXPECT_EQ(tags.size(), tag_map->Mapping().size()) << "Parameters: in " << tag_map->DebugString(); for (int i = 0; i < tags.size(); ++i) { - EXPECT_TRUE(::mediapipe::ContainsKey(tag_map->Mapping(), tags[i])) + EXPECT_TRUE(mediapipe::ContainsKey(tag_map->Mapping(), tags[i])) << "Parameters: Trying to find \"" << tags[i] << "\" in\n" << tag_map->DebugString(); } @@ -321,10 +321,10 @@ TEST(TagMapTest, SameAs) { // A helper function to test that a TagMap's debug std::string and short // debug std::string each satisfy a matcher. template -void TestDebugString(const ::mediapipe::StatusOr>& - statusor_tag_map, - const std::vector& canonical_entries, - Matcher short_string_matcher) { +void TestDebugString( + const mediapipe::StatusOr>& statusor_tag_map, + const std::vector& canonical_entries, + Matcher short_string_matcher) { MP_ASSERT_OK(statusor_tag_map); tool::TagMap& tag_map = *statusor_tag_map.ValueOrDie(); std::string debug_string = tag_map.DebugString(); diff --git a/mediapipe/framework/tool/template_expander.cc b/mediapipe/framework/tool/template_expander.cc index e2de6e3e7..4c0d8f13b 100644 --- a/mediapipe/framework/tool/template_expander.cc +++ b/mediapipe/framework/tool/template_expander.cc @@ -98,7 +98,7 @@ mediapipe::Status ProtoPathSplit(const std::string& path, ProtoPath* result) { bool ok = absl::SimpleAtoi(id_pair.first, &tag) && absl::SimpleAtoi(id_pair.second, &index); if (!ok) { - status.Update(::mediapipe::InvalidArgumentError(path)); + status.Update(mediapipe::InvalidArgumentError(path)); } result->push_back(std::make_pair(tag, index)); } @@ -146,7 +146,7 @@ int FieldCount(const FieldValue& base, ProtoPath field_path, // The default implementation for the mediapipe template rule interpreter. class TemplateExpanderImpl { public: - explicit TemplateExpanderImpl(std::vector<::mediapipe::Status>* errors) + explicit TemplateExpanderImpl(std::vector* errors) : errors_(errors) {} // Applies the rules specified in a CalculatorGraphTemplate to a @@ -221,12 +221,12 @@ class TemplateExpanderImpl { std::vector* base) { if (!rule.has_path()) { base->push_back(output); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } if (rule.has_field_value()) { // For a non-repeated field, the field value is stored only in the rule. base->push_back(rule.field_value()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } ProtoPath field_path; mediapipe::Status status = @@ -242,7 +242,7 @@ class TemplateExpanderImpl { const std::vector& field_values, FieldValue* output) { if (!rule.has_path()) { *output = field_values[0]; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } ProtoPath field_path; RET_CHECK_OK( @@ -252,7 +252,7 @@ class TemplateExpanderImpl { // For a non-repeated field, only one value can be specified. 
if (!field_values.empty() && FieldCount(*output, field_path, GetFieldType(rule)) > 0) { - return ::mediapipe::InvalidArgumentError(absl::StrCat( + return mediapipe::InvalidArgumentError(absl::StrCat( "Multiple values specified for non-repeated field: ", rule.path())); } // For a non-repeated field, the field value is stored only in the rule. @@ -280,7 +280,7 @@ class TemplateExpanderImpl { if (!status.ok()) break; std::vector values; if (!ExpandTemplateRule(rules[i], base[0], &values)) { - status = ::mediapipe::InternalError("ExpandTemplateRule failed"); + status = mediapipe::InternalError("ExpandTemplateRule failed"); break; } edits.push_back(values); @@ -348,7 +348,7 @@ class TemplateExpanderImpl { // Retrieve the var param and the range expression. const TemplateExpression& rule = template_rules_.rule().Get(base_index); if (rule.arg().empty() || rule.arg().size() > 2) { - RecordError(::mediapipe::InvalidArgumentError( + RecordError(mediapipe::InvalidArgumentError( "Param declaration must specify a parameter name and " "may specify a single default value.")); } @@ -401,7 +401,7 @@ class TemplateExpanderImpl { TemplateArgument* result = GetItem(&environment_, expr.param()); if (result == nullptr) { RecordError( - ::mediapipe::NotFoundError(absl::StrCat("param: ", expr.param()))); + mediapipe::NotFoundError(absl::StrCat("param: ", expr.param()))); return AsArgument(0.0); } return *result; @@ -412,7 +412,7 @@ class TemplateExpanderImpl { TemplateArgument lhs = EvalExpression(expr.arg(0)); TemplateArgument* result = GetItem(lhs.mutable_dict(), expr.arg(1).param()); if (result == nullptr) { - RecordError(::mediapipe::NotFoundError( + RecordError(mediapipe::NotFoundError( absl::StrCat("param field: ", expr.arg(1).param()))); return AsArgument(0.0); } @@ -427,7 +427,7 @@ class TemplateExpanderImpl { } if (value.has_str()) { if (!absl::SimpleAtod(value.str(), &result)) { - RecordError(::mediapipe::InvalidArgumentError(value.str())); + RecordError(mediapipe::InvalidArgumentError(value.str())); } } return result; @@ -452,7 +452,7 @@ class TemplateExpanderImpl { return value.num() != 0; } else if (value.has_str()) { if (!absl::SimpleAtob(value.str(), &result)) { - RecordError(::mediapipe::InvalidArgumentError(value.str())); + RecordError(mediapipe::InvalidArgumentError(value.str())); } } return result; @@ -462,7 +462,7 @@ class TemplateExpanderImpl { TemplateArgument AsDict(const std::vector& args) { TemplateArgument result; if (args.size() % 2 != 0) { - RecordError(::mediapipe::InvalidArgumentError(absl::StrCat( + RecordError(mediapipe::InvalidArgumentError(absl::StrCat( "Dict requires an even number of arguments, got: ", args.size()))); return result; } @@ -613,11 +613,11 @@ class TemplateExpanderImpl { result->push_back(r[0]); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Record a Status if it indicates an error. - void RecordError(const ::mediapipe::Status& status) { + void RecordError(const mediapipe::Status& status) { if (!status.ok()) { errors_->push_back(status); } @@ -631,23 +631,23 @@ class TemplateExpanderImpl { TemplateDict environment_; // List of errors found in template parameters. - std::vector<::mediapipe::Status>* errors_; + std::vector* errors_; }; TemplateExpander::TemplateExpander() {} // Expands template rules within a proto message. // Replaces template rules with expanded sub-messages. 
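The ExpandTemplates definition that follows drains the collected errors_ into a single status via Status::Update. A minimal sketch of that folding pattern, using absl::Status as a stand-in for mediapipe::Status (an assumption made only so the sketch is self-contained; the real definition also logs each error):

#include <vector>

#include "absl/status/status.h"

// Folds accumulated errors into one status, mirroring how the
// ExpandTemplates definition below combines errors_. Status::Update()
// keeps the first non-OK status it sees and ignores later ones, so the
// first recorded error wins.
absl::Status FoldErrors(const std::vector<absl::Status>& errors) {
  absl::Status status;  // OK by default.
  for (const absl::Status& error : errors) {
    status.Update(error);
  }
  return status;
}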
-::mediapipe::Status TemplateExpander::ExpandTemplates( +mediapipe::Status TemplateExpander::ExpandTemplates( const TemplateDict& args, const CalculatorGraphTemplate& templ, CalculatorGraphConfig* output) { errors_.clear(); TemplateExpanderImpl expander(&errors_); if (!expander.ExpandTemplates(args, templ, output)) { - errors_.push_back(::mediapipe::InternalError("ExpandTemplates failed")); + errors_.push_back(mediapipe::InternalError("ExpandTemplates failed")); } - ::mediapipe::Status status; - for (const ::mediapipe::Status& error : errors_) { + mediapipe::Status status; + for (const mediapipe::Status& error : errors_) { LOG(ERROR) << error; status.Update(error); } diff --git a/mediapipe/framework/tool/template_expander.h b/mediapipe/framework/tool/template_expander.h index bf6c72362..f62e5b747 100644 --- a/mediapipe/framework/tool/template_expander.h +++ b/mediapipe/framework/tool/template_expander.h @@ -33,13 +33,13 @@ class TemplateExpander { // Applies the rules specified in a CalculatorGraphTemplate to a // CalculatorGraphConfig. Each rule references a nested field-value or // message and defines zero or more replacement values for it. - ::mediapipe::Status ExpandTemplates(const TemplateDict& args, - const CalculatorGraphTemplate& templ, - CalculatorGraphConfig* output); + mediapipe::Status ExpandTemplates(const TemplateDict& args, + const CalculatorGraphTemplate& templ, + CalculatorGraphConfig* output); private: // List of errors found in template parameters. - std::vector<::mediapipe::Status> errors_; + std::vector errors_; }; } // namespace tool diff --git a/mediapipe/framework/tool/template_parser.cc b/mediapipe/framework/tool/template_parser.cc index 2954566e8..b6d2fb371 100644 --- a/mediapipe/framework/tool/template_parser.cc +++ b/mediapipe/framework/tool/template_parser.cc @@ -1332,13 +1332,13 @@ bool IsFunctionOperator(const std::string& token) { // by the DynamicMessageFactory ("output"). These two Messages have // different Descriptors so Message::MergeFrom cannot be applied directly, // but they are expected to be equivalent. -::mediapipe::Status MergeFields(const Message& source, Message* dest) { +mediapipe::Status MergeFields(const Message& source, Message* dest) { std::unique_ptr temp(dest->New()); std::string temp_str; RET_CHECK(TextFormat::PrintToString(source, &temp_str)); RET_CHECK(TextFormat::ParseFromString(temp_str, temp.get())); dest->MergeFrom(*temp); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Returns the (tag, index) pairs in a field path. 
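A ProtoPath, per the helper above and the ProtoPathSplit hunk that follows, is a list of (field tag, index) pairs written as colon-separated pairs joined by '/'; a string such as "4:0/2:1" would address entry 0 of field 4, then entry 1 of field 2 (the concrete path string is our illustration, not taken from this change). A self-contained sketch of the same parsing logic:

#include <string>
#include <utility>
#include <vector>

#include "absl/status/status.h"
#include "absl/strings/numbers.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"

using ProtoPath = std::vector<std::pair<int, int>>;  // (field tag, index)

// Splits on '/', then splits each piece on ':' and parses both halves
// as integers, collecting an InvalidArgumentError for malformed pieces,
// as ProtoPathSplit does in the hunk below.
absl::Status SplitProtoPath(const std::string& path, ProtoPath* result) {
  absl::Status status;
  for (absl::string_view id : absl::StrSplit(path, '/')) {
    std::pair<std::string, std::string> id_pair = absl::StrSplit(id, ':');
    int tag = 0;
    int index = 0;
    if (!absl::SimpleAtoi(id_pair.first, &tag) ||
        !absl::SimpleAtoi(id_pair.second, &index)) {
      status.Update(absl::InvalidArgumentError(path));
    }
    result->push_back(std::make_pair(tag, index));
  }
  return status;
}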
@@ -1356,7 +1356,7 @@ mediapipe::Status ProtoPathSplit(const std::string& path, bool ok = absl::SimpleAtoi(id_pair.first, &tag) && absl::SimpleAtoi(id_pair.second, &index); if (!ok) { - status.Update(::mediapipe::InvalidArgumentError(path)); + status.Update(mediapipe::InvalidArgumentError(path)); } result->push_back(std::make_pair(tag, index)); } diff --git a/mediapipe/framework/tool/text_to_binary_graph.cc b/mediapipe/framework/tool/text_to_binary_graph.cc index 365fd91ee..1b4d53b01 100644 --- a/mediapipe/framework/tool/text_to_binary_graph.cc +++ b/mediapipe/framework/tool/text_to_binary_graph.cc @@ -99,11 +99,11 @@ int main(int argc, char** argv) { mediapipe::Status status; if (FLAGS_proto_source.empty()) { status.Update( - ::mediapipe::InvalidArgumentError("--proto_source must be specified")); + mediapipe::InvalidArgumentError("--proto_source must be specified")); } if (FLAGS_proto_output.empty()) { status.Update( - ::mediapipe::InvalidArgumentError("--proto_output must be specified")); + mediapipe::InvalidArgumentError("--proto_output must be specified")); } if (!status.ok()) { return EXIT_FAILURE; } diff --git a/mediapipe/framework/tool/validate.cc b/mediapipe/framework/tool/validate.cc index f0ddf13b3..c15a268eb 100644 --- a/mediapipe/framework/tool/validate.cc +++ b/mediapipe/framework/tool/validate.cc @@ -26,7 +26,7 @@ namespace mediapipe { namespace tool { -::mediapipe::Status ValidateInput(const InputCollection& input_collection) { +mediapipe::Status ValidateInput(const InputCollection& input_collection) { if (!input_collection.name().empty()) { MP_RETURN_IF_ERROR(tool::ValidateName(input_collection.name())).SetPrepend() << "InputCollection " << input_collection.name() @@ -34,14 +34,14 @@ namespace tool { } if (input_collection.input_type() <= InputCollection::UNKNOWN || input_collection.input_type() >= InputCollection::INVALID_UPPER_BOUND) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "InputCollection must specify a valid input_type."); } if (input_collection.file_name().empty()) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "InputCollection must specify a file_name."); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace tool diff --git a/mediapipe/framework/tool/validate.h b/mediapipe/framework/tool/validate.h index fcc10b32d..545e7387c 100644 --- a/mediapipe/framework/tool/validate.h +++ b/mediapipe/framework/tool/validate.h @@ -24,12 +24,12 @@ namespace mediapipe { namespace tool { -// Returns ::mediapipe::OkStatus() if the InputCollection is valid. An input +// Returns mediapipe::OkStatus() if the InputCollection is valid. An input // collection is invalid if it does not have the proper fields set // depending on what its input_type field is. Furthermore, if it uses // INLINE, then the number of value fields in each input must match // the number of input_side_packet_name fields. -::mediapipe::Status ValidateInput(const InputCollection& input); +mediapipe::Status ValidateInput(const InputCollection& input); } // namespace tool } // namespace mediapipe diff --git a/mediapipe/framework/tool/validate_name.cc b/mediapipe/framework/tool/validate_name.cc index 48cd48dfb..e98fd3bf9 100644 --- a/mediapipe/framework/tool/validate_name.cc +++ b/mediapipe/framework/tool/validate_name.cc @@ -41,7 +41,7 @@ namespace tool { #define MEDIAPIPE_TAG_INDEX_REGEX \ "(" MEDIAPIPE_TAG_REGEX ")?(:" MEDIAPIPE_NUMBER_REGEX ")?"
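The regexes above spell out the lexical rules that the validators rewritten in the next hunks enforce. A usage sketch with expected outcomes (the driver function is hypothetical; the expectations follow the comments in validate_name.h shown later in this diff):

#include <string>

#include "mediapipe/framework/port/logging.h"
#include "mediapipe/framework/tool/validate_name.h"

void NamingExamples() {
  namespace tool = mediapipe::tool;
  // Names: lowercase letters, digits, underscores; no leading digit.
  CHECK(tool::ValidateName("video_frames_1").ok());
  CHECK(!tool::ValidateName("Frames").ok());  // Uppercase is rejected.
  // Tags: uppercase letters, digits, underscores; no leading digit.
  CHECK(tool::ValidateTag("VIDEO_2").ok());
  CHECK(!tool::ValidateTag("video").ok());
  // TAG:index:name parsing, per the examples in validate_name.h.
  std::string tag;
  std::string name;
  int index = 0;
  CHECK(tool::ParseTagIndexName("VIDEO:1:frames", &tag, &index, &name).ok());
  // Yields tag == "VIDEO", index == 1, name == "frames".
}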
-::mediapipe::Status GetTagAndNameInfo( +mediapipe::Status GetTagAndNameInfo( const proto_ns::RepeatedPtrField<ProtoString>& tags_and_names, TagAndNameInfo* info) { RET_CHECK(info); @@ -59,15 +59,15 @@ namespace tool { if (info->tags.size() > 0 && info->names.size() != info->tags.size()) { info->tags.clear(); info->names.clear(); - return ::mediapipe::InvalidArgumentError(absl::StrCat( + return mediapipe::InvalidArgumentError(absl::StrCat( "Each set of names must use exclusively either tags or indexes. " "Encountered: \"", absl::StrJoin(tags_and_names, "\", \""), "\"")); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status SetFromTagAndNameInfo( +mediapipe::Status SetFromTagAndNameInfo( const TagAndNameInfo& info, proto_ns::RepeatedPtrField<ProtoString>* tags_and_names) { tags_and_names->Clear(); @@ -78,7 +78,7 @@ namespace tool { } } else { if (info.names.size() != info.tags.size()) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Number of names " << info.names.size() << " does not match the number of tags " << info.tags.size(); } @@ -88,52 +88,52 @@ namespace tool { *tags_and_names->Add() = absl::StrCat(info.tags[i], ":", info.names[i]); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ValidateName(const std::string& name) { +mediapipe::Status ValidateName(const std::string& name) { return name.length() > 0 && (name[0] == '_' || islower(name[0])) && std::all_of(name.begin() + 1, name.end(), [](char c) { return c == '_' || isdigit(c) || islower(c); }) - ? ::mediapipe::OkStatus() - : ::mediapipe::InvalidArgumentError(absl::StrCat( + ? mediapipe::OkStatus() + : mediapipe::InvalidArgumentError(absl::StrCat( "Name \"", absl::CEscape(name), "\" does not match \"" MEDIAPIPE_NAME_REGEX "\".")); } -::mediapipe::Status ValidateNumber(const std::string& number) { +mediapipe::Status ValidateNumber(const std::string& number) { return (number.length() == 1 && isdigit(number[0])) || (number.length() > 1 && isdigit(number[0]) && number[0] != '0' && std::all_of(number.begin() + 1, number.end(), [](char c) { return isdigit(c); })) - ? ::mediapipe::OkStatus() - : ::mediapipe::InvalidArgumentError(absl::StrCat( + ? mediapipe::OkStatus() + : mediapipe::InvalidArgumentError(absl::StrCat( "Number \"", absl::CEscape(number), "\" does not match \"" MEDIAPIPE_NUMBER_REGEX "\".")); } -::mediapipe::Status ValidateTag(const std::string& tag) { +mediapipe::Status ValidateTag(const std::string& tag) { return tag.length() > 0 && (tag[0] == '_' || isupper(tag[0])) && std::all_of(tag.begin() + 1, tag.end(), [](char c) { return c == '_' || isdigit(c) || isupper(c); }) - ? ::mediapipe::OkStatus() - : ::mediapipe::InvalidArgumentError(absl::StrCat( + ? mediapipe::OkStatus() - : mediapipe::InvalidArgumentError(absl::StrCat( "Tag \"", absl::CEscape(tag), "\" does not match \"" MEDIAPIPE_TAG_REGEX "\".")); } -::mediapipe::Status ParseTagAndName(const std::string& tag_and_name, - std::string* tag, std::string* name) { +mediapipe::Status ParseTagAndName(const std::string& tag_and_name, + std::string* tag, std::string* name) { // An optional tag and colon, followed by a name.
RET_CHECK(tag); RET_CHECK(name); - ::mediapipe::Status tag_status = ::mediapipe::OkStatus(); - ::mediapipe::Status name_status = ::mediapipe::UnknownError(""); + mediapipe::Status tag_status = mediapipe::OkStatus(); + mediapipe::Status name_status = mediapipe::UnknownError(""); int name_index = 0; std::vector<std::string> v = absl::StrSplit(tag_and_name, ':'); if (v.size() == 1) { @@ -144,11 +144,11 @@ namespace tool { name_status = ValidateName(v[1]); name_index = 1; } - if (name_index == -1 || tag_status != ::mediapipe::OkStatus() || - name_status != ::mediapipe::OkStatus()) { + if (name_index == -1 || tag_status != mediapipe::OkStatus() || + name_status != mediapipe::OkStatus()) { tag->clear(); name->clear(); - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( absl::StrCat("\"tag and name\" is invalid, \"", tag_and_name, "\" does not match " "\"" MEDIAPIPE_TAG_AND_NAME_REGEX @@ -156,20 +156,20 @@ namespace tool { } *tag = name_index == 1 ? v[0] : ""; *name = v[name_index]; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ParseTagIndexName(const std::string& tag_index_name, - std::string* tag, int* index, - std::string* name) { +mediapipe::Status ParseTagIndexName(const std::string& tag_index_name, + std::string* tag, int* index, + std::string* name) { // An optional tag and colon, an optional index and colon, followed by a name. RET_CHECK(tag); RET_CHECK(index); RET_CHECK(name); - ::mediapipe::Status tag_status = ::mediapipe::OkStatus(); - ::mediapipe::Status number_status = ::mediapipe::OkStatus(); - ::mediapipe::Status name_status = ::mediapipe::UnknownError(""); + mediapipe::Status tag_status = mediapipe::OkStatus(); + mediapipe::Status number_status = mediapipe::OkStatus(); + mediapipe::Status name_status = mediapipe::UnknownError(""); int name_index = -1; int the_index = 0; std::vector<std::string> v = absl::StrSplit(tag_index_name, ':'); @@ -195,7 +195,7 @@ namespace tool { } // else omitted, name_index == -1, triggering error. if (name_index == -1 || !tag_status.ok() || !number_status.ok() || !name_status.ok()) { - return ::mediapipe::InvalidArgumentError(absl::StrCat( + return mediapipe::InvalidArgumentError(absl::StrCat( "TAG:index:name is invalid, \"", tag_index_name, "\" does not match " "\"" MEDIAPIPE_TAG_INDEX_NAME_REGEX @@ -204,16 +204,16 @@ namespace tool { } *tag = name_index != 0 ? v[0] : ""; *index = the_index; *name = v[name_index]; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ParseTagIndex(const std::string& tag_index, - std::string* tag, int* index) { +mediapipe::Status ParseTagIndex(const std::string& tag_index, std::string* tag, + int* index) { RET_CHECK(tag); RET_CHECK(index); - ::mediapipe::Status tag_status = ::mediapipe::OkStatus(); - ::mediapipe::Status number_status = ::mediapipe::OkStatus(); + mediapipe::Status tag_status = mediapipe::OkStatus(); + mediapipe::Status number_status = mediapipe::OkStatus(); int the_index = -1; std::vector<std::string> v = absl::StrSplit(tag_index, ':'); if (v.size() == 1) { @@ -234,14 +234,14 @@ namespace tool { } } // else omitted, the_index == -1, triggering error.
if (the_index == -1 || !tag_status.ok() || !number_status.ok()) { - return ::mediapipe::InvalidArgumentError(absl::StrCat( + return mediapipe::InvalidArgumentError(absl::StrCat( "TAG:index is invalid, \"", tag_index, "\" does not match " "\"" MEDIAPIPE_TAG_INDEX_REGEX "\" (examples: \"TAG\" \"VIDEO:2\").")); } *tag = v[0]; *index = the_index; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } #undef MEDIAPIPE_NAME_REGEX diff --git a/mediapipe/framework/tool/validate_name.h b/mediapipe/framework/tool/validate_name.h index 4409804cc..1e9299f75 100644 --- a/mediapipe/framework/tool/validate_name.h +++ b/mediapipe/framework/tool/validate_name.h @@ -52,7 +52,7 @@ ABSL_DEPRECATED( "support the TAG:INDEX:name notation. You can use Create() to create the " "tag map, and then Names(), Mapping(), and other methods to access the " "tag, index and name information.") -::mediapipe::Status GetTagAndNameInfo( +mediapipe::Status GetTagAndNameInfo( const proto_ns::RepeatedPtrField& tags_and_names, TagAndNameInfo* info); @@ -62,7 +62,7 @@ ABSL_DEPRECATED( "Prefer using mediapipe::tool::TagMap instead, since this method does not " "support the TAG:INDEX:name notation. You can use CanonicalEntries() to " "translate a tag map to a RepeatedPtrField of tag and names.") -::mediapipe::Status SetFromTagAndNameInfo( +mediapipe::Status SetFromTagAndNameInfo( const TagAndNameInfo& info, proto_ns::RepeatedPtrField* tags_and_names); @@ -76,17 +76,17 @@ ABSL_DEPRECATED( // trainer/calculator names. // (3) Because input side packet names end up in model directory names, // where lower case naming is the norm. -::mediapipe::Status ValidateName(const std::string& name); +mediapipe::Status ValidateName(const std::string& name); // The std::string is a valid tag name. Tags use only upper case letters, // numbers, and underscores. -::mediapipe::Status ValidateTag(const std::string& tag); +mediapipe::Status ValidateTag(const std::string& tag); // Parse a "Tag and Name" std::string into a tag and a name. // The format is an optional tag and colon, followed by a name. // Example 1: "VIDEO:frames2" -> tag: "VIDEO", name: "frames2" // Example 2: "video_frames_1" -> tag: "", name: "video_frames_1" -::mediapipe::Status ParseTagAndName(const std::string& tag_and_name, - std::string* tag, std::string* name); +mediapipe::Status ParseTagAndName(const std::string& tag_and_name, + std::string* tag, std::string* name); // Parse a generic TAG:index:name std::string. The format is a tag, then an // index, then a name. The tag and index are optional. If the index @@ -96,9 +96,9 @@ ABSL_DEPRECATED( // "VIDEO:frames2" -> tag: "VIDEO", index: 0, name: "frames2" // "VIDEO:1:frames" -> tag: "VIDEO", index: 1, name: "frames" // "raw_frames" -> tag: "", index: -1, name: "raw_frames" -::mediapipe::Status ParseTagIndexName(const std::string& tag_and_name, - std::string* tag, int* index, - std::string* name); +mediapipe::Status ParseTagIndexName(const std::string& tag_and_name, + std::string* tag, int* index, + std::string* name); // Parse a generic TAG:index std::string. The format is a tag, then an index // with both being optional. 
If the tag is missing it is assumed to be @@ -109,8 +109,8 @@ ABSL_DEPRECATED( // "VIDEO:1" -> tag: "VIDEO", index: 1 // ":2" -> tag: "", index: 2 // "" -> tag: "", index: 0 -::mediapipe::Status ParseTagIndex(const std::string& tag_and_index, - std::string* tag, int* index); +mediapipe::Status ParseTagIndex(const std::string& tag_and_index, + std::string* tag, int* index); } // namespace tool } // namespace mediapipe diff --git a/mediapipe/framework/tool/validate_name_test.cc b/mediapipe/framework/tool/validate_name_test.cc index 3eb9f9715..000be5bff 100644 --- a/mediapipe/framework/tool/validate_name_test.cc +++ b/mediapipe/framework/tool/validate_name_test.cc @@ -207,9 +207,8 @@ TEST(ValidateNameTest, ParseTagIndexName) { "mieko_harada"); TestPassParseTagIndexName("A1:100:mieko1", "A1", 100, "mieko1"); TestPassParseTagIndexName( - absl::StrCat("A1:", ::mediapipe::internal::kMaxCollectionItemId, - ":mieko1"), - "A1", ::mediapipe::internal::kMaxCollectionItemId, "mieko1"); + absl::StrCat("A1:", mediapipe::internal::kMaxCollectionItemId, ":mieko1"), + "A1", mediapipe::internal::kMaxCollectionItemId, "mieko1"); // Failure cases. TestFailParseTagIndexName(""); // Empty name. @@ -246,7 +245,7 @@ TEST(ValidateNameTest, ParseTagIndexName) { TestFailParseTagIndexName("A:01:name"); // Leading zero. TestFailParseTagIndexName("A:00:name"); // Leading zero. TestFailParseTagIndexName( - absl::StrCat("A:", ::mediapipe::internal::kMaxCollectionItemId + 1, + absl::StrCat("A:", mediapipe::internal::kMaxCollectionItemId + 1, ":a")); // Too large an index. // Extra field TestFailParseTagIndexName("A:1:a:"); // extra field. diff --git a/mediapipe/framework/tool/validate_type.cc b/mediapipe/framework/tool/validate_type.cc index 79a6e36e7..3e768fc56 100644 --- a/mediapipe/framework/tool/validate_type.cc +++ b/mediapipe/framework/tool/validate_type.cc @@ -38,7 +38,7 @@ namespace mediapipe { namespace tool { -::mediapipe::Status RunGeneratorFillExpectations( +mediapipe::Status RunGeneratorFillExpectations( const PacketGeneratorConfig& input_config, const std::string& package) { // TODO Remove conversion after everyone uses input/output // side packet. @@ -64,7 +64,7 @@ namespace tool { } // Check that everything got initialized. - std::vector<::mediapipe::Status> statuses; + std::vector statuses; statuses.push_back(ValidatePacketTypeSet(contract.InputSidePackets())); statuses.push_back(ValidatePacketTypeSet(contract.OutputSidePackets())); return tool::CombinedStatus( @@ -72,7 +72,7 @@ namespace tool { statuses); } -::mediapipe::Status RunGenerateAndValidateTypes( +mediapipe::Status RunGenerateAndValidateTypes( const std::string& packet_generator_name, const PacketGeneratorOptions& extendable_options, const PacketSet& input_side_packets, PacketSet* output_side_packets, @@ -95,7 +95,7 @@ namespace tool { .SetPrepend() << packet_generator_name << "::FillExpectations failed: "; // Check that the types were filled well. 
- std::vector<::mediapipe::Status> statuses; + std::vector statuses; statuses.push_back(ValidatePacketTypeSet(input_side_packet_types)); statuses.push_back(ValidatePacketTypeSet(output_side_packet_types)); MP_RETURN_IF_ERROR(tool::CombinedStatus( @@ -118,7 +118,7 @@ namespace tool { << packet_generator_name << "::FillExpectations expected different " "output type than those produced: "; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace tool diff --git a/mediapipe/framework/tool/validate_type.h b/mediapipe/framework/tool/validate_type.h index 9a5686b4f..5f87f40f5 100644 --- a/mediapipe/framework/tool/validate_type.h +++ b/mediapipe/framework/tool/validate_type.h @@ -26,14 +26,14 @@ namespace mediapipe { namespace tool { // Equivalent functions for PacketGenerators. -::mediapipe::Status RunGeneratorFillExpectations( +mediapipe::Status RunGeneratorFillExpectations( const PacketGeneratorConfig& config, const std::string& package = "mediapipe"); // Run PacketGenerator::Generate() on the given generator, options, // and inputs to produce outputs. Validate the types of the inputs and // outputs using PacketGenerator::FillExpectations. -::mediapipe::Status RunGenerateAndValidateTypes( +mediapipe::Status RunGenerateAndValidateTypes( const std::string& packet_generator_name, const PacketGeneratorOptions& extendable_options, const PacketSet& input_side_packets, PacketSet* output_side_packets, diff --git a/mediapipe/framework/type_map.h b/mediapipe/framework/type_map.h index 366da8d54..b37522cb9 100644 --- a/mediapipe/framework/type_map.h +++ b/mediapipe/framework/type_map.h @@ -36,9 +36,9 @@ // // // If your type is serialized by converting it to an easily serializable // // type (such as a proto) use a proxy. -// // See mediapipe/framework/formats/location.cc for more details. -// MEDIAPIPE_REGISTER_TYPE_WITH_PROXY( -// ::mediapipe::Location, "::mediapipe::Location", +// // See mediapipe/framework/formats/location.cc for more +// details. MEDIAPIPE_REGISTER_TYPE_WITH_PROXY( +// mediapipe::Location, "mediapipe::Location", // ::mediapipe::SerializeUsingGenericFn, // ::mediapipe::DeserializeUsingGenericFn; -using DeserializeFn = std::function<::mediapipe::Status( +using DeserializeFn = std::function* holder_base)>; @@ -252,7 +252,7 @@ DEFINE_MEDIAPIPE_TYPE_MAP(PacketTypeStringToMediaPipeTypeData, std::string); // Even std types should have their names start with "::std". // Only basic types such as "int" can be left bare. Remember to // include full namespaces for template arguments. For example -// "::map". +// "::map". 
// // Examples: // Prefers an additional macro to define a type that contains comma(s) in @@ -271,20 +271,18 @@ DEFINE_MEDIAPIPE_TYPE_MAP(PacketTypeStringToMediaPipeTypeData, std::string); // #define MEDIAPIPE_REGISTER_TYPE(type, type_name, serialize_fn, deserialize_fn) \ SET_MEDIAPIPE_TYPE_MAP_VALUE( \ - ::mediapipe::PacketTypeIdToMediaPipeTypeData, \ - ::mediapipe::tool::GetTypeHash< \ - ::mediapipe::type_map_internal::ReflectType<void(type*)>::Type>(), \ - (::mediapipe::MediaPipeTypeData{ \ - ::mediapipe::tool::GetTypeHash< \ - ::mediapipe::type_map_internal::ReflectType<void(type*)>::Type>(), \ + mediapipe::PacketTypeIdToMediaPipeTypeData, \ + mediapipe::tool::GetTypeHash< \ + mediapipe::type_map_internal::ReflectType<void(type*)>::Type>(), \ + (mediapipe::MediaPipeTypeData{ \ + mediapipe::tool::GetTypeHash< \ + mediapipe::type_map_internal::ReflectType<void(type*)>::Type>(), \ type_name, serialize_fn, deserialize_fn})); \ SET_MEDIAPIPE_TYPE_MAP_VALUE( \ - ::mediapipe::PacketTypeStringToMediaPipeTypeData, type_name, \ - (::mediapipe::MediaPipeTypeData{ \ - ::mediapipe::tool::GetTypeHash< \ - ::mediapipe::type_map_internal::ReflectType<void(type*)>::Type>(), \ + mediapipe::PacketTypeStringToMediaPipeTypeData, type_name, \ + (mediapipe::MediaPipeTypeData{ \ + mediapipe::tool::GetTypeHash< \ + mediapipe::type_map_internal::ReflectType<void(type*)>::Type>(), \ type_name, serialize_fn, deserialize_fn})); // End define MEDIAPIPE_REGISTER_TYPE. @@ -296,18 +294,18 @@ DEFINE_MEDIAPIPE_TYPE_MAP(PacketTypeStringToMediaPipeTypeData, std::string); // separated by double colons. // // Example 1: register type with non-std::string proxy. -// ::mediapipe::Status ToProxyFn( +// mediapipe::Status ToProxyFn( // const ClassType& obj, ProxyType* proxy) // { // ... -// return ::mediapipe::OkStatus(); +// return mediapipe::OkStatus(); // } // -// ::mediapipe::Status FromProxyFn( +// mediapipe::Status FromProxyFn( // const ProxyType& proxy, ClassType* obj) // { // ... -// return ::mediapipe::OkStatus(); +// return mediapipe::OkStatus(); // } // // MEDIAPIPE_REGISTER_TYPE_WITH_PROXY( @@ -318,16 +316,16 @@ DEFINE_MEDIAPIPE_TYPE_MAP(PacketTypeStringToMediaPipeTypeData, std::string); // ProxyType>, ToProxyFn, FromProxyFn); // // Example 2: register type with std::string proxy. -// ::mediapipe::Status ToProxyFn(const ClassType& obj, std::string* encoding) +// mediapipe::Status ToProxyFn(const ClassType& obj, std::string* encoding) // { // ... -// return ::mediapipe::OkStatus(); +// return mediapipe::OkStatus(); // } // -// ::mediapipe::Status FromProxyFn( +// mediapipe::Status FromProxyFn( // const ProxyType& proxy, std::string* encoding) { // ...
-// return ::mediapipe::OkStatus(); +// return mediapipe::OkStatus(); // } // // MEDIAPIPE_REGISTER_TYPE_WITH_PROXY( @@ -339,24 +337,22 @@ DEFINE_MEDIAPIPE_TYPE_MAP(PacketTypeStringToMediaPipeTypeData, std::string); #define MEDIAPIPE_REGISTER_TYPE_WITH_PROXY( \ type, type_name, serialize_fn, deserialize_fn, to_proxy_fn, from_proxy_fn) \ SET_MEDIAPIPE_TYPE_MAP_VALUE( \ - ::mediapipe::PacketTypeIdToMediaPipeTypeData, \ - ::mediapipe::tool::GetTypeHash< \ - ::mediapipe::type_map_internal::ReflectType<void(type*)>::Type>(), \ - (::mediapipe::MediaPipeTypeData{ \ - ::mediapipe::tool::GetTypeHash< \ - ::mediapipe::type_map_internal::ReflectType<void(type*)>::Type>(), \ + mediapipe::PacketTypeIdToMediaPipeTypeData, \ + mediapipe::tool::GetTypeHash< \ + mediapipe::type_map_internal::ReflectType<void(type*)>::Type>(), \ + (mediapipe::MediaPipeTypeData{ \ + mediapipe::tool::GetTypeHash< \ + mediapipe::type_map_internal::ReflectType<void(type*)>::Type>(), \ type_name, \ std::bind(&serialize_fn, to_proxy_fn, std::placeholders::_1, \ std::placeholders::_2), \ std::bind(&deserialize_fn, from_proxy_fn, std::placeholders::_1, \ std::placeholders::_2)})); \ SET_MEDIAPIPE_TYPE_MAP_VALUE( \ - ::mediapipe::PacketTypeStringToMediaPipeTypeData, type_name, \ - (::mediapipe::MediaPipeTypeData{ \ - ::mediapipe::tool::GetTypeHash< \ - ::mediapipe::type_map_internal::ReflectType<void(type*)>::Type>(), \ + mediapipe::PacketTypeStringToMediaPipeTypeData, type_name, \ + (mediapipe::MediaPipeTypeData{ \ + mediapipe::tool::GetTypeHash< \ + mediapipe::type_map_internal::ReflectType<void(type*)>::Type>(), \ type_name, \ std::bind(&serialize_fn, to_proxy_fn, std::placeholders::_1, \ std::placeholders::_2), \ @@ -383,7 +379,7 @@ const std::string MediaPipeTypeStringOrDemangled() { if (type_string) { return *type_string; } else { - return ::mediapipe::Demangle(tool::TypeId<T>().name()); + return mediapipe::Demangle(tool::TypeId<T>().name()); } } diff --git a/mediapipe/framework/validated_graph_config.cc b/mediapipe/framework/validated_graph_config.cc index 13d236560..f6e574301 100644 --- a/mediapipe/framework/validated_graph_config.cc +++ b/mediapipe/framework/validated_graph_config.cc @@ -115,13 +115,13 @@ std::string DebugName(const CalculatorGraphConfig& config, // // Converts the graph-level num_threads field to an ExecutorConfig for the // default executor with the executor type unspecified.
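Concretely, the canonicalization described above treats the following two configs as equivalent (built in C++ here for illustration; the field names come from calculator.proto), and AddPredefinedExecutorConfigs below rejects a config that sets both at once:

#include "mediapipe/framework/calculator.pb.h"

// Graph-level shorthand: a bare thread count for the default executor.
mediapipe::CalculatorGraphConfig WithNumThreads() {
  mediapipe::CalculatorGraphConfig config;
  config.set_num_threads(4);
  return config;
}

// Canonical form: an ExecutorConfig whose empty name denotes the default
// executor, with the executor type left unspecified as the comment says.
mediapipe::CalculatorGraphConfig WithExecutorConfig() {
  mediapipe::CalculatorGraphConfig config;
  mediapipe::ExecutorConfig* executor = config.add_executor();
  executor->set_name("");
  // Thread count and similar settings live in the executor options;
  // the exact options proto is omitted in this sketch.
  return config;
}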
-::mediapipe::Status AddPredefinedExecutorConfigs( +mediapipe::Status AddPredefinedExecutorConfigs( CalculatorGraphConfig* graph_config) { bool has_default_executor_config = false; for (ExecutorConfig& executor_config : *graph_config->mutable_executor()) { if (executor_config.name().empty()) { if (graph_config->num_threads()) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "ExecutorConfig for the default executor and the graph-level " "num_threads field should not both be specified."); } @@ -138,10 +138,10 @@ std::string DebugName(const CalculatorGraphConfig& config, graph_config->clear_num_threads(); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status PerformBasicTransforms( +mediapipe::Status PerformBasicTransforms( const CalculatorGraphConfig& input_graph_config, const GraphRegistry* graph_registry, CalculatorGraphConfig* output_graph_config) { @@ -165,7 +165,7 @@ std::string DebugName(const CalculatorGraphConfig& config, } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace @@ -188,7 +188,7 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { << static_cast(node_type); } -::mediapipe::Status NodeTypeInfo::Initialize( +mediapipe::Status NodeTypeInfo::Initialize( const ValidatedGraphConfig& validated_graph, const CalculatorGraphConfig::Node& node, int node_index) { node_.type = NodeType::CALCULATOR; @@ -208,14 +208,14 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { tool::ParseTagIndex(input_stream_info.tag_index(), &tag, &index)); CollectionItemId id = contract_.Inputs().GetId(tag, index); if (!id.IsValid()) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Input stream with tag_index \"" << input_stream_info.tag_index() << "\" requested in InputStreamInfo but is not an input stream " "of the calculator."; } if (id_used[id.value()]) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Input stream with tag_index \"" << input_stream_info.tag_index() << "\" has more than one InputStreamInfo."; @@ -229,7 +229,7 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { RET_CHECK_EQ(&node.options(), &contract_.Options()); #if !defined(MEDIAPIPE_PROTO_LITE) std::set type_urls; - for (const ::mediapipe::protobuf::Any& options : node.node_options()) { + for (const mediapipe::protobuf::Any& options : node.node_options()) { RET_CHECK(type_urls.insert(options.type_url()).second) << "Options type: '" << options.type_url() << "' specified more than once for a single calculator node config."; @@ -240,26 +240,23 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { node_class, &contract_)); // Validate result of FillExpectations or GetContract. 
- std::vector<::mediapipe::Status> statuses; - ::mediapipe::Status status = ValidatePacketTypeSet(contract_.Inputs()); + std::vector statuses; + mediapipe::Status status = ValidatePacketTypeSet(contract_.Inputs()); if (!status.ok()) { statuses.push_back( - ::mediapipe::StatusBuilder(std::move(status), MEDIAPIPE_LOC) - .SetPrepend() + mediapipe::StatusBuilder(std::move(status), MEDIAPIPE_LOC).SetPrepend() << "For input streams "); } status = ValidatePacketTypeSet(contract_.Outputs()); if (!status.ok()) { statuses.push_back( - ::mediapipe::StatusBuilder(std::move(status), MEDIAPIPE_LOC) - .SetPrepend() + mediapipe::StatusBuilder(std::move(status), MEDIAPIPE_LOC).SetPrepend() << "For output streams "); } status = ValidatePacketTypeSet(contract_.InputSidePackets()); if (!status.ok()) { statuses.push_back( - ::mediapipe::StatusBuilder(std::move(status), MEDIAPIPE_LOC) - .SetPrepend() + mediapipe::StatusBuilder(std::move(status), MEDIAPIPE_LOC).SetPrepend() << "For input side packets "); } if (!statuses.empty()) { @@ -271,10 +268,10 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { " failed to validate: "), statuses); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status NodeTypeInfo::Initialize( +mediapipe::Status NodeTypeInfo::Initialize( const ValidatedGraphConfig& validated_graph, const PacketGeneratorConfig& node, int node_index) { node_.type = NodeType::PACKET_GENERATOR; @@ -298,8 +295,8 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { } // Validate result of FillExpectations. - std::vector<::mediapipe::Status> statuses; - ::mediapipe::Status status = + std::vector statuses; + mediapipe::Status status = ValidatePacketTypeSet(contract_.InputSidePackets()); if (!status.ok()) { statuses.push_back(std::move(status)); @@ -313,10 +310,10 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { absl::StrCat(node_class, "::FillExpectations failed to validate: "), statuses); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status NodeTypeInfo::Initialize( +mediapipe::Status NodeTypeInfo::Initialize( const ValidatedGraphConfig& validated_graph, const StatusHandlerConfig& node, int node_index) { node_.type = NodeType::STATUS_HANDLER; @@ -342,10 +339,10 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { MP_RETURN_IF_ERROR(ValidatePacketTypeSet(contract_.InputSidePackets())) .SetPrepend() << node_class << "::FillExpectations failed to validate: "; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ValidatedGraphConfig::Initialize( +mediapipe::Status ValidatedGraphConfig::Initialize( const CalculatorGraphConfig& input_config, const GraphRegistry* graph_registry) { RET_CHECK(!initialized_) @@ -427,10 +424,10 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { << config_.DebugString(); #endif initialized_ = true; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ValidatedGraphConfig::Initialize( +mediapipe::Status ValidatedGraphConfig::Initialize( const std::string& graph_type, const Subgraph::SubgraphOptions* options, const GraphRegistry* graph_registry) { graph_registry = @@ -440,7 +437,7 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { return Initialize(status_or_config.ValueOrDie(), graph_registry); } -::mediapipe::Status ValidatedGraphConfig::Initialize( +mediapipe::Status ValidatedGraphConfig::Initialize( const std::vector& input_configs, const 
std::vector& input_templates, const std::string& graph_type, const Subgraph::SubgraphOptions* options) { @@ -454,12 +451,12 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { return Initialize(graph_type, options, &graph_registry); } -::mediapipe::Status ValidatedGraphConfig::InitializeCalculatorInfo() { - std::vector<::mediapipe::Status> statuses; +mediapipe::Status ValidatedGraphConfig::InitializeCalculatorInfo() { + std::vector statuses; calculators_.reserve(config_.node_size()); for (const auto& node : config_.node()) { calculators_.emplace_back(); - ::mediapipe::Status status = + mediapipe::Status status = calculators_.back().Initialize(*this, node, calculators_.size() - 1); if (!status.ok()) { statuses.push_back(status); @@ -469,12 +466,12 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { statuses); } -::mediapipe::Status ValidatedGraphConfig::InitializeGeneratorInfo() { - std::vector<::mediapipe::Status> statuses; +mediapipe::Status ValidatedGraphConfig::InitializeGeneratorInfo() { + std::vector statuses; generators_.reserve(config_.packet_generator_size()); for (const auto& node : config_.packet_generator()) { generators_.emplace_back(); - ::mediapipe::Status status = + mediapipe::Status status = generators_.back().Initialize(*this, node, generators_.size() - 1); if (!status.ok()) { statuses.push_back(status); @@ -484,12 +481,12 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { statuses); } -::mediapipe::Status ValidatedGraphConfig::InitializeStatusHandlerInfo() { - std::vector<::mediapipe::Status> statuses; +mediapipe::Status ValidatedGraphConfig::InitializeStatusHandlerInfo() { + std::vector statuses; status_handlers_.reserve(config_.status_handler_size()); for (const auto& node : config_.status_handler()) { status_handlers_.emplace_back(); - ::mediapipe::Status status = status_handlers_.back().Initialize( + mediapipe::Status status = status_handlers_.back().Initialize( *this, node, status_handlers_.size() - 1); if (!status.ok()) { statuses.push_back(status); @@ -499,7 +496,7 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { statuses); } -::mediapipe::Status ValidatedGraphConfig::InitializeSidePacketInfo( +mediapipe::Status ValidatedGraphConfig::InitializeSidePacketInfo( bool* need_sorting_ptr) { for (NodeTypeInfo* node_type_info : sorted_nodes_) { MP_RETURN_IF_ERROR(AddInputSidePacketsForNode(node_type_info)); @@ -507,7 +504,7 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { AddOutputSidePacketsForNode(node_type_info, need_sorting_ptr)); } if (need_sorting_ptr && *need_sorting_ptr) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } for (int index = 0; index < config_.status_handler_size(); ++index) { NodeTypeInfo* node_type_info = &status_handlers_[index]; @@ -516,10 +513,10 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { RET_CHECK_EQ(node_type_info->Node().index, index); MP_RETURN_IF_ERROR(AddInputSidePacketsForNode(node_type_info)); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ValidatedGraphConfig::AddInputSidePacketsForNode( +mediapipe::Status ValidatedGraphConfig::AddInputSidePacketsForNode( NodeTypeInfo* node_type_info) { node_type_info->SetInputSidePacketBaseIndex(input_side_packets_.size()); const tool::TagMap& tag_map = @@ -542,10 +539,10 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { edge_info.name = name; edge_info.packet_type = &node_type_info->InputSidePacketTypes().Get(id); 
} - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ValidatedGraphConfig::AddOutputSidePacketsForNode( +mediapipe::Status ValidatedGraphConfig::AddOutputSidePacketsForNode( NodeTypeInfo* node_type_info, bool* need_sorting_ptr) { node_type_info->SetOutputSidePacketBaseIndex(output_side_packets_.size()); const tool::TagMap& tag_map = @@ -559,27 +556,27 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { edge_info.name = name; edge_info.packet_type = &node_type_info->OutputSidePacketTypes().Get(id); - if (!::mediapipe::InsertIfNotPresent(&side_packet_to_producer_, name, - output_side_packets_.size() - 1)) { - return ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + if (!mediapipe::InsertIfNotPresent(&side_packet_to_producer_, name, + output_side_packets_.size() - 1)) { + return mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "Output Side Packet \"" << name << "\" defined twice."; } - if (::mediapipe::ContainsKey(required_side_packets_, name)) { + if (mediapipe::ContainsKey(required_side_packets_, name)) { if (need_sorting_ptr) { *need_sorting_ptr = true; // Don't return early, we still need to gather information about // every side packet in order to sort. } else { - return ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "Side packet \"" << name << "\" was produced after it was used."; } } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ValidatedGraphConfig::InitializeStreamInfo( +mediapipe::Status ValidatedGraphConfig::InitializeStreamInfo( bool* need_sorting_ptr) { // Define output streams for graph input streams. ASSIGN_OR_RETURN(std::shared_ptr graph_input_streams, @@ -608,10 +605,10 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { // Validate tag-name-indexes for graph output streams. MP_RETURN_IF_ERROR(tool::TagMap::Create(config_.output_stream()).status()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ValidatedGraphConfig::AddOutputStreamsForNode( +mediapipe::Status ValidatedGraphConfig::AddOutputStreamsForNode( NodeTypeInfo* node_type_info) { // Define output streams connecting calculators. 
node_type_info->SetOutputStreamBaseIndex(output_streams_.size()); @@ -621,10 +618,10 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { AddOutputStream(node_type_info->Node(), tag_map.Names()[id.value()], &node_type_info->OutputStreamTypes().Get(id))); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ValidatedGraphConfig::AddOutputStream( +mediapipe::Status ValidatedGraphConfig::AddOutputStream( NodeTypeInfo::NodeRef node, const std::string& name, PacketType* packet_type) { output_streams_.emplace_back(); @@ -634,15 +631,15 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { edge_info.name = name; edge_info.packet_type = packet_type; - if (!::mediapipe::InsertIfNotPresent(&stream_to_producer_, name, - output_streams_.size() - 1)) { - return ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + if (!mediapipe::InsertIfNotPresent(&stream_to_producer_, name, + output_streams_.size() - 1)) { + return mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "Output Stream \"" << name << "\" defined twice."; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ValidatedGraphConfig::AddInputStreamsForNode( +mediapipe::Status ValidatedGraphConfig::AddInputStreamsForNode( NodeTypeInfo* node_type_info, bool* need_sorting_ptr) { node_type_info->SetInputStreamBaseIndex(input_streams_.size()); const int node_index = node_type_info->Node().index; @@ -695,7 +692,7 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { // Continue to process the nodes so we gather enough information // for the sort operation. } else { - return ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "Input Stream \"" << name << "\" for node with sorted index " << node_index << " does not have a corresponding output stream."; } @@ -705,7 +702,7 @@ std::string NodeTypeInfo::NodeTypeToString(NodeType node_type) { edge_info.name = name; edge_info.packet_type = &node_type_info->InputStreamTypes().Get(id); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } int ValidatedGraphConfig::SorterIndexForNode(NodeTypeInfo::NodeRef node) const { @@ -729,7 +726,7 @@ NodeTypeInfo::NodeRef ValidatedGraphConfig::NodeForSorterIndex( } } -::mediapipe::Status ValidatedGraphConfig::TopologicalSortNodes() { +mediapipe::Status ValidatedGraphConfig::TopologicalSortNodes() { #if !(defined(MEDIAPIPE_LITE) || defined(MEDIAPIPE_MOBILE)) VLOG(2) << "BEFORE TOPOLOGICAL SORT:\n" << config_.DebugString(); #endif // !(MEDIAPIPE_LITE || MEDIAPIPE_MOBILE) @@ -825,7 +822,7 @@ NodeTypeInfo::NodeRef ValidatedGraphConfig::NodeForSorterIndex( ? 
tool::CanonicalNodeName(Config(), n.index) : DebugName(Config(), n.type, n.index)); }; - return ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "Generator side packet cycle or calculator stream cycle detected " "in graph: [" << absl::StrJoin(cycle_indexes, ", ", node_name_formatter) << "]"; @@ -837,10 +834,10 @@ NodeTypeInfo::NodeRef ValidatedGraphConfig::NodeForSorterIndex( #if !(defined(MEDIAPIPE_LITE) || defined(MEDIAPIPE_MOBILE)) VLOG(2) << "AFTER TOPOLOGICAL SORT:\n" << config_.DebugString(); #endif // !(MEDIAPIPE_LITE || MEDIAPIPE_MOBILE) - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ValidatedGraphConfig::FillUpstreamFieldForBackEdges() { +mediapipe::Status ValidatedGraphConfig::FillUpstreamFieldForBackEdges() { for (int index = 0; index < input_streams_.size(); ++index) { auto& input_stream = input_streams_[index]; if (input_stream.back_edge) { @@ -855,10 +852,10 @@ NodeTypeInfo::NodeRef ValidatedGraphConfig::NodeForSorterIndex( input_stream.upstream = iter->second; } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ValidatedGraphConfig::ValidateSidePacketTypes() { +mediapipe::Status ValidatedGraphConfig::ValidateSidePacketTypes() { for (const auto& side_packet : input_side_packets_) { // TODO Add a check to ensure multiple input side packets // connected to a side packet that will be provided later all have @@ -866,7 +863,7 @@ NodeTypeInfo::NodeRef ValidatedGraphConfig::NodeForSorterIndex( if (side_packet.upstream != -1 && !side_packet.packet_type->IsConsistentWith( *output_side_packets_[side_packet.upstream].packet_type)) { - return ::mediapipe::UnknownError(absl::Substitute( + return mediapipe::UnknownError(absl::Substitute( "Input side packet \"$0\" of $1 \"$2\" expected a packet of type " "\"$3\" but the connected output side packet will be of type \"$4\"", side_packet.name, @@ -878,10 +875,10 @@ NodeTypeInfo::NodeRef ValidatedGraphConfig::NodeForSorterIndex( .packet_type->DebugTypeName())); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ValidatedGraphConfig::ResolveAnyTypes( +mediapipe::Status ValidatedGraphConfig::ResolveAnyTypes( std::vector* input_edges, std::vector* output_edges) { for (EdgeInfo& input_edge : *input_edges) { if (input_edge.upstream == -1) { @@ -896,15 +893,15 @@ NodeTypeInfo::NodeRef ValidatedGraphConfig::NodeForSorterIndex( output_root->SetSameAs(input_edge.packet_type); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ValidatedGraphConfig::ValidateStreamTypes() { +mediapipe::Status ValidatedGraphConfig::ValidateStreamTypes() { for (const EdgeInfo& stream : input_streams_) { RET_CHECK_NE(stream.upstream, -1); if (!stream.packet_type->IsConsistentWith( *output_streams_[stream.upstream].packet_type)) { - return ::mediapipe::UnknownError(absl::Substitute( + return mediapipe::UnknownError(absl::Substitute( "Input stream \"$0\" of calculator \"$1\" expects packets of type " "\"$2\" but the connected output stream will contain packets of type " "\"$3\"", @@ -914,23 +911,23 @@ NodeTypeInfo::NodeRef ValidatedGraphConfig::NodeForSorterIndex( output_streams_[stream.upstream].packet_type->DebugTypeName())); } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ValidatedGraphConfig::ValidateExecutors() { +mediapipe::Status ValidatedGraphConfig::ValidateExecutors() { absl::flat_hash_set 
declared_names; for (const ExecutorConfig& executor_config : config_.executor()) { if (IsReservedExecutorName(executor_config.name())) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "\"" << executor_config.name() << "\" is a reserved executor name."; } if (!declared_names.emplace(executor_config.name()).second) { if (executor_config.name().empty()) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "ExecutorConfig for the default executor is duplicate."); } else { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "ExecutorConfig for \"" << executor_config.name() << "\" is duplicate."; } @@ -944,17 +941,17 @@ NodeTypeInfo::NodeRef ValidatedGraphConfig::NodeForSorterIndex( if (IsReservedExecutorName(executor_name)) { // TODO: We may want to allow this. For example, we may want to run // a non-GPU calculator on the GPU thread for efficiency reasons. - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "\"" << executor_name << "\" is a reserved executor name."; } // The executor must be declared in an ExecutorConfig. if (!declared_names.contains(executor_name)) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "The executor \"" << executor_name << "\" is not declared in an ExecutorConfig."; } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // static @@ -962,23 +959,23 @@ bool ValidatedGraphConfig::IsReservedExecutorName(const std::string& name) { return name == "default" || name == "gpu" || absl::StartsWith(name, "__"); } -::mediapipe::Status ValidatedGraphConfig::ValidateRequiredSidePackets( +mediapipe::Status ValidatedGraphConfig::ValidateRequiredSidePackets( const std::map& side_packets) const { - std::vector<::mediapipe::Status> statuses; + std::vector statuses; for (const auto& required_item : required_side_packets_) { auto iter = side_packets.find(required_item.first); if (iter == side_packets.end()) { - statuses.push_back(::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + statuses.push_back(mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Side packet \"" << required_item.first << "\" is required but was not provided."); continue; } for (int index : required_item.second) { - ::mediapipe::Status status = + mediapipe::Status status = input_side_packets_[index].packet_type->Validate(iter->second); if (!status.ok()) { statuses.push_back( - ::mediapipe::StatusBuilder(std::move(status), MEDIAPIPE_LOC) + mediapipe::StatusBuilder(std::move(status), MEDIAPIPE_LOC) .SetPrepend() << "Side packet \"" << required_item.first << "\" failed validation: "); @@ -989,16 +986,16 @@ bool ValidatedGraphConfig::IsReservedExecutorName(const std::string& name) { return tool::CombinedStatus( "ValidateRequiredSidePackets failed to validate: ", statuses); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ValidatedGraphConfig::ValidateRequiredSidePacketTypes( +mediapipe::Status ValidatedGraphConfig::ValidateRequiredSidePacketTypes( const std::map& side_packet_types) const { - std::vector<::mediapipe::Status> statuses; + std::vector statuses; for (const auto& required_item : required_side_packets_) { auto iter = side_packet_types.find(required_item.first); if (iter == 
side_packet_types.end()) { - statuses.push_back(::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + statuses.push_back(mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Side packet \"" << required_item.first << "\" is required but was not provided."); continue; @@ -1006,7 +1003,7 @@ bool ValidatedGraphConfig::IsReservedExecutorName(const std::string& name) { for (int index : required_item.second) { if (!input_side_packets_[index].packet_type->IsConsistentWith( iter->second)) { - return ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "Side packet \"" << required_item.first << "\" has incorrect type."; } @@ -1016,10 +1013,10 @@ bool ValidatedGraphConfig::IsReservedExecutorName(const std::string& name) { return tool::CombinedStatus( "ValidateRequiredSidePackets failed to validate: ", statuses); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status ValidatedGraphConfig::ComputeSourceDependence() { +mediapipe::Status ValidatedGraphConfig::ComputeSourceDependence() { for (int node_index = 0; node_index < calculators_.size(); ++node_index) { NodeTypeInfo& node_type_info = calculators_[node_index]; if (node_type_info.InputStreamTypes().NumEntries() == 0) { @@ -1060,10 +1057,10 @@ bool ValidatedGraphConfig::IsReservedExecutorName(const std::string& name) { } } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::StatusOr +mediapipe::StatusOr ValidatedGraphConfig::RegisteredSidePacketTypeName(const std::string& name) { auto iter = side_packet_to_producer_.find(name); bool defined = false; @@ -1093,20 +1090,20 @@ ValidatedGraphConfig::RegisteredSidePacketTypeName(const std::string& name) { } if (!defined) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Side packet \"" << name << "\" is not defined in the config."; } - return ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "Unable to find the type for side packet \"" << name << "\". It may be set to AnyType or something else that isn't " "determinable, or the type may be defined but not registered."; } -::mediapipe::StatusOr -ValidatedGraphConfig::RegisteredStreamTypeName(const std::string& name) { +mediapipe::StatusOr ValidatedGraphConfig::RegisteredStreamTypeName( + const std::string& name) { auto iter = stream_to_producer_.find(name); if (iter == stream_to_producer_.end()) { - return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC) << "Stream \"" << name << "\" is not defined in the config."; } int output_edge_index = iter->second; @@ -1130,7 +1127,7 @@ ValidatedGraphConfig::RegisteredStreamTypeName(const std::string& name) { } } } - return ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "Unable to find the type for stream \"" << name << "\". It may be set to AnyType or something else that isn't " "determinable, or the type may be defined but not registered."; diff --git a/mediapipe/framework/validated_graph_config.h b/mediapipe/framework/validated_graph_config.h index b0dc0ec3e..0a01accee 100644 --- a/mediapipe/framework/validated_graph_config.h +++ b/mediapipe/framework/validated_graph_config.h @@ -65,15 +65,14 @@ class NodeTypeInfo { // node_index is the index of this node among the nodes of the same type // in the validated graph config. 
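Before the header diff below, a consumption sketch for the StatusOr-returning lookups rewritten above; this codebase's StatusOr is read with ok(), status(), and ValueOrDie(), as in the tag_map test earlier in this diff (the wrapper function itself is hypothetical):

#include <string>

#include "mediapipe/framework/port/logging.h"
#include "mediapipe/framework/validated_graph_config.h"

mediapipe::Status LogStreamType(mediapipe::ValidatedGraphConfig* graph,
                                const std::string& stream) {
  mediapipe::StatusOr<std::string> type_name =
      graph->RegisteredStreamTypeName(stream);
  if (!type_name.ok()) {
    return type_name.status();  // e.g. the "not defined in the config" error.
  }
  LOG(INFO) << stream << " carries type " << type_name.ValueOrDie();
  return mediapipe::OkStatus();
}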
- ::mediapipe::Status Initialize(const ValidatedGraphConfig& validated_graph, - const CalculatorGraphConfig::Node& node, - int node_index); - ::mediapipe::Status Initialize(const ValidatedGraphConfig& validated_graph, - const PacketGeneratorConfig& node, - int node_index); - ::mediapipe::Status Initialize(const ValidatedGraphConfig& validated_graph, - const StatusHandlerConfig& node, - int node_index); + mediapipe::Status Initialize(const ValidatedGraphConfig& validated_graph, + const CalculatorGraphConfig::Node& node, + int node_index); + mediapipe::Status Initialize(const ValidatedGraphConfig& validated_graph, + const PacketGeneratorConfig& node, + int node_index); + mediapipe::Status Initialize(const ValidatedGraphConfig& validated_graph, + const StatusHandlerConfig& node, int node_index); // TODO: many of these accessors can be replaced by Contract(). const PacketTypeSet& InputSidePacketTypes() const { @@ -196,14 +195,14 @@ class ValidatedGraphConfig { // Initializes the ValidatedGraphConfig. This function must be called // before any other functions. Subgraphs are specified through the // global graph registry or an optional local graph registry. - ::mediapipe::Status Initialize(const CalculatorGraphConfig& input_config, - const GraphRegistry* graph_registry = nullptr); + mediapipe::Status Initialize(const CalculatorGraphConfig& input_config, + const GraphRegistry* graph_registry = nullptr); // Initializes the ValidatedGraphConfig from registered graph and subgraph // configs. Subgraphs are retrieved from the specified graph registry or from // the global graph registry. A subgraph can be instantiated directly by // specifying its type in |graph_type|. - ::mediapipe::Status Initialize( + mediapipe::Status Initialize( const std::string& graph_type, const Subgraph::SubgraphOptions* options = nullptr, const GraphRegistry* graph_registry = nullptr); @@ -214,7 +213,7 @@ class ValidatedGraphConfig { // CalculatorGraphConfig.type. A subgraph can be instantiated directly by // specifying its type in |graph_type|. A template graph can be instantiated // directly by specifying its template arguments in |arguments|. - ::mediapipe::Status Initialize( + mediapipe::Status Initialize( const std::vector<CalculatorGraphConfig>& input_configs, const std::vector<CalculatorGraphTemplate>& input_templates, const std::string& graph_type = "", @@ -226,15 +225,15 @@ class ValidatedGraphConfig { // Returns an error if the provided side packets will be generated by // the PacketGenerators in this graph. template <typename T> - ::mediapipe::Status CanAcceptSidePackets( + mediapipe::Status CanAcceptSidePackets( const std::map<std::string, T>& side_packets) const; // Validate that all the required side packets are provided, and the // packets have the required type. - ::mediapipe::Status ValidateRequiredSidePackets( + mediapipe::Status ValidateRequiredSidePackets( const std::map<std::string, Packet>& side_packets) const; // Same as ValidateRequiredSidePackets but only provide the type. - ::mediapipe::Status ValidateRequiredSidePacketTypes( + mediapipe::Status ValidateRequiredSidePacketTypes( const std::map<std::string, PacketType>& side_packet_types) const; // The proto configuration (canonicalized). @@ -281,11 +280,11 @@ class ValidatedGraphConfig { // Returns the registered type name of the specified side packet if // it can be determined, otherwise an appropriate error is returned.
- ::mediapipe::StatusOr<std::string> RegisteredSidePacketTypeName( + mediapipe::StatusOr<std::string> RegisteredSidePacketTypeName( const std::string& name); // Returns the registered type name of the specified stream if it can // be determined, otherwise an appropriate error is returned. - ::mediapipe::StatusOr<std::string> RegisteredStreamTypeName( + mediapipe::StatusOr<std::string> RegisteredStreamTypeName( const std::string& name); // The namespace used for class name lookup. @@ -296,11 +295,11 @@ class ValidatedGraphConfig { private: // Initialize the PacketGenerator information. - ::mediapipe::Status InitializeGeneratorInfo(); + mediapipe::Status InitializeGeneratorInfo(); // Initialize the Calculator information. - ::mediapipe::Status InitializeCalculatorInfo(); + mediapipe::Status InitializeCalculatorInfo(); // Initialize the StatusHandler information. - ::mediapipe::Status InitializeStatusHandlerInfo(); + mediapipe::Status InitializeStatusHandlerInfo(); // Initialize the EdgeInfo objects for side packets. // @@ -312,7 +311,7 @@ class ValidatedGraphConfig { // // If need_sorting_ptr is nullptr then an error will be returned if the // nodes in the side packet graph are not in topologically sorted order. - ::mediapipe::Status InitializeSidePacketInfo(bool* need_sorting_ptr); + mediapipe::Status InitializeSidePacketInfo(bool* need_sorting_ptr); // Adds EdgeInfo objects to input_side_packets_ for all the input side // packets required by the node_type_info. If nodes are processed // with AddInputSidePacketsForNode and AddOutputSidePacketsForNode @@ -320,7 +319,7 @@ class ValidatedGraphConfig { // required_side_packets_ are used to ensure that the graph is // topologically sorted. node_type_info is updated with the proper // initial index for input side packets. - ::mediapipe::Status AddInputSidePacketsForNode(NodeTypeInfo* node_type_info); + mediapipe::Status AddInputSidePacketsForNode(NodeTypeInfo* node_type_info); // Adds EdgeInfo objects to output_side_packets_ for all the output side // packets produced by the node_type_info. side_packet_to_producer_ is // updated. need_sorting_ptr will be set to true if the nodes in the @@ -328,21 +327,21 @@ class ValidatedGraphConfig { // is output after something that required it), otherwise need_sorting_ptr // is left as is. node_type_info is updated with the proper initial index // for output side packets. - ::mediapipe::Status AddOutputSidePacketsForNode(NodeTypeInfo* node_type_info, - bool* need_sorting_ptr); + mediapipe::Status AddOutputSidePacketsForNode(NodeTypeInfo* node_type_info, + bool* need_sorting_ptr); // These functions are analogous to the same operations for side // packets, with the small difference that it is an error to use an // undefined stream (whereas it is allowed to use an undefined side // packet). - ::mediapipe::Status InitializeStreamInfo(bool* need_sorting_ptr); - ::mediapipe::Status AddOutputStreamsForNode(NodeTypeInfo* node_type_info); - ::mediapipe::Status AddInputStreamsForNode(NodeTypeInfo* node_type_info, - bool* need_sorting_ptr); + mediapipe::Status InitializeStreamInfo(bool* need_sorting_ptr); + mediapipe::Status AddOutputStreamsForNode(NodeTypeInfo* node_type_info); + mediapipe::Status AddInputStreamsForNode(NodeTypeInfo* node_type_info, + bool* need_sorting_ptr); // A helper function for adding a single output stream EdgeInfo.
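(The AddOutputStream helper is declared next.) The sorted-order contract described in the comments above can be sketched as follows, with deliberately simplified shapes that are not the class's real members:

    #include <map>
    #include <string>
    #include <vector>

    std::map<std::string, int> side_packet_to_producer;             // name -> node
    std::map<std::string, std::vector<int>> required_side_packets;  // consumers

    void AddInputSidePacket(const std::string& name, int node) {
      // Consumption seen before any producer: remember who required it.
      if (side_packet_to_producer.count(name) == 0)
        required_side_packets[name].push_back(node);
    }

    void AddOutputSidePacket(const std::string& name, int node,
                             bool* need_sorting) {
      // A producer appearing after a consumer means the config is not
      // in topologically sorted order.
      if (required_side_packets.count(name) > 0) *need_sorting = true;
      side_packet_to_producer[name] = node;
    }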
- ::mediapipe::Status AddOutputStream(NodeTypeInfo::NodeRef node, - const std::string& name, - PacketType* packet_type); + mediapipe::Status AddOutputStream(NodeTypeInfo::NodeRef node, + const std::string& name, + PacketType* packet_type); // Return the index of the node adjusted for the topological sorter. int SorterIndexForNode(NodeTypeInfo::NodeRef node) const; @@ -361,31 +360,31 @@ class ValidatedGraphConfig { // two node types, graph input streams and status handlers, can be safely // ignored in the analysis of output side packet generation or stream // header packet propagation. - ::mediapipe::Status TopologicalSortNodes(); + mediapipe::Status TopologicalSortNodes(); // TODO Add InputStreamHandler. // TODO Add OutputStreamHandler. // Fill the "upstream" field for all back edges. - ::mediapipe::Status FillUpstreamFieldForBackEdges(); + mediapipe::Status FillUpstreamFieldForBackEdges(); // Compute the dependence of nodes on sources. - ::mediapipe::Status ComputeSourceDependence(); + mediapipe::Status ComputeSourceDependence(); // Infer the type of types set to "Any" by what they are connected to. - ::mediapipe::Status ResolveAnyTypes(std::vector<EdgeInfo>* input_edges, - std::vector<EdgeInfo>* output_edges); + mediapipe::Status ResolveAnyTypes(std::vector<EdgeInfo>* input_edges, + std::vector<EdgeInfo>* output_edges); // Returns an error if the generator graph does not have consistent // type specifications for side packets. - ::mediapipe::Status ValidateSidePacketTypes(); + mediapipe::Status ValidateSidePacketTypes(); // Returns an error if the graph of calculators does not have consistent // type specifications for streams. - ::mediapipe::Status ValidateStreamTypes(); + mediapipe::Status ValidateStreamTypes(); // Returns an error if the graph does not have valid ExecutorConfigs, or // if the executor name in a node config is reserved or is not declared // in an ExecutorConfig.
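(ValidateExecutors is declared just below.) ResolveAnyTypes, declared above, propagates concrete types into endpoints left as AnyType. A hedged sketch of the per-edge rule, assuming an IsAny() style accessor; PacketType::SetSameAs is the real linking API:

    // Simplified per-edge resolution; SetSameAs constrains two PacketTypes
    // to be the same type.
    void ResolveEdge(PacketType* input, PacketType* output) {
      if (input->IsAny() && !output->IsAny()) {
        input->SetSameAs(output);  // adopt the concrete endpoint's type
      } else if (output->IsAny() && !input->IsAny()) {
        output->SetSameAs(input);
      }
    }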
- ::mediapipe::Status ValidateExecutors(); + mediapipe::Status ValidateExecutors(); bool initialized_ = false; @@ -423,16 +422,16 @@ }; template <typename T> -::mediapipe::Status ValidatedGraphConfig::CanAcceptSidePackets( +mediapipe::Status ValidatedGraphConfig::CanAcceptSidePackets( const std::map<std::string, T>& side_packets) const { for (const auto& output_side_packet : output_side_packets_) { if (ContainsKey(side_packets, output_side_packet.name)) { - return ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "Side packet \"" << output_side_packet.name << "\" is both provided and generated by a PacketGenerator."; } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/gpu/BUILD b/mediapipe/gpu/BUILD index 1e879b41a..42ee76a81 100644 --- a/mediapipe/gpu/BUILD +++ b/mediapipe/gpu/BUILD @@ -244,6 +244,7 @@ objc_library( hdrs = ["pixel_buffer_pool_util.h"], copts = [ "-Wno-shorten-64-to-32", + "-std=c++17", ], sdk_frameworks = [ "Accelerate", @@ -266,6 +267,7 @@ objc_library( copts = [ "-x objective-c++", "-Wno-shorten-64-to-32", + "-std=c++17", ], sdk_frameworks = [ "CoreVideo", @@ -300,6 +302,7 @@ objc_library( copts = [ "-x objective-c++", "-Wno-shorten-64-to-32", + "-std=c++17", ], sdk_frameworks = [ "CoreVideo", @@ -581,6 +584,7 @@ objc_library( hdrs = HELPER_COMMON_HDRS, copts = [ "-Wno-shorten-64-to-32", + "-std=c++17", ], sdk_frameworks = HELPER_IOS_FRAMEWORKS, visibility = ["//visibility:public"], @@ -604,6 +608,7 @@ objc_library( hdrs = ["MPPMetalHelper.h"], copts = [ "-Wno-shorten-64-to-32", + "-std=c++17", ], sdk_frameworks = [ "CoreVideo", @@ -816,6 +821,7 @@ mediapipe_cc_proto_library( objc_library( name = "metal_copy_calculator", srcs = ["MetalCopyCalculator.mm"], + copts = ["-std=c++17"], sdk_frameworks = [ "CoreVideo", "Metal", @@ -833,6 +839,7 @@ objc_library( objc_library( name = "metal_rgb_weight_calculator", srcs = ["MetalRgbWeightCalculator.mm"], + copts = ["-std=c++17"], sdk_frameworks = [ "CoreVideo", "Metal", @@ -849,6 +856,7 @@ objc_library( objc_library( name = "metal_sobel_calculator", srcs = ["MetalSobelCalculator.mm"], + copts = ["-std=c++17"], sdk_frameworks = [ "CoreVideo", "Metal", @@ -865,6 +873,7 @@ objc_library( objc_library( name = "metal_sobel_compute_calculator", srcs = ["MetalSobelComputeCalculator.mm"], + copts = ["-std=c++17"], sdk_frameworks = [ "CoreVideo", "Metal", @@ -881,6 +890,7 @@ objc_library( objc_library( name = "mps_sobel_calculator", srcs = ["MPSSobelCalculator.mm"], + copts = ["-std=c++17"], sdk_frameworks = [ "CoreVideo", "Metal", @@ -915,6 +925,7 @@ objc_library( ], copts = [ "-Wno-shorten-64-to-32", + "-std=c++17", ], data = [ "//mediapipe/objc:testdata/googlelogo_color_272x92dp.png", diff --git a/mediapipe/gpu/gl_context_egl.cc b/mediapipe/gpu/gl_context_egl.cc index eb7f8aebe..016afd995 100644 --- a/mediapipe/gpu/gl_context_egl.cc +++ b/mediapipe/gpu/gl_context_egl.cc @@ -94,7 +94,12 @@ GlContext::StatusOrGlContext GlContext::Create(EGLContext share_context, EGL_RENDERABLE_TYPE, gl_version == 3 ? EGL_OPENGL_ES3_BIT_KHR : EGL_OPENGL_ES2_BIT, // Allow rendering to pixel buffers or directly to windows.
- EGL_SURFACE_TYPE, EGL_PBUFFER_BIT | EGL_WINDOW_BIT, + EGL_SURFACE_TYPE, +#ifdef MEDIAPIPE_OMIT_EGL_WINDOW_BIT + EGL_PBUFFER_BIT, +#else + EGL_PBUFFER_BIT | EGL_WINDOW_BIT, +#endif EGL_RED_SIZE, 8, EGL_GREEN_SIZE, 8, EGL_BLUE_SIZE, 8, @@ -114,7 +119,7 @@ GlContext::StatusOrGlContext GlContext::Create(EGLContext share_context, << eglGetError(); } if (!num_configs) { - return ::mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) + return mediapipe::UnknownErrorBuilder(MEDIAPIPE_LOC) << "eglChooseConfig() returned no matching EGL configuration for " << "RGBA8888 D16 ES" << gl_version << " request. "; } diff --git a/mediapipe/gpu/gpu_buffer.h b/mediapipe/gpu/gpu_buffer.h index e08164f45..2fd864766 100644 --- a/mediapipe/gpu/gpu_buffer.h +++ b/mediapipe/gpu/gpu_buffer.h @@ -24,9 +24,6 @@ #include #include "mediapipe/objc/CFHolder.h" -#if !TARGET_OS_OSX -#define MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER 1 -#endif // TARGET_OS_OSX #endif // defined(__APPLE__) #if !MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER diff --git a/mediapipe/gpu/gpu_buffer_format.cc b/mediapipe/gpu/gpu_buffer_format.cc index 449bf48d9..9d87274b6 100644 --- a/mediapipe/gpu/gpu_buffer_format.cc +++ b/mediapipe/gpu/gpu_buffer_format.cc @@ -73,12 +73,16 @@ const GlTextureInfo& GlTextureInfoForGpuBufferFormat(GpuBufferFormat format, {GpuBufferFormat::kBGRA32, { // internal_format, format, type, downscale -#ifdef __APPLE__ - // On Apple platforms, the preferred transfer format is BGRA. +#if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER + // On Apple platforms, we have different code paths for iOS + // (using CVPixelBuffer) and on macOS (using GlTextureBuffer). + // When using CVPixelBuffer, the preferred transfer format is + // BGRA. + // TODO: Check iOS simulator. {GL_RGBA, GL_BGRA, GL_UNSIGNED_BYTE, 1}, #else {GL_RGBA, GL_RGBA, GL_UNSIGNED_BYTE, 1}, -#endif // __APPLE__ +#endif // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER }}, {GpuBufferFormat::kOneComponent8, { diff --git a/mediapipe/gpu/gpu_buffer_format.h b/mediapipe/gpu/gpu_buffer_format.h index 16c0ff631..fd008ba02 100644 --- a/mediapipe/gpu/gpu_buffer_format.h +++ b/mediapipe/gpu/gpu_buffer_format.h @@ -17,6 +17,9 @@ #ifdef __APPLE__ #include +#if !TARGET_OS_OSX +#define MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER 1 +#endif // TARGET_OS_OSX #endif // defined(__APPLE__) #include "mediapipe/framework/formats/image_format.pb.h" diff --git a/mediapipe/gpu/gpu_buffer_multi_pool.h b/mediapipe/gpu/gpu_buffer_multi_pool.h index 201a070ba..52f1deb1b 100644 --- a/mediapipe/gpu/gpu_buffer_multi_pool.h +++ b/mediapipe/gpu/gpu_buffer_multi_pool.h @@ -147,7 +147,8 @@ class GpuBufferMultiPool { #ifdef __APPLE__ // Texture caches used with this pool. 
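The member declared just below trades the bare GUARDED_BY macro for Abseil's prefixed ABSL_GUARDED_BY; the Clang thread-safety analysis is identical, only the macro name is namespaced. A minimal usage sketch:

    #include "absl/base/thread_annotations.h"
    #include "absl/synchronization/mutex.h"

    class Counter {
     public:
      void Increment() {
        absl::MutexLock lock(&mutex_);
        ++value_;  // OK: mutex_ is held here.
      }

     private:
      absl::Mutex mutex_;
      int value_ ABSL_GUARDED_BY(mutex_) = 0;  // warns if touched without mutex_
    };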
- std::vector<CFHolder<CVTextureCacheType>> texture_caches_ GUARDED_BY(mutex_); + std::vector<CFHolder<CVTextureCacheType>> texture_caches_ + ABSL_GUARDED_BY(mutex_); #endif // defined(__APPLE__) }; diff --git a/mediapipe/gpu/gpu_shared_data_internal.cc b/mediapipe/gpu/gpu_shared_data_internal.cc index e399e1715..499894715 100644 --- a/mediapipe/gpu/gpu_shared_data_internal.cc +++ b/mediapipe/gpu/gpu_shared_data_internal.cc @@ -104,7 +104,7 @@ GpuResources::~GpuResources() { #endif } -::mediapipe::Status GpuResources::PrepareGpuNode(CalculatorNode* node) { +mediapipe::Status GpuResources::PrepareGpuNode(CalculatorNode* node) { CHECK(node->UsesGpu()); std::string node_id = node->GetCalculatorState().NodeName(); std::string node_type = node->GetCalculatorState().CalculatorType(); diff --git a/mediapipe/gpu/gpu_shared_data_internal.h b/mediapipe/gpu/gpu_shared_data_internal.h index 11c2a8066..cbe77c709 100644 --- a/mediapipe/gpu/gpu_shared_data_internal.h +++ b/mediapipe/gpu/gpu_shared_data_internal.h @@ -69,7 +69,7 @@ class GpuResources { MPPGraphGPUData* ios_gpu_data(); #endif // defined(__APPLE__) - ::mediapipe::Status PrepareGpuNode(CalculatorNode* node); + mediapipe::Status PrepareGpuNode(CalculatorNode* node); // If the node requires custom GPU executors in the current configuration, // returns the executor's names and the executors themselves. diff --git a/mediapipe/graphs/face_mesh/calculators/face_landmarks_to_render_data_calculator.cc b/mediapipe/graphs/face_mesh/calculators/face_landmarks_to_render_data_calculator.cc index 7bbea8574..86ad06ec5 100644 --- a/mediapipe/graphs/face_mesh/calculators/face_landmarks_to_render_data_calculator.cc +++ b/mediapipe/graphs/face_mesh/calculators/face_landmarks_to_render_data_calculator.cc @@ -81,11 +81,11 @@ constexpr int kFaceLandmarkConnections[] = { class FaceLandmarksToRenderDataCalculator : public LandmarksToRenderDataCalculator { public: - ::mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; }; REGISTER_CALCULATOR(FaceLandmarksToRenderDataCalculator); -::mediapipe::Status FaceLandmarksToRenderDataCalculator::Open( +mediapipe::Status FaceLandmarksToRenderDataCalculator::Open( CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); options_ = cc->Options<LandmarksToRenderDataCalculatorOptions>(); @@ -95,7 +95,7 @@ REGISTER_CALCULATOR(FaceLandmarksToRenderDataCalculator); landmark_connections_.push_back(kFaceLandmarkConnections[i * 2 + 1]); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/graphs/holistic_tracking/BUILD b/mediapipe/graphs/holistic_tracking/BUILD new file mode 100644 index 000000000..31dc72179 --- /dev/null +++ b/mediapipe/graphs/holistic_tracking/BUILD @@ -0,0 +1,73 @@ +# Copyright 2020 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License.
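A note on the build rules that follow: mediapipe_simple_subgraph compiles a .pbtxt graph and registers it under its register_as name, after which other graphs can instantiate it as if it were a calculator, and C++ code can expand it by type name, for instance through the ValidatedGraphConfig API touched earlier in this patch. Sketch (the type string must match register_as):

    mediapipe::ValidatedGraphConfig validated;
    mediapipe::Status status =
        validated.Initialize("HolisticTrackingToRenderData");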
+ +load( "//mediapipe/framework/tool:mediapipe_graph.bzl", "mediapipe_binary_graph", "mediapipe_simple_subgraph", ) + +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +mediapipe_simple_subgraph( name = "holistic_tracking_to_render_data", graph = "holistic_tracking_to_render_data.pbtxt", register_as = "HolisticTrackingToRenderData", deps = [ "//mediapipe/calculators/core:concatenate_normalized_landmark_list_calculator", "//mediapipe/calculators/core:concatenate_vector_calculator", "//mediapipe/calculators/core:merge_calculator", "//mediapipe/calculators/core:split_normalized_landmark_list_calculator", "//mediapipe/calculators/core:split_vector_calculator", "//mediapipe/calculators/util:detections_to_render_data_calculator", "//mediapipe/calculators/util:landmarks_to_render_data_calculator", "//mediapipe/calculators/util:rect_to_render_data_calculator", "//mediapipe/calculators/util:rect_to_render_scale_calculator", "//mediapipe/framework/tool:switch_container", "//mediapipe/modules/holistic_landmark:hand_wrist_for_pose", ], ) + +cc_library( name = "holistic_tracking_gpu_deps", deps = [ ":holistic_tracking_to_render_data", "//mediapipe/calculators/core:constant_side_packet_calculator", "//mediapipe/calculators/core:flow_limiter_calculator", "//mediapipe/calculators/image:image_properties_calculator", "//mediapipe/calculators/util:annotation_overlay_calculator", "//mediapipe/modules/holistic_landmark:holistic_landmark_gpu", ], ) + +mediapipe_binary_graph( name = "holistic_tracking_gpu", graph = "holistic_tracking_gpu.pbtxt", output_name = "holistic_tracking_gpu.binarypb", deps = [":holistic_tracking_gpu_deps"], ) + +cc_library( name = "holistic_tracking_cpu_graph_deps", deps = [ ":holistic_tracking_to_render_data", "//mediapipe/calculators/core:constant_side_packet_calculator", "//mediapipe/calculators/core:flow_limiter_calculator", "//mediapipe/calculators/image:image_properties_calculator", "//mediapipe/calculators/util:annotation_overlay_calculator", "//mediapipe/modules/holistic_landmark:holistic_landmark_cpu", ], ) diff --git a/mediapipe/graphs/holistic_tracking/holistic_tracking_cpu.pbtxt b/mediapipe/graphs/holistic_tracking/holistic_tracking_cpu.pbtxt new file mode 100644 index 000000000..65957ed61 --- /dev/null +++ b/mediapipe/graphs/holistic_tracking/holistic_tracking_cpu.pbtxt @@ -0,0 +1,90 @@ +# Tracks and renders pose + hands + face landmarks. + +# CPU image. (ImageFrame) +input_stream: "input_video" + +# CPU image with rendered results. (ImageFrame) +output_stream: "output_video" + +# Throttles the images flowing downstream for flow control. It passes through +# the very first incoming image unaltered, and waits for downstream nodes +# (calculators and subgraphs) in the graph to finish their tasks before it +# passes through another image. All images that come in while waiting are +# dropped, limiting the number of in-flight images in most parts of the graph +# to 1. This prevents the downstream nodes from queuing up incoming images and +# data excessively, which leads to increased latency and memory usage, unwanted +# in real-time mobile applications. It also eliminates unnecessary computation, +# e.g., the output produced by a node may get dropped downstream if the +# subsequent nodes are still busy processing previous inputs.
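The FlowLimiterCalculator node that follows implements the throttling policy just described. Its admission rule amounts to roughly the following C++, a hedged model of the semantics rather than the calculator's actual code:

    struct FlowLimiterModel {
      int max_in_flight = 1, max_in_queue = 1;
      int in_flight = 0, in_queue = 0;
      bool Admit() {  // called for each incoming frame
        if (in_flight < max_in_flight) { ++in_flight; return true; }
        if (in_queue < max_in_queue) { ++in_queue; return true; }
        return false;  // frame dropped, as the comment above describes
      }
      void Finished() {  // driven by the FINISHED back edge
        if (in_queue > 0) --in_queue;  // a queued frame starts processing
        else --in_flight;
      }
    };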
+node { + calculator: "FlowLimiterCalculator" + input_stream: "input_video" + input_stream: "FINISHED:output_video" + input_stream_info: { + tag_index: "FINISHED" + back_edge: true + } + output_stream: "throttled_input_video" + node_options: { + [type.googleapis.com/mediapipe.FlowLimiterCalculatorOptions] { + max_in_flight: 1 + max_in_queue: 1 + # Timeout is disabled (set to 0) as first frame processing can take more + # than 1 second. + in_flight_timeout: 0 + } + } +} + +node { + calculator: "ConstantSidePacketCalculator" + output_side_packet: "PACKET:0:upper_body_only" + output_side_packet: "PACKET:1:smooth_landmarks" + node_options: { + [type.googleapis.com/mediapipe.ConstantSidePacketCalculatorOptions]: { + packet { bool_value: false } + packet { bool_value: true } + } + } +} + +node { + calculator: "HolisticLandmarkCpu" + input_stream: "IMAGE:throttled_input_video" + input_side_packet: "UPPER_BODY_ONLY:upper_body_only" + input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks" + output_stream: "POSE_LANDMARKS:pose_landmarks" + output_stream: "POSE_ROI:pose_roi" + output_stream: "POSE_DETECTION:pose_detection" + output_stream: "FACE_LANDMARKS:face_landmarks" + output_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks" + output_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks" +} + +# Gets image size. +node { + calculator: "ImagePropertiesCalculator" + input_stream: "IMAGE:throttled_input_video" + output_stream: "SIZE:image_size" +} + +# Converts pose, hands and face landmarks to a render data vector. +node { + calculator: "HolisticTrackingToRenderData" + input_stream: "IMAGE_SIZE:image_size" + input_stream: "POSE_LANDMARKS:pose_landmarks" + input_stream: "POSE_ROI:pose_roi" + input_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks" + input_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks" + input_stream: "FACE_LANDMARKS:face_landmarks" + input_side_packet: "UPPER_BODY_ONLY:upper_body_only" + output_stream: "RENDER_DATA_VECTOR:render_data_vector" +} + +# Draws annotations and overlays them on top of the input images. +node { + calculator: "AnnotationOverlayCalculator" + input_stream: "IMAGE:throttled_input_video" + input_stream: "VECTOR:render_data_vector" + output_stream: "IMAGE:output_video" +} diff --git a/mediapipe/graphs/holistic_tracking/holistic_tracking_gpu.pbtxt b/mediapipe/graphs/holistic_tracking/holistic_tracking_gpu.pbtxt new file mode 100644 index 000000000..13bf28e51 --- /dev/null +++ b/mediapipe/graphs/holistic_tracking/holistic_tracking_gpu.pbtxt @@ -0,0 +1,90 @@ +# Tracks and renders pose + hands + face landmarks. + +# GPU buffer. (GpuBuffer) +input_stream: "input_video" + +# GPU image with rendered results. (GpuBuffer) +output_stream: "output_video" + +# Throttles the images flowing downstream for flow control. It passes through +# the very first incoming image unaltered, and waits for downstream nodes +# (calculators and subgraphs) in the graph to finish their tasks before it +# passes through another image. All images that come in while waiting are +# dropped, limiting the number of in-flight images in most parts of the graph +# to 1. This prevents the downstream nodes from queuing up incoming images and +# data excessively, which leads to increased latency and memory usage, unwanted +# in real-time mobile applications. It also eliminates unnecessary computation, +# e.g., the output produced by a node may get dropped downstream if the +# subsequent nodes are still busy processing previous inputs.
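(The GPU variant's node definitions follow.) For the CPU graph just completed, a hedged sketch of driving it from C++: assume this runs inside a function returning mediapipe::Status, and kGraphText is a hypothetical string constant holding the pbtxt above.

    #include "mediapipe/framework/calculator_graph.h"
    #include "mediapipe/framework/port/parse_text_proto.h"

    auto config = mediapipe::ParseTextProtoOrDie<mediapipe::CalculatorGraphConfig>(
        kGraphText);
    mediapipe::CalculatorGraph graph;
    MP_RETURN_IF_ERROR(graph.Initialize(config));
    MP_RETURN_IF_ERROR(graph.ObserveOutputStream(
        "output_video", [](const mediapipe::Packet& packet) {
          // Rendered frames arrive here as ImageFrame packets.
          return mediapipe::OkStatus();
        }));
    MP_RETURN_IF_ERROR(graph.StartRun({}));
    // ...add ImageFrame packets to "input_video", then CloseInputStream()
    // and WaitUntilDone().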
+node { + calculator: "FlowLimiterCalculator" + input_stream: "input_video" + input_stream: "FINISHED:output_video" + input_stream_info: { + tag_index: "FINISHED" + back_edge: true + } + output_stream: "throttled_input_video" + node_options: { + [type.googleapis.com/mediapipe.FlowLimiterCalculatorOptions] { + max_in_flight: 1 + max_in_queue: 1 + # Timeout is disabled (set to 0) as first frame processing can take more + # than 1 second. + in_flight_timeout: 0 + } + } +} + +node { + calculator: "ConstantSidePacketCalculator" + output_side_packet: "PACKET:0:upper_body_only" + output_side_packet: "PACKET:1:smooth_landmarks" + node_options: { + [type.googleapis.com/mediapipe.ConstantSidePacketCalculatorOptions]: { + packet { bool_value: false } + packet { bool_value: true } + } + } +} + +node { + calculator: "HolisticLandmarkGpu" + input_stream: "IMAGE:throttled_input_video" + input_side_packet: "UPPER_BODY_ONLY:upper_body_only" + input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks" + output_stream: "POSE_LANDMARKS:pose_landmarks" + output_stream: "POSE_ROI:pose_roi" + output_stream: "POSE_DETECTION:pose_detection" + output_stream: "FACE_LANDMARKS:face_landmarks" + output_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks" + output_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks" +} + +# Gets image size. +node { + calculator: "ImagePropertiesCalculator" + input_stream: "IMAGE_GPU:throttled_input_video" + output_stream: "SIZE:image_size" +} + +# Converts pose, hands and face landmarks to a render data vector. +node { + calculator: "HolisticTrackingToRenderData" + input_stream: "IMAGE_SIZE:image_size" + input_stream: "POSE_LANDMARKS:pose_landmarks" + input_stream: "POSE_ROI:pose_roi" + input_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks" + input_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks" + input_stream: "FACE_LANDMARKS:face_landmarks" + input_side_packet: "UPPER_BODY_ONLY:upper_body_only" + output_stream: "RENDER_DATA_VECTOR:render_data_vector" +} + +# Draws annotations and overlays them on top of the input images. +node { + calculator: "AnnotationOverlayCalculator" + input_stream: "IMAGE_GPU:throttled_input_video" + input_stream: "VECTOR:render_data_vector" + output_stream: "IMAGE_GPU:output_video" +} diff --git a/mediapipe/graphs/holistic_tracking/holistic_tracking_to_render_data.pbtxt b/mediapipe/graphs/holistic_tracking/holistic_tracking_to_render_data.pbtxt new file mode 100644 index 000000000..7a326b46f --- /dev/null +++ b/mediapipe/graphs/holistic_tracking/holistic_tracking_to_render_data.pbtxt @@ -0,0 +1,854 @@ +# Converts pose + hands + face landmarks to a render data vector. + +type: "HolisticTrackingToRenderData" + +# Image size. (std::pair<int, int>) +input_stream: "IMAGE_SIZE:image_size" +# Pose landmarks. (NormalizedLandmarkList) +input_stream: "POSE_LANDMARKS:landmarks" +# Region of interest calculated based on pose landmarks. (NormalizedRect) +input_stream: "POSE_ROI:roi" +# Left hand landmarks. (NormalizedLandmarkList) +input_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks" +# Right hand landmarks. (NormalizedLandmarkList) +input_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks" +# Face landmarks. (NormalizedLandmarkList) +input_stream: "FACE_LANDMARKS:face_landmarks" + +# Whether to render the full set of pose landmarks, or only those on the +# upper body. If unspecified, functions as set to false. (bool) +input_side_packet: "UPPER_BODY_ONLY:upper_body_only" + +# Render data vector. (std::vector<RenderData>)
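The ConstantSidePacketCalculator nodes above bake upper_body_only and smooth_landmarks into the graph. A hedged alternative is to supply them from application code at StartRun time; this would require declaring them as graph-level input side packets instead (sketch):

    std::map<std::string, mediapipe::Packet> side_packets = {
        {"upper_body_only", mediapipe::MakePacket<bool>(false)},
        {"smooth_landmarks", mediapipe::MakePacket<bool>(true)},
    };
    MP_RETURN_IF_ERROR(graph.StartRun(side_packets));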
+output_stream: "RENDER_DATA_VECTOR:render_data_vector" + +# --------------------------------------------------------------------------- # +# ------------------ Calculates scale for render objects -------------------- # +# --------------------------------------------------------------------------- # + +# Calculates rendering scale based on the pose bounding box. +node { + calculator: "RectToRenderScaleCalculator" + input_stream: "NORM_RECT:roi" + input_stream: "IMAGE_SIZE:image_size" + output_stream: "RENDER_SCALE:render_scale" + node_options: { + [type.googleapis.com/mediapipe.RectToRenderScaleCalculatorOptions] { + multiplier: 0.0008 + } + } +} + +# --------------------------------------------------------------------------- # +# --------------- Combines pose and hands into pose skeleton ---------------- # +# --------------------------------------------------------------------------- # + +# Gets pose landmarks before wrists. +node { + calculator: "SplitNormalizedLandmarkListCalculator" + input_stream: "landmarks" + output_stream: "landmarks_before_wrist" + node_options: { + [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] { + ranges: { begin: 11 end: 15 } + } + } +} + +# Gets pose left wrist landmark. +node { + calculator: "SplitNormalizedLandmarkListCalculator" + input_stream: "landmarks" + output_stream: "landmarks_left_wrist" + node_options: { + [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] { + ranges: { begin: 15 end: 16 } + } + } +} + +# Gets pose right wrist landmark. +node { + calculator: "SplitNormalizedLandmarkListCalculator" + input_stream: "landmarks" + output_stream: "landmarks_right_wrist" + node_options: { + [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] { + ranges: { begin: 16 end: 17 } + } + } +} + +# Gets pose landmarks after wrists. +node { + calculator: "SwitchContainer" + input_side_packet: "ENABLE:upper_body_only" + input_stream: "landmarks" + output_stream: "landmarks_after_wrist" + node_options: { + [type.googleapis.com/mediapipe.SwitchContainerOptions] { + contained_node: { + calculator: "SplitNormalizedLandmarkListCalculator" + node_options: { + [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] { + ranges: { begin: 23 end: 33 } + } + } + } + contained_node: { + calculator: "SplitNormalizedLandmarkListCalculator" + node_options: { + [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] { + ranges: { begin: 23 end: 25 } + } + } + } + } + } +} + +# Gets left hand wrist landmark. +node { + calculator: "HandWristForPose" + input_stream: "HAND_LANDMARKS:left_hand_landmarks" + output_stream: "WRIST_LANDMARK:left_hand_wrist_landmark" +} + +# Gets the left hand wrist landmark, or keeps the pose wrist landmark if the +# hand was not predicted. +node { + calculator: "MergeCalculator" + input_stream: "left_hand_wrist_landmark" + input_stream: "landmarks_left_wrist" + output_stream: "merged_left_hand_wrist_landmark" +} + +# Gets right hand wrist landmark. +node { + calculator: "HandWristForPose" + input_stream: "HAND_LANDMARKS:right_hand_landmarks" + output_stream: "WRIST_LANDMARK:right_hand_wrist_landmark" +} + +# Gets the right hand wrist landmark, or keeps the pose wrist landmark if the +# hand was not predicted. +node { + calculator: "MergeCalculator" + input_stream: "right_hand_wrist_landmark" + input_stream: "landmarks_right_wrist" + output_stream: "merged_right_hand_wrist_landmark" +} + +# Combines all pose landmarks together.
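(The concatenation node follows.) The begin/end fields in the split nodes above are half-open [begin, end) indices into the 33-point pose landmark list: 11..15 covers the shoulders and elbows, while 15..16 and 16..17 pick out the left and right wrists. In C++ terms the split amounts to (a sketch, not the calculator's code):

    mediapipe::NormalizedLandmarkList Slice(
        const mediapipe::NormalizedLandmarkList& in, int begin, int end) {
      mediapipe::NormalizedLandmarkList out;
      for (int i = begin; i < end; ++i) {
        *out.add_landmark() = in.landmark(i);
      }
      return out;
    }
    // landmarks_before_wrist corresponds to Slice(landmarks, 11, 15).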
+node { + calculator: "ConcatenateNormalizedLandmarkListCalculator" + input_stream: "landmarks_before_wrist" + input_stream: "merged_left_hand_wrist_landmark" + input_stream: "merged_right_hand_wrist_landmark" + input_stream: "landmarks_after_wrist" + output_stream: "landmarks_merged" + node_options: { + [type.googleapis.com/mediapipe.ConcatenateVectorCalculatorOptions] { + only_emit_if_all_present: true + } + } +} + +# Takes left pose landmarks. +node { + calculator: "SwitchContainer" + input_side_packet: "ENABLE:upper_body_only" + input_stream: "landmarks_merged" + output_stream: "landmarks_left_side" + node_options: { + [type.googleapis.com/mediapipe.SwitchContainerOptions] { + contained_node: { + calculator: "SplitNormalizedLandmarkListCalculator" + node_options: { + [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] { + ranges: { begin: 0 end: 1 } + ranges: { begin: 2 end: 3 } + ranges: { begin: 4 end: 5 } + ranges: { begin: 6 end: 7 } + ranges: { begin: 8 end: 9 } + ranges: { begin: 10 end: 11 } + ranges: { begin: 12 end: 13 } + ranges: { begin: 14 end: 15 } + combine_outputs: true + } + } + } + contained_node: { + calculator: "SplitNormalizedLandmarkListCalculator" + node_options: { + [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] { + ranges: { begin: 0 end: 1 } + ranges: { begin: 2 end: 3 } + ranges: { begin: 4 end: 5 } + ranges: { begin: 6 end: 7 } + combine_outputs: true + } + } + } + } + } +} + +# Takes right pose landmarks. +node { + calculator: "SwitchContainer" + input_side_packet: "ENABLE:upper_body_only" + input_stream: "landmarks_merged" + output_stream: "landmarks_right_side" + node_options: { + [type.googleapis.com/mediapipe.SwitchContainerOptions] { + contained_node: { + calculator: "SplitNormalizedLandmarkListCalculator" + node_options: { + [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] { + ranges: { begin: 1 end: 2 } + ranges: { begin: 3 end: 4 } + ranges: { begin: 5 end: 6 } + ranges: { begin: 7 end: 8 } + ranges: { begin: 9 end: 10 } + ranges: { begin: 11 end: 12 } + ranges: { begin: 13 end: 14 } + ranges: { begin: 15 end: 16 } + combine_outputs: true + } + } + } + contained_node: { + calculator: "SplitNormalizedLandmarkListCalculator" + node_options: { + [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] { + ranges: { begin: 1 end: 2 } + ranges: { begin: 3 end: 4 } + ranges: { begin: 5 end: 6 } + ranges: { begin: 7 end: 8 } + combine_outputs: true + } + } + } + } + } +} + +# --------------------------------------------------------------------------- # +# ---------------------------------- Pose ----------------------------------- # +# --------------------------------------------------------------------------- # + +# Converts pose connections to white lines. 
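(The pose-connection render node comes next.) The SwitchContainer pattern used above and below selects one of its contained_node entries through the ENABLE side packet. If my reading of the container's convention is right, the dispatch reduces to:

    // Hedged: ENABLE=false runs contained_node 0 (full-body ranges),
    // ENABLE=true runs contained_node 1 (upper-body ranges).
    int SelectedContainedNode(bool upper_body_only) {
      return upper_body_only ? 1 : 0;
    }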
+node { + calculator: "SwitchContainer" + input_side_packet: "ENABLE:upper_body_only" + input_stream: "NORM_LANDMARKS:landmarks_merged" + input_stream: "RENDER_SCALE:render_scale" + output_stream: "RENDER_DATA:landmarks_render_data" + node_options: { + [type.googleapis.com/mediapipe.SwitchContainerOptions] { + contained_node: { + calculator: "LandmarksToRenderDataCalculator" + node_options: { + [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] { + landmark_connections: 0 + landmark_connections: 1 + landmark_connections: 0 + landmark_connections: 2 + landmark_connections: 2 + landmark_connections: 4 + landmark_connections: 1 + landmark_connections: 3 + landmark_connections: 3 + landmark_connections: 5 + landmark_connections: 0 + landmark_connections: 6 + landmark_connections: 1 + landmark_connections: 7 + landmark_connections: 6 + landmark_connections: 7 + landmark_connections: 6 + landmark_connections: 8 + landmark_connections: 7 + landmark_connections: 9 + landmark_connections: 8 + landmark_connections: 10 + landmark_connections: 9 + landmark_connections: 11 + landmark_connections: 10 + landmark_connections: 12 + landmark_connections: 11 + landmark_connections: 13 + landmark_connections: 12 + landmark_connections: 14 + landmark_connections: 13 + landmark_connections: 15 + landmark_connections: 10 + landmark_connections: 14 + landmark_connections: 11 + landmark_connections: 15 + + landmark_color { r: 255 g: 255 b: 255 } + connection_color { r: 255 g: 255 b: 255 } + thickness: 3.0 + visualize_landmark_depth: false + utilize_visibility: true + visibility_threshold: 0.1 + } + } + } + contained_node: { + calculator: "LandmarksToRenderDataCalculator" + node_options: { + [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] { + landmark_connections: 0 + landmark_connections: 1 + landmark_connections: 0 + landmark_connections: 2 + landmark_connections: 2 + landmark_connections: 4 + landmark_connections: 1 + landmark_connections: 3 + landmark_connections: 3 + landmark_connections: 5 + landmark_connections: 0 + landmark_connections: 6 + landmark_connections: 1 + landmark_connections: 7 + landmark_connections: 6 + landmark_connections: 7 + landmark_color { r: 255 g: 255 b: 255 } + connection_color { r: 255 g: 255 b: 255 } + thickness: 3.0 + visualize_landmark_depth: false + utilize_visibility: true + visibility_threshold: 0.5 + } + } + } + } + } +} + +# Converts pose joints to big white circles. +node { + calculator: "LandmarksToRenderDataCalculator" + input_stream: "NORM_LANDMARKS:landmarks_merged" + input_stream: "RENDER_SCALE:render_scale" + output_stream: "RENDER_DATA:landmarks_background_joints_render_data" + node_options: { + [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] { + landmark_color { r: 255 g: 255 b: 255 } + connection_color { r: 255 g: 255 b: 255 } + thickness: 5.0 + visualize_landmark_depth: false + utilize_visibility: true + visibility_threshold: 0.5 + } + } +} + +# Converts pose left side joints to orange circles (inside white ones). 
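(The left-side joints node follows.) In every render node here, landmark_connections is a flat list read as consecutive (from, to) index pairs. A sketch of the decoding:

    #include <cstddef>
    #include <utility>
    #include <vector>

    std::vector<std::pair<int, int>> DecodeConnections(
        const std::vector<int>& flat) {
      std::vector<std::pair<int, int>> edges;
      for (std::size_t i = 0; i + 1 < flat.size(); i += 2) {
        edges.push_back({flat[i], flat[i + 1]});
      }
      return edges;
    }
    // e.g. {0,1, 0,2} decodes to edges (0,1) and (0,2).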
+node { + calculator: "LandmarksToRenderDataCalculator" + input_stream: "NORM_LANDMARKS:landmarks_left_side" + input_stream: "RENDER_SCALE:render_scale" + output_stream: "RENDER_DATA:landmarks_left_joints_render_data" + node_options: { + [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] { + landmark_color { r: 255 g: 138 b: 0 } + connection_color { r: 255 g: 138 b: 0 } + thickness: 3.0 + visualize_landmark_depth: false + utilize_visibility: true + visibility_threshold: 0.5 + } + } +} + +# Converts pose right side joints to cyan circles (inside white ones). +node { + calculator: "LandmarksToRenderDataCalculator" + input_stream: "NORM_LANDMARKS:landmarks_right_side" + input_stream: "RENDER_SCALE:render_scale" + output_stream: "RENDER_DATA:landmarks_right_joints_render_data" + node_options: { + [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] { + landmark_color { r: 0 g: 217 b: 231 } + connection_color { r: 0 g: 217 b: 231 } + thickness: 3.0 + visualize_landmark_depth: false + utilize_visibility: true + visibility_threshold: 0.5 + } + } +} + +# --------------------------------------------------------------------------- # +# ------------------------------- Left hand --------------------------------- # +# --------------------------------------------------------------------------- # + +# Converts left hand connections to white lines. +node { + calculator: "LandmarksToRenderDataCalculator" + input_stream: "NORM_LANDMARKS:left_hand_landmarks" + input_stream: "RENDER_SCALE:render_scale" + output_stream: "RENDER_DATA:left_hand_landmarks_connections_rd" + node_options: { + [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] { + landmark_connections: 0 + landmark_connections: 1 + landmark_connections: 1 + landmark_connections: 2 + landmark_connections: 2 + landmark_connections: 3 + landmark_connections: 3 + landmark_connections: 4 + landmark_connections: 0 + landmark_connections: 5 + landmark_connections: 5 + landmark_connections: 6 + landmark_connections: 6 + landmark_connections: 7 + landmark_connections: 7 + landmark_connections: 8 + landmark_connections: 5 + landmark_connections: 9 + landmark_connections: 9 + landmark_connections: 10 + landmark_connections: 10 + landmark_connections: 11 + landmark_connections: 11 + landmark_connections: 12 + landmark_connections: 9 + landmark_connections: 13 + landmark_connections: 13 + landmark_connections: 14 + landmark_connections: 14 + landmark_connections: 15 + landmark_connections: 15 + landmark_connections: 16 + landmark_connections: 13 + landmark_connections: 17 + landmark_connections: 0 + landmark_connections: 17 + landmark_connections: 17 + landmark_connections: 18 + landmark_connections: 18 + landmark_connections: 19 + landmark_connections: 19 + landmark_connections: 20 + landmark_color { r: 255 g: 255 b: 255 } + connection_color { r: 255 g: 255 b: 255 } + thickness: 4.0 + visualize_landmark_depth: false + } + } +} + +# Converts left hand color joints. 
+node { + calculator: "LandmarksToRenderDataCalculator" + input_stream: "NORM_LANDMARKS:left_hand_landmarks" + input_stream: "RENDER_SCALE:render_scale" + output_stream: "RENDER_DATA:left_hand_landmarks_joints_rd" + node_options: { + [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] { + landmark_color { r: 255 g: 138 b: 0 } + connection_color { r: 255 g: 138 b: 0 } + thickness: 3.0 + visualize_landmark_depth: false + } + } +} + +# --------------------------------------------------------------------------- # +# -------------------------------- Right hand ------------------------------- # +# --------------------------------------------------------------------------- # + +# Converts right hand connections to white lines. +node { + calculator: "LandmarksToRenderDataCalculator" + input_stream: "NORM_LANDMARKS:right_hand_landmarks" + input_stream: "RENDER_SCALE:render_scale" + output_stream: "RENDER_DATA:right_hand_landmarks_connections_rd" + node_options: { + [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] { + landmark_connections: 0 + landmark_connections: 1 + landmark_connections: 1 + landmark_connections: 2 + landmark_connections: 2 + landmark_connections: 3 + landmark_connections: 3 + landmark_connections: 4 + landmark_connections: 0 + landmark_connections: 5 + landmark_connections: 5 + landmark_connections: 6 + landmark_connections: 6 + landmark_connections: 7 + landmark_connections: 7 + landmark_connections: 8 + landmark_connections: 5 + landmark_connections: 9 + landmark_connections: 9 + landmark_connections: 10 + landmark_connections: 10 + landmark_connections: 11 + landmark_connections: 11 + landmark_connections: 12 + landmark_connections: 9 + landmark_connections: 13 + landmark_connections: 13 + landmark_connections: 14 + landmark_connections: 14 + landmark_connections: 15 + landmark_connections: 15 + landmark_connections: 16 + landmark_connections: 13 + landmark_connections: 17 + landmark_connections: 0 + landmark_connections: 17 + landmark_connections: 17 + landmark_connections: 18 + landmark_connections: 18 + landmark_connections: 19 + landmark_connections: 19 + landmark_connections: 20 + landmark_color { r: 255 g: 255 b: 255 } + connection_color { r: 255 g: 255 b: 255 } + thickness: 4.0 + visualize_landmark_depth: false + } + } +} + +# Converts right hand color joints. +node { + calculator: "LandmarksToRenderDataCalculator" + input_stream: "NORM_LANDMARKS:right_hand_landmarks" + input_stream: "RENDER_SCALE:render_scale" + output_stream: "RENDER_DATA:right_hand_landmarks_joints_rd" + node_options: { + [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] { + landmark_color { r: 0 g: 217 b: 231 } + connection_color { r: 0 g: 217 b: 231 } + thickness: 3.0 + visualize_landmark_depth: false + } + } +} + +# --------------------------------------------------------------------------- # +# ---------------------------------- Face ----------------------------------- # +# --------------------------------------------------------------------------- # + +# Converts face connections to white lines. +node { + calculator: "LandmarksToRenderDataCalculator" + input_stream: "NORM_LANDMARKS:face_landmarks" + input_stream: "RENDER_SCALE:render_scale" + output_stream: "RENDER_DATA:face_landmarks_connections_rd" + node_options: { + [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] { + # Lips. 
+ landmark_connections: 61 + landmark_connections: 146 + landmark_connections: 146 + landmark_connections: 91 + landmark_connections: 91 + landmark_connections: 181 + landmark_connections: 181 + landmark_connections: 84 + landmark_connections: 84 + landmark_connections: 17 + landmark_connections: 17 + landmark_connections: 314 + landmark_connections: 314 + landmark_connections: 405 + landmark_connections: 405 + landmark_connections: 321 + landmark_connections: 321 + landmark_connections: 375 + landmark_connections: 375 + landmark_connections: 291 + landmark_connections: 61 + landmark_connections: 185 + landmark_connections: 185 + landmark_connections: 40 + landmark_connections: 40 + landmark_connections: 39 + landmark_connections: 39 + landmark_connections: 37 + landmark_connections: 37 + landmark_connections: 0 + landmark_connections: 0 + landmark_connections: 267 + landmark_connections: 267 + landmark_connections: 269 + landmark_connections: 269 + landmark_connections: 270 + landmark_connections: 270 + landmark_connections: 409 + landmark_connections: 409 + landmark_connections: 291 + landmark_connections: 78 + landmark_connections: 95 + landmark_connections: 95 + landmark_connections: 88 + landmark_connections: 88 + landmark_connections: 178 + landmark_connections: 178 + landmark_connections: 87 + landmark_connections: 87 + landmark_connections: 14 + landmark_connections: 14 + landmark_connections: 317 + landmark_connections: 317 + landmark_connections: 402 + landmark_connections: 402 + landmark_connections: 318 + landmark_connections: 318 + landmark_connections: 324 + landmark_connections: 324 + landmark_connections: 308 + landmark_connections: 78 + landmark_connections: 191 + landmark_connections: 191 + landmark_connections: 80 + landmark_connections: 80 + landmark_connections: 81 + landmark_connections: 81 + landmark_connections: 82 + landmark_connections: 82 + landmark_connections: 13 + landmark_connections: 13 + landmark_connections: 312 + landmark_connections: 312 + landmark_connections: 311 + landmark_connections: 311 + landmark_connections: 310 + landmark_connections: 310 + landmark_connections: 415 + landmark_connections: 415 + landmark_connections: 308 + # Left eye. + landmark_connections: 33 + landmark_connections: 7 + landmark_connections: 7 + landmark_connections: 163 + landmark_connections: 163 + landmark_connections: 144 + landmark_connections: 144 + landmark_connections: 145 + landmark_connections: 145 + landmark_connections: 153 + landmark_connections: 153 + landmark_connections: 154 + landmark_connections: 154 + landmark_connections: 155 + landmark_connections: 155 + landmark_connections: 133 + landmark_connections: 33 + landmark_connections: 246 + landmark_connections: 246 + landmark_connections: 161 + landmark_connections: 161 + landmark_connections: 160 + landmark_connections: 160 + landmark_connections: 159 + landmark_connections: 159 + landmark_connections: 158 + landmark_connections: 158 + landmark_connections: 157 + landmark_connections: 157 + landmark_connections: 173 + landmark_connections: 173 + landmark_connections: 133 + # Left eyebrow. 
+ landmark_connections: 46 + landmark_connections: 53 + landmark_connections: 53 + landmark_connections: 52 + landmark_connections: 52 + landmark_connections: 65 + landmark_connections: 65 + landmark_connections: 55 + landmark_connections: 70 + landmark_connections: 63 + landmark_connections: 63 + landmark_connections: 105 + landmark_connections: 105 + landmark_connections: 66 + landmark_connections: 66 + landmark_connections: 107 + # Right eye. + landmark_connections: 263 + landmark_connections: 249 + landmark_connections: 249 + landmark_connections: 390 + landmark_connections: 390 + landmark_connections: 373 + landmark_connections: 373 + landmark_connections: 374 + landmark_connections: 374 + landmark_connections: 380 + landmark_connections: 380 + landmark_connections: 381 + landmark_connections: 381 + landmark_connections: 382 + landmark_connections: 382 + landmark_connections: 362 + landmark_connections: 263 + landmark_connections: 466 + landmark_connections: 466 + landmark_connections: 388 + landmark_connections: 388 + landmark_connections: 387 + landmark_connections: 387 + landmark_connections: 386 + landmark_connections: 386 + landmark_connections: 385 + landmark_connections: 385 + landmark_connections: 384 + landmark_connections: 384 + landmark_connections: 398 + landmark_connections: 398 + landmark_connections: 362 + # Right eyebrow. + landmark_connections: 276 + landmark_connections: 283 + landmark_connections: 283 + landmark_connections: 282 + landmark_connections: 282 + landmark_connections: 295 + landmark_connections: 295 + landmark_connections: 285 + landmark_connections: 300 + landmark_connections: 293 + landmark_connections: 293 + landmark_connections: 334 + landmark_connections: 334 + landmark_connections: 296 + landmark_connections: 296 + landmark_connections: 336 + # Face oval. 
+ landmark_connections: 10 + landmark_connections: 338 + landmark_connections: 338 + landmark_connections: 297 + landmark_connections: 297 + landmark_connections: 332 + landmark_connections: 332 + landmark_connections: 284 + landmark_connections: 284 + landmark_connections: 251 + landmark_connections: 251 + landmark_connections: 389 + landmark_connections: 389 + landmark_connections: 356 + landmark_connections: 356 + landmark_connections: 454 + landmark_connections: 454 + landmark_connections: 323 + landmark_connections: 323 + landmark_connections: 361 + landmark_connections: 361 + landmark_connections: 288 + landmark_connections: 288 + landmark_connections: 397 + landmark_connections: 397 + landmark_connections: 365 + landmark_connections: 365 + landmark_connections: 379 + landmark_connections: 379 + landmark_connections: 378 + landmark_connections: 378 + landmark_connections: 400 + landmark_connections: 400 + landmark_connections: 377 + landmark_connections: 377 + landmark_connections: 152 + landmark_connections: 152 + landmark_connections: 148 + landmark_connections: 148 + landmark_connections: 176 + landmark_connections: 176 + landmark_connections: 149 + landmark_connections: 149 + landmark_connections: 150 + landmark_connections: 150 + landmark_connections: 136 + landmark_connections: 136 + landmark_connections: 172 + landmark_connections: 172 + landmark_connections: 58 + landmark_connections: 58 + landmark_connections: 132 + landmark_connections: 132 + landmark_connections: 93 + landmark_connections: 93 + landmark_connections: 234 + landmark_connections: 234 + landmark_connections: 127 + landmark_connections: 127 + landmark_connections: 162 + landmark_connections: 162 + landmark_connections: 21 + landmark_connections: 21 + landmark_connections: 54 + landmark_connections: 54 + landmark_connections: 103 + landmark_connections: 103 + landmark_connections: 67 + landmark_connections: 67 + landmark_connections: 109 + landmark_connections: 109 + landmark_connections: 10 + landmark_color { r: 255 g: 255 b: 255 } + connection_color { r: 255 g: 255 b: 255 } + thickness: 0.5 + visualize_landmark_depth: false + } + } +} + +# Converts face joints to cyan circles. +node { + calculator: "LandmarksToRenderDataCalculator" + input_stream: "NORM_LANDMARKS:face_landmarks" + input_stream: "RENDER_SCALE:render_scale" + output_stream: "RENDER_DATA:face_landmarks_joints_rd" + node_options: { + [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] { + landmark_color { r: 0 g: 217 b: 231 } + connection_color { r: 0 g: 217 b: 231 } + thickness: 0.5 + visualize_landmark_depth: false + } + } +} + +# Concatenates all render data. +node { + calculator: "ConcatenateRenderDataVectorCalculator" + input_stream: "landmarks_render_data" + input_stream: "landmarks_background_joints_render_data" + input_stream: "landmarks_left_joints_render_data" + input_stream: "landmarks_right_joints_render_data" + + # Left hand. + input_stream: "left_hand_landmarks_connections_rd" + input_stream: "left_hand_landmarks_joints_rd" + + # Right hand. + input_stream: "right_hand_landmarks_connections_rd" + input_stream: "right_hand_landmarks_joints_rd" + + # Face. 
+ input_stream: "face_landmarks_connections_rd" + input_stream: "face_landmarks_joints_rd" + + output_stream: "render_data_vector" +} diff --git a/mediapipe/graphs/instant_motion_tracking/calculators/BUILD b/mediapipe/graphs/instant_motion_tracking/calculators/BUILD index 311477959..b8242cfdd 100644 --- a/mediapipe/graphs/instant_motion_tracking/calculators/BUILD +++ b/mediapipe/graphs/instant_motion_tracking/calculators/BUILD @@ -61,8 +61,8 @@ cc_library( "//mediapipe/framework/port:opencv_imgproc", "//mediapipe/framework/port:ret_check", "//mediapipe/framework/port:status", - "//mediapipe/graphs/object_detection_3d/calculators:box", "//mediapipe/graphs/object_detection_3d/calculators:model_matrix_cc_proto", + "//mediapipe/modules/objectron/calculators:box", "@com_google_absl//absl/memory", "@com_google_absl//absl/strings", "@eigen_archive//:eigen", diff --git a/mediapipe/graphs/instant_motion_tracking/calculators/matrices_manager_calculator.cc b/mediapipe/graphs/instant_motion_tracking/calculators/matrices_manager_calculator.cc index 29b942d8e..d3abb6540 100644 --- a/mediapipe/graphs/instant_motion_tracking/calculators/matrices_manager_calculator.cc +++ b/mediapipe/graphs/instant_motion_tracking/calculators/matrices_manager_calculator.cc @@ -25,8 +25,8 @@ #include "mediapipe/framework/port/ret_check.h" #include "mediapipe/framework/port/status.h" #include "mediapipe/graphs/instant_motion_tracking/calculators/transformations.h" -#include "mediapipe/graphs/object_detection_3d/calculators/box.h" #include "mediapipe/graphs/object_detection_3d/calculators/model_matrix.pb.h" +#include "mediapipe/modules/objectron/calculators/box.h" namespace mediapipe { @@ -87,9 +87,9 @@ constexpr float kInitialZ = -10.0f; class MatricesManagerCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + static mediapipe::Status GetContract(CalculatorContract* cc); + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: // Device properties that will be preset by side packets @@ -137,7 +137,7 @@ class MatricesManagerCalculator : public CalculatorBase { REGISTER_CALCULATOR(MatricesManagerCalculator); -::mediapipe::Status MatricesManagerCalculator::GetContract( +mediapipe::Status MatricesManagerCalculator::GetContract( CalculatorContract* cc) { RET_CHECK(cc->Inputs().HasTag(kAnchorsTag) && cc->Inputs().HasTag(kIMUMatrixTag) && @@ -162,20 +162,20 @@ REGISTER_CALCULATOR(MatricesManagerCalculator); cc->InputSidePackets().Tag(kFOVSidePacketTag).Set(); cc->InputSidePackets().Tag(kAspectRatioSidePacketTag).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status MatricesManagerCalculator::Open(CalculatorContext* cc) { +mediapipe::Status MatricesManagerCalculator::Open(CalculatorContext* cc) { cc->SetOffset(TimestampDiff(0)); // Set device properties from side packets vertical_fov_radians_ = cc->InputSidePackets().Tag(kFOVSidePacketTag).Get(); aspect_ratio_ = cc->InputSidePackets().Tag(kAspectRatioSidePacketTag).Get(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status MatricesManagerCalculator::Process(CalculatorContext* cc) { +mediapipe::Status MatricesManagerCalculator::Process(CalculatorContext* cc) { // Define each object's model matrices auto asset_matrices_gif = std::make_unique(); @@ -276,7 
+276,7 @@ REGISTER_CALCULATOR(MatricesManagerCalculator);
         .Get(cc->Outputs().GetId("MATRICES", 1))
         .Add(asset_matrices_1.release(), cc->InputTimestamp());
 
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
 // Using a specified rotation value in radians, generate a rotation matrix for
diff --git a/mediapipe/graphs/instant_motion_tracking/calculators/sticker_manager_calculator.cc b/mediapipe/graphs/instant_motion_tracking/calculators/sticker_manager_calculator.cc
index 96413afb8..5f0ee94ac 100644
--- a/mediapipe/graphs/instant_motion_tracking/calculators/sticker_manager_calculator.cc
+++ b/mediapipe/graphs/instant_motion_tracking/calculators/sticker_manager_calculator.cc
@@ -53,7 +53,7 @@ constexpr char kRenderDescriptorsTag[] = "RENDER_DATA";
 
 class StickerManagerCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     RET_CHECK(cc->Inputs().HasTag(kProtoDataString));
     RET_CHECK(cc->Outputs().HasTag(kAnchorsTag) &&
               cc->Outputs().HasTag(kUserRotationsTag) &&
@@ -66,15 +66,15 @@ class StickerManagerCalculator : public CalculatorBase {
     cc->Outputs().Tag(kUserScalingsTag).Set<std::vector<UserScaling>>();
     cc->Outputs().Tag(kRenderDescriptorsTag).Set<std::vector<int>>();
 
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
-  ::mediapipe::Status Open(CalculatorContext* cc) override {
+  mediapipe::Status Open(CalculatorContext* cc) override {
     cc->SetOffset(TimestampDiff(0));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
-  ::mediapipe::Status Process(CalculatorContext* cc) override {
+  mediapipe::Status Process(CalculatorContext* cc) override {
     std::string sticker_proto_string =
         cc->Inputs().Tag(kProtoDataString).Get<std::string>();
 
@@ -138,11 +138,11 @@ class StickerManagerCalculator : public CalculatorBase {
           .At(cc->InputTimestamp()));
     }
 
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
-  ::mediapipe::Status Close(CalculatorContext* cc) override {
-    return ::mediapipe::OkStatus();
+  mediapipe::Status Close(CalculatorContext* cc) override {
+    return mediapipe::OkStatus();
   }
 };
diff --git a/mediapipe/graphs/instant_motion_tracking/calculators/tracked_anchor_manager_calculator.cc b/mediapipe/graphs/instant_motion_tracking/calculators/tracked_anchor_manager_calculator.cc
index 832ea9217..96ffa7aa8 100644
--- a/mediapipe/graphs/instant_motion_tracking/calculators/tracked_anchor_manager_calculator.cc
+++ b/mediapipe/graphs/instant_motion_tracking/calculators/tracked_anchor_manager_calculator.cc
@@ -71,7 +71,7 @@ class TrackedAnchorManagerCalculator : public CalculatorBase {
   std::vector<Anchor> previous_anchor_data_;
 
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     RET_CHECK(cc->Inputs().HasTag(kAnchorsTag) &&
               cc->Inputs().HasTag(kSentinelTag));
     RET_CHECK(cc->Outputs().HasTag(kAnchorsTag) &&
@@ -91,18 +91,18 @@ class TrackedAnchorManagerCalculator : public CalculatorBase {
       cc->Outputs().Tag(kCancelTag).Set<int>();
     }
 
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
-  ::mediapipe::Status Open(CalculatorContext* cc) override {
-    return ::mediapipe::OkStatus();
+  mediapipe::Status Open(CalculatorContext* cc) override {
+    return mediapipe::OkStatus();
   }
 
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
 };
 REGISTER_CALCULATOR(TrackedAnchorManagerCalculator);
 
-::mediapipe::Status TrackedAnchorManagerCalculator::Process(
+mediapipe::Status TrackedAnchorManagerCalculator::Process(
     CalculatorContext* cc) {
   mediapipe::Timestamp timestamp = cc->InputTimestamp();
   const int sticker_sentinel = cc->Inputs().Tag(kSentinelTag).Get<int>();
@@ -208,6 +208,6 @@ REGISTER_CALCULATOR(TrackedAnchorManagerCalculator);
       .Tag(kBoxesOutputTag)
       .Add(pos_boxes.release(), cc->InputTimestamp());
 
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 }  // namespace mediapipe
diff --git a/mediapipe/graphs/iris_tracking/calculators/iris_to_depth_calculator.cc b/mediapipe/graphs/iris_tracking/calculators/iris_to_depth_calculator.cc
index c8edaea08..caac042e6 100644
--- a/mediapipe/graphs/iris_tracking/calculators/iris_to_depth_calculator.cc
+++ b/mediapipe/graphs/iris_tracking/calculators/iris_to_depth_calculator.cc
@@ -89,7 +89,7 @@ float CalculateDepth(const NormalizedLandmark& center, float focal_length,
 // }
 class IrisToDepthCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     cc->Inputs().Tag(kIrisTag).Set<NormalizedLandmarkList>();
     cc->Inputs().Tag(kImageSizeTag).Set<std::pair<int, int>>();
 
@@ -111,12 +111,12 @@ class IrisToDepthCalculator : public CalculatorBase {
     if (cc->Outputs().HasTag(kRightIrisDepthTag)) {
       cc->Outputs().Tag(kRightIrisDepthTag).Set<float>();
     }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
-  ::mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Open(CalculatorContext* cc) override;
 
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
 
  private:
   float focal_length_pixels_ = -1.f;
@@ -134,7 +134,7 @@ class IrisToDepthCalculator : public CalculatorBase {
 };
 REGISTER_CALCULATOR(IrisToDepthCalculator);
 
-::mediapipe::Status IrisToDepthCalculator::Open(CalculatorContext* cc) {
+mediapipe::Status IrisToDepthCalculator::Open(CalculatorContext* cc) {
   cc->SetOffset(TimestampDiff(0));
   if (cc->InputSidePackets().HasTag(kFocalLengthPixelTag)) {
 #if defined(__APPLE__)
@@ -155,13 +155,13 @@ REGISTER_CALCULATOR(IrisToDepthCalculator);
   }
 
   options_ = cc->Options<::mediapipe::IrisToDepthCalculatorOptions>();
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
-::mediapipe::Status IrisToDepthCalculator::Process(CalculatorContext* cc) {
+mediapipe::Status IrisToDepthCalculator::Process(CalculatorContext* cc) {
   // Only process if there's input landmarks.
   if (cc->Inputs().Tag(kIrisTag).IsEmpty()) {
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
   const auto& iris_landmarks =
@@ -220,7 +220,7 @@ REGISTER_CALCULATOR(IrisToDepthCalculator);
           .At(cc->InputTimestamp()));
     }
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
 void IrisToDepthCalculator::GetLeftIris(const NormalizedLandmarkList& lds,
diff --git a/mediapipe/graphs/iris_tracking/calculators/iris_to_render_data_calculator.cc b/mediapipe/graphs/iris_tracking/calculators/iris_to_render_data_calculator.cc
index b55170f61..5bf86b170 100644
--- a/mediapipe/graphs/iris_tracking/calculators/iris_to_render_data_calculator.cc
+++ b/mediapipe/graphs/iris_tracking/calculators/iris_to_render_data_calculator.cc
@@ -108,7 +108,7 @@ float CalculateDepth(const NormalizedLandmark& center, float focal_length,
 // }
 class IrisToRenderDataCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     cc->Inputs().Tag(kIrisTag).Set<NormalizedLandmarkList>();
     cc->Outputs().Tag(kRenderDataTag).Set<RenderData>();
     cc->Inputs().Tag(kImageSizeTag).Set<std::pair<int, int>>();
@@ -119,12 +119,12 @@ class IrisToRenderDataCalculator : public CalculatorBase {
     if (cc->Inputs().HasTag(kRightIrisDepthTag)) {
       cc->Inputs().Tag(kRightIrisDepthTag).Set<float>();
     }
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
-  ::mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Open(CalculatorContext* cc) override;
 
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
 
  private:
   void RenderIris(const NormalizedLandmarkList& iris_landmarks,
@@ -150,15 +150,15 @@ class IrisToRenderDataCalculator : public CalculatorBase {
 };
 REGISTER_CALCULATOR(IrisToRenderDataCalculator);
 
-::mediapipe::Status IrisToRenderDataCalculator::Open(CalculatorContext* cc) {
+mediapipe::Status IrisToRenderDataCalculator::Open(CalculatorContext* cc) {
   cc->SetOffset(TimestampDiff(0));
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
-::mediapipe::Status IrisToRenderDataCalculator::Process(CalculatorContext* cc) {
+mediapipe::Status IrisToRenderDataCalculator::Process(CalculatorContext* cc) {
   // Only process if there's input landmarks.
   if (cc->Inputs().Tag(kIrisTag).IsEmpty()) {
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
 
   const auto& options =
       cc->Options<::mediapipe::IrisToRenderDataCalculatorOptions>();
@@ -212,7 +212,7 @@ REGISTER_CALCULATOR(IrisToRenderDataCalculator);
   cc->Outputs()
       .Tag(kRenderDataTag)
       .Add(render_data.release(), cc->InputTimestamp());
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
 void IrisToRenderDataCalculator::AddTextRenderData(
diff --git a/mediapipe/graphs/iris_tracking/calculators/update_face_landmarks_calculator.cc b/mediapipe/graphs/iris_tracking/calculators/update_face_landmarks_calculator.cc
index d1bb4b850..3616eba33 100644
--- a/mediapipe/graphs/iris_tracking/calculators/update_face_landmarks_calculator.cc
+++ b/mediapipe/graphs/iris_tracking/calculators/update_face_landmarks_calculator.cc
@@ -215,28 +215,28 @@ constexpr int kEyeLandmarkIndicesInFaceLandmarks[] = {
 //
 class UpdateFaceLandmarksCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
+  static mediapipe::Status GetContract(CalculatorContract* cc) {
     cc->Inputs().Tag(kFaceLandmarksTag).Set<NormalizedLandmarkList>();
     cc->Inputs().Tag(kNewEyeLandmarksTag).Set<NormalizedLandmarkList>();
     cc->Outputs().Tag(kUpdatedFaceLandmarksTag).Set<NormalizedLandmarkList>();
 
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
-  ::mediapipe::Status Open(CalculatorContext* cc) {
+  mediapipe::Status Open(CalculatorContext* cc) {
     cc->SetOffset(TimestampDiff(0));
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
 };
 REGISTER_CALCULATOR(UpdateFaceLandmarksCalculator);
 
-::mediapipe::Status UpdateFaceLandmarksCalculator::Process(
+mediapipe::Status UpdateFaceLandmarksCalculator::Process(
     CalculatorContext* cc) {
   if (cc->Inputs().Tag(kFaceLandmarksTag).IsEmpty() ||
       cc->Inputs().Tag(kNewEyeLandmarksTag).IsEmpty()) {
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
   const auto& face_landmarks =
       cc->Inputs().Tag(kFaceLandmarksTag).Get<NormalizedLandmarkList>();
@@ -263,7 +263,7 @@ REGISTER_CALCULATOR(UpdateFaceLandmarksCalculator);
       .Tag(kUpdatedFaceLandmarksTag)
       .Add(refined_face_landmarks.release(), cc->InputTimestamp());
 
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 }  // namespace mediapipe
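Reviewer note: every change in the calculator files above is the same mechanical rename, dropping the leading "::" from "::mediapipe::Status" / "::mediapipe::OkStatus()". Since each calculator is defined inside "namespace mediapipe", the unqualified spelling resolves to the identical type. A minimal sketch of the resulting calculator shape (ExampleCalculator is hypothetical, not part of this change):

    #include "mediapipe/framework/calculator_framework.h"

    namespace mediapipe {

    class ExampleCalculator : public CalculatorBase {
     public:
      static mediapipe::Status GetContract(CalculatorContract* cc) {
        cc->Inputs().Index(0).Set<int>();    // single int input stream
        cc->Outputs().Index(0).Set<int>();   // single int output stream
        return mediapipe::OkStatus();
      }
      mediapipe::Status Open(CalculatorContext* cc) override {
        cc->SetOffset(TimestampDiff(0));
        return mediapipe::OkStatus();
      }
      mediapipe::Status Process(CalculatorContext* cc) override {
        // Forward the input packet unchanged.
        cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value());
        return mediapipe::OkStatus();
      }
    };
    REGISTER_CALCULATOR(ExampleCalculator);

    }  // namespace mediapipe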
"//mediapipe/graphs/object_detection_3d/calculators:annotations_to_model_matrices_calculator", "//mediapipe/graphs/object_detection_3d/calculators:gl_animation_overlay_calculator", - "//mediapipe/graphs/object_detection_3d/subgraphs:objectron_detection_gpu", - "//mediapipe/graphs/object_detection_3d/subgraphs:objectron_tracking_gpu", + "//mediapipe/modules/objectron:objectron_detection_1stage_gpu", + "//mediapipe/modules/objectron:objectron_tracking_1stage_gpu", + ], +) + +cc_library( + name = "desktop_cpu_calculators", + visibility = ["//visibility:public"], + deps = [ + "//mediapipe/calculators/tflite:tflite_model_calculator", + "//mediapipe/calculators/util:local_file_contents_calculator", + "//mediapipe/calculators/video:opencv_video_decoder_calculator", + "//mediapipe/calculators/video:opencv_video_encoder_calculator", + "//mediapipe/graphs/object_detection_3d/subgraphs:renderer_cpu", + "//mediapipe/modules/objectron:objectron_cpu", ], ) diff --git a/mediapipe/graphs/object_detection_3d/calculators/BUILD b/mediapipe/graphs/object_detection_3d/calculators/BUILD index d03174ff2..5550128af 100644 --- a/mediapipe/graphs/object_detection_3d/calculators/BUILD +++ b/mediapipe/graphs/object_detection_3d/calculators/BUILD @@ -18,43 +18,6 @@ licenses(["notice"]) package(default_visibility = ["//visibility:public"]) -mediapipe_proto_library( - name = "object_proto", - srcs = ["object.proto"], - visibility = ["//visibility:public"], -) - -mediapipe_proto_library( - name = "a_r_capture_metadata_proto", - srcs = ["a_r_capture_metadata.proto"], - visibility = ["//visibility:public"], -) - -mediapipe_proto_library( - name = "annotation_proto", - srcs = ["annotation_data.proto"], - visibility = ["//visibility:public"], - deps = [ - ":a_r_capture_metadata_proto", - ":object_proto", - ], -) - -mediapipe_proto_library( - name = "camera_parameters_proto", - srcs = ["camera_parameters.proto"], - visibility = ["//visibility:public"], -) - -mediapipe_proto_library( - name = "frame_annotation_tracker_calculator_proto", - srcs = ["frame_annotation_tracker_calculator.proto"], - visibility = ["//visibility:public"], - deps = [ - "//mediapipe/framework:calculator_proto", - ], -) - mediapipe_proto_library( name = "gl_animation_overlay_calculator_proto", srcs = ["gl_animation_overlay_calculator.proto"], @@ -64,32 +27,6 @@ mediapipe_proto_library( ], ) -mediapipe_proto_library( - name = "belief_decoder_config_proto", - srcs = ["belief_decoder_config.proto"], - visibility = ["//visibility:public"], -) - -mediapipe_proto_library( - name = "tflite_tensors_to_objects_calculator_proto", - srcs = ["tflite_tensors_to_objects_calculator.proto"], - visibility = ["//visibility:public"], - deps = [ - ":belief_decoder_config_proto", - "//mediapipe/framework:calculator_proto", - ], -) - -mediapipe_proto_library( - name = "lift_2d_frame_annotation_to_3d_calculator_proto", - srcs = ["lift_2d_frame_annotation_to_3d_calculator.proto"], - visibility = ["//visibility:public"], - deps = [ - ":belief_decoder_config_proto", - "//mediapipe/framework:calculator_proto", - ], -) - mediapipe_proto_library( name = "annotations_to_model_matrices_calculator_proto", srcs = ["annotations_to_model_matrices_calculator.proto"], @@ -118,57 +55,11 @@ mediapipe_proto_library( ], ) -mediapipe_proto_library( - name = "frame_annotation_to_rect_calculator_proto", - srcs = ["frame_annotation_to_rect_calculator.proto"], - visibility = ["//visibility:public"], - deps = [ - "//mediapipe/framework:calculator_proto", - ], -) - -mediapipe_proto_library( - name = 
"filter_detection_calculator_proto", - srcs = ["filter_detection_calculator.proto"], - visibility = ["//visibility:public"], - deps = [ - "//mediapipe/framework:calculator_proto", - ], -) - -cc_library( - name = "box_util", - srcs = ["box_util.cc"], - hdrs = ["box_util.h"], - deps = [ - "//mediapipe/framework/port:logging", - "//mediapipe/framework/port:opencv_core", - "//mediapipe/framework/port:opencv_imgproc", - "//mediapipe/util/tracking:box_tracker_cc_proto", - ], -) - -cc_library( - name = "frame_annotation_tracker", - srcs = ["frame_annotation_tracker.cc"], - hdrs = ["frame_annotation_tracker.h"], - deps = [ - ":annotation_cc_proto", - ":box_util", - "//mediapipe/framework/port:integral_types", - "//mediapipe/framework/port:logging", - "//mediapipe/util/tracking:box_tracker_cc_proto", - "@com_google_absl//absl/container:btree", - "@com_google_absl//absl/container:flat_hash_set", - ], -) - cc_library( name = "gl_animation_overlay_calculator", srcs = ["gl_animation_overlay_calculator.cc"], visibility = ["//visibility:public"], deps = [ - ":camera_parameters_cc_proto", ":gl_animation_overlay_calculator_cc_proto", ":model_matrix_cc_proto", "//mediapipe/framework:calculator_framework", @@ -176,164 +67,25 @@ cc_library( "//mediapipe/framework/port:status", "//mediapipe/gpu:gl_calculator_helper", "//mediapipe/gpu:shader_util", + "//mediapipe/modules/objectron/calculators:camera_parameters_cc_proto", "//mediapipe/util/android:asset_manager_util", ], alwayslink = 1, ) -cc_library( - name = "decoder", - srcs = [ - "decoder.cc", - ], - hdrs = [ - "decoder.h", - ], - deps = [ - ":annotation_cc_proto", - ":belief_decoder_config_cc_proto", - "//mediapipe/framework/port:logging", - "//mediapipe/framework/port:opencv_core", - "//mediapipe/framework/port:opencv_imgproc", - "//mediapipe/framework/port:status", - "@com_google_absl//absl/status", - "@eigen_archive//:eigen", - ], -) - -cc_library( - name = "tensor_util", - srcs = [ - "tensor_util.cc", - ], - hdrs = [ - "tensor_util.h", - ], - deps = [ - "//mediapipe/framework/port:logging", - "//mediapipe/framework/port:opencv_core", - "@org_tensorflow//tensorflow/lite:framework", - ], -) - -cc_library( - name = "box", - srcs = [ - "box.cc", - "model.cc", - ], - hdrs = [ - "box.h", - "model.h", - "types.h", - ], - visibility = ["//visibility:public"], - deps = [ - ":annotation_cc_proto", - ":object_cc_proto", - "//mediapipe/framework/port:logging", - "@eigen_archive//:eigen", - ], -) - -cc_library( - name = "frame_annotation_to_timed_box_list_calculator", - srcs = ["frame_annotation_to_timed_box_list_calculator.cc"], - visibility = ["//visibility:public"], - deps = [ - ":annotation_cc_proto", - ":box_util", - "//mediapipe/framework:calculator_framework", - "//mediapipe/framework/port:opencv_core", - "//mediapipe/framework/port:opencv_imgproc", - "//mediapipe/framework/port:ret_check", - "//mediapipe/framework/port:status", - "//mediapipe/util/tracking:box_tracker_cc_proto", - "@com_google_absl//absl/memory", - ], - alwayslink = 1, -) - -cc_library( - name = "frame_annotation_tracker_calculator", - srcs = ["frame_annotation_tracker_calculator.cc"], - visibility = ["//visibility:public"], - deps = [ - ":annotation_cc_proto", - ":frame_annotation_tracker", - ":frame_annotation_tracker_calculator_cc_proto", - "//mediapipe/framework:calculator_framework", - "//mediapipe/framework/port:ret_check", - "//mediapipe/framework/port:status", - "//mediapipe/util/tracking:box_tracker_cc_proto", - "@com_google_absl//absl/container:flat_hash_set", - 
"@com_google_absl//absl/memory", - ], - alwayslink = 1, -) - -cc_library( - name = "tflite_tensors_to_objects_calculator", - srcs = ["tflite_tensors_to_objects_calculator.cc"], - visibility = ["//visibility:public"], - deps = [ - ":annotation_cc_proto", - ":belief_decoder_config_cc_proto", - ":decoder", - ":tensor_util", - ":tflite_tensors_to_objects_calculator_cc_proto", - "//mediapipe/framework:calculator_framework", - "//mediapipe/framework/deps:file_path", - "//mediapipe/framework/formats:detection_cc_proto", - "//mediapipe/framework/port:opencv_core", - "//mediapipe/framework/port:ret_check", - "@com_google_absl//absl/memory", - "@com_google_absl//absl/strings:str_format", - "@com_google_absl//absl/types:span", - "@eigen_archive//:eigen", - "@org_tensorflow//tensorflow/lite:framework", - ], - alwayslink = 1, -) - -cc_library( - name = "lift_2d_frame_annotation_to_3d_calculator", - srcs = ["lift_2d_frame_annotation_to_3d_calculator.cc"], - visibility = ["//visibility:public"], - deps = [ - ":annotation_cc_proto", - ":belief_decoder_config_cc_proto", - ":decoder", - ":lift_2d_frame_annotation_to_3d_calculator_cc_proto", - ":tensor_util", - ":tflite_tensors_to_objects_calculator_cc_proto", - "//mediapipe/framework:calculator_framework", - "//mediapipe/framework/deps:file_path", - "//mediapipe/framework/formats:detection_cc_proto", - "//mediapipe/framework/port:opencv_core", - "//mediapipe/framework/port:ret_check", - "@com_google_absl//absl/memory", - "@com_google_absl//absl/strings:str_format", - "@com_google_absl//absl/types:span", - "@eigen_archive//:eigen", - "@org_tensorflow//tensorflow/lite:framework", - ], - alwayslink = 1, -) - cc_library( name = "annotations_to_model_matrices_calculator", srcs = ["annotations_to_model_matrices_calculator.cc"], visibility = ["//visibility:public"], deps = [ - ":annotation_cc_proto", ":annotations_to_model_matrices_calculator_cc_proto", - ":box", ":model_matrix_cc_proto", "//mediapipe/framework:calculator_framework", "//mediapipe/framework:calculator_options_cc_proto", "//mediapipe/framework/port:ret_check", "//mediapipe/framework/port:status", + "//mediapipe/modules/objectron/calculators:annotation_cc_proto", + "//mediapipe/modules/objectron/calculators:box", "//mediapipe/util:color_cc_proto", "@com_google_absl//absl/memory", "@com_google_absl//absl/strings", @@ -347,11 +99,11 @@ cc_library( srcs = ["annotations_to_render_data_calculator.cc"], visibility = ["//visibility:public"], deps = [ - ":annotation_cc_proto", ":annotations_to_render_data_calculator_cc_proto", "//mediapipe/framework:calculator_framework", "//mediapipe/framework:calculator_options_cc_proto", "//mediapipe/framework/port:ret_check", + "//mediapipe/modules/objectron/calculators:annotation_cc_proto", "//mediapipe/util:color_cc_proto", "//mediapipe/util:render_data_cc_proto", "@com_google_absl//absl/memory", @@ -359,76 +111,3 @@ cc_library( ], alwayslink = 1, ) - -cc_library( - name = "frame_annotation_to_rect_calculator", - srcs = ["frame_annotation_to_rect_calculator.cc"], - deps = [ - ":annotation_cc_proto", - ":box", - ":frame_annotation_to_rect_calculator_cc_proto", - "//mediapipe/framework:calculator_framework", - "//mediapipe/framework/formats:rect_cc_proto", - "//mediapipe/framework/port:ret_check", - "//mediapipe/framework/port:status", - "@com_google_absl//absl/memory", - "@eigen_archive//:eigen", - ], - alwayslink = 1, -) - -cc_library( - name = "landmarks_to_frame_annotation_calculator", - srcs = ["landmarks_to_frame_annotation_calculator.cc"], - deps = [ - 
":annotation_cc_proto", - "//mediapipe/framework:calculator_framework", - "//mediapipe/framework/formats:landmark_cc_proto", - "//mediapipe/framework/port:ret_check", - "//mediapipe/framework/port:status", - "@com_google_absl//absl/memory", - ], - alwayslink = 1, -) - -cc_library( - name = "filter_detection_calculator", - srcs = ["filter_detection_calculator.cc"], - deps = [ - ":filter_detection_calculator_cc_proto", - "//mediapipe/framework:calculator_framework", - "//mediapipe/framework/formats:detection_cc_proto", - "//mediapipe/framework/formats:location_data_cc_proto", - "//mediapipe/framework/port:logging", - "//mediapipe/framework/port:map_util", - "//mediapipe/framework/port:re2", - "//mediapipe/framework/port:status", - "@com_google_absl//absl/container:node_hash_set", - "@com_google_absl//absl/strings", - ], - alwayslink = 1, -) - -cc_test( - name = "box_util_test", - srcs = ["box_util_test.cc"], - deps = [ - ":box_util", - "//mediapipe/framework/port:gtest_main", - "//mediapipe/framework/port:opencv_core", - "//mediapipe/util/tracking:box_tracker_cc_proto", - ], -) - -cc_test( - name = "frame_annotation_tracker_test", - srcs = ["frame_annotation_tracker_test.cc"], - deps = [ - ":annotation_cc_proto", - ":frame_annotation_tracker", - "//mediapipe/framework/port:gtest_main", - "//mediapipe/framework/port:logging", - "//mediapipe/util/tracking:box_tracker_cc_proto", - "@com_google_absl//absl/container:flat_hash_set", - ], -) diff --git a/mediapipe/graphs/object_detection_3d/calculators/annotations_to_model_matrices_calculator.cc b/mediapipe/graphs/object_detection_3d/calculators/annotations_to_model_matrices_calculator.cc index 0072b51a1..73098e840 100644 --- a/mediapipe/graphs/object_detection_3d/calculators/annotations_to_model_matrices_calculator.cc +++ b/mediapipe/graphs/object_detection_3d/calculators/annotations_to_model_matrices_calculator.cc @@ -24,10 +24,10 @@ #include "mediapipe/framework/calculator_options.pb.h" #include "mediapipe/framework/port/ret_check.h" #include "mediapipe/framework/port/status.h" -#include "mediapipe/graphs/object_detection_3d/calculators/annotation_data.pb.h" #include "mediapipe/graphs/object_detection_3d/calculators/annotations_to_model_matrices_calculator.pb.h" -#include "mediapipe/graphs/object_detection_3d/calculators/box.h" #include "mediapipe/graphs/object_detection_3d/calculators/model_matrix.pb.h" +#include "mediapipe/modules/objectron/calculators/annotation_data.pb.h" +#include "mediapipe/modules/objectron/calculators/box.h" #include "mediapipe/util/color.pb.h" namespace mediapipe { @@ -66,14 +66,14 @@ class AnnotationsToModelMatricesCalculator : public CalculatorBase { AnnotationsToModelMatricesCalculator& operator=( const AnnotationsToModelMatricesCalculator&) = delete; - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; private: - ::mediapipe::Status GetModelMatricesForAnnotations( + mediapipe::Status GetModelMatricesForAnnotations( const FrameAnnotation& annotations, TimedModelMatrixProtoList* model_matrix_list); @@ -83,7 +83,7 @@ class AnnotationsToModelMatricesCalculator : public CalculatorBase { }; REGISTER_CALCULATOR(AnnotationsToModelMatricesCalculator); -::mediapipe::Status 
diff --git a/mediapipe/graphs/object_detection_3d/calculators/annotations_to_model_matrices_calculator.cc b/mediapipe/graphs/object_detection_3d/calculators/annotations_to_model_matrices_calculator.cc
index 0072b51a1..73098e840 100644
--- a/mediapipe/graphs/object_detection_3d/calculators/annotations_to_model_matrices_calculator.cc
+++ b/mediapipe/graphs/object_detection_3d/calculators/annotations_to_model_matrices_calculator.cc
@@ -24,10 +24,10 @@
 #include "mediapipe/framework/calculator_options.pb.h"
 #include "mediapipe/framework/port/ret_check.h"
 #include "mediapipe/framework/port/status.h"
-#include "mediapipe/graphs/object_detection_3d/calculators/annotation_data.pb.h"
 #include "mediapipe/graphs/object_detection_3d/calculators/annotations_to_model_matrices_calculator.pb.h"
-#include "mediapipe/graphs/object_detection_3d/calculators/box.h"
 #include "mediapipe/graphs/object_detection_3d/calculators/model_matrix.pb.h"
+#include "mediapipe/modules/objectron/calculators/annotation_data.pb.h"
+#include "mediapipe/modules/objectron/calculators/box.h"
 #include "mediapipe/util/color.pb.h"
 
 namespace mediapipe {
@@ -66,14 +66,14 @@ class AnnotationsToModelMatricesCalculator : public CalculatorBase {
   AnnotationsToModelMatricesCalculator& operator=(
       const AnnotationsToModelMatricesCalculator&) = delete;
 
-  static ::mediapipe::Status GetContract(CalculatorContract* cc);
+  static mediapipe::Status GetContract(CalculatorContract* cc);
 
-  ::mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Open(CalculatorContext* cc) override;
 
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
 
  private:
-  ::mediapipe::Status GetModelMatricesForAnnotations(
+  mediapipe::Status GetModelMatricesForAnnotations(
       const FrameAnnotation& annotations,
       TimedModelMatrixProtoList* model_matrix_list);
 
@@ -83,7 +83,7 @@ class AnnotationsToModelMatricesCalculator : public CalculatorBase {
 };
 REGISTER_CALCULATOR(AnnotationsToModelMatricesCalculator);
 
-::mediapipe::Status AnnotationsToModelMatricesCalculator::GetContract(
+mediapipe::Status AnnotationsToModelMatricesCalculator::GetContract(
     CalculatorContract* cc) {
   RET_CHECK(cc->Inputs().HasTag(kAnnotationTag)) << "No input stream found.";
   if (cc->Inputs().HasTag(kAnnotationTag)) {
@@ -101,10 +101,10 @@ REGISTER_CALCULATOR(AnnotationsToModelMatricesCalculator);
   if (cc->InputSidePackets().HasTag("MODEL_TRANSFORMATION")) {
     cc->InputSidePackets().Tag("MODEL_TRANSFORMATION").Set<float[]>();
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
-::mediapipe::Status AnnotationsToModelMatricesCalculator::Open(
+mediapipe::Status AnnotationsToModelMatricesCalculator::Open(
     CalculatorContext* cc) {
   RET_CHECK(cc->Inputs().HasTag(kAnnotationTag));
 
@@ -131,10 +131,10 @@ REGISTER_CALCULATOR(AnnotationsToModelMatricesCalculator);
     model_transformation_.setIdentity();
   }
 
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
-::mediapipe::Status AnnotationsToModelMatricesCalculator::Process(
+mediapipe::Status AnnotationsToModelMatricesCalculator::Process(
     CalculatorContext* cc) {
   auto model_matrices = std::make_unique<TimedModelMatrixProtoList>();
 
@@ -142,22 +142,21 @@ REGISTER_CALCULATOR(AnnotationsToModelMatricesCalculator);
       cc->Inputs().Tag(kAnnotationTag).Get<FrameAnnotation>();
 
   if (!GetModelMatricesForAnnotations(annotations, model_matrices.get()).ok()) {
-    return ::mediapipe::InvalidArgumentError(
-        "Error in GetModelMatricesForBoxes");
+    return mediapipe::InvalidArgumentError("Error in GetModelMatricesForBoxes");
   }
 
   cc->Outputs()
       .Tag(kModelMatricesTag)
       .Add(model_matrices.release(), cc->InputTimestamp());
 
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
-::mediapipe::Status
+mediapipe::Status
 AnnotationsToModelMatricesCalculator::GetModelMatricesForAnnotations(
     const FrameAnnotation& annotations,
     TimedModelMatrixProtoList* model_matrix_list) {
   if (model_matrix_list == nullptr) {
-    return ::mediapipe::InvalidArgumentError("model_matrix_list is nullptr");
+    return mediapipe::InvalidArgumentError("model_matrix_list is nullptr");
   }
   model_matrix_list->clear_model_matrix();
 
@@ -217,7 +216,7 @@ AnnotationsToModelMatricesCalculator::GetModelMatricesForAnnotations(
       }
     }
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
 }  // namespace mediapipe
diff --git a/mediapipe/graphs/object_detection_3d/calculators/annotations_to_render_data_calculator.cc b/mediapipe/graphs/object_detection_3d/calculators/annotations_to_render_data_calculator.cc
index 7f8b16009..fc8287d25 100644
--- a/mediapipe/graphs/object_detection_3d/calculators/annotations_to_render_data_calculator.cc
+++ b/mediapipe/graphs/object_detection_3d/calculators/annotations_to_render_data_calculator.cc
@@ -18,8 +18,8 @@
 #include "mediapipe/framework/calculator_framework.h"
 #include "mediapipe/framework/calculator_options.pb.h"
 #include "mediapipe/framework/port/ret_check.h"
-#include "mediapipe/graphs/object_detection_3d/calculators/annotation_data.pb.h"
 #include "mediapipe/graphs/object_detection_3d/calculators/annotations_to_render_data_calculator.pb.h"
+#include "mediapipe/modules/objectron/calculators/annotation_data.pb.h"
 #include "mediapipe/util/color.pb.h"
 #include "mediapipe/util/render_data.pb.h"
 
@@ -98,11 +98,11 @@ class AnnotationsToRenderDataCalculator : public CalculatorBase {
   AnnotationsToRenderDataCalculator& operator=(
       const AnnotationsToRenderDataCalculator&) = delete;
 
-  static ::mediapipe::Status GetContract(CalculatorContract* cc);
+  static mediapipe::Status GetContract(CalculatorContract* cc);
 
-  ::mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Open(CalculatorContext* cc) override;
 
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
 
  private:
   static void SetRenderAnnotationColorThickness(
@@ -134,7 +134,7 @@ class AnnotationsToRenderDataCalculator : public CalculatorBase {
 };
 REGISTER_CALCULATOR(AnnotationsToRenderDataCalculator);
 
-::mediapipe::Status AnnotationsToRenderDataCalculator::GetContract(
+mediapipe::Status AnnotationsToRenderDataCalculator::GetContract(
     CalculatorContract* cc) {
   RET_CHECK(cc->Inputs().HasTag(kAnnotationTag)) << "No input stream found.";
   if (cc->Inputs().HasTag(kAnnotationTag)) {
@@ -142,18 +142,18 @@ REGISTER_CALCULATOR(AnnotationsToRenderDataCalculator);
   }
   cc->Outputs().Tag(kRenderDataTag).Set<RenderData>();
 
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
-::mediapipe::Status AnnotationsToRenderDataCalculator::Open(
+mediapipe::Status AnnotationsToRenderDataCalculator::Open(
     CalculatorContext* cc) {
   cc->SetOffset(TimestampDiff(0));
   options_ = cc->Options<AnnotationsToRenderDataCalculatorOptions>();
 
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
-::mediapipe::Status AnnotationsToRenderDataCalculator::Process(
+mediapipe::Status AnnotationsToRenderDataCalculator::Process(
    CalculatorContext* cc) {
   auto render_data = absl::make_unique<RenderData>();
   bool visualize_depth = options_.visualize_landmark_depth();
@@ -215,7 +215,7 @@ REGISTER_CALCULATOR(AnnotationsToRenderDataCalculator);
       .Tag(kRenderDataTag)
       .Add(render_data.release(), cc->InputTimestamp());
 
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
 void AnnotationsToRenderDataCalculator::AddConnectionToRenderData(
diff --git a/mediapipe/graphs/object_detection_3d/calculators/gl_animation_overlay_calculator.cc b/mediapipe/graphs/object_detection_3d/calculators/gl_animation_overlay_calculator.cc
index 943e038d0..865200105 100644
--- a/mediapipe/graphs/object_detection_3d/calculators/gl_animation_overlay_calculator.cc
+++ b/mediapipe/graphs/object_detection_3d/calculators/gl_animation_overlay_calculator.cc
@@ -24,9 +24,9 @@
 #include "mediapipe/framework/port/status.h"
 #include "mediapipe/gpu/gl_calculator_helper.h"
 #include "mediapipe/gpu/shader_util.h"
-#include "mediapipe/graphs/object_detection_3d/calculators/camera_parameters.pb.h"
 #include "mediapipe/graphs/object_detection_3d/calculators/gl_animation_overlay_calculator.pb.h"
 #include "mediapipe/graphs/object_detection_3d/calculators/model_matrix.pb.h"
+#include "mediapipe/modules/objectron/calculators/camera_parameters.pb.h"
 
 namespace mediapipe {
 
@@ -626,7 +626,7 @@ void GlAnimationOverlayCalculator::LoadModelMatrices(
 ::mediapipe::Status GlAnimationOverlayCalculator::Process(
     CalculatorContext *cc) {
-  return helper_.RunInGlContext([this, &cc]() -> ::mediapipe::Status {
+  return helper_.RunInGlContext([this, &cc]() -> mediapipe::Status {
     if (!initialized_) {
       MP_RETURN_IF_ERROR(GlSetup());
       initialized_ = true;
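Reviewer note: the gl_animation_overlay hunk is the same status rename applied to a lambda return type; GlCalculatorHelper::RunInGlContext() takes a callable returning a status, so only the lambda's explicit return type changes. Condensed from the hunk above (the render body is elided):

    ::mediapipe::Status GlAnimationOverlayCalculator::Process(
        CalculatorContext *cc) {
      // All GL work must run on the helper's GL context.
      return helper_.RunInGlContext([this, &cc]() -> mediapipe::Status {
        if (!initialized_) {
          MP_RETURN_IF_ERROR(GlSetup());
          initialized_ = true;
        }
        // ... render ...
        return mediapipe::OkStatus();
      });
    }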
input_stream: "input_video" -input_stream: "input_width" -input_stream: "input_height" +input_stream: "WIDTH:input_width" +input_stream: "HEIGHT:input_height" +input_side_packet: "LABELS_CSV:allowed_labels" +input_side_packet: "MODEL_SCALE:model_scale" +input_side_packet: "MODEL_TRANSFORMATION:model_transformation" +input_side_packet: "TEXTURE:box_texture" +input_side_packet: "ANIMATION_ASSET:box_asset_name" +input_side_packet: "MASK_TEXTURE:obj_texture" +input_side_packet: "MASK_ASSET:obj_asset_name" output_stream: "output_video" # Throttles the images flowing downstream for flow control. It passes through @@ -19,7 +26,7 @@ output_stream: "output_video" node { calculator: "FlowLimiterCalculator" input_stream: "input_video" - input_stream: "FINISHED:box_rect" + input_stream: "FINISHED:lifted_objects" input_stream_info: { tag_index: "FINISHED" back_edge: true @@ -31,7 +38,7 @@ node { node: { calculator: "ImageCroppingCalculator" input_stream: "IMAGE_GPU:throttled_input_video" - output_stream: "IMAGE_GPU:throttled_input_video_4x3" + output_stream: "IMAGE_GPU:throttled_input_video_3x4" input_stream: "WIDTH:input_width" input_stream: "HEIGHT:input_height" node_options: { @@ -41,87 +48,11 @@ node: { } } -# Caches a box-presence decision fed back from boxLandmarkSubgraph, and upon -# the arrival of the next input image sends out the cached decision with the -# timestamp replaced by that of the input image, essentially generating a packet -# that carries the previous box-presence decision. Note that upon the arrival -# of the very first input image, an empty packet is sent out to jump start the -# feedback loop. node { - calculator: "PreviousLoopbackCalculator" - input_stream: "MAIN:throttled_input_video_4x3" - input_stream: "LOOP:box_presence" - input_stream_info: { - tag_index: "LOOP" - back_edge: true - } - output_stream: "PREV_LOOP:prev_box_presence" -} - -# Drops the incoming image if boxLandmarkSubgraph was able to identify box -# presence in the previous image. Otherwise, passes the incoming image through -# to trigger a new round of box detection in boxDetectionSubgraph. -node { - calculator: "GateCalculator" - input_stream: "throttled_input_video_4x3" - input_stream: "DISALLOW:prev_box_presence" - output_stream: "detection_input_video" - - node_options: { - [type.googleapis.com/mediapipe.GateCalculatorOptions] { - empty_packets_as_allow: true - } - } -} - -# Subgraph that performs 2D object detection. -node { - calculator: "ObjectDetectionOidV4Subgraph" - input_stream: "detection_input_video" - input_side_packet: "allowed_labels" - output_stream: "NORM_RECT:box_rect_from_object_detections" -} - -# Subgraph that localizes box landmarks (see subgraphs/box_landmark_gpu.pbtxt). -node { - calculator: "BoxLandmarkSubgraph" - input_stream: "IMAGE:throttled_input_video_4x3" - input_stream: "NORM_RECT:box_rect" + calculator: "ObjectronGpuSubgraph" + input_stream: "IMAGE_GPU:throttled_input_video_3x4" + input_side_packet: "LABELS_CSV:allowed_labels" output_stream: "FRAME_ANNOTATION:lifted_objects" - output_stream: "NORM_RECT:box_rect_from_landmarks" - output_stream: "PRESENCE:box_presence" -} - -# Caches a box rectangle fed back from boxLandmarkSubgraph, and upon the -# arrival of the next input image sends out the cached rectangle with the -# timestamp replaced by that of the input image, essentially generating a packet -# that carries the previous box rectangle. Note that upon the arrival of the -# very first input image, an empty packet is sent out to jump start the -# feedback loop. 
-node { - calculator: "PreviousLoopbackCalculator" - input_stream: "MAIN:throttled_input_video_4x3" - input_stream: "LOOP:box_rect_from_landmarks" - input_stream_info: { - tag_index: "LOOP" - back_edge: true - } - output_stream: "PREV_LOOP:prev_box_rect_from_landmarks" -} - -# Merges a stream of box rectangles generated by boxDetectionSubgraph and that -# generated by boxLandmarkSubgraph into a single output stream by selecting -# between one of the two streams. The former is selected if the incoming packet -# is not empty, i.e., box detection is performed on the current image by -# boxDetectionSubgraph (because boxLandmarkSubgraph could not identify box -# presence in the previous image). Otherwise, the latter is selected, which is -# never empty because boxLandmarkSubgraphs processes all images (that went -# through FlowLimiterCaculator). -node { - calculator: "MergeCalculator" - input_stream: "box_rect_from_object_detections" - input_stream: "prev_box_rect_from_landmarks" - output_stream: "box_rect" } # The rendering nodes: @@ -171,7 +102,7 @@ node { # then we render the occlusion mask. node: { calculator: "GlAnimationOverlayCalculator" - input_stream: "VIDEO:throttled_input_video_4x3" + input_stream: "VIDEO:throttled_input_video_3x4" input_stream: "MODEL_MATRICES:model_matrices" input_stream: "MASK_MODEL_MATRICES:mask_model_matrices" output_stream: "output_video" diff --git a/mediapipe/graphs/object_detection_3d/object_occlusion_tracking_1stage.pbtxt b/mediapipe/graphs/object_detection_3d/object_occlusion_tracking_1stage.pbtxt index cda2efd73..bda02b2f0 100644 --- a/mediapipe/graphs/object_detection_3d/object_occlusion_tracking_1stage.pbtxt +++ b/mediapipe/graphs/object_detection_3d/object_occlusion_tracking_1stage.pbtxt @@ -44,13 +44,13 @@ node { } node { - calculator: "ObjectronDetectionSubgraphGpu" + calculator: "ObjectronDetection1StageSubgraphGpu" input_stream: "IMAGE_GPU:sampled_input_video" output_stream: "ANNOTATIONS:objects" } node { - calculator: "ObjectronTrackingSubgraphGpu" + calculator: "ObjectronTracking1StageSubgraphGpu" input_stream: "FRAME_ANNOTATION:objects" input_stream: "IMAGE_GPU:input_video_copy" output_stream: "LIFTED_FRAME_ANNOTATION:lifted_tracked_objects" diff --git a/mediapipe/graphs/object_detection_3d/objectron_desktop_cpu.pbtxt b/mediapipe/graphs/object_detection_3d/objectron_desktop_cpu.pbtxt new file mode 100644 index 000000000..bb548bb2d --- /dev/null +++ b/mediapipe/graphs/object_detection_3d/objectron_desktop_cpu.pbtxt @@ -0,0 +1,60 @@ +# MediaPipe Objectron 3D object detection on Desktop CPU. +input_side_packet: "INPUT_FILE_PATH:input_video_path" +input_side_packet: "FILE_PATH:0:box_landmark_model_path" +input_side_packet: "LABELS_CSV:allowed_labels" +input_side_packet: "OUTPUT_FILE_PATH:output_video_path" + + +# Decodes an input video file into images and a video header. 
+node { + calculator: "OpenCvVideoDecoderCalculator" + input_side_packet: "INPUT_FILE_PATH:input_video_path" + output_stream: "VIDEO:input_video" + output_stream: "VIDEO_PRESTREAM:input_video_header" +} + +node { + calculator: "LocalFileContentsCalculator" + input_side_packet: "FILE_PATH:0:box_landmark_model_path" + output_side_packet: "CONTENTS:0:box_landmark_model_blob" +} + +node { + calculator: "TfLiteModelCalculator" + input_side_packet: "MODEL_BLOB:box_landmark_model_blob" + output_side_packet: "MODEL:box_landmark_model" +} + +node { + calculator: "ObjectronCpuSubgraph" + input_stream: "IMAGE:input_video" + input_side_packet: "MODEL:box_landmark_model" + input_side_packet: "LABELS_CSV:allowed_labels" + output_stream: "LANDMARKS:box_landmarks" + output_stream: "NORM_RECT:box_rect" +} + +# Subgraph that renders annotations and overlays them on top of the input +# images (see renderer_gpu.pbtxt). +node { + calculator: "RendererSubgraph" + input_stream: "IMAGE:input_video" + input_stream: "LANDMARKS:box_landmarks" + input_stream: "NORM_RECT:box_rect" + output_stream: "IMAGE:output_video" +} + +# Encodes the annotated images into a video file, adopting properties specified +# in the input video header, e.g., video framerate. +node { + calculator: "OpenCvVideoEncoderCalculator" + input_stream: "VIDEO:output_video" + input_stream: "VIDEO_PRESTREAM:input_video_header" + input_side_packet: "OUTPUT_FILE_PATH:output_video_path" + node_options: { + [type.googleapis.com/mediapipe.OpenCvVideoEncoderCalculatorOptions]: { + codec: "avc1" + video_format: "mp4" + } + } +} diff --git a/mediapipe/graphs/object_detection_3d/subgraphs/BUILD b/mediapipe/graphs/object_detection_3d/subgraphs/BUILD index 10d5b92e6..121fe2221 100644 --- a/mediapipe/graphs/object_detection_3d/subgraphs/BUILD +++ b/mediapipe/graphs/object_detection_3d/subgraphs/BUILD @@ -22,76 +22,14 @@ licenses(["notice"]) package(default_visibility = ["//visibility:public"]) mediapipe_simple_subgraph( - name = "objectron_detection_gpu", - graph = "objectron_detection_gpu.pbtxt", - register_as = "ObjectronDetectionSubgraphGpu", + name = "renderer_cpu", + graph = "renderer_cpu.pbtxt", + register_as = "RendererSubgraph", + visibility = ["//visibility:public"], deps = [ - "//mediapipe/calculators/image:image_transformation_calculator", - "//mediapipe/calculators/tflite:tflite_converter_calculator", - "//mediapipe/calculators/tflite:tflite_custom_op_resolver_calculator", - "//mediapipe/calculators/tflite:tflite_inference_calculator", - "//mediapipe/graphs/object_detection_3d/calculators:tflite_tensors_to_objects_calculator", - ], -) - -mediapipe_simple_subgraph( - name = "objectron_tracking_gpu", - graph = "objectron_tracking_gpu.pbtxt", - register_as = "ObjectronTrackingSubgraphGpu", - deps = [ - "//mediapipe/calculators/image:image_transformation_calculator", - "//mediapipe/calculators/video:box_tracker_calculator", - "//mediapipe/calculators/video:flow_packager_calculator", - "//mediapipe/calculators/video:motion_analysis_calculator", - "//mediapipe/framework/stream_handler:sync_set_input_stream_handler", - "//mediapipe/gpu:gpu_buffer_to_image_frame_calculator", - "//mediapipe/graphs/object_detection_3d/calculators:frame_annotation_to_timed_box_list_calculator", - "//mediapipe/graphs/object_detection_3d/calculators:frame_annotation_tracker_calculator", - "//mediapipe/graphs/object_detection_3d/calculators:lift_2d_frame_annotation_to_3d_calculator", - ], -) - -mediapipe_simple_subgraph( - name = "box_landmark_gpu", - graph = 
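Reviewer note: objectron_desktop_cpu.pbtxt is driven entirely by the four input_side_packets declared at its top. A hedged sketch of a runner using the public CalculatorGraph API (packet names mirror the graph's side-packet names; RunObjectron and all file paths are placeholders, and error handling is minimal):

    #include <string>

    #include "mediapipe/framework/calculator_graph.h"
    #include "mediapipe/framework/port/parse_text_proto.h"

    mediapipe::Status RunObjectron(const std::string& graph_text) {
      auto config =
          mediapipe::ParseTextProtoOrDie<mediapipe::CalculatorGraphConfig>(
              graph_text);
      mediapipe::CalculatorGraph graph;
      MP_RETURN_IF_ERROR(graph.Initialize(config));
      // Side packets correspond to the input_side_packet declarations above.
      MP_RETURN_IF_ERROR(graph.StartRun({
          {"input_video_path", mediapipe::MakePacket<std::string>("/tmp/in.mp4")},
          {"box_landmark_model_path",
           mediapipe::MakePacket<std::string>("/tmp/objectron.tflite")},
          {"allowed_labels", mediapipe::MakePacket<std::string>("Chair")},
          {"output_video_path",
           mediapipe::MakePacket<std::string>("/tmp/out.mp4")},
      }));
      return graph.WaitUntilDone();
    }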
"box_landmark_gpu.pbtxt", - register_as = "BoxLandmarkSubgraph", - deps = [ - "//mediapipe/calculators/core:split_vector_calculator", - "//mediapipe/calculators/image:image_cropping_calculator", - "//mediapipe/calculators/image:image_properties_calculator", - "//mediapipe/calculators/image:image_transformation_calculator", - "//mediapipe/calculators/tflite:tflite_converter_calculator", - "//mediapipe/calculators/tflite:tflite_custom_op_resolver_calculator", - "//mediapipe/calculators/tflite:tflite_inference_calculator", - "//mediapipe/calculators/tflite:tflite_tensors_to_floats_calculator", - "//mediapipe/calculators/tflite:tflite_tensors_to_landmarks_calculator", - "//mediapipe/calculators/util:detections_to_rects_calculator", - "//mediapipe/calculators/util:landmark_letterbox_removal_calculator", - "//mediapipe/calculators/util:landmark_projection_calculator", - "//mediapipe/calculators/util:landmarks_smoothing_calculator", - "//mediapipe/calculators/util:landmarks_to_detection_calculator", - "//mediapipe/calculators/util:rect_transformation_calculator", - "//mediapipe/calculators/util:thresholding_calculator", - "//mediapipe/graphs/object_detection_3d/calculators:frame_annotation_to_rect_calculator", - "//mediapipe/graphs/object_detection_3d/calculators:landmarks_to_frame_annotation_calculator", - "//mediapipe/graphs/object_detection_3d/calculators:lift_2d_frame_annotation_to_3d_calculator", - ], -) - -mediapipe_simple_subgraph( - name = "object_detection_oid_v4_gpu", - graph = "object_detection_oid_v4_gpu.pbtxt", - register_as = "ObjectDetectionOidV4Subgraph", - deps = [ - "//mediapipe/calculators/image:image_transformation_calculator", - "//mediapipe/calculators/tflite:ssd_anchors_calculator", - "//mediapipe/calculators/tflite:tflite_converter_calculator", - "//mediapipe/calculators/tflite:tflite_inference_calculator", - "//mediapipe/calculators/tflite:tflite_tensors_to_detections_calculator", - "//mediapipe/calculators/util:detection_label_id_to_text_calculator", - "//mediapipe/calculators/util:detections_to_rects_calculator", - "//mediapipe/calculators/util:non_max_suppression_calculator", - "//mediapipe/calculators/util:rect_transformation_calculator", - "//mediapipe/graphs/object_detection_3d/calculators:filter_detection_calculator", + "//mediapipe/calculators/util:annotation_overlay_calculator", + "//mediapipe/calculators/util:detections_to_render_data_calculator", + "//mediapipe/calculators/util:landmarks_to_render_data_calculator", + "//mediapipe/calculators/util:rect_to_render_data_calculator", ], ) diff --git a/mediapipe/graphs/object_detection_3d/subgraphs/renderer_cpu.pbtxt b/mediapipe/graphs/object_detection_3d/subgraphs/renderer_cpu.pbtxt new file mode 100644 index 000000000..0a5ca5784 --- /dev/null +++ b/mediapipe/graphs/object_detection_3d/subgraphs/renderer_cpu.pbtxt @@ -0,0 +1,57 @@ +# MediaPipe Objectron vertices/landmarks rendering CPU subgraph. + +type: "RendererSubgraph" + +input_stream: "IMAGE:input_image" +input_stream: "LANDMARKS:landmarks" +input_stream: "NORM_RECT:rect" +output_stream: "IMAGE:output_image" + +# Converts landmarks to drawing primitives for annotation overlay. 
+node { + calculator: "LandmarksToRenderDataCalculator" + input_stream: "NORM_LANDMARKS:landmarks" + output_stream: "RENDER_DATA:landmark_render_data" + node_options: { + [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] { + landmark_connections: [1, 2] # edge 1-2 + landmark_connections: [1, 3] # edge 1-3 + landmark_connections: [1, 5] # edge 1-5 + landmark_connections: [2, 4] # edge 2-4 + landmark_connections: [2, 6] # edge 2-6 + landmark_connections: [3, 4] # edge 3-4 + landmark_connections: [3, 7] # edge 3-7 + landmark_connections: [4, 8] # edge 4-8 + landmark_connections: [5, 6] # edge 5-6 + landmark_connections: [5, 7] # edge 5-7 + landmark_connections: [6, 8] # edge 6-8 + landmark_connections: [7, 8] # edge 7-8 + landmark_color { r: 255 g: 0 b: 0 } + connection_color { r: 0 g: 255 b: 0 } + thickness: 4.0 + } + } +} + +# Converts normalized rects to drawing primitives for annotation overlay. +node { + calculator: "RectToRenderDataCalculator" + input_stream: "NORM_RECT:rect" + output_stream: "RENDER_DATA:rect_render_data" + node_options: { + [type.googleapis.com/mediapipe.RectToRenderDataCalculatorOptions] { + filled: false + color { r: 255 g: 0 b: 0 } + thickness: 4.0 + } + } +} + +# Draws annotations and overlays them on top of the input images. +node { + calculator: "AnnotationOverlayCalculator" + input_stream: "IMAGE:input_image" + input_stream: "landmark_render_data" + input_stream: "rect_render_data" + output_stream: "IMAGE:output_image" +} diff --git a/mediapipe/graphs/pose_tracking/BUILD b/mediapipe/graphs/pose_tracking/BUILD index 3669b0a24..54af332ca 100644 --- a/mediapipe/graphs/pose_tracking/BUILD +++ b/mediapipe/graphs/pose_tracking/BUILD @@ -21,6 +21,42 @@ licenses(["notice"]) package(default_visibility = ["//visibility:public"]) +cc_library( + name = "pose_tracking_gpu_deps", + deps = [ + "//mediapipe/calculators/core:flow_limiter_calculator", + "//mediapipe/calculators/image:image_properties_calculator", + "//mediapipe/calculators/util:landmarks_smoothing_calculator", + "//mediapipe/graphs/pose_tracking/subgraphs:pose_renderer_gpu", + "//mediapipe/modules/pose_landmark:pose_landmark_gpu", + ], +) + +mediapipe_binary_graph( + name = "pose_tracking_gpu_binary_graph", + graph = "pose_tracking_gpu.pbtxt", + output_name = "pose_tracking_gpu.binarypb", + deps = [":pose_tracking_gpu_deps"], +) + +cc_library( + name = "pose_tracking_cpu_deps", + deps = [ + "//mediapipe/calculators/core:flow_limiter_calculator", + "//mediapipe/calculators/image:image_properties_calculator", + "//mediapipe/calculators/util:landmarks_smoothing_calculator", + "//mediapipe/graphs/pose_tracking/subgraphs:pose_renderer_cpu", + "//mediapipe/modules/pose_landmark:pose_landmark_cpu", + ], +) + +mediapipe_binary_graph( + name = "pose_tracking_cpu_binary_graph", + graph = "pose_tracking_cpu.pbtxt", + output_name = "pose_tracking_cpu.binarypb", + deps = [":pose_tracking_cpu_deps"], +) + cc_library( name = "upper_body_pose_tracking_gpu_deps", deps = [ diff --git a/mediapipe/graphs/pose_tracking/pose_tracking_cpu.pbtxt b/mediapipe/graphs/pose_tracking/pose_tracking_cpu.pbtxt new file mode 100644 index 000000000..441fc67a6 --- /dev/null +++ b/mediapipe/graphs/pose_tracking/pose_tracking_cpu.pbtxt @@ -0,0 +1,72 @@ +# MediaPipe graph that performs pose tracking with TensorFlow Lite on CPU. + +# CPU buffer. (ImageFrame) +input_stream: "input_video" + +# Output image with rendered results. (ImageFrame) +output_stream: "output_video" +# Pose landmarks. 
(NormalizedLandmarkList) +output_stream: "pose_landmarks" + +# Throttles the images flowing downstream for flow control. It passes through +# the very first incoming image unaltered, and waits for downstream nodes +# (calculators and subgraphs) in the graph to finish their tasks before it +# passes through another image. All images that come in while waiting are +# dropped, limiting the number of in-flight images in most part of the graph to +# 1. This prevents the downstream nodes from queuing up incoming images and data +# excessively, which leads to increased latency and memory usage, unwanted in +# real-time mobile applications. It also eliminates unnecessarily computation, +# e.g., the output produced by a node may get dropped downstream if the +# subsequent nodes are still busy processing previous inputs. +node { + calculator: "FlowLimiterCalculator" + input_stream: "input_video" + input_stream: "FINISHED:output_video" + input_stream_info: { + tag_index: "FINISHED" + back_edge: true + } + output_stream: "throttled_input_video" +} + +# Subgraph that detects poses and corresponding landmarks. +node { + calculator: "PoseLandmarkCpu" + input_stream: "IMAGE:throttled_input_video" + output_stream: "LANDMARKS:pose_landmarks" + output_stream: "DETECTION:pose_detection" + output_stream: "ROI_FROM_LANDMARKS:roi_from_landmarks" +} + +# Calculates size of the image. +node { + calculator: "ImagePropertiesCalculator" + input_stream: "IMAGE:throttled_input_video" + output_stream: "SIZE:image_size" +} + +# Smoothes pose landmarks in order to reduce jitter. +node { + calculator: "LandmarksSmoothingCalculator" + input_stream: "NORM_LANDMARKS:pose_landmarks" + input_stream: "IMAGE_SIZE:image_size" + output_stream: "NORM_FILTERED_LANDMARKS:pose_landmarks_smoothed" + node_options: { + [type.googleapis.com/mediapipe.LandmarksSmoothingCalculatorOptions] { + velocity_filter: { + window_size: 5 + velocity_scale: 10.0 + } + } + } +} + +# Subgraph that renders pose-landmark annotation onto the input image. +node { + calculator: "PoseRendererCpu" + input_stream: "IMAGE:throttled_input_video" + input_stream: "LANDMARKS:pose_landmarks_smoothed" + input_stream: "ROI:roi_from_landmarks" + input_stream: "DETECTION:pose_detection" + output_stream: "IMAGE:output_video" +} diff --git a/mediapipe/graphs/pose_tracking/pose_tracking_gpu.pbtxt b/mediapipe/graphs/pose_tracking/pose_tracking_gpu.pbtxt new file mode 100644 index 000000000..d2712e16d --- /dev/null +++ b/mediapipe/graphs/pose_tracking/pose_tracking_gpu.pbtxt @@ -0,0 +1,72 @@ +# MediaPipe graph that performs pose tracking with TensorFlow Lite on GPU. + +# GPU buffer. (GpuBuffer) +input_stream: "input_video" + +# Output image with rendered results. (GpuBuffer) +output_stream: "output_video" +# Pose landmarks. (NormalizedLandmarkList) +output_stream: "pose_landmarks" + +# Throttles the images flowing downstream for flow control. It passes through +# the very first incoming image unaltered, and waits for downstream nodes +# (calculators and subgraphs) in the graph to finish their tasks before it +# passes through another image. All images that come in while waiting are +# dropped, limiting the number of in-flight images in most part of the graph to +# 1. This prevents the downstream nodes from queuing up incoming images and data +# excessively, which leads to increased latency and memory usage, unwanted in +# real-time mobile applications. 
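Reviewer note: pose_tracking_cpu.pbtxt consumes raw ImageFrames on "input_video" and loops "output_video" back through the FlowLimiterCalculator as the FINISHED signal, so at most one frame is in flight. A minimal, illustrative feeding helper under those assumptions (SendFrame is hypothetical; the graph is assumed initialized and started):

    #include <cstdint>
    #include <memory>

    #include "mediapipe/framework/calculator_graph.h"
    #include "mediapipe/framework/formats/image_frame.h"

    mediapipe::Status SendFrame(mediapipe::CalculatorGraph* graph,
                                std::unique_ptr<mediapipe::ImageFrame> frame,
                                int64_t frame_index) {
      // Timestamps must be strictly increasing; a frame index works for a
      // fixed-rate decoded video.
      return graph->AddPacketToInputStream(
          "input_video", mediapipe::Adopt(frame.release())
                             .At(mediapipe::Timestamp(frame_index)));
    }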
It also eliminates unnecessarily computation, +# e.g., the output produced by a node may get dropped downstream if the +# subsequent nodes are still busy processing previous inputs. +node { + calculator: "FlowLimiterCalculator" + input_stream: "input_video" + input_stream: "FINISHED:output_video" + input_stream_info: { + tag_index: "FINISHED" + back_edge: true + } + output_stream: "throttled_input_video" +} + +# Subgraph that detects poses and corresponding landmarks. +node { + calculator: "PoseLandmarkGpu" + input_stream: "IMAGE:throttled_input_video" + output_stream: "LANDMARKS:pose_landmarks" + output_stream: "DETECTION:pose_detection" + output_stream: "ROI_FROM_LANDMARKS:roi_from_landmarks" +} + +# Calculates size of the image. +node { + calculator: "ImagePropertiesCalculator" + input_stream: "IMAGE_GPU:throttled_input_video" + output_stream: "SIZE:image_size" +} + +# Smoothes pose landmarks in order to reduce jitter. +node { + calculator: "LandmarksSmoothingCalculator" + input_stream: "NORM_LANDMARKS:pose_landmarks" + input_stream: "IMAGE_SIZE:image_size" + output_stream: "NORM_FILTERED_LANDMARKS:pose_landmarks_smoothed" + node_options: { + [type.googleapis.com/mediapipe.LandmarksSmoothingCalculatorOptions] { + velocity_filter: { + window_size: 5 + velocity_scale: 10.0 + } + } + } +} + +# Subgraph that renders pose-landmark annotation onto the input image. +node { + calculator: "PoseRendererGpu" + input_stream: "IMAGE:throttled_input_video" + input_stream: "LANDMARKS:pose_landmarks_smoothed" + input_stream: "ROI:roi_from_landmarks" + input_stream: "DETECTION:pose_detection" + output_stream: "IMAGE:output_video" +} diff --git a/mediapipe/graphs/pose_tracking/subgraphs/BUILD b/mediapipe/graphs/pose_tracking/subgraphs/BUILD index 43f436d45..3a1825704 100644 --- a/mediapipe/graphs/pose_tracking/subgraphs/BUILD +++ b/mediapipe/graphs/pose_tracking/subgraphs/BUILD @@ -21,6 +21,34 @@ licenses(["notice"]) package(default_visibility = ["//visibility:public"]) +mediapipe_simple_subgraph( + name = "pose_renderer_gpu", + graph = "pose_renderer_gpu.pbtxt", + register_as = "PoseRendererGpu", + deps = [ + "//mediapipe/calculators/core:split_normalized_landmark_list_calculator", + "//mediapipe/calculators/util:annotation_overlay_calculator", + "//mediapipe/calculators/util:detections_to_render_data_calculator", + "//mediapipe/calculators/util:landmarks_to_render_data_calculator", + "//mediapipe/calculators/util:rect_to_render_data_calculator", + "//mediapipe/calculators/util:rect_to_render_scale_calculator", + ], +) + +mediapipe_simple_subgraph( + name = "pose_renderer_cpu", + graph = "pose_renderer_cpu.pbtxt", + register_as = "PoseRendererCpu", + deps = [ + "//mediapipe/calculators/core:split_normalized_landmark_list_calculator", + "//mediapipe/calculators/util:annotation_overlay_calculator", + "//mediapipe/calculators/util:detections_to_render_data_calculator", + "//mediapipe/calculators/util:landmarks_to_render_data_calculator", + "//mediapipe/calculators/util:rect_to_render_data_calculator", + "//mediapipe/calculators/util:rect_to_render_scale_calculator", + ], +) + mediapipe_simple_subgraph( name = "upper_body_pose_renderer_gpu", graph = "upper_body_pose_renderer_gpu.pbtxt", diff --git a/mediapipe/graphs/pose_tracking/subgraphs/pose_renderer_cpu.pbtxt b/mediapipe/graphs/pose_tracking/subgraphs/pose_renderer_cpu.pbtxt new file mode 100644 index 000000000..a799f3f4c --- /dev/null +++ b/mediapipe/graphs/pose_tracking/subgraphs/pose_renderer_cpu.pbtxt @@ -0,0 +1,274 @@ +# MediaPipe pose landmarks 
rendering subgraph. + +type: "PoseRendererCpu" + +# CPU image. (ImageFrame) +input_stream: "IMAGE:input_image" +# Pose landmarks. (NormalizedLandmarkList) +input_stream: "LANDMARKS:pose_landmarks" +# Region of interest calculated based on landmarks. (NormalizedRect) +input_stream: "ROI:roi" +# Detected pose. (Detection) +input_stream: "DETECTION:detection" + +# CPU image with rendered data. (ImageFrame) +output_stream: "IMAGE:output_image" + +node { + calculator: "ImagePropertiesCalculator" + input_stream: "IMAGE:input_image" + output_stream: "SIZE:image_size" +} + +# Calculates rendering scale based on the pose roi. +node { + calculator: "RectToRenderScaleCalculator" + input_stream: "NORM_RECT:roi" + input_stream: "IMAGE_SIZE:image_size" + output_stream: "RENDER_SCALE:render_scale" + node_options: { + [type.googleapis.com/mediapipe.RectToRenderScaleCalculatorOptions] { + multiplier: 0.0012 + } + } +} + +# Converts detections to drawing primitives for annotation overlay. +node { + calculator: "DetectionsToRenderDataCalculator" + input_stream: "DETECTION:detection" + output_stream: "RENDER_DATA:detection_render_data" + node_options: { + [type.googleapis.com/mediapipe.DetectionsToRenderDataCalculatorOptions] { + thickness: 4.0 + color { r: 0 g: 255 b: 0 } + } + } +} + +node { + calculator: "SplitNormalizedLandmarkListCalculator" + input_stream: "pose_landmarks" + output_stream: "visible_pose_landmarks" + node_options: { + [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] { + ranges: { begin: 0 end: 25 } + } + } +} + +# Converts landmarks to drawing primitives for annotation overlay. +node { + calculator: "LandmarksToRenderDataCalculator" + input_stream: "NORM_LANDMARKS:pose_landmarks" + input_stream: "RENDER_SCALE:render_scale" + output_stream: "RENDER_DATA:landmarks_render_data" + node_options: { + [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] { + landmark_connections: 0 + landmark_connections: 1 + landmark_connections: 1 + landmark_connections: 2 + landmark_connections: 2 + landmark_connections: 3 + landmark_connections: 3 + landmark_connections: 7 + landmark_connections: 0 + landmark_connections: 4 + landmark_connections: 4 + landmark_connections: 5 + landmark_connections: 5 + landmark_connections: 6 + landmark_connections: 6 + landmark_connections: 8 + landmark_connections: 9 + landmark_connections: 10 + landmark_connections: 11 + landmark_connections: 12 + landmark_connections: 11 + landmark_connections: 13 + landmark_connections: 13 + landmark_connections: 15 + landmark_connections: 15 + landmark_connections: 17 + landmark_connections: 15 + landmark_connections: 19 + landmark_connections: 15 + landmark_connections: 21 + landmark_connections: 17 + landmark_connections: 19 + landmark_connections: 12 + landmark_connections: 14 + landmark_connections: 14 + landmark_connections: 16 + landmark_connections: 16 + landmark_connections: 18 + landmark_connections: 16 + landmark_connections: 20 + landmark_connections: 16 + landmark_connections: 22 + landmark_connections: 18 + landmark_connections: 20 + landmark_connections: 11 + landmark_connections: 23 + landmark_connections: 12 + landmark_connections: 24 + landmark_connections: 23 + landmark_connections: 24 + landmark_connections: 23 + landmark_connections: 25 + landmark_connections: 24 + landmark_connections: 26 + landmark_connections: 25 + landmark_connections: 27 + landmark_connections: 26 + landmark_connections: 28 + landmark_connections: 27 + landmark_connections: 29 + landmark_connections: 28 + 
landmark_connections: 30 + landmark_connections: 29 + landmark_connections: 31 + landmark_connections: 30 + landmark_connections: 32 + landmark_connections: 27 + landmark_connections: 31 + landmark_connections: 28 + landmark_connections: 32 + + landmark_color { r: 255 g: 255 b: 255 } + connection_color { r: 255 g: 255 b: 255 } + thickness: 3.0 + visualize_landmark_depth: false + utilize_visibility: true + visibility_threshold: 0.5 + } + } +} + +# Take left pose landmarks. +node { + calculator: "SplitNormalizedLandmarkListCalculator" + input_stream: "pose_landmarks" + output_stream: "landmarks_left_side" + node_options: { + [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] { + ranges: { begin: 1 end: 4 } + ranges: { begin: 7 end: 8 } + ranges: { begin: 9 end: 10 } + ranges: { begin: 11 end: 12 } + ranges: { begin: 13 end: 14 } + ranges: { begin: 15 end: 16 } + ranges: { begin: 17 end: 18 } + ranges: { begin: 19 end: 20 } + ranges: { begin: 21 end: 22 } + ranges: { begin: 23 end: 24 } + + combine_outputs: true + } + } +} + +# Take right pose landmarks. +node { + calculator: "SplitNormalizedLandmarkListCalculator" + input_stream: "pose_landmarks" + output_stream: "landmarks_right_side" + node_options: { + [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] { + ranges: { begin: 4 end: 7 } + ranges: { begin: 8 end: 9 } + ranges: { begin: 10 end: 11 } + ranges: { begin: 12 end: 13 } + ranges: { begin: 14 end: 15 } + ranges: { begin: 16 end: 17 } + ranges: { begin: 18 end: 19 } + ranges: { begin: 20 end: 21 } + ranges: { begin: 22 end: 23 } + ranges: { begin: 24 end: 25 } + + combine_outputs: true + } + } +} + +# Render pose joints as big white circles. +node { + calculator: "LandmarksToRenderDataCalculator" + input_stream: "NORM_LANDMARKS:visible_pose_landmarks" + input_stream: "RENDER_SCALE:render_scale" + output_stream: "RENDER_DATA:landmarks_background_joints_render_data" + node_options: { + [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] { + landmark_color { r: 255 g: 255 b: 255 } + connection_color { r: 255 g: 255 b: 255 } + thickness: 5.0 + visualize_landmark_depth: false + utilize_visibility: true + visibility_threshold: 0.5 + } + } +} + +# Render pose left side joints as orange circles (inside white ones). +node { + calculator: "LandmarksToRenderDataCalculator" + input_stream: "NORM_LANDMARKS:landmarks_left_side" + input_stream: "RENDER_SCALE:render_scale" + output_stream: "RENDER_DATA:landmarks_left_joints_render_data" + node_options: { + [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] { + landmark_color { r: 255 g: 138 b: 0 } + connection_color { r: 255 g: 138 b: 0 } + thickness: 3.0 + visualize_landmark_depth: false + utilize_visibility: true + visibility_threshold: 0.5 + } + } +} + +# Render pose right side joints as cyan circles (inside white ones). +node { + calculator: "LandmarksToRenderDataCalculator" + input_stream: "NORM_LANDMARKS:landmarks_right_side" + input_stream: "RENDER_SCALE:render_scale" + output_stream: "RENDER_DATA:landmarks_right_joints_render_data" + node_options: { + [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] { + landmark_color { r: 0 g: 217 b: 231 } + connection_color { r: 0 g: 217 b: 231 } + thickness: 3.0 + visualize_landmark_depth: false + utilize_visibility: true + visibility_threshold: 0.5 + } + } +} + +# Converts normalized rects to drawing primitives for annotation overlay. 
+node { + calculator: "RectToRenderDataCalculator" + input_stream: "NORM_RECT:roi" + output_stream: "RENDER_DATA:roi_render_data" + node_options: { + [type.googleapis.com/mediapipe.RectToRenderDataCalculatorOptions] { + filled: false + color { r: 255 g: 0 b: 0 } + thickness: 4.0 + } + } +} + +# Draws annotations and overlays them on top of the input images. +node { + calculator: "AnnotationOverlayCalculator" + input_stream: "IMAGE:input_image" + input_stream: "detection_render_data" + input_stream: "landmarks_render_data" + input_stream: "landmarks_background_joints_render_data" + input_stream: "landmarks_left_joints_render_data" + input_stream: "landmarks_right_joints_render_data" + input_stream: "roi_render_data" + output_stream: "IMAGE:output_image" +} diff --git a/mediapipe/graphs/pose_tracking/subgraphs/pose_renderer_gpu.pbtxt b/mediapipe/graphs/pose_tracking/subgraphs/pose_renderer_gpu.pbtxt new file mode 100644 index 000000000..d36d8b06e --- /dev/null +++ b/mediapipe/graphs/pose_tracking/subgraphs/pose_renderer_gpu.pbtxt @@ -0,0 +1,274 @@ +# MediaPipe pose landmarks rendering subgraph. + +type: "PoseRendererGpu" + +# GPU image. (GpuBuffer) +input_stream: "IMAGE:input_image" +# Pose landmarks. (NormalizedLandmarkList) +input_stream: "LANDMARKS:pose_landmarks" +# Region of interest calculated based on landmarks. (NormalizedRect) +input_stream: "ROI:roi" +# Detected pose. (Detection) +input_stream: "DETECTION:detection" + +# GPU image with rendered data. (GpuBuffer) +output_stream: "IMAGE:output_image" + +node { + calculator: "ImagePropertiesCalculator" + input_stream: "IMAGE_GPU:input_image" + output_stream: "SIZE:image_size" +} + +# Calculates rendering scale based on the pose roi. +node { + calculator: "RectToRenderScaleCalculator" + input_stream: "NORM_RECT:roi" + input_stream: "IMAGE_SIZE:image_size" + output_stream: "RENDER_SCALE:render_scale" + node_options: { + [type.googleapis.com/mediapipe.RectToRenderScaleCalculatorOptions] { + multiplier: 0.0012 + } + } +} + +# Converts detections to drawing primitives for annotation overlay. +node { + calculator: "DetectionsToRenderDataCalculator" + input_stream: "DETECTION:detection" + output_stream: "RENDER_DATA:detection_render_data" + node_options: { + [type.googleapis.com/mediapipe.DetectionsToRenderDataCalculatorOptions] { + thickness: 4.0 + color { r: 0 g: 255 b: 0 } + } + } +} + +node { + calculator: "SplitNormalizedLandmarkListCalculator" + input_stream: "pose_landmarks" + output_stream: "visible_pose_landmarks" + node_options: { + [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] { + ranges: { begin: 0 end: 25 } + } + } +} + +# Converts landmarks to drawing primitives for annotation overlay. 
+node { + calculator: "LandmarksToRenderDataCalculator" + input_stream: "NORM_LANDMARKS:pose_landmarks" + input_stream: "RENDER_SCALE:render_scale" + output_stream: "RENDER_DATA:landmarks_render_data" + node_options: { + [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] { + landmark_connections: 0 + landmark_connections: 1 + landmark_connections: 1 + landmark_connections: 2 + landmark_connections: 2 + landmark_connections: 3 + landmark_connections: 3 + landmark_connections: 7 + landmark_connections: 0 + landmark_connections: 4 + landmark_connections: 4 + landmark_connections: 5 + landmark_connections: 5 + landmark_connections: 6 + landmark_connections: 6 + landmark_connections: 8 + landmark_connections: 9 + landmark_connections: 10 + landmark_connections: 11 + landmark_connections: 12 + landmark_connections: 11 + landmark_connections: 13 + landmark_connections: 13 + landmark_connections: 15 + landmark_connections: 15 + landmark_connections: 17 + landmark_connections: 15 + landmark_connections: 19 + landmark_connections: 15 + landmark_connections: 21 + landmark_connections: 17 + landmark_connections: 19 + landmark_connections: 12 + landmark_connections: 14 + landmark_connections: 14 + landmark_connections: 16 + landmark_connections: 16 + landmark_connections: 18 + landmark_connections: 16 + landmark_connections: 20 + landmark_connections: 16 + landmark_connections: 22 + landmark_connections: 18 + landmark_connections: 20 + landmark_connections: 11 + landmark_connections: 23 + landmark_connections: 12 + landmark_connections: 24 + landmark_connections: 23 + landmark_connections: 24 + landmark_connections: 23 + landmark_connections: 25 + landmark_connections: 24 + landmark_connections: 26 + landmark_connections: 25 + landmark_connections: 27 + landmark_connections: 26 + landmark_connections: 28 + landmark_connections: 27 + landmark_connections: 29 + landmark_connections: 28 + landmark_connections: 30 + landmark_connections: 29 + landmark_connections: 31 + landmark_connections: 30 + landmark_connections: 32 + landmark_connections: 27 + landmark_connections: 31 + landmark_connections: 28 + landmark_connections: 32 + + landmark_color { r: 255 g: 255 b: 255 } + connection_color { r: 255 g: 255 b: 255 } + thickness: 3.0 + visualize_landmark_depth: false + utilize_visibility: true + visibility_threshold: 0.5 + } + } +} + +# Take left pose landmarks. +node { + calculator: "SplitNormalizedLandmarkListCalculator" + input_stream: "pose_landmarks" + output_stream: "landmarks_left_side" + node_options: { + [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] { + ranges: { begin: 1 end: 4 } + ranges: { begin: 7 end: 8 } + ranges: { begin: 9 end: 10 } + ranges: { begin: 11 end: 12 } + ranges: { begin: 13 end: 14 } + ranges: { begin: 15 end: 16 } + ranges: { begin: 17 end: 18 } + ranges: { begin: 19 end: 20 } + ranges: { begin: 21 end: 22 } + ranges: { begin: 23 end: 24 } + + combine_outputs: true + } + } +} + +# Take right pose landmarks. 
+node { + calculator: "SplitNormalizedLandmarkListCalculator" + input_stream: "pose_landmarks" + output_stream: "landmarks_right_side" + node_options: { + [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] { + ranges: { begin: 4 end: 7 } + ranges: { begin: 8 end: 9 } + ranges: { begin: 10 end: 11 } + ranges: { begin: 12 end: 13 } + ranges: { begin: 14 end: 15 } + ranges: { begin: 16 end: 17 } + ranges: { begin: 18 end: 19 } + ranges: { begin: 20 end: 21 } + ranges: { begin: 22 end: 23 } + ranges: { begin: 24 end: 25 } + + combine_outputs: true + } + } +} + +# Render pose joints as big white circles. +node { + calculator: "LandmarksToRenderDataCalculator" + input_stream: "NORM_LANDMARKS:visible_pose_landmarks" + input_stream: "RENDER_SCALE:render_scale" + output_stream: "RENDER_DATA:landmarks_background_joints_render_data" + node_options: { + [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] { + landmark_color { r: 255 g: 255 b: 255 } + connection_color { r: 255 g: 255 b: 255 } + thickness: 5.0 + visualize_landmark_depth: false + utilize_visibility: true + visibility_threshold: 0.5 + } + } +} + +# Render pose left side joints as orange circles (inside white ones). +node { + calculator: "LandmarksToRenderDataCalculator" + input_stream: "NORM_LANDMARKS:landmarks_left_side" + input_stream: "RENDER_SCALE:render_scale" + output_stream: "RENDER_DATA:landmarks_left_joints_render_data" + node_options: { + [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] { + landmark_color { r: 255 g: 138 b: 0 } + connection_color { r: 255 g: 138 b: 0 } + thickness: 3.0 + visualize_landmark_depth: false + utilize_visibility: true + visibility_threshold: 0.5 + } + } +} + +# Render pose right side joints as cyan circles (inside white ones). +node { + calculator: "LandmarksToRenderDataCalculator" + input_stream: "NORM_LANDMARKS:landmarks_right_side" + input_stream: "RENDER_SCALE:render_scale" + output_stream: "RENDER_DATA:landmarks_right_joints_render_data" + node_options: { + [type.googleapis.com/mediapipe.LandmarksToRenderDataCalculatorOptions] { + landmark_color { r: 0 g: 217 b: 231 } + connection_color { r: 0 g: 217 b: 231 } + thickness: 3.0 + visualize_landmark_depth: false + utilize_visibility: true + visibility_threshold: 0.5 + } + } +} + +# Converts normalized rects to drawing primitives for annotation overlay. +node { + calculator: "RectToRenderDataCalculator" + input_stream: "NORM_RECT:roi" + output_stream: "RENDER_DATA:roi_render_data" + node_options: { + [type.googleapis.com/mediapipe.RectToRenderDataCalculatorOptions] { + filled: false + color { r: 255 g: 0 b: 0 } + thickness: 4.0 + } + } +} + +# Draws annotations and overlays them on top of the input images. 
+node { + calculator: "AnnotationOverlayCalculator" + input_stream: "IMAGE_GPU:input_image" + input_stream: "detection_render_data" + input_stream: "landmarks_render_data" + input_stream: "landmarks_background_joints_render_data" + input_stream: "landmarks_left_joints_render_data" + input_stream: "landmarks_right_joints_render_data" + input_stream: "roi_render_data" + output_stream: "IMAGE_GPU:output_image" +} diff --git a/mediapipe/graphs/pose_tracking/subgraphs/upper_body_pose_renderer_cpu.pbtxt b/mediapipe/graphs/pose_tracking/subgraphs/upper_body_pose_renderer_cpu.pbtxt index 5878dccde..6dcef9566 100644 --- a/mediapipe/graphs/pose_tracking/subgraphs/upper_body_pose_renderer_cpu.pbtxt +++ b/mediapipe/graphs/pose_tracking/subgraphs/upper_body_pose_renderer_cpu.pbtxt @@ -121,7 +121,7 @@ node { thickness: 3.0 visualize_landmark_depth: false utilize_visibility: true - visibility_threshold: 0.1 + visibility_threshold: 0.5 } } } @@ -185,7 +185,7 @@ node { thickness: 5.0 visualize_landmark_depth: false utilize_visibility: true - visibility_threshold: 0.1 + visibility_threshold: 0.5 } } } @@ -203,7 +203,7 @@ node { thickness: 3.0 visualize_landmark_depth: false utilize_visibility: true - visibility_threshold: 0.1 + visibility_threshold: 0.5 } } } @@ -221,7 +221,7 @@ node { thickness: 3.0 visualize_landmark_depth: false utilize_visibility: true - visibility_threshold: 0.1 + visibility_threshold: 0.5 } } } diff --git a/mediapipe/graphs/pose_tracking/subgraphs/upper_body_pose_renderer_gpu.pbtxt b/mediapipe/graphs/pose_tracking/subgraphs/upper_body_pose_renderer_gpu.pbtxt index aed1044bc..567ad16ac 100644 --- a/mediapipe/graphs/pose_tracking/subgraphs/upper_body_pose_renderer_gpu.pbtxt +++ b/mediapipe/graphs/pose_tracking/subgraphs/upper_body_pose_renderer_gpu.pbtxt @@ -121,7 +121,7 @@ node { thickness: 3.0 visualize_landmark_depth: false utilize_visibility: true - visibility_threshold: 0.1 + visibility_threshold: 0.5 } } } @@ -185,7 +185,7 @@ node { thickness: 5.0 visualize_landmark_depth: false utilize_visibility: true - visibility_threshold: 0.1 + visibility_threshold: 0.5 } } } @@ -203,7 +203,7 @@ node { thickness: 3.0 visualize_landmark_depth: false utilize_visibility: true - visibility_threshold: 0.1 + visibility_threshold: 0.5 } } } @@ -221,7 +221,7 @@ node { thickness: 3.0 visualize_landmark_depth: false utilize_visibility: true - visibility_threshold: 0.1 + visibility_threshold: 0.5 } } } diff --git a/mediapipe/java/com/google/mediapipe/components/CameraXPreviewHelper.java b/mediapipe/java/com/google/mediapipe/components/CameraXPreviewHelper.java index d19311ab4..1a3485591 100644 --- a/mediapipe/java/com/google/mediapipe/components/CameraXPreviewHelper.java +++ b/mediapipe/java/com/google/mediapipe/components/CameraXPreviewHelper.java @@ -120,17 +120,41 @@ public class CameraXPreviewHelper extends CameraHelper { // the source is CameraCharacteristics.SENSOR_INFO_TIMESTAMP_SOURCE_UNKNOWN. private int cameraTimestampSource = CameraCharacteristics.SENSOR_INFO_TIMESTAMP_SOURCE_UNKNOWN; + /** + * Initializes the camera and sets it up for accessing frames, using the default 1280 * 720 + * preview size. 
+ */ @Override public void startCamera( - Activity context, CameraFacing cameraFacing, SurfaceTexture surfaceTexture) { - startCamera(context, cameraFacing, surfaceTexture, TARGET_SIZE); + Activity activity, CameraFacing cameraFacing, SurfaceTexture unusedSurfaceTexture) { + startCamera(activity, (LifecycleOwner) activity, cameraFacing, TARGET_SIZE); } + /** + * Initializes the camera and sets it up for accessing frames. + * + * @param targetSize the preview size to use. If set to {@code null}, the helper will default to + * 1280 * 720. + */ public void startCamera( - Activity context, + Activity activity, CameraFacing cameraFacing, SurfaceTexture unusedSurfaceTexture, - Size targetSize) { + @Nullable Size targetSize) { + startCamera(activity, (LifecycleOwner) activity, cameraFacing, targetSize); + } + + /** + * Initializes the camera and sets it up for accessing frames. + * + * @param targetSize the preview size to use. If set to {@code null}, the helper will default to + * 1280 * 720. + */ + public void startCamera( + Context context, + LifecycleOwner lifecycleOwner, + CameraFacing cameraFacing, + @Nullable Size targetSize) { Executor mainThreadExecutor = ContextCompat.getMainExecutor(context); ListenableFuture cameraProviderFuture = ProcessCameraProvider.getInstance(context); @@ -209,9 +233,7 @@ public class CameraXPreviewHelper extends CameraHelper { cameraProvider.unbindAll(); // Bind preview use case to camera. - camera = - cameraProvider.bindToLifecycle( - /*lifecycleOwner=*/ (LifecycleOwner) context, cameraSelector, preview); + camera = cameraProvider.bindToLifecycle(lifecycleOwner, cameraSelector, preview); }, mainThreadExecutor); } @@ -316,7 +338,7 @@ public class CameraXPreviewHelper extends CameraHelper { return frameSize; } - private void onInitialFrameReceived(Activity context, SurfaceTexture previewFrameTexture) { + private void onInitialFrameReceived(Context context, SurfaceTexture previewFrameTexture) { // This method is called by the onFrameAvailableListener we install when opening the camera // session, the first time we receive a frame. In this method, we remove our callback, // acknowledge the frame (via updateTextImage()), detach the texture from the GL context we @@ -395,7 +417,7 @@ public class CameraXPreviewHelper extends CameraHelper { @Nullable private static CameraCharacteristics getCameraCharacteristics( - Activity context, Integer lensFacing) { + Context context, Integer lensFacing) { CameraManager cameraManager = (CameraManager) context.getSystemService(Context.CAMERA_SERVICE); try { List cameraList = Arrays.asList(cameraManager.getCameraIdList()); diff --git a/mediapipe/java/com/google/mediapipe/components/ExternalTextureConverter.java b/mediapipe/java/com/google/mediapipe/components/ExternalTextureConverter.java index 38cc5e91a..0fb6b76c9 100644 --- a/mediapipe/java/com/google/mediapipe/components/ExternalTextureConverter.java +++ b/mediapipe/java/com/google/mediapipe/components/ExternalTextureConverter.java @@ -113,6 +113,16 @@ public class ExternalTextureConverter implements TextureFrameProducer { thread.setFlipY(flip); } + /** + * Sets rotation of the texture, useful for supporting landscape orientations. The value should + * correspond to Display.getRotation(), e.g. Surface.ROTATION_0. Flipping (if any) is applied + * before rotation. This should be called before {@link #setSurfaceTexture(SurfaceTexture, int, + * int)} or {@link #setSurfaceTextureAndAttachToGLContext(SurfaceTexture, int, int)}. 
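+ *
+ * <p>For example, a hypothetical activity that tracks the display orientation
+ * might call {@code
+ * converter.setRotation(activity.getWindowManager().getDefaultDisplay().getRotation())}
+ * on configuration changes, before re-attaching its SurfaceTexture.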
+ */ + public void setRotation(int rotation) { + thread.setRotation(rotation); + } + /** * Sets an offset that can be used to adjust the timestamps on the camera frames, for example to * conform to a preferred time-base or to account for a known device latency. The offset is added @@ -251,6 +261,10 @@ public class ExternalTextureConverter implements TextureFrameProducer { renderer.setFlipY(flip); } + public void setRotation(int rotation) { + renderer.setRotation(rotation); + } + public void setSurfaceTexture(SurfaceTexture texture, int width, int height) { if (surfaceTexture != null) { surfaceTexture.setOnFrameAvailableListener(null); diff --git a/mediapipe/java/com/google/mediapipe/framework/AndroidAssetUtil.java b/mediapipe/java/com/google/mediapipe/framework/AndroidAssetUtil.java index c3a053250..1438a782b 100644 --- a/mediapipe/java/com/google/mediapipe/framework/AndroidAssetUtil.java +++ b/mediapipe/java/com/google/mediapipe/framework/AndroidAssetUtil.java @@ -48,7 +48,7 @@ public final class AndroidAssetUtil { *

Note: When possible, using {@link AssetCache} is preferred for portability, since it does * not require any special handling for Android assets on the native code side. */ - public static boolean initializeNativeAssetManager(Context androidContext) { + public static synchronized boolean initializeNativeAssetManager(Context androidContext) { return nativeInitializeAssetManager( androidContext, androidContext.getCacheDir().getAbsolutePath()); } diff --git a/mediapipe/java/com/google/mediapipe/framework/jni/android_packet_creator_jni.cc b/mediapipe/java/com/google/mediapipe/framework/jni/android_packet_creator_jni.cc index 2f71e649c..05ebe26f3 100644 --- a/mediapipe/java/com/google/mediapipe/framework/jni/android_packet_creator_jni.cc +++ b/mediapipe/java/com/google/mediapipe/framework/jni/android_packet_creator_jni.cc @@ -53,9 +53,9 @@ JNIEXPORT jlong JNICALL ANDROID_PACKET_CREATOR_METHOD( << "is not equal to 4 times bitmap width: " << info.width; return 0L; } - auto image_frame = absl::make_unique<::mediapipe::ImageFrame>( + auto image_frame = absl::make_unique( mediapipe::ImageFormat::SRGB, info.width, info.height, - ::mediapipe::ImageFrame::kGlDefaultAlignmentBoundary); + mediapipe::ImageFrame::kGlDefaultAlignmentBoundary); void* pixel_addr = nullptr; result = AndroidBitmap_lockPixels(env, bitmap, &pixel_addr); if (result != ANDROID_BITMAP_RESULT_SUCCESS) { @@ -86,9 +86,9 @@ JNIEXPORT jlong JNICALL ANDROID_PACKET_CREATOR_METHOD( LOG(ERROR) << "AndroidBitmap_getInfo() failed with result code " << result; return 0L; } - auto image_frame = absl::make_unique<::mediapipe::ImageFrame>( + auto image_frame = absl::make_unique( mediapipe::ImageFormat::SRGBA, info.width, info.height, - ::mediapipe::ImageFrame::kGlDefaultAlignmentBoundary); + mediapipe::ImageFrame::kGlDefaultAlignmentBoundary); int64_t buffer_size = info.stride * info.height; if (buffer_size != image_frame->PixelDataSize()) { LOG(ERROR) << "Bitmap stride: " << info.stride diff --git a/mediapipe/java/com/google/mediapipe/framework/jni/graph.cc b/mediapipe/java/com/google/mediapipe/framework/jni/graph.cc index a65de2992..955947559 100644 --- a/mediapipe/java/com/google/mediapipe/framework/jni/graph.cc +++ b/mediapipe/java/com/google/mediapipe/framework/jni/graph.cc @@ -172,10 +172,10 @@ bool Graph::RemovePacket(int64_t packet_handle) { void Graph::EnsureMinimumExecutorStackSizeForJava() {} -::mediapipe::Status Graph::AddCallbackHandler(std::string output_stream_name, - jobject java_callback) { +mediapipe::Status Graph::AddCallbackHandler(std::string output_stream_name, + jobject java_callback) { if (!graph_config()) { - return ::mediapipe::InternalError("Graph is not loaded!"); + return mediapipe::InternalError("Graph is not loaded!"); } std::unique_ptr handler( new internal::CallbackHandler(this, java_callback)); @@ -188,7 +188,7 @@ void Graph::EnsureMinimumExecutorStackSizeForJava() {} side_packet_name, MakePacket>( handler->CreateCallback())); callback_handlers_.emplace_back(std::move(handler)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } int64_t Graph::AddSurfaceOutput(const std::string& output_stream_name) { @@ -201,7 +201,7 @@ int64_t Graph::AddSurfaceOutput(const std::string& output_stream_name) { LOG(FATAL) << "GPU support has been disabled in this build!"; #else CalculatorGraphConfig::Node* sink_node = graph_config()->add_node(); - sink_node->set_name(::mediapipe::tool::GetUnusedNodeName( + sink_node->set_name(mediapipe::tool::GetUnusedNodeName( *graph_config(), absl::StrCat("egl_surface_sink_", 
output_stream_name))); sink_node->set_calculator("GlSurfaceSinkCalculator"); sink_node->add_input_stream(output_stream_name); @@ -209,7 +209,7 @@ int64_t Graph::AddSurfaceOutput(const std::string& output_stream_name) { absl::StrCat(kGpuSharedTagName, ":", kGpuSharedSidePacketName)); const std::string input_side_packet_name = - ::mediapipe::tool::GetUnusedSidePacketName( + mediapipe::tool::GetUnusedSidePacketName( *graph_config(), absl::StrCat(output_stream_name, "_surface")); sink_node->add_input_side_packet( absl::StrCat("SURFACE:", input_side_packet_name)); @@ -222,9 +222,9 @@ int64_t Graph::AddSurfaceOutput(const std::string& output_stream_name) { #endif // defined(MEDIAPIPE_DISABLE_GPU) } -::mediapipe::Status Graph::LoadBinaryGraph(std::string path_to_graph) { +mediapipe::Status Graph::LoadBinaryGraph(std::string path_to_graph) { std::string graph_config_string; - ::mediapipe::Status status = + mediapipe::Status status = mediapipe::file::GetContents(path_to_graph, &graph_config_string); if (!status.ok()) { return status; @@ -233,39 +233,39 @@ int64_t Graph::AddSurfaceOutput(const std::string& output_stream_name) { graph_config_string.length()); } -::mediapipe::Status Graph::LoadBinaryGraph(const char* data, int size) { +mediapipe::Status Graph::LoadBinaryGraph(const char* data, int size) { CalculatorGraphConfig graph_config; if (!graph_config.ParseFromArray(data, size)) { - return ::mediapipe::InvalidArgumentError("Failed to parse the graph"); + return mediapipe::InvalidArgumentError("Failed to parse the graph"); } graph_configs_.push_back(graph_config); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status Graph::LoadBinaryGraphTemplate(const char* data, int size) { +mediapipe::Status Graph::LoadBinaryGraphTemplate(const char* data, int size) { CalculatorGraphTemplate graph_template; if (!graph_template.ParseFromArray(data, size)) { - return ::mediapipe::InvalidArgumentError("Failed to parse the graph"); + return mediapipe::InvalidArgumentError("Failed to parse the graph"); } graph_templates_.push_back(graph_template); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status Graph::SetGraphType(std::string graph_type) { +mediapipe::Status Graph::SetGraphType(std::string graph_type) { graph_type_ = graph_type; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status Graph::SetGraphOptions(const char* data, int size) { +mediapipe::Status Graph::SetGraphOptions(const char* data, int size) { if (!graph_options_.ParseFromArray(data, size)) { - return ::mediapipe::InvalidArgumentError("Failed to parse the graph"); + return mediapipe::InvalidArgumentError("Failed to parse the graph"); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } CalculatorGraphConfig Graph::GetCalculatorGraphConfig() { CalculatorGraph temp_graph; - ::mediapipe::Status status = InitializeGraph(&temp_graph); + mediapipe::Status status = InitializeGraph(&temp_graph); if (!status.ok()) { LOG(ERROR) << "GetCalculatorGraphConfig failed:\n" << status.message(); } @@ -344,14 +344,14 @@ void Graph::SetPacketJavaClass(JNIEnv* env) { } } -::mediapipe::Status Graph::RunGraphUntilClose(JNIEnv* env) { +mediapipe::Status Graph::RunGraphUntilClose(JNIEnv* env) { // Get a global reference to the packet class, so it can be used in other // native thread for call back. SetPacketJavaClass(env); // Running as a synchronized mode, the same Java thread is available through // out the run. 
CalculatorGraph calculator_graph; - ::mediapipe::Status status = InitializeGraph(&calculator_graph); + mediapipe::Status status = InitializeGraph(&calculator_graph); if (!status.ok()) { LOG(ERROR) << status.message(); running_graph_.reset(nullptr); @@ -364,9 +364,9 @@ void Graph::SetPacketJavaClass(JNIEnv* env) { return status; } -::mediapipe::Status Graph::StartRunningGraph(JNIEnv* env) { +mediapipe::Status Graph::StartRunningGraph(JNIEnv* env) { if (running_graph_) { - return ::mediapipe::InternalError("Graph is already running."); + return mediapipe::InternalError("Graph is already running."); } // Get a global reference to the packet class, so it can be used in other // native thread for call back. @@ -382,7 +382,7 @@ void Graph::SetPacketJavaClass(JNIEnv* env) { LOG(INFO) << name; } } - ::mediapipe::Status status; + mediapipe::Status status; #ifndef MEDIAPIPE_DISABLE_GPU status = running_graph_->SetGpuResources(gpu_resources_); if (!status.ok()) { @@ -419,7 +419,7 @@ void Graph::SetPacketJavaClass(JNIEnv* env) { return mediapipe::OkStatus(); } -::mediapipe::Status Graph::SetTimestampAndMovePacketToInputStream( +mediapipe::Status Graph::SetTimestampAndMovePacketToInputStream( const std::string& stream_name, int64_t packet_handle, int64_t timestamp) { internal::PacketWithContext* packet_with_context = reinterpret_cast(packet_handle); @@ -433,60 +433,60 @@ void Graph::SetPacketJavaClass(JNIEnv* env) { return AddPacketToInputStream(stream_name, std::move(packet)); } -::mediapipe::Status Graph::AddPacketToInputStream( - const std::string& stream_name, const Packet& packet) { +mediapipe::Status Graph::AddPacketToInputStream(const std::string& stream_name, + const Packet& packet) { if (!running_graph_) { - return ::mediapipe::FailedPreconditionError("Graph must be running."); + return mediapipe::FailedPreconditionError("Graph must be running."); } return running_graph_->AddPacketToInputStream(stream_name, packet); } -::mediapipe::Status Graph::AddPacketToInputStream( - const std::string& stream_name, Packet&& packet) { +mediapipe::Status Graph::AddPacketToInputStream(const std::string& stream_name, + Packet&& packet) { if (!running_graph_) { - return ::mediapipe::FailedPreconditionError("Graph must be running."); + return mediapipe::FailedPreconditionError("Graph must be running."); } return running_graph_->AddPacketToInputStream(stream_name, std::move(packet)); } -::mediapipe::Status Graph::CloseInputStream(std::string stream_name) { +mediapipe::Status Graph::CloseInputStream(std::string stream_name) { if (!running_graph_) { - return ::mediapipe::FailedPreconditionError("Graph must be running."); + return mediapipe::FailedPreconditionError("Graph must be running."); } LOG(INFO) << "Close input stream: " << stream_name; return running_graph_->CloseInputStream(stream_name); } -::mediapipe::Status Graph::CloseAllInputStreams() { +mediapipe::Status Graph::CloseAllInputStreams() { LOG(INFO) << "Close all input streams."; if (!running_graph_) { - return ::mediapipe::FailedPreconditionError("Graph must be running."); + return mediapipe::FailedPreconditionError("Graph must be running."); } return running_graph_->CloseAllInputStreams(); } -::mediapipe::Status Graph::CloseAllPacketSources() { +mediapipe::Status Graph::CloseAllPacketSources() { LOG(INFO) << "Close all input streams."; if (!running_graph_) { - return ::mediapipe::FailedPreconditionError("Graph must be running."); + return mediapipe::FailedPreconditionError("Graph must be running."); } return running_graph_->CloseAllPacketSources(); } 
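// A minimal sketch of the step-by-step usage these functions support
// (hypothetical driver code; the names match the declarations in graph.h
// below, and error handling is omitted):
//
//   mediapipe::android::Graph graph;
//   MEDIAPIPE_CHECK_OK(graph.LoadBinaryGraph(path_to_graph));
//   MEDIAPIPE_CHECK_OK(graph.StartRunningGraph(env));
//   // ... AddPacketToInputStream(stream_name, packet) once per frame ...
//   MEDIAPIPE_CHECK_OK(graph.CloseAllInputStreams());
//   MEDIAPIPE_CHECK_OK(graph.WaitUntilDone(env));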
-::mediapipe::Status Graph::WaitUntilDone(JNIEnv* env) { +mediapipe::Status Graph::WaitUntilDone(JNIEnv* env) { if (!running_graph_) { - return ::mediapipe::FailedPreconditionError("Graph must be running."); + return mediapipe::FailedPreconditionError("Graph must be running."); } - ::mediapipe::Status status = running_graph_->WaitUntilDone(); + mediapipe::Status status = running_graph_->WaitUntilDone(); running_graph_.reset(nullptr); return status; } -::mediapipe::Status Graph::WaitUntilIdle(JNIEnv* env) { +mediapipe::Status Graph::WaitUntilIdle(JNIEnv* env) { if (!running_graph_) { - return ::mediapipe::FailedPreconditionError("Graph must be running."); + return mediapipe::FailedPreconditionError("Graph must be running."); } return running_graph_->WaitUntilIdle(); } @@ -511,9 +511,9 @@ mediapipe::GpuResources* Graph::GetGpuResources() const { return gpu_resources_.get(); } -::mediapipe::Status Graph::SetParentGlContext(int64 java_gl_context) { +mediapipe::Status Graph::SetParentGlContext(int64 java_gl_context) { if (gpu_resources_) { - return ::mediapipe::AlreadyExistsError( + return mediapipe::AlreadyExistsError( "trying to set the parent GL context, but the gpu shared " "data has already been set up."); } @@ -524,7 +524,7 @@ mediapipe::GpuResources* Graph::GetGpuResources() const { reinterpret_cast(java_gl_context)) .ValueOrDie(); #endif // defined(MEDIAPIPE_DISABLE_GPU) - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void Graph::SetServicePacket(const GraphServiceBase& service, Packet packet) { @@ -583,7 +583,7 @@ std::string Graph::graph_type() { return ""; } -::mediapipe::Status Graph::InitializeGraph(CalculatorGraph* graph) { +mediapipe::Status Graph::InitializeGraph(CalculatorGraph* graph) { if (graph_configs_.size() == 1 && graph_templates_.empty()) { return graph->Initialize(*graph_config()); } else { diff --git a/mediapipe/java/com/google/mediapipe/framework/jni/graph.h b/mediapipe/java/com/google/mediapipe/framework/jni/graph.h index 6bb310b5f..43da503ff 100644 --- a/mediapipe/java/com/google/mediapipe/framework/jni/graph.h +++ b/mediapipe/java/com/google/mediapipe/framework/jni/graph.h @@ -49,26 +49,26 @@ class Graph { ~Graph(); // Adds a callback for a given stream name. - ::mediapipe::Status AddCallbackHandler(std::string output_stream_name, - jobject java_callback); + mediapipe::Status AddCallbackHandler(std::string output_stream_name, + jobject java_callback); // Loads a binary graph from a file. - ::mediapipe::Status LoadBinaryGraph(std::string path_to_graph); + mediapipe::Status LoadBinaryGraph(std::string path_to_graph); // Loads a binary graph from a buffer. - ::mediapipe::Status LoadBinaryGraph(const char* data, int size); + mediapipe::Status LoadBinaryGraph(const char* data, int size); // Loads a binary graph template from a buffer. - ::mediapipe::Status LoadBinaryGraphTemplate(const char* data, int size); + mediapipe::Status LoadBinaryGraphTemplate(const char* data, int size); // Specifies the CalculatorGraphConfig::type of the top level graph. - ::mediapipe::Status SetGraphType(std::string graph_type); + mediapipe::Status SetGraphType(std::string graph_type); // Specifies options such as template arguments for the graph. - ::mediapipe::Status SetGraphOptions(const char* data, int size); + mediapipe::Status SetGraphOptions(const char* data, int size); // Returns the expanded calculator graph config. CalculatorGraphConfig GetCalculatorGraphConfig(); // Runs the graph until it closes. // Mainly is used for writing tests. 
-  ::mediapipe::Status RunGraphUntilClose(JNIEnv* env);
+  mediapipe::Status RunGraphUntilClose(JNIEnv* env);
   // The following 4 functions are used to run the graph in
   // step by step mode, the usual call sequence is like this:
@@ -81,26 +81,26 @@ class Graph {
   // wait until nothing is running and nothing can be scheduled.
   //
   // Starts running the graph.
-  ::mediapipe::Status StartRunningGraph(JNIEnv* env);
+  mediapipe::Status StartRunningGraph(JNIEnv* env);
   // Closes one input stream.
-  ::mediapipe::Status CloseInputStream(std::string stream_name);
+  mediapipe::Status CloseInputStream(std::string stream_name);
   // Closes all the graph input streams.
-  ::mediapipe::Status CloseAllInputStreams();
+  mediapipe::Status CloseAllInputStreams();
   // Closes all the graph packet sources.
-  ::mediapipe::Status CloseAllPacketSources();
+  mediapipe::Status CloseAllPacketSources();
   // Waits until the graph is done.
-  ::mediapipe::Status WaitUntilDone(JNIEnv* env);
+  mediapipe::Status WaitUntilDone(JNIEnv* env);
   // Waits until the graph is idle.
-  ::mediapipe::Status WaitUntilIdle(JNIEnv* env);
+  mediapipe::Status WaitUntilIdle(JNIEnv* env);
   // Adds a packet to an input stream.
-  ::mediapipe::Status AddPacketToInputStream(const std::string& stream_name,
-                                             const Packet& packet);
+  mediapipe::Status AddPacketToInputStream(const std::string& stream_name,
+                                           const Packet& packet);
   // Moves a packet into an input stream.
-  ::mediapipe::Status AddPacketToInputStream(const std::string& stream_name,
-                                             Packet&& packet);
+  mediapipe::Status AddPacketToInputStream(const std::string& stream_name,
+                                           Packet&& packet);
   // Takes the MediaPipe Packet referenced by the handle, sets its timestamp,
   // and then tries to move the Packet into the given input stream.
-  ::mediapipe::Status SetTimestampAndMovePacketToInputStream(
+  mediapipe::Status SetTimestampAndMovePacketToInputStream(
       const std::string& stream_name, int64_t packet_handle, int64_t timestamp);
   // Sets the mode for adding packets to a graph input stream.
@@ -127,7 +127,7 @@ class Graph {
   int64_t AddSurfaceOutput(const std::string& stream_name);
   // Sets a parent GL context to use for texture sharing.
-  ::mediapipe::Status SetParentGlContext(int64 java_gl_context);
+  mediapipe::Status SetParentGlContext(int64 java_gl_context);
   // Sets the object for a service.
   template <typename T>
@@ -176,7 +176,7 @@ class Graph {
   // CalculatorGraphConfig::type is not yet defined.
   std::string graph_type();
   // Initializes CalculatorGraph |graph| using the loaded graph-configs.
-  ::mediapipe::Status InitializeGraph(CalculatorGraph* graph);
+  mediapipe::Status InitializeGraph(CalculatorGraph* graph);
   // CalculatorGraphConfigs for the calculator graph and subgraphs.
std::vector graph_configs_; diff --git a/mediapipe/java/com/google/mediapipe/framework/jni/graph_jni.cc b/mediapipe/java/com/google/mediapipe/framework/jni/graph_jni.cc index a90f56f03..c3e981570 100644 --- a/mediapipe/java/com/google/mediapipe/framework/jni/graph_jni.cc +++ b/mediapipe/java/com/google/mediapipe/framework/jni/graph_jni.cc @@ -55,7 +55,7 @@ mediapipe::Status AddStreamHeadersIntoGraph( jobjectArray stream_names, jlongArray packets) { jsize num_headers = env->GetArrayLength(stream_names); if (num_headers != env->GetArrayLength(packets)) { - return mediapipe::Status(::mediapipe::StatusCode::kFailedPrecondition, + return mediapipe::Status(mediapipe::StatusCode::kFailedPrecondition, "Number of streams and packets doesn't match!"); } jlong* packets_array_ref = env->GetLongArrayElements(packets, nullptr); @@ -180,7 +180,7 @@ GRAPH_METHOD(nativeAddPacketCallback)(JNIEnv* env, jobject thiz, jlong context, jobject global_callback_ref = env->NewGlobalRef(callback); if (!global_callback_ref) { ThrowIfError( - env, ::mediapipe::InternalError("Failed to allocate packet callback")); + env, mediapipe::InternalError("Failed to allocate packet callback")); return; } ThrowIfError(env, mediapipe_graph->AddCallbackHandler(output_stream_name, diff --git a/mediapipe/java/com/google/mediapipe/framework/jni/graph_profiler_jni.cc b/mediapipe/java/com/google/mediapipe/framework/jni/graph_profiler_jni.cc index c530ef062..494609e29 100644 --- a/mediapipe/java/com/google/mediapipe/framework/jni/graph_profiler_jni.cc +++ b/mediapipe/java/com/google/mediapipe/framework/jni/graph_profiler_jni.cc @@ -45,7 +45,7 @@ JNIEXPORT jobjectArray JNICALL GRAPH_METHOD(nativeGetCalculatorProfiles)( std::vector profiles_vec; if (profiling_context->GetCalculatorProfiles(&profiles_vec) != - ::mediapipe::OkStatus()) { + mediapipe::OkStatus()) { return nullptr; } int num_profiles = profiles_vec.size(); diff --git a/mediapipe/java/com/google/mediapipe/framework/jni/packet_creator_jni.cc b/mediapipe/java/com/google/mediapipe/framework/jni/packet_creator_jni.cc index f4829c794..ecf388000 100644 --- a/mediapipe/java/com/google/mediapipe/framework/jni/packet_creator_jni.cc +++ b/mediapipe/java/com/google/mediapipe/framework/jni/packet_creator_jni.cc @@ -70,9 +70,9 @@ JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateRgbImage)( JNIEnv* env, jobject thiz, jlong context, jobject byte_buffer, jint width, jint height) { const void* data = env->GetDirectBufferAddress(byte_buffer); - auto image_frame = absl::make_unique<::mediapipe::ImageFrame>( + auto image_frame = absl::make_unique( mediapipe::ImageFormat::SRGB, width, height, - ::mediapipe::ImageFrame::kGlDefaultAlignmentBoundary); + mediapipe::ImageFrame::kGlDefaultAlignmentBoundary); int64_t buffer_size = env->GetDirectBufferCapacity(byte_buffer); if (buffer_size != image_frame->PixelDataSize()) { LOG(ERROR) << "The input image buffer should have 4 bytes alignment."; @@ -92,9 +92,9 @@ JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateRgbImageFromRgba)( jint height) { const uint8_t* rgba_data = static_cast(env->GetDirectBufferAddress(byte_buffer)); - auto image_frame = absl::make_unique<::mediapipe::ImageFrame>( + auto image_frame = absl::make_unique( mediapipe::ImageFormat::SRGB, width, height, - ::mediapipe::ImageFrame::kGlDefaultAlignmentBoundary); + mediapipe::ImageFrame::kGlDefaultAlignmentBoundary); int64_t buffer_size = env->GetDirectBufferCapacity(byte_buffer); if (buffer_size != width * height * 4) { LOG(ERROR) << "Please check the input buffer size."; @@ 
-113,9 +113,9 @@ JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateRgbImageFromRgba)( JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateGrayscaleImage)( JNIEnv* env, jobject thiz, jlong context, jobject byte_buffer, jint width, jint height) { - auto image_frame = absl::make_unique<::mediapipe::ImageFrame>( + auto image_frame = absl::make_unique( mediapipe::ImageFormat::GRAY8, width, height, - ::mediapipe::ImageFrame::kGlDefaultAlignmentBoundary); + mediapipe::ImageFrame::kGlDefaultAlignmentBoundary); int64_t buffer_size = env->GetDirectBufferCapacity(byte_buffer); if (buffer_size != width * height) { LOG(ERROR) << "Please check the input buffer size."; @@ -143,9 +143,9 @@ JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateFloatImageFrame)( JNIEnv* env, jobject thiz, jlong context, jobject byte_buffer, jint width, jint height) { const void* data = env->GetDirectBufferAddress(byte_buffer); - auto image_frame = absl::make_unique<::mediapipe::ImageFrame>( + auto image_frame = absl::make_unique( mediapipe::ImageFormat::VEC32F1, width, height, - ::mediapipe::ImageFrame::kGlDefaultAlignmentBoundary); + mediapipe::ImageFrame::kGlDefaultAlignmentBoundary); int64_t buffer_size = env->GetDirectBufferCapacity(byte_buffer); if (buffer_size != image_frame->PixelDataSize()) { LOG(ERROR) << "Please check the input buffer size."; @@ -164,9 +164,9 @@ JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateRgbaImageFrame)( JNIEnv* env, jobject thiz, jlong context, jobject byte_buffer, jint width, jint height) { const void* rgba_data = env->GetDirectBufferAddress(byte_buffer); - auto image_frame = absl::make_unique<::mediapipe::ImageFrame>( + auto image_frame = absl::make_unique( mediapipe::ImageFormat::SRGBA, width, height, - ::mediapipe::ImageFrame::kGlDefaultAlignmentBoundary); + mediapipe::ImageFrame::kGlDefaultAlignmentBoundary); int64_t buffer_size = env->GetDirectBufferCapacity(byte_buffer); if (buffer_size != image_frame->PixelDataSize()) { LOG(ERROR) << "Please check the input buffer size."; @@ -183,8 +183,8 @@ JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateRgbaImageFrame)( static mediapipe::Packet createAudioPacket(const uint8_t* audio_sample, int num_samples, int num_channels) { - std::unique_ptr<::mediapipe::Matrix> matrix( - new ::mediapipe::Matrix(num_channels, num_samples)); + std::unique_ptr matrix( + new mediapipe::Matrix(num_channels, num_samples)); // Preparing and normalize the audio data. // kMultiplier is same as what used in av_sync_media_decoder.cc. static const float kMultiplier = 1.f / (1 << 15); @@ -292,8 +292,7 @@ JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateMatrix)( << rows * cols; return 0L; } - std::unique_ptr<::mediapipe::Matrix> matrix( - new ::mediapipe::Matrix(rows, cols)); + std::unique_ptr matrix(new mediapipe::Matrix(rows, cols)); // The java and native has the same byte order, by default is little Endian, // we can safely copy data directly, we have tests to cover this. 
env->GetFloatArrayRegion(data, 0, rows * cols, matrix->data()); diff --git a/mediapipe/java/com/google/mediapipe/framework/jni/packet_getter_jni.cc b/mediapipe/java/com/google/mediapipe/framework/jni/packet_getter_jni.cc index 0b1cd788d..9391bce2f 100644 --- a/mediapipe/java/com/google/mediapipe/framework/jni/packet_getter_jni.cc +++ b/mediapipe/java/com/google/mediapipe/framework/jni/packet_getter_jni.cc @@ -255,22 +255,22 @@ JNIEXPORT jdoubleArray JNICALL PACKET_GETTER_METHOD(nativeGetFloat64Vector)( JNIEXPORT jint JNICALL PACKET_GETTER_METHOD(nativeGetImageWidth)(JNIEnv* env, jobject thiz, jlong packet) { - const ::mediapipe::ImageFrame& image = - GetFromNativeHandle<::mediapipe::ImageFrame>(packet); + const mediapipe::ImageFrame& image = + GetFromNativeHandle(packet); return image.Width(); } JNIEXPORT jint JNICALL PACKET_GETTER_METHOD(nativeGetImageHeight)( JNIEnv* env, jobject thiz, jlong packet) { - const ::mediapipe::ImageFrame& image = - GetFromNativeHandle<::mediapipe::ImageFrame>(packet); + const mediapipe::ImageFrame& image = + GetFromNativeHandle(packet); return image.Height(); } JNIEXPORT jboolean JNICALL PACKET_GETTER_METHOD(nativeGetImageData)( JNIEnv* env, jobject thiz, jlong packet, jobject byte_buffer) { - const ::mediapipe::ImageFrame& image = - GetFromNativeHandle<::mediapipe::ImageFrame>(packet); + const mediapipe::ImageFrame& image = + GetFromNativeHandle(packet); int64_t buffer_size = env->GetDirectBufferCapacity(byte_buffer); @@ -313,8 +313,8 @@ JNIEXPORT jboolean JNICALL PACKET_GETTER_METHOD(nativeGetImageData)( JNIEXPORT jboolean JNICALL PACKET_GETTER_METHOD(nativeGetRgbaFromRgb)( JNIEnv* env, jobject thiz, jlong packet, jobject byte_buffer) { - const ::mediapipe::ImageFrame& image = - GetFromNativeHandle<::mediapipe::ImageFrame>(packet); + const mediapipe::ImageFrame& image = + GetFromNativeHandle(packet); uint8_t* rgba_data = static_cast(env->GetDirectBufferAddress(byte_buffer)); int64_t buffer_size = env->GetDirectBufferCapacity(byte_buffer); @@ -357,8 +357,8 @@ JNIEXPORT jdouble JNICALL PACKET_GETTER_METHOD( JNIEXPORT jbyteArray JNICALL PACKET_GETTER_METHOD(nativeGetAudioData)( JNIEnv* env, jobject thiz, jlong packet) { - const ::mediapipe::Matrix& audio_mat = - GetFromNativeHandle<::mediapipe::Matrix>(packet); + const mediapipe::Matrix& audio_mat = + GetFromNativeHandle(packet); int num_channels = audio_mat.rows(); int num_samples = audio_mat.cols(); int data_size = num_channels * num_samples * 2; @@ -381,8 +381,8 @@ JNIEXPORT jbyteArray JNICALL PACKET_GETTER_METHOD(nativeGetAudioData)( JNIEXPORT jfloatArray JNICALL PACKET_GETTER_METHOD(nativeGetMatrixData)( JNIEnv* env, jobject thiz, jlong packet) { - const ::mediapipe::Matrix& audio_mat = - GetFromNativeHandle<::mediapipe::Matrix>(packet); + const mediapipe::Matrix& audio_mat = + GetFromNativeHandle(packet); int rows = audio_mat.rows(); int cols = audio_mat.cols(); jfloatArray float_data = env->NewFloatArray(rows * cols); @@ -394,13 +394,13 @@ JNIEXPORT jfloatArray JNICALL PACKET_GETTER_METHOD(nativeGetMatrixData)( JNIEXPORT jint JNICALL PACKET_GETTER_METHOD(nativeGetMatrixRows)(JNIEnv* env, jobject thiz, jlong packet) { - return GetFromNativeHandle<::mediapipe::Matrix>(packet).rows(); + return GetFromNativeHandle(packet).rows(); } JNIEXPORT jint JNICALL PACKET_GETTER_METHOD(nativeGetMatrixCols)(JNIEnv* env, jobject thiz, jlong packet) { - return GetFromNativeHandle<::mediapipe::Matrix>(packet).cols(); + return GetFromNativeHandle(packet).cols(); } #ifndef MEDIAPIPE_DISABLE_GPU diff --git 
a/mediapipe/java/com/google/mediapipe/framework/jni/surface_output_jni.cc b/mediapipe/java/com/google/mediapipe/framework/jni/surface_output_jni.cc index 29646a474..b64788dd3 100644 --- a/mediapipe/java/com/google/mediapipe/framework/jni/surface_output_jni.cc +++ b/mediapipe/java/com/google/mediapipe/framework/jni/surface_output_jni.cc @@ -62,7 +62,7 @@ JNIEXPORT void JNICALL MEDIAPIPE_SURFACE_OUTPUT_METHOD(nativeSetSurface)( } auto status = gl_context->Run( - [gl_context, surface_holder, surface, window]() -> ::mediapipe::Status { + [gl_context, surface_holder, surface, window]() -> mediapipe::Status { absl::MutexLock lock(&surface_holder->mutex); // Must destroy old surface first in case we are assigning the same // surface. @@ -90,7 +90,7 @@ JNIEXPORT void JNICALL MEDIAPIPE_SURFACE_OUTPUT_METHOD(nativeSetSurface)( } surface_holder->surface = egl_surface; surface_holder->owned = egl_surface != EGL_NO_SURFACE; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); }); MEDIAPIPE_CHECK_OK(status); @@ -122,10 +122,10 @@ JNIEXPORT void JNICALL MEDIAPIPE_SURFACE_OUTPUT_METHOD(nativeSetEglSurface)( if (old_surface != EGL_NO_SURFACE) { MEDIAPIPE_CHECK_OK( - gl_context->Run([gl_context, old_surface]() -> ::mediapipe::Status { + gl_context->Run([gl_context, old_surface]() -> mediapipe::Status { RET_CHECK(eglDestroySurface(gl_context->egl_display(), old_surface)) << "eglDestroySurface failed:" << eglGetError(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); })); } } diff --git a/mediapipe/java/com/google/mediapipe/glutil/EglManager.java b/mediapipe/java/com/google/mediapipe/glutil/EglManager.java index bad7a610f..bad59ce3a 100644 --- a/mediapipe/java/com/google/mediapipe/glutil/EglManager.java +++ b/mediapipe/java/com/google/mediapipe/glutil/EglManager.java @@ -230,7 +230,7 @@ public class EglManager { // Try to create an OpenGL ES 3 context first. 
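// Note: per the EGL spec, eglCreateContext reports failure by returning
// EGL_NO_CONTEXT rather than null, which is why the check below covers both
// values.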
int[] contextAttrs = {EGL_CONTEXT_CLIENT_VERSION, glVersion, EGL10.EGL_NONE}; eglContext = egl.eglCreateContext(eglDisplay, eglConfig, parentContext, contextAttrs); - if (eglContext == null) { + if (eglContext == null || eglContext == EGL10.EGL_NO_CONTEXT) { int error = egl.eglGetError(); throw new RuntimeException( "Could not create GL context: EGL error: 0x" diff --git a/mediapipe/java/com/google/mediapipe/glutil/ExternalTextureRenderer.java b/mediapipe/java/com/google/mediapipe/glutil/ExternalTextureRenderer.java index e03bf409d..4dd35f865 100644 --- a/mediapipe/java/com/google/mediapipe/glutil/ExternalTextureRenderer.java +++ b/mediapipe/java/com/google/mediapipe/glutil/ExternalTextureRenderer.java @@ -17,6 +17,7 @@ package com.google.mediapipe.glutil; import android.graphics.SurfaceTexture; import android.opengl.GLES11Ext; import android.opengl.GLES20; +import android.view.Surface; import java.nio.FloatBuffer; import java.util.HashMap; import java.util.Map; @@ -44,6 +45,17 @@ public class ExternalTextureRenderer { 0.0f, 0.0f, // bottom left 1.0f, 0.0f // bottom right ); + private static final Vertex BOTTOM_LEFT = new Vertex(-1.0f, -1.0f); + private static final Vertex BOTTOM_RIGHT = new Vertex(1.0f, -1.0f); + private static final Vertex TOP_LEFT = new Vertex(-1.0f, 1.0f); + private static final Vertex TOP_RIGHT = new Vertex(1.0f, 1.0f); + private static final Vertex[] POSITION_VERTICIES = { + BOTTOM_LEFT, BOTTOM_RIGHT, TOP_LEFT, TOP_RIGHT + }; + private static final FloatBuffer POSITION_VERTICIES_0 = fb(POSITION_VERTICIES, 0, 1, 2, 3); + private static final FloatBuffer POSITION_VERTICIES_90 = fb(POSITION_VERTICIES, 2, 0, 3, 1); + private static final FloatBuffer POSITION_VERTICIES_180 = fb(POSITION_VERTICIES, 3, 2, 1, 0); + private static final FloatBuffer POSITION_VERTICIES_270 = fb(POSITION_VERTICIES, 1, 3, 0, 2); private static final String TAG = "ExternalTextureRend"; // Max length of a tag is 23. private static final int ATTRIB_POSITION = 1; @@ -54,6 +66,7 @@ public class ExternalTextureRenderer { private int textureTransformUniform; private float[] textureTransformMatrix = new float[16]; private boolean flipY; + private int rotation = Surface.ROTATION_0; /** Call this to setup the shader program before rendering. */ public void setup() { @@ -79,6 +92,15 @@ public class ExternalTextureRenderer { flipY = flip; } + /** + * Rotates the rendering output, useful for supporting landscape orientations. The value should + * correspond to Display.getRotation(), e.g. Surface.ROTATION_0. Flipping (if any) is applied + * before rotation. Effective in subsequent {@link #render(SurfaceTexture)} calls. + */ + public void setRotation(int rotation) { + this.rotation = rotation; + } + /** * Renders the surfaceTexture to the framebuffer with optional vertical flip. 
* @@ -111,7 +133,7 @@ public class ExternalTextureRenderer { ShaderUtil.checkGlError("glUniformMatrix4fv"); GLES20.glEnableVertexAttribArray(ATTRIB_POSITION); GLES20.glVertexAttribPointer( - ATTRIB_POSITION, 2, GLES20.GL_FLOAT, false, 0, CommonShaders.SQUARE_VERTICES); + ATTRIB_POSITION, 2, GLES20.GL_FLOAT, false, 0, getPositionVerticies()); GLES20.glEnableVertexAttribArray(ATTRIB_TEXTURE_COORDINATE); GLES20.glVertexAttribPointer( @@ -140,4 +162,34 @@ public class ExternalTextureRenderer { public void release() { GLES20.glDeleteProgram(program); } + + private FloatBuffer getPositionVerticies() { + switch (rotation) { + case Surface.ROTATION_90: + return POSITION_VERTICIES_90; + case Surface.ROTATION_180: + return POSITION_VERTICIES_180; + case Surface.ROTATION_270: + return POSITION_VERTICIES_270; + case Surface.ROTATION_0: + default: + return POSITION_VERTICIES_0; + } + } + + private static FloatBuffer fb(Vertex[] v, int i0, int i1, int i2, int i3) { + return ShaderUtil.floatBuffer( + v[i0].x, v[i0].y, v[i1].x, v[i1].y, v[i2].x, v[i2].y, v[i3].x, v[i3].y); + } + + /** Convenience class to make rotations easier. */ + private static class Vertex { + float x; + float y; + + Vertex(float x, float y) { + this.x = x; + this.y = y; + } + } } diff --git a/mediapipe/modules/README.md b/mediapipe/modules/README.md index d38744bc1..12ec103d2 100644 --- a/mediapipe/modules/README.md +++ b/mediapipe/modules/README.md @@ -10,7 +10,9 @@ Each module (represented as a subfolder) provides subgraphs and corresponding re | [`face_geometry`](face_geometry/README.md) | Subgraphs to extract face geometry. | | [`face_landmark`](face_landmark/README.md) | Subgraphs to detect and track face landmarks. | | [`hand_landmark`](hand_landmark/README.md) | Subgraphs to detect and track hand landmarks. | +| [`holistic_landmark`](holistic_landmark/README.md) | Subgraphs to detect and track holistic pose which consists of pose, face and hand landmarks. | | [`iris_landmark`](iris_landmark/README.md) | Subgraphs to detect iris landmarks. | | [`palm_detection`](palm_detection/README.md) | Subgraphs to detect palms/hands. | | [`pose_detection`](pose_detection/README.md) | Subgraphs to detect poses. | | [`pose_landmark`](pose_landmark/README.md) | Subgraphs to detect and track pose landmarks. | +| [`objectron`](objectron/README.md) | Subgraphs to detect and track 3D objects. 
|
diff --git a/mediapipe/modules/face_detection/BUILD b/mediapipe/modules/face_detection/BUILD
index 0af70e2ef..25349e181 100644
--- a/mediapipe/modules/face_detection/BUILD
+++ b/mediapipe/modules/face_detection/BUILD
@@ -21,6 +21,34 @@ licenses(["notice"])
 package(default_visibility = ["//visibility:public"])
+mediapipe_simple_subgraph(
+    name = "face_detection_front_by_roi_cpu",
+    graph = "face_detection_front_by_roi_cpu.pbtxt",
+    register_as = "FaceDetectionFrontByRoiCpu",
+    deps = [
+        "//mediapipe/calculators/tensor:image_to_tensor_calculator",
+        "//mediapipe/calculators/tensor:inference_calculator",
+        "//mediapipe/calculators/tensor:tensors_to_detections_calculator",
+        "//mediapipe/calculators/tflite:ssd_anchors_calculator",
+        "//mediapipe/calculators/util:detection_projection_calculator",
+        "//mediapipe/calculators/util:non_max_suppression_calculator",
+    ],
+)
+
+mediapipe_simple_subgraph(
+    name = "face_detection_front_by_roi_gpu",
+    graph = "face_detection_front_by_roi_gpu.pbtxt",
+    register_as = "FaceDetectionFrontByRoiGpu",
+    deps = [
+        "//mediapipe/calculators/tensor:image_to_tensor_calculator",
+        "//mediapipe/calculators/tensor:inference_calculator",
+        "//mediapipe/calculators/tensor:tensors_to_detections_calculator",
+        "//mediapipe/calculators/tflite:ssd_anchors_calculator",
+        "//mediapipe/calculators/util:detection_projection_calculator",
+        "//mediapipe/calculators/util:non_max_suppression_calculator",
+    ],
+)
+
 mediapipe_simple_subgraph(
     name = "face_detection_front_cpu",
     graph = "face_detection_front_cpu.pbtxt",
diff --git a/mediapipe/modules/face_detection/face_detection_front_by_roi_cpu.pbtxt b/mediapipe/modules/face_detection/face_detection_front_by_roi_cpu.pbtxt
new file mode 100644
index 000000000..956dd727c
--- /dev/null
+++ b/mediapipe/modules/face_detection/face_detection_front_by_roi_cpu.pbtxt
@@ -0,0 +1,143 @@
+# MediaPipe graph to detect faces. (CPU input, and inference is executed on
+# CPU.)
+#
+# It is required that "face_detection_front.tflite" is available at
+# "mediapipe/modules/face_detection/face_detection_front.tflite"
+# path during execution.
+#
+# EXAMPLE:
+#   node {
+#     calculator: "FaceDetectionFrontByRoiCpu"
+#     input_stream: "IMAGE:image"
+#     input_stream: "ROI:roi"
+#     output_stream: "DETECTIONS:face_detections"
+#   }
+
+type: "FaceDetectionFrontByRoiCpu"
+
+# CPU image. (ImageFrame)
+input_stream: "IMAGE:image"
+
+# ROI (region of interest) within the given image where faces should be
+# detected. (NormalizedRect)
+input_stream: "ROI:roi"
+
+# Detected faces. (std::vector<Detection>)
+# NOTE: there will not be an output packet in the DETECTIONS stream for this
+# particular timestamp if no faces are detected. However, the MediaPipe
+# framework will internally inform the downstream calculators of the absence of
+# this packet so that they don't wait for it unnecessarily.
+output_stream: "DETECTIONS:detections"
+
+# Transforms the specified region of the image into a 128x128 tensor, keeping
+# the aspect ratio (padding the tensor if needed).
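+# With output_tensor_float_range {min: -1.0, max: 1.0}, 8-bit pixel values in
+# [0, 255] are mapped linearly onto [-1.0, 1.0] (0 -> -1.0, 255 -> 1.0),
+# presumably matching the normalization the detection model was trained with.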
+node { + calculator: "ImageToTensorCalculator" + input_stream: "IMAGE:image" + input_stream: "NORM_RECT:roi" + output_stream: "TENSORS:input_tensors" + output_stream: "MATRIX:transform_matrix" + options: { + [mediapipe.ImageToTensorCalculatorOptions.ext] { + output_tensor_width: 128 + output_tensor_height: 128 + keep_aspect_ratio: true + output_tensor_float_range { + min: -1.0 + max: 1.0 + } + border_mode: BORDER_ZERO + } + } +} + +# Runs a TensorFlow Lite model on CPU that takes an image tensor and outputs a +# vector of tensors representing, for instance, detection boxes/keypoints and +# scores. +node { + calculator: "InferenceCalculator" + input_stream: "TENSORS:input_tensors" + output_stream: "TENSORS:detection_tensors" + options: { + [mediapipe.InferenceCalculatorOptions.ext] { + model_path: "mediapipe/modules/face_detection/face_detection_front.tflite" + delegate { xnnpack {} } + } + } +} + +# Generates a single side packet containing a vector of SSD anchors based on +# the specification in the options. +node { + calculator: "SsdAnchorsCalculator" + output_side_packet: "anchors" + options: { + [mediapipe.SsdAnchorsCalculatorOptions.ext] { + num_layers: 4 + min_scale: 0.1484375 + max_scale: 0.75 + input_size_height: 128 + input_size_width: 128 + anchor_offset_x: 0.5 + anchor_offset_y: 0.5 + strides: 8 + strides: 16 + strides: 16 + strides: 16 + aspect_ratios: 1.0 + fixed_anchor_size: true + } + } +} + +# Decodes the detection tensors generated by the TensorFlow Lite model, based on +# the SSD anchors and the specification in the options, into a vector of +# detections. Each detection describes a detected object. +node { + calculator: "TensorsToDetectionsCalculator" + input_stream: "TENSORS:detection_tensors" + input_side_packet: "ANCHORS:anchors" + output_stream: "DETECTIONS:unfiltered_detections" + options: { + [mediapipe.TensorsToDetectionsCalculatorOptions.ext] { + num_classes: 1 + num_boxes: 896 + num_coords: 16 + box_coord_offset: 0 + keypoint_coord_offset: 4 + num_keypoints: 6 + num_values_per_keypoint: 2 + sigmoid_score: true + score_clipping_thresh: 100.0 + reverse_output_order: true + x_scale: 128.0 + y_scale: 128.0 + h_scale: 128.0 + w_scale: 128.0 + min_score_thresh: 0.5 + } + } +} + +# Performs non-max suppression to remove excessive detections. +node { + calculator: "NonMaxSuppressionCalculator" + input_stream: "unfiltered_detections" + output_stream: "filtered_detections" + options: { + [mediapipe.NonMaxSuppressionCalculatorOptions.ext] { + min_suppression_threshold: 0.3 + overlap_type: INTERSECTION_OVER_UNION + algorithm: WEIGHTED + } + } +} + +# Projects the detections from input tensor to the corresponding locations on +# the original image (input to the graph). +node { + calculator: "DetectionProjectionCalculator" + input_stream: "DETECTIONS:filtered_detections" + input_stream: "PROJECTION_MATRIX:transform_matrix" + output_stream: "DETECTIONS:detections" +} diff --git a/mediapipe/modules/face_detection/face_detection_front_by_roi_gpu.pbtxt b/mediapipe/modules/face_detection/face_detection_front_by_roi_gpu.pbtxt new file mode 100644 index 000000000..331b1129f --- /dev/null +++ b/mediapipe/modules/face_detection/face_detection_front_by_roi_gpu.pbtxt @@ -0,0 +1,143 @@ +# MediaPipe graph to detect faces. (CPU input, and inference is executed on +# CPU.) +# +# It is required that "face_detection_front.tflite" is available at +# "mediapipe/modules/face_detection/face_detection_front.tflite" +# path during execution. 
+#
+# EXAMPLE:
+#   node {
+#     calculator: "FaceDetectionFrontByRoiGpu"
+#     input_stream: "IMAGE:image"
+#     input_stream: "ROI:roi"
+#     output_stream: "DETECTIONS:face_detections"
+#   }
+
+type: "FaceDetectionFrontByRoiGpu"
+
+# GPU image. (GpuBuffer)
+input_stream: "IMAGE:image"
+
+# ROI (region of interest) within the given image where faces should be
+# detected. (NormalizedRect)
+input_stream: "ROI:roi"
+
+# Detected faces. (std::vector<Detection>)
+# NOTE: there will not be an output packet in the DETECTIONS stream for this
+# particular timestamp if no faces are detected. However, the MediaPipe
+# framework will internally inform the downstream calculators of the absence of
+# this packet so that they don't wait for it unnecessarily.
+output_stream: "DETECTIONS:detections"
+
+# Transforms the specified region of the image into a 128x128 tensor, keeping
+# the aspect ratio (padding the tensor if needed).
+node {
+  calculator: "ImageToTensorCalculator"
+  input_stream: "IMAGE_GPU:image"
+  input_stream: "NORM_RECT:roi"
+  output_stream: "TENSORS:input_tensors"
+  output_stream: "MATRIX:transform_matrix"
+  options: {
+    [mediapipe.ImageToTensorCalculatorOptions.ext] {
+      output_tensor_width: 128
+      output_tensor_height: 128
+      keep_aspect_ratio: true
+      output_tensor_float_range {
+        min: -1.0
+        max: 1.0
+      }
+      border_mode: BORDER_ZERO
+      gpu_origin: TOP_LEFT
+    }
+  }
+}
+
+# Runs a TensorFlow Lite model on GPU that takes an image tensor and outputs a
+# vector of tensors representing, for instance, detection boxes/keypoints and
+# scores.
+node {
+  calculator: "InferenceCalculator"
+  input_stream: "TENSORS:input_tensors"
+  output_stream: "TENSORS:detection_tensors"
+  options: {
+    [mediapipe.InferenceCalculatorOptions.ext] {
+      model_path: "mediapipe/modules/face_detection/face_detection_front.tflite"
+    }
+  }
+}
+
+# Generates a single side packet containing a vector of SSD anchors based on
+# the specification in the options.
+node {
+  calculator: "SsdAnchorsCalculator"
+  output_side_packet: "anchors"
+  options: {
+    [mediapipe.SsdAnchorsCalculatorOptions.ext] {
+      num_layers: 4
+      min_scale: 0.1484375
+      max_scale: 0.75
+      input_size_height: 128
+      input_size_width: 128
+      anchor_offset_x: 0.5
+      anchor_offset_y: 0.5
+      strides: 8
+      strides: 16
+      strides: 16
+      strides: 16
+      aspect_ratios: 1.0
+      fixed_anchor_size: true
+    }
+  }
+}
+
+# Decodes the detection tensors generated by the TensorFlow Lite model, based on
+# the SSD anchors and the specification in the options, into a vector of
+# detections. Each detection describes a detected object.
+node {
+  calculator: "TensorsToDetectionsCalculator"
+  input_stream: "TENSORS:detection_tensors"
+  input_side_packet: "ANCHORS:anchors"
+  output_stream: "DETECTIONS:unfiltered_detections"
+  options: {
+    [mediapipe.TensorsToDetectionsCalculatorOptions.ext] {
+      num_classes: 1
+      num_boxes: 896
+      num_coords: 16
+      box_coord_offset: 0
+      keypoint_coord_offset: 4
+      num_keypoints: 6
+      num_values_per_keypoint: 2
+      sigmoid_score: true
+      score_clipping_thresh: 100.0
+      reverse_output_order: true
+      x_scale: 128.0
+      y_scale: 128.0
+      h_scale: 128.0
+      w_scale: 128.0
+      min_score_thresh: 0.5
+    }
+  }
+}
+
+# Performs non-max suppression to remove excessive detections.
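+# (With algorithm: WEIGHTED, overlapping candidates are blended into a single
+# detection weighted by score, rather than all but the highest-scoring one
+# being dropped.)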
+node { + calculator: "NonMaxSuppressionCalculator" + input_stream: "unfiltered_detections" + output_stream: "filtered_detections" + options: { + [mediapipe.NonMaxSuppressionCalculatorOptions.ext] { + min_suppression_threshold: 0.3 + overlap_type: INTERSECTION_OVER_UNION + algorithm: WEIGHTED + } + } +} + +# Projects the detections from input tensor to the corresponding locations on +# the original image (input to the graph). +node { + calculator: "DetectionProjectionCalculator" + input_stream: "DETECTIONS:filtered_detections" + input_stream: "PROJECTION_MATRIX:transform_matrix" + output_stream: "DETECTIONS:detections" +} diff --git a/mediapipe/modules/face_detection/face_detection_front_cpu.pbtxt b/mediapipe/modules/face_detection/face_detection_front_cpu.pbtxt index 2e0975ce9..a7d8dbcc1 100644 --- a/mediapipe/modules/face_detection/face_detection_front_cpu.pbtxt +++ b/mediapipe/modules/face_detection/face_detection_front_cpu.pbtxt @@ -41,6 +41,7 @@ node: { min: -1.0 max: 1.0 } + border_mode: BORDER_ZERO } } } diff --git a/mediapipe/modules/face_detection/face_detection_front_gpu.pbtxt b/mediapipe/modules/face_detection/face_detection_front_gpu.pbtxt index df2b04551..4fd7dc5fc 100644 --- a/mediapipe/modules/face_detection/face_detection_front_gpu.pbtxt +++ b/mediapipe/modules/face_detection/face_detection_front_gpu.pbtxt @@ -41,6 +41,7 @@ node: { min: -1.0 max: 1.0 } + border_mode: BORDER_ZERO gpu_origin: TOP_LEFT } } diff --git a/mediapipe/modules/face_landmark/BUILD b/mediapipe/modules/face_landmark/BUILD index b72e8147f..7aa10fe54 100644 --- a/mediapipe/modules/face_landmark/BUILD +++ b/mediapipe/modules/face_landmark/BUILD @@ -63,14 +63,13 @@ mediapipe_simple_subgraph( ":face_landmark_landmarks_to_roi", "//mediapipe/calculators/core:begin_loop_calculator", "//mediapipe/calculators/core:clip_vector_size_calculator", + "//mediapipe/calculators/core:constant_side_packet_calculator", "//mediapipe/calculators/core:end_loop_calculator", "//mediapipe/calculators/core:gate_calculator", - "//mediapipe/calculators/core:merge_calculator", "//mediapipe/calculators/core:previous_loopback_calculator", "//mediapipe/calculators/image:image_properties_calculator", "//mediapipe/calculators/util:association_norm_rect_calculator", "//mediapipe/calculators/util:collection_has_min_size_calculator", - "//mediapipe/calculators/util:logic_calculator", "//mediapipe/modules/face_detection:face_detection_front_cpu", ], ) @@ -85,14 +84,13 @@ mediapipe_simple_subgraph( ":face_landmark_landmarks_to_roi", "//mediapipe/calculators/core:begin_loop_calculator", "//mediapipe/calculators/core:clip_vector_size_calculator", + "//mediapipe/calculators/core:constant_side_packet_calculator", "//mediapipe/calculators/core:end_loop_calculator", "//mediapipe/calculators/core:gate_calculator", - "//mediapipe/calculators/core:merge_calculator", "//mediapipe/calculators/core:previous_loopback_calculator", "//mediapipe/calculators/image:image_properties_calculator", "//mediapipe/calculators/util:association_norm_rect_calculator", "//mediapipe/calculators/util:collection_has_min_size_calculator", - "//mediapipe/calculators/util:logic_calculator", "//mediapipe/modules/face_detection:face_detection_front_gpu", ], ) diff --git a/mediapipe/modules/face_landmark/face_detection_front_detection_to_roi.pbtxt b/mediapipe/modules/face_landmark/face_detection_front_detection_to_roi.pbtxt index c64c10558..133bc9a9b 100644 --- a/mediapipe/modules/face_landmark/face_detection_front_detection_to_roi.pbtxt +++ 
b/mediapipe/modules/face_landmark/face_detection_front_detection_to_roi.pbtxt @@ -26,7 +26,6 @@ node { rotation_vector_start_keypoint_index: 0 # Left eye. rotation_vector_end_keypoint_index: 1 # Right eye. rotation_vector_target_angle_degrees: 0 - output_zero_rect_for_empty_detections: true } } } diff --git a/mediapipe/modules/face_landmark/face_landmark.tflite b/mediapipe/modules/face_landmark/face_landmark.tflite old mode 100644 new mode 100755 index 9058eaa33..e30e514e1 Binary files a/mediapipe/modules/face_landmark/face_landmark.tflite and b/mediapipe/modules/face_landmark/face_landmark.tflite differ diff --git a/mediapipe/modules/face_landmark/face_landmark_cpu.pbtxt b/mediapipe/modules/face_landmark/face_landmark_cpu.pbtxt index 018d3f4d5..4eb29be65 100644 --- a/mediapipe/modules/face_landmark/face_landmark_cpu.pbtxt +++ b/mediapipe/modules/face_landmark/face_landmark_cpu.pbtxt @@ -81,6 +81,11 @@ node { calculator: "TensorsToFloatsCalculator" input_stream: "TENSORS:face_flag_tensor" output_stream: "FLOAT:face_presence_score" + options { + [mediapipe.TensorsToFloatsCalculatorOptions.ext] { + activation: SIGMOID + } + } } # Applies a threshold to the confidence score to determine whether a face is @@ -91,7 +96,7 @@ node { output_stream: "FLAG:face_presence" options: { [mediapipe.ThresholdingCalculatorOptions.ext] { - threshold: 0.1 + threshold: 0.5 } } } diff --git a/mediapipe/modules/face_landmark/face_landmark_front_cpu.pbtxt b/mediapipe/modules/face_landmark/face_landmark_front_cpu.pbtxt index 8245c8517..6086a7cd2 100644 --- a/mediapipe/modules/face_landmark/face_landmark_front_cpu.pbtxt +++ b/mediapipe/modules/face_landmark/face_landmark_front_cpu.pbtxt @@ -26,10 +26,6 @@ input_stream: "IMAGE:image" # Max number of faces to detect/track. (int) input_side_packet: "NUM_FACES:num_faces" -# Whether face detection can be skipped when face regions can already be -# approximated from face landmarks on the previous frame. -input_side_packet: "CAN_SKIP_DETECTION:can_skip_detection" - # Collection of detected/predicted faces, each represented as a list of 468 face # landmarks. (std::vector) # NOTE: there will not be an output packet in the LANDMARKS stream for this @@ -48,32 +44,41 @@ output_stream: "ROIS_FROM_LANDMARKS:face_rects_from_landmarks" # (std::vector) output_stream: "ROIS_FROM_DETECTIONS:face_rects_from_detections" +# Defines whether landmarks on the previous image should be used to help +# localize landmarks on the current image. +node { + name: "ConstantSidePacketCalculator" + calculator: "ConstantSidePacketCalculator" + output_side_packet: "PACKET:use_prev_landmarks" + options: { + [mediapipe.ConstantSidePacketCalculatorOptions.ext]: { + packet { bool_value: true } + } + } +} +node { + calculator: "GateCalculator" + input_side_packet: "ALLOW:use_prev_landmarks" + input_stream: "prev_face_rects_from_landmarks" + output_stream: "gated_prev_face_rects_from_landmarks" +} + # Determines if an input vector of NormalizedRect has a size greater than or # equal to the provided num_faces. node { calculator: "NormalizedRectVectorHasMinSizeCalculator" - input_stream: "ITERABLE:prev_face_rects_from_landmarks" + input_stream: "ITERABLE:gated_prev_face_rects_from_landmarks" input_side_packet: "num_faces" output_stream: "prev_has_enough_faces" } -# Drops the incoming image if FaceLandmarkCpu was able to identify face presence -# in the previous image and skipping face detection is enabled. 
Otherwise, -# passes the incoming image through to trigger a new round of face detection -# in FaceDetectionFrontCpu. -node { - calculator: "LogicCalculator" - options: { - [mediapipe.LogicCalculatorOptions.ext] { op: AND } - } - input_side_packet: "can_skip_detection" - input_stream: "prev_has_enough_faces" - output_stream: "skip_detection" -} +# Drops the incoming image if enough faces have already been identified from the +# previous image. Otherwise, passes the incoming image through to trigger a new +# round of face detection. node { calculator: "GateCalculator" input_stream: "image" - input_stream: "DISALLOW:skip_detection" + input_stream: "DISALLOW:prev_has_enough_faces" output_stream: "gated_image" options: { [mediapipe.GateCalculatorOptions.ext] { @@ -81,12 +86,6 @@ node { } } } -node { - calculator: "GateCalculator" - input_stream: "prev_face_rects_from_landmarks" - input_stream: "ALLOW:skip_detection" - output_stream: "gated_prev_face_rects_from_landmarks" -} # Detects faces. node { @@ -149,8 +148,8 @@ node { # overlapping regions based on the specified min_similarity_threshold. node { calculator: "AssociationNormRectCalculator" - input_stream: "gated_prev_face_rects_from_landmarks" input_stream: "face_rects_from_detections" + input_stream: "gated_prev_face_rects_from_landmarks" output_stream: "face_rects" options: { [mediapipe.AssociationCalculatorOptions.ext] { diff --git a/mediapipe/modules/face_landmark/face_landmark_front_gpu.pbtxt b/mediapipe/modules/face_landmark/face_landmark_front_gpu.pbtxt index a008717f6..1caa6baf6 100644 --- a/mediapipe/modules/face_landmark/face_landmark_front_gpu.pbtxt +++ b/mediapipe/modules/face_landmark/face_landmark_front_gpu.pbtxt @@ -26,10 +26,6 @@ input_stream: "IMAGE:image" # Max number of faces to detect/track. (int) input_side_packet: "NUM_FACES:num_faces" -# Whether face detection can be skipped when face regions can already be -# approximated from face landmarks on the previous frame. -input_side_packet: "CAN_SKIP_DETECTION:can_skip_detection" - # Collection of detected/predicted faces, each represented as a list of 468 face # landmarks. (std::vector) # NOTE: there will not be an output packet in the LANDMARKS stream for this @@ -48,32 +44,41 @@ output_stream: "ROIS_FROM_LANDMARKS:face_rects_from_landmarks" # (std::vector) output_stream: "ROIS_FROM_DETECTIONS:face_rects_from_detections" +# Defines whether landmarks on the previous image should be used to help +# localize landmarks on the current image. +node { + name: "ConstantSidePacketCalculator" + calculator: "ConstantSidePacketCalculator" + output_side_packet: "PACKET:use_prev_landmarks" + options: { + [mediapipe.ConstantSidePacketCalculatorOptions.ext]: { + packet { bool_value: true } + } + } +} +node { + calculator: "GateCalculator" + input_side_packet: "ALLOW:use_prev_landmarks" + input_stream: "prev_face_rects_from_landmarks" + output_stream: "gated_prev_face_rects_from_landmarks" +} + # Determines if an input vector of NormalizedRect has a size greater than or # equal to the provided num_faces. node { calculator: "NormalizedRectVectorHasMinSizeCalculator" - input_stream: "ITERABLE:prev_face_rects_from_landmarks" + input_stream: "ITERABLE:gated_prev_face_rects_from_landmarks" input_side_packet: "num_faces" output_stream: "prev_has_enough_faces" } -# Drops the incoming image if FaceLandmarkGpu was able to identify face presence -# in the previous image and skipping face detection is enabled. 
Otherwise, -# passes the incoming image through to trigger a new round of face detection -# in FaceDetectionFrontGpu. -node { - calculator: "LogicCalculator" - options: { - [mediapipe.LogicCalculatorOptions.ext] { op: AND } - } - input_side_packet: "can_skip_detection" - input_stream: "prev_has_enough_faces" - output_stream: "skip_detection" -} +# Drops the incoming image if enough faces have already been identified from the +# previous image. Otherwise, passes the incoming image through to trigger a new +# round of face detection. node { calculator: "GateCalculator" input_stream: "image" - input_stream: "DISALLOW:skip_detection" + input_stream: "DISALLOW:prev_has_enough_faces" output_stream: "gated_image" options: { [mediapipe.GateCalculatorOptions.ext] { @@ -81,12 +86,6 @@ node { } } } -node { - calculator: "GateCalculator" - input_stream: "prev_face_rects_from_landmarks" - input_stream: "ALLOW:skip_detection" - output_stream: "gated_prev_face_rects_from_landmarks" -} # Detects faces. node { @@ -149,8 +148,8 @@ node { # overlapping regions based on the specified min_similarity_threshold. node { calculator: "AssociationNormRectCalculator" - input_stream: "gated_prev_face_rects_from_landmarks" input_stream: "face_rects_from_detections" + input_stream: "gated_prev_face_rects_from_landmarks" output_stream: "face_rects" options: { [mediapipe.AssociationCalculatorOptions.ext] { diff --git a/mediapipe/modules/face_landmark/face_landmark_gpu.pbtxt b/mediapipe/modules/face_landmark/face_landmark_gpu.pbtxt index a606166e7..17c9bd78c 100644 --- a/mediapipe/modules/face_landmark/face_landmark_gpu.pbtxt +++ b/mediapipe/modules/face_landmark/face_landmark_gpu.pbtxt @@ -81,6 +81,11 @@ node { calculator: "TensorsToFloatsCalculator" input_stream: "TENSORS:face_flag_tensor" output_stream: "FLOAT:face_presence_score" + options { + [mediapipe.TensorsToFloatsCalculatorOptions.ext] { + activation: SIGMOID + } + } } # Applies a threshold to the confidence score to determine whether a face is @@ -91,7 +96,7 @@ node { output_stream: "FLAG:face_presence" options: { [mediapipe.ThresholdingCalculatorOptions.ext] { - threshold: 0.1 + threshold: 0.5 } } } diff --git a/mediapipe/modules/hand_landmark/BUILD b/mediapipe/modules/hand_landmark/BUILD index 274147105..9c0ac6dba 100644 --- a/mediapipe/modules/hand_landmark/BUILD +++ b/mediapipe/modules/hand_landmark/BUILD @@ -40,7 +40,6 @@ mediapipe_simple_subgraph( "//mediapipe/calculators/tensor:tensors_to_landmarks_calculator", "//mediapipe/calculators/util:landmark_letterbox_removal_calculator", "//mediapipe/calculators/util:landmark_projection_calculator", - "//mediapipe/calculators/util:logic_calculator", "//mediapipe/calculators/util:thresholding_calculator", ], ) @@ -59,7 +58,6 @@ mediapipe_simple_subgraph( "//mediapipe/calculators/tensor:tensors_to_landmarks_calculator", "//mediapipe/calculators/util:landmark_letterbox_removal_calculator", "//mediapipe/calculators/util:landmark_projection_calculator", - "//mediapipe/calculators/util:logic_calculator", "//mediapipe/calculators/util:thresholding_calculator", ], ) @@ -74,6 +72,7 @@ mediapipe_simple_subgraph( ":palm_detection_detection_to_roi", "//mediapipe/calculators/core:begin_loop_calculator", "//mediapipe/calculators/core:clip_vector_size_calculator", + "//mediapipe/calculators/core:constant_side_packet_calculator", "//mediapipe/calculators/core:end_loop_calculator", "//mediapipe/calculators/core:flow_limiter_calculator", "//mediapipe/calculators/core:gate_calculator", @@ -96,6 +95,7 @@ mediapipe_simple_subgraph( 
":palm_detection_detection_to_roi", "//mediapipe/calculators/core:begin_loop_calculator", "//mediapipe/calculators/core:clip_vector_size_calculator", + "//mediapipe/calculators/core:constant_side_packet_calculator", "//mediapipe/calculators/core:end_loop_calculator", "//mediapipe/calculators/core:flow_limiter_calculator", "//mediapipe/calculators/core:gate_calculator", diff --git a/mediapipe/modules/hand_landmark/calculators/hand_landmarks_to_rect_calculator.cc b/mediapipe/modules/hand_landmark/calculators/hand_landmarks_to_rect_calculator.cc index 156425bed..f95d18722 100644 --- a/mediapipe/modules/hand_landmark/calculators/hand_landmarks_to_rect_calculator.cc +++ b/mediapipe/modules/hand_landmark/calculators/hand_landmarks_to_rect_calculator.cc @@ -58,7 +58,7 @@ float ComputeRotation(const NormalizedLandmarkList& landmarks, return rotation; } -::mediapipe::Status NormalizedLandmarkListToRect( +mediapipe::Status NormalizedLandmarkListToRect( const NormalizedLandmarkList& landmarks, const std::pair& image_size, NormalizedRect* rect) { const float rotation = ComputeRotation(landmarks, image_size); @@ -117,7 +117,7 @@ float ComputeRotation(const NormalizedLandmarkList& landmarks, rect->set_height(height); rect->set_rotation(rotation); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace @@ -130,21 +130,21 @@ float ComputeRotation(const NormalizedLandmarkList& landmarks, // mean of PIP joints at the top. class HandLandmarksToRectCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc) { + static mediapipe::Status GetContract(CalculatorContract* cc) { cc->Inputs().Tag(kNormalizedLandmarksTag).Set(); cc->Inputs().Tag(kImageSizeTag).Set>(); cc->Outputs().Tag(kNormRectTag).Set(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Open(CalculatorContext* cc) override { + mediapipe::Status Open(CalculatorContext* cc) override { cc->SetOffset(TimestampDiff(0)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } - ::mediapipe::Status Process(CalculatorContext* cc) override { + mediapipe::Status Process(CalculatorContext* cc) override { if (cc->Inputs().Tag(kNormalizedLandmarksTag).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } RET_CHECK(!cc->Inputs().Tag(kImageSizeTag).IsEmpty()); @@ -159,7 +159,7 @@ class HandLandmarksToRectCalculator : public CalculatorBase { .Tag(kNormRectTag) .Add(output_rect.release(), cc->InputTimestamp()); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } }; REGISTER_CALCULATOR(HandLandmarksToRectCalculator); diff --git a/mediapipe/modules/hand_landmark/hand_landmark_tracking_cpu.pbtxt b/mediapipe/modules/hand_landmark/hand_landmark_tracking_cpu.pbtxt index 470bfc000..97e9f1441 100644 --- a/mediapipe/modules/hand_landmark/hand_landmark_tracking_cpu.pbtxt +++ b/mediapipe/modules/hand_landmark/hand_landmark_tracking_cpu.pbtxt @@ -14,10 +14,6 @@ input_stream: "IMAGE:image" # Max number of hands to detect/track. (int) input_side_packet: "NUM_HANDS:num_hands" -# Whether palm detection can be skipped when hand regions can already be -# approximated from hand landmarks on the previous frame. -input_side_packet: "CAN_SKIP_DETECTION:can_skip_detection" - # Collection of detected/predicted hands, each represented as a list of # landmarks. 
(std::vector) # NOTE: there will not be an output packet in the LANDMARKS stream for this @@ -42,33 +38,41 @@ output_stream: "HAND_ROIS_FROM_LANDMARKS:hand_rects" # (std::vector) output_stream: "HAND_ROIS_FROM_PALM_DETECTIONS:hand_rects_from_palm_detections" +# Defines whether landmarks on the previous image should be used to help +# localize landmarks on the current image. +node { + name: "ConstantSidePacketCalculator" + calculator: "ConstantSidePacketCalculator" + output_side_packet: "PACKET:use_prev_landmarks" + options: { + [mediapipe.ConstantSidePacketCalculatorOptions.ext]: { + packet { bool_value: true } + } + } +} +node { + calculator: "GateCalculator" + input_side_packet: "ALLOW:use_prev_landmarks" + input_stream: "prev_hand_rects_from_landmarks" + output_stream: "gated_prev_hand_rects_from_landmarks" +} + # Determines if an input vector of NormalizedRect has a size greater than or # equal to the provided num_hands. node { calculator: "NormalizedRectVectorHasMinSizeCalculator" - input_stream: "ITERABLE:prev_hand_rects_from_landmarks" + input_stream: "ITERABLE:gated_prev_hand_rects_from_landmarks" input_side_packet: "num_hands" output_stream: "prev_has_enough_hands" } -# Drops the incoming image if the previous image had at least N hands. -# and skipping palm detection is enabled. -# Otherwise, passes the incoming image through to trigger a new round of palm -# detection. -node { - calculator: "LogicCalculator" - options: { - [mediapipe.LogicCalculatorOptions.ext] { op: AND } - } - input_side_packet: "can_skip_detection" - input_stream: "prev_has_enough_hands" - output_stream: "skip_detection" -} - +# Drops the incoming image if enough hands have already been identified from the +# previous image. Otherwise, passes the incoming image through to trigger a new +# round of palm detection. node { calculator: "GateCalculator" input_stream: "image" - input_stream: "DISALLOW:skip_detection" + input_stream: "DISALLOW:prev_has_enough_hands" output_stream: "palm_detection_image" options: { [mediapipe.GateCalculatorOptions.ext] { @@ -76,12 +80,6 @@ node { } } } -node { - calculator: "GateCalculator" - input_stream: "prev_hand_rects_from_landmarks" - input_stream: "ALLOW:skip_detection" - output_stream: "gated_prev_hand_rects_from_landmarks" -} # Detects palms. node { @@ -143,8 +141,8 @@ node { # overlapping regions based on the specified min_similarity_threshold. node { calculator: "AssociationNormRectCalculator" - input_stream: "gated_prev_hand_rects_from_landmarks" input_stream: "hand_rects_from_palm_detections" + input_stream: "gated_prev_hand_rects_from_landmarks" output_stream: "hand_rects" options: { [mediapipe.AssociationCalculatorOptions.ext] { diff --git a/mediapipe/modules/hand_landmark/hand_landmark_tracking_gpu.pbtxt b/mediapipe/modules/hand_landmark/hand_landmark_tracking_gpu.pbtxt index 640676a3d..fa8c5c172 100644 --- a/mediapipe/modules/hand_landmark/hand_landmark_tracking_gpu.pbtxt +++ b/mediapipe/modules/hand_landmark/hand_landmark_tracking_gpu.pbtxt @@ -14,10 +14,6 @@ input_stream: "IMAGE:image" # Max number of hands to detect/track. (int) input_side_packet: "NUM_HANDS:num_hands" -# Whether palm detection can be skipped when hand regions can already be -# approximated from hand landmarks on the previous frame. -input_side_packet: "CAN_SKIP_DETECTION:can_skip_detection" - # Collection of detected/predicted hands, each represented as a list of # landmarks. 
(std::vector) # NOTE: there will not be an output packet in the LANDMARKS stream for this @@ -42,32 +38,41 @@ output_stream: "HAND_ROIS_FROM_LANDMARKS:hand_rects" # (std::vector) output_stream: "HAND_ROIS_FROM_PALM_DETECTIONS:hand_rects_from_palm_detections" +# Defines whether landmarks on the previous image should be used to help +# localize landmarks on the current image. +node { + name: "ConstantSidePacketCalculator" + calculator: "ConstantSidePacketCalculator" + output_side_packet: "PACKET:use_prev_landmarks" + options: { + [mediapipe.ConstantSidePacketCalculatorOptions.ext]: { + packet { bool_value: true } + } + } +} +node { + calculator: "GateCalculator" + input_side_packet: "ALLOW:use_prev_landmarks" + input_stream: "prev_hand_rects_from_landmarks" + output_stream: "gated_prev_hand_rects_from_landmarks" +} + # Determines if an input vector of NormalizedRect has a size greater than or # equal to the provided num_hands. node { calculator: "NormalizedRectVectorHasMinSizeCalculator" - input_stream: "ITERABLE:prev_hand_rects_from_landmarks" + input_stream: "ITERABLE:gated_prev_hand_rects_from_landmarks" input_side_packet: "num_hands" output_stream: "prev_has_enough_hands" } -# Drops the incoming image if the previous image had at least N hands. -# and skipping palm detection is enabled. -# Otherwise, passes the incoming image through to trigger a new round of palm -# detection. -node { - calculator: "LogicCalculator" - options: { - [mediapipe.LogicCalculatorOptions.ext] { op: AND } - } - input_side_packet: "can_skip_detection" - input_stream: "prev_has_enough_hands" - output_stream: "skip_detection" -} +# Drops the incoming image if enough hands have already been identified from the +# previous image. Otherwise, passes the incoming image through to trigger a new +# round of palm detection. node { calculator: "GateCalculator" input_stream: "image" - input_stream: "DISALLOW:skip_detection" + input_stream: "DISALLOW:prev_has_enough_hands" output_stream: "palm_detection_image" options: { [mediapipe.GateCalculatorOptions.ext] { @@ -75,12 +80,6 @@ node { } } } -node { - calculator: "GateCalculator" - input_stream: "prev_hand_rects_from_landmarks" - input_stream: "ALLOW:skip_detection" - output_stream: "gated_prev_hand_rects_from_landmarks" -} # Detects palms. node { @@ -143,8 +142,8 @@ node { # overlapping regions based on the specified min_similarity_threshold. node { calculator: "AssociationNormRectCalculator" - input_stream: "gated_prev_hand_rects_from_landmarks" input_stream: "hand_rects_from_palm_detections" + input_stream: "gated_prev_hand_rects_from_landmarks" output_stream: "hand_rects" options: { [mediapipe.AssociationCalculatorOptions.ext] { diff --git a/mediapipe/modules/holistic_landmark/BUILD b/mediapipe/modules/holistic_landmark/BUILD new file mode 100644 index 000000000..0b6e40569 --- /dev/null +++ b/mediapipe/modules/holistic_landmark/BUILD @@ -0,0 +1,267 @@ +# Copyright 2020 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
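The rewiring above (here and in the face/hand tracking graphs) drops the external CAN_SKIP_DETECTION side packet and the LogicCalculator in favor of a constant `use_prev_landmarks` side packet that gates the previous frame's rects before the min-size check. A minimal sketch of the resulting per-frame decision, assuming the stream semantics described in the graph comments (illustrative pseudo-logic, not framework code):

```cpp
#include <vector>

struct NormalizedRect {};

bool ShouldRunDetection(
    const std::vector<NormalizedRect>& prev_rects_from_landmarks,
    bool use_prev_landmarks,  // constant side packet, true by default
    int num_targets) {        // num_faces / num_hands
  // GateCalculator(ALLOW: use_prev_landmarks): previous rects pass through
  // only when reuse is enabled.
  const std::vector<NormalizedRect>* gated =
      use_prev_landmarks ? &prev_rects_from_landmarks : nullptr;
  // NormalizedRectVectorHasMinSizeCalculator.
  const bool prev_has_enough =
      gated != nullptr && static_cast<int>(gated->size()) >= num_targets;
  // GateCalculator(DISALLOW: prev_has_enough_*): the image reaches the
  // detector only when tracking did not already cover every target.
  return !prev_has_enough;
}
```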
+ +load("//mediapipe/framework/tool:mediapipe_graph.bzl", "mediapipe_simple_subgraph") + +# TODO: revert to private. +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +exports_files([ + "hand_recrop.tflite", +]) + +mediapipe_simple_subgraph( + name = "face_landmarks_from_pose_gpu", + graph = "face_landmarks_from_pose_gpu.pbtxt", + register_as = "FaceLandmarksFromPoseGpu", + deps = [ + ":face_detection_front_detections_to_roi", + ":face_landmarks_from_pose_to_recrop_roi", + ":face_tracking", + "//mediapipe/calculators/core:split_normalized_landmark_list_calculator", + "//mediapipe/calculators/image:image_properties_calculator", + "//mediapipe/modules/face_detection:face_detection_front_by_roi_gpu", + "//mediapipe/modules/face_landmark:face_landmark_gpu", + ], +) + +mediapipe_simple_subgraph( + name = "face_landmarks_from_pose_cpu", + graph = "face_landmarks_from_pose_cpu.pbtxt", + register_as = "FaceLandmarksFromPoseCpu", + deps = [ + ":face_detection_front_detections_to_roi", + ":face_landmarks_from_pose_to_recrop_roi", + ":face_tracking", + "//mediapipe/calculators/core:split_normalized_landmark_list_calculator", + "//mediapipe/calculators/image:image_properties_calculator", + "//mediapipe/modules/face_detection:face_detection_front_by_roi_cpu", + "//mediapipe/modules/face_landmark:face_landmark_cpu", + ], +) + +mediapipe_simple_subgraph( + name = "face_landmarks_to_roi", + graph = "face_landmarks_to_roi.pbtxt", + register_as = "FaceLandmarksToRoi", + deps = [ + "//mediapipe/calculators/util:detections_to_rects_calculator", + "//mediapipe/calculators/util:landmarks_to_detection_calculator", + "//mediapipe/calculators/util:rect_transformation_calculator", + ], +) + +mediapipe_simple_subgraph( + name = "face_detection_front_detections_to_roi", + graph = "face_detection_front_detections_to_roi.pbtxt", + register_as = "FaceDetectionFrontDetectionsToRoi", + deps = [ + "//mediapipe/calculators/util:detections_to_rects_calculator", + "//mediapipe/calculators/util:rect_transformation_calculator", + ], +) + +mediapipe_simple_subgraph( + name = "face_tracking", + graph = "face_tracking.pbtxt", + register_as = "FaceTracking", + deps = [ + ":face_landmarks_to_roi", + "//mediapipe/calculators/core:previous_loopback_calculator", + "//mediapipe/modules/holistic_landmark/calculators:roi_tracking_calculator", + ], +) + +mediapipe_simple_subgraph( + name = "face_landmarks_from_pose_to_recrop_roi", + graph = "face_landmarks_from_pose_to_recrop_roi.pbtxt", + register_as = "FaceLandmarksFromPoseToRecropRoi", + deps = [ + "//mediapipe/calculators/util:detections_to_rects_calculator", + "//mediapipe/calculators/util:landmarks_to_detection_calculator", + "//mediapipe/calculators/util:rect_transformation_calculator", + ], +) + +mediapipe_simple_subgraph( + name = "hand_landmarks_from_pose_gpu", + graph = "hand_landmarks_from_pose_gpu.pbtxt", + register_as = "HandLandmarksFromPoseGpu", + deps = [ + ":hand_landmarks_from_pose_to_recrop_roi", + ":hand_recrop_by_roi_gpu", + ":hand_tracking", + ":hand_visibility_from_hand_landmarks_from_pose", + "//mediapipe/calculators/core:gate_calculator", + "//mediapipe/calculators/image:image_properties_calculator", + "//mediapipe/modules/hand_landmark:hand_landmark_gpu", + ], +) + +mediapipe_simple_subgraph( + name = "hand_landmarks_from_pose_cpu", + graph = "hand_landmarks_from_pose_cpu.pbtxt", + register_as = "HandLandmarksFromPoseCpu", + deps = [ + ":hand_landmarks_from_pose_to_recrop_roi", + ":hand_recrop_by_roi_cpu", + ":hand_tracking", + 
":hand_visibility_from_hand_landmarks_from_pose", + "//mediapipe/calculators/core:gate_calculator", + "//mediapipe/calculators/image:image_properties_calculator", + "//mediapipe/modules/hand_landmark:hand_landmark_cpu", + ], +) + +mediapipe_simple_subgraph( + name = "hand_landmarks_to_roi", + graph = "hand_landmarks_to_roi.pbtxt", + register_as = "HandLandmarksToRoi", + deps = [ + "//mediapipe/calculators/core:split_normalized_landmark_list_calculator", + "//mediapipe/calculators/util:rect_transformation_calculator", + "//mediapipe/modules/hand_landmark/calculators:hand_landmarks_to_rect_calculator", + ], +) + +mediapipe_simple_subgraph( + name = "hand_recrop_by_roi_gpu", + graph = "hand_recrop_by_roi_gpu.pbtxt", + register_as = "HandRecropByRoiGpu", + deps = [ + "//mediapipe/calculators/image:image_properties_calculator", + "//mediapipe/calculators/tensor:image_to_tensor_calculator", + "//mediapipe/calculators/tensor:inference_calculator", + "//mediapipe/calculators/tensor:tensors_to_landmarks_calculator", + "//mediapipe/calculators/util:alignment_points_to_rects_calculator", + "//mediapipe/calculators/util:landmark_letterbox_removal_calculator", + "//mediapipe/calculators/util:landmark_projection_calculator", + "//mediapipe/calculators/util:landmarks_to_detection_calculator", + "//mediapipe/calculators/util:rect_transformation_calculator", + ], +) + +mediapipe_simple_subgraph( + name = "hand_recrop_by_roi_cpu", + graph = "hand_recrop_by_roi_cpu.pbtxt", + register_as = "HandRecropByRoiCpu", + deps = [ + "//mediapipe/calculators/image:image_properties_calculator", + "//mediapipe/calculators/tensor:image_to_tensor_calculator", + "//mediapipe/calculators/tensor:inference_calculator", + "//mediapipe/calculators/tensor:tensors_to_landmarks_calculator", + "//mediapipe/calculators/util:alignment_points_to_rects_calculator", + "//mediapipe/calculators/util:landmark_letterbox_removal_calculator", + "//mediapipe/calculators/util:landmark_projection_calculator", + "//mediapipe/calculators/util:landmarks_to_detection_calculator", + "//mediapipe/calculators/util:rect_transformation_calculator", + ], +) + +mediapipe_simple_subgraph( + name = "hand_tracking", + graph = "hand_tracking.pbtxt", + register_as = "HandTracking", + deps = [ + ":hand_landmarks_to_roi", + "//mediapipe/calculators/core:previous_loopback_calculator", + "//mediapipe/modules/holistic_landmark/calculators:roi_tracking_calculator", + ], +) + +# TODO: parametrize holistic_landmark graph with visibility and make private. 
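The `hand_landmarks_from_pose_to_recrop_roi` subgraph above derives the hand re-crop ROI from three pose keypoints; the arithmetic lives in hand_detections_from_pose_to_rects_calculator.cc later in this diff. A minimal sketch of that estimate (editor's aside with illustrative types; absolute pixel coordinates, rotation left unnormalized):

```cpp
#include <cmath>

struct Point { float x, y; };

struct Roi { Point center; float size; float rotation; };

Roi HandRoiFromPose(Point wrist, Point index, Point pinky) {
  // Middle finger approximated as a weighted mean of index and pinky.
  const Point middle = {(2.f * index.x + pinky.x) / 3.f,
                        (2.f * index.y + pinky.y) / 3.f};
  // Crop is centered on the middle finger and sized as twice its distance
  // from the wrist.
  const float box_size =
      2.f * std::sqrt((middle.x - wrist.x) * (middle.x - wrist.x) +
                      (middle.y - wrist.y) * (middle.y - wrist.y));
  // Rotate so the wrist->middle direction points up (target angle 90 deg).
  const float rotation =
      0.5f * M_PI - std::atan2(-(middle.y - wrist.y), middle.x - wrist.x);
  return {middle, box_size, rotation};
}
```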
+mediapipe_simple_subgraph( + name = "hand_wrist_for_pose", + graph = "hand_wrist_for_pose.pbtxt", + register_as = "HandWristForPose", + visibility = ["//visibility:public"], + deps = [ + "//mediapipe/calculators/core:constant_side_packet_calculator", + "//mediapipe/calculators/core:side_packet_to_stream_calculator", + "//mediapipe/calculators/core:split_normalized_landmark_list_calculator", + "//mediapipe/calculators/util:set_landmark_visibility_calculator", + ], +) + +mediapipe_simple_subgraph( + name = "hand_landmarks_left_and_right_gpu", + graph = "hand_landmarks_left_and_right_gpu.pbtxt", + register_as = "HandLandmarksLeftAndRightGpu", + deps = [ + ":hand_landmarks_from_pose_gpu", + "//mediapipe/calculators/core:split_normalized_landmark_list_calculator", + ], +) + +mediapipe_simple_subgraph( + name = "hand_landmarks_left_and_right_cpu", + graph = "hand_landmarks_left_and_right_cpu.pbtxt", + register_as = "HandLandmarksLeftAndRightCpu", + deps = [ + ":hand_landmarks_from_pose_cpu", + "//mediapipe/calculators/core:split_normalized_landmark_list_calculator", + ], +) + +mediapipe_simple_subgraph( + name = "hand_landmarks_from_pose_to_recrop_roi", + graph = "hand_landmarks_from_pose_to_recrop_roi.pbtxt", + register_as = "HandLandmarksFromPoseToRecropRoi", + deps = [ + "//mediapipe/calculators/util:landmarks_to_detection_calculator", + "//mediapipe/calculators/util:rect_transformation_calculator", + "//mediapipe/modules/holistic_landmark/calculators:hand_detections_from_pose_to_rects_calculator", + ], +) + +mediapipe_simple_subgraph( + name = "hand_visibility_from_hand_landmarks_from_pose", + graph = "hand_visibility_from_hand_landmarks_from_pose.pbtxt", + register_as = "HandVisibilityFromHandLandmarksFromPose", + deps = [ + "//mediapipe/calculators/core:split_normalized_landmark_list_calculator", + "//mediapipe/calculators/util:landmark_visibility_calculator", + "//mediapipe/calculators/util:thresholding_calculator", + ], +) + +mediapipe_simple_subgraph( + name = "holistic_landmark_gpu", + graph = "holistic_landmark_gpu.pbtxt", + register_as = "HolisticLandmarkGpu", + visibility = ["//visibility:public"], + deps = [ + ":face_landmarks_from_pose_gpu", + ":hand_landmarks_left_and_right_gpu", + "//mediapipe/calculators/core:flow_limiter_calculator", + "//mediapipe/calculators/image:image_transformation_calculator", + "//mediapipe/modules/pose_landmark:pose_landmark_gpu", + ], +) + +mediapipe_simple_subgraph( + name = "holistic_landmark_cpu", + graph = "holistic_landmark_cpu.pbtxt", + register_as = "HolisticLandmarkCpu", + visibility = ["//visibility:public"], + deps = [ + ":face_landmarks_from_pose_cpu", + ":hand_landmarks_left_and_right_cpu", + "//mediapipe/calculators/core:flow_limiter_calculator", + "//mediapipe/calculators/image:image_transformation_calculator", + "//mediapipe/modules/pose_landmark:pose_landmark_cpu", + ], +) diff --git a/mediapipe/modules/holistic_landmark/README.md b/mediapipe/modules/holistic_landmark/README.md new file mode 100644 index 000000000..126518a51 --- /dev/null +++ b/mediapipe/modules/holistic_landmark/README.md @@ -0,0 +1,6 @@ +# holistic_landmark + +Subgraphs|Details +:--- | :--- +[`HolisticLandmarkCpu`](https://github.com/google/mediapipe/tree/master/mediapipe/modules/holistic_landmark/holistic_landmark_cpu.pbtxt)| Predicts pose + left/right hand + face landmarks. 
(CPU input.)
+[`HolisticLandmarkGpu`](https://github.com/google/mediapipe/tree/master/mediapipe/modules/holistic_landmark/holistic_landmark_gpu.pbtxt)| Predicts pose + left/right hand + face landmarks. (GPU input.)
diff --git a/mediapipe/modules/holistic_landmark/calculators/BUILD b/mediapipe/modules/holistic_landmark/calculators/BUILD
new file mode 100644
index 000000000..c3c091924
--- /dev/null
+++ b/mediapipe/modules/holistic_landmark/calculators/BUILD
@@ -0,0 +1,63 @@
+# Copyright 2020 The MediaPipe Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("//mediapipe/framework/port:build_config.bzl", "mediapipe_proto_library")
+
+licenses(["notice"])
+
+package(default_visibility = ["//visibility:public"])
+
+cc_library(
+    name = "hand_detections_from_pose_to_rects_calculator",
+    srcs = ["hand_detections_from_pose_to_rects_calculator.cc"],
+    visibility = ["//visibility:public"],
+    deps = [
+        "//mediapipe/calculators/util:detections_to_rects_calculator",
+        "//mediapipe/calculators/util:detections_to_rects_calculator_cc_proto",
+        "//mediapipe/framework:calculator_framework",
+        "//mediapipe/framework:calculator_options_cc_proto",
+        "//mediapipe/framework/formats:detection_cc_proto",
+        "//mediapipe/framework/formats:location_data_cc_proto",
+        "//mediapipe/framework/formats:rect_cc_proto",
+        "//mediapipe/framework/port:ret_check",
+        "//mediapipe/framework/port:status",
+    ],
+    alwayslink = 1,
+)
+
+mediapipe_proto_library(
+    name = "roi_tracking_calculator_proto",
+    srcs = ["roi_tracking_calculator.proto"],
+    visibility = ["//visibility:public"],
+    deps = [
+        "//mediapipe/framework:calculator_options_proto",
+        "//mediapipe/framework:calculator_proto",
+    ],
+)
+
+cc_library(
+    name = "roi_tracking_calculator",
+    srcs = ["roi_tracking_calculator.cc"],
+    visibility = ["//visibility:public"],
+    deps = [
+        ":roi_tracking_calculator_cc_proto",
+        "//mediapipe/framework:calculator_framework",
+        "//mediapipe/framework/formats:landmark_cc_proto",
+        "//mediapipe/framework/formats:rect_cc_proto",
+        "//mediapipe/framework/port:logging",
+        "//mediapipe/framework/port:rectangle",
+        "@com_google_absl//absl/strings:str_format",
+    ],
+    alwayslink = 1,
+)
diff --git a/mediapipe/modules/holistic_landmark/calculators/hand_detections_from_pose_to_rects_calculator.cc b/mediapipe/modules/holistic_landmark/calculators/hand_detections_from_pose_to_rects_calculator.cc
new file mode 100644
index 000000000..67faf60a4
--- /dev/null
+++ b/mediapipe/modules/holistic_landmark/calculators/hand_detections_from_pose_to_rects_calculator.cc
@@ -0,0 +1,156 @@
+#include <cmath>
+
+#include "mediapipe/calculators/util/detections_to_rects_calculator.h"
+#include "mediapipe/calculators/util/detections_to_rects_calculator.pb.h"
+#include "mediapipe/framework/calculator_framework.h"
+#include "mediapipe/framework/calculator_options.pb.h"
+#include "mediapipe/framework/formats/detection.pb.h"
+#include "mediapipe/framework/formats/location_data.pb.h"
+#include "mediapipe/framework/formats/rect.pb.h"
+#include
"mediapipe/framework/port/ret_check.h" +#include "mediapipe/framework/port/status.h" + +namespace mediapipe { + +namespace {} // namespace + +// Generates a hand ROI based on a hand detection derived from hand-related pose +// landmarks. +// +// Inputs: +// DETECTION - Detection. +// Detection to convert to ROI. Must contain 3 key points indicating: wrist, +// pinky and index fingers. +// +// IMAGE_SIZE - std::pair +// Image width and height. +// +// Outputs: +// NORM_RECT - NormalizedRect. +// ROI based on passed input. +// +// Examples +// node { +// calculator: "HandDetectionsFromPoseToRectsCalculator" +// input_stream: "DETECTION:hand_detection_from_pose" +// input_stream: "IMAGE_SIZE:image_size" +// output_stream: "NORM_RECT:hand_roi_from_pose" +// } +class HandDetectionsFromPoseToRectsCalculator + : public DetectionsToRectsCalculator { + public: + ::mediapipe::Status Open(CalculatorContext* cc) override; + + private: + ::mediapipe::Status DetectionToNormalizedRect( + const Detection& detection, const DetectionSpec& detection_spec, + NormalizedRect* rect) override; + ::mediapipe::Status ComputeRotation(const Detection& detection, + const DetectionSpec& detection_spec, + float* rotation) override; +}; +REGISTER_CALCULATOR(HandDetectionsFromPoseToRectsCalculator); + +namespace { + +constexpr int kWrist = 0; +constexpr int kPinky = 1; +constexpr int kIndex = 2; + +constexpr char kImageSizeTag[] = "IMAGE_SIZE"; + +} // namespace + +::mediapipe::Status HandDetectionsFromPoseToRectsCalculator::Open( + CalculatorContext* cc) { + RET_CHECK(cc->Inputs().HasTag(kImageSizeTag)) + << "Image size is required to calculate rotated rect."; + cc->SetOffset(TimestampDiff(0)); + target_angle_ = M_PI * 0.5f; + rotate_ = true; + options_ = cc->Options(); + output_zero_rect_for_empty_detections_ = + options_.output_zero_rect_for_empty_detections(); + + return ::mediapipe::OkStatus(); +} + +::mediapipe::Status +HandDetectionsFromPoseToRectsCalculator ::DetectionToNormalizedRect( + const Detection& detection, const DetectionSpec& detection_spec, + NormalizedRect* rect) { + const auto& location_data = detection.location_data(); + const auto& image_size = detection_spec.image_size; + RET_CHECK(image_size) << "Image size is required to calculate rotation"; + + const float x_wrist = + location_data.relative_keypoints(kWrist).x() * image_size->first; + const float y_wrist = + location_data.relative_keypoints(kWrist).y() * image_size->second; + + const float x_index = + location_data.relative_keypoints(kIndex).x() * image_size->first; + const float y_index = + location_data.relative_keypoints(kIndex).y() * image_size->second; + + const float x_pinky = + location_data.relative_keypoints(kPinky).x() * image_size->first; + const float y_pinky = + location_data.relative_keypoints(kPinky).y() * image_size->second; + + // Estimate middle finger. + const float x_middle = (2.f * x_index + x_pinky) / 3.f; + const float y_middle = (2.f * y_index + y_pinky) / 3.f; + + // Crop center as middle finger. + const float center_x = x_middle; + const float center_y = y_middle; + + // Bounding box size as double distance from middle finger to wrist. + const float box_size = + std::sqrt((x_middle - x_wrist) * (x_middle - x_wrist) + + (y_middle - y_wrist) * (y_middle - y_wrist)) * + 2.0; + + // Set resulting bounding box. 
+ rect->set_x_center(center_x / image_size->first); + rect->set_y_center(center_y / image_size->second); + rect->set_width(box_size / image_size->first); + rect->set_height(box_size / image_size->second); + + return ::mediapipe::OkStatus(); +} + +::mediapipe::Status HandDetectionsFromPoseToRectsCalculator::ComputeRotation( + const Detection& detection, const DetectionSpec& detection_spec, + float* rotation) { + const auto& location_data = detection.location_data(); + const auto& image_size = detection_spec.image_size; + RET_CHECK(image_size) << "Image size is required to calculate rotation"; + + const float x_wrist = + location_data.relative_keypoints(kWrist).x() * image_size->first; + const float y_wrist = + location_data.relative_keypoints(kWrist).y() * image_size->second; + + const float x_index = + location_data.relative_keypoints(kIndex).x() * image_size->first; + const float y_index = + location_data.relative_keypoints(kIndex).y() * image_size->second; + + const float x_pinky = + location_data.relative_keypoints(kPinky).x() * image_size->first; + const float y_pinky = + location_data.relative_keypoints(kPinky).y() * image_size->second; + + // Estimate middle finger. + const float x_middle = (2.f * x_index + x_pinky) / 3.f; + const float y_middle = (2.f * y_index + y_pinky) / 3.f; + + *rotation = NormalizeRadians( + target_angle_ - std::atan2(-(y_middle - y_wrist), x_middle - x_wrist)); + + return ::mediapipe::OkStatus(); +} + +} // namespace mediapipe diff --git a/mediapipe/modules/holistic_landmark/calculators/roi_tracking_calculator.cc b/mediapipe/modules/holistic_landmark/calculators/roi_tracking_calculator.cc new file mode 100644 index 000000000..80560694c --- /dev/null +++ b/mediapipe/modules/holistic_landmark/calculators/roi_tracking_calculator.cc @@ -0,0 +1,358 @@ +// Copyright 2020 The MediaPipe Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +#include + +#include + +#include "absl/strings/str_format.h" +#include "mediapipe/framework/calculator_framework.h" +#include "mediapipe/framework/formats/landmark.pb.h" +#include "mediapipe/framework/formats/rect.pb.h" +#include "mediapipe/framework/port/logging.h" +#include "mediapipe/framework/port/rectangle.h" +#include "mediapipe/modules/holistic_landmark/calculators/roi_tracking_calculator.pb.h" + +namespace mediapipe { + +namespace { + +constexpr char kPrevLandmarksTag[] = "PREV_LANDMARKS"; +constexpr char kPrevLandmarksRectTag[] = "PREV_LANDMARKS_RECT"; +constexpr char kRecropRectTag[] = "RECROP_RECT"; +constexpr char kImageSizeTag[] = "IMAGE_SIZE"; +constexpr char kTrackingRectTag[] = "TRACKING_RECT"; + +// TODO: Use rect rotation. +// Verifies that Intersection over Union of previous frame rect and current +// frame re-crop rect is less than threshold. 
+bool IouRequirementsSatisfied(const NormalizedRect& prev_rect, + const NormalizedRect& recrop_rect, + const std::pair& image_size, + const float min_iou) { + auto r1 = Rectangle_f(prev_rect.x_center() * image_size.first, + prev_rect.y_center() * image_size.second, + prev_rect.width() * image_size.first, + prev_rect.height() * image_size.second); + auto r2 = Rectangle_f(recrop_rect.x_center() * image_size.first, + recrop_rect.y_center() * image_size.second, + recrop_rect.width() * image_size.first, + recrop_rect.height() * image_size.second); + + const float intersection_area = r1.Intersect(r2).Area(); + const float union_area = r1.Area() + r2.Area() - intersection_area; + + const float intersection_threshold = union_area * min_iou; + if (intersection_area < intersection_threshold) { + VLOG(1) << absl::StrFormat("Lost tracking: IoU intersection %f < %f", + intersection_area, intersection_threshold); + return false; + } + + return true; +} + +// Verifies that current frame re-crop rect rotation/translation/scale didn't +// change much comparing to the previous frame rect. Translation and scale are +// normalized by current frame re-crop rect. +bool RectRequirementsSatisfied(const NormalizedRect& prev_rect, + const NormalizedRect& recrop_rect, + const std::pair image_size, + const float rotation_degrees, + const float translation, const float scale) { + // Rotate both rects so that re-crop rect edges are parallel to XY axes. That + // will allow to compute x/y translation of the previous frame rect along axes + // of the current frame re-crop rect. + const float rotation = -recrop_rect.rotation(); + + const float cosa = cos(rotation); + const float sina = sin(rotation); + + // Rotate previous frame rect and get its parameters. + const float prev_rect_x = prev_rect.x_center() * image_size.first * cosa - + prev_rect.y_center() * image_size.second * sina; + const float prev_rect_y = prev_rect.x_center() * image_size.first * sina + + prev_rect.y_center() * image_size.second * cosa; + const float prev_rect_width = prev_rect.width() * image_size.first; + const float prev_rect_height = prev_rect.height() * image_size.second; + const float prev_rect_rotation = prev_rect.rotation() / M_PI * 180.f; + + // Rotate current frame re-crop rect and get its parameters. + const float recrop_rect_x = recrop_rect.x_center() * image_size.first * cosa - + recrop_rect.y_center() * image_size.second * sina; + const float recrop_rect_y = recrop_rect.x_center() * image_size.first * sina + + recrop_rect.y_center() * image_size.second * cosa; + const float recrop_rect_width = recrop_rect.width() * image_size.first; + const float recrop_rect_height = recrop_rect.height() * image_size.second; + const float recrop_rect_rotation = recrop_rect.rotation() / M_PI * 180.f; + + // Rect requirements are satisfied unless one of the checks below fails. + bool satisfied = true; + + // Ensure that rotation diff is in [0, 180] range. 
+ float rotation_diff = prev_rect_rotation - recrop_rect_rotation; + if (rotation_diff > 180.f) { + rotation_diff -= 360.f; + } + if (rotation_diff < -180.f) { + rotation_diff += 360.f; + } + rotation_diff = abs(rotation_diff); + if (rotation_diff > rotation_degrees) { + satisfied = false; + VLOG(1) << absl::StrFormat("Lost tracking: rect rotation %f > %f", + rotation_diff, rotation_degrees); + } + + const float x_diff = abs(prev_rect_x - recrop_rect_x); + const float x_threshold = recrop_rect_width * translation; + if (x_diff > x_threshold) { + satisfied = false; + VLOG(1) << absl::StrFormat("Lost tracking: rect x translation %f > %f", + x_diff, x_threshold); + } + + const float y_diff = abs(prev_rect_y - recrop_rect_y); + const float y_threshold = recrop_rect_height * translation; + if (y_diff > y_threshold) { + satisfied = false; + VLOG(1) << absl::StrFormat("Lost tracking: rect y translation %f > %f", + y_diff, y_threshold); + } + + const float width_diff = abs(prev_rect_width - recrop_rect_width); + const float width_threshold = recrop_rect_width * scale; + if (width_diff > width_threshold) { + satisfied = false; + VLOG(1) << absl::StrFormat("Lost tracking: rect width %f > %f", width_diff, + width_threshold); + } + + const float height_diff = abs(prev_rect_height - recrop_rect_height); + const float height_threshold = recrop_rect_height * scale; + if (height_diff > height_threshold) { + satisfied = false; + VLOG(1) << absl::StrFormat("Lost tracking: rect height %f > %f", + height_diff, height_threshold); + } + + return satisfied; +} + +// Verifies that landmarks from the previous frame are within re-crop rectangle +// bounds on the current frame. +bool LandmarksRequirementsSatisfied(const NormalizedLandmarkList& landmarks, + const NormalizedRect& recrop_rect, + const std::pair image_size, + const float recrop_rect_margin) { + // Rotate both re-crop rectangle and landmarks so that re-crop rectangle edges + // are parallel to XY axes. It will allow to easily check if landmarks are + // within re-crop rect bounds along re-crop rect axes. + // + // Rect rotation is specified clockwise. To apply cos/sin functions we + // transform it into counterclockwise. + const float rotation = -recrop_rect.rotation(); + + const float cosa = cos(rotation); + const float sina = sin(rotation); + + // Rotate rect. + const float rect_x = recrop_rect.x_center() * image_size.first * cosa - + recrop_rect.y_center() * image_size.second * sina; + const float rect_y = recrop_rect.x_center() * image_size.first * sina + + recrop_rect.y_center() * image_size.second * cosa; + const float rect_width = + recrop_rect.width() * image_size.first * (1.f + recrop_rect_margin); + const float rect_height = + recrop_rect.height() * image_size.second * (1.f + recrop_rect_margin); + + // Get rect bounds. 
+ const float rect_left = rect_x - rect_width * 0.5f; + const float rect_right = rect_x + rect_width * 0.5f; + const float rect_top = rect_y - rect_height * 0.5f; + const float rect_bottom = rect_y + rect_height * 0.5f; + + for (int i = 0; i < landmarks.landmark_size(); ++i) { + const auto& landmark = landmarks.landmark(i); + const float x = landmark.x() * image_size.first * cosa - + landmark.y() * image_size.second * sina; + const float y = landmark.x() * image_size.first * sina + + landmark.y() * image_size.second * cosa; + + if (!(rect_left < x && x < rect_right && rect_top < y && y < rect_bottom)) { + VLOG(1) << "Lost tracking: landmarks out of re-crop rect"; + return false; + } + } + + return true; +} + +} // namespace + +// A calculator to track object rectangle between frames. +// +// Calculator checks that all requirements for tracking are satisfied and uses +// rectangle from the previous frame in this case, otherwise - uses current +// frame re-crop rectangle. +// +// There are several types of tracking requirements that can be configured via +// options: +// IoU: Verifies that IoU of the previous frame rectangle and current frame +// re-crop rectangle is less than a given threshold. +// Rect parameters: Verifies that rotation/translation/scale of the re-crop +// rectangle on the current frame is close to the rectangle from the +// previous frame within given thresholds. +// Landmarks: Verifies that landmarks from the previous frame are within +// the re-crop rectangle on the current frame. +// +// Inputs: +// PREV_LANDMARKS: Object landmarks from the previous frame. +// PREV_LANDMARKS_RECT: Object rectangle based on the landmarks from the +// previous frame. +// RECROP_RECT: Object re-crop rectangle from the current frame. +// IMAGE_SIZE: Image size to transform normalized coordinates to absolute. +// +// Outputs: +// TRACKING_RECT: Rectangle to use for object prediction on the current frame. +// It will be either object rectangle from the previous frame (if all +// tracking requirements are satisfied) or re-crop rectangle from the +// current frame (if tracking lost the object). 
+//
+// Example config:
+//   node {
+//     calculator: "RoiTrackingCalculator"
+//     input_stream: "PREV_LANDMARKS:prev_hand_landmarks"
+//     input_stream: "PREV_LANDMARKS_RECT:prev_hand_landmarks_rect"
+//     input_stream: "RECROP_RECT:hand_recrop_rect"
+//     input_stream: "IMAGE_SIZE:image_size"
+//     output_stream: "TRACKING_RECT:hand_tracking_rect"
+//     options: {
+//       [mediapipe.RoiTrackingCalculatorOptions.ext] {
+//         rect_requirements: {
+//           rotation_degrees: 40.0
+//           translation: 0.2
+//           scale: 0.4
+//         }
+//         landmarks_requirements: {
+//           recrop_rect_margin: -0.1
+//         }
+//       }
+//     }
+//   }
+class RoiTrackingCalculator : public CalculatorBase {
+ public:
+  static mediapipe::Status GetContract(CalculatorContract* cc);
+  mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
+
+ private:
+  RoiTrackingCalculatorOptions options_;
+};
+REGISTER_CALCULATOR(RoiTrackingCalculator);
+
+mediapipe::Status RoiTrackingCalculator::GetContract(CalculatorContract* cc) {
+  cc->Inputs().Tag(kPrevLandmarksTag).Set<NormalizedLandmarkList>();
+  cc->Inputs().Tag(kPrevLandmarksRectTag).Set<NormalizedRect>();
+  cc->Inputs().Tag(kRecropRectTag).Set<NormalizedRect>();
+  cc->Inputs().Tag(kImageSizeTag).Set<std::pair<int, int>>();
+  cc->Outputs().Tag(kTrackingRectTag).Set<NormalizedRect>();
+
+  return mediapipe::OkStatus();
+}
+
+mediapipe::Status RoiTrackingCalculator::Open(CalculatorContext* cc) {
+  cc->SetOffset(TimestampDiff(0));
+  options_ = cc->Options<RoiTrackingCalculatorOptions>();
+  return mediapipe::OkStatus();
+}
+
+mediapipe::Status RoiTrackingCalculator::Process(CalculatorContext* cc) {
+  // If there is no current frame re-crop rect (i.e. object is not present on
+  // the current frame) - return empty packet.
+  if (cc->Inputs().Tag(kRecropRectTag).IsEmpty()) {
+    return mediapipe::OkStatus();
+  }
+
+  // If there is no previous rect, but there is current re-crop rect - return
+  // current re-crop rect as is.
+  if (cc->Inputs().Tag(kPrevLandmarksRectTag).IsEmpty()) {
+    cc->Outputs()
+        .Tag(kTrackingRectTag)
+        .AddPacket(cc->Inputs().Tag(kRecropRectTag).Value());
+    return mediapipe::OkStatus();
+  }
+
+  // At this point we have both the previous rect (which also means we have
+  // previous landmarks) and the current re-crop rect.
+  const auto& prev_landmarks =
+      cc->Inputs().Tag(kPrevLandmarksTag).Get<NormalizedLandmarkList>();
+  const auto& prev_rect =
+      cc->Inputs().Tag(kPrevLandmarksRectTag).Get<NormalizedRect>();
+  const auto& recrop_rect =
+      cc->Inputs().Tag(kRecropRectTag).Get<NormalizedRect>();
+  const auto& image_size =
+      cc->Inputs().Tag(kImageSizeTag).Get<std::pair<int, int>>();
+
+  // Keep tracking unless one of the requirements below is not satisfied.
+  bool keep_tracking = true;
+
+  // If IoU of the previous rect and current re-crop rect is lower than allowed
+  // threshold - use current re-crop rect.
+  if (options_.has_iou_requirements() &&
+      !IouRequirementsSatisfied(prev_rect, recrop_rect, image_size,
+                                options_.iou_requirements().min_iou())) {
+    keep_tracking = false;
+  }
+
+  // If previous rect and current re-crop rect differ more than it is allowed by
+  // the augmentations (used during the model training) - use current re-crop
+  // rect.
+  if (options_.has_rect_requirements() &&
+      !RectRequirementsSatisfied(
+          prev_rect, recrop_rect, image_size,
+          options_.rect_requirements().rotation_degrees(),
+          options_.rect_requirements().translation(),
+          options_.rect_requirements().scale())) {
+    keep_tracking = false;
+  }
+
+  // If landmarks from the previous frame are not in the current re-crop rect
+  // (i.e. object moved too fast and using previous frame rect won't cover
+  // landmarks on the current frame) - use current re-crop rect.
+  if (options_.has_landmarks_requirements() &&
+      !LandmarksRequirementsSatisfied(
+          prev_landmarks, recrop_rect, image_size,
+          options_.landmarks_requirements().recrop_rect_margin())) {
+    keep_tracking = false;
+  }
+
+  // If the object didn't move much compared with the previous frame - keep
+  // tracking it and return the rect from the previous frame; otherwise -
+  // return the re-crop rect from the current frame.
+  if (keep_tracking) {
+    cc->Outputs()
+        .Tag(kTrackingRectTag)
+        .AddPacket(cc->Inputs().Tag(kPrevLandmarksRectTag).Value());
+  } else {
+    cc->Outputs()
+        .Tag(kTrackingRectTag)
+        .AddPacket(cc->Inputs().Tag(kRecropRectTag).Value());
+    VLOG(1) << "Lost tracking: check messages above for details";
+  }
+
+  return mediapipe::OkStatus();
+}
+
+}  // namespace mediapipe
diff --git a/mediapipe/modules/holistic_landmark/calculators/roi_tracking_calculator.proto b/mediapipe/modules/holistic_landmark/calculators/roi_tracking_calculator.proto
new file mode 100644
index 000000000..ec3cf227b
--- /dev/null
+++ b/mediapipe/modules/holistic_landmark/calculators/roi_tracking_calculator.proto
@@ -0,0 +1,59 @@
+// Copyright 2020 The MediaPipe Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+syntax = "proto2";
+
+package mediapipe;
+
+import "mediapipe/framework/calculator.proto";
+
+message RoiTrackingCalculatorOptions {
+  extend CalculatorOptions {
+    optional RoiTrackingCalculatorOptions ext = 329994630;
+  }
+
+  // Verifies that Intersection over Union of the previous frame rect and the
+  // current frame re-crop rect does not drop below the threshold.
+  message IouRequirements {
+    optional float min_iou = 1 [default = 0.5];
+  }
+
+  // Verifies that current frame re-crop rect rotation/translation/scale didn't
+  // change much compared with the previous frame rect.
+  message RectRequirements {
+    // Allowed rotation change defined in degrees.
+    optional float rotation_degrees = 1 [default = 10.0];
+
+    // Allowed translation change defined as absolute translation normalized by
+    // re-crop rectangle size.
+    optional float translation = 2 [default = 0.1];
+
+    // Allowed scale change defined as absolute size difference normalized by
+    // re-crop rectangle size.
+    optional float scale = 3 [default = 0.1];
+  }
+
+  // Verifies that landmarks from the previous frame are within re-crop
+  // rectangle bounds on the current frame.
+  message LandmarksRequirements {
+    // Margin to apply to the re-crop rectangle before verifying landmarks.
+    optional float recrop_rect_margin = 1 [default = 0.0];
+  }
+
+  optional IouRequirements iou_requirements = 1;
+
+  optional RectRequirements rect_requirements = 2;
+
+  optional LandmarksRequirements landmarks_requirements = 3;
+}
diff --git a/mediapipe/modules/holistic_landmark/face_detection_front_detections_to_roi.pbtxt b/mediapipe/modules/holistic_landmark/face_detection_front_detections_to_roi.pbtxt
new file mode 100644
index 000000000..9d1190730
--- /dev/null
+++ b/mediapipe/modules/holistic_landmark/face_detection_front_detections_to_roi.pbtxt
@@ -0,0 +1,48 @@
+# Calculates ROI from detections provided by `face_detection_front.tflite`
+# model.
+type: "FaceDetectionFrontDetectionsToRoi"
+
+# Detected faces. (std::vector<Detection>)
+input_stream: "DETECTIONS:detections"
+# Image size (width & height). (std::pair<int, int>)
+input_stream: "IMAGE_SIZE:image_size"
+
+# Refined (more accurate) ROI to use for face landmarks prediction.
+# (NormalizedRect)
+output_stream: "ROI:roi"
+
+# Converts the face detection into a rectangle (normalized by image size)
+# that encloses the face and is rotated such that the line connecting right
+# side of the right eye and left side of the left eye is aligned with the
+# X-axis of the rectangle.
+node {
+  calculator: "DetectionsToRectsCalculator"
+  input_stream: "DETECTIONS:detections"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "NORM_RECT:raw_roi"
+  options: {
+    [mediapipe.DetectionsToRectsCalculatorOptions.ext] {
+      rotation_vector_start_keypoint_index: 0  # Right eye.
+      rotation_vector_end_keypoint_index: 1  # Left eye.
+      rotation_vector_target_angle_degrees: 0
+      conversion_mode: USE_KEYPOINTS
+    }
+  }
+}
+
+# Expands and shifts the rectangle that contains the face so that it's likely
+# to cover the entire face.
+node {
+  calculator: "RectTransformationCalculator"
+  input_stream: "NORM_RECT:raw_roi"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "roi"
+  options: {
+    [mediapipe.RectTransformationCalculatorOptions.ext] {
+      scale_x: 2.0
+      scale_y: 2.0
+      shift_y: -0.1
+      square_long: true
+    }
+  }
+}
diff --git a/mediapipe/modules/holistic_landmark/face_landmarks_from_pose_cpu.pbtxt b/mediapipe/modules/holistic_landmark/face_landmarks_from_pose_cpu.pbtxt
new file mode 100644
index 000000000..e8b18c5a2
--- /dev/null
+++ b/mediapipe/modules/holistic_landmark/face_landmarks_from_pose_cpu.pbtxt
@@ -0,0 +1,76 @@
+# Predicts face landmarks within an ROI derived from face-related pose
+# landmarks.
+
+type: "FaceLandmarksFromPoseCpu"
+
+# CPU image. (ImageFrame)
+input_stream: "IMAGE:input_video"
+# Face-related pose landmarks. (NormalizedLandmarkList)
+input_stream: "FACE_LANDMARKS_FROM_POSE:face_landmarks_from_pose"
+
+# Face landmarks. (NormalizedLandmarkList)
+output_stream: "FACE_LANDMARKS:face_landmarks"
+
+# Debug outputs.
+# Face ROI derived from face-related pose landmarks, which defines the search
+# region for the face detection model. (NormalizedRect)
+output_stream: "FACE_ROI_FROM_POSE:face_roi_from_pose"
+# Refined face crop rectangle predicted by face detection model.
+# (NormalizedRect)
+output_stream: "FACE_ROI_FROM_DETECTION:face_roi_from_detection"
+# Rectangle used to predict face landmarks. (NormalizedRect)
+output_stream: "FACE_TRACKING_ROI:face_tracking_roi"
+
+# TODO: do not predict face when most of the face landmarks from
+# pose are invisible.
+
+# Extracts image size from the input images.
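+# (SIZE is a std::pair<int, int> of {width, height}; a 640x480 frame, for
+# example, yields {640, 480}.)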
+node {
+  calculator: "ImagePropertiesCalculator"
+  input_stream: "IMAGE:input_video"
+  output_stream: "SIZE:image_size"
+}
+
+# Gets ROI for re-crop model from face-related pose landmarks.
+node {
+  calculator: "FaceLandmarksFromPoseToRecropRoi"
+  input_stream: "FACE_LANDMARKS_FROM_POSE:face_landmarks_from_pose"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "ROI:face_roi_from_pose"
+}
+
+# Detects faces within the face ROI calculated from pose landmarks. This is
+# done to refine face ROI for further landmark detection as ROI calculated
+# from pose landmarks may be inaccurate.
+node {
+  calculator: "FaceDetectionFrontByRoiCpu"
+  input_stream: "IMAGE:input_video"
+  input_stream: "ROI:face_roi_from_pose"
+  output_stream: "DETECTIONS:face_detections"
+}
+
+# Calculates refined face ROI.
+node {
+  calculator: "FaceDetectionFrontDetectionsToRoi"
+  input_stream: "DETECTIONS:face_detections"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "ROI:face_roi_from_detection"
+}
+
+# Gets face tracking rectangle (either face rectangle from the previous
+# frame or face re-crop rectangle from the current frame) for face prediction.
+node {
+  calculator: "FaceTracking"
+  input_stream: "LANDMARKS:face_landmarks"
+  input_stream: "FACE_RECROP_ROI:face_roi_from_detection"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "FACE_TRACKING_ROI:face_tracking_roi"
+}
+
+# Predicts face landmarks from the tracking rectangle.
+node {
+  calculator: "FaceLandmarkCpu"
+  input_stream: "IMAGE:input_video"
+  input_stream: "ROI:face_tracking_roi"
+  output_stream: "LANDMARKS:face_landmarks"
+}
diff --git a/mediapipe/modules/holistic_landmark/face_landmarks_from_pose_gpu.pbtxt b/mediapipe/modules/holistic_landmark/face_landmarks_from_pose_gpu.pbtxt
new file mode 100644
index 000000000..f5b2f4263
--- /dev/null
+++ b/mediapipe/modules/holistic_landmark/face_landmarks_from_pose_gpu.pbtxt
@@ -0,0 +1,76 @@
+# Predicts face landmarks within an ROI derived from face-related pose
+# landmarks.
+
+type: "FaceLandmarksFromPoseGpu"
+
+# GPU image. (GpuBuffer)
+input_stream: "IMAGE:input_video"
+# Face-related pose landmarks. (NormalizedLandmarkList)
+input_stream: "FACE_LANDMARKS_FROM_POSE:face_landmarks_from_pose"
+
+# Face landmarks. (NormalizedLandmarkList)
+output_stream: "FACE_LANDMARKS:face_landmarks"
+
+# Debug outputs.
+# Face ROI derived from face-related pose landmarks, which defines the search
+# region for the face detection model. (NormalizedRect)
+output_stream: "FACE_ROI_FROM_POSE:face_roi_from_pose"
+# Refined face crop rectangle predicted by face detection model.
+# (NormalizedRect)
+output_stream: "FACE_ROI_FROM_DETECTION:face_roi_from_detection"
+# Rectangle used to predict face landmarks. (NormalizedRect)
+output_stream: "FACE_TRACKING_ROI:face_tracking_roi"
+
+# TODO: do not predict face when most of the face landmarks from
+# pose are invisible.
+
+# Extracts image size from the input images.
+node {
+  calculator: "ImagePropertiesCalculator"
+  input_stream: "IMAGE_GPU:input_video"
+  output_stream: "SIZE:image_size"
+}
+
+# Gets ROI for re-crop model from face-related pose landmarks.
+node {
+  calculator: "FaceLandmarksFromPoseToRecropRoi"
+  input_stream: "FACE_LANDMARKS_FROM_POSE:face_landmarks_from_pose"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "ROI:face_roi_from_pose"
+}
+
+# Detects faces within the face ROI calculated from pose landmarks. This is
+# done to refine face ROI for further landmark detection as ROI calculated
+# from pose landmarks may be inaccurate.
+node {
+  calculator: "FaceDetectionFrontByRoiGpu"
+  input_stream: "IMAGE:input_video"
+  input_stream: "ROI:face_roi_from_pose"
+  output_stream: "DETECTIONS:face_detections"
+}
+
+# Calculates refined face ROI.
+node {
+  calculator: "FaceDetectionFrontDetectionsToRoi"
+  input_stream: "DETECTIONS:face_detections"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "ROI:face_roi_from_detection"
+}
+
+# Gets face tracking rectangle (either face rectangle from the previous
+# frame or face re-crop rectangle from the current frame) for face prediction.
+node {
+  calculator: "FaceTracking"
+  input_stream: "LANDMARKS:face_landmarks"
+  input_stream: "FACE_RECROP_ROI:face_roi_from_detection"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "FACE_TRACKING_ROI:face_tracking_roi"
+}
+
+# Predicts face landmarks from the tracking rectangle.
+node {
+  calculator: "FaceLandmarkGpu"
+  input_stream: "IMAGE:input_video"
+  input_stream: "ROI:face_tracking_roi"
+  output_stream: "LANDMARKS:face_landmarks"
+}
diff --git a/mediapipe/modules/holistic_landmark/face_landmarks_from_pose_to_recrop_roi.pbtxt b/mediapipe/modules/holistic_landmark/face_landmarks_from_pose_to_recrop_roi.pbtxt
new file mode 100644
index 000000000..65bd340e7
--- /dev/null
+++ b/mediapipe/modules/holistic_landmark/face_landmarks_from_pose_to_recrop_roi.pbtxt
@@ -0,0 +1,51 @@
+# Converts face-related pose landmarks to re-crop ROI.
+
+type: "FaceLandmarksFromPoseToRecropRoi"
+
+# Face-related pose landmarks (there should be 11 of them).
+# (NormalizedLandmarkList)
+input_stream: "FACE_LANDMARKS_FROM_POSE:face_landmarks_from_pose"
+# Image size (width & height). (std::pair<int, int>)
+input_stream: "IMAGE_SIZE:image_size"
+
+# ROI to be used for face detection. (NormalizedRect)
+output_stream: "ROI:roi"
+
+# Converts face-related pose landmarks to a detection that tightly encloses all
+# landmarks.
+node {
+  calculator: "LandmarksToDetectionCalculator"
+  input_stream: "NORM_LANDMARKS:face_landmarks_from_pose"
+  output_stream: "DETECTION:pose_face_detection"
+}
+
+# Converts face detection to a normalized face rectangle.
+node {
+  calculator: "DetectionsToRectsCalculator"
+  input_stream: "DETECTION:pose_face_detection"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "NORM_RECT:pose_face_rect"
+  options: {
+    [mediapipe.DetectionsToRectsCalculatorOptions.ext] {
+      rotation_vector_start_keypoint_index: 5  # Right eye.
+      rotation_vector_end_keypoint_index: 2  # Left eye.
+      rotation_vector_target_angle_degrees: 0
+    }
+  }
+}
+
+# Expands face rectangle so that it becomes big enough for face detector to
+# localize it accurately.
+node {
+  calculator: "RectTransformationCalculator"
+  input_stream: "NORM_RECT:pose_face_rect"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "roi"
+  options: {
+    [mediapipe.RectTransformationCalculatorOptions.ext] {
+      scale_x: 3.0
+      scale_y: 3.0
+      square_long: true
+    }
+  }
+}
diff --git a/mediapipe/modules/holistic_landmark/face_landmarks_to_roi.pbtxt b/mediapipe/modules/holistic_landmark/face_landmarks_to_roi.pbtxt
new file mode 100644
index 000000000..8913cc1bd
--- /dev/null
+++ b/mediapipe/modules/holistic_landmark/face_landmarks_to_roi.pbtxt
@@ -0,0 +1,53 @@
+# Converts face landmarks to ROI.
+
+type: "FaceLandmarksToRoi"
+
+# Face landmarks. (NormalizedLandmarkList)
+input_stream: "LANDMARKS:face_landmarks"
+# Image size (width & height). (std::pair<int, int>)
+input_stream: "IMAGE_SIZE:image_size"
+
+# ROI according to landmarks. (NormalizedRect)
+output_stream: "ROI:roi"
+
+# Converts face landmarks to a detection that tightly encloses all landmarks.
+node {
+  calculator: "LandmarksToDetectionCalculator"
+  input_stream: "NORM_LANDMARKS:face_landmarks"
+  output_stream: "DETECTION:face_detection"
+}
+
+# Converts the face detection into a rectangle (normalized by image size)
+# that encloses the face and is rotated such that the line connecting the two
+# eye landmarks below is aligned with the X-axis of the rectangle.
+node {
+  calculator: "DetectionsToRectsCalculator"
+  input_stream: "DETECTION:face_detection"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "NORM_RECT:face_landmarks_rect_tight"
+  options: {
+    [mediapipe.DetectionsToRectsCalculatorOptions.ext] {
+      rotation_vector_start_keypoint_index: 33  # Right side of left eye.
+      rotation_vector_end_keypoint_index: 263  # Left side of right eye.
+      rotation_vector_target_angle_degrees: 0
+    }
+  }
+}
+
+# Expands the face rectangle so that it's likely to contain the face even with
+# some motion.
+node {
+  calculator: "RectTransformationCalculator"
+  input_stream: "NORM_RECT:face_landmarks_rect_tight"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "roi"
+  options: {
+    [mediapipe.RectTransformationCalculatorOptions.ext] {
+      scale_x: 1.5
+      scale_y: 1.5
+      # TODO: remove `square_long` where appropriate.
+      square_long: true
+    }
+  }
+}
diff --git a/mediapipe/modules/holistic_landmark/face_tracking.pbtxt b/mediapipe/modules/holistic_landmark/face_tracking.pbtxt
new file mode 100644
index 000000000..53022d3ef
--- /dev/null
+++ b/mediapipe/modules/holistic_landmark/face_tracking.pbtxt
@@ -0,0 +1,61 @@
+# Decides what ROI to use for face landmarks prediction: either previous frame
+# landmarks ROI or the current frame face re-crop ROI.
+
+type: "FaceTracking"
+
+# Face landmarks from the current frame. They will be memorized for tracking on
+# the next frame. (NormalizedLandmarkList)
+input_stream: "LANDMARKS:face_landmarks"
+# Face re-crop ROI from the current frame. (NormalizedRect)
+input_stream: "FACE_RECROP_ROI:face_recrop_roi"
+# Image size (width & height). (std::pair<int, int>)
+input_stream: "IMAGE_SIZE:image_size"
+
+# Face tracking ROI, which is either the face landmarks ROI from the previous
+# frame if the face is still tracked, or the face re-crop ROI from the current
+# frame otherwise. (NormalizedRect)
+output_stream: "FACE_TRACKING_ROI:face_tracking_roi"
+
+# Keeps track of face landmarks from the previous frame.
+node {
+  calculator: "PreviousLoopbackCalculator"
+  input_stream: "MAIN:image_size"
+  input_stream: "LOOP:face_landmarks"
+  input_stream_info: {
+    tag_index: "LOOP"
+    back_edge: true
+  }
+  output_stream: "PREV_LOOP:prev_face_landmarks"
+}
+
+# Gets face landmarks rect.
+node {
+  calculator: "FaceLandmarksToRoi"
+  input_stream: "LANDMARKS:prev_face_landmarks"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "ROI:prev_face_landmarks_rect"
+}
+
+# Checks that all requirements for tracking are satisfied and, in that case,
+# uses the face rectangle from the previous frame. Otherwise, uses the face
+# re-crop rectangle from the current frame.
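+# (Note that the face uses stricter requirements than the hands:
+# rotation_degrees 15 vs. 40, translation 0.1 vs. 0.2, and scale 0.3 vs. 0.4
+# in hand_tracking.pbtxt.)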
+node {
+  calculator: "RoiTrackingCalculator"
+  input_stream: "PREV_LANDMARKS:prev_face_landmarks"
+  input_stream: "PREV_LANDMARKS_RECT:prev_face_landmarks_rect"
+  input_stream: "RECROP_RECT:face_recrop_roi"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "TRACKING_RECT:face_tracking_roi"
+  options: {
+    [mediapipe.RoiTrackingCalculatorOptions.ext] {
+      rect_requirements: {
+        rotation_degrees: 15.0
+        translation: 0.1
+        scale: 0.3
+      }
+      landmarks_requirements: {
+        recrop_rect_margin: -0.2
+      }
+    }
+  }
+}
diff --git a/mediapipe/modules/holistic_landmark/hand_landmarks_from_pose_cpu.pbtxt b/mediapipe/modules/holistic_landmark/hand_landmarks_from_pose_cpu.pbtxt
new file mode 100644
index 000000000..0a44bcbcc
--- /dev/null
+++ b/mediapipe/modules/holistic_landmark/hand_landmarks_from_pose_cpu.pbtxt
@@ -0,0 +1,78 @@
+# Predicts hand landmarks within a ROI derived from hand-related pose landmarks.
+
+type: "HandLandmarksFromPoseCpu"
+
+# CPU image. (ImageFrame)
+input_stream: "IMAGE:input_video"
+# Hand-related pose landmarks in [wrist, pinky, index] order.
+# (NormalizedLandmarkList)
+input_stream: "HAND_LANDMARKS_FROM_POSE:hand_landmarks_from_pose"
+
+# Hand landmarks. (NormalizedLandmarkList)
+output_stream: "HAND_LANDMARKS:hand_landmarks"
+
+# Debug outputs.
+# Hand ROI derived from hand-related landmarks, which defines the search region
+# for the hand re-crop model. (NormalizedRect)
+output_stream: "HAND_ROI_FROM_POSE:hand_roi_from_pose"
+# Refined hand crop rectangle predicted by hand re-crop model. (NormalizedRect)
+output_stream: "HAND_ROI_FROM_RECROP:hand_roi_from_recrop"
+# Rectangle used to predict hand landmarks. (NormalizedRect)
+output_stream: "HAND_TRACKING_ROI:hand_tracking_roi"
+
+# Gets hand visibility.
+node {
+  calculator: "HandVisibilityFromHandLandmarksFromPose"
+  input_stream: "HAND_LANDMARKS_FROM_POSE:hand_landmarks_from_pose"
+  output_stream: "VISIBILITY:hand_visibility"
+}
+
+# Drops hand-related pose landmarks if the pose wrist is not visible. This
+# prevents predicting hand landmarks on the current frame.
+node {
+  calculator: "GateCalculator"
+  input_stream: "hand_landmarks_from_pose"
+  input_stream: "ALLOW:hand_visibility"
+  output_stream: "ensured_hand_landmarks_from_pose"
+}
+
+# Extracts image size from the input images.
+node {
+  calculator: "ImagePropertiesCalculator"
+  input_stream: "IMAGE:input_video"
+  output_stream: "SIZE:image_size"
+}
+
+# Gets ROI for re-crop model from hand-related pose landmarks.
+node {
+  calculator: "HandLandmarksFromPoseToRecropRoi"
+  input_stream: "HAND_LANDMARKS_FROM_POSE:ensured_hand_landmarks_from_pose"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "ROI:hand_roi_from_pose"
+}
+
+# Predicts hand re-crop rectangle on the current frame.
+node {
+  calculator: "HandRecropByRoiCpu"
+  input_stream: "IMAGE:input_video"
+  input_stream: "ROI:hand_roi_from_pose"
+  output_stream: "HAND_ROI_FROM_RECROP:hand_roi_from_recrop"
+}
+
+# Gets hand tracking rectangle (either hand rectangle from the previous
+# frame or hand re-crop rectangle from the current frame) for hand prediction.
+node {
+  calculator: "HandTracking"
+  input_stream: "LANDMARKS:hand_landmarks"
+  input_stream: "HAND_ROI_FROM_RECROP:hand_roi_from_recrop"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "HAND_TRACKING_ROI:hand_tracking_roi"
+}
+
+# Predicts hand landmarks from the tracking rectangle.
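+# (Note that hand_landmarks is consumed by HandTracking above before it is
+# produced by this node; the cycle is resolved inside HandTracking by
+# PreviousLoopbackCalculator's back edge, so tracking always uses landmarks
+# from the previous frame.)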
+node {
+  calculator: "HandLandmarkCpu"
+  input_stream: "IMAGE:input_video"
+  input_stream: "ROI:hand_tracking_roi"
+  output_stream: "LANDMARKS:hand_landmarks"
+}
diff --git a/mediapipe/modules/holistic_landmark/hand_landmarks_from_pose_gpu.pbtxt b/mediapipe/modules/holistic_landmark/hand_landmarks_from_pose_gpu.pbtxt
new file mode 100644
index 000000000..0296e7dc1
--- /dev/null
+++ b/mediapipe/modules/holistic_landmark/hand_landmarks_from_pose_gpu.pbtxt
@@ -0,0 +1,78 @@
+# Predicts hand landmarks within a ROI derived from hand-related pose landmarks.
+
+type: "HandLandmarksFromPoseGpu"
+
+# GPU image. (GpuBuffer)
+input_stream: "IMAGE:input_video"
+# Hand-related pose landmarks in [wrist, pinky, index] order.
+# (NormalizedLandmarkList)
+input_stream: "HAND_LANDMARKS_FROM_POSE:hand_landmarks_from_pose"
+
+# Hand landmarks. (NormalizedLandmarkList)
+output_stream: "HAND_LANDMARKS:hand_landmarks"
+
+# Debug outputs.
+# Hand ROI derived from hand-related landmarks, which defines the search region
+# for the hand re-crop model. (NormalizedRect)
+output_stream: "HAND_ROI_FROM_POSE:hand_roi_from_pose"
+# Refined hand crop rectangle predicted by hand re-crop model. (NormalizedRect)
+output_stream: "HAND_ROI_FROM_RECROP:hand_roi_from_recrop"
+# Rectangle used to predict hand landmarks. (NormalizedRect)
+output_stream: "HAND_TRACKING_ROI:hand_tracking_roi"
+
+# Gets hand visibility.
+node {
+  calculator: "HandVisibilityFromHandLandmarksFromPose"
+  input_stream: "HAND_LANDMARKS_FROM_POSE:hand_landmarks_from_pose"
+  output_stream: "VISIBILITY:hand_visibility"
+}
+
+# Drops hand-related pose landmarks if the pose wrist is not visible. This
+# prevents predicting hand landmarks on the current frame.
+node {
+  calculator: "GateCalculator"
+  input_stream: "hand_landmarks_from_pose"
+  input_stream: "ALLOW:hand_visibility"
+  output_stream: "ensured_hand_landmarks_from_pose"
+}
+
+# Extracts image size from the input images.
+node {
+  calculator: "ImagePropertiesCalculator"
+  input_stream: "IMAGE_GPU:input_video"
+  output_stream: "SIZE:image_size"
+}
+
+# Gets ROI for re-crop model from hand-related pose landmarks.
+node {
+  calculator: "HandLandmarksFromPoseToRecropRoi"
+  input_stream: "HAND_LANDMARKS_FROM_POSE:ensured_hand_landmarks_from_pose"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "ROI:hand_roi_from_pose"
+}
+
+# Predicts hand re-crop rectangle on the current frame.
+node {
+  calculator: "HandRecropByRoiGpu"
+  input_stream: "IMAGE:input_video"
+  input_stream: "ROI:hand_roi_from_pose"
+  output_stream: "HAND_ROI_FROM_RECROP:hand_roi_from_recrop"
+}
+
+# Gets hand tracking rectangle (either hand rectangle from the previous
+# frame or hand re-crop rectangle from the current frame) for hand prediction.
+node {
+  calculator: "HandTracking"
+  input_stream: "LANDMARKS:hand_landmarks"
+  input_stream: "HAND_ROI_FROM_RECROP:hand_roi_from_recrop"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "HAND_TRACKING_ROI:hand_tracking_roi"
+}
+
+# Predicts hand landmarks from the tracking rectangle.
+node {
+  calculator: "HandLandmarkGpu"
+  input_stream: "IMAGE:input_video"
+  input_stream: "ROI:hand_tracking_roi"
+  output_stream: "LANDMARKS:hand_landmarks"
+}
diff --git a/mediapipe/modules/holistic_landmark/hand_landmarks_from_pose_to_recrop_roi.pbtxt b/mediapipe/modules/holistic_landmark/hand_landmarks_from_pose_to_recrop_roi.pbtxt
new file mode 100644
index 000000000..1c2cfe50f
--- /dev/null
+++ b/mediapipe/modules/holistic_landmark/hand_landmarks_from_pose_to_recrop_roi.pbtxt
@@ -0,0 +1,45 @@
+# Converts hand-related pose landmarks to hand re-crop ROI.
+
+type: "HandLandmarksFromPoseToRecropRoi"
+
+# Hand-related pose landmarks in [wrist, pinky, index] order.
+# (NormalizedLandmarkList)
+input_stream: "HAND_LANDMARKS_FROM_POSE:hand_landmarks_from_pose"
+# Image size (width & height). (std::pair<int, int>)
+input_stream: "IMAGE_SIZE:image_size"
+
+# ROI to be used for re-crop prediction. (NormalizedRect)
+output_stream: "ROI:roi"
+
+# Converts hand-related pose landmarks to a detection that tightly encloses all
+# of them.
+node {
+  calculator: "LandmarksToDetectionCalculator"
+  input_stream: "NORM_LANDMARKS:hand_landmarks_from_pose"
+  output_stream: "DETECTION:hand_detection_from_pose"
+}
+
+# Converts hand detection to a normalized hand rectangle.
+node {
+  calculator: "HandDetectionsFromPoseToRectsCalculator"
+  input_stream: "DETECTION:hand_detection_from_pose"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "NORM_RECT:hand_roi_from_pose"
+}
+
+# Expands the palm rectangle so that it becomes big enough for hand re-crop
+# model to localize it accurately.
+node {
+  calculator: "RectTransformationCalculator"
+  input_stream: "NORM_RECT:hand_roi_from_pose"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "roi"
+  options: {
+    [mediapipe.RectTransformationCalculatorOptions.ext] {
+      scale_x: 2.7
+      scale_y: 2.7
+      shift_y: -0.1
+      square_long: true
+    }
+  }
+}
diff --git a/mediapipe/modules/holistic_landmark/hand_landmarks_left_and_right_cpu.pbtxt b/mediapipe/modules/holistic_landmark/hand_landmarks_left_and_right_cpu.pbtxt
new file mode 100644
index 000000000..75e013382
--- /dev/null
+++ b/mediapipe/modules/holistic_landmark/hand_landmarks_left_and_right_cpu.pbtxt
@@ -0,0 +1,76 @@
+# Predicts left and right hand landmarks within corresponding ROIs derived from
+# hand-related pose landmarks.
+
+type: "HandLandmarksLeftAndRightCpu"
+
+# CPU image. (ImageFrame)
+input_stream: "IMAGE:input_video"
+# Pose landmarks to derive initial hand location from. (NormalizedLandmarkList)
+input_stream: "POSE_LANDMARKS:pose_landmarks"
+
+# Left hand landmarks. (NormalizedLandmarkList)
+output_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks"
+# Right hand landmarks. (NormalizedLandmarkList)
+output_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks"
+
+# Debug outputs.
+output_stream: "LEFT_HAND_ROI_FROM_POSE:left_hand_roi_from_pose"
+output_stream: "LEFT_HAND_ROI_FROM_RECROP:left_hand_roi_from_recrop"
+output_stream: "LEFT_HAND_TRACKING_ROI:left_hand_tracking_roi"
+output_stream: "RIGHT_HAND_ROI_FROM_POSE:right_hand_roi_from_pose"
+output_stream: "RIGHT_HAND_ROI_FROM_RECROP:right_hand_roi_from_recrop"
+output_stream: "RIGHT_HAND_TRACKING_ROI:right_hand_tracking_roi"
+
+# Extracts left-hand-related landmarks from the pose landmarks.
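+# (The ranges below select pose landmarks 15 (left wrist), 17 (left pinky)
+# and 19 (left index), which yields the [wrist, pinky, index] order expected
+# by HandLandmarksFromPoseCpu.)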
+node {
+  calculator: "SplitNormalizedLandmarkListCalculator"
+  input_stream: "pose_landmarks"
+  output_stream: "left_hand_landmarks_from_pose"
+  options: {
+    [mediapipe.SplitVectorCalculatorOptions.ext] {
+      ranges: { begin: 15 end: 16 }
+      ranges: { begin: 17 end: 18 }
+      ranges: { begin: 19 end: 20 }
+      combine_outputs: true
+    }
+  }
+}
+
+# Predicts left hand landmarks.
+node {
+  calculator: "HandLandmarksFromPoseCpu"
+  input_stream: "IMAGE:input_video"
+  input_stream: "HAND_LANDMARKS_FROM_POSE:left_hand_landmarks_from_pose"
+  output_stream: "HAND_LANDMARKS:left_hand_landmarks"
+  # Debug outputs.
+  output_stream: "HAND_ROI_FROM_POSE:left_hand_roi_from_pose"
+  output_stream: "HAND_ROI_FROM_RECROP:left_hand_roi_from_recrop"
+  output_stream: "HAND_TRACKING_ROI:left_hand_tracking_roi"
+}
+
+# Extracts right-hand-related landmarks from the pose landmarks.
+node {
+  calculator: "SplitNormalizedLandmarkListCalculator"
+  input_stream: "pose_landmarks"
+  output_stream: "right_hand_landmarks_from_pose"
+  options: {
+    [mediapipe.SplitVectorCalculatorOptions.ext] {
+      ranges: { begin: 16 end: 17 }
+      ranges: { begin: 18 end: 19 }
+      ranges: { begin: 20 end: 21 }
+      combine_outputs: true
+    }
+  }
+}
+
+# Predicts right hand landmarks.
+node {
+  calculator: "HandLandmarksFromPoseCpu"
+  input_stream: "IMAGE:input_video"
+  input_stream: "HAND_LANDMARKS_FROM_POSE:right_hand_landmarks_from_pose"
+  output_stream: "HAND_LANDMARKS:right_hand_landmarks"
+  # Debug outputs.
+  output_stream: "HAND_ROI_FROM_POSE:right_hand_roi_from_pose"
+  output_stream: "HAND_ROI_FROM_RECROP:right_hand_roi_from_recrop"
+  output_stream: "HAND_TRACKING_ROI:right_hand_tracking_roi"
+}
diff --git a/mediapipe/modules/holistic_landmark/hand_landmarks_left_and_right_gpu.pbtxt b/mediapipe/modules/holistic_landmark/hand_landmarks_left_and_right_gpu.pbtxt
new file mode 100644
index 000000000..adeec2b74
--- /dev/null
+++ b/mediapipe/modules/holistic_landmark/hand_landmarks_left_and_right_gpu.pbtxt
@@ -0,0 +1,76 @@
+# Predicts left and right hand landmarks within corresponding ROIs derived from
+# hand-related pose landmarks.
+
+type: "HandLandmarksLeftAndRightGpu"
+
+# GPU image. (GpuBuffer)
+input_stream: "IMAGE:input_video"
+# Pose landmarks to derive initial hand location from. (NormalizedLandmarkList)
+input_stream: "POSE_LANDMARKS:pose_landmarks"
+
+# Left hand landmarks. (NormalizedLandmarkList)
+output_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks"
+# Right hand landmarks. (NormalizedLandmarkList)
+output_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks"
+
+# Debug outputs.
+output_stream: "LEFT_HAND_ROI_FROM_POSE:left_hand_roi_from_pose"
+output_stream: "LEFT_HAND_ROI_FROM_RECROP:left_hand_roi_from_recrop"
+output_stream: "LEFT_HAND_TRACKING_ROI:left_hand_tracking_roi"
+output_stream: "RIGHT_HAND_ROI_FROM_POSE:right_hand_roi_from_pose"
+output_stream: "RIGHT_HAND_ROI_FROM_RECROP:right_hand_roi_from_recrop"
+output_stream: "RIGHT_HAND_TRACKING_ROI:right_hand_tracking_roi"
+
+# Extracts left-hand-related landmarks from the pose landmarks.
+node {
+  calculator: "SplitNormalizedLandmarkListCalculator"
+  input_stream: "pose_landmarks"
+  output_stream: "left_hand_landmarks_from_pose"
+  options: {
+    [mediapipe.SplitVectorCalculatorOptions.ext] {
+      ranges: { begin: 15 end: 16 }
+      ranges: { begin: 17 end: 18 }
+      ranges: { begin: 19 end: 20 }
+      combine_outputs: true
+    }
+  }
+}
+
+# Predicts left hand landmarks.
+node {
+  calculator: "HandLandmarksFromPoseGpu"
+  input_stream: "IMAGE:input_video"
+  input_stream: "HAND_LANDMARKS_FROM_POSE:left_hand_landmarks_from_pose"
+  output_stream: "HAND_LANDMARKS:left_hand_landmarks"
+  # Debug outputs.
+  output_stream: "HAND_ROI_FROM_POSE:left_hand_roi_from_pose"
+  output_stream: "HAND_ROI_FROM_RECROP:left_hand_roi_from_recrop"
+  output_stream: "HAND_TRACKING_ROI:left_hand_tracking_roi"
+}
+
+# Extracts right-hand-related landmarks from the pose landmarks.
+node {
+  calculator: "SplitNormalizedLandmarkListCalculator"
+  input_stream: "pose_landmarks"
+  output_stream: "right_hand_landmarks_from_pose"
+  options: {
+    [mediapipe.SplitVectorCalculatorOptions.ext] {
+      ranges: { begin: 16 end: 17 }
+      ranges: { begin: 18 end: 19 }
+      ranges: { begin: 20 end: 21 }
+      combine_outputs: true
+    }
+  }
+}
+
+# Predicts right hand landmarks.
+node {
+  calculator: "HandLandmarksFromPoseGpu"
+  input_stream: "IMAGE:input_video"
+  input_stream: "HAND_LANDMARKS_FROM_POSE:right_hand_landmarks_from_pose"
+  output_stream: "HAND_LANDMARKS:right_hand_landmarks"
+  # Debug outputs.
+  output_stream: "HAND_ROI_FROM_POSE:right_hand_roi_from_pose"
+  output_stream: "HAND_ROI_FROM_RECROP:right_hand_roi_from_recrop"
+  output_stream: "HAND_TRACKING_ROI:right_hand_tracking_roi"
+}
diff --git a/mediapipe/modules/holistic_landmark/hand_landmarks_to_roi.pbtxt b/mediapipe/modules/holistic_landmark/hand_landmarks_to_roi.pbtxt
new file mode 100644
index 000000000..b874c1d40
--- /dev/null
+++ b/mediapipe/modules/holistic_landmark/hand_landmarks_to_roi.pbtxt
@@ -0,0 +1,57 @@
+# Converts hand landmarks to ROI.
+
+type: "HandLandmarksToRoi"
+
+# Hand landmarks. (NormalizedLandmarkList)
+input_stream: "LANDMARKS:hand_landmarks"
+# Image size (width & height). (std::pair<int, int>)
+input_stream: "IMAGE_SIZE:image_size"
+
+# ROI according to the hand landmarks. (NormalizedRect)
+output_stream: "ROI:roi"
+
+# Gets hand palm landmarks.
+node {
+  calculator: "SplitNormalizedLandmarkListCalculator"
+  input_stream: "hand_landmarks"
+  output_stream: "palm_landmarks"
+  options: {
+    [mediapipe.SplitVectorCalculatorOptions.ext] {
+      ranges: { begin: 0 end: 4 }
+      ranges: { begin: 5 end: 7 }
+      ranges: { begin: 9 end: 11 }
+      ranges: { begin: 13 end: 15 }
+      ranges: { begin: 17 end: 19 }
+      combine_outputs: true
+    }
+  }
+}
+
+# Converts the hand landmarks into a rectangle (normalized by image size)
+# that encloses the hand. The calculator uses a subset of all hand landmarks
+# extracted from SplitNormalizedLandmarkListCalculator above to
+# calculate the bounding box and the rotation of the output rectangle. Please
+# see the comments in the calculator for more detail.
+node {
+  calculator: "HandLandmarksToRectCalculator"
+  input_stream: "NORM_LANDMARKS:palm_landmarks"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "NORM_RECT:palm_landmarks_rect"
+}
+
+# Expands the hand rectangle so that it's likely to contain the hand even with
+# some motion.
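+# (Rough numeric sketch, assuming zero rotation and that the shift is relative
+# to the unscaled rect: a square palm rect of size 0.2 centered at y = 0.5 is
+# shifted by shift_y * height = -0.02 and scaled 2x, giving a 0.4 square
+# centered at y = 0.48.)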
+node {
+  calculator: "RectTransformationCalculator"
+  input_stream: "NORM_RECT:palm_landmarks_rect"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "roi"
+  options: {
+    [mediapipe.RectTransformationCalculatorOptions.ext] {
+      scale_x: 2.0
+      scale_y: 2.0
+      shift_y: -0.1
+      square_long: true
+    }
+  }
+}
diff --git a/mediapipe/modules/holistic_landmark/hand_recrop.tflite b/mediapipe/modules/holistic_landmark/hand_recrop.tflite
new file mode 100755
index 000000000..dcfd276cb
Binary files /dev/null and b/mediapipe/modules/holistic_landmark/hand_recrop.tflite differ
diff --git a/mediapipe/modules/holistic_landmark/hand_recrop_by_roi_cpu.pbtxt b/mediapipe/modules/holistic_landmark/hand_recrop_by_roi_cpu.pbtxt
new file mode 100644
index 000000000..36efdb1b2
--- /dev/null
+++ b/mediapipe/modules/holistic_landmark/hand_recrop_by_roi_cpu.pbtxt
@@ -0,0 +1,136 @@
+# Predicts more accurate hand location (re-crop ROI) within a given ROI.
+
+type: "HandRecropByRoiCpu"
+
+# CPU image. (ImageFrame)
+input_stream: "IMAGE:input_video"
+# ROI (region of interest) within the given image where a palm/hand is located.
+# (NormalizedRect)
+input_stream: "ROI:roi"
+
+# Refined (more accurate) ROI to use for hand landmark prediction.
+# (NormalizedRect)
+output_stream: "HAND_ROI_FROM_RECROP:hand_roi_from_recrop_refined"
+
+# Transforms hand ROI from the input image to a 256x256 tensor. Preserves
+# aspect ratio, which results in letterbox padding.
+node {
+  calculator: "ImageToTensorCalculator"
+  input_stream: "IMAGE:input_video"
+  input_stream: "NORM_RECT:roi"
+  output_stream: "TENSORS:initial_crop_tensor"
+  output_stream: "LETTERBOX_PADDING:letterbox_padding"
+  options: {
+    [mediapipe.ImageToTensorCalculatorOptions.ext] {
+      output_tensor_width: 256
+      output_tensor_height: 256
+      keep_aspect_ratio: true
+      output_tensor_float_range {
+        min: 0.0
+        max: 1.0
+      }
+      # For OpenGL, the origin should be at the top-left corner.
+      gpu_origin: TOP_LEFT
+    }
+  }
+}
+
+# Predicts hand re-crop rectangle.
+node {
+  calculator: "InferenceCalculator"
+  input_stream: "TENSORS:initial_crop_tensor"
+  output_stream: "TENSORS:landmark_tensors"
+  options: {
+    [mediapipe.InferenceCalculatorOptions.ext] {
+      model_path: "mediapipe/modules/holistic_landmark/hand_recrop.tflite"
+    }
+  }
+}
+
+# Decodes the landmark tensors into a vector of landmarks, where the landmark
+# coordinates are normalized by the size of the input image to the model. Two
+# landmarks represent two virtual points: crop and scale of the new crop.
+node {
+  calculator: "TensorsToLandmarksCalculator"
+  input_stream: "TENSORS:landmark_tensors"
+  output_stream: "NORM_LANDMARKS:landmarks"
+  options: {
+    [mediapipe.TensorsToLandmarksCalculatorOptions.ext] {
+      num_landmarks: 2
+      input_image_width: 256
+      input_image_height: 256
+    }
+  }
+}
+
+# Adjusts landmarks (already normalized to [0.f, 1.f]) on the letterboxed hand
+# image (after image transformation with the FIT scale mode) to the
+# corresponding locations on the same image with the letterbox removed (hand
+# image before image transformation).
+node {
+  calculator: "LandmarkLetterboxRemovalCalculator"
+  input_stream: "LANDMARKS:landmarks"
+  input_stream: "LETTERBOX_PADDING:letterbox_padding"
+  output_stream: "LANDMARKS:scaled_landmarks"
+}
+
+# Projects the landmarks from the cropped hand image to the corresponding
+# locations on the full image before cropping (input to the graph).
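+# (Conceptually, each normalized landmark is rotated by the ROI angle around
+# the ROI center and scaled by the ROI size - a sketch of the idea; see
+# LandmarkProjectionCalculator for the exact math.)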
+node {
+  calculator: "LandmarkProjectionCalculator"
+  input_stream: "NORM_LANDMARKS:scaled_landmarks"
+  input_stream: "NORM_RECT:roi"
+  output_stream: "NORM_LANDMARKS:alignment_landmarks"
+}
+
+# Converts hand landmarks to a detection that tightly encloses all landmarks.
+node {
+  calculator: "LandmarksToDetectionCalculator"
+  input_stream: "NORM_LANDMARKS:alignment_landmarks"
+  output_stream: "DETECTION:hand_detection"
+}
+
+# Extracts image size from the input images.
+node {
+  calculator: "ImagePropertiesCalculator"
+  input_stream: "IMAGE:input_video"
+  output_stream: "SIZE:image_size"
+}
+
+# Converts hand detection into a rectangle based on center and scale alignment
+# points.
+node {
+  calculator: "AlignmentPointsRectsCalculator"
+  input_stream: "DETECTION:hand_detection"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "NORM_RECT:hand_roi_from_recrop"
+  options: {
+    [mediapipe.DetectionsToRectsCalculatorOptions.ext] {
+      rotation_vector_start_keypoint_index: 0
+      rotation_vector_end_keypoint_index: 1
+      rotation_vector_target_angle_degrees: -90
+    }
+  }
+}
+
+# TODO: revise hand recrop roi calculation.
+# Slightly moves hand re-crop rectangle from wrist towards fingertips. Due to
+# the new hand cropping logic, the crop border is too close to the fingertips
+# while a lot of space is below the wrist. When moving the hand up fast (with
+# fingers pointing up) and using the hand rect from the previous frame for
+# tracking, fingertips can be cropped. This adjustment partially solves it, but
+# the hand cropping logic should be reviewed.
+node {
+  calculator: "RectTransformationCalculator"
+  input_stream: "NORM_RECT:hand_roi_from_recrop"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "hand_roi_from_recrop_refined"
+  options: {
+    [mediapipe.RectTransformationCalculatorOptions.ext] {
+      scale_x: 1.0
+      scale_y: 1.0
+      shift_y: -0.1
+      square_long: true
+    }
+  }
+}
diff --git a/mediapipe/modules/holistic_landmark/hand_recrop_by_roi_gpu.pbtxt b/mediapipe/modules/holistic_landmark/hand_recrop_by_roi_gpu.pbtxt
new file mode 100644
index 000000000..4fa8f2929
--- /dev/null
+++ b/mediapipe/modules/holistic_landmark/hand_recrop_by_roi_gpu.pbtxt
@@ -0,0 +1,136 @@
+# Predicts more accurate hand location (re-crop ROI) within a given ROI.
+
+type: "HandRecropByRoiGpu"
+
+# GPU image. (GpuBuffer)
+input_stream: "IMAGE:input_video"
+# ROI (region of interest) within the given image where a palm/hand is located.
+# (NormalizedRect)
+input_stream: "ROI:roi"
+
+# Refined (more accurate) ROI to use for hand landmark prediction.
+# (NormalizedRect)
+output_stream: "HAND_ROI_FROM_RECROP:hand_roi_from_recrop_refined"
+
+# Transforms hand ROI from the input image to a 256x256 tensor. Preserves
+# aspect ratio, which results in letterbox padding.
+node {
+  calculator: "ImageToTensorCalculator"
+  input_stream: "IMAGE_GPU:input_video"
+  input_stream: "NORM_RECT:roi"
+  output_stream: "TENSORS:initial_crop_tensor"
+  output_stream: "LETTERBOX_PADDING:letterbox_padding"
+  options: {
+    [mediapipe.ImageToTensorCalculatorOptions.ext] {
+      output_tensor_width: 256
+      output_tensor_height: 256
+      keep_aspect_ratio: true
+      output_tensor_float_range {
+        min: 0.0
+        max: 1.0
+      }
+      # For OpenGL, the origin should be at the top-left corner.
+      gpu_origin: TOP_LEFT
+    }
+  }
+}
+
+# Predicts hand re-crop rectangle.
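+# (The two virtual keypoints are turned into a rotated rect further below by
+# AlignmentPointsRectsCalculator: keypoint 0 gives the center, and the line
+# from keypoint 0 to keypoint 1 gives rotation and size against the -90 degree
+# target angle.)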
+node {
+  calculator: "InferenceCalculator"
+  input_stream: "TENSORS:initial_crop_tensor"
+  output_stream: "TENSORS:landmark_tensors"
+  options: {
+    [mediapipe.InferenceCalculatorOptions.ext] {
+      model_path: "mediapipe/modules/holistic_landmark/hand_recrop.tflite"
+    }
+  }
+}
+
+# Decodes the landmark tensors into a vector of landmarks, where the landmark
+# coordinates are normalized by the size of the input image to the model. Two
+# landmarks represent two virtual points: crop and scale of the new crop.
+node {
+  calculator: "TensorsToLandmarksCalculator"
+  input_stream: "TENSORS:landmark_tensors"
+  output_stream: "NORM_LANDMARKS:landmarks"
+  options: {
+    [mediapipe.TensorsToLandmarksCalculatorOptions.ext] {
+      num_landmarks: 2
+      input_image_width: 256
+      input_image_height: 256
+    }
+  }
+}
+
+# Adjusts landmarks (already normalized to [0.f, 1.f]) on the letterboxed hand
+# image (after image transformation with the FIT scale mode) to the
+# corresponding locations on the same image with the letterbox removed (hand
+# image before image transformation).
+node {
+  calculator: "LandmarkLetterboxRemovalCalculator"
+  input_stream: "LANDMARKS:landmarks"
+  input_stream: "LETTERBOX_PADDING:letterbox_padding"
+  output_stream: "LANDMARKS:scaled_landmarks"
+}
+
+# Projects the landmarks from the cropped hand image to the corresponding
+# locations on the full image before cropping (input to the graph).
+node {
+  calculator: "LandmarkProjectionCalculator"
+  input_stream: "NORM_LANDMARKS:scaled_landmarks"
+  input_stream: "NORM_RECT:roi"
+  output_stream: "NORM_LANDMARKS:alignment_landmarks"
+}
+
+# Converts hand landmarks to a detection that tightly encloses all landmarks.
+node {
+  calculator: "LandmarksToDetectionCalculator"
+  input_stream: "NORM_LANDMARKS:alignment_landmarks"
+  output_stream: "DETECTION:hand_detection"
+}
+
+# Extracts image size from the input images.
+node {
+  calculator: "ImagePropertiesCalculator"
+  input_stream: "IMAGE_GPU:input_video"
+  output_stream: "SIZE:image_size"
+}
+
+# Converts hand detection into a rectangle based on center and scale alignment
+# points.
+node {
+  calculator: "AlignmentPointsRectsCalculator"
+  input_stream: "DETECTION:hand_detection"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "NORM_RECT:hand_roi_from_recrop"
+  options: {
+    [mediapipe.DetectionsToRectsCalculatorOptions.ext] {
+      rotation_vector_start_keypoint_index: 0
+      rotation_vector_end_keypoint_index: 1
+      rotation_vector_target_angle_degrees: -90
+    }
+  }
+}
+
+# TODO: revise hand recrop roi calculation.
+# Slightly moves hand re-crop rectangle from wrist towards fingertips. Due to
+# the new hand cropping logic, the crop border is too close to the fingertips
+# while a lot of space is below the wrist. When moving the hand up fast (with
+# fingers pointing up) and using the hand rect from the previous frame for
+# tracking, fingertips can be cropped. This adjustment partially solves it, but
+# the hand cropping logic should be reviewed.
+node {
+  calculator: "RectTransformationCalculator"
+  input_stream: "NORM_RECT:hand_roi_from_recrop"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "hand_roi_from_recrop_refined"
+  options: {
+    [mediapipe.RectTransformationCalculatorOptions.ext] {
+      scale_x: 1.0
+      scale_y: 1.0
+      shift_y: -0.1
+      square_long: true
+    }
+  }
+}
diff --git a/mediapipe/modules/holistic_landmark/hand_tracking.pbtxt b/mediapipe/modules/holistic_landmark/hand_tracking.pbtxt
new file mode 100644
index 000000000..07f734e49
--- /dev/null
+++ b/mediapipe/modules/holistic_landmark/hand_tracking.pbtxt
@@ -0,0 +1,63 @@
+# Decides what ROI to use for hand landmark prediction: either previous frame
+# landmarks ROI or current frame re-crop ROI.
+
+type: "HandTracking"
+
+# Hand landmarks from the current frame. They will be memorized for tracking on
+# the next frame. (NormalizedLandmarkList)
+input_stream: "LANDMARKS:hand_landmarks"
+# Hand re-crop ROI from the current frame. (NormalizedRect)
+input_stream: "HAND_ROI_FROM_RECROP:hand_roi_from_recrop"
+# Image size (width & height). (std::pair<int, int>)
+input_stream: "IMAGE_SIZE:image_size"
+
+# Hand tracking ROI, which is either the hand landmarks ROI from the previous
+# frame if the hand is still tracked, or the hand re-crop ROI from the current
+# frame otherwise. (NormalizedRect)
+output_stream: "HAND_TRACKING_ROI:hand_tracking_roi"
+
+# Keeps track of hand landmarks from the previous frame.
+node {
+  calculator: "PreviousLoopbackCalculator"
+  # TODO: check that loop works with image size instead of video.
+  input_stream: "MAIN:image_size"
+  input_stream: "LOOP:hand_landmarks"
+  input_stream_info: {
+    tag_index: "LOOP"
+    back_edge: true
+  }
+  output_stream: "PREV_LOOP:prev_hand_landmarks"
+}
+
+# Gets hand landmarks rect.
+node {
+  calculator: "HandLandmarksToRoi"
+  input_stream: "LANDMARKS:prev_hand_landmarks"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "ROI:prev_hand_landmarks_roi"
+}
+
+# Checks that all requirements for tracking are satisfied and, in that case,
+# uses the hand rectangle from the previous frame. Otherwise, uses the hand
+# re-crop rectangle from the current frame.
+node {
+  calculator: "RoiTrackingCalculator"
+  input_stream: "PREV_LANDMARKS:prev_hand_landmarks"
+  input_stream: "PREV_LANDMARKS_RECT:prev_hand_landmarks_roi"
+  input_stream: "RECROP_RECT:hand_roi_from_recrop"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "TRACKING_RECT:hand_tracking_roi"
+  options: {
+    [mediapipe.RoiTrackingCalculatorOptions.ext] {
+      rect_requirements: {
+        rotation_degrees: 40.0
+        translation: 0.2
+        # TODO: adjust scale for hand tracking.
+        scale: 0.4
+      }
+      landmarks_requirements: {
+        recrop_rect_margin: -0.1
+      }
+    }
+  }
+}
diff --git a/mediapipe/modules/holistic_landmark/hand_visibility_from_hand_landmarks_from_pose.pbtxt b/mediapipe/modules/holistic_landmark/hand_visibility_from_hand_landmarks_from_pose.pbtxt
new file mode 100644
index 000000000..02db6722f
--- /dev/null
+++ b/mediapipe/modules/holistic_landmark/hand_visibility_from_hand_landmarks_from_pose.pbtxt
@@ -0,0 +1,44 @@
+# Determines hand visibility from the visibility prediction values in the
+# hand-related pose landmarks.
+
+type: "HandVisibilityFromHandLandmarksFromPose"
+
+# Hand-related pose landmarks in [wrist, pinky, index] order.
+# (NormalizedLandmarkList)
+input_stream: "HAND_LANDMARKS_FROM_POSE:hand_landmarks_from_pose"
+
+# Hand visibility to be used as a trigger for hand landmark prediction. (bool)
+output_stream: "VISIBILITY:wrist_visibility"
+
+# Gets pose wrist landmark.
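+# (The wrist is element 0 here because HAND_LANDMARKS_FROM_POSE is provided
+# in [wrist, pinky, index] order.)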
+node {
+  calculator: "SplitNormalizedLandmarkListCalculator"
+  input_stream: "hand_landmarks_from_pose"
+  output_stream: "pose_wrist_landmark"
+  options: {
+    [mediapipe.SplitVectorCalculatorOptions.ext] {
+      ranges: { begin: 0 end: 1 }
+    }
+  }
+}
+
+# TODO: Use other than pose wrist palm landmarks.
+# Gets pose wrist visibility.
+node {
+  calculator: "LandmarkVisibilityCalculator"
+  input_stream: "NORM_LANDMARKS:pose_wrist_landmark"
+  output_stream: "VISIBILITY:wrist_visibility_score"
+}
+
+# TODO: ensure the same threshold in rendering.
+# Converts pose wrist visibility score into a boolean flag.
+node {
+  calculator: "ThresholdingCalculator"
+  input_stream: "FLOAT:wrist_visibility_score"
+  output_stream: "FLAG:wrist_visibility"
+  options: {
+    [mediapipe.ThresholdingCalculatorOptions.ext] {
+      threshold: 0.1
+    }
+  }
+}
diff --git a/mediapipe/modules/holistic_landmark/hand_wrist_for_pose.pbtxt b/mediapipe/modules/holistic_landmark/hand_wrist_for_pose.pbtxt
new file mode 100644
index 000000000..f6551bb90
--- /dev/null
+++ b/mediapipe/modules/holistic_landmark/hand_wrist_for_pose.pbtxt
@@ -0,0 +1,52 @@
+# Extracts hand wrist landmark to be used instead of pose wrist landmark.
+
+type: "HandWristForPose"
+
+# Hand landmarks to take wrist landmark from. (NormalizedLandmarkList)
+input_stream: "HAND_LANDMARKS:hand_landmarks"
+
+# Hand wrist landmark to replace original pose wrist landmark with updated
+# visibility. (NormalizedLandmarkList)
+output_stream: "WRIST_LANDMARK:hand_wrist_landmark_with_visibility"
+
+# Side packet with constant for visibility score. As the score is `x` from
+# `sigmoid(x)`, we pick a big value that doesn't affect the pose landmarks
+# visibility rendering threshold.
+node {
+  calculator: "ConstantSidePacketCalculator"
+  output_side_packet: "PACKET:0:visible_score_side_packet"
+  options: {
+    [mediapipe.ConstantSidePacketCalculatorOptions.ext]: {
+      packet { float_value: 100.0 }
+    }
+  }
+}
+
+# Converts the side packet with the visibility score to a stream.
+node {
+  calculator: "SidePacketToStreamCalculator"
+  input_stream: "TICK:hand_landmarks"
+  input_side_packet: "visible_score_side_packet"
+  output_stream: "AT_TICK:visible_score"
+}
+
+# Extracts wrist landmark from the hand landmarks.
+node {
+  calculator: "SplitNormalizedLandmarkListCalculator"
+  input_stream: "hand_landmarks"
+  output_stream: "hand_wrist_landmark"
+  options: {
+    [mediapipe.SplitVectorCalculatorOptions.ext] {
+      ranges: { begin: 0 end: 1 }
+    }
+  }
+}
+
+# Sets wrist landmark visibility score. If HAND_LANDMARKS is non-empty, the
+# wrist will always be visible.
+node {
+  calculator: "SetLandmarkVisibilityCalculator"
+  input_stream: "NORM_LANDMARKS:hand_wrist_landmark"
+  input_stream: "VISIBILITY:visible_score"
+  output_stream: "NORM_LANDMARKS:hand_wrist_landmark_with_visibility"
+}
diff --git a/mediapipe/modules/holistic_landmark/holistic_landmark_cpu.pbtxt b/mediapipe/modules/holistic_landmark/holistic_landmark_cpu.pbtxt
new file mode 100644
index 000000000..e850d7cc1
--- /dev/null
+++ b/mediapipe/modules/holistic_landmark/holistic_landmark_cpu.pbtxt
@@ -0,0 +1,114 @@
+# Predicts pose + left/right hand + face landmarks.
+#
+# It is required that:
+# - "face_detection_front.tflite" is available at
+#   "mediapipe/modules/face_detection/face_detection_front.tflite"
+#
+# - "face_landmark.tflite" is available at
+#   "mediapipe/modules/face_landmark/face_landmark.tflite"
+#
+# - "hand_landmark.tflite" is available at
+#   "mediapipe/modules/hand_landmark/hand_landmark.tflite"
+#
+# - "hand_recrop.tflite" is available at
+#   "mediapipe/modules/holistic_landmark/hand_recrop.tflite"
+#
+# - "handedness.txt" is available at
+#   "mediapipe/modules/hand_landmark/handedness.txt"
+#
+# - "pose_detection.tflite" is available at
+#   "mediapipe/modules/pose_detection/pose_detection.tflite"
+#
+# - "pose_landmark_full_body.tflite" or "pose_landmark_upper_body.tflite" is
+#   available at
+#   "mediapipe/modules/pose_landmark/pose_landmark_full_body.tflite"
+#   or
+#   "mediapipe/modules/pose_landmark/pose_landmark_upper_body.tflite"
+#   path respectively during execution, depending on the specification in the
+#   UPPER_BODY_ONLY input side packet.
+#
+# EXAMPLE:
+# node {
+#   calculator: "HolisticLandmarkCpu"
+#   input_stream: "IMAGE:input_video"
+#   input_side_packet: "UPPER_BODY_ONLY:upper_body_only"
+#   input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks"
+#   output_stream: "POSE_LANDMARKS:pose_landmarks"
+#   output_stream: "FACE_LANDMARKS:face_landmarks"
+#   output_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks"
+#   output_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks"
+# }
+#
+# NOTE: if a pose/hand/face output is not present in the image, for this
+# particular timestamp there will not be an output packet in the corresponding
+# output stream below. However, the MediaPipe framework will internally inform
+# the downstream calculators of the absence of this packet so that they don't
+# wait for it unnecessarily.
+
+type: "HolisticLandmarkCpu"
+
+# CPU image. (ImageFrame)
+input_stream: "IMAGE:image"
+
+# Whether to detect/predict the full set of pose landmarks (see below), or only
+# those on the upper body. If unspecified, functions as set to false. (bool)
+# Note that upper-body-only prediction may be more accurate for use cases where
+# the lower-body parts are mostly out of view.
+input_side_packet: "UPPER_BODY_ONLY:upper_body_only"
+# Whether to filter landmarks across different input images to reduce jitter.
+# If unspecified, functions as set to true. (bool)
+input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks"
+
+# Pose landmarks. (NormalizedLandmarkList)
+# We have 33 landmarks, or 25 landmarks if UPPER_BODY_ONLY is set to true.
+output_stream: "POSE_LANDMARKS:pose_landmarks"
+# 21 left hand landmarks. (NormalizedLandmarkList)
+output_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks"
+# 21 right hand landmarks. (NormalizedLandmarkList)
+output_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks"
+# 468 face landmarks. (NormalizedLandmarkList)
+output_stream: "FACE_LANDMARKS:face_landmarks"
+
+# Debug outputs
+output_stream: "POSE_ROI:pose_landmarks_roi"
+output_stream: "POSE_DETECTION:pose_detection"
+
+# Predicts pose landmarks.
+node {
+  calculator: "PoseLandmarkCpu"
+  input_stream: "IMAGE:image"
+  input_side_packet: "UPPER_BODY_ONLY:upper_body_only"
+  input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks"
+  output_stream: "LANDMARKS:pose_landmarks"
+  output_stream: "ROI_FROM_LANDMARKS:pose_landmarks_roi"
+  output_stream: "DETECTION:pose_detection"
+}
+
+# Predicts left and right hand landmarks based on the initial pose landmarks.
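+# (Internally it splits pose landmarks 15/17/19 for the left hand and
+# 16/18/20 for the right hand into [wrist, pinky, index] triplets; see
+# hand_landmarks_left_and_right_cpu.pbtxt above.)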
+node {
+  calculator: "HandLandmarksLeftAndRightCpu"
+  input_stream: "IMAGE:image"
+  input_stream: "POSE_LANDMARKS:pose_landmarks"
+  output_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks"
+  output_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks"
+}
+
+# Extracts face-related pose landmarks.
+node {
+  calculator: "SplitNormalizedLandmarkListCalculator"
+  input_stream: "pose_landmarks"
+  output_stream: "face_landmarks_from_pose"
+  options: {
+    [mediapipe.SplitVectorCalculatorOptions.ext] {
+      ranges: { begin: 0 end: 11 }
+    }
+  }
+}
+
+# Predicts face landmarks based on the initial pose landmarks.
+node {
+  calculator: "FaceLandmarksFromPoseCpu"
+  input_stream: "IMAGE:image"
+  input_stream: "FACE_LANDMARKS_FROM_POSE:face_landmarks_from_pose"
+  output_stream: "FACE_LANDMARKS:face_landmarks"
+}
diff --git a/mediapipe/modules/holistic_landmark/holistic_landmark_gpu.pbtxt b/mediapipe/modules/holistic_landmark/holistic_landmark_gpu.pbtxt
new file mode 100644
index 000000000..990b21d1d
--- /dev/null
+++ b/mediapipe/modules/holistic_landmark/holistic_landmark_gpu.pbtxt
@@ -0,0 +1,114 @@
+# Predicts pose + left/right hand + face landmarks.
+#
+# It is required that:
+# - "face_detection_front.tflite" is available at
+#   "mediapipe/modules/face_detection/face_detection_front.tflite"
+#
+# - "face_landmark.tflite" is available at
+#   "mediapipe/modules/face_landmark/face_landmark.tflite"
+#
+# - "hand_landmark.tflite" is available at
+#   "mediapipe/modules/hand_landmark/hand_landmark.tflite"
+#
+# - "hand_recrop.tflite" is available at
+#   "mediapipe/modules/holistic_landmark/hand_recrop.tflite"
+#
+# - "handedness.txt" is available at
+#   "mediapipe/modules/hand_landmark/handedness.txt"
+#
+# - "pose_detection.tflite" is available at
+#   "mediapipe/modules/pose_detection/pose_detection.tflite"
+#
+# - "pose_landmark_full_body.tflite" or "pose_landmark_upper_body.tflite" is
+#   available at
+#   "mediapipe/modules/pose_landmark/pose_landmark_full_body.tflite"
+#   or
+#   "mediapipe/modules/pose_landmark/pose_landmark_upper_body.tflite"
+#   path respectively during execution, depending on the specification in the
+#   UPPER_BODY_ONLY input side packet.
+#
+# EXAMPLE:
+# node {
+#   calculator: "HolisticLandmarkGpu"
+#   input_stream: "IMAGE:input_video"
+#   input_side_packet: "UPPER_BODY_ONLY:upper_body_only"
+#   input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks"
+#   output_stream: "POSE_LANDMARKS:pose_landmarks"
+#   output_stream: "FACE_LANDMARKS:face_landmarks"
+#   output_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks"
+#   output_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks"
+# }
+#
+# NOTE: if a pose/hand/face output is not present in the image, for this
+# particular timestamp there will not be an output packet in the corresponding
+# output stream below. However, the MediaPipe framework will internally inform
+# the downstream calculators of the absence of this packet so that they don't
+# wait for it unnecessarily.
+
+type: "HolisticLandmarkGpu"
+
+# GPU image. (GpuBuffer)
+input_stream: "IMAGE:image"
+
+# Whether to detect/predict the full set of pose landmarks (see below), or only
+# those on the upper body. If unspecified, functions as set to false. (bool)
+# Note that upper-body-only prediction may be more accurate for use cases where
+# the lower-body parts are mostly out of view.
+input_side_packet: "UPPER_BODY_ONLY:upper_body_only"
+# Whether to filter landmarks across different input images to reduce jitter.
+# If unspecified, functions as set to true. (bool)
+input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks"
+
+# Pose landmarks. (NormalizedLandmarkList)
+# We have 33 landmarks, or 25 landmarks if UPPER_BODY_ONLY is set to true.
+output_stream: "POSE_LANDMARKS:pose_landmarks"
+# 21 left hand landmarks. (NormalizedLandmarkList)
+output_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks"
+# 21 right hand landmarks. (NormalizedLandmarkList)
+output_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks"
+# 468 face landmarks. (NormalizedLandmarkList)
+output_stream: "FACE_LANDMARKS:face_landmarks"
+
+# Debug outputs
+output_stream: "POSE_ROI:pose_landmarks_roi"
+output_stream: "POSE_DETECTION:pose_detection"
+
+# Predicts pose landmarks.
+node {
+  calculator: "PoseLandmarkGpu"
+  input_stream: "IMAGE:image"
+  input_side_packet: "UPPER_BODY_ONLY:upper_body_only"
+  input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks"
+  output_stream: "LANDMARKS:pose_landmarks"
+  output_stream: "ROI_FROM_LANDMARKS:pose_landmarks_roi"
+  output_stream: "DETECTION:pose_detection"
+}
+
+# Predicts left and right hand landmarks based on the initial pose landmarks.
+node {
+  calculator: "HandLandmarksLeftAndRightGpu"
+  input_stream: "IMAGE:image"
+  input_stream: "POSE_LANDMARKS:pose_landmarks"
+  output_stream: "LEFT_HAND_LANDMARKS:left_hand_landmarks"
+  output_stream: "RIGHT_HAND_LANDMARKS:right_hand_landmarks"
+}
+
+# Extracts face-related pose landmarks.
+node {
+  calculator: "SplitNormalizedLandmarkListCalculator"
+  input_stream: "pose_landmarks"
+  output_stream: "face_landmarks_from_pose"
+  options: {
+    [mediapipe.SplitVectorCalculatorOptions.ext] {
+      ranges: { begin: 0 end: 11 }
+    }
+  }
+}
+
+# Predicts face landmarks based on the initial pose landmarks.
+node {
+  calculator: "FaceLandmarksFromPoseGpu"
+  input_stream: "IMAGE:image"
+  input_stream: "FACE_LANDMARKS_FROM_POSE:face_landmarks_from_pose"
+  output_stream: "FACE_LANDMARKS:face_landmarks"
+}
diff --git a/mediapipe/modules/objectron/BUILD b/mediapipe/modules/objectron/BUILD
new file mode 100644
index 000000000..3488d2268
--- /dev/null
+++ b/mediapipe/modules/objectron/BUILD
@@ -0,0 +1,168 @@
+# Copyright 2020 The MediaPipe Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
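+
+# Each mediapipe_simple_subgraph rule below compiles a .pbtxt graph into a
+# C++ library and registers it under its `register_as` name, so other graphs
+# can instantiate it as a node (e.g. calculator: "ObjectronGpuSubgraph").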
+ +load( + "//mediapipe/framework/tool:mediapipe_graph.bzl", + "mediapipe_simple_subgraph", +) + +licenses(["notice"]) + +package(default_visibility = ["//visibility:public"]) + +mediapipe_simple_subgraph( + name = "objectron_detection_1stage_gpu", + graph = "objectron_detection_1stage_gpu.pbtxt", + register_as = "ObjectronDetection1StageSubgraphGpu", + deps = [ + "//mediapipe/calculators/image:image_transformation_calculator", + "//mediapipe/calculators/tflite:tflite_converter_calculator", + "//mediapipe/calculators/tflite:tflite_custom_op_resolver_calculator", + "//mediapipe/calculators/tflite:tflite_inference_calculator", + "//mediapipe/modules/objectron/calculators:tflite_tensors_to_objects_calculator", + ], +) + +mediapipe_simple_subgraph( + name = "objectron_tracking_1stage_gpu", + graph = "objectron_tracking_1stage_gpu.pbtxt", + register_as = "ObjectronTracking1StageSubgraphGpu", + deps = [ + "//mediapipe/calculators/image:image_transformation_calculator", + "//mediapipe/calculators/video:box_tracker_calculator", + "//mediapipe/calculators/video:flow_packager_calculator", + "//mediapipe/calculators/video:motion_analysis_calculator", + "//mediapipe/framework/stream_handler:sync_set_input_stream_handler", + "//mediapipe/gpu:gpu_buffer_to_image_frame_calculator", + "//mediapipe/modules/objectron/calculators:frame_annotation_to_timed_box_list_calculator", + "//mediapipe/modules/objectron/calculators:frame_annotation_tracker_calculator", + "//mediapipe/modules/objectron/calculators:lift_2d_frame_annotation_to_3d_calculator", + ], +) + +mediapipe_simple_subgraph( + name = "box_landmark_gpu", + graph = "box_landmark_gpu.pbtxt", + register_as = "BoxLandmarkSubgraph", + deps = [ + "//mediapipe/calculators/core:split_vector_calculator", + "//mediapipe/calculators/image:image_cropping_calculator", + "//mediapipe/calculators/image:image_properties_calculator", + "//mediapipe/calculators/image:image_transformation_calculator", + "//mediapipe/calculators/tflite:tflite_converter_calculator", + "//mediapipe/calculators/tflite:tflite_custom_op_resolver_calculator", + "//mediapipe/calculators/tflite:tflite_inference_calculator", + "//mediapipe/calculators/tflite:tflite_tensors_to_floats_calculator", + "//mediapipe/calculators/tflite:tflite_tensors_to_landmarks_calculator", + "//mediapipe/calculators/util:detections_to_rects_calculator", + "//mediapipe/calculators/util:landmark_letterbox_removal_calculator", + "//mediapipe/calculators/util:landmark_projection_calculator", + "//mediapipe/calculators/util:landmarks_smoothing_calculator", + "//mediapipe/calculators/util:landmarks_to_detection_calculator", + "//mediapipe/calculators/util:rect_transformation_calculator", + "//mediapipe/calculators/util:thresholding_calculator", + "//mediapipe/modules/objectron/calculators:frame_annotation_to_rect_calculator", + "//mediapipe/modules/objectron/calculators:landmarks_to_frame_annotation_calculator", + "//mediapipe/modules/objectron/calculators:lift_2d_frame_annotation_to_3d_calculator", + ], +) + +mediapipe_simple_subgraph( + name = "box_landmark_cpu", + graph = "box_landmark_cpu.pbtxt", + register_as = "BoxLandmarkSubgraph", + deps = [ + "//mediapipe/calculators/core:split_vector_calculator", + "//mediapipe/calculators/image:image_cropping_calculator", + "//mediapipe/calculators/image:image_properties_calculator", + "//mediapipe/calculators/image:image_transformation_calculator", + "//mediapipe/calculators/tflite:tflite_converter_calculator", + 
"//mediapipe/calculators/tflite:tflite_custom_op_resolver_calculator", + "//mediapipe/calculators/tflite:tflite_inference_calculator", + "//mediapipe/calculators/tflite:tflite_tensors_to_floats_calculator", + "//mediapipe/calculators/tflite:tflite_tensors_to_landmarks_calculator", + "//mediapipe/calculators/util:landmark_letterbox_removal_calculator", + "//mediapipe/calculators/util:landmark_projection_calculator", + "//mediapipe/calculators/util:landmarks_smoothing_calculator", + "//mediapipe/calculators/util:landmarks_to_detection_calculator", + "//mediapipe/calculators/util:rect_transformation_calculator", + "//mediapipe/calculators/util:thresholding_calculator", + "//mediapipe/modules/objectron/calculators:frame_annotation_to_rect_calculator", + "//mediapipe/modules/objectron/calculators:landmarks_to_frame_annotation_calculator", + "//mediapipe/modules/objectron/calculators:lift_2d_frame_annotation_to_3d_calculator", + ], +) + +mediapipe_simple_subgraph( + name = "object_detection_oid_v4_gpu", + graph = "object_detection_oid_v4_gpu.pbtxt", + register_as = "ObjectDetectionOidV4Subgraph", + deps = [ + "//mediapipe/calculators/image:image_transformation_calculator", + "//mediapipe/calculators/tflite:ssd_anchors_calculator", + "//mediapipe/calculators/tflite:tflite_converter_calculator", + "//mediapipe/calculators/tflite:tflite_inference_calculator", + "//mediapipe/calculators/tflite:tflite_tensors_to_detections_calculator", + "//mediapipe/calculators/util:detection_label_id_to_text_calculator", + "//mediapipe/calculators/util:detections_to_rects_calculator", + "//mediapipe/calculators/util:non_max_suppression_calculator", + "//mediapipe/calculators/util:rect_transformation_calculator", + "//mediapipe/modules/objectron/calculators:filter_detection_calculator", + ], +) + +mediapipe_simple_subgraph( + name = "object_detection_oid_v4_cpu", + graph = "object_detection_oid_v4_cpu.pbtxt", + register_as = "ObjectDetectionOidV4Subgraph", + deps = [ + "//mediapipe/calculators/image:image_transformation_calculator", + "//mediapipe/calculators/tflite:ssd_anchors_calculator", + "//mediapipe/calculators/tflite:tflite_converter_calculator", + "//mediapipe/calculators/tflite:tflite_inference_calculator", + "//mediapipe/calculators/tflite:tflite_tensors_to_detections_calculator", + "//mediapipe/calculators/util:detection_label_id_to_text_calculator", + "//mediapipe/calculators/util:detections_to_rects_calculator", + "//mediapipe/calculators/util:non_max_suppression_calculator", + "//mediapipe/calculators/util:rect_transformation_calculator", + "//mediapipe/modules/objectron/calculators:filter_detection_calculator", + ], +) + +mediapipe_simple_subgraph( + name = "objectron_cpu", + graph = "objectron_cpu.pbtxt", + register_as = "ObjectronCpuSubgraph", + deps = [ + ":box_landmark_cpu", + ":object_detection_oid_v4_cpu", + "//mediapipe/calculators/core:gate_calculator", + "//mediapipe/calculators/core:merge_calculator", + "//mediapipe/calculators/core:previous_loopback_calculator", + ], +) + +mediapipe_simple_subgraph( + name = "objectron_gpu", + graph = "objectron_gpu.pbtxt", + register_as = "ObjectronGpuSubgraph", + deps = [ + ":box_landmark_gpu", + ":object_detection_oid_v4_gpu", + "//mediapipe/calculators/core:gate_calculator", + "//mediapipe/calculators/core:merge_calculator", + "//mediapipe/calculators/core:previous_loopback_calculator", + "//mediapipe/calculators/image:image_cropping_calculator", + ], +) diff --git a/mediapipe/modules/objectron/README.md b/mediapipe/modules/objectron/README.md new file 
index 000000000..00883fe3f
--- /dev/null
+++ b/mediapipe/modules/objectron/README.md
@@ -0,0 +1,6 @@
+# objectron
+
+Subgraphs|Details
+:--- | :---
+[`ObjectronCpuSubgraph`](https://github.com/google/mediapipe/tree/master/mediapipe/modules/objectron/objectron_cpu.pbtxt)| Detects and tracks 3D bounding boxes for objects. (CPU input, and inference is executed on CPU.)
+[`ObjectronGpuSubgraph`](https://github.com/google/mediapipe/tree/master/mediapipe/modules/objectron/objectron_gpu.pbtxt)| Detects and tracks 3D bounding boxes for objects. (GPU input, and inference is executed on GPU.)
diff --git a/mediapipe/modules/objectron/box_landmark_cpu.pbtxt b/mediapipe/modules/objectron/box_landmark_cpu.pbtxt
new file mode 100644
index 000000000..a07ec5249
--- /dev/null
+++ b/mediapipe/modules/objectron/box_landmark_cpu.pbtxt
@@ -0,0 +1,206 @@
+# MediaPipe Box landmark localization CPU subgraph.
+
+type: "BoxLandmarkSubgraph"
+
+input_stream: "IMAGE:input_video"
+input_stream: "NORM_RECT:box_rect"
+input_side_packet: "MODEL:model"
+output_stream: "LANDMARKS:box_landmarks_filtered"
+output_stream: "NORM_RECT:box_rect_for_next_frame"
+output_stream: "PRESENCE:box_presence"
+
+# Crops the rectangle that contains a box from the input image.
+node {
+  calculator: "ImageCroppingCalculator"
+  input_stream: "IMAGE:input_video"
+  input_stream: "NORM_RECT:box_rect"
+  output_stream: "IMAGE:box_image"
+  options: {
+    [mediapipe.ImageCroppingCalculatorOptions.ext] {
+      border_mode: BORDER_REPLICATE
+    }
+  }
+}
+
+# Transforms the input image to a 224x224 image. To scale the input
+# image, the scale_mode option is set to FIT to preserve the aspect ratio,
+# resulting in potential letterboxing in the transformed image.
+node: {
+  calculator: "ImageTransformationCalculator"
+  input_stream: "IMAGE:box_image"
+  output_stream: "IMAGE:transformed_box_image"
+  output_stream: "LETTERBOX_PADDING:letterbox_padding"
+  options: {
+    [mediapipe.ImageTransformationCalculatorOptions.ext] {
+      output_width: 224
+      output_height: 224
+      scale_mode: FIT
+    }
+  }
+}
+
+# Converts the transformed input image into an image tensor stored as a
+# TfLiteTensor.
+node {
+  calculator: "TfLiteConverterCalculator"
+  input_stream: "IMAGE:transformed_box_image"
+  output_stream: "TENSORS:image_tensor"
+  options: {
+    [mediapipe.TfLiteConverterCalculatorOptions.ext] {
+      zero_center: false
+    }
+  }
+}
+
+# Generates a single side packet containing a TensorFlow Lite op resolver that
+# supports custom ops needed by the model used in this graph.
+node {
+  calculator: "TfLiteCustomOpResolverCalculator"
+  output_side_packet: "opresolver"
+}
+
+# Runs a TensorFlow Lite model on CPU that takes an image tensor and outputs a
+# vector of tensors representing, for instance, detection boxes/keypoints and
+# scores.
+node {
+  calculator: "TfLiteInferenceCalculator"
+  input_stream: "TENSORS:image_tensor"
+  output_stream: "TENSORS:output_tensors"
+  input_side_packet: "CUSTOM_OP_RESOLVER:opresolver"
+  input_side_packet: "MODEL:model"
+  options: {
+    [mediapipe.TfLiteInferenceCalculatorOptions.ext] {
+      use_gpu: false
+    }
+  }
+}
+
+# Splits a vector of tensors into multiple vectors.
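+# Given the ranges below, output 0 ("landmark_tensors") carries the model's
+# keypoint tensor and output 1 ("box_flag_tensor") its box-presence score;
+# this mapping simply mirrors the order of the model's output tensors.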
+node {
+  calculator: "SplitTfLiteTensorVectorCalculator"
+  input_stream: "output_tensors"
+  output_stream: "landmark_tensors"
+  output_stream: "box_flag_tensor"
+  options: {
+    [mediapipe.SplitVectorCalculatorOptions.ext] {
+      ranges: { begin: 0 end: 1 }
+      ranges: { begin: 1 end: 2 }
+    }
+  }
+}
+
+# Converts the box-flag tensor into a float that represents the confidence
+# score of box presence.
+node {
+  calculator: "TfLiteTensorsToFloatsCalculator"
+  input_stream: "TENSORS:box_flag_tensor"
+  output_stream: "FLOAT:box_presence_score"
+}
+
+# Applies a threshold to the confidence score to determine whether a box is
+# present.
+node {
+  calculator: "ThresholdingCalculator"
+  input_stream: "FLOAT:box_presence_score"
+  output_stream: "FLAG:box_presence"
+  options: {
+    [mediapipe.ThresholdingCalculatorOptions.ext] {
+      threshold: 0.1
+    }
+  }
+}
+
+# Decodes the landmark tensors into a list of landmarks, where the landmark
+# coordinates are normalized by the size of the input image to the model.
+node {
+  calculator: "TfLiteTensorsToLandmarksCalculator"
+  input_stream: "TENSORS:landmark_tensors"
+  output_stream: "NORM_LANDMARKS:landmarks"
+  options: {
+    [mediapipe.TfLiteTensorsToLandmarksCalculatorOptions.ext] {
+      num_landmarks: 9
+      input_image_width: 224
+      input_image_height: 224
+    }
+  }
+}
+
+# Adjusts landmarks (already normalized to [0.f, 1.f]) on the letterboxed box
+# image (after image transformation with the FIT scale mode) to the
+# corresponding locations on the same image with the letterbox removed (box
+# image before image transformation).
+node {
+  calculator: "LandmarkLetterboxRemovalCalculator"
+  input_stream: "LANDMARKS:landmarks"
+  input_stream: "LETTERBOX_PADDING:letterbox_padding"
+  output_stream: "LANDMARKS:scaled_landmarks"
+}
+
+# Projects the landmarks from the cropped box image to the corresponding
+# locations on the full image before cropping (input to the graph).
+node {
+  calculator: "LandmarkProjectionCalculator"
+  input_stream: "NORM_LANDMARKS:scaled_landmarks"
+  input_stream: "NORM_RECT:box_rect"
+  output_stream: "NORM_LANDMARKS:box_landmarks"
+}
+
+# Extracts image size from the input images.
+node {
+  calculator: "ImagePropertiesCalculator"
+  input_stream: "IMAGE:input_video"
+  output_stream: "SIZE:image_size"
+}
+
+# Smooths the predicted landmark coordinates.
+node {
+  calculator: "LandmarksSmoothingCalculator"
+  input_stream: "NORM_LANDMARKS:box_landmarks"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "NORM_FILTERED_LANDMARKS:box_landmarks_filtered"
+  options: {
+    [mediapipe.LandmarksSmoothingCalculatorOptions.ext] {
+      velocity_filter: {
+        window_size: 10
+        velocity_scale: 7.5
+      }
+    }
+  }
+}
+
+# Converts box landmarks to a frame annotation.
+node {
+  calculator: "LandmarksToFrameAnnotationCalculator"
+  input_stream: "LANDMARKS:box_landmarks_filtered"
+  output_stream: "FRAME_ANNOTATION:box_annotation"
+}
+
+# Lifts the 2D landmarks to 3D using the EPnP algorithm.
+node {
+  calculator: "Lift2DFrameAnnotationTo3DCalculator"
+  input_stream: "FRAME_ANNOTATION:box_annotation"
+  output_stream: "LIFTED_FRAME_ANNOTATION:lifted_box"
+}
+
+# Gets a rotated rectangle from the lifted box.
+node {
+  calculator: "FrameAnnotationToRectCalculator"
+  input_stream: "FRAME_ANNOTATION:lifted_box"
+  output_stream: "NORM_RECT:rect_from_box"
+}
+
+# Expands the box rectangle so that in the next video frame it's likely to
+# still contain the box even with some motion.
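+# For instance, with scale_x/scale_y of 1.5 and square_long enabled, a
+# normalized 0.2 x 0.3 rect grows to 0.3 x 0.45 and is then squared to
+# 0.45 x 0.45 using the longer side (illustrative numbers, assuming a square
+# input image so normalized and pixel aspect ratios agree).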
+node {
+  calculator: "RectTransformationCalculator"
+  input_stream: "NORM_RECT:rect_from_box"
+  input_stream: "IMAGE_SIZE:image_size"
+  output_stream: "box_rect_for_next_frame"
+  options: {
+    [mediapipe.RectTransformationCalculatorOptions.ext] {
+      scale_x: 1.5
+      scale_y: 1.5
+      square_long: true
+    }
+  }
+}
diff --git a/mediapipe/graphs/object_detection_3d/subgraphs/box_landmark_gpu.pbtxt b/mediapipe/modules/objectron/box_landmark_gpu.pbtxt
similarity index 85%
rename from mediapipe/graphs/object_detection_3d/subgraphs/box_landmark_gpu.pbtxt
rename to mediapipe/modules/objectron/box_landmark_gpu.pbtxt
index b29c8b11c..0f11a4e56 100644
--- a/mediapipe/graphs/object_detection_3d/subgraphs/box_landmark_gpu.pbtxt
+++ b/mediapipe/modules/objectron/box_landmark_gpu.pbtxt
@@ -1,4 +1,4 @@
-# MediaPipe Box landmark localization subgraph.
+# MediaPipe Box landmark localization GPU subgraph.
 
 type: "BoxLandmarkSubgraph"
 
@@ -14,14 +14,14 @@ node {
   input_stream: "IMAGE_GPU:input_video"
   input_stream: "NORM_RECT:box_rect"
   output_stream: "IMAGE_GPU:box_image"
-  node_options: {
-    [type.googleapis.com/mediapipe.ImageCroppingCalculatorOptions] {
+  options: {
+    [mediapipe.ImageCroppingCalculatorOptions.ext] {
       border_mode: BORDER_REPLICATE
     }
   }
 }
 
-# Transforms the input image on GPU to a 256x256 image. To scale the input
+# Transforms the input image on GPU to a 224x224 image. To scale the input
 # image, the scale_mode option is set to FIT to preserve the aspect ratio,
 # resulting in potential letterboxing in the transformed image.
 node: {
@@ -29,8 +29,8 @@ node: {
   input_stream: "IMAGE_GPU:box_image"
   output_stream: "IMAGE_GPU:transformed_box_image"
   output_stream: "LETTERBOX_PADDING:letterbox_padding"
-  node_options: {
-    [type.googleapis.com/mediapipe.ImageTransformationCalculatorOptions] {
+  options: {
+    [mediapipe.ImageTransformationCalculatorOptions.ext] {
       output_width: 224
       output_height: 224
       scale_mode: FIT
@@ -44,8 +44,8 @@ node {
   calculator: "TfLiteConverterCalculator"
   input_stream: "IMAGE_GPU:transformed_box_image"
   output_stream: "TENSORS_GPU:image_tensor"
-  node_options: {
-    [type.googleapis.com/mediapipe.TfLiteConverterCalculatorOptions] {
+  options: {
+    [mediapipe.TfLiteConverterCalculatorOptions.ext] {
       zero_center: false
     }
   }
@@ -66,8 +66,8 @@ node {
   input_stream: "TENSORS_GPU:image_tensor"
   output_stream: "TENSORS:output_tensors"
   input_side_packet: "CUSTOM_OP_RESOLVER:opresolver"
-  node_options: {
-    [type.googleapis.com/mediapipe.TfLiteInferenceCalculatorOptions] {
+  options: {
+    [mediapipe.TfLiteInferenceCalculatorOptions.ext] {
       model_path: "object_detection_3d.tflite"
       use_gpu: true
     }
@@ -80,8 +80,8 @@ node {
   input_stream: "output_tensors"
   output_stream: "landmark_tensors"
   output_stream: "box_flag_tensor"
-  node_options: {
-    [type.googleapis.com/mediapipe.SplitVectorCalculatorOptions] {
+  options: {
+    [mediapipe.SplitVectorCalculatorOptions.ext] {
       ranges: { begin: 0 end: 1 }
       ranges: { begin: 1 end: 2 }
     }
@@ -102,8 +102,8 @@ node {
   calculator: "ThresholdingCalculator"
   input_stream: "FLOAT:box_presence_score"
   output_stream: "FLAG:box_presence"
-  node_options: {
-    [type.googleapis.com/mediapipe.ThresholdingCalculatorOptions] {
+  options: {
+    [mediapipe.ThresholdingCalculatorOptions.ext] {
       threshold: 0.99
     }
  }
@@ -115,8 +115,8 @@ node {
   calculator: "TfLiteTensorsToLandmarksCalculator"
   input_stream: "TENSORS:landmark_tensors"
   output_stream: "NORM_LANDMARKS:landmarks"
-  node_options: {
-    [type.googleapis.com/mediapipe.TfLiteTensorsToLandmarksCalculatorOptions] {
+  options: {
+    [mediapipe.TfLiteTensorsToLandmarksCalculatorOptions.ext] {
       num_landmarks: 9
       input_image_width: 224
       input_image_height: 224
@@ -157,8 +157,8 @@ node {
   input_stream: "NORM_LANDMARKS:box_landmarks"
   input_stream: "IMAGE_SIZE:image_size"
   output_stream: "NORM_FILTERED_LANDMARKS:box_landmarks_filtered"
-  node_options: {
-    [type.googleapis.com/mediapipe.LandmarksSmoothingCalculatorOptions] {
+  options: {
+    [mediapipe.LandmarksSmoothingCalculatorOptions.ext] {
       velocity_filter: {
         window_size: 10
         velocity_scale: 7.5
@@ -195,8 +195,8 @@ node {
   input_stream: "NORM_RECT:rect_from_box"
   input_stream: "IMAGE_SIZE:image_size"
   output_stream: "box_rect_for_next_frame"
-  node_options: {
-    [type.googleapis.com/mediapipe.RectTransformationCalculatorOptions] {
+  options: {
+    [mediapipe.RectTransformationCalculatorOptions.ext] {
       scale_x: 1.5
       scale_y: 1.5
       square_long: true
diff --git a/mediapipe/modules/objectron/calculators/BUILD b/mediapipe/modules/objectron/calculators/BUILD
new file mode 100644
index 000000000..0a8b326a6
--- /dev/null
+++ b/mediapipe/modules/objectron/calculators/BUILD
@@ -0,0 +1,380 @@
+# Copyright 2020 The MediaPipe Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+load("//mediapipe/framework/port:build_config.bzl", "mediapipe_proto_library")
+
+licenses(["notice"])
+
+package(default_visibility = ["//visibility:public"])
+
+mediapipe_proto_library(
+    name = "object_proto",
+    srcs = ["object.proto"],
+    visibility = ["//visibility:public"],
+)
+
+mediapipe_proto_library(
+    name = "a_r_capture_metadata_proto",
+    srcs = ["a_r_capture_metadata.proto"],
+    visibility = ["//visibility:public"],
+)
+
+mediapipe_proto_library(
+    name = "annotation_proto",
+    srcs = ["annotation_data.proto"],
+    visibility = ["//visibility:public"],
+    deps = [
+        ":a_r_capture_metadata_proto",
+        ":object_proto",
+    ],
+)
+
+mediapipe_proto_library(
+    name = "camera_parameters_proto",
+    srcs = ["camera_parameters.proto"],
+    visibility = ["//visibility:public"],
+)
+
+mediapipe_proto_library(
+    name = "frame_annotation_tracker_calculator_proto",
+    srcs = ["frame_annotation_tracker_calculator.proto"],
+    visibility = ["//visibility:public"],
+    deps = [
+        "//mediapipe/framework:calculator_options_proto",
+        "//mediapipe/framework:calculator_proto",
+    ],
+)
+
+mediapipe_proto_library(
+    name = "belief_decoder_config_proto",
+    srcs = ["belief_decoder_config.proto"],
+    visibility = ["//visibility:public"],
+)
+
+mediapipe_proto_library(
+    name = "tflite_tensors_to_objects_calculator_proto",
+    srcs = ["tflite_tensors_to_objects_calculator.proto"],
+    visibility = ["//visibility:public"],
+    deps = [
+        ":belief_decoder_config_proto",
+        "//mediapipe/framework:calculator_options_proto",
+        "//mediapipe/framework:calculator_proto",
+    ],
+)
+
+mediapipe_proto_library(
+    name = "tensors_to_objects_calculator_proto",
+    srcs = ["tensors_to_objects_calculator.proto"],
+    visibility = ["//visibility:public"],
+    deps = [
+        ":belief_decoder_config_proto",
+        "//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto", + ], +) + +mediapipe_proto_library( + name = "lift_2d_frame_annotation_to_3d_calculator_proto", + srcs = ["lift_2d_frame_annotation_to_3d_calculator.proto"], + visibility = ["//visibility:public"], + deps = [ + ":belief_decoder_config_proto", + "//mediapipe/framework:calculator_options_proto", + "//mediapipe/framework:calculator_proto", + ], +) + +mediapipe_proto_library( + name = "frame_annotation_to_rect_calculator_proto", + srcs = ["frame_annotation_to_rect_calculator.proto"], + visibility = ["//visibility:public"], + deps = [ + "//mediapipe/framework:calculator_options_proto", + "//mediapipe/framework:calculator_proto", + ], +) + +mediapipe_proto_library( + name = "filter_detection_calculator_proto", + srcs = ["filter_detection_calculator.proto"], + visibility = ["//visibility:public"], + deps = [ + "//mediapipe/framework:calculator_options_proto", + "//mediapipe/framework:calculator_proto", + ], +) + +cc_library( + name = "box_util", + srcs = ["box_util.cc"], + hdrs = ["box_util.h"], + deps = [ + "//mediapipe/framework/port:logging", + "//mediapipe/framework/port:opencv_core", + "//mediapipe/framework/port:opencv_imgproc", + "//mediapipe/util/tracking:box_tracker_cc_proto", + ], +) + +cc_library( + name = "frame_annotation_tracker", + srcs = ["frame_annotation_tracker.cc"], + hdrs = ["frame_annotation_tracker.h"], + deps = [ + ":annotation_cc_proto", + ":box_util", + "//mediapipe/framework/port:integral_types", + "//mediapipe/framework/port:logging", + "//mediapipe/util/tracking:box_tracker_cc_proto", + "@com_google_absl//absl/container:btree", + "@com_google_absl//absl/container:flat_hash_set", + ], +) + +cc_library( + name = "decoder", + srcs = [ + "decoder.cc", + ], + hdrs = [ + "decoder.h", + ], + deps = [ + ":annotation_cc_proto", + ":belief_decoder_config_cc_proto", + "//mediapipe/framework/port:logging", + "//mediapipe/framework/port:opencv_core", + "//mediapipe/framework/port:opencv_imgproc", + "//mediapipe/framework/port:status", + "@com_google_absl//absl/status", + "@eigen_archive//:eigen", + ], +) + +cc_library( + name = "tensor_util", + srcs = [ + "tensor_util.cc", + ], + hdrs = [ + "tensor_util.h", + ], + deps = [ + "//mediapipe/framework/formats:tensor", + "//mediapipe/framework/port:logging", + "//mediapipe/framework/port:opencv_core", + "@org_tensorflow//tensorflow/lite:framework", + ], +) + +cc_library( + name = "box", + srcs = [ + "box.cc", + "model.cc", + ], + hdrs = [ + "box.h", + "model.h", + "types.h", + ], + visibility = ["//visibility:public"], + deps = [ + ":annotation_cc_proto", + ":object_cc_proto", + "//mediapipe/framework/port:logging", + "@eigen_archive//:eigen", + ], +) + +cc_library( + name = "frame_annotation_to_timed_box_list_calculator", + srcs = ["frame_annotation_to_timed_box_list_calculator.cc"], + visibility = ["//visibility:public"], + deps = [ + ":annotation_cc_proto", + ":box_util", + "//mediapipe/framework:calculator_framework", + "//mediapipe/framework/port:opencv_core", + "//mediapipe/framework/port:opencv_imgproc", + "//mediapipe/framework/port:ret_check", + "//mediapipe/framework/port:status", + "//mediapipe/util/tracking:box_tracker_cc_proto", + "@com_google_absl//absl/memory", + ], + alwayslink = 1, +) + +cc_library( + name = "frame_annotation_tracker_calculator", + srcs = ["frame_annotation_tracker_calculator.cc"], + visibility = ["//visibility:public"], + deps = [ + ":annotation_cc_proto", + ":frame_annotation_tracker", + ":frame_annotation_tracker_calculator_cc_proto", + 
"//mediapipe/framework:calculator_framework", + "//mediapipe/framework/port:ret_check", + "//mediapipe/framework/port:status", + "//mediapipe/util/tracking:box_tracker_cc_proto", + "@com_google_absl//absl/container:flat_hash_set", + "@com_google_absl//absl/memory", + ], + alwayslink = 1, +) + +cc_library( + name = "tflite_tensors_to_objects_calculator", + srcs = ["tflite_tensors_to_objects_calculator.cc"], + visibility = ["//visibility:public"], + deps = [ + ":annotation_cc_proto", + ":belief_decoder_config_cc_proto", + ":decoder", + ":tensor_util", + ":tflite_tensors_to_objects_calculator_cc_proto", + "//mediapipe/framework:calculator_framework", + "//mediapipe/framework/deps:file_path", + "//mediapipe/framework/formats:detection_cc_proto", + "//mediapipe/framework/port:opencv_core", + "//mediapipe/framework/port:ret_check", + "@com_google_absl//absl/memory", + "@com_google_absl//absl/strings:str_format", + "@com_google_absl//absl/types:span", + "@eigen_archive//:eigen", + "@org_tensorflow//tensorflow/lite:framework", + ], + alwayslink = 1, +) + +cc_library( + name = "tensors_to_objects_calculator", + srcs = ["tensors_to_objects_calculator.cc"], + visibility = ["//visibility:public"], + deps = [ + ":annotation_cc_proto", + ":belief_decoder_config_cc_proto", + ":decoder", + ":tensor_util", + ":tensors_to_objects_calculator_cc_proto", + "//mediapipe/framework:calculator_framework", + "//mediapipe/framework/deps:file_path", + "//mediapipe/framework/formats:detection_cc_proto", + "//mediapipe/framework/port:opencv_core", + "//mediapipe/framework/port:ret_check", + "@com_google_absl//absl/memory", + "@com_google_absl//absl/strings:str_format", + "@com_google_absl//absl/types:span", + "@eigen_archive//:eigen", + ], + alwayslink = 1, +) + +cc_library( + name = "lift_2d_frame_annotation_to_3d_calculator", + srcs = ["lift_2d_frame_annotation_to_3d_calculator.cc"], + visibility = ["//visibility:public"], + deps = [ + ":annotation_cc_proto", + ":belief_decoder_config_cc_proto", + ":decoder", + ":lift_2d_frame_annotation_to_3d_calculator_cc_proto", + ":tensor_util", + ":tflite_tensors_to_objects_calculator_cc_proto", + "//mediapipe/framework:calculator_framework", + "//mediapipe/framework/deps:file_path", + "//mediapipe/framework/formats:detection_cc_proto", + "//mediapipe/framework/port:opencv_core", + "//mediapipe/framework/port:ret_check", + "@com_google_absl//absl/memory", + "@com_google_absl//absl/strings:str_format", + "@com_google_absl//absl/types:span", + "@eigen_archive//:eigen", + "@org_tensorflow//tensorflow/lite:framework", + ], + alwayslink = 1, +) + +cc_library( + name = "frame_annotation_to_rect_calculator", + srcs = ["frame_annotation_to_rect_calculator.cc"], + deps = [ + ":annotation_cc_proto", + ":box", + ":frame_annotation_to_rect_calculator_cc_proto", + "//mediapipe/framework:calculator_framework", + "//mediapipe/framework/formats:rect_cc_proto", + "//mediapipe/framework/port:ret_check", + "//mediapipe/framework/port:status", + "@com_google_absl//absl/memory", + "@eigen_archive//:eigen", + ], + alwayslink = 1, +) + +cc_library( + name = "landmarks_to_frame_annotation_calculator", + srcs = ["landmarks_to_frame_annotation_calculator.cc"], + deps = [ + ":annotation_cc_proto", + "//mediapipe/framework:calculator_framework", + "//mediapipe/framework/formats:landmark_cc_proto", + "//mediapipe/framework/port:ret_check", + "//mediapipe/framework/port:status", + "@com_google_absl//absl/memory", + ], + alwayslink = 1, +) + +cc_library( + name = "filter_detection_calculator", + srcs = 
["filter_detection_calculator.cc"], + deps = [ + ":filter_detection_calculator_cc_proto", + "//mediapipe/framework:calculator_framework", + "//mediapipe/framework/formats:detection_cc_proto", + "//mediapipe/framework/formats:location_data_cc_proto", + "//mediapipe/framework/port:logging", + "//mediapipe/framework/port:map_util", + "//mediapipe/framework/port:re2", + "//mediapipe/framework/port:status", + "@com_google_absl//absl/container:node_hash_set", + "@com_google_absl//absl/strings", + ], + alwayslink = 1, +) + +cc_test( + name = "box_util_test", + srcs = ["box_util_test.cc"], + deps = [ + ":box_util", + "//mediapipe/framework/port:gtest_main", + "//mediapipe/framework/port:opencv_core", + "//mediapipe/util/tracking:box_tracker_cc_proto", + ], +) + +cc_test( + name = "frame_annotation_tracker_test", + srcs = ["frame_annotation_tracker_test.cc"], + deps = [ + ":annotation_cc_proto", + ":frame_annotation_tracker", + "//mediapipe/framework/port:gtest_main", + "//mediapipe/framework/port:logging", + "//mediapipe/util/tracking:box_tracker_cc_proto", + "@com_google_absl//absl/container:flat_hash_set", + ], +) diff --git a/mediapipe/graphs/object_detection_3d/calculators/a_r_capture_metadata.proto b/mediapipe/modules/objectron/calculators/a_r_capture_metadata.proto similarity index 100% rename from mediapipe/graphs/object_detection_3d/calculators/a_r_capture_metadata.proto rename to mediapipe/modules/objectron/calculators/a_r_capture_metadata.proto diff --git a/mediapipe/graphs/object_detection_3d/calculators/annotation_data.proto b/mediapipe/modules/objectron/calculators/annotation_data.proto similarity index 94% rename from mediapipe/graphs/object_detection_3d/calculators/annotation_data.proto rename to mediapipe/modules/objectron/calculators/annotation_data.proto index 5a417cbbd..f1a9600eb 100644 --- a/mediapipe/graphs/object_detection_3d/calculators/annotation_data.proto +++ b/mediapipe/modules/objectron/calculators/annotation_data.proto @@ -16,8 +16,8 @@ syntax = "proto3"; package mediapipe; -import "mediapipe/graphs/object_detection_3d/calculators/a_r_capture_metadata.proto"; -import "mediapipe/graphs/object_detection_3d/calculators/object.proto"; +import "mediapipe/modules/objectron/calculators/a_r_capture_metadata.proto"; +import "mediapipe/modules/objectron/calculators/object.proto"; // Projection of a 3D point on an image, and its metric depth. message NormalizedPoint2D { diff --git a/mediapipe/graphs/object_detection_3d/calculators/belief_decoder_config.proto b/mediapipe/modules/objectron/calculators/belief_decoder_config.proto similarity index 100% rename from mediapipe/graphs/object_detection_3d/calculators/belief_decoder_config.proto rename to mediapipe/modules/objectron/calculators/belief_decoder_config.proto diff --git a/mediapipe/graphs/object_detection_3d/calculators/box.cc b/mediapipe/modules/objectron/calculators/box.cc similarity index 99% rename from mediapipe/graphs/object_detection_3d/calculators/box.cc rename to mediapipe/modules/objectron/calculators/box.cc index 31c8ddc1c..a7d0f1460 100644 --- a/mediapipe/graphs/object_detection_3d/calculators/box.cc +++ b/mediapipe/modules/objectron/calculators/box.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#include "mediapipe/graphs/object_detection_3d/calculators/box.h" +#include "mediapipe/modules/objectron/calculators/box.h" #include "Eigen/src/Core/util/Constants.h" #include "mediapipe/framework/port/logging.h" diff --git a/mediapipe/graphs/object_detection_3d/calculators/box.h b/mediapipe/modules/objectron/calculators/box.h similarity index 95% rename from mediapipe/graphs/object_detection_3d/calculators/box.h rename to mediapipe/modules/objectron/calculators/box.h index 22839b52b..17218f78e 100644 --- a/mediapipe/graphs/object_detection_3d/calculators/box.h +++ b/mediapipe/modules/objectron/calculators/box.h @@ -12,12 +12,12 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifndef MEDIAPIPE_GRAPHS_OBJECT_DETECTION_3D_BOX_H_ -#define MEDIAPIPE_GRAPHS_OBJECT_DETECTION_3D_BOX_H_ +#ifndef MEDIAPIPE_MODULES_OBJECTRON_CALCULATORS_BOX_H_ +#define MEDIAPIPE_MODULES_OBJECTRON_CALCULATORS_BOX_H_ #include -#include "mediapipe/graphs/object_detection_3d/calculators/model.h" +#include "mediapipe/modules/objectron/calculators/model.h" namespace mediapipe { @@ -129,4 +129,4 @@ class Box : public Model { } // namespace mediapipe -#endif // MEDIAPIPE_GRAPHS_OBJECT_DETECTION_3D_BOX_H_ +#endif // MEDIAPIPE_MODULES_OBJECTRON_CALCULATORS_BOX_H_ diff --git a/mediapipe/graphs/object_detection_3d/calculators/box_util.cc b/mediapipe/modules/objectron/calculators/box_util.cc similarity index 98% rename from mediapipe/graphs/object_detection_3d/calculators/box_util.cc rename to mediapipe/modules/objectron/calculators/box_util.cc index e07cac54c..0663b5bdb 100644 --- a/mediapipe/graphs/object_detection_3d/calculators/box_util.cc +++ b/mediapipe/modules/objectron/calculators/box_util.cc @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -#include "mediapipe/graphs/object_detection_3d/calculators/box_util.h" +#include "mediapipe/modules/objectron/calculators/box_util.h" #include diff --git a/mediapipe/graphs/object_detection_3d/calculators/box_util.h b/mediapipe/modules/objectron/calculators/box_util.h similarity index 91% rename from mediapipe/graphs/object_detection_3d/calculators/box_util.h rename to mediapipe/modules/objectron/calculators/box_util.h index 4076b156d..fed21c0b0 100644 --- a/mediapipe/graphs/object_detection_3d/calculators/box_util.h +++ b/mediapipe/modules/objectron/calculators/box_util.h @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-#ifndef MEDIAPIPE_GRAPHS_OBJECT_DETECTION_3D_BOX_UTIL_H_
-#define MEDIAPIPE_GRAPHS_OBJECT_DETECTION_3D_BOX_UTIL_H_
+#ifndef MEDIAPIPE_MODULES_OBJECTRON_CALCULATORS_BOX_UTIL_H_
+#define MEDIAPIPE_MODULES_OBJECTRON_CALCULATORS_BOX_UTIL_H_
 
 #include "mediapipe/framework/port/opencv_core_inc.h"
 #include "mediapipe/util/tracking/box_tracker.pb.h"
@@ -47,4 +47,4 @@ cv::Point2f MapPoint(const TimedBoxProto& src_box, const TimedBoxProto& dst_box,
 
 }  // namespace mediapipe
 
-#endif  // MEDIAPIPE_GRAPHS_OBJECT_DETECTION_3D_BOX_UTIL_H_
+#endif  // MEDIAPIPE_MODULES_OBJECTRON_CALCULATORS_BOX_UTIL_H_
diff --git a/mediapipe/graphs/object_detection_3d/calculators/box_util_test.cc b/mediapipe/modules/objectron/calculators/box_util_test.cc
similarity index 98%
rename from mediapipe/graphs/object_detection_3d/calculators/box_util_test.cc
rename to mediapipe/modules/objectron/calculators/box_util_test.cc
index 97698cad2..2a3895fdd 100644
--- a/mediapipe/graphs/object_detection_3d/calculators/box_util_test.cc
+++ b/mediapipe/modules/objectron/calculators/box_util_test.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "mediapipe/graphs/object_detection_3d/calculators/box_util.h"
+#include "mediapipe/modules/objectron/calculators/box_util.h"
 
 #include "mediapipe/framework/port/gmock.h"
 #include "mediapipe/framework/port/gtest.h"
diff --git a/mediapipe/graphs/object_detection_3d/calculators/camera_parameters.proto b/mediapipe/modules/objectron/calculators/camera_parameters.proto
similarity index 100%
rename from mediapipe/graphs/object_detection_3d/calculators/camera_parameters.proto
rename to mediapipe/modules/objectron/calculators/camera_parameters.proto
diff --git a/mediapipe/graphs/object_detection_3d/calculators/decoder.cc b/mediapipe/modules/objectron/calculators/decoder.cc
similarity index 98%
rename from mediapipe/graphs/object_detection_3d/calculators/decoder.cc
rename to mediapipe/modules/objectron/calculators/decoder.cc
index fdea8d51f..0f66c3a79 100644
--- a/mediapipe/graphs/object_detection_3d/calculators/decoder.cc
+++ b/mediapipe/modules/objectron/calculators/decoder.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "mediapipe/graphs/object_detection_3d/calculators/decoder.h"
+#include "mediapipe/modules/objectron/calculators/decoder.h"
 
 #include
 
@@ -21,7 +21,7 @@
 #include "mediapipe/framework/port/logging.h"
 #include "mediapipe/framework/port/opencv_imgproc_inc.h"
 #include "mediapipe/framework/port/status.h"
-#include "mediapipe/graphs/object_detection_3d/calculators/annotation_data.pb.h"
+#include "mediapipe/modules/objectron/calculators/annotation_data.pb.h"
 
 namespace mediapipe {
 constexpr int Decoder::kNumOffsetmaps = 16;
diff --git a/mediapipe/graphs/object_detection_3d/calculators/decoder.h b/mediapipe/modules/objectron/calculators/decoder.h
similarity index 92%
rename from mediapipe/graphs/object_detection_3d/calculators/decoder.h
rename to mediapipe/modules/objectron/calculators/decoder.h
index 2d7065062..be69939c8 100644
--- a/mediapipe/graphs/object_detection_3d/calculators/decoder.h
+++ b/mediapipe/modules/objectron/calculators/decoder.h
@@ -12,16 +12,16 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
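 
 // (Decoder turns the detection model's output maps into 2D box keypoints: a
 // belief heatmap locates each object center, and 16 offset maps (x/y offsets
 // for the eight box vertices, matching Decoder::kNumOffsetmaps in decoder.cc)
 // place the remaining keypoints relative to that center.)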
-#ifndef MEDIAPIPE_GRAPHS_OBJECT_DETECTION_3D_DECODER_H_
-#define MEDIAPIPE_GRAPHS_OBJECT_DETECTION_3D_DECODER_H_
+#ifndef MEDIAPIPE_MODULES_OBJECTRON_CALCULATORS_DECODER_H_
+#define MEDIAPIPE_MODULES_OBJECTRON_CALCULATORS_DECODER_H_
 
 #include
 
 #include "Eigen/Dense"
 #include "absl/status/status.h"
 #include "mediapipe/framework/port/opencv_core_inc.h"
-#include "mediapipe/graphs/object_detection_3d/calculators/annotation_data.pb.h"
-#include "mediapipe/graphs/object_detection_3d/calculators/belief_decoder_config.pb.h"
+#include "mediapipe/modules/objectron/calculators/annotation_data.pb.h"
+#include "mediapipe/modules/objectron/calculators/belief_decoder_config.pb.h"
 
 namespace mediapipe {
 
@@ -106,4 +106,4 @@ class Decoder {
 
 }  // namespace mediapipe
 
-#endif  // MEDIAPIPE_GRAPHS_OBJECT_DETECTION_3D_DECODER_H_
+#endif  // MEDIAPIPE_MODULES_OBJECTRON_CALCULATORS_DECODER_H_
diff --git a/mediapipe/graphs/object_detection_3d/calculators/filter_detection_calculator.cc b/mediapipe/modules/objectron/calculators/filter_detection_calculator.cc
similarity index 90%
rename from mediapipe/graphs/object_detection_3d/calculators/filter_detection_calculator.cc
rename to mediapipe/modules/objectron/calculators/filter_detection_calculator.cc
index 293cc72cc..37eb74a81 100644
--- a/mediapipe/graphs/object_detection_3d/calculators/filter_detection_calculator.cc
+++ b/mediapipe/modules/objectron/calculators/filter_detection_calculator.cc
@@ -27,7 +27,7 @@
 #include "mediapipe/framework/port/map_util.h"
 #include "mediapipe/framework/port/re2.h"
 #include "mediapipe/framework/port/status.h"
-#include "mediapipe/graphs/object_detection_3d/calculators/filter_detection_calculator.pb.h"
+#include "mediapipe/modules/objectron/calculators/filter_detection_calculator.pb.h"
 
 namespace mediapipe {
 
@@ -38,8 +38,8 @@ constexpr char kDetectionsTag[] = "DETECTIONS";
 constexpr char kLabelsTag[] = "LABELS";
 constexpr char kLabelsCsvTag[] = "LABELS_CSV";
 
-using ::mediapipe::ContainsKey;
-using ::mediapipe::RE2;
+using mediapipe::ContainsKey;
+using mediapipe::RE2;
 using Detections = std::vector<Detection>;
 using Strings = std::vector<std::string>;
 
@@ -63,8 +63,8 @@ using Strings = std::vector<std::string>;
 //    input_stream: "DETECTIONS:detections"
 //    output_stream: "DETECTIONS:filtered_detections"
 //    input_side_packet: "LABELS:allowed_labels"
-//    node_options: {
-//      [type.googleapis.com/mediapipe.FilterDetectionCalculatorOptions]: {
+//    options: {
+//      [mediapipe.FilterDetectionCalculatorOptions.ext]: {
 //        min_score: 0.5
 //      }
 //    }
@@ -77,8 +77,8 @@ struct FirstGreaterComparator {
   }
 };
 
-::mediapipe::Status SortLabelsByDecreasingScore(const Detection& detection,
-                                                Detection* sorted_detection) {
+mediapipe::Status SortLabelsByDecreasingScore(const Detection& detection,
+                                              Detection* sorted_detection) {
   RET_CHECK(sorted_detection);
   RET_CHECK_EQ(detection.score_size(), detection.label_size());
   if (!detection.label_id().empty()) {
@@ -110,14 +110,14 @@ struct FirstGreaterComparator {
       sorted_detection->set_label_id(i, detection.label_id(index));
     }
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
 class FilterDetectionCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc);
-  ::mediapipe::Status Open(CalculatorContext* cc) override;
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
+  static mediapipe::Status GetContract(CalculatorContract* cc);
+  mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
 
  private:
  bool IsValidLabel(const std::string& label);
@@ -134,7 +134,7 @@ class FilterDetectionCalculator : public CalculatorBase {
 };
 REGISTER_CALCULATOR(FilterDetectionCalculator);
 
-::mediapipe::Status FilterDetectionCalculator::GetContract(
+mediapipe::Status FilterDetectionCalculator::GetContract(
     CalculatorContract* cc) {
   RET_CHECK(!cc->Inputs().GetTags().empty());
   RET_CHECK(!cc->Outputs().GetTags().empty());
@@ -153,10 +153,10 @@ REGISTER_CALCULATOR(FilterDetectionCalculator);
   if (cc->InputSidePackets().HasTag(kLabelsCsvTag)) {
     cc->InputSidePackets().Tag(kLabelsCsvTag).Set<std::string>();
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
-::mediapipe::Status FilterDetectionCalculator::Open(CalculatorContext* cc) {
+mediapipe::Status FilterDetectionCalculator::Open(CalculatorContext* cc) {
   cc->SetOffset(TimestampDiff(0));
   options_ = cc->Options<FilterDetectionCalculatorOptions>();
   limit_labels_ = cc->InputSidePackets().HasTag(kLabelsTag) ||
@@ -187,12 +187,12 @@ REGISTER_CALCULATOR(FilterDetectionCalculator);
       limit_labels_ = false;
     }
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
-::mediapipe::Status FilterDetectionCalculator::Process(CalculatorContext* cc) {
+mediapipe::Status FilterDetectionCalculator::Process(CalculatorContext* cc) {
   if (limit_labels_ && allowed_labels_.empty()) {
-    return ::mediapipe::OkStatus();
+    return mediapipe::OkStatus();
   }
   Detections detections;
   if (cc->Inputs().HasTag(kDetectionsTag)) {
@@ -234,7 +234,7 @@ REGISTER_CALCULATOR(FilterDetectionCalculator);
         .Tag(kDetectionsTag)
         .Add(new Detection((*outputs)[0]), cc->InputTimestamp());
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
 bool FilterDetectionCalculator::IsValidLabel(const std::string& label) {
diff --git a/mediapipe/graphs/object_detection_3d/calculators/filter_detection_calculator.proto b/mediapipe/modules/objectron/calculators/filter_detection_calculator.proto
similarity index 100%
rename from mediapipe/graphs/object_detection_3d/calculators/filter_detection_calculator.proto
rename to mediapipe/modules/objectron/calculators/filter_detection_calculator.proto
diff --git a/mediapipe/graphs/object_detection_3d/calculators/frame_annotation_to_rect_calculator.cc b/mediapipe/modules/objectron/calculators/frame_annotation_to_rect_calculator.cc
similarity index 96%
rename from mediapipe/graphs/object_detection_3d/calculators/frame_annotation_to_rect_calculator.cc
rename to mediapipe/modules/objectron/calculators/frame_annotation_to_rect_calculator.cc
index 0d6e9537a..b4e3d031f 100644
--- a/mediapipe/graphs/object_detection_3d/calculators/frame_annotation_to_rect_calculator.cc
+++ b/mediapipe/modules/objectron/calculators/frame_annotation_to_rect_calculator.cc
@@ -19,9 +19,9 @@
 #include "mediapipe/framework/formats/rect.pb.h"
 #include "mediapipe/framework/port/ret_check.h"
 #include "mediapipe/framework/port/status.h"
-#include "mediapipe/graphs/object_detection_3d/calculators/annotation_data.pb.h"
-#include "mediapipe/graphs/object_detection_3d/calculators/box.h"
-#include "mediapipe/graphs/object_detection_3d/calculators/frame_annotation_to_rect_calculator.pb.h"
+#include "mediapipe/modules/objectron/calculators/annotation_data.pb.h"
+#include "mediapipe/modules/objectron/calculators/box.h"
+#include "mediapipe/modules/objectron/calculators/frame_annotation_to_rect_calculator.pb.h"
 
 namespace mediapipe {
 
diff --git a/mediapipe/graphs/object_detection_3d/calculators/frame_annotation_to_rect_calculator.proto b/mediapipe/modules/objectron/calculators/frame_annotation_to_rect_calculator.proto
similarity index 100%
rename from mediapipe/graphs/object_detection_3d/calculators/frame_annotation_to_rect_calculator.proto
rename to mediapipe/modules/objectron/calculators/frame_annotation_to_rect_calculator.proto
diff --git a/mediapipe/graphs/object_detection_3d/calculators/frame_annotation_to_timed_box_list_calculator.cc b/mediapipe/modules/objectron/calculators/frame_annotation_to_timed_box_list_calculator.cc
similarity index 79%
rename from mediapipe/graphs/object_detection_3d/calculators/frame_annotation_to_timed_box_list_calculator.cc
rename to mediapipe/modules/objectron/calculators/frame_annotation_to_timed_box_list_calculator.cc
index 788456ad0..55b1acac6 100644
--- a/mediapipe/graphs/object_detection_3d/calculators/frame_annotation_to_timed_box_list_calculator.cc
+++ b/mediapipe/modules/objectron/calculators/frame_annotation_to_timed_box_list_calculator.cc
@@ -20,8 +20,8 @@
 #include "mediapipe/framework/port/opencv_imgproc_inc.h"
 #include "mediapipe/framework/port/ret_check.h"
 #include "mediapipe/framework/port/status.h"
-#include "mediapipe/graphs/object_detection_3d/calculators/annotation_data.pb.h"
-#include "mediapipe/graphs/object_detection_3d/calculators/box_util.h"
+#include "mediapipe/modules/objectron/calculators/annotation_data.pb.h"
+#include "mediapipe/modules/objectron/calculators/box_util.h"
 #include "mediapipe/util/tracking/box_tracker.pb.h"
 
 namespace {
@@ -47,15 +47,15 @@ namespace mediapipe {
 // }
 class FrameAnnotationToTimedBoxListCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc);
+  static mediapipe::Status GetContract(CalculatorContract* cc);
 
-  ::mediapipe::Status Open(CalculatorContext* cc) override;
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
-  ::mediapipe::Status Close(CalculatorContext* cc) override;
+  mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Close(CalculatorContext* cc) override;
 };
 REGISTER_CALCULATOR(FrameAnnotationToTimedBoxListCalculator);
 
-::mediapipe::Status FrameAnnotationToTimedBoxListCalculator::GetContract(
+mediapipe::Status FrameAnnotationToTimedBoxListCalculator::GetContract(
     CalculatorContract* cc) {
   RET_CHECK(!cc->Inputs().GetTags().empty());
   RET_CHECK(!cc->Outputs().GetTags().empty());
@@ -67,15 +67,15 @@ REGISTER_CALCULATOR(FrameAnnotationToTimedBoxListCalculator);
   if (cc->Outputs().HasTag(kOutputStreamTag)) {
     cc->Outputs().Tag(kOutputStreamTag).Set();
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
-::mediapipe::Status FrameAnnotationToTimedBoxListCalculator::Open(
+mediapipe::Status FrameAnnotationToTimedBoxListCalculator::Open(
    CalculatorContext* cc) {
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
-::mediapipe::Status FrameAnnotationToTimedBoxListCalculator::Process(
+mediapipe::Status FrameAnnotationToTimedBoxListCalculator::Process(
    CalculatorContext* cc) {
   if (cc->Inputs().HasTag(kInputStreamTag) &&
       !cc->Inputs().Tag(kInputStreamTag).IsEmpty()) {
@@ -104,12 +104,12 @@ REGISTER_CALCULATOR(FrameAnnotationToTimedBoxListCalculator);
     }
   }
 
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
-::mediapipe::Status FrameAnnotationToTimedBoxListCalculator::Close(
+mediapipe::Status FrameAnnotationToTimedBoxListCalculator::Close(
    CalculatorContext* cc) {
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
 }  // namespace mediapipe
diff --git a/mediapipe/graphs/object_detection_3d/calculators/frame_annotation_tracker.cc b/mediapipe/modules/objectron/calculators/frame_annotation_tracker.cc
similarity index 93%
rename from mediapipe/graphs/object_detection_3d/calculators/frame_annotation_tracker.cc
rename to mediapipe/modules/objectron/calculators/frame_annotation_tracker.cc
index 1cfdb2ddc..eebf88579 100644
--- a/mediapipe/graphs/object_detection_3d/calculators/frame_annotation_tracker.cc
+++ b/mediapipe/modules/objectron/calculators/frame_annotation_tracker.cc
@@ -12,12 +12,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "mediapipe/graphs/object_detection_3d/calculators/frame_annotation_tracker.h"
+#include "mediapipe/modules/objectron/calculators/frame_annotation_tracker.h"
 
 #include "absl/container/flat_hash_set.h"
 #include "mediapipe/framework/port/logging.h"
-#include "mediapipe/graphs/object_detection_3d/calculators/annotation_data.pb.h"
-#include "mediapipe/graphs/object_detection_3d/calculators/box_util.h"
+#include "mediapipe/modules/objectron/calculators/annotation_data.pb.h"
+#include "mediapipe/modules/objectron/calculators/box_util.h"
 #include "mediapipe/util/tracking/box_tracker.pb.h"
 
 namespace mediapipe {
diff --git a/mediapipe/graphs/object_detection_3d/calculators/frame_annotation_tracker.h b/mediapipe/modules/objectron/calculators/frame_annotation_tracker.h
similarity index 87%
rename from mediapipe/graphs/object_detection_3d/calculators/frame_annotation_tracker.h
rename to mediapipe/modules/objectron/calculators/frame_annotation_tracker.h
index 2113c7711..11a469c38 100644
--- a/mediapipe/graphs/object_detection_3d/calculators/frame_annotation_tracker.h
+++ b/mediapipe/modules/objectron/calculators/frame_annotation_tracker.h
@@ -12,15 +12,15 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
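 
 // (FrameAnnotationTracker reconciles fresh detections with boxes carried by
 // the 2D tracker: a new FrameAnnotation that overlaps an existing tracked box
 // above the configured IoU threshold supersedes it, and the stale object IDs
 // are reported for cancellation; see FrameAnnotationTrackerCalculator below.)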
-#ifndef MEDIAPIPE_GRAPHS_OBJECT_DETECTION_3D_FRAME_ANNOTATION_TRACKER_H_
-#define MEDIAPIPE_GRAPHS_OBJECT_DETECTION_3D_FRAME_ANNOTATION_TRACKER_H_
+#ifndef MEDIAPIPE_MODULES_OBJECTRON_CALCULATORS_FRAME_ANNOTATION_TRACKER_H_
+#define MEDIAPIPE_MODULES_OBJECTRON_CALCULATORS_FRAME_ANNOTATION_TRACKER_H_
 
 #include
 
 #include "absl/container/btree_map.h"
 #include "absl/container/flat_hash_set.h"
 #include "mediapipe/framework/port/integral_types.h"
-#include "mediapipe/graphs/object_detection_3d/calculators/annotation_data.pb.h"
+#include "mediapipe/modules/objectron/calculators/annotation_data.pb.h"
 #include "mediapipe/util/tracking/box_tracker.pb.h"
 
 namespace mediapipe {
 
@@ -59,4 +59,4 @@ class FrameAnnotationTracker {
 
 }  // namespace mediapipe
 
-#endif  // MEDIAPIPE_GRAPHS_OBJECT_DETECTION_3D_FRAME_ANNOTATION_TRACKER_H_
+#endif  // MEDIAPIPE_MODULES_OBJECTRON_CALCULATORS_FRAME_ANNOTATION_TRACKER_H_
diff --git a/mediapipe/graphs/object_detection_3d/calculators/frame_annotation_tracker_calculator.cc b/mediapipe/modules/objectron/calculators/frame_annotation_tracker_calculator.cc
similarity index 83%
rename from mediapipe/graphs/object_detection_3d/calculators/frame_annotation_tracker_calculator.cc
rename to mediapipe/modules/objectron/calculators/frame_annotation_tracker_calculator.cc
index ef3f4f5d4..dfdc581a2 100644
--- a/mediapipe/graphs/object_detection_3d/calculators/frame_annotation_tracker_calculator.cc
+++ b/mediapipe/modules/objectron/calculators/frame_annotation_tracker_calculator.cc
@@ -17,9 +17,9 @@
 #include "mediapipe/framework/calculator_framework.h"
 #include "mediapipe/framework/port/ret_check.h"
 #include "mediapipe/framework/port/status.h"
-#include "mediapipe/graphs/object_detection_3d/calculators/annotation_data.pb.h"
-#include "mediapipe/graphs/object_detection_3d/calculators/frame_annotation_tracker.h"
-#include "mediapipe/graphs/object_detection_3d/calculators/frame_annotation_tracker_calculator.pb.h"
+#include "mediapipe/modules/objectron/calculators/annotation_data.pb.h"
+#include "mediapipe/modules/objectron/calculators/frame_annotation_tracker.h"
+#include "mediapipe/modules/objectron/calculators/frame_annotation_tracker_calculator.pb.h"
 #include "mediapipe/util/tracking/box_tracker.pb.h"
 
 namespace {
@@ -52,18 +52,18 @@ namespace mediapipe {
 // }
 class FrameAnnotationTrackerCalculator : public CalculatorBase {
  public:
-  static ::mediapipe::Status GetContract(CalculatorContract* cc);
+  static mediapipe::Status GetContract(CalculatorContract* cc);
 
-  ::mediapipe::Status Open(CalculatorContext* cc) override;
-  ::mediapipe::Status Process(CalculatorContext* cc) override;
-  ::mediapipe::Status Close(CalculatorContext* cc) override;
+  mediapipe::Status Open(CalculatorContext* cc) override;
+  mediapipe::Status Process(CalculatorContext* cc) override;
+  mediapipe::Status Close(CalculatorContext* cc) override;
 
  private:
  std::unique_ptr<FrameAnnotationTracker> frame_annotation_tracker_;
 };
 REGISTER_CALCULATOR(FrameAnnotationTrackerCalculator);
 
-::mediapipe::Status FrameAnnotationTrackerCalculator::GetContract(
+mediapipe::Status FrameAnnotationTrackerCalculator::GetContract(
    CalculatorContract* cc) {
   RET_CHECK(!cc->Inputs().GetTags().empty());
   RET_CHECK(!cc->Outputs().GetTags().empty());
@@ -80,18 +80,18 @@ REGISTER_CALCULATOR(FrameAnnotationTrackerCalculator);
   if (cc->Outputs().HasTag(kOutputCancelObjectIdTag)) {
     cc->Outputs().Tag(kOutputCancelObjectIdTag).Set();
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
-::mediapipe::Status FrameAnnotationTrackerCalculator::Open(
+mediapipe::Status FrameAnnotationTrackerCalculator::Open(
     CalculatorContext* cc) {
   const auto& options = cc->Options<FrameAnnotationTrackerCalculatorOptions>();
   frame_annotation_tracker_ = absl::make_unique<FrameAnnotationTracker>(
       options.iou_threshold(), options.img_width(), options.img_height());
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
-::mediapipe::Status FrameAnnotationTrackerCalculator::Process(
+mediapipe::Status FrameAnnotationTrackerCalculator::Process(
    CalculatorContext* cc) {
   if (cc->Inputs().HasTag(kInputFrameAnnotationTag) &&
       !cc->Inputs().Tag(kInputFrameAnnotationTag).IsEmpty()) {
@@ -126,12 +126,12 @@ REGISTER_CALCULATOR(FrameAnnotationTrackerCalculator);
     }
   }
 
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
-::mediapipe::Status FrameAnnotationTrackerCalculator::Close(
+mediapipe::Status FrameAnnotationTrackerCalculator::Close(
    CalculatorContext* cc) {
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
 }  // namespace mediapipe
diff --git a/mediapipe/graphs/object_detection_3d/calculators/frame_annotation_tracker_calculator.proto b/mediapipe/modules/objectron/calculators/frame_annotation_tracker_calculator.proto
similarity index 100%
rename from mediapipe/graphs/object_detection_3d/calculators/frame_annotation_tracker_calculator.proto
rename to mediapipe/modules/objectron/calculators/frame_annotation_tracker_calculator.proto
diff --git a/mediapipe/graphs/object_detection_3d/calculators/frame_annotation_tracker_test.cc b/mediapipe/modules/objectron/calculators/frame_annotation_tracker_test.cc
similarity index 97%
rename from mediapipe/graphs/object_detection_3d/calculators/frame_annotation_tracker_test.cc
rename to mediapipe/modules/objectron/calculators/frame_annotation_tracker_test.cc
index 94d64bbbe..d155f8e73 100644
--- a/mediapipe/graphs/object_detection_3d/calculators/frame_annotation_tracker_test.cc
+++ b/mediapipe/modules/objectron/calculators/frame_annotation_tracker_test.cc
@@ -12,13 +12,13 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
-#include "mediapipe/graphs/object_detection_3d/calculators/frame_annotation_tracker.h" +#include "mediapipe/modules/objectron/calculators/frame_annotation_tracker.h" #include "absl/container/flat_hash_set.h" #include "mediapipe/framework/port/gmock.h" #include "mediapipe/framework/port/gtest.h" #include "mediapipe/framework/port/logging.h" -#include "mediapipe/graphs/object_detection_3d/calculators/annotation_data.pb.h" +#include "mediapipe/modules/objectron/calculators/annotation_data.pb.h" #include "mediapipe/util/tracking/box_tracker.pb.h" namespace mediapipe { diff --git a/mediapipe/graphs/object_detection_3d/calculators/landmarks_to_frame_annotation_calculator.cc b/mediapipe/modules/objectron/calculators/landmarks_to_frame_annotation_calculator.cc similarity index 97% rename from mediapipe/graphs/object_detection_3d/calculators/landmarks_to_frame_annotation_calculator.cc rename to mediapipe/modules/objectron/calculators/landmarks_to_frame_annotation_calculator.cc index a5203c084..3bc849f00 100644 --- a/mediapipe/graphs/object_detection_3d/calculators/landmarks_to_frame_annotation_calculator.cc +++ b/mediapipe/modules/objectron/calculators/landmarks_to_frame_annotation_calculator.cc @@ -16,7 +16,7 @@ #include "mediapipe/framework/formats/landmark.pb.h" #include "mediapipe/framework/port/ret_check.h" #include "mediapipe/framework/port/status.h" -#include "mediapipe/graphs/object_detection_3d/calculators/annotation_data.pb.h" +#include "mediapipe/modules/objectron/calculators/annotation_data.pb.h" namespace mediapipe { diff --git a/mediapipe/graphs/object_detection_3d/calculators/lift_2d_frame_annotation_to_3d_calculator.cc b/mediapipe/modules/objectron/calculators/lift_2d_frame_annotation_to_3d_calculator.cc similarity index 94% rename from mediapipe/graphs/object_detection_3d/calculators/lift_2d_frame_annotation_to_3d_calculator.cc rename to mediapipe/modules/objectron/calculators/lift_2d_frame_annotation_to_3d_calculator.cc index 0f1a9966a..1fd7ee0ec 100644 --- a/mediapipe/graphs/object_detection_3d/calculators/lift_2d_frame_annotation_to_3d_calculator.cc +++ b/mediapipe/modules/objectron/calculators/lift_2d_frame_annotation_to_3d_calculator.cc @@ -23,10 +23,10 @@ #include "mediapipe/framework/calculator_framework.h" #include "mediapipe/framework/deps/file_path.h" #include "mediapipe/framework/port/ret_check.h" -#include "mediapipe/graphs/object_detection_3d/calculators/annotation_data.pb.h" -#include "mediapipe/graphs/object_detection_3d/calculators/decoder.h" -#include "mediapipe/graphs/object_detection_3d/calculators/lift_2d_frame_annotation_to_3d_calculator.pb.h" -#include "mediapipe/graphs/object_detection_3d/calculators/tensor_util.h" +#include "mediapipe/modules/objectron/calculators/annotation_data.pb.h" +#include "mediapipe/modules/objectron/calculators/decoder.h" +#include "mediapipe/modules/objectron/calculators/lift_2d_frame_annotation_to_3d_calculator.pb.h" +#include "mediapipe/modules/objectron/calculators/tensor_util.h" namespace { constexpr char kInputStreamTag[] = "FRAME_ANNOTATION"; diff --git a/mediapipe/graphs/object_detection_3d/calculators/lift_2d_frame_annotation_to_3d_calculator.proto b/mediapipe/modules/objectron/calculators/lift_2d_frame_annotation_to_3d_calculator.proto similarity index 91% rename from mediapipe/graphs/object_detection_3d/calculators/lift_2d_frame_annotation_to_3d_calculator.proto rename to mediapipe/modules/objectron/calculators/lift_2d_frame_annotation_to_3d_calculator.proto index ccbdf2ee4..5e73ea600 100644 --- 
+++ b/mediapipe/modules/objectron/calculators/lift_2d_frame_annotation_to_3d_calculator.proto
@@ -19,7 +19,7 @@ syntax = "proto2";
 
 package mediapipe;
 
 import "mediapipe/framework/calculator.proto";
-import "mediapipe/graphs/object_detection_3d/calculators/belief_decoder_config.proto";
+import "mediapipe/modules/objectron/calculators/belief_decoder_config.proto";
 
 message Lift2DFrameAnnotationTo3DCalculatorOptions {
   extend CalculatorOptions {
diff --git a/mediapipe/graphs/object_detection_3d/calculators/model.cc b/mediapipe/modules/objectron/calculators/model.cc
similarity index 97%
rename from mediapipe/graphs/object_detection_3d/calculators/model.cc
rename to mediapipe/modules/objectron/calculators/model.cc
index e664aebb3..40aca39d9 100644
--- a/mediapipe/graphs/object_detection_3d/calculators/model.cc
+++ b/mediapipe/modules/objectron/calculators/model.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#include "mediapipe/graphs/object_detection_3d/calculators/model.h"
+#include "mediapipe/modules/objectron/calculators/model.h"
 
 #include "mediapipe/framework/port/logging.h"
 
diff --git a/mediapipe/graphs/object_detection_3d/calculators/model.h b/mediapipe/modules/objectron/calculators/model.h
similarity index 89%
rename from mediapipe/graphs/object_detection_3d/calculators/model.h
rename to mediapipe/modules/objectron/calculators/model.h
index 301b21d7a..72b5eb2f3 100644
--- a/mediapipe/graphs/object_detection_3d/calculators/model.h
+++ b/mediapipe/modules/objectron/calculators/model.h
@@ -12,12 +12,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#ifndef MEDIAPIPE_GRAPHS_OBJECT_DETECTION_3D_MODEL_H_
-#define MEDIAPIPE_GRAPHS_OBJECT_DETECTION_3D_MODEL_H_
+#ifndef MEDIAPIPE_MODULES_OBJECTRON_CALCULATORS_MODEL_H_
+#define MEDIAPIPE_MODULES_OBJECTRON_CALCULATORS_MODEL_H_
 
-#include "mediapipe/graphs/object_detection_3d/calculators/annotation_data.pb.h"
-#include "mediapipe/graphs/object_detection_3d/calculators/object.pb.h"
-#include "mediapipe/graphs/object_detection_3d/calculators/types.h"
+#include "mediapipe/modules/objectron/calculators/annotation_data.pb.h"
+#include "mediapipe/modules/objectron/calculators/object.pb.h"
+#include "mediapipe/modules/objectron/calculators/types.h"
 
 namespace mediapipe {
 
@@ -89,4 +89,4 @@ class Model {
 
 }  // namespace mediapipe
 
-#endif  // MEDIAPIPE_GRAPHS_OBJECT_DETECTION_3D_MODEL_H_
+#endif  // MEDIAPIPE_MODULES_OBJECTRON_CALCULATORS_MODEL_H_
diff --git a/mediapipe/graphs/object_detection_3d/calculators/object.proto b/mediapipe/modules/objectron/calculators/object.proto
similarity index 100%
rename from mediapipe/graphs/object_detection_3d/calculators/object.proto
rename to mediapipe/modules/objectron/calculators/object.proto
diff --git a/mediapipe/graphs/object_detection_3d/calculators/tensor_util.cc b/mediapipe/modules/objectron/calculators/tensor_util.cc
similarity index 62%
rename from mediapipe/graphs/object_detection_3d/calculators/tensor_util.cc
rename to mediapipe/modules/objectron/calculators/tensor_util.cc
index 728b25f4c..0004edd80 100644
--- a/mediapipe/graphs/object_detection_3d/calculators/tensor_util.cc
+++ b/mediapipe/modules/objectron/calculators/tensor_util.cc
@@ -12,7 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
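 
 // (Both converters in this file wrap the tensor's memory in a cv::Mat without
 // copying: a 1xHxWxC float32 tensor becomes a two-dimensional HxW Mat whose
 // element type packs the C channels, i.e. CV_MAKETYPE(CV_32F, C).)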
-#include "mediapipe/graphs/object_detection_3d/calculators/tensor_util.h" +#include "mediapipe/modules/objectron/calculators/tensor_util.h" #include "mediapipe/framework/port/logging.h" @@ -30,4 +30,19 @@ cv::Mat ConvertTfliteTensorToCvMat(const TfLiteTensor& tensor) { return cv::Mat(dims, sizes, type, reinterpret_cast(tensor.data.f)); } +cv::Mat ConvertTensorToCvMat(const mediapipe::Tensor& tensor) { + // Check tensor is BxCxWxH (size = 4) and the batch size is one(data[0] = 1) + CHECK(tensor.shape().dims.size() == 4 && tensor.shape().dims[0] == 1); + CHECK_EQ(mediapipe::Tensor::ElementType::kFloat32 == tensor.element_type(), + true) + << "tensor type is not float"; + + const size_t num_output_channels = tensor.shape().dims[3]; + const int dims = 2; + const int sizes[] = {tensor.shape().dims[1], tensor.shape().dims[2]}; + const int type = CV_MAKETYPE(CV_32F, num_output_channels); + auto cpu_view = tensor.GetCpuReadView(); + return cv::Mat(dims, sizes, type, const_cast(cpu_view.buffer())); +} + } // namespace mediapipe diff --git a/mediapipe/graphs/object_detection_3d/calculators/tensor_util.h b/mediapipe/modules/objectron/calculators/tensor_util.h similarity index 70% rename from mediapipe/graphs/object_detection_3d/calculators/tensor_util.h rename to mediapipe/modules/objectron/calculators/tensor_util.h index 0fb5b4933..0b262098d 100644 --- a/mediapipe/graphs/object_detection_3d/calculators/tensor_util.h +++ b/mediapipe/modules/objectron/calculators/tensor_util.h @@ -12,9 +12,10 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifndef MEDIAPIPE_GRAPHS_OBJECT_DETECTION_3D_TENSOR_UTIL_H_ -#define MEDIAPIPE_GRAPHS_OBJECT_DETECTION_3D_TENSOR_UTIL_H_ +#ifndef MEDIAPIPE_MODULES_OBJECTRON_CALCULATORS_TENSOR_UTIL_H_ +#define MEDIAPIPE_MODULES_OBJECTRON_CALCULATORS_TENSOR_UTIL_H_ +#include "mediapipe/framework/formats/tensor.h" #include "mediapipe/framework/port/opencv_core_inc.h" #include "tensorflow/lite/interpreter.h" @@ -22,6 +23,9 @@ namespace mediapipe { // Converts a single channel tflite tensor to a grayscale image cv::Mat ConvertTfliteTensorToCvMat(const TfLiteTensor& tensor); + +// Converts a single channel tensor to grayscale image +cv::Mat ConvertTensorToCvMat(const mediapipe::Tensor& tensor); } // namespace mediapipe -#endif // MEDIAPIPE_GRAPHS_OBJECT_DETECTION_3D_TENSOR_UTIL_H_ +#endif // MEDIAPIPE_MODULES_OBJECTRON_CALCULATORS_TENSOR_UTIL_H_ diff --git a/mediapipe/modules/objectron/calculators/tensors_to_objects_calculator.cc b/mediapipe/modules/objectron/calculators/tensors_to_objects_calculator.cc new file mode 100644 index 000000000..2147ea5ce --- /dev/null +++ b/mediapipe/modules/objectron/calculators/tensors_to_objects_calculator.cc @@ -0,0 +1,211 @@ +// Copyright 2020 The MediaPipe Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +#include <memory> +#include <string> +#include <vector> + +#include "Eigen/Dense" +#include "absl/memory/memory.h" +#include "absl/strings/str_format.h" +#include "absl/types/span.h" +#include "mediapipe/framework/calculator_framework.h" +#include "mediapipe/framework/deps/file_path.h" +#include "mediapipe/framework/formats/tensor.h" +#include "mediapipe/framework/port/opencv_core_inc.h" +#include "mediapipe/framework/port/ret_check.h" +#include "mediapipe/modules/objectron/calculators/annotation_data.pb.h" +#include "mediapipe/modules/objectron/calculators/belief_decoder_config.pb.h" +#include "mediapipe/modules/objectron/calculators/decoder.h" +#include "mediapipe/modules/objectron/calculators/tensor_util.h" +#include "mediapipe/modules/objectron/calculators/tensors_to_objects_calculator.pb.h" + +namespace { +constexpr char kInputStreamTag[] = "TENSORS"; +constexpr char kOutputStreamTag[] = "ANNOTATIONS"; + +// Each detected object will be assigned a unique id that starts from 1. +static int object_id = 0; + +inline int GetNextObjectId() { return ++object_id; } +} // namespace + +namespace mediapipe { + +// Converts result Tensors from the deep pursuit 3d model into FrameAnnotation. +// +// Input: +// TENSORS - Vector of Tensor of type kFloat32. +// Output: +// ANNOTATIONS - Result FrameAnnotation. +// +// Usage example: +// node { +// calculator: "TensorsToObjectsCalculator" +// input_stream: "TENSORS:tensors" +// output_stream: "ANNOTATIONS:annotations" +// } +class TensorsToObjectsCalculator : public CalculatorBase { + public: + static mediapipe::Status GetContract(CalculatorContract* cc); + + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; + + private: + mediapipe::Status ProcessCPU(CalculatorContext* cc, + FrameAnnotation* output_objects); + mediapipe::Status LoadOptions(CalculatorContext* cc); + // Takes point_3d in FrameAnnotation, projects to 2D, and overwrites the + // point_2d field with the projection. + void Project3DTo2D(bool portrait, FrameAnnotation* annotation) const; + // Increment and assign object ID for each detected object. + // In a single MediaPipe session, the IDs are unique. + // Also assign timestamp for the FrameAnnotation to be the input packet + // timestamp.
+ void AssignObjectIdAndTimestamp(int64 timestamp_us, + FrameAnnotation* annotation); + + int num_classes_ = 0; + int num_keypoints_ = 0; + + ::mediapipe::TensorsToObjectsCalculatorOptions options_; + std::unique_ptr<Decoder> decoder_; + Eigen::Matrix<float, 4, 4, Eigen::RowMajor> projection_matrix_; +}; +REGISTER_CALCULATOR(TensorsToObjectsCalculator); + +mediapipe::Status TensorsToObjectsCalculator::GetContract( + CalculatorContract* cc) { + RET_CHECK(!cc->Inputs().GetTags().empty()); + RET_CHECK(!cc->Outputs().GetTags().empty()); + + if (cc->Inputs().HasTag(kInputStreamTag)) { + cc->Inputs().Tag(kInputStreamTag).Set<std::vector<Tensor>>(); + } + + if (cc->Outputs().HasTag(kOutputStreamTag)) { + cc->Outputs().Tag(kOutputStreamTag).Set<FrameAnnotation>(); + } + return mediapipe::OkStatus(); +} + +mediapipe::Status TensorsToObjectsCalculator::Open(CalculatorContext* cc) { + MP_RETURN_IF_ERROR(LoadOptions(cc)); + // clang-format off + projection_matrix_ << + 1.5731, 0, 0, 0, + 0, 2.0975, 0, 0, + 0, 0, -1.0002, -0.2, + 0, 0, -1, 0; + // clang-format on + decoder_ = absl::make_unique<Decoder>( + BeliefDecoderConfig(options_.decoder_config())); + + return mediapipe::OkStatus(); +} + +mediapipe::Status TensorsToObjectsCalculator::Process(CalculatorContext* cc) { + if (cc->Inputs().Tag(kInputStreamTag).IsEmpty()) { + return mediapipe::OkStatus(); + } + + auto output_objects = absl::make_unique<FrameAnnotation>(); + + MP_RETURN_IF_ERROR(ProcessCPU(cc, output_objects.get())); + + // Output + if (cc->Outputs().HasTag(kOutputStreamTag)) { + cc->Outputs() + .Tag(kOutputStreamTag) + .Add(output_objects.release(), cc->InputTimestamp()); + } + + return mediapipe::OkStatus(); +} + +mediapipe::Status TensorsToObjectsCalculator::ProcessCPU( + CalculatorContext* cc, FrameAnnotation* output_objects) { + const auto& input_tensors = + cc->Inputs().Tag(kInputStreamTag).Get<std::vector<Tensor>>(); + + cv::Mat prediction_heatmap = ConvertTensorToCvMat(input_tensors[0]); + cv::Mat offsetmap = ConvertTensorToCvMat(input_tensors[1]); + + *output_objects = + decoder_->DecodeBoundingBoxKeypoints(prediction_heatmap, offsetmap); + auto status = decoder_->Lift2DTo3D(projection_matrix_, /*portrait*/ true, + output_objects); + if (!status.ok()) { + LOG(ERROR) << status; + return status; + } + Project3DTo2D(/*portrait*/ true, output_objects); + AssignObjectIdAndTimestamp(cc->InputTimestamp().Microseconds(), + output_objects); + + return mediapipe::OkStatus(); +} + +mediapipe::Status TensorsToObjectsCalculator::Close(CalculatorContext* cc) { + return mediapipe::OkStatus(); +} + +mediapipe::Status TensorsToObjectsCalculator::LoadOptions( + CalculatorContext* cc) { + // Get calculator options specified in the graph. + options_ = cc->Options<::mediapipe::TensorsToObjectsCalculatorOptions>(); + + num_classes_ = options_.num_classes(); + num_keypoints_ = options_.num_keypoints(); + + // Currently only supports 2D, where num_values_per_keypoint equals 2.
+ CHECK_EQ(options_.num_values_per_keypoint(), 2); + + return mediapipe::OkStatus(); +} + +void TensorsToObjectsCalculator::Project3DTo2D( + bool portrait, FrameAnnotation* annotation) const { + for (auto& ann : *annotation->mutable_annotations()) { + for (auto& key_point : *ann.mutable_keypoints()) { + Eigen::Vector4f point3d; + point3d << key_point.point_3d().x(), key_point.point_3d().y(), + key_point.point_3d().z(), 1.0f; + Eigen::Vector4f point3d_projection = projection_matrix_ * point3d; + float u, v; + const float inv_w = 1.0f / point3d_projection(3); + if (portrait) { + u = (point3d_projection(1) * inv_w + 1.0f) * 0.5f; + v = (point3d_projection(0) * inv_w + 1.0f) * 0.5f; + } else { + u = (point3d_projection(0) * inv_w + 1.0f) * 0.5f; + v = (1.0f - point3d_projection(1) * inv_w) * 0.5f; + } + key_point.mutable_point_2d()->set_x(u); + key_point.mutable_point_2d()->set_y(v); + } + } +} + +void TensorsToObjectsCalculator::AssignObjectIdAndTimestamp( + int64 timestamp_us, FrameAnnotation* annotation) { + for (auto& ann : *annotation->mutable_annotations()) { + ann.set_object_id(GetNextObjectId()); + } + annotation->set_timestamp(timestamp_us); +} + +} // namespace mediapipe diff --git a/mediapipe/modules/objectron/calculators/tensors_to_objects_calculator.proto b/mediapipe/modules/objectron/calculators/tensors_to_objects_calculator.proto new file mode 100644 index 000000000..8d46fce78 --- /dev/null +++ b/mediapipe/modules/objectron/calculators/tensors_to_objects_calculator.proto @@ -0,0 +1,39 @@ +// Copyright 2020 The MediaPipe Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// The option proto for the TensorsToObjectsCalculatorOptions. + +syntax = "proto2"; + +package mediapipe; + +import "mediapipe/framework/calculator.proto"; +import "mediapipe/modules/objectron/calculators/belief_decoder_config.proto"; + +message TensorsToObjectsCalculatorOptions { + extend CalculatorOptions { + optional TensorsToObjectsCalculatorOptions ext = 334361940; + } + + // The number of output classes predicted by the detection model. + optional int32 num_classes = 1; + + // The number of predicted keypoints. + optional int32 num_keypoints = 2; + // The dimension of each keypoint, e.g. number of values predicted for each + // keypoint. 
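The `Project3DTo2D` implementation above is a standard perspective divide: apply the fixed 4x4 projection matrix, divide by the resulting w, then map normalized device coordinates from [-1, 1] into [0, 1], swapping the x and y axes in portrait mode. A standalone restatement of that arithmetic (the function name is illustrative; Eigen is already a dependency of this calculator):

```cpp
#include <utility>

#include "Eigen/Dense"

// Mirrors the math in Project3DTo2D above: project, perspective-divide,
// then map NDC [-1, 1] to normalized image coordinates [0, 1].
std::pair<float, float> ProjectPointToUv(const Eigen::Matrix4f& projection,
                                         const Eigen::Vector3f& point_3d,
                                         bool portrait) {
  const Eigen::Vector4f clip = projection * point_3d.homogeneous();
  const float inv_w = 1.0f / clip(3);
  const float x_ndc = clip(0) * inv_w;
  const float y_ndc = clip(1) * inv_w;
  if (portrait) {
    // Portrait frames are rotated, so NDC y drives u and NDC x drives v.
    return {(y_ndc + 1.0f) * 0.5f, (x_ndc + 1.0f) * 0.5f};
  }
  return {(x_ndc + 1.0f) * 0.5f, (1.0f - y_ndc) * 0.5f};
}
```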
+ optional int32 num_values_per_keypoint = 3 [default = 2]; + + optional BeliefDecoderConfig decoder_config = 4; +} diff --git a/mediapipe/graphs/object_detection_3d/calculators/tflite_tensors_to_objects_calculator.cc b/mediapipe/modules/objectron/calculators/tflite_tensors_to_objects_calculator.cc similarity index 80% rename from mediapipe/graphs/object_detection_3d/calculators/tflite_tensors_to_objects_calculator.cc rename to mediapipe/modules/objectron/calculators/tflite_tensors_to_objects_calculator.cc index 3b3e692f0..55d7104bd 100644 --- a/mediapipe/graphs/object_detection_3d/calculators/tflite_tensors_to_objects_calculator.cc +++ b/mediapipe/modules/objectron/calculators/tflite_tensors_to_objects_calculator.cc @@ -24,11 +24,11 @@ #include "mediapipe/framework/deps/file_path.h" #include "mediapipe/framework/port/opencv_core_inc.h" #include "mediapipe/framework/port/ret_check.h" -#include "mediapipe/graphs/object_detection_3d/calculators/annotation_data.pb.h" -#include "mediapipe/graphs/object_detection_3d/calculators/belief_decoder_config.pb.h" -#include "mediapipe/graphs/object_detection_3d/calculators/decoder.h" -#include "mediapipe/graphs/object_detection_3d/calculators/tensor_util.h" -#include "mediapipe/graphs/object_detection_3d/calculators/tflite_tensors_to_objects_calculator.pb.h" +#include "mediapipe/modules/objectron/calculators/annotation_data.pb.h" +#include "mediapipe/modules/objectron/calculators/belief_decoder_config.pb.h" +#include "mediapipe/modules/objectron/calculators/decoder.h" +#include "mediapipe/modules/objectron/calculators/tensor_util.h" +#include "mediapipe/modules/objectron/calculators/tflite_tensors_to_objects_calculator.pb.h" #include "tensorflow/lite/interpreter.h" namespace { @@ -59,16 +59,16 @@ namespace mediapipe { // } class TfLiteTensorsToObjectsCalculator : public CalculatorBase { public: - static ::mediapipe::Status GetContract(CalculatorContract* cc); + static mediapipe::Status GetContract(CalculatorContract* cc); - ::mediapipe::Status Open(CalculatorContext* cc) override; - ::mediapipe::Status Process(CalculatorContext* cc) override; - ::mediapipe::Status Close(CalculatorContext* cc) override; + mediapipe::Status Open(CalculatorContext* cc) override; + mediapipe::Status Process(CalculatorContext* cc) override; + mediapipe::Status Close(CalculatorContext* cc) override; private: - ::mediapipe::Status ProcessCPU(CalculatorContext* cc, - FrameAnnotation* output_objects); - ::mediapipe::Status LoadOptions(CalculatorContext* cc); + mediapipe::Status ProcessCPU(CalculatorContext* cc, + FrameAnnotation* output_objects); + mediapipe::Status LoadOptions(CalculatorContext* cc); // Takes point_3d in FrameAnnotation, projects to 2D, and overwrite the // point_2d field with the projection. 
void Project3DTo2D(bool portrait, FrameAnnotation* annotation) const; @@ -88,7 +88,7 @@ class TfLiteTensorsToObjectsCalculator : public CalculatorBase { }; REGISTER_CALCULATOR(TfLiteTensorsToObjectsCalculator); -::mediapipe::Status TfLiteTensorsToObjectsCalculator::GetContract( +mediapipe::Status TfLiteTensorsToObjectsCalculator::GetContract( CalculatorContract* cc) { RET_CHECK(!cc->Inputs().GetTags().empty()); RET_CHECK(!cc->Outputs().GetTags().empty()); @@ -100,10 +100,10 @@ REGISTER_CALCULATOR(TfLiteTensorsToObjectsCalculator); if (cc->Outputs().HasTag(kOutputStreamTag)) { cc->Outputs().Tag(kOutputStreamTag).Set<FrameAnnotation>(); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToObjectsCalculator::Open( +mediapipe::Status TfLiteTensorsToObjectsCalculator::Open( CalculatorContext* cc) { MP_RETURN_IF_ERROR(LoadOptions(cc)); // clang-format off @@ -116,13 +116,13 @@ REGISTER_CALCULATOR(TfLiteTensorsToObjectsCalculator); decoder_ = absl::make_unique<Decoder>( BeliefDecoderConfig(options_.decoder_config())); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToObjectsCalculator::Process( +mediapipe::Status TfLiteTensorsToObjectsCalculator::Process( CalculatorContext* cc) { if (cc->Inputs().Tag(kInputStreamTag).IsEmpty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } auto output_objects = absl::make_unique<FrameAnnotation>(); @@ -136,10 +136,10 @@ REGISTER_CALCULATOR(TfLiteTensorsToObjectsCalculator); .Add(output_objects.release(), cc->InputTimestamp()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToObjectsCalculator::ProcessCPU( +mediapipe::Status TfLiteTensorsToObjectsCalculator::ProcessCPU( CalculatorContext* cc, FrameAnnotation* output_objects) { const auto& input_tensors = cc->Inputs().Tag(kInputStreamTag).Get<std::vector<TfLiteTensor>>(); @@ -159,15 +159,15 @@ REGISTER_CALCULATOR(TfLiteTensorsToObjectsCalculator); AssignObjectIdAndTimestamp(cc->InputTimestamp().Microseconds(), output_objects); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToObjectsCalculator::Close( +mediapipe::Status TfLiteTensorsToObjectsCalculator::Close( CalculatorContext* cc) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status TfLiteTensorsToObjectsCalculator::LoadOptions( +mediapipe::Status TfLiteTensorsToObjectsCalculator::LoadOptions( CalculatorContext* cc) { // Get calculator options specified in the graph. options_ = @@ -179,7 +179,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToObjectsCalculator); // Currently only support 2D when num_values_per_keypoint equals to 2.
CHECK_EQ(options_.num_values_per_keypoint(), 2); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } void TfLiteTensorsToObjectsCalculator::Project3DTo2D( diff --git a/mediapipe/graphs/object_detection_3d/calculators/tflite_tensors_to_objects_calculator.proto b/mediapipe/modules/objectron/calculators/tflite_tensors_to_objects_calculator.proto similarity index 93% rename from mediapipe/graphs/object_detection_3d/calculators/tflite_tensors_to_objects_calculator.proto rename to mediapipe/modules/objectron/calculators/tflite_tensors_to_objects_calculator.proto index 4adf72f1a..7237ee559 100644 --- a/mediapipe/graphs/object_detection_3d/calculators/tflite_tensors_to_objects_calculator.proto +++ b/mediapipe/modules/objectron/calculators/tflite_tensors_to_objects_calculator.proto @@ -19,7 +19,7 @@ syntax = "proto2"; package mediapipe; import "mediapipe/framework/calculator.proto"; -import "mediapipe/graphs/object_detection_3d/calculators/belief_decoder_config.proto"; +import "mediapipe/modules/objectron/calculators/belief_decoder_config.proto"; message TfLiteTensorsToObjectsCalculatorOptions { extend CalculatorOptions { diff --git a/mediapipe/graphs/object_detection_3d/calculators/types.h b/mediapipe/modules/objectron/calculators/types.h similarity index 89% rename from mediapipe/graphs/object_detection_3d/calculators/types.h rename to mediapipe/modules/objectron/calculators/types.h index db4d14728..dcc477d74 100644 --- a/mediapipe/graphs/object_detection_3d/calculators/types.h +++ b/mediapipe/modules/objectron/calculators/types.h @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. -#ifndef MEDIAPIPE_GRAPHS_OBJECT_DETECTION_3D_TYPES_H_ -#define MEDIAPIPE_GRAPHS_OBJECT_DETECTION_3D_TYPES_H_ +#ifndef MEDIAPIPE_MODULES_OBJECTRON_CALCULATORS_TYPES_H_ +#define MEDIAPIPE_MODULES_OBJECTRON_CALCULATORS_TYPES_H_ #include <vector> @@ -53,4 +53,4 @@ struct SuperPoint { } // namespace mediapipe -#endif // MEDIAPIPE_GRAPHS_OBJECT_DETECTION_3D_TYPES_H_ +#endif // MEDIAPIPE_MODULES_OBJECTRON_CALCULATORS_TYPES_H_ diff --git a/mediapipe/modules/objectron/object_detection_oid_v4_cpu.pbtxt b/mediapipe/modules/objectron/object_detection_oid_v4_cpu.pbtxt new file mode 100644 index 000000000..5f78ce690 --- /dev/null +++ b/mediapipe/modules/objectron/object_detection_oid_v4_cpu.pbtxt @@ -0,0 +1,177 @@ +# MediaPipe Objectron object detection CPU subgraph. + +type: "ObjectDetectionOidV4Subgraph" + +input_stream: "IMAGE:input_video" +input_side_packet: "LABELS_CSV:allowed_labels" +output_stream: "NORM_RECT:box_rect_from_object_detections" + +# Transforms the input image on CPU to a 300x300 image. To scale the image, by +# default it uses the STRETCH scale mode that maps the entire input image to the +# entire transformed image. As a result, image aspect ratio may be changed and +# objects in the image may be deformed (stretched or squeezed), but the object +# detection model used in this graph is agnostic to that deformation. +node: { + calculator: "ImageTransformationCalculator" + input_stream: "IMAGE:input_video" + output_stream: "IMAGE:transformed_input_video" + options: { + [mediapipe.ImageTransformationCalculatorOptions.ext] { + output_width: 300 + output_height: 300 + } + } +} + +# Converts the transformed input image on CPU into an image tensor stored as a +# TfLiteTensor.
+node { + calculator: "TfLiteConverterCalculator" + input_stream: "IMAGE:transformed_input_video" + output_stream: "TENSORS:image_tensor" +} + +# Runs a TensorFlow Lite model on GPU that takes an image tensor and outputs a +# vector of tensors representing, for instance, detection boxes/keypoints and +# scores. +node { + calculator: "TfLiteInferenceCalculator" + input_stream: "TENSORS:image_tensor" + output_stream: "TENSORS:detection_tensors" + options: { + [mediapipe.TfLiteInferenceCalculatorOptions.ext] { + model_path: "mediapipe/models/object_detection_ssd_mobilenetv2_oidv4_fp16.tflite" + } + } +} + +# Generates a single side packet containing a vector of SSD anchors based on +# the specification in the options. +node { + calculator: "SsdAnchorsCalculator" + output_side_packet: "anchors" + options: { + [mediapipe.SsdAnchorsCalculatorOptions.ext] { + num_layers: 6 + min_scale: 0.2 + max_scale: 0.95 + input_size_height: 300 + input_size_width: 300 + anchor_offset_x: 0.5 + anchor_offset_y: 0.5 + strides: 16 + strides: 32 + strides: 64 + strides: 128 + strides: 256 + strides: 512 + aspect_ratios: 1.0 + aspect_ratios: 2.0 + aspect_ratios: 0.5 + aspect_ratios: 3.0 + aspect_ratios: 0.3333 + reduce_boxes_in_lowest_layer: true + } + } +} + +# Decodes the detection tensors generated by the TensorFlow Lite model, based on +# the SSD anchors and the specification in the options, into a vector of +# detections. Each detection describes a detected object. +node { + calculator: "TfLiteTensorsToDetectionsCalculator" + input_stream: "TENSORS:detection_tensors" + input_side_packet: "ANCHORS:anchors" + output_stream: "DETECTIONS:detections" + options: { + [mediapipe.TfLiteTensorsToDetectionsCalculatorOptions.ext] { + num_classes: 195 + num_boxes: 1917 + num_coords: 4 + ignore_classes: 0 + sigmoid_score: true + apply_exponential_on_box_size: true + x_scale: 10.0 + y_scale: 10.0 + h_scale: 5.0 + w_scale: 5.0 + min_score_thresh: 0.6 + } + } +} + +# Performs non-max suppression to remove excessive detections. +node { + calculator: "NonMaxSuppressionCalculator" + input_stream: "detections" + output_stream: "suppressed_detections" + options: { + [mediapipe.NonMaxSuppressionCalculatorOptions.ext] { + min_suppression_threshold: 0.4 + max_num_detections: 1 + overlap_type: INTERSECTION_OVER_UNION + return_empty_detections: true + } + } +} + +# Maps detection label IDs to the corresponding label text. The label map is +# provided in the label_map_path option. +node { + calculator: "DetectionLabelIdToTextCalculator" + input_stream: "suppressed_detections" + output_stream: "labeled_detections" + options: { + [mediapipe.DetectionLabelIdToTextCalculatorOptions.ext] { + label_map_path: "mediapipe/models/object_detection_oidv4_labelmap.pbtxt" + } + } +} + +node { + calculator: "FilterDetectionCalculator" + input_stream: "DETECTIONS:labeled_detections" + output_stream: "DETECTIONS:filtered_detections" + input_side_packet: "LABELS_CSV:allowed_labels" + options: { + [mediapipe.FilterDetectionCalculatorOptions.ext]: { + min_score: 0.4 + } + } +} + +# Extracts image size from the input images. +node { + calculator: "ImagePropertiesCalculator" + input_stream: "IMAGE:input_video" + output_stream: "SIZE:image_size" +} + +# Converts results of box detection into a rectangle (normalized by image size) +# that encloses the box. 
+node { + calculator: "DetectionsToRectsCalculator" + input_stream: "DETECTIONS:filtered_detections" + input_stream: "IMAGE_SIZE:image_size" + output_stream: "NORM_RECT:box_rect" + options: { + [mediapipe.DetectionsToRectsCalculatorOptions.ext] { + output_zero_rect_for_empty_detections: true + } + } +} + +# Expands the rectangle that contains the box so that it's likely to cover the +# entire box. +node { + calculator: "RectTransformationCalculator" + input_stream: "NORM_RECT:box_rect" + input_stream: "IMAGE_SIZE:image_size" + output_stream: "box_rect_from_object_detections" + options: { + [mediapipe.RectTransformationCalculatorOptions.ext] { + scale_x: 1.5 + scale_y: 1.5 + } + } +} diff --git a/mediapipe/graphs/object_detection_3d/subgraphs/object_detection_oid_v4_gpu.pbtxt b/mediapipe/modules/objectron/object_detection_oid_v4_gpu.pbtxt similarity index 77% rename from mediapipe/graphs/object_detection_3d/subgraphs/object_detection_oid_v4_gpu.pbtxt rename to mediapipe/modules/objectron/object_detection_oid_v4_gpu.pbtxt index 7dc01e6e1..9eadf7cb5 100644 --- a/mediapipe/graphs/object_detection_3d/subgraphs/object_detection_oid_v4_gpu.pbtxt +++ b/mediapipe/modules/objectron/object_detection_oid_v4_gpu.pbtxt @@ -1,12 +1,12 @@ -# MediaPipe Objectron object bounding box detection subgraph. +# MediaPipe Objectron object detection GPU subgraph. -type: "ObjectDetectionSubgraph" +type: "ObjectDetectionOidV4Subgraph" -input_stream: "input_video" -input_side_packet: "allowed_labels" +input_stream: "IMAGE_GPU:input_video" +input_side_packet: "LABELS_CSV:allowed_labels" output_stream: "NORM_RECT:box_rect_from_object_detections" -# Transforms the input image on GPU to a 320x320 image. To scale the image, by +# Transforms the input image on GPU to a 300x300 image. To scale the image, by # default it uses the STRETCH scale mode that maps the entire input image to the # entire transformed image. 
As a result, image aspect ratio may be changed and # objects in the image may be deformed (stretched or squeezed), but the object @@ -15,8 +15,8 @@ node: { calculator: "ImageTransformationCalculator" input_stream: "IMAGE_GPU:input_video" output_stream: "IMAGE_GPU:transformed_input_video" - node_options: { - [type.googleapis.com/mediapipe.ImageTransformationCalculatorOptions] { + options: { + [mediapipe.ImageTransformationCalculatorOptions.ext] { output_width: 300 output_height: 300 } @@ -38,9 +38,9 @@ node { calculator: "TfLiteInferenceCalculator" input_stream: "TENSORS_GPU:image_tensor" output_stream: "TENSORS_GPU:detection_tensors" - node_options: { - [type.googleapis.com/mediapipe.TfLiteInferenceCalculatorOptions] { - model_path: "object_detection_ssd_mobilenetv2_oidv4_fp16.tflite" + options: { + [mediapipe.TfLiteInferenceCalculatorOptions.ext] { + model_path: "mediapipe/models/object_detection_ssd_mobilenetv2_oidv4_fp16.tflite" } } } @@ -50,8 +50,8 @@ node { node { calculator: "SsdAnchorsCalculator" output_side_packet: "anchors" - node_options: { - [type.googleapis.com/mediapipe.SsdAnchorsCalculatorOptions] { + options: { + [mediapipe.SsdAnchorsCalculatorOptions.ext] { num_layers: 6 min_scale: 0.2 max_scale: 0.95 @@ -83,8 +83,8 @@ node { input_stream: "TENSORS_GPU:detection_tensors" input_side_packet: "ANCHORS:anchors" output_stream: "DETECTIONS:detections" - node_options: { - [type.googleapis.com/mediapipe.TfLiteTensorsToDetectionsCalculatorOptions] { + options: { + [mediapipe.TfLiteTensorsToDetectionsCalculatorOptions.ext] { num_classes: 195 num_boxes: 1917 num_coords: 4 @@ -105,8 +105,8 @@ node { calculator: "NonMaxSuppressionCalculator" input_stream: "detections" output_stream: "suppressed_detections" - node_options: { - [type.googleapis.com/mediapipe.NonMaxSuppressionCalculatorOptions] { + options: { + [mediapipe.NonMaxSuppressionCalculatorOptions.ext] { min_suppression_threshold: 0.4 max_num_detections: 1 overlap_type: INTERSECTION_OVER_UNION @@ -121,9 +121,9 @@ node { calculator: "DetectionLabelIdToTextCalculator" input_stream: "suppressed_detections" output_stream: "labeled_detections" - node_options: { - [type.googleapis.com/mediapipe.DetectionLabelIdToTextCalculatorOptions] { - label_map_path: "object_detection_oidv4_labelmap.pbtxt" + options: { + [mediapipe.DetectionLabelIdToTextCalculatorOptions.ext] { + label_map_path: "mediapipe/models/object_detection_oidv4_labelmap.pbtxt" } } } @@ -133,8 +133,8 @@ node { input_stream: "DETECTIONS:labeled_detections" output_stream: "DETECTIONS:filtered_detections" input_side_packet: "LABELS_CSV:allowed_labels" - node_options: { - [type.googleapis.com/mediapipe.FilterDetectionCalculatorOptions]: { + options: { + [mediapipe.FilterDetectionCalculatorOptions.ext]: { min_score: 0.4 } } @@ -154,8 +154,8 @@ node { input_stream: "DETECTIONS:filtered_detections" input_stream: "IMAGE_SIZE:image_size" output_stream: "NORM_RECT:box_rect" - node_options: { - [type.googleapis.com/mediapipe.DetectionsToRectsCalculatorOptions] { + options: { + [mediapipe.DetectionsToRectsCalculatorOptions.ext] { output_zero_rect_for_empty_detections: true } } @@ -168,8 +168,8 @@ node { input_stream: "NORM_RECT:box_rect" input_stream: "IMAGE_SIZE:image_size" output_stream: "box_rect_from_object_detections" - node_options: { - [type.googleapis.com/mediapipe.RectTransformationCalculatorOptions] { + options: { + [mediapipe.RectTransformationCalculatorOptions.ext] { scale_x: 1.5 scale_y: 1.5 } diff --git a/mediapipe/modules/objectron/objectron_cpu.pbtxt 
b/mediapipe/modules/objectron/objectron_cpu.pbtxt new file mode 100644 index 000000000..c903b3b41 --- /dev/null +++ b/mediapipe/modules/objectron/objectron_cpu.pbtxt @@ -0,0 +1,112 @@ +# MediaPipe Objectron on CPU that produces 3D bounding boxes for objects. +input_stream: "IMAGE:input_video" +# TfLite model for 3D bounding box landmark prediction. +input_side_packet: "MODEL:box_landmark_model" +# Allowed category labels, e.g. Footwear, Coffee cup, Mug, Chair, Camera +input_side_packet: "LABELS_CSV:allowed_labels" +# Bounding box landmarks topology definition. +# The numbers are indices in the box_landmarks list. +# +# 3 + + + + + + + + 7 +# +\ +\ UP +# + \ + \ +# + \ + \ | +# + 4 + + + + + + + + 8 | y +# + + + + | +# + + + + | +# + + (0) + + .------- x +# + + + + \ +# 1 + + + + + + + + 5 + \ +# \ + \ + \ z +# \ + \ + \ +# \+ \+ +# 2 + + + + + + + + 6 +# +output_stream: "LANDMARKS:box_landmarks" +# Crop rectangle derived from bounding box landmarks. +output_stream: "NORM_RECT:box_rect" + + +# Caches a box-presence decision fed back from boxLandmarkSubgraph, and upon +# the arrival of the next input image sends out the cached decision with the +# timestamp replaced by that of the input image, essentially generating a packet +# that carries the previous box-presence decision. Note that upon the arrival +# of the very first input image, an empty packet is sent out to jump start the +# feedback loop. +node { + calculator: "PreviousLoopbackCalculator" + input_stream: "MAIN:input_video" + input_stream: "LOOP:box_presence" + input_stream_info: { + tag_index: "LOOP" + back_edge: true + } + output_stream: "PREV_LOOP:prev_box_presence" +} + +# Drops the incoming image if boxLandmarkSubgraph was able to identify box +# presence in the previous image. Otherwise, passes the incoming image through +# to trigger a new round of box detection in boxDetectionSubgraph. +node { + calculator: "GateCalculator" + input_stream: "input_video" + input_stream: "DISALLOW:prev_box_presence" + output_stream: "box_detection_input_video" + + options: { + [mediapipe.GateCalculatorOptions.ext] { + empty_packets_as_allow: true + } + } +} + +# Subgraph that detects boxes (see object_detection_oid_v4_cpu.pbtxt). +node { + calculator: "ObjectDetectionOidV4Subgraph" + input_stream: "IMAGE:box_detection_input_video" + input_side_packet: "LABELS_CSV:allowed_labels" + output_stream: "NORM_RECT:box_rect_from_object_detections" +} + +# Subgraph that localizes box landmarks (see box_landmark_gpu.pbtxt). +node { + calculator: "BoxLandmarkSubgraph" + input_stream: "IMAGE:input_video" + input_stream: "NORM_RECT:box_rect" + input_side_packet: "MODEL:box_landmark_model" + output_stream: "LANDMARKS:box_landmarks" + output_stream: "NORM_RECT:box_rect_from_landmarks" + output_stream: "PRESENCE:box_presence" +} + +# Caches a box rectangle fed back from boxLandmarkSubgraph, and upon the +# arrival of the next input image sends out the cached rectangle with the +# timestamp replaced by that of the input image, essentially generating a packet +# that carries the previous box rectangle. Note that upon the arrival of the +# very first input image, an empty packet is sent out to jump start the +# feedback loop.
+node { + calculator: "PreviousLoopbackCalculator" + input_stream: "MAIN:input_video" + input_stream: "LOOP:box_rect_from_landmarks" + input_stream_info: { + tag_index: "LOOP" + back_edge: true + } + output_stream: "PREV_LOOP:prev_box_rect_from_landmarks" +} + +# Merges a stream of box rectangles generated by ObjectDetectionSubgraph and that +# generated by BoxLandmarkSubgraph into a single output stream by selecting +# between one of the two streams. The former is selected if the incoming packet +# is not empty, i.e., box detection is performed on the current image by +# BoxDetectionSubgraph (because BoxLandmarkSubgraph could not identify box +# presence in the previous image). Otherwise, the latter is selected, which is +# never empty because BoxLandmarkSubgraphs processes all images (that went +# through FlowLimiterCaculator). +node { + calculator: "MergeCalculator" + input_stream: "box_rect_from_object_detections" + input_stream: "prev_box_rect_from_landmarks" + output_stream: "box_rect" +} diff --git a/mediapipe/graphs/object_detection_3d/subgraphs/objectron_detection_gpu.pbtxt b/mediapipe/modules/objectron/objectron_detection_1stage_gpu.pbtxt similarity index 85% rename from mediapipe/graphs/object_detection_3d/subgraphs/objectron_detection_gpu.pbtxt rename to mediapipe/modules/objectron/objectron_detection_1stage_gpu.pbtxt index ad0d3653b..96d4eec7a 100644 --- a/mediapipe/graphs/object_detection_3d/subgraphs/objectron_detection_gpu.pbtxt +++ b/mediapipe/modules/objectron/objectron_detection_1stage_gpu.pbtxt @@ -12,8 +12,8 @@ node: { calculator: "ImageTransformationCalculator" input_stream: "IMAGE_GPU:input_video" output_stream: "IMAGE_GPU:transformed_input_video" - node_options: { - [type.googleapis.com/mediapipe.ImageTransformationCalculatorOptions] { + options: { + [mediapipe.ImageTransformationCalculatorOptions.ext] { output_width: 480 output_height: 640 scale_mode: FIT @@ -34,8 +34,8 @@ node { node { calculator: "TfLiteCustomOpResolverCalculator" output_side_packet: "opresolver" - node_options: { - [type.googleapis.com/mediapipe.TfLiteCustomOpResolverCalculatorOptions] { + options: { + [mediapipe.TfLiteCustomOpResolverCalculatorOptions.ext] { use_gpu: true } } @@ -49,8 +49,8 @@ node { input_stream: "TENSORS_GPU:image_tensor" output_stream: "TENSORS:detection_tensors" input_side_packet: "CUSTOM_OP_RESOLVER:opresolver" - node_options: { - [type.googleapis.com/mediapipe.TfLiteInferenceCalculatorOptions] { + options: { + [mediapipe.TfLiteInferenceCalculatorOptions.ext] { model_path: "object_detection_3d.tflite" } } @@ -64,8 +64,8 @@ node { calculator: "TfLiteTensorsToObjectsCalculator" input_stream: "TENSORS:detection_tensors" output_stream: "ANNOTATIONS:objects" - node_options: { - [type.googleapis.com/mediapipe.TfLiteTensorsToObjectsCalculatorOptions] { + options: { + [mediapipe.TfLiteTensorsToObjectsCalculatorOptions.ext] { num_classes: 1 num_keypoints: 9 decoder_config { diff --git a/mediapipe/modules/objectron/objectron_gpu.pbtxt b/mediapipe/modules/objectron/objectron_gpu.pbtxt new file mode 100644 index 000000000..b76c4039b --- /dev/null +++ b/mediapipe/modules/objectron/objectron_gpu.pbtxt @@ -0,0 +1,92 @@ +# MediaPipe Objectron on GPU that produces 3D bounding boxes for objects. + +# Input/Output streams and input side packets. +# Note that the input video is assumed to have aspect ratio 3:4 (width:height). +input_stream: "IMAGE_GPU:input_video" +# Allowed category labels, e.g. 
Footwear, Coffee cup, Mug, Chair, Camera +input_side_packet: "LABELS_CSV:allowed_labels" +output_stream: "FRAME_ANNOTATION:lifted_objects" + + +# Caches a box-presence decision fed back from boxLandmarkSubgraph, and upon +# the arrival of the next input image sends out the cached decision with the +# timestamp replaced by that of the input image, essentially generating a packet +# that carries the previous box-presence decision. Note that upon the arrival +# of the very first input image, an empty packet is sent out to jump start the +# feedback loop. +node { + calculator: "PreviousLoopbackCalculator" + input_stream: "MAIN:input_video" + input_stream: "LOOP:box_presence" + input_stream_info: { + tag_index: "LOOP" + back_edge: true + } + output_stream: "PREV_LOOP:prev_box_presence" +} + +# Drops the incoming image if boxLandmarkSubgraph was able to identify box +# presence in the previous image. Otherwise, passes the incoming image through +# to trigger a new round of box detection in boxDetectionSubgraph. +node { + calculator: "GateCalculator" + input_stream: "input_video" + input_stream: "DISALLOW:prev_box_presence" + output_stream: "detection_input_video" + + options: { + [mediapipe.GateCalculatorOptions.ext] { + empty_packets_as_allow: true + } + } +} + +# Subgraph that performs 2D object detection. +node { + calculator: "ObjectDetectionOidV4Subgraph" + input_stream: "IMAGE_GPU:detection_input_video" + input_side_packet: "LABELS_CSV:allowed_labels" + output_stream: "NORM_RECT:box_rect_from_object_detections" +} + +# Subgraph that localizes box landmarks. +node { + calculator: "BoxLandmarkSubgraph" + input_stream: "IMAGE:input_video" + input_stream: "NORM_RECT:box_rect" + output_stream: "FRAME_ANNOTATION:lifted_objects" + output_stream: "NORM_RECT:box_rect_from_landmarks" + output_stream: "PRESENCE:box_presence" +} + +# Caches a box rectangle fed back from boxLandmarkSubgraph, and upon the +# arrival of the next input image sends out the cached rectangle with the +# timestamp replaced by that of the input image, essentially generating a packet +# that carries the previous box rectangle. Note that upon the arrival of the +# very first input image, an empty packet is sent out to jump start the +# feedback loop. +node { + calculator: "PreviousLoopbackCalculator" + input_stream: "MAIN:input_video" + input_stream: "LOOP:box_rect_from_landmarks" + input_stream_info: { + tag_index: "LOOP" + back_edge: true + } + output_stream: "PREV_LOOP:prev_box_rect_from_landmarks" +} + +# Merges a stream of box rectangles generated by boxDetectionSubgraph and that +# generated by boxLandmarkSubgraph into a single output stream by selecting +# between one of the two streams. The former is selected if the incoming packet +# is not empty, i.e., box detection is performed on the current image by +# boxDetectionSubgraph (because boxLandmarkSubgraph could not identify box +# presence in the previous image). Otherwise, the latter is selected, which is +# never empty because boxLandmarkSubgraph processes all images (that went +# through FlowLimiterCalculator).
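The loopback/gate/merge wiring spelled out in these comments (here and in objectron_cpu.pbtxt above) implements a detect-once-then-track policy: full object detection runs only on frames where the landmark subgraph failed to find a box on the previous frame. A rough per-frame restatement of that control flow in plain C++; every type and function below is a hypothetical stand-in for the corresponding packet or subgraph, not MediaPipe API:

```cpp
// Hypothetical stand-ins for the packets and subgraphs named above.
struct Frame {};
struct NormalizedRect {};
struct FrameAnnotation {};
struct LandmarkResult {
  FrameAnnotation lifted_objects;
  NormalizedRect roi;
  bool presence;
};
NormalizedRect DetectBox(const Frame&);  // ObjectDetectionOidV4Subgraph
LandmarkResult LocalizeBox(const Frame&, const NormalizedRect&);  // BoxLandmarkSubgraph

struct TrackState {
  bool box_presence = false;  // the LOOP packet is empty on the first frame
  NormalizedRect prev_rect_from_landmarks;
};

FrameAnnotation ProcessFrame(const Frame& frame, TrackState& state) {
  // GateCalculator: run detection only if the previous frame had no box;
  // MergeCalculator: otherwise reuse the rectangle fed back over the back edge.
  const NormalizedRect box_rect = state.box_presence
                                      ? state.prev_rect_from_landmarks
                                      : DetectBox(frame);
  const LandmarkResult result = LocalizeBox(frame, box_rect);
  state.box_presence = result.presence;  // becomes PREV_LOOP next frame
  state.prev_rect_from_landmarks = result.roi;
  return result.lifted_objects;
}
```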
+node { + calculator: "MergeCalculator" + input_stream: "box_rect_from_object_detections" + input_stream: "prev_box_rect_from_landmarks" + output_stream: "box_rect" +} diff --git a/mediapipe/graphs/object_detection_3d/subgraphs/objectron_tracking_gpu.pbtxt b/mediapipe/modules/objectron/objectron_tracking_1stage_gpu.pbtxt similarity index 90% rename from mediapipe/graphs/object_detection_3d/subgraphs/objectron_tracking_gpu.pbtxt rename to mediapipe/modules/objectron/objectron_tracking_1stage_gpu.pbtxt index b6c778c44..5b33f6055 100644 --- a/mediapipe/graphs/object_detection_3d/subgraphs/objectron_tracking_gpu.pbtxt +++ b/mediapipe/modules/objectron/objectron_tracking_1stage_gpu.pbtxt @@ -18,8 +18,8 @@ node: { calculator: "ImageTransformationCalculator" input_stream: "IMAGE_GPU:input_video" output_stream: "IMAGE_GPU:downscaled_input_video" - node_options: { - [type.googleapis.com/mediapipe.ImageTransformationCalculatorOptions] { + options: { + [mediapipe.ImageTransformationCalculatorOptions.ext] { output_width: 240 output_height: 320 } @@ -40,8 +40,8 @@ node: { output_stream: "CAMERA:camera_motion" output_stream: "FLOW:region_flow" - node_options: { - [type.googleapis.com/mediapipe.MotionAnalysisCalculatorOptions]: { + options: { + [mediapipe.MotionAnalysisCalculatorOptions.ext]: { analysis_options { analysis_policy: ANALYSIS_POLICY_CAMERA_MOBILE flow_options { @@ -76,8 +76,8 @@ node: { input_stream: "CAMERA:camera_motion" output_stream: "TRACKING:tracking_data" - node_options: { - [type.googleapis.com/mediapipe.FlowPackagerCalculatorOptions]: { + options: { + [mediapipe.FlowPackagerCalculatorOptions.ext]: { flow_packager_options: { binary_tracking_data_support: false } @@ -116,8 +116,8 @@ node: { } } - node_options: { - [type.googleapis.com/mediapipe.BoxTrackerCalculatorOptions]: { + options: { + [mediapipe.BoxTrackerCalculatorOptions.ext]: { tracker_options: { track_step_options { track_object_and_camera: true @@ -139,8 +139,8 @@ node { input_stream: "TRACKED_BOXES:boxes" output_stream: "TRACKED_FRAME_ANNOTATION:tracked_objects" output_stream: "CANCEL_OBJECT_ID:cancel_object_id" - node_options: { - [type.googleapis.com/mediapipe.FrameAnnotationTrackerCalculatorOptions] { + options: { + [mediapipe.FrameAnnotationTrackerCalculatorOptions.ext] { img_width: 240 img_height: 320 iou_threshold: 0.1 diff --git a/mediapipe/modules/palm_detection/palm_detection_cpu.pbtxt b/mediapipe/modules/palm_detection/palm_detection_cpu.pbtxt index 44d026cf4..706722018 100644 --- a/mediapipe/modules/palm_detection/palm_detection_cpu.pbtxt +++ b/mediapipe/modules/palm_detection/palm_detection_cpu.pbtxt @@ -28,6 +28,7 @@ node { min: -1.0 max: 1.0 } + border_mode: BORDER_ZERO } } } diff --git a/mediapipe/modules/palm_detection/palm_detection_gpu.pbtxt b/mediapipe/modules/palm_detection/palm_detection_gpu.pbtxt index a6c773f7f..ff51d4a74 100644 --- a/mediapipe/modules/palm_detection/palm_detection_gpu.pbtxt +++ b/mediapipe/modules/palm_detection/palm_detection_gpu.pbtxt @@ -28,6 +28,7 @@ node { min: -1.0 max: 1.0 } + border_mode: BORDER_ZERO gpu_origin: TOP_LEFT } } diff --git a/mediapipe/modules/pose_detection/pose_detection.tflite b/mediapipe/modules/pose_detection/pose_detection.tflite index ababe424a..55deb3c20 100755 Binary files a/mediapipe/modules/pose_detection/pose_detection.tflite and b/mediapipe/modules/pose_detection/pose_detection.tflite differ diff --git a/mediapipe/modules/pose_detection/pose_detection_cpu.pbtxt b/mediapipe/modules/pose_detection/pose_detection_cpu.pbtxt index 2c3cdc298..81079fc27 
100644 --- a/mediapipe/modules/pose_detection/pose_detection_cpu.pbtxt +++ b/mediapipe/modules/pose_detection/pose_detection_cpu.pbtxt @@ -53,6 +53,7 @@ node: { min: -1.0 max: 1.0 } + border_mode: BORDER_ZERO } } } diff --git a/mediapipe/modules/pose_detection/pose_detection_gpu.pbtxt b/mediapipe/modules/pose_detection/pose_detection_gpu.pbtxt index fb7fe6117..b61f44477 100644 --- a/mediapipe/modules/pose_detection/pose_detection_gpu.pbtxt +++ b/mediapipe/modules/pose_detection/pose_detection_gpu.pbtxt @@ -53,6 +53,7 @@ node: { min: -1.0 max: 1.0 } + border_mode: BORDER_ZERO gpu_origin: TOP_LEFT } } diff --git a/mediapipe/modules/pose_landmark/BUILD b/mediapipe/modules/pose_landmark/BUILD index 32cb33ba6..198ba6a1d 100644 --- a/mediapipe/modules/pose_landmark/BUILD +++ b/mediapipe/modules/pose_landmark/BUILD @@ -22,28 +22,23 @@ licenses(["notice"]) package(default_visibility = ["//visibility:public"]) mediapipe_simple_subgraph( - name = "pose_landmark_upper_body_by_roi_gpu", - graph = "pose_landmark_upper_body_by_roi_gpu.pbtxt", - register_as = "PoseLandmarkUpperBodyByRoiGpu", + name = "pose_landmark_model_loader", + graph = "pose_landmark_model_loader.pbtxt", + register_as = "PoseLandmarkModelLoader", deps = [ - "//mediapipe/calculators/core:gate_calculator", - "//mediapipe/calculators/core:split_normalized_landmark_list_calculator", - "//mediapipe/calculators/core:split_vector_calculator", - "//mediapipe/calculators/tensor:image_to_tensor_calculator", - "//mediapipe/calculators/tensor:inference_calculator", - "//mediapipe/calculators/tensor:tensors_to_floats_calculator", - "//mediapipe/calculators/tensor:tensors_to_landmarks_calculator", - "//mediapipe/calculators/util:landmark_letterbox_removal_calculator", - "//mediapipe/calculators/util:landmark_projection_calculator", - "//mediapipe/calculators/util:thresholding_calculator", + "//mediapipe/calculators/core:constant_side_packet_calculator", + "//mediapipe/calculators/tflite:tflite_model_calculator", + "//mediapipe/calculators/util:local_file_contents_calculator", + "//mediapipe/framework/tool:switch_container", ], ) mediapipe_simple_subgraph( - name = "pose_landmark_upper_body_by_roi_cpu", - graph = "pose_landmark_upper_body_by_roi_cpu.pbtxt", - register_as = "PoseLandmarkUpperBodyByRoiCpu", + name = "pose_landmark_by_roi_gpu", + graph = "pose_landmark_by_roi_gpu.pbtxt", + register_as = "PoseLandmarkByRoiGpu", deps = [ + ":pose_landmark_model_loader", "//mediapipe/calculators/core:gate_calculator", "//mediapipe/calculators/core:split_normalized_landmark_list_calculator", "//mediapipe/calculators/core:split_vector_calculator", @@ -54,6 +49,77 @@ mediapipe_simple_subgraph( "//mediapipe/calculators/util:landmark_letterbox_removal_calculator", "//mediapipe/calculators/util:landmark_projection_calculator", "//mediapipe/calculators/util:thresholding_calculator", + "//mediapipe/framework/tool:switch_container", + ], +) + +mediapipe_simple_subgraph( + name = "pose_landmark_by_roi_cpu", + graph = "pose_landmark_by_roi_cpu.pbtxt", + register_as = "PoseLandmarkByRoiCpu", + deps = [ + ":pose_landmark_model_loader", + "//mediapipe/calculators/core:gate_calculator", + "//mediapipe/calculators/core:split_normalized_landmark_list_calculator", + "//mediapipe/calculators/core:split_vector_calculator", + "//mediapipe/calculators/tensor:image_to_tensor_calculator", + "//mediapipe/calculators/tensor:inference_calculator", + "//mediapipe/calculators/tensor:tensors_to_floats_calculator", + "//mediapipe/calculators/tensor:tensors_to_landmarks_calculator", + 
"//mediapipe/calculators/util:landmark_letterbox_removal_calculator", + "//mediapipe/calculators/util:landmark_projection_calculator", + "//mediapipe/calculators/util:thresholding_calculator", + "//mediapipe/framework/tool:switch_container", + ], +) + +mediapipe_simple_subgraph( + name = "pose_landmark_filtering", + graph = "pose_landmark_filtering.pbtxt", + register_as = "PoseLandmarkFiltering", + deps = [ + "//mediapipe/calculators/util:landmarks_smoothing_calculator", + "//mediapipe/framework/tool:switch_container", + ], +) + +mediapipe_simple_subgraph( + name = "pose_landmark_gpu", + graph = "pose_landmark_gpu.pbtxt", + register_as = "PoseLandmarkGpu", + deps = [ + ":pose_detection_to_roi", + ":pose_landmark_by_roi_gpu", + ":pose_landmark_filtering", + ":pose_landmarks_to_roi", + "//mediapipe/calculators/core:constant_side_packet_calculator", + "//mediapipe/calculators/core:gate_calculator", + "//mediapipe/calculators/core:merge_calculator", + "//mediapipe/calculators/core:packet_presence_calculator", + "//mediapipe/calculators/core:previous_loopback_calculator", + "//mediapipe/calculators/core:split_vector_calculator", + "//mediapipe/calculators/image:image_properties_calculator", + "//mediapipe/modules/pose_detection:pose_detection_gpu", + ], +) + +mediapipe_simple_subgraph( + name = "pose_landmark_cpu", + graph = "pose_landmark_cpu.pbtxt", + register_as = "PoseLandmarkCpu", + deps = [ + ":pose_detection_to_roi", + ":pose_landmark_by_roi_cpu", + ":pose_landmark_filtering", + ":pose_landmarks_to_roi", + "//mediapipe/calculators/core:constant_side_packet_calculator", + "//mediapipe/calculators/core:gate_calculator", + "//mediapipe/calculators/core:merge_calculator", + "//mediapipe/calculators/core:packet_presence_calculator", + "//mediapipe/calculators/core:previous_loopback_calculator", + "//mediapipe/calculators/core:split_vector_calculator", + "//mediapipe/calculators/image:image_properties_calculator", + "//mediapipe/modules/pose_detection:pose_detection_cpu", ], ) @@ -62,17 +128,8 @@ mediapipe_simple_subgraph( graph = "pose_landmark_upper_body_gpu.pbtxt", register_as = "PoseLandmarkUpperBodyGpu", deps = [ - ":pose_detection_to_roi", - ":pose_landmark_upper_body_by_roi_gpu", - ":pose_landmark_upper_body_landmarks_to_roi", - "//mediapipe/calculators/core:gate_calculator", - "//mediapipe/calculators/core:merge_calculator", - "//mediapipe/calculators/core:packet_presence_calculator", - "//mediapipe/calculators/core:previous_loopback_calculator", - "//mediapipe/calculators/core:split_vector_calculator", - "//mediapipe/calculators/image:image_properties_calculator", - "//mediapipe/calculators/util:logic_calculator", - "//mediapipe/modules/pose_detection:pose_detection_gpu", + ":pose_landmark_gpu", + "//mediapipe/calculators/core:constant_side_packet_calculator", ], ) @@ -81,44 +138,14 @@ mediapipe_simple_subgraph( graph = "pose_landmark_upper_body_cpu.pbtxt", register_as = "PoseLandmarkUpperBodyCpu", deps = [ - ":pose_detection_to_roi", - ":pose_landmark_upper_body_by_roi_cpu", - ":pose_landmark_upper_body_landmarks_to_roi", - "//mediapipe/calculators/core:gate_calculator", - "//mediapipe/calculators/core:merge_calculator", - "//mediapipe/calculators/core:packet_presence_calculator", - "//mediapipe/calculators/core:previous_loopback_calculator", - "//mediapipe/calculators/core:split_vector_calculator", - "//mediapipe/calculators/image:image_properties_calculator", - "//mediapipe/calculators/util:logic_calculator", - "//mediapipe/modules/pose_detection:pose_detection_cpu", - ], -) - 
-mediapipe_simple_subgraph( - name = "pose_landmark_upper_body_smoothed_cpu", - graph = "pose_landmark_upper_body_smoothed_cpu.pbtxt", - register_as = "PoseLandmarkUpperBodySmoothedCpu", - deps = [ - ":pose_landmark_upper_body_cpu", - "//mediapipe/calculators/image:image_properties_calculator", - "//mediapipe/calculators/util:landmarks_smoothing_calculator", - ], -) - -mediapipe_simple_subgraph( - name = "pose_landmark_upper_body_smoothed_gpu", - graph = "pose_landmark_upper_body_smoothed_gpu.pbtxt", - register_as = "PoseLandmarkUpperBodySmoothedGpu", - deps = [ - ":pose_landmark_upper_body_gpu", - "//mediapipe/calculators/image:image_properties_calculator", - "//mediapipe/calculators/util:landmarks_smoothing_calculator", + ":pose_landmark_cpu", + "//mediapipe/calculators/core:constant_side_packet_calculator", ], ) exports_files( srcs = [ + "pose_landmark_full_body.tflite", "pose_landmark_upper_body.tflite", ], ) @@ -130,13 +157,14 @@ mediapipe_simple_subgraph( deps = [ "//mediapipe/calculators/util:alignment_points_to_rects_calculator", "//mediapipe/calculators/util:rect_transformation_calculator", + "//mediapipe/framework/tool:switch_container", ], ) mediapipe_simple_subgraph( - name = "pose_landmark_upper_body_landmarks_to_roi", - graph = "pose_landmark_upper_body_landmarks_to_roi.pbtxt", - register_as = "PoseLandmarkUpperBodyLandmarksToRoi", + name = "pose_landmarks_to_roi", + graph = "pose_landmarks_to_roi.pbtxt", + register_as = "PoseLandmarksToRoi", deps = [ "//mediapipe/calculators/util:alignment_points_to_rects_calculator", "//mediapipe/calculators/util:landmarks_to_detection_calculator", diff --git a/mediapipe/modules/pose_landmark/README.md b/mediapipe/modules/pose_landmark/README.md index f0c55252e..35f4062aa 100644 --- a/mediapipe/modules/pose_landmark/README.md +++ b/mediapipe/modules/pose_landmark/README.md @@ -2,8 +2,9 @@ Subgraphs|Details :--- | :--- -[`PoseLandmarkUpperBodyByRoiCpu`](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_upper_body_by_roi_cpu.pbtxt)| Detects landmarks of a single pose. See landmarks (key points) [scheme](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_upper_body_topology.svg). (CPU input, and inference is executed on CPU.) -[`PoseLandmarkUpperBodyByRoiGpu`](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_upper_body_gpu.pbtxt)| Detects landmarks of a single pose. See landmarks (key points) [scheme](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_upper_body_topology.svg). (GPU input, and inference is executed on GPU) -[`PoseLandmarkUpperBodyCpu`](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_upper_body_cpu.pbtxt)| Detects and tracks landmarks of a single pose. See landmarks (key points) [scheme](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_upper_body_topology.svg). (CPU input, and inference is executed on CPU) -[`PoseLandmarkUpperBodyGpu`](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_upper_body_gpu.pbtxt)| Detects and tracks landmarks of a single pose. See landmarks (key points) [scheme](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_upper_body_topology.svg). (GPU input, and inference is executed on GPU.) 
- +[`PoseLandmarkByRoiCpu`](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_by_roi_cpu.pbtxt)| Detects landmarks of a single body pose, full-body by default but can be configured (via an input side packet) to cover upper-body only. See landmarks (aka keypoints) [scheme](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_full_body_topology.svg). (CPU input, and inference is executed on CPU.) +[`PoseLandmarkByRoiGpu`](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_by_roi_gpu.pbtxt)| Detects landmarks of a single body pose, full-body by default but can be configured (via an input side packet) to cover upper-body only. See landmarks (aka keypoints) [scheme](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_full_body_topology.svg). (GPU input, and inference is executed on GPU) +[`PoseLandmarkCpu`](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_cpu.pbtxt)| Detects landmarks of a single body pose, full-body by default but can be configured (via an input side packet) to cover upper-body only. See landmarks (aka keypoints) [scheme](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_full_body_topology.svg). (CPU input, and inference is executed on CPU) +[`PoseLandmarkGpu`](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_gpu.pbtxt)| Detects landmarks of a single body pose, full-body by default but can be configured (via an input side packet) to cover upper-body only. See landmarks (aka keypoints) [scheme](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_full_body_topology.svg). (GPU input, and inference is executed on GPU.) +[`PoseLandmarkUpperBodyCpu`](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_upper_body_cpu.pbtxt)| Detects and tracks landmarks of a single upper-body pose. See landmarks (aka keypoints) [scheme](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_upper_body_topology.svg). (CPU input, and inference is executed on CPU) +[`PoseLandmarkUpperBodyGpu`](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_upper_body_gpu.pbtxt)| Detects and tracks landmarks of a single upper-body pose. See landmarks (aka keypoints) [scheme](https://github.com/google/mediapipe/tree/master/mediapipe/modules/pose_landmark/pose_landmark_upper_body_topology.svg). (GPU input, and inference is executed on GPU.) diff --git a/mediapipe/modules/pose_landmark/pose_detection_to_roi.pbtxt b/mediapipe/modules/pose_landmark/pose_detection_to_roi.pbtxt index 06b2476ba..b348837cb 100644 --- a/mediapipe/modules/pose_landmark/pose_detection_to_roi.pbtxt +++ b/mediapipe/modules/pose_landmark/pose_detection_to_roi.pbtxt @@ -9,6 +9,10 @@ type: "PoseDetectionToRoi" input_stream: "DETECTION:detection" # Frame size (width and height). (std::pair) input_stream: "IMAGE_SIZE:image_size" +# Whether to detect/predict the full set of pose landmarks, or only those on the +# upper body. If unspecified, functions as set to false. (bool) +input_side_packet: "UPPER_BODY_ONLY:upper_body_only" + # ROI according to the first detection of input detections. (NormalizedRect) output_stream: "ROI:roi" @@ -16,16 +20,33 @@ output_stream: "ROI:roi" # points. 
Pose detection contains four key points: first two for full-body pose # and two more for upper-body pose. node { - calculator: "AlignmentPointsRectsCalculator" + calculator: "SwitchContainer" + input_side_packet: "ENABLE:upper_body_only" input_stream: "DETECTION:detection" input_stream: "IMAGE_SIZE:image_size" output_stream: "NORM_RECT:raw_roi" - options: { - [mediapipe.DetectionsToRectsCalculatorOptions.ext] { - rotation_vector_start_keypoint_index: 2 - rotation_vector_end_keypoint_index: 3 - rotation_vector_target_angle_degrees: 90 - output_zero_rect_for_empty_detections: true + options { + [mediapipe.SwitchContainerOptions.ext] { + contained_node: { + calculator: "AlignmentPointsRectsCalculator" + options: { + [mediapipe.DetectionsToRectsCalculatorOptions.ext] { + rotation_vector_start_keypoint_index: 0 + rotation_vector_end_keypoint_index: 1 + rotation_vector_target_angle_degrees: 90 + } + } + } + contained_node: { + calculator: "AlignmentPointsRectsCalculator" + options: { + [mediapipe.DetectionsToRectsCalculatorOptions.ext] { + rotation_vector_start_keypoint_index: 2 + rotation_vector_end_keypoint_index: 3 + rotation_vector_target_angle_degrees: 90 + } + } + } } } } diff --git a/mediapipe/modules/pose_landmark/pose_landmark_upper_body_by_roi_cpu.pbtxt b/mediapipe/modules/pose_landmark/pose_landmark_by_roi_cpu.pbtxt similarity index 56% rename from mediapipe/modules/pose_landmark/pose_landmark_upper_body_by_roi_cpu.pbtxt rename to mediapipe/modules/pose_landmark/pose_landmark_by_roi_cpu.pbtxt index df0c7402c..3ff3d9897 100644 --- a/mediapipe/modules/pose_landmark/pose_landmark_upper_body_by_roi_cpu.pbtxt +++ b/mediapipe/modules/pose_landmark/pose_landmark_by_roi_cpu.pbtxt @@ -1,19 +1,24 @@ # MediaPipe graph to detect/predict upper-body pose landmarks. (CPU input, and # inference is executed on CPU.) # -# It is required that "pose_landmark_upper_body.tflite" is available at +# It is required that "pose_landmark_full_body.tflite" or +# "pose_landmark_upper_body.tflite" is available at +# "mediapipe/modules/pose_landmark/pose_landmark_full_body.tflite" +# or # "mediapipe/modules/pose_landmark/pose_landmark_upper_body.tflite" -# path during execution. +# path respectively during execution, depending on the specification in the +# UPPER_BODY_ONLY input side packet. # # EXAMPLE: # node { -# calculator: "PoseLandmarkUpperBodyByRoiCpu" +# calculator: "PoseLandmarkByRoiCpu" +# input_side_packet: "UPPER_BODY_ONLY:upper_body_only" # input_stream: "IMAGE:image" # input_stream: "ROI:roi" # output_stream: "LANDMARKS:landmarks" # } -type: "PoseLandmarkUpperBodyByRoiCpu" +type: "PoseLandmarkByRoiCpu" # CPU image. (ImageFrame) input_stream: "IMAGE:image" @@ -21,33 +26,49 @@ input_stream: "IMAGE:image" # (NormalizedRect) input_stream: "ROI:roi" +# Whether to detect/predict the full set of pose landmarks (see below), or only +# those on the upper body. If unspecified, functions as set to false. (bool) +# Note that upper-body-only prediction may be more accurate for use cases where +# the lower-body parts are mostly out of view. +input_side_packet: "UPPER_BODY_ONLY:upper_body_only" + # Pose landmarks within the given ROI. (NormalizedLandmarkList) -# We have 25 (upper-body) landmarks (see pose_landmark_upper_body_topology.svg). +# We have 33 landmarks (see pose_landmark_full_body_topology.svg) with the +# first 25 falling on the upper body (see pose_landmark_upper_body_topology.svg), +# and there are other auxiliary key points.
# 0 - nose -# 1 - right eye (inner) -# 2 - right eye -# 3 - right eye (outer) -# 4 - left eye (inner) -# 5 - left eye -# 6 - left eye (outer) -# 7 - right ear -# 8 - left ear -# 9 - mouth (right) -# 10 - mouth (left) -# 11 - right shoulder -# 12 - left shoulder -# 13 - right elbow -# 14 - left elbow -# 15 - right wrist -# 16 - left wrist -# 17 - right pinky -# 18 - left pinky -# 19 - right index -# 20 - left index -# 21 - right thumb -# 22 - left thumb -# 23 - right hip -# 24 - left hip +# 1 - left eye (inner) +# 2 - left eye +# 3 - left eye (outer) +# 4 - right eye (inner) +# 5 - right eye +# 6 - right eye (outer) +# 7 - left ear +# 8 - right ear +# 9 - mouth (left) +# 10 - mouth (right) +# 11 - left shoulder +# 12 - right shoulder +# 13 - left elbow +# 14 - right elbow +# 15 - left wrist +# 16 - right wrist +# 17 - left pinky +# 18 - right pinky +# 19 - left index +# 20 - right index +# 21 - left thumb +# 22 - right thumb +# 23 - left hip +# 24 - right hip +# 25 - left knee +# 26 - right knee +# 27 - left ankle +# 28 - right ankle +# 29 - left heel +# 30 - right heel +# 31 - left foot index +# 32 - right foot index # # NOTE: if a pose is not present within the given ROI, for this particular # timestamp there will not be an output packet in the LANDMARKS stream. However, @@ -80,14 +101,21 @@ node: { } } -# Runs a TensorFlow Lite model inference on CPU. +# Loads the pose landmark TF Lite model. +node { + calculator: "PoseLandmarkModelLoader" + input_side_packet: "UPPER_BODY_ONLY:upper_body_only" + output_side_packet: "MODEL:model" +} + +# Runs model inference on CPU. node { calculator: "InferenceCalculator" + input_side_packet: "MODEL:model" input_stream: "TENSORS:input_tensors" output_stream: "TENSORS:output_tensors" options: { [mediapipe.InferenceCalculatorOptions.ext] { - model_path: "mediapipe/modules/pose_landmark/pose_landmark_upper_body.tflite" delegate { xnnpack {} } } } @@ -140,14 +168,36 @@ node { # Decodes the landmark tensors into a vector of landmarks, where the landmark # coordinates are normalized by the size of the input image to the model. node { - calculator: "TensorsToLandmarksCalculator" + calculator: "SwitchContainer" + input_side_packet: "ENABLE:upper_body_only" input_stream: "TENSORS:ensured_landmark_tensors" output_stream: "NORM_LANDMARKS:raw_landmarks" options: { - [mediapipe.TensorsToLandmarksCalculatorOptions.ext] { - num_landmarks: 27 - input_image_width: 256 - input_image_height: 256 + [mediapipe.SwitchContainerOptions.ext] { + contained_node: { + calculator: "TensorsToLandmarksCalculator" + options: { + [mediapipe.TensorsToLandmarksCalculatorOptions.ext] { + num_landmarks: 35 + input_image_width: 256 + input_image_height: 256 + visibility_activation: SIGMOID + presence_activation: SIGMOID + } + } + } + contained_node: { + calculator: "TensorsToLandmarksCalculator" + options: { + [mediapipe.TensorsToLandmarksCalculatorOptions.ext] { + num_landmarks: 27 + input_image_width: 256 + input_image_height: 256 + visibility_activation: SIGMOID + presence_activation: SIGMOID + } + } + } } } } @@ -175,14 +225,31 @@ node { # Splits the landmarks into two sets: the actual pose landmarks and the # auxiliary landmarks. 
node {
- calculator: "SplitNormalizedLandmarkListCalculator"
+ calculator: "SwitchContainer"
+ input_side_packet: "ENABLE:upper_body_only"
input_stream: "all_landmarks"
output_stream: "landmarks"
output_stream: "auxiliary_landmarks"
options: {
- [mediapipe.SplitVectorCalculatorOptions.ext] {
- ranges: { begin: 0 end: 25 }
- ranges: { begin: 25 end: 27 }
+ [mediapipe.SwitchContainerOptions.ext] {
+ contained_node: {
+ calculator: "SplitNormalizedLandmarkListCalculator"
+ options: {
+ [mediapipe.SplitVectorCalculatorOptions.ext] {
+ ranges: { begin: 0 end: 33 }
+ ranges: { begin: 33 end: 35 }
+ }
+ }
+ }
+ contained_node: {
+ calculator: "SplitNormalizedLandmarkListCalculator"
+ options: {
+ [mediapipe.SplitVectorCalculatorOptions.ext] {
+ ranges: { begin: 0 end: 25 }
+ ranges: { begin: 25 end: 27 }
+ }
+ }
+ }
}
}
}
diff --git a/mediapipe/modules/pose_landmark/pose_landmark_upper_body_by_roi_gpu.pbtxt b/mediapipe/modules/pose_landmark/pose_landmark_by_roi_gpu.pbtxt
similarity index 56%
rename from mediapipe/modules/pose_landmark/pose_landmark_upper_body_by_roi_gpu.pbtxt
rename to mediapipe/modules/pose_landmark/pose_landmark_by_roi_gpu.pbtxt
index 5fa116318..75ede0774 100644
--- a/mediapipe/modules/pose_landmark/pose_landmark_upper_body_by_roi_gpu.pbtxt
+++ b/mediapipe/modules/pose_landmark/pose_landmark_by_roi_gpu.pbtxt
@@ -1,19 +1,24 @@
# MediaPipe graph to detect/predict upper-body pose landmarks. (GPU input, and
# inference is executed on GPU.)
#
-# It is required that "pose_landmark_upper_body.tflite" is available at
+# It is required that "pose_landmark_full_body.tflite" or
+# "pose_landmark_upper_body.tflite" is available at
+# "mediapipe/modules/pose_landmark/pose_landmark_full_body.tflite"
+# or
+# "mediapipe/modules/pose_landmark/pose_landmark_upper_body.tflite"
-# path during execution.
+# path respectively during execution, depending on the specification in the
+# UPPER_BODY_ONLY input side packet.
#
# EXAMPLE:
# node {
-# calculator: "PoseLandmarkUpperBodyByRoiGpu"
+# calculator: "PoseLandmarkByRoiGpu"
+# input_side_packet: "UPPER_BODY_ONLY:upper_body_only"
# input_stream: "IMAGE:image"
# input_stream: "ROI:roi"
# output_stream: "LANDMARKS:landmarks"
# }
-type: "PoseLandmarkUpperBodyByRoiGpu"
+type: "PoseLandmarkByRoiGpu"
# GPU image. (GpuBuffer)
input_stream: "IMAGE:image"
@@ -21,33 +26,49 @@ input_stream: "IMAGE:image"
# (NormalizedRect)
input_stream: "ROI:roi"
+# Whether to detect/predict the full set of pose landmarks (see below), or only
+# those on the upper body. If unspecified, functions as set to false. (bool)
+# Note that upper-body-only prediction may be more accurate for use cases where
+# the lower-body parts are mostly out of view.
+input_side_packet: "UPPER_BODY_ONLY:upper_body_only"
+
# Pose landmarks within the given ROI. (NormalizedLandmarkList)
-# We have 25 (upper-body) landmarks (see pose_landmark_upper_body_topology.svg).
+# We have 33 landmarks (see pose_landmark_full_body_topology.svg) with the
+# first 25 falling on the upper body (see pose_landmark_upper_body_topology.svg),
+# and there are other auxiliary key points.
# 0 - nose -# 1 - right eye (inner) -# 2 - right eye -# 3 - right eye (outer) -# 4 - left eye (inner) -# 5 - left eye -# 6 - left eye (outer) -# 7 - right ear -# 8 - left ear -# 9 - mouth (right) -# 10 - mouth (left) -# 11 - right shoulder -# 12 - left shoulder -# 13 - right elbow -# 14 - left elbow -# 15 - right wrist -# 16 - left wrist -# 17 - right pinky -# 18 - left pinky -# 19 - right index -# 20 - left index -# 21 - right thumb -# 22 - left thumb -# 23 - right hip -# 24 - left hip +# 1 - left eye (inner) +# 2 - left eye +# 3 - left eye (outer) +# 4 - right eye (inner) +# 5 - right eye +# 6 - right eye (outer) +# 7 - left ear +# 8 - right ear +# 9 - mouth (left) +# 10 - mouth (right) +# 11 - left shoulder +# 12 - right shoulder +# 13 - left elbow +# 14 - right elbow +# 15 - left wrist +# 16 - right wrist +# 17 - left pinky +# 18 - right pinky +# 19 - left index +# 20 - right index +# 21 - left thumb +# 22 - right thumb +# 23 - left hip +# 24 - right hip +# 25 - left knee +# 26 - right knee +# 27 - left ankle +# 28 - right ankle +# 29 - left heel +# 30 - right heel +# 31 - left foot index +# 32 - right foot index # # NOTE: if a pose is not present within the given ROI, for this particular # timestamp there will not be an output packet in the LANDMARKS stream. However, @@ -81,16 +102,19 @@ node: { } } -# Runs a TensorFlow Lite model inference on GPU. +# Loads the pose landmark TF Lite model. +node { + calculator: "PoseLandmarkModelLoader" + input_side_packet: "UPPER_BODY_ONLY:upper_body_only" + output_side_packet: "MODEL:model" +} + +# Runs model inference on GPU. node { calculator: "InferenceCalculator" + input_side_packet: "MODEL:model" input_stream: "TENSORS:input_tensors" output_stream: "TENSORS:output_tensors" - options: { - [mediapipe.InferenceCalculatorOptions.ext] { - model_path: "mediapipe/modules/pose_landmark/pose_landmark_upper_body.tflite" - } - } } # Splits a vector of TFLite tensors to multiple vectors according to the ranges @@ -140,14 +164,36 @@ node { # Decodes the landmark tensors into a vector of landmarks, where the landmark # coordinates are normalized by the size of the input image to the model. node { - calculator: "TensorsToLandmarksCalculator" + calculator: "SwitchContainer" + input_side_packet: "ENABLE:upper_body_only" input_stream: "TENSORS:ensured_landmark_tensors" output_stream: "NORM_LANDMARKS:raw_landmarks" options: { - [mediapipe.TensorsToLandmarksCalculatorOptions.ext] { - num_landmarks: 27 - input_image_width: 256 - input_image_height: 256 + [mediapipe.SwitchContainerOptions.ext] { + contained_node: { + calculator: "TensorsToLandmarksCalculator" + options: { + [mediapipe.TensorsToLandmarksCalculatorOptions.ext] { + num_landmarks: 35 + input_image_width: 256 + input_image_height: 256 + visibility_activation: SIGMOID + presence_activation: SIGMOID + } + } + } + contained_node: { + calculator: "TensorsToLandmarksCalculator" + options: { + [mediapipe.TensorsToLandmarksCalculatorOptions.ext] { + num_landmarks: 27 + input_image_width: 256 + input_image_height: 256 + visibility_activation: SIGMOID + presence_activation: SIGMOID + } + } + } } } } @@ -175,14 +221,31 @@ node { # Splits the landmarks into two sets: the actual pose landmarks and the # auxiliary landmarks. 
node {
- calculator: "SplitNormalizedLandmarkListCalculator"
+ calculator: "SwitchContainer"
+ input_side_packet: "ENABLE:upper_body_only"
input_stream: "all_landmarks"
output_stream: "landmarks"
output_stream: "auxiliary_landmarks"
options: {
- [mediapipe.SplitVectorCalculatorOptions.ext] {
- ranges: { begin: 0 end: 25 }
- ranges: { begin: 25 end: 27 }
+ [mediapipe.SwitchContainerOptions.ext] {
+ contained_node: {
+ calculator: "SplitNormalizedLandmarkListCalculator"
+ options: {
+ [mediapipe.SplitVectorCalculatorOptions.ext] {
+ ranges: { begin: 0 end: 33 }
+ ranges: { begin: 33 end: 35 }
+ }
+ }
+ }
+ contained_node: {
+ calculator: "SplitNormalizedLandmarkListCalculator"
+ options: {
+ [mediapipe.SplitVectorCalculatorOptions.ext] {
+ ranges: { begin: 0 end: 25 }
+ ranges: { begin: 25 end: 27 }
+ }
+ }
+ }
}
}
}
diff --git a/mediapipe/modules/pose_landmark/pose_landmark_cpu.pbtxt b/mediapipe/modules/pose_landmark/pose_landmark_cpu.pbtxt
new file mode 100644
index 000000000..a2da957c6
--- /dev/null
+++ b/mediapipe/modules/pose_landmark/pose_landmark_cpu.pbtxt
@@ -0,0 +1,227 @@
+# MediaPipe graph to detect/predict pose landmarks. (CPU input, and inference is
+# executed on CPU.) This graph tries to skip pose detection as much as possible
+# by using previously detected/predicted landmarks for new images.
+#
+# It is required that "pose_detection.tflite" is available at
+# "mediapipe/modules/pose_detection/pose_detection.tflite"
+# path during execution.
+#
+# It is required that "pose_landmark_full_body.tflite" or
+# "pose_landmark_upper_body.tflite" is available at
+# "mediapipe/modules/pose_landmark/pose_landmark_full_body.tflite"
+# or
+# "mediapipe/modules/pose_landmark/pose_landmark_upper_body.tflite"
+# path respectively during execution, depending on the specification in the
+# UPPER_BODY_ONLY input side packet.
+#
+# EXAMPLE:
+# node {
+# calculator: "PoseLandmarkCpu"
+# input_side_packet: "UPPER_BODY_ONLY:upper_body_only"
+# input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks"
+# input_stream: "IMAGE:image"
+# output_stream: "LANDMARKS:pose_landmarks"
+# }
+
+type: "PoseLandmarkCpu"
+
+# CPU image. (ImageFrame)
+input_stream: "IMAGE:image"
+
+# Whether to detect/predict the full set of pose landmarks (see below), or only
+# those on the upper body. If unspecified, functions as set to false. (bool)
+# Note that upper-body-only prediction may be more accurate for use cases where
+# the lower-body parts are mostly out of view.
+input_side_packet: "UPPER_BODY_ONLY:upper_body_only"
+
+# Whether to filter landmarks across different input images to reduce jitter.
+# If unspecified, functions as set to false. (bool)
+input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks"
+
+# Pose landmarks within the given ROI. (NormalizedLandmarkList)
+# We have 33 landmarks (see pose_landmark_full_body_topology.svg) with the
+# first 25 falling on the upper body (see pose_landmark_upper_body_topology.svg),
+# and there are other auxiliary key points.
+# 0 - nose
+# 1 - left eye (inner)
+# 2 - left eye
+# 3 - left eye (outer)
+# 4 - right eye (inner)
+# 5 - right eye
+# 6 - right eye (outer)
+# 7 - left ear
+# 8 - right ear
+# 9 - mouth (left)
+# 10 - mouth (right)
+# 11 - left shoulder
+# 12 - right shoulder
+# 13 - left elbow
+# 14 - right elbow
+# 15 - left wrist
+# 16 - right wrist
+# 17 - left pinky
+# 18 - right pinky
+# 19 - left index
+# 20 - right index
+# 21 - left thumb
+# 22 - right thumb
+# 23 - left hip
+# 24 - right hip
+# 25 - left knee
+# 26 - right knee
+# 27 - left ankle
+# 28 - right ankle
+# 29 - left heel
+# 30 - right heel
+# 31 - left foot index
+# 32 - right foot index
+#
+# NOTE: if a pose is not present within the given ROI, for this particular
+# timestamp there will not be an output packet in the LANDMARKS stream. However,
+# the MediaPipe framework will internally inform the downstream calculators of
+# the absence of this packet so that they don't wait for it unnecessarily.
+output_stream: "LANDMARKS:pose_landmarks"
+
+# Extra outputs (for debugging, for instance).
+# Detected poses. (Detection)
+output_stream: "DETECTION:pose_detection"
+# Regions of interest calculated based on landmarks. (NormalizedRect)
+output_stream: "ROI_FROM_LANDMARKS:pose_rect_from_landmarks"
+# Regions of interest calculated based on pose detections. (NormalizedRect)
+output_stream: "ROI_FROM_DETECTION:pose_rect_from_detection"
+
+# Defines whether landmarks on the previous image should be used to help
+# localize landmarks on the current image.
+node {
+ name: "ConstantSidePacketCalculator"
+ calculator: "ConstantSidePacketCalculator"
+ output_side_packet: "PACKET:use_prev_landmarks"
+ options: {
+ [mediapipe.ConstantSidePacketCalculatorOptions.ext]: {
+ packet { bool_value: true }
+ }
+ }
+}
+node {
+ calculator: "GateCalculator"
+ input_side_packet: "ALLOW:use_prev_landmarks"
+ input_stream: "prev_pose_rect_from_landmarks"
+ output_stream: "gated_prev_pose_rect_from_landmarks"
+}
+
+# Checks if there's previous pose rect calculated from landmarks.
+node: {
+ calculator: "PacketPresenceCalculator"
+ input_stream: "PACKET:gated_prev_pose_rect_from_landmarks"
+ output_stream: "PRESENCE:prev_pose_rect_from_landmarks_is_present"
+}
+
+# Calculates size of the image.
+node {
+ calculator: "ImagePropertiesCalculator"
+ input_stream: "IMAGE:image"
+ output_stream: "SIZE:image_size"
+}
+
+# Drops the incoming image if the pose has already been identified from the
+# previous image. Otherwise, passes the incoming image through to trigger a new
+# round of pose detection.
+node {
+ calculator: "GateCalculator"
+ input_stream: "image"
+ input_stream: "image_size"
+ input_stream: "DISALLOW:prev_pose_rect_from_landmarks_is_present"
+ output_stream: "image_for_pose_detection"
+ output_stream: "image_size_for_pose_detection"
+ options: {
+ [mediapipe.GateCalculatorOptions.ext] {
+ empty_packets_as_allow: true
+ }
+ }
+}
+
+# Detects poses.
+node {
+ calculator: "PoseDetectionCpu"
+ input_stream: "IMAGE:image_for_pose_detection"
+ output_stream: "DETECTIONS:pose_detections"
+}
+
+# Gets the very first detection from "pose_detections" vector.
+node {
+ calculator: "SplitDetectionVectorCalculator"
+ input_stream: "pose_detections"
+ output_stream: "pose_detection"
+ options: {
+ [mediapipe.SplitVectorCalculatorOptions.ext] {
+ ranges: { begin: 0 end: 1 }
+ element_only: true
+ }
+ }
+}
+
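The gate/presence/loopback nodes above implement the detection skipping promised in this file's header: the detector runs only when no ROI could be carried over from the previous frame's landmarks. A compact C++ sketch of that per-frame control flow, using hypothetical stand-ins (DetectPose, LandmarksToRoi) rather than real MediaPipe APIs:

#include <optional>

struct NormalizedRect { float x_center = 0, y_center = 0, width = 0, height = 0, rotation = 0; };
struct Frame {};

// Hypothetical stand-ins for PoseDetectionCpu and the landmarks->ROI step.
std::optional<NormalizedRect> DetectPose(const Frame&) { return NormalizedRect{}; }
std::optional<NormalizedRect> LandmarksToRoi(const Frame&, const NormalizedRect&) { return NormalizedRect{}; }

// One loop iteration: prev_roi plays the role of the packet carried back by
// PreviousLoopbackCalculator; its presence (PacketPresenceCalculator) gates
// the expensive detector (GateCalculator with DISALLOW).
std::optional<NormalizedRect> NextRoi(const Frame& frame,
                                      std::optional<NormalizedRect> prev_roi) {
  // MergeCalculator: prefer the carried-over ROI, else fall back to detection.
  std::optional<NormalizedRect> roi = prev_roi ? prev_roi : DetectPose(frame);
  if (!roi) return std::nullopt;  // no pose at this timestamp
  return LandmarksToRoi(frame, *roi);  // becomes prev_roi for the next frame
}

The graph continues below with exactly this ROI computation.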
+# Calculates region of interest based on pose detection, so that it can be used
+# to detect landmarks.
+node {
+ calculator: "PoseDetectionToRoi"
+ input_side_packet: "UPPER_BODY_ONLY:upper_body_only"
+ input_stream: "DETECTION:pose_detection"
+ input_stream: "IMAGE_SIZE:image_size_for_pose_detection"
+ output_stream: "ROI:pose_rect_from_detection"
+}
+
+# Selects either pose rect (or ROI) calculated from detection or from previously
+# detected landmarks if available (in this case, calculation of pose rect from
+# detection is skipped).
+node {
+ calculator: "MergeCalculator"
+ input_stream: "pose_rect_from_detection"
+ input_stream: "gated_prev_pose_rect_from_landmarks"
+ output_stream: "pose_rect"
+}
+
+# Detects pose landmarks within specified region of interest of the image.
+node {
+ calculator: "PoseLandmarkByRoiCpu"
+ input_side_packet: "UPPER_BODY_ONLY:upper_body_only"
+ input_stream: "IMAGE:image"
+ input_stream: "ROI:pose_rect"
+ output_stream: "LANDMARKS:unfiltered_pose_landmarks"
+ output_stream: "AUXILIARY_LANDMARKS:unfiltered_auxiliary_landmarks"
+}
+
+# Smoothes landmarks to reduce jitter.
+node {
+ calculator: "PoseLandmarkFiltering"
+ input_side_packet: "ENABLE:smooth_landmarks"
+ input_stream: "IMAGE_SIZE:image_size"
+ input_stream: "NORM_LANDMARKS:unfiltered_pose_landmarks"
+ input_stream: "AUX_NORM_LANDMARKS:unfiltered_auxiliary_landmarks"
+ output_stream: "FILTERED_NORM_LANDMARKS:pose_landmarks"
+ output_stream: "FILTERED_AUX_NORM_LANDMARKS:auxiliary_landmarks"
+}
+
+# Calculates region of interest based on the auxiliary landmarks, to be used in
+# the subsequent image.
+node {
+ calculator: "PoseLandmarksToRoi"
+ input_stream: "LANDMARKS:auxiliary_landmarks"
+ input_stream: "IMAGE_SIZE:image_size"
+ output_stream: "ROI:pose_rect_from_landmarks"
+}
+
+# Caches pose rects calculated from landmarks, and upon the arrival of the next
+# input image, sends out the cached rects with timestamps replaced by that of
+# the input image, essentially generating a packet that carries the previous
+# pose rects. Note that upon the arrival of the very first input image, a
+# timestamp bound update occurs to jump-start the feedback loop.
+node {
+ calculator: "PreviousLoopbackCalculator"
+ input_stream: "MAIN:image"
+ input_stream: "LOOP:pose_rect_from_landmarks"
+ input_stream_info: {
+ tag_index: "LOOP"
+ back_edge: true
+ }
+ output_stream: "PREV_LOOP:prev_pose_rect_from_landmarks"
+}
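With pose_landmark_cpu.pbtxt complete, a host application chooses the model variant and smoothing behavior through input side packets when the graph starts. A sketch of driving it from C++, assuming a host config that wraps PoseLandmarkCpu with input stream "image" and output stream "pose_landmarks" (per the EXAMPLE comment above); error handling is abbreviated:

#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/formats/landmark.pb.h"

absl::Status RunPoseLandmarks(const mediapipe::CalculatorGraphConfig& config) {
  mediapipe::CalculatorGraph graph;
  MP_RETURN_IF_ERROR(graph.Initialize(config));
  MP_RETURN_IF_ERROR(graph.ObserveOutputStream(
      "pose_landmarks", [](const mediapipe::Packet& packet) {
        // 33 landmarks in full-body mode, 25 in upper-body mode.
        const auto& landmarks = packet.Get<mediapipe::NormalizedLandmarkList>();
        (void)landmarks;
        return absl::OkStatus();
      }));
  // The two side packets introduced by this change.
  return graph.StartRun({
      {"upper_body_only", mediapipe::MakePacket<bool>(false)},
      {"smooth_landmarks", mediapipe::MakePacket<bool>(true)},
  });
}

After StartRun, frames are pushed with AddPacketToInputStream("image", ...) at monotonically increasing timestamps.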
diff --git a/mediapipe/modules/pose_landmark/pose_landmark_filtering.pbtxt b/mediapipe/modules/pose_landmark/pose_landmark_filtering.pbtxt
new file mode 100644
index 000000000..cb477378d
--- /dev/null
+++ b/mediapipe/modules/pose_landmark/pose_landmark_filtering.pbtxt
@@ -0,0 +1,94 @@
+# MediaPipe graph to filter landmarks temporally (across packets with
+# incremental timestamps) to reduce jitter.
+#
+# EXAMPLE:
+# node {
+# calculator: "PoseLandmarkFiltering"
+# input_side_packet: "ENABLE:enable"
+# input_stream: "IMAGE_SIZE:image_size"
+# input_stream: "NORM_LANDMARKS:landmarks"
+# input_stream: "AUX_NORM_LANDMARKS:aux_landmarks"
+# output_stream: "FILTERED_NORM_LANDMARKS:filtered_landmarks"
+# output_stream: "FILTERED_AUX_NORM_LANDMARKS:filtered_aux_landmarks"
+# }
+
+type: "PoseLandmarkFiltering"
+
+# Whether to enable filtering. If unspecified, functions as not enabled. (bool)
+input_side_packet: "ENABLE:enable"
+
+# Size of the image (width & height) where the landmarks are estimated from.
+# (std::pair<int, int>)
+input_stream: "IMAGE_SIZE:image_size"
+# Normalized landmarks. (NormalizedRect)
+input_stream: "NORM_LANDMARKS:landmarks"
+# Auxiliary set of normalized landmarks. (NormalizedRect)
+input_stream: "AUX_NORM_LANDMARKS:aux_landmarks"
+# Filtered normalized landmarks. (NormalizedRect)
+output_stream: "FILTERED_NORM_LANDMARKS:filtered_landmarks"
+# Filtered auxiliary set of normalized landmarks. (NormalizedRect)
+output_stream: "FILTERED_AUX_NORM_LANDMARKS:filtered_aux_landmarks"
+
+# Smoothes pose landmarks to reduce jitter.
+node {
+ calculator: "SwitchContainer"
+ input_side_packet: "ENABLE:enable"
+ input_stream: "NORM_LANDMARKS:landmarks"
+ input_stream: "IMAGE_SIZE:image_size"
+ output_stream: "NORM_FILTERED_LANDMARKS:filtered_landmarks"
+ options: {
+ [mediapipe.SwitchContainerOptions.ext] {
+ contained_node: {
+ calculator: "LandmarksSmoothingCalculator"
+ options: {
+ [mediapipe.LandmarksSmoothingCalculatorOptions.ext] {
+ no_filter: {}
+ }
+ }
+ }
+ contained_node: {
+ calculator: "LandmarksSmoothingCalculator"
+ options: {
+ [mediapipe.LandmarksSmoothingCalculatorOptions.ext] {
+ velocity_filter: {
+ window_size: 5
+ velocity_scale: 10.0
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+# Smoothes auxiliary landmarks to reduce jitter.
+node {
+ calculator: "SwitchContainer"
+ input_side_packet: "ENABLE:enable"
+ input_stream: "NORM_LANDMARKS:aux_landmarks"
+ input_stream: "IMAGE_SIZE:image_size"
+ output_stream: "NORM_FILTERED_LANDMARKS:filtered_aux_landmarks"
+ options: {
+ [mediapipe.SwitchContainerOptions.ext] {
+ contained_node: {
+ calculator: "LandmarksSmoothingCalculator"
+ options: {
+ [mediapipe.LandmarksSmoothingCalculatorOptions.ext] {
+ no_filter: {}
+ }
+ }
+ }
+ contained_node: {
+ calculator: "LandmarksSmoothingCalculator"
+ options: {
+ [mediapipe.LandmarksSmoothingCalculatorOptions.ext] {
+ velocity_filter: {
+ window_size: 5
+ velocity_scale: 10.0
+ }
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/mediapipe/modules/pose_landmark/pose_landmark_full_body.tflite b/mediapipe/modules/pose_landmark/pose_landmark_full_body.tflite
new file mode 100755
index 000000000..186885611
Binary files /dev/null and b/mediapipe/modules/pose_landmark/pose_landmark_full_body.tflite differ
diff --git a/mediapipe/modules/pose_landmark/pose_landmark_full_body_topology.svg b/mediapipe/modules/pose_landmark/pose_landmark_full_body_topology.svg
new file mode 100644
index 000000000..bc4afa734
--- /dev/null
+++ b/mediapipe/modules/pose_landmark/pose_landmark_full_body_topology.svg
@@ -0,0 +1,651 @@
[651 lines of SVG markup omitted; the source arrived garbled in extraction. The file is a reference diagram of the full-body pose topology, with the 33 landmark nodes labeled 0-32.]
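In the pose_landmark_filtering graph above, ENABLE=false routes through no_filter (a pass-through) and ENABLE=true through velocity_filter, whose window_size/velocity_scale settings make smoothing adaptive: the faster a landmark moves, the more the new measurement is trusted, so jitter is damped at rest without adding lag during motion. An illustrative single-coordinate smoother in that spirit; this is a sketch of the idea, not MediaPipe's actual LandmarksSmoothingCalculator:

#include <cmath>

struct VelocityAdaptiveSmoother {
  float velocity_scale = 10.0f;  // mirrors the option above; assumed semantics
  bool has_prev = false;
  float prev_raw = 0.0f;
  float prev_out = 0.0f;

  float Filter(float x) {
    if (!has_prev) {  // first sample passes through unchanged
      has_prev = true;
      prev_raw = prev_out = x;
      return x;
    }
    const float speed = std::fabs(x - prev_raw);  // per-frame velocity proxy
    // Fast motion -> alpha near 1 (follow the input); slow motion -> alpha
    // near 0 (heavy smoothing), which is what suppresses jitter.
    const float alpha = speed * velocity_scale / (1.0f + speed * velocity_scale);
    prev_out += alpha * (x - prev_out);
    prev_raw = x;
    return prev_out;
  }
};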
diff --git a/mediapipe/modules/pose_landmark/pose_landmark_gpu.pbtxt b/mediapipe/modules/pose_landmark/pose_landmark_gpu.pbtxt
new file mode 100644
index 000000000..72ed00f5f
--- /dev/null
+++ b/mediapipe/modules/pose_landmark/pose_landmark_gpu.pbtxt
@@ -0,0 +1,227 @@
+# MediaPipe graph to detect/predict pose landmarks. (GPU input, and inference is
+# executed on GPU.) This graph tries to skip pose detection as much as possible
+# by using previously detected/predicted landmarks for new images.
+#
+# It is required that "pose_detection.tflite" is available at
+# "mediapipe/modules/pose_detection/pose_detection.tflite"
+# path during execution.
+#
+# It is required that "pose_landmark_full_body.tflite" or
+# "pose_landmark_upper_body.tflite" is available at
+# "mediapipe/modules/pose_landmark/pose_landmark_full_body.tflite"
+# or
+# "mediapipe/modules/pose_landmark/pose_landmark_upper_body.tflite"
+# path respectively during execution, depending on the specification in the
+# UPPER_BODY_ONLY input side packet.
+#
+# EXAMPLE:
+# node {
+# calculator: "PoseLandmarkGpu"
+# input_side_packet: "UPPER_BODY_ONLY:upper_body_only"
+# input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks"
+# input_stream: "IMAGE:image"
+# output_stream: "LANDMARKS:pose_landmarks"
+# }
+
+type: "PoseLandmarkGpu"
+
+# GPU image. (GpuBuffer)
+input_stream: "IMAGE:image"
+
+# Whether to detect/predict the full set of pose landmarks (see below), or only
+# those on the upper body. If unspecified, functions as set to false. (bool)
+# Note that upper-body-only prediction may be more accurate for use cases where
+# the lower-body parts are mostly out of view.
+input_side_packet: "UPPER_BODY_ONLY:upper_body_only"
+
+# Whether to filter landmarks across different input images to reduce jitter.
+# If unspecified, functions as set to false. (bool)
+input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks"
+
+# Pose landmarks within the given ROI. (NormalizedLandmarkList)
+# We have 33 landmarks (see pose_landmark_full_body_topology.svg) with the
+# first 25 falling on the upper body (see pose_landmark_upper_body_topology.svg),
+# and there are other auxiliary key points.
+# 0 - nose
+# 1 - left eye (inner)
+# 2 - left eye
+# 3 - left eye (outer)
+# 4 - right eye (inner)
+# 5 - right eye
+# 6 - right eye (outer)
+# 7 - left ear
+# 8 - right ear
+# 9 - mouth (left)
+# 10 - mouth (right)
+# 11 - left shoulder
+# 12 - right shoulder
+# 13 - left elbow
+# 14 - right elbow
+# 15 - left wrist
+# 16 - right wrist
+# 17 - left pinky
+# 18 - right pinky
+# 19 - left index
+# 20 - right index
+# 21 - left thumb
+# 22 - right thumb
+# 23 - left hip
+# 24 - right hip
+# 25 - left knee
+# 26 - right knee
+# 27 - left ankle
+# 28 - right ankle
+# 29 - left heel
+# 30 - right heel
+# 31 - left foot index
+# 32 - right foot index
+#
+# NOTE: if a pose is not present within the given ROI, for this particular
+# timestamp there will not be an output packet in the LANDMARKS stream. However,
+# the MediaPipe framework will internally inform the downstream calculators of
+# the absence of this packet so that they don't wait for it unnecessarily.
+output_stream: "LANDMARKS:pose_landmarks"
+
+# Extra outputs (for debugging, for instance).
+# Detected poses. (Detection)
+output_stream: "DETECTION:pose_detection"
+# Regions of interest calculated based on landmarks. (NormalizedRect)
+output_stream: "ROI_FROM_LANDMARKS:pose_rect_from_landmarks"
+# Regions of interest calculated based on pose detections. (NormalizedRect)
+output_stream: "ROI_FROM_DETECTION:pose_rect_from_detection"
+
+# Defines whether landmarks on the previous image should be used to help
+# localize landmarks on the current image.
+node {
+ name: "ConstantSidePacketCalculator"
+ calculator: "ConstantSidePacketCalculator"
+ output_side_packet: "PACKET:use_prev_landmarks"
+ options: {
+ [mediapipe.ConstantSidePacketCalculatorOptions.ext]: {
+ packet { bool_value: true }
+ }
+ }
+}
+node {
+ calculator: "GateCalculator"
+ input_side_packet: "ALLOW:use_prev_landmarks"
+ input_stream: "prev_pose_rect_from_landmarks"
+ output_stream: "gated_prev_pose_rect_from_landmarks"
+}
+
+# Checks if there's previous pose rect calculated from landmarks.
+node: {
+ calculator: "PacketPresenceCalculator"
+ input_stream: "PACKET:gated_prev_pose_rect_from_landmarks"
+ output_stream: "PRESENCE:prev_pose_rect_from_landmarks_is_present"
+}
+
+# Calculates size of the image.
+node {
+ calculator: "ImagePropertiesCalculator"
+ input_stream: "IMAGE_GPU:image"
+ output_stream: "SIZE:image_size"
+}
+
+# Drops the incoming image if the pose has already been identified from the
+# previous image. Otherwise, passes the incoming image through to trigger a new
+# round of pose detection.
+node {
+ calculator: "GateCalculator"
+ input_stream: "image"
+ input_stream: "image_size"
+ input_stream: "DISALLOW:prev_pose_rect_from_landmarks_is_present"
+ output_stream: "image_for_pose_detection"
+ output_stream: "image_size_for_pose_detection"
+ options: {
+ [mediapipe.GateCalculatorOptions.ext] {
+ empty_packets_as_allow: true
+ }
+ }
+}
+
+# Detects poses.
+node {
+ calculator: "PoseDetectionGpu"
+ input_stream: "IMAGE:image_for_pose_detection"
+ output_stream: "DETECTIONS:pose_detections"
+}
+
+# Gets the very first detection from "pose_detections" vector.
+node {
+ calculator: "SplitDetectionVectorCalculator"
+ input_stream: "pose_detections"
+ output_stream: "pose_detection"
+ options: {
+ [mediapipe.SplitVectorCalculatorOptions.ext] {
+ ranges: { begin: 0 end: 1 }
+ element_only: true
+ }
+ }
+}
+
+# Calculates region of interest based on pose detection, so that it can be used
+# to detect landmarks.
+node {
+ calculator: "PoseDetectionToRoi"
+ input_side_packet: "UPPER_BODY_ONLY:upper_body_only"
+ input_stream: "DETECTION:pose_detection"
+ input_stream: "IMAGE_SIZE:image_size_for_pose_detection"
+ output_stream: "ROI:pose_rect_from_detection"
+}
+
+# Selects either pose rect (or ROI) calculated from detection or from previously
+# detected landmarks if available (in this case, calculation of pose rect from
+# detection is skipped).
+node {
+ calculator: "MergeCalculator"
+ input_stream: "pose_rect_from_detection"
+ input_stream: "gated_prev_pose_rect_from_landmarks"
+ output_stream: "pose_rect"
+}
+
+# Detects pose landmarks within specified region of interest of the image.
+node {
+ calculator: "PoseLandmarkByRoiGpu"
+ input_side_packet: "UPPER_BODY_ONLY:upper_body_only"
+ input_stream: "IMAGE:image"
+ input_stream: "ROI:pose_rect"
+ output_stream: "LANDMARKS:unfiltered_pose_landmarks"
+ output_stream: "AUXILIARY_LANDMARKS:unfiltered_auxiliary_landmarks"
+}
+
+# Smoothes landmarks to reduce jitter.
+node {
+ calculator: "PoseLandmarkFiltering"
+ input_side_packet: "ENABLE:smooth_landmarks"
+ input_stream: "IMAGE_SIZE:image_size"
+ input_stream: "NORM_LANDMARKS:unfiltered_pose_landmarks"
+ input_stream: "AUX_NORM_LANDMARKS:unfiltered_auxiliary_landmarks"
+ output_stream: "FILTERED_NORM_LANDMARKS:pose_landmarks"
+ output_stream: "FILTERED_AUX_NORM_LANDMARKS:auxiliary_landmarks"
+}
+
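PoseLandmarkByRoiGpu above delegates model selection to the PoseLandmarkModelLoader subgraph, whose definition appears a little further below: it maps the boolean side packet to a model path, reads the file into a blob (LocalFileContentsCalculator), and builds a TF Lite model from it (TfLiteModelCalculator). A hedged C++ equivalent that collapses those steps into a single call:

#include <memory>
#include <string>

#include "tensorflow/lite/model.h"

std::unique_ptr<tflite::FlatBufferModel> LoadPoseLandmarkModel(bool upper_body_only) {
  // Same paths the loader graph selects between.
  const std::string path = upper_body_only
      ? "mediapipe/modules/pose_landmark/pose_landmark_upper_body.tflite"
      : "mediapipe/modules/pose_landmark/pose_landmark_full_body.tflite";
  // Returns nullptr on failure. The graph splits this into a blob-read plus a
  // model-build step, which presumably also accommodates in-memory assets.
  return tflite::FlatBufferModel::BuildFromFile(path.c_str());
}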
+# Calculates region of interest based on the auxiliary landmarks, to be used in
+# the subsequent image.
+node {
+ calculator: "PoseLandmarksToRoi"
+ input_stream: "LANDMARKS:auxiliary_landmarks"
+ input_stream: "IMAGE_SIZE:image_size"
+ output_stream: "ROI:pose_rect_from_landmarks"
+}
+
+# Caches pose rects calculated from landmarks, and upon the arrival of the next
+# input image, sends out the cached rects with timestamps replaced by that of
+# the input image, essentially generating a packet that carries the previous
+# pose rects. Note that upon the arrival of the very first input image, a
+# timestamp bound update occurs to jump-start the feedback loop.
+node {
+ calculator: "PreviousLoopbackCalculator"
+ input_stream: "MAIN:image"
+ input_stream: "LOOP:pose_rect_from_landmarks"
+ input_stream_info: {
+ tag_index: "LOOP"
+ back_edge: true
+ }
+ output_stream: "PREV_LOOP:prev_pose_rect_from_landmarks"
+}
diff --git a/mediapipe/modules/pose_landmark/pose_landmark_model_loader.pbtxt b/mediapipe/modules/pose_landmark/pose_landmark_model_loader.pbtxt
new file mode 100644
index 000000000..4315679fe
--- /dev/null
+++ b/mediapipe/modules/pose_landmark/pose_landmark_model_loader.pbtxt
@@ -0,0 +1,56 @@
+# MediaPipe graph to load a selected pose landmark TF Lite model.
+
+type: "PoseLandmarkModelLoader"
+
+# Whether to load the full-body landmark model or the upper-body one. (bool)
+input_side_packet: "UPPER_BODY_ONLY:upper_body_only"
+
+# TF Lite model represented as a FlatBuffer.
+# (std::unique_ptr<tflite::FlatBufferModel, std::function<void(tflite::FlatBufferModel*)>>)
+output_side_packet: "MODEL:model"
+
+# Determines path to the desired pose landmark model file based on specification
+# in the input side packet.
+node {
+ calculator: "SwitchContainer"
+ input_side_packet: "ENABLE:upper_body_only"
+ output_side_packet: "PACKET:model_path"
+ options: {
+ [mediapipe.SwitchContainerOptions.ext] {
+ contained_node: {
+ calculator: "ConstantSidePacketCalculator"
+ options: {
+ [mediapipe.ConstantSidePacketCalculatorOptions.ext]: {
+ packet {
+ string_value: "mediapipe/modules/pose_landmark/pose_landmark_full_body.tflite"
+ }
+ }
+ }
+ }
+ contained_node: {
+ calculator: "ConstantSidePacketCalculator"
+ options: {
+ [mediapipe.ConstantSidePacketCalculatorOptions.ext]: {
+ packet {
+ string_value: "mediapipe/modules/pose_landmark/pose_landmark_upper_body.tflite"
+ }
+ }
+ }
+ }
+ }
+ }
+}
+
+# Loads the file in the specified path into a blob.
+node {
+ calculator: "LocalFileContentsCalculator"
+ input_side_packet: "FILE_PATH:model_path"
+ output_side_packet: "CONTENTS:model_blob"
+}
+
+# Converts the input blob into a TF Lite model.
+node { + calculator: "TfLiteModelCalculator" + input_side_packet: "MODEL_BLOB:model_blob" + output_side_packet: "MODEL:model" +} diff --git a/mediapipe/modules/pose_landmark/pose_landmark_upper_body.tflite b/mediapipe/modules/pose_landmark/pose_landmark_upper_body.tflite index 8d36c1b19..27ca75017 100755 Binary files a/mediapipe/modules/pose_landmark/pose_landmark_upper_body.tflite and b/mediapipe/modules/pose_landmark/pose_landmark_upper_body.tflite differ diff --git a/mediapipe/modules/pose_landmark/pose_landmark_upper_body_cpu.pbtxt b/mediapipe/modules/pose_landmark/pose_landmark_upper_body_cpu.pbtxt index bb7ed5288..2c5182078 100644 --- a/mediapipe/modules/pose_landmark/pose_landmark_upper_body_cpu.pbtxt +++ b/mediapipe/modules/pose_landmark/pose_landmark_upper_body_cpu.pbtxt @@ -13,6 +13,7 @@ # EXAMPLE: # node { # calculator: "PoseLandmarkUpperBodyCpu" +# input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks" # input_stream: "IMAGE:image" # output_stream: "LANDMARKS:pose_landmarks" # } @@ -22,37 +23,37 @@ type: "PoseLandmarkUpperBodyCpu" # CPU image. (ImageFrame) input_stream: "IMAGE:image" -# Whether pose detection can be skipped when pose regions can already be -# approximated from pose landmarks on the previous frame. -input_side_packet: "CAN_SKIP_DETECTION:can_skip_detection" +# Whether to filter landmarks across different input images to reduce jitter. +# If unspecified, functions as set to false. (bool) +input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks" # Pose landmarks within the given ROI. (NormalizedLandmarkList) # We have 25 (upper-body) landmarks (see pose_landmark_upper_body_topology.svg). # 0 - nose -# 1 - right eye (inner) -# 2 - right eye -# 3 - right eye (outer) -# 4 - left eye (inner) -# 5 - left eye -# 6 - left eye (outer) -# 7 - right ear -# 8 - left ear -# 9 - mouth (right) -# 10 - mouth (left) -# 11 - right shoulder -# 12 - left shoulder -# 13 - right elbow -# 14 - left elbow -# 15 - right wrist -# 16 - left wrist -# 17 - right pinky -# 18 - left pinky -# 19 - right index -# 20 - left index -# 21 - right thumb -# 22 - left thumb -# 23 - right hip -# 24 - left hip +# 1 - left eye (inner) +# 2 - left eye +# 3 - left eye (outer) +# 4 - right eye (inner) +# 5 - right eye +# 6 - right eye (outer) +# 7 - left ear +# 8 - right ear +# 9 - mouth (left) +# 10 - mouth (right) +# 11 - left shoulder +# 12 - right shoulder +# 13 - left elbow +# 14 - right elbow +# 15 - left wrist +# 16 - right wrist +# 17 - left pinky +# 18 - right pinky +# 19 - left index +# 20 - right index +# 21 - left thumb +# 22 - right thumb +# 23 - left hip +# 24 - right hip # # NOTE: if a pose is not present within the given ROI, for this particular # timestamp there will not be an output packet in the LANDMARKS stream. However, @@ -68,116 +69,23 @@ output_stream: "ROI_FROM_LANDMARKS:pose_rect_from_landmarks" # Regions of interest calculated based on pose detections. (NormalizedRect) output_stream: "ROI_FROM_DETECTION:pose_rect_from_detection" -# Caches pose rects calculated from landmarks, and upon the arrival of the next -# input image, sends out the cached rects with timestamps replaced by that of -# the input image, essentially generating a packet that carries the previous -# pose rects. Note that upon the arrival of the very first input image, a -# timestamp bound update occurs to jump start the feedback loop. 
node { - calculator: "PreviousLoopbackCalculator" - input_stream: "MAIN:image" - input_stream: "LOOP:pose_rect_from_landmarks" - input_stream_info: { - tag_index: "LOOP" - back_edge: true - } - output_stream: "PREV_LOOP:prev_pose_rect_from_landmarks" -} - -# Checks if there's previous pose rect calculatoed from landmarks. -node: { - calculator: "PacketPresenceCalculator" - input_stream: "PACKET:prev_pose_rect_from_landmarks" - output_stream: "PRESENCE:prev_pose_rect_from_landmarks_is_present" -} - -# Calculates size of the image. -node { - calculator: "ImagePropertiesCalculator" - input_stream: "IMAGE:image" - output_stream: "SIZE:image_size" -} - -# Drops the incoming image if PoseLandmarkUpperBodyByRoiCpu was able to identify -# pose presence in the previous image and skipping pose detection is enabled. -# Otherwise, passes the incoming image through to trigger a new round of pose -# detection in PoseDetectionCpu. -node { - calculator: "LogicCalculator" + calculator: "ConstantSidePacketCalculator" + output_side_packet: "PACKET:upper_body_only" options: { - [mediapipe.LogicCalculatorOptions.ext] { op: AND } - } - input_side_packet: "can_skip_detection" - input_stream: "prev_pose_rect_from_landmarks_is_present" - output_stream: "skip_detection" -} -node { - calculator: "GateCalculator" - input_stream: "image" - input_stream: "image_size" - input_stream: "DISALLOW:skip_detection" - output_stream: "image_for_pose_detection" - output_stream: "image_size_for_pose_detection" - options: { - [mediapipe.GateCalculatorOptions.ext] { - empty_packets_as_allow: true + [mediapipe.ConstantSidePacketCalculatorOptions.ext]: { + packet { bool_value: true } } } } -# Detects poses. node { - calculator: "PoseDetectionCpu" - input_stream: "IMAGE:image_for_pose_detection" - output_stream: "DETECTIONS:pose_detections" -} - -# Gets the very first detection from "pose_detections" vector. -node { - calculator: "SplitDetectionVectorCalculator" - input_stream: "pose_detections" - output_stream: "pose_detection" - options: { - [mediapipe.SplitVectorCalculatorOptions.ext] { - ranges: { begin: 0 end: 1 } - element_only: true - } - } -} - -# Calculates region of interest based on pose detection, so that can be used -# to detect landmarks. -node { - calculator: "PoseDetectionToRoi" - input_stream: "DETECTION:pose_detection" - input_stream: "IMAGE_SIZE:image_size_for_pose_detection" - output_stream: "ROI:pose_rect_from_detection" -} - -# Selects either pose rect (or ROI) calculated from detection or from previously -# detected landmarks if available (in this case, calculation of pose rect from -# detection is skipped). -node { - calculator: "MergeCalculator" - input_stream: "pose_rect_from_detection" - input_stream: "prev_pose_rect_from_landmarks" - output_stream: "pose_rect" -} - -# Detects pose landmarks within specified region of interest of the image. -node { - calculator: "PoseLandmarkUpperBodyByRoiCpu" + calculator: "PoseLandmarkCpu" + input_side_packet: "UPPER_BODY_ONLY:upper_body_only" + input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks" input_stream: "IMAGE:image" - input_stream: "ROI:pose_rect" output_stream: "LANDMARKS:pose_landmarks" - output_stream: "AUXILIARY_LANDMARKS:auxiliary_landmarks" -} - -# Calculates region of interest based on the auxiliary landmarks, to be used in -# the subsequent image. 
-node { - calculator: "PoseLandmarkUpperBodyLandmarksToRoi" - input_stream: "LANDMARKS:auxiliary_landmarks" - input_stream: "IMAGE_SIZE:image_size" - output_stream: "ROI:pose_rect_from_landmarks" + output_stream: "DETECTION:pose_detection" + output_stream: "ROI_FROM_LANDMARKS:pose_rect_from_landmarks" + output_stream: "ROI_FROM_DETECTION:pose_rect_from_detection" } diff --git a/mediapipe/modules/pose_landmark/pose_landmark_upper_body_gpu.pbtxt b/mediapipe/modules/pose_landmark/pose_landmark_upper_body_gpu.pbtxt index 8369d2615..cf6d3c837 100644 --- a/mediapipe/modules/pose_landmark/pose_landmark_upper_body_gpu.pbtxt +++ b/mediapipe/modules/pose_landmark/pose_landmark_upper_body_gpu.pbtxt @@ -13,6 +13,7 @@ # EXAMPLE: # node { # calculator: "PoseLandmarkUpperBodyGpu" +# input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks" # input_stream: "IMAGE:image" # output_stream: "LANDMARKS:pose_landmarks" # } @@ -22,37 +23,37 @@ type: "PoseLandmarkUpperBodyGpu" # GPU image. (GpuBuffer) input_stream: "IMAGE:image" -# Whether pose detection can be skipped when pose regions can already be -# approximated from pose landmarks on the previous frame. -input_side_packet: "CAN_SKIP_DETECTION:can_skip_detection" +# Whether to filter landmarks across different input images to reduce jitter. +# If unspecified, functions as set to false. (bool) +input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks" # Pose landmarks within the given ROI. (NormalizedLandmarkList) # We have 25 (upper-body) landmarks (see pose_landmark_upper_body_topology.svg). # 0 - nose -# 1 - right eye (inner) -# 2 - right eye -# 3 - right eye (outer) -# 4 - left eye (inner) -# 5 - left eye -# 6 - left eye (outer) -# 7 - right ear -# 8 - left ear -# 9 - mouth (right) -# 10 - mouth (left) -# 11 - right shoulder -# 12 - left shoulder -# 13 - right elbow -# 14 - left elbow -# 15 - right wrist -# 16 - left wrist -# 17 - right pinky -# 18 - left pinky -# 19 - right index -# 20 - left index -# 21 - right thumb -# 22 - left thumb -# 23 - right hip -# 24 - left hip +# 1 - left eye (inner) +# 2 - left eye +# 3 - left eye (outer) +# 4 - right eye (inner) +# 5 - right eye +# 6 - right eye (outer) +# 7 - left ear +# 8 - right ear +# 9 - mouth (left) +# 10 - mouth (right) +# 11 - left shoulder +# 12 - right shoulder +# 13 - left elbow +# 14 - right elbow +# 15 - left wrist +# 16 - right wrist +# 17 - left pinky +# 18 - right pinky +# 19 - left index +# 20 - right index +# 21 - left thumb +# 22 - right thumb +# 23 - left hip +# 24 - right hip # # NOTE: if a pose is not present within the given ROI, for this particular # timestamp there will not be an output packet in the LANDMARKS stream. However, @@ -68,116 +69,23 @@ output_stream: "ROI_FROM_LANDMARKS:pose_rect_from_landmarks" # Regions of interest calculated based on pose detections. (NormalizedRect) output_stream: "ROI_FROM_DETECTION:pose_rect_from_detection" -# Caches pose rects calculated from landmarks, and upon the arrival of the next -# input image, sends out the cached rects with timestamps replaced by that of -# the input image, essentially generating a packet that carries the previous -# pose rects. Note that upon the arrival of the very first input image, a -# timestamp bound update occurs to jump start the feedback loop. 
node { - calculator: "PreviousLoopbackCalculator" - input_stream: "MAIN:image" - input_stream: "LOOP:pose_rect_from_landmarks" - input_stream_info: { - tag_index: "LOOP" - back_edge: true - } - output_stream: "PREV_LOOP:prev_pose_rect_from_landmarks" -} - -# Checks if there's previous pose rect calculatoed from landmarks. -node: { - calculator: "PacketPresenceCalculator" - input_stream: "PACKET:prev_pose_rect_from_landmarks" - output_stream: "PRESENCE:prev_pose_rect_from_landmarks_is_present" -} - -# Calculates size of the image. -node { - calculator: "ImagePropertiesCalculator" - input_stream: "IMAGE_GPU:image" - output_stream: "SIZE:image_size" -} - -# Drops the incoming image if PoseLandmarkUpperBodyByRoiGpu was able to identify -# pose presence in the previous image and skipping pose detection is enabled. -# Otherwise, passes the incoming image through to trigger a new round of pose -# detection in PoseDetectionGpu. -node { - calculator: "LogicCalculator" + calculator: "ConstantSidePacketCalculator" + output_side_packet: "PACKET:upper_body_only" options: { - [mediapipe.LogicCalculatorOptions.ext] { op: AND } - } - input_side_packet: "can_skip_detection" - input_stream: "prev_pose_rect_from_landmarks_is_present" - output_stream: "skip_detection" -} -node { - calculator: "GateCalculator" - input_stream: "image" - input_stream: "image_size" - input_stream: "DISALLOW:skip_detection" - output_stream: "image_for_pose_detection" - output_stream: "image_size_for_pose_detection" - options: { - [mediapipe.GateCalculatorOptions.ext] { - empty_packets_as_allow: true + [mediapipe.ConstantSidePacketCalculatorOptions.ext]: { + packet { bool_value: true } } } } -# Detects poses. node { - calculator: "PoseDetectionGpu" - input_stream: "IMAGE:image_for_pose_detection" - output_stream: "DETECTIONS:pose_detections" -} - -# Gets the very first detection from "pose_detections" vector. -node { - calculator: "SplitDetectionVectorCalculator" - input_stream: "pose_detections" - output_stream: "pose_detection" - options: { - [mediapipe.SplitVectorCalculatorOptions.ext] { - ranges: { begin: 0 end: 1 } - element_only: true - } - } -} - -# Calculates region of interest based on pose detection, so that can be used -# to detect landmarks. -node { - calculator: "PoseDetectionToRoi" - input_stream: "DETECTION:pose_detection" - input_stream: "IMAGE_SIZE:image_size_for_pose_detection" - output_stream: "ROI:pose_rect_from_detection" -} - -# Selects either pose rect (or ROI) calculated from detection or from previously -# detected landmarks if available (in this case, calculation of pose rect from -# detection is skipped). -node { - calculator: "MergeCalculator" - input_stream: "pose_rect_from_detection" - input_stream: "prev_pose_rect_from_landmarks" - output_stream: "pose_rect" -} - -# Detects pose landmarks within specified region of interest of the image. -node { - calculator: "PoseLandmarkUpperBodyByRoiGpu" + calculator: "PoseLandmarkGpu" + input_side_packet: "UPPER_BODY_ONLY:upper_body_only" + input_side_packet: "SMOOTH_LANDMARKS:smooth_landmarks" input_stream: "IMAGE:image" - input_stream: "ROI:pose_rect" output_stream: "LANDMARKS:pose_landmarks" - output_stream: "AUXILIARY_LANDMARKS:auxiliary_landmarks" -} - -# Calculates region of interest based on the auxiliary landmarks, to be used in -# the subsequent image. 
-node { - calculator: "PoseLandmarkUpperBodyLandmarksToRoi" - input_stream: "LANDMARKS:auxiliary_landmarks" - input_stream: "IMAGE_SIZE:image_size" - output_stream: "ROI:pose_rect_from_landmarks" + output_stream: "DETECTION:pose_detection" + output_stream: "ROI_FROM_LANDMARKS:pose_rect_from_landmarks" + output_stream: "ROI_FROM_DETECTION:pose_rect_from_detection" } diff --git a/mediapipe/modules/pose_landmark/pose_landmark_upper_body_smoothed_cpu.pbtxt b/mediapipe/modules/pose_landmark/pose_landmark_upper_body_smoothed_cpu.pbtxt deleted file mode 100644 index 466d32043..000000000 --- a/mediapipe/modules/pose_landmark/pose_landmark_upper_body_smoothed_cpu.pbtxt +++ /dev/null @@ -1,108 +0,0 @@ -# Experimental: Adds additional temporal filtering of the landmarks as a post -# processing step to reduce jitter. -# -# MediaPipe graph to detect/predict pose landmarks. (CPU input, and inference is -# executed on CPU.) This graph tries to skip pose detection as much as possible -# by using previously detected/predicted landmarks for new images. -# -# It is required that "pose_detection.tflite" is available at -# "mediapipe/modules/pose_detection/pose_detection.tflite" -# path during execution. -# -# It is required that "pose_landmark_upper_body.tflite" is available at -# "mediapipe/modules/pose_landmark/pose_landmark_upper_body.tflite" -# path during execution. -# -# EXAMPLE: -# node { -# calculator: "PoseLandmarkUpperBodySmoothedCpu" -# input_stream: "IMAGE:image" -# output_stream: "LANDMARKS:pose_landmarks" -# } - -type: "PoseLandmarkUpperBodySmoothedCpu" - -# CPU image. (ImageFrame) -input_stream: "IMAGE:image" - -# Whether pose detection can be skipped when pose regions can already be -# approximated from pose landmarks on the previous frame. -input_side_packet: "CAN_SKIP_DETECTION:can_skip_detection" - -# The pose landmarks within the given ROI. (NormalizedLandmarkList) -# We have 25 (upper-body) landmarks -# (see pose_landmark_upper_body_topology.svg), and there are other auxiliary key -# points. -# 0 - nose -# 1 - right eye (inner) -# 2 - right eye -# 3 - right eye (outer) -# 4 - left eye (inner) -# 5 - left eye -# 6 - left eye (outer) -# 7 - right ear -# 8 - left ear -# 9 - mouth (right) -# 10 - mouth (left) -# 11 - right shoulder -# 12 - left shoulder -# 13 - right elbow -# 14 - left elbow -# 15 - right wrist -# 16 - left wrist -# 17 - right pinky -# 18 - left pinky -# 19 - right index -# 20 - left index -# 21 - right thumb -# 22 - left thumb -# 23 - right hip -# 24 - left hip -# -# NOTE: if a pose is not present within the given ROI, for this particular -# timestamp there will not be an output packet in the LANDMARKS stream. However, -# the MediaPipe framework will internally inform the downstream calculators of -# the absence of this packet so that they don't wait for it unnecessarily. -output_stream: "LANDMARKS:pose_landmarks" - -# Extra outputs (for debugging, for instance). -# Detected poses. (Detection) -output_stream: "DETECTION:pose_detection" -# Regions of interest calculated based on landmarks. (NormalizedRect) -output_stream: "ROI_FROM_LANDMARKS:pose_rect_from_landmarks" -# Regions of interest calculated based on pose detections. (NormalizedRect) -output_stream: "ROI_FROM_DETECTION:pose_rect_from_detection" - -# Subgraph that detects poses and corresponding landmarks. 
-node { - calculator: "PoseLandmarkUpperBodyCpu" - input_stream: "IMAGE:image" - output_stream: "LANDMARKS:unsmoothed_pose_landmarks" - output_stream: "DETECTION:pose_detection" - output_stream: "ROI_FROM_LANDMARKS:pose_rect_from_landmarks" - output_stream: "ROI_FROM_DETECTION:pose_rect_from_detection" - input_side_packet: "CAN_SKIP_DETECTION:can_skip_detection" -} - -# Calculates size of the image. -node { - calculator: "ImagePropertiesCalculator" - input_stream: "IMAGE:image" - output_stream: "SIZE:image_size" -} - -# Smoothes pose landmarks in order to reduce jitter. -node { - calculator: "LandmarksSmoothingCalculator" - input_stream: "NORM_LANDMARKS:unsmoothed_pose_landmarks" - input_stream: "IMAGE_SIZE:image_size" - output_stream: "NORM_FILTERED_LANDMARKS:pose_landmarks" - options: { - [mediapipe.LandmarksSmoothingCalculatorOptions.ext] { - velocity_filter: { - window_size: 5 - velocity_scale: 10.0 - } - } - } -} diff --git a/mediapipe/modules/pose_landmark/pose_landmark_upper_body_smoothed_gpu.pbtxt b/mediapipe/modules/pose_landmark/pose_landmark_upper_body_smoothed_gpu.pbtxt deleted file mode 100644 index 22171912a..000000000 --- a/mediapipe/modules/pose_landmark/pose_landmark_upper_body_smoothed_gpu.pbtxt +++ /dev/null @@ -1,109 +0,0 @@ -# Experimental: Adds additional temporal filtering of the landmarks as a post -# processing step to reduce jitter. -# -# MediaPipe graph to detect/predict pose landmarks. (GPU input, and inference is -# executed on GPU.) This graph tries to skip pose detection as much as possible -# by using previously detected/predicted landmarks for new images. -# -# It is required that "pose_detection.tflite" is available at -# "mediapipe/modules/pose_detection/pose_detection.tflite" -# path during execution. -# -# It is required that "pose_landmark_upper_body.tflite" is available at -# "mediapipe/modules/pose_landmark/pose_landmark_upper_body.tflite" -# path during execution. -# -# EXAMPLE: -# node { -# calculator: "PoseLandmarkUpperBodySmoothedGpu" -# input_stream: "IMAGE:image" -# output_stream: "LANDMARKS:pose_landmarks" -# } - -type: "PoseLandmarkUpperBodySmoothedGpu" - -# GPU image. (GpuBuffer) -input_stream: "IMAGE:image" - -# Whether pose detection can be skipped when pose regions can already be -# approximated from pose landmarks on the previous frame. -input_side_packet: "CAN_SKIP_DETECTION:can_skip_detection" - -# Pose landmarks within the given ROI. (NormalizedLandmarkList) -# We have 25 (upper-body) landmarks -# (see pose_landmark_upper_body_topology.svg), and there are other auxiliary key -# points. -# 0 - nose -# 1 - right eye (inner) -# 2 - right eye -# 3 - right eye (outer) -# 4 - left eye (inner) -# 5 - left eye -# 6 - left eye (outer) -# 7 - right ear -# 8 - left ear -# 9 - mouth (right) -# 10 - mouth (left) -# 11 - right shoulder -# 12 - left shoulder -# 13 - right elbow -# 14 - left elbow -# 15 - right wrist -# 16 - left wrist -# 17 - right pinky -# 18 - left pinky -# 19 - right index -# 20 - left index -# 21 - right thumb -# 22 - left thumb -# 23 - right hip -# 24 - left hip -# -# NOTE: if a pose is not present within the given ROI, for this particular -# timestamp there will not be an output packet in the LANDMARKS stream. However, -# the MediaPipe framework will internally inform the downstream calculators of -# the absence of this packet so that they don't wait for it unnecessarily. -output_stream: "LANDMARKS:pose_landmarks" - -# Extra outputs (for debugging, for instance). -# Detected poses. 
(Detection)
-output_stream: "DETECTION:pose_detection"
-# Regions of interest calculated based on landmarks. (NormalizedRect)
-output_stream: "ROI_FROM_LANDMARKS:pose_rect_from_landmarks"
-# Regions of interest calculated based on pose detections. (NormalizedRect)
-output_stream: "ROI_FROM_DETECTION:pose_rect_from_detection"
-
-# Subgraph that detects poses and corresponding landmarks.
-node {
- calculator: "PoseLandmarkUpperBodyGpu"
- input_stream: "IMAGE:image"
- output_stream: "LANDMARKS:unsmoothed_pose_landmarks"
- output_stream: "DETECTION:pose_detection"
- output_stream: "ROI_FROM_LANDMARKS:pose_rect_from_landmarks"
- output_stream: "ROI_FROM_DETECTION:pose_rect_from_detection"
- input_side_packet: "CAN_SKIP_DETECTION:can_skip_detection"
-}
-
-# Calculates size of the image.
-node {
- calculator: "ImagePropertiesCalculator"
- input_stream: "IMAGE_GPU:image"
- output_stream: "SIZE:image_size"
-}
-
-# Smoothes pose landmarks in order to reduce jitter.
-node {
- calculator: "LandmarksSmoothingCalculator"
- input_stream: "NORM_LANDMARKS:unsmoothed_pose_landmarks"
- input_stream: "IMAGE_SIZE:image_size"
- output_stream: "NORM_FILTERED_LANDMARKS:pose_landmarks"
- options: {
- [mediapipe.LandmarksSmoothingCalculatorOptions.ext] {
- velocity_filter: {
- window_size: 5
- velocity_scale: 10.0
- }
- }
- }
-}
-
diff --git a/mediapipe/modules/pose_landmark/pose_landmark_upper_body_topology.svg b/mediapipe/modules/pose_landmark/pose_landmark_upper_body_topology.svg
index 09373251f..171e3a518 100644
--- a/mediapipe/modules/pose_landmark/pose_landmark_upper_body_topology.svg
+++ b/mediapipe/modules/pose_landmark/pose_landmark_upper_body_topology.svg
@@ -5,510 +5,510 @@
[~510 lines of SVG attribute churn omitted; the markup arrived garbled in extraction. The upper-body topology diagram is re-exported (sodipodi:docname changes from upper_body_pose_key_points.svg to pose_landmark_upper_body_topology.svg) and its text labels are renumbered to match the new left/right landmark ordering above.]
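The pose_landmarks_to_roi graph in the next diff derives the tracking ROI from two auxiliary alignment landmarks via AlignmentPointsRectsCalculator (start keypoint 0 as the rect center, end keypoint 1 setting the scale, target angle 90 degrees). A rough C++ sketch of that geometry; the box-size rule here is an assumption for illustration, not the calculator's verbatim logic:

#include <cmath>

#ifndef M_PI
#define M_PI 3.14159265358979323846  // MSVC needs /D_USE_MATH_DEFINES otherwise
#endif

struct NormalizedRect { float x_center, y_center, width, height, rotation; };

NormalizedRect RoiFromAlignmentPoints(float cx, float cy,   // keypoint 0 (center)
                                      float ex, float ey,   // keypoint 1 (scale/rotation)
                                      int image_width, int image_height) {
  const float dx = (ex - cx) * image_width;
  const float dy = (ey - cy) * image_height;
  const float box_size = 2.0f * std::sqrt(dx * dx + dy * dy);  // assumed scale rule
  const float target_angle = static_cast<float>(M_PI) * 0.5f;  // 90 degrees
  NormalizedRect roi;
  roi.x_center = cx;
  roi.y_center = cy;
  roi.width = box_size / image_width;
  roi.height = box_size / image_height;
  // Rotate so the center->keypoint1 vector points at the target angle.
  roi.rotation = target_angle - std::atan2(-dy, dx);
  return roi;
}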
style="fill:#de2b00;fill-opacity:1;stroke:#000000;stroke-width:1.5423;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;paint-order:normal" /> + cx="452.83154" + cy="269.62979" /> + cx="473.70761" + id="ellipse1279" + style="fill:#de2b00;fill-opacity:1;stroke:#000000;stroke-width:1.5423;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;paint-order:normal" /> + cx="535.23712" + cy="271.27792" /> + cx="556.6626" + id="ellipse1283" + style="fill:#de2b00;fill-opacity:1;stroke:#000000;stroke-width:1.5423;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;paint-order:normal" /> + cx="577.53864" + cy="271.82727" /> + cx="503.92303" + id="ellipse1287" + style="fill:#de2b00;fill-opacity:1;stroke:#000000;stroke-width:1.5423;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;paint-order:normal" /> + cx="610.31799" + cy="297.1174" /> + id="ellipse1291" + style="fill:#de2b00;fill-opacity:1;stroke:#000000;stroke-width:1.5423;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;paint-order:normal" /> + cx="475.90512" + cy="362.67599" /> + id="ellipse1295" + style="fill:#de2b00;fill-opacity:1;stroke:#000000;stroke-width:1.5423;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;paint-order:normal" /> + cx="648.56439" + cy="462.78937" /> + cx="742.87408" + id="ellipse1299" + style="fill:#de2b00;fill-opacity:1;stroke:#000000;stroke-width:1.5423;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;paint-order:normal" /> + cx="837.18384" + cy="516.47339" /> + cx="835.73291" + id="ellipse1303" + style="fill:#de2b00;fill-opacity:1;stroke:#000000;stroke-width:1.5423;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;paint-order:normal" /> + cx="887.96594" + cy="446.82925" /> + cx="914.08246" + id="ellipse1307" + style="fill:#de2b00;fill-opacity:1;stroke:#000000;stroke-width:1.5423;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;paint-order:normal" /> + cx="91.41188" + cy="516.47339" /> + cx="118.97932" + id="ellipse1311" + style="fill:#de2b00;fill-opacity:1;stroke:#000000;stroke-width:1.5423;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;paint-order:normal" /> + cx="175.51056" + cy="468.59305" /> + cx="168.31055" + id="ellipse1315" + style="fill:#de2b00;fill-opacity:1;stroke:#000000;stroke-width:1.5423;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;paint-order:normal" /> + cx="261.16931" + cy="554.1972" /> + id="ellipse1319" + style="fill:#de2b00;fill-opacity:1;stroke:#000000;stroke-width:1.5423;stroke-miterlimit:4;stroke-dasharray:none;stroke-opacity:1;paint-order:normal" /> + id="ellipse1321" + cx="600.68414" + cy="722.50366" /> + id="layer3" + transform="translate(-4)"> 3 + x="419.60342" + y="255.16025" + style="font-size:32px;stroke-width:1.2">6 2 - 1 - 4 - 5 6 + style="font-size:32px;stroke-width:1.2">4 0 + y="255.16025" + x="524.30243" + id="tspan1336" + sodipodi:role="line">1 7 + id="tspan1340" + x="546.92578" + y="255.16025" + style="font-size:32px;stroke-width:1.2">2 3 + 0 + 8 9 + id="tspan1356" + x="623.38293" + y="310.21472" + style="font-size:32px;stroke-width:1.2">7 10 9 + 12 + 11 12 - 21 + 22 21 - 19 + x="97.174942" + y="427.29883" + style="font-size:32px;stroke-width:1.2">20 17 - 15 - 13 - 4 - 16 - 18 20 + id="tspan1392" + x="145.23141" + y="559.1496" + style="font-size:32px;stroke-width:1.2">16 24 + y="531.68073" + x="238.75079" + id="tspan1396" + sodipodi:role="line">14 13 + 15 + 17 + 19 + 23 + 24 diff --git a/mediapipe/modules/pose_landmark/pose_landmark_upper_body_landmarks_to_roi.pbtxt 
similarity index 85%
rename from mediapipe/modules/pose_landmark/pose_landmark_upper_body_landmarks_to_roi.pbtxt
rename to mediapipe/modules/pose_landmark/pose_landmarks_to_roi.pbtxt
index 76854d575..3d7fd28b2 100644
--- a/mediapipe/modules/pose_landmark/pose_landmark_upper_body_landmarks_to_roi.pbtxt
+++ b/mediapipe/modules/pose_landmark/pose_landmarks_to_roi.pbtxt
@@ -1,14 +1,13 @@
 # MediaPipe graph to calculate pose region of interest (ROI) from landmarks
-# detected by "PoseLandmarkUpperBodyByRoiCpu" or
-# "PoseLandmarkUpperBodyByRoiGpu".
+# detected by "PoseLandmarkByRoiCpu" or "PoseLandmarkByRoiGpu".
 #
 # NOTE: this graph is subject to change and should not be used directly.
-type: "PoseLandmarkUpperBodyLandmarksToRoi"
+type: "PoseLandmarksToRoi"
 
 # Normalized landmarks. (NormalizedLandmarkList)
 input_stream: "LANDMARKS:landmarks"
-# Frame size (width & height). (std::pair<int, int>)
+# Image size (width & height). (std::pair<int, int>)
 input_stream: "IMAGE_SIZE:image_size"
 # ROI according to landmarks. (NormalizedRect)
 output_stream: "ROI:roi"
@@ -32,7 +31,6 @@ node {
       rotation_vector_start_keypoint_index: 0
       rotation_vector_end_keypoint_index: 1
       rotation_vector_target_angle_degrees: 90
-      output_zero_rect_for_empty_detections: true
     }
   }
 }
diff --git a/mediapipe/objc/BUILD b/mediapipe/objc/BUILD
index cc90b8d3c..266c52466 100644
--- a/mediapipe/objc/BUILD
+++ b/mediapipe/objc/BUILD
@@ -1,3 +1,5 @@
+load("//mediapipe/framework/tool:mediapipe_graph.bzl", "mediapipe_binary_graph")
+
 package(default_visibility = ["//visibility:private"])
 
 licenses(["notice"])
@@ -55,6 +57,7 @@ objc_library(
     hdrs = MEDIAPIPE_IOS_HDRS,
     copts = [
         "-Wno-shorten-64-to-32",
+        "-std=c++17",
     ],
     sdk_frameworks = [
         # Needed for OpenCV.
@@ -125,6 +128,7 @@ objc_library(
     ],
     copts = [
         "-Wno-shorten-64-to-32",
+        "-std=c++17",
    ],
     visibility = ["//mediapipe/framework:mediapipe_internal"],
     deps = [
@@ -164,6 +168,7 @@ objc_library(
     ],
     copts = [
         "-Wno-shorten-64-to-32",
+        "-std=c++17",
     ],
     sdk_frameworks = [
         "CoreVideo",
@@ -186,6 +191,7 @@ objc_library(
     ],
     copts = [
         "-Wno-shorten-64-to-32",
+        "-std=c++17",
     ],
     sdk_frameworks = [
         "Accelerate",
@@ -216,6 +222,7 @@ objc_library(
     ],
     copts = [
         "-Wno-shorten-64-to-32",
+        "-std=c++17",
     ],
     data = [
         "testdata/googlelogo_color_272x92dp.png",
@@ -240,8 +247,6 @@ objc_library(
     ],
 )
 
-load("//mediapipe/framework/tool:mediapipe_graph.bzl", "mediapipe_binary_graph")
-
 [
     mediapipe_binary_graph(
         name = graph.split("/")[-1].rsplit(".", 1)[0] + "_graph",
diff --git a/mediapipe/objc/MPPGraph.mm b/mediapipe/objc/MPPGraph.mm
index dec76047e..0aa1590d2 100644
--- a/mediapipe/objc/MPPGraph.mm
+++ b/mediapipe/objc/MPPGraph.mm
@@ -111,6 +111,7 @@ void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,
                  didOutputPacket:packet
                       fromStream:streamName];
   } else if (packetType == MPPPacketTypeImageFrame) {
+    wrapper->_framesInFlight--;
     const auto& frame = packet.Get<mediapipe::ImageFrame>();
     mediapipe::ImageFormat::Format format = frame.Format();
 
@@ -163,6 +164,7 @@ void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,
   }
 #if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
   } else if (packetType == MPPPacketTypePixelBuffer) {
+    wrapper->_framesInFlight--;
     CVPixelBufferRef pixelBuffer = packet.Get<mediapipe::GpuBuffer>().GetCVPixelBufferRef();
     if ([wrapper.delegate
             respondsToSelector:@selector
@@ -182,8 +184,6 @@ void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,
   } else {
     _GTMDevLog(@"unsupported packet type");
   }
-
-  wrapper->_framesInFlight--;
 }
 }
diff --git
a/mediapipe/objc/MPPGraphTestBase.h b/mediapipe/objc/MPPGraphTestBase.h index 7c457fbbe..dc81bec80 100644 --- a/mediapipe/objc/MPPGraphTestBase.h +++ b/mediapipe/objc/MPPGraphTestBase.h @@ -84,6 +84,21 @@ maxLocalDifference:(int)maxLocalDiff maxAverageDifference:(float)maxAvgDiff; +/// Compares two pixel buffers with some leniency. +/// Returns true iff the two buffers have the same size and format, and: +/// - the difference between each pixel of A and the corresponding pixel of B does +/// not exceed maxLocalDiff, and +/// - the average difference between corresponding pixels of A and B does not +/// exceed maxAvgDiff. +/// The maximum local difference and average difference will be written +/// to @c maxLocalDiffOut and @c maxAvgDiffOut respectively. +- (BOOL)pixelBuffer:(CVPixelBufferRef)a + isCloseTo:(CVPixelBufferRef)b + maxLocalDifference:(int)maxLocalDiff + maxAverageDifference:(float)maxAvgDiff + maxLocalDifferenceOut:(int*)maxLocalDiffOut + maxAverageDifferenceOut:(float*)maxAvgDiffOut; + /// Utility function for making a copy of a pixel buffer with a different pixel /// format. - (CVPixelBufferRef)convertPixelBuffer:(CVPixelBufferRef)input toPixelFormat:(OSType)pixelFormat; diff --git a/mediapipe/objc/MPPGraphTestBase.mm b/mediapipe/objc/MPPGraphTestBase.mm index 46fe42755..b8c19a145 100644 --- a/mediapipe/objc/MPPGraphTestBase.mm +++ b/mediapipe/objc/MPPGraphTestBase.mm @@ -187,8 +187,24 @@ static void EnsureOutputDirFor(NSString *outputFile) { return [self pixelBuffer:a isCloseTo:b maxLocalDifference:0 maxAverageDifference:0]; } -- (BOOL)pixelBuffer:(CVPixelBufferRef)a isCloseTo:(CVPixelBufferRef)b - maxLocalDifference:(int)maxLocalDiff maxAverageDifference:(float)maxAvgDiff { +- (BOOL)pixelBuffer:(CVPixelBufferRef)a + isCloseTo:(CVPixelBufferRef)b + maxLocalDifference:(int)maxLocalDiff + maxAverageDifference:(float)maxAvgDiff { + return [self pixelBuffer:a + isCloseTo:b + maxLocalDifference:maxLocalDiff + maxAverageDifference:maxAvgDiff + maxLocalDifferenceOut:nil + maxAverageDifferenceOut:nil]; +} + +- (BOOL)pixelBuffer:(CVPixelBufferRef)a + isCloseTo:(CVPixelBufferRef)b + maxLocalDifference:(int)maxLocalDiff + maxAverageDifference:(float)maxAvgDiff + maxLocalDifferenceOut:(int*)maxLocalDiffOut + maxAverageDifferenceOut:(float*)maxAvgDiffOut { size_t aBytesPerRow = CVPixelBufferGetBytesPerRow(a); size_t aWidth = CVPixelBufferGetWidth(a); size_t aHeight = CVPixelBufferGetHeight(a); @@ -229,17 +245,19 @@ static void EnsureOutputDirFor(NSString *outputFile) { // even if bytesPerRow match. size_t usedRowWidth = aWidth * bytesPerPixel; BOOL equal = YES; - float averageDiff = 0; float count = 0; + BOOL canSkipSomeDiffs = !maxLocalDiffOut && !maxAvgDiffOut; + int computedMaxLocalDiff = 0; + float computedAvgDiff = 0; for (int i = aHeight; i > 0 && equal; --i) { - if (maxLocalDiff == 0) { + if (maxLocalDiff == 0 && canSkipSomeDiffs) { // If we can, use memcmp for speed. equal = memcmp(aData, bData, usedRowWidth) == 0; } else { for (int j = 0; j < usedRowWidth; j++) { int diff = abs(aData[j] - bData[j]); - if (diff > maxLocalDiff) { - equal = NO; + computedMaxLocalDiff = MAX(computedMaxLocalDiff, diff); + if (diff > maxLocalDiff && canSkipSomeDiffs) { break; } // We use Welford's algorithm for computing a sample mean. This has better @@ -247,13 +265,21 @@ static void EnsureOutputDirFor(NSString *outputFile) { // particularly matters here. 
// Welford: http://www.jstor.org/stable/1266577 // Knuth: The Art of Computer Programming Vol 2, section 4.2.2 - averageDiff += (diff - averageDiff) / ++count; + computedAvgDiff += (diff - computedAvgDiff) / ++count; } } aData += aBytesPerRow; bData += bBytesPerRow; } - if (averageDiff > maxAvgDiff) equal = NO; + if (computedMaxLocalDiff > maxLocalDiff || computedAvgDiff > maxAvgDiff) { + equal = NO; + } + if (maxLocalDiffOut) { + *maxLocalDiffOut = computedMaxLocalDiff; + } + if (maxAvgDiffOut) { + *maxAvgDiffOut = computedAvgDiff; + } err = CVPixelBufferUnlockBaseAddress(b, kCVPixelBufferLock_ReadOnly); XCTAssertEqual(err, kCVReturnSuccess); diff --git a/mediapipe/python/BUILD b/mediapipe/python/BUILD index b8cdefceb..59b20a2c7 100644 --- a/mediapipe/python/BUILD +++ b/mediapipe/python/BUILD @@ -53,14 +53,20 @@ pybind_extension( cc_library( name = "builtin_calculators", deps = [ + "//mediapipe/calculators/core:gate_calculator", "//mediapipe/calculators/core:pass_through_calculator", + "//mediapipe/calculators/core:side_packet_to_stream_calculator", "//mediapipe/calculators/core:split_normalized_landmark_list_calculator", + "//mediapipe/calculators/core:string_to_int_calculator", + "//mediapipe/calculators/image:image_transformation_calculator", + "//mediapipe/calculators/util:detection_unique_id_calculator", "//mediapipe/modules/face_detection:face_detection_front_cpu", "//mediapipe/modules/face_landmark:face_landmark_front_cpu", "//mediapipe/modules/hand_landmark:hand_landmark_tracking_cpu", + "//mediapipe/modules/holistic_landmark:holistic_landmark_cpu", "//mediapipe/modules/palm_detection:palm_detection_cpu", "//mediapipe/modules/pose_detection:pose_detection_cpu", - "//mediapipe/modules/pose_landmark:pose_landmark_upper_body_by_roi_cpu", - "//mediapipe/modules/pose_landmark:pose_landmark_upper_body_smoothed_cpu", + "//mediapipe/modules/pose_landmark:pose_landmark_by_roi_cpu", + "//mediapipe/modules/pose_landmark:pose_landmark_cpu", ], ) diff --git a/mediapipe/python/calculator_graph_test.py b/mediapipe/python/calculator_graph_test.py index 622b18a40..657c81178 100644 --- a/mediapipe/python/calculator_graph_test.py +++ b/mediapipe/python/calculator_graph_test.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-# Lint as: python3 """Tests for mediapipe.python._framework_bindings.calculator_graph.""" # Dependency imports @@ -25,11 +24,13 @@ from mediapipe.framework import calculator_pb2 class GraphTest(absltest.TestCase): - def testInvalidBinaryGraphFile(self): - with self.assertRaisesRegex(FileNotFoundError, 'No such file or directory'): + def test_invalid_binary_graph_file(self): + with self.assertRaisesRegex( + FileNotFoundError, + '(No such file or directory|The path does not exist)'): mp.CalculatorGraph(binary_graph_path='/tmp/abc.binarypb') - def testInvalidNodeConfig(self): + def test_invalid_node_config(self): text_config = """ node { calculator: 'PassThroughCalculator' @@ -46,7 +47,7 @@ class GraphTest(absltest.TestCase): ): mp.CalculatorGraph(graph_config=config_proto) - def testInvalidCalculatorType(self): + def test_invalid_calculator_type(self): text_config = """ node { calculator: 'SomeUnknownCalculator' @@ -60,7 +61,7 @@ class GraphTest(absltest.TestCase): RuntimeError, 'Unable to find Calculator \"SomeUnknownCalculator\"'): mp.CalculatorGraph(graph_config=config_proto) - def testGraphInitializedWithProtoConfig(self): + def test_graph_initialized_with_proto_config(self): text_config = """ max_queue_size: 1 input_stream: 'in' @@ -95,7 +96,7 @@ class GraphTest(absltest.TestCase): self.assertEqual(mp.packet_getter.get_str(out[0]), 'hello world') self.assertEqual(mp.packet_getter.get_str(out[1]), 'hello world') - def testGraphInitializedWithTextConfig(self): + def test_graph_initialized_with_text_config(self): text_config = """ max_queue_size: 1 input_stream: 'in' @@ -127,7 +128,7 @@ class GraphTest(absltest.TestCase): self.assertEqual(mp.packet_getter.get_str(out[0]), 'hello world') self.assertEqual(mp.packet_getter.get_str(out[1]), 'hello world') - def testGraphValidationAndInitialization(self): + def test_graph_validation_and_initialization(self): text_config = """ max_queue_size: 1 input_stream: 'in' @@ -164,7 +165,7 @@ class GraphTest(absltest.TestCase): self.assertEqual(mp.packet_getter.get_str(out[0]), 'hello world') self.assertEqual(mp.packet_getter.get_str(out[1]), 'hello world') - def testInsertPacketsWithSameTimestamp(self): + def test_insert_packets_with_same_timestamp(self): text_config = """ max_queue_size: 1 input_stream: 'in' @@ -192,7 +193,7 @@ class GraphTest(absltest.TestCase): ValueError, 'Current minimum expected timestamp is 1 but received 0.'): graph.wait_until_idle() - def testSidePacketGraph(self): + def test_side_packet_graph(self): text_config = """ node { calculator: 'StringToUint64Calculator' diff --git a/mediapipe/python/image_frame_test.py b/mediapipe/python/image_frame_test.py index b099e0402..11f08cbc8 100644 --- a/mediapipe/python/image_frame_test.py +++ b/mediapipe/python/image_frame_test.py @@ -27,7 +27,7 @@ import PIL.Image # TODO: Add unit tests specifically for memory management. 
class ImageFrameTest(absltest.TestCase): - def testCreateImageFrameFromGrayCvMat(self): + def test_create_image_frame_from_gray_cv_mat(self): w, h = random.randrange(3, 100), random.randrange(3, 100) mat = cv2.cvtColor( np.random.randint(2**8 - 1, size=(h, w, 3), dtype=np.uint8), @@ -41,7 +41,7 @@ class ImageFrameTest(absltest.TestCase): print(image_frame[w, h]) self.assertEqual(42, image_frame[2, 2]) - def testCreateImageFrameFromRgbCvMat(self): + def test_create_image_frame_from_rgb_cv_mat(self): w, h, channels = random.randrange(3, 100), random.randrange(3, 100), 3 mat = cv2.cvtColor( np.random.randint(2**8 - 1, size=(h, w, channels), dtype=np.uint8), @@ -53,7 +53,7 @@ class ImageFrameTest(absltest.TestCase): print(image_frame[w, h, channels]) self.assertEqual(42, image_frame[2, 2, 1]) - def testCreateImageFrameFromRgb48CvMat(self): + def test_create_image_frame_from_rgb48_cv_mat(self): w, h, channels = random.randrange(3, 100), random.randrange(3, 100), 3 mat = cv2.cvtColor( np.random.randint(2**16 - 1, size=(h, w, channels), dtype=np.uint16), @@ -65,7 +65,7 @@ class ImageFrameTest(absltest.TestCase): print(image_frame[w, h, channels]) self.assertEqual(42, image_frame[2, 2, 1]) - def testCreateImageFrameFromGrayPilImage(self): + def test_create_image_frame_from_gray_pil_image(self): w, h = random.randrange(3, 100), random.randrange(3, 100) img = PIL.Image.fromarray( np.random.randint(2**8 - 1, size=(h, w), dtype=np.uint8), 'L') @@ -77,7 +77,7 @@ class ImageFrameTest(absltest.TestCase): with self.assertRaisesRegex(IndexError, 'out of bounds'): print(image_frame[w, h]) - def testCreateImageFrameFromRgbPilImage(self): + def test_create_image_frame_from_rgb_pil_image(self): w, h, channels = random.randrange(3, 100), random.randrange(3, 100), 3 img = PIL.Image.fromarray( np.random.randint(2**8 - 1, size=(h, w, channels), dtype=np.uint8), @@ -88,7 +88,7 @@ class ImageFrameTest(absltest.TestCase): with self.assertRaisesRegex(IndexError, 'out of bounds'): print(image_frame[w, h, channels]) - def testCreateImageFrameFromRgba64PilImage(self): + def test_create_image_frame_from_rgba64_pil_image(self): w, h, channels = random.randrange(3, 100), random.randrange(3, 100), 4 img = PIL.Image.fromarray( np.random.randint(2**16 - 1, size=(h, w, channels), dtype=np.uint16), @@ -100,7 +100,7 @@ class ImageFrameTest(absltest.TestCase): with self.assertRaisesRegex(IndexError, 'out of bounds'): print(image_frame[1000, 1000, 1000]) - def testImageFrameNumbyView(self): + def test_image_frame_numby_view(self): w, h, channels = random.randrange(3, 100), random.randrange(3, 100), 3 mat = cv2.cvtColor( np.random.randint(2**8 - 1, size=(h, w, channels), dtype=np.uint8), @@ -116,7 +116,7 @@ class ImageFrameTest(absltest.TestCase): copied_ndarray = np.copy(output_ndarray) copied_ndarray[0, 0, 0] = 0 - def testCroppedGray8Image(self): + def test_cropped_gray8_image(self): w, h = random.randrange(20, 100), random.randrange(20, 100) channels, offset = 3, 10 mat = cv2.cvtColor( @@ -129,7 +129,7 @@ class ImageFrameTest(absltest.TestCase): np.array_equal(mat[offset:-offset, offset:-offset], image_frame.numpy_view())) - def testCroppedRGBImage(self): + def test_cropped_rgb_image(self): w, h = random.randrange(20, 100), random.randrange(20, 100) channels, offset = 3, 10 mat = cv2.cvtColor( @@ -145,7 +145,7 @@ class ImageFrameTest(absltest.TestCase): # For image frames that store contiguous data, the output of numpy_view() # points to the pixel data of the original image frame object. 
The life cycle # of the data array should tie to the image frame object. - def testImageFrameNumpyViewWithContiguousData(self): + def test_image_frame_numpy_view_with_contiguous_data(self): w, h = 640, 480 mat = np.random.randint(2**8 - 1, size=(h, w, 3), dtype=np.uint8) image_frame = mp.ImageFrame(image_format=mp.ImageFormat.SRGB, data=mat) @@ -168,7 +168,7 @@ class ImageFrameTest(absltest.TestCase): # For image frames that store non contiguous data, the output of numpy_view() # stores a copy of the pixel data of the image frame object. The life cycle of # the data array doesn't tie to the image frame object. - def testImageFrameNumpyViewWithNonContiguousData(self): + def test_image_frame_numpy_view_with_non_contiguous_data(self): w, h = 641, 481 mat = np.random.randint(2**8 - 1, size=(h, w, 3), dtype=np.uint8) image_frame = mp.ImageFrame(image_format=mp.ImageFormat.SRGB, data=mat) diff --git a/mediapipe/python/packet_creator.py b/mediapipe/python/packet_creator.py index 1040ea688..112b3f336 100644 --- a/mediapipe/python/packet_creator.py +++ b/mediapipe/python/packet_creator.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Lint as: python3 """The public facing packet creator APIs.""" from typing import List, Union @@ -42,6 +41,7 @@ create_double = _packet_creator.create_double create_int_array = _packet_creator.create_int_array create_float_array = _packet_creator.create_float_array create_int_vector = _packet_creator.create_int_vector +create_bool_vector = _packet_creator.create_bool_vector create_float_vector = _packet_creator.create_float_vector create_string_vector = _packet_creator.create_string_vector create_packet_vector = _packet_creator.create_packet_vector diff --git a/mediapipe/python/packet_getter.py b/mediapipe/python/packet_getter.py index 1822890e7..e8a6629df 100644 --- a/mediapipe/python/packet_getter.py +++ b/mediapipe/python/packet_getter.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-# Lint as: python3 """The public facing packet getter APIs.""" from typing import List, Type @@ -29,6 +28,7 @@ get_int = _packet_getter.get_int get_uint = _packet_getter.get_uint get_float = _packet_getter.get_float get_int_list = _packet_getter.get_int_list +get_bool_list = _packet_getter.get_bool_list get_float_list = _packet_getter.get_float_list get_str_list = _packet_getter.get_str_list get_packet_list = _packet_getter.get_packet_list diff --git a/mediapipe/python/packet_test.py b/mediapipe/python/packet_test.py index fe1290ce0..766c5304d 100644 --- a/mediapipe/python/packet_test.py +++ b/mediapipe/python/packet_test.py @@ -26,17 +26,17 @@ from mediapipe.framework.formats import detection_pb2 class PacketTest(absltest.TestCase): - def testEmptyPacket(self): + def test_empty_packet(self): p = mp.Packet() self.assertTrue(p.is_empty()) - def testBooleanPacket(self): + def test_boolean_packet(self): p = mp.packet_creator.create_bool(True) p.timestamp = 0 self.assertEqual(mp.packet_getter.get_bool(p), True) self.assertEqual(p.timestamp, 0) - def testIntPacket(self): + def test_int_packet(self): with self.assertRaisesRegex(OverflowError, 'execeeds the maximum value'): p = mp.packet_creator.create_int(2**32) p = mp.packet_creator.create_int(42) @@ -48,7 +48,7 @@ class PacketTest(absltest.TestCase): self.assertEqual(mp.packet_getter.get_int(p2), 1) self.assertEqual(p2.timestamp, 0) - def testInt8Packet(self): + def test_int8_packet(self): with self.assertRaisesRegex(OverflowError, 'execeeds the maximum value'): p = mp.packet_creator.create_int8(2**7) p = mp.packet_creator.create_int8(2**7 - 1) @@ -60,7 +60,7 @@ class PacketTest(absltest.TestCase): self.assertEqual(mp.packet_getter.get_int(p2), 1) self.assertEqual(p2.timestamp, 0) - def testInt16Packet(self): + def test_int16_packet(self): with self.assertRaisesRegex(OverflowError, 'execeeds the maximum value'): p = mp.packet_creator.create_int16(2**15) p = mp.packet_creator.create_int16(2**15 - 1) @@ -72,7 +72,7 @@ class PacketTest(absltest.TestCase): self.assertEqual(mp.packet_getter.get_int(p2), 1) self.assertEqual(p2.timestamp, 0) - def testInt32Packet(self): + def test_int32_packet(self): with self.assertRaisesRegex(OverflowError, 'execeeds the maximum value'): p = mp.packet_creator.create_int32(2**31) @@ -85,7 +85,7 @@ class PacketTest(absltest.TestCase): self.assertEqual(mp.packet_getter.get_int(p2), 1) self.assertEqual(p2.timestamp, 0) - def testInt64Packet(self): + def test_int64_packet(self): p = mp.packet_creator.create_int64(2**63 - 1) p.timestamp = 0 self.assertEqual(mp.packet_getter.get_int(p), 2**63 - 1) @@ -95,7 +95,7 @@ class PacketTest(absltest.TestCase): self.assertEqual(mp.packet_getter.get_int(p2), 1) self.assertEqual(p2.timestamp, 0) - def testUint8Packet(self): + def test_uint8_packet(self): with self.assertRaisesRegex(OverflowError, 'execeeds the maximum value'): p = mp.packet_creator.create_uint8(2**8) p = mp.packet_creator.create_uint8(2**8 - 1) @@ -107,7 +107,7 @@ class PacketTest(absltest.TestCase): self.assertEqual(mp.packet_getter.get_uint(p2), 1) self.assertEqual(p2.timestamp, 0) - def testUint16Packet(self): + def test_uint16_packet(self): with self.assertRaisesRegex(OverflowError, 'execeeds the maximum value'): p = mp.packet_creator.create_uint16(2**16) p = mp.packet_creator.create_uint16(2**16 - 1) @@ -119,7 +119,7 @@ class PacketTest(absltest.TestCase): self.assertEqual(mp.packet_getter.get_uint(p2), 1) self.assertEqual(p2.timestamp, 0) - def testUint32Packet(self): + def test_uint32_packet(self): with 
self.assertRaisesRegex(OverflowError, 'execeeds the maximum value'): p = mp.packet_creator.create_uint32(2**32) p = mp.packet_creator.create_uint32(2**32 - 1) @@ -131,7 +131,7 @@ class PacketTest(absltest.TestCase): self.assertEqual(mp.packet_getter.get_uint(p2), 1) self.assertEqual(p2.timestamp, 0) - def testUint64Packet(self): + def test_uint64_packet(self): p = mp.packet_creator.create_uint64(2**64 - 1) p.timestamp = 0 self.assertEqual(mp.packet_getter.get_uint(p), 2**64 - 1) @@ -141,7 +141,7 @@ class PacketTest(absltest.TestCase): self.assertEqual(mp.packet_getter.get_uint(p2), 1) self.assertEqual(p2.timestamp, 0) - def testFloatPacket(self): + def test_float_packet(self): p = mp.packet_creator.create_float(0.42) p.timestamp = 0 self.assertAlmostEqual(mp.packet_getter.get_float(p), 0.42) @@ -151,7 +151,7 @@ class PacketTest(absltest.TestCase): self.assertAlmostEqual(mp.packet_getter.get_float(p2), 0.42) self.assertEqual(p2.timestamp, 0) - def testDoublePacket(self): + def test_double_packet(self): p = mp.packet_creator.create_double(0.42) p.timestamp = 0 self.assertAlmostEqual(mp.packet_getter.get_float(p), 0.42) @@ -161,37 +161,37 @@ class PacketTest(absltest.TestCase): self.assertAlmostEqual(mp.packet_getter.get_float(p2), 0.42) self.assertEqual(p2.timestamp, 0) - def testDetectionProtoPacket(self): + def test_detection_proto_packet(self): detection = detection_pb2.Detection() text_format.Parse('score: 0.5', detection) p = mp.packet_creator.create_proto(detection).at(100) - def testStringPacket(self): + def test_string_packet(self): p = mp.packet_creator.create_string('abc').at(100) self.assertEqual(mp.packet_getter.get_str(p), 'abc') self.assertEqual(p.timestamp, 100) p.timestamp = 200 self.assertEqual(p.timestamp, 200) - def testBytesPacket(self): + def test_bytes_packet(self): p = mp.packet_creator.create_string(b'xd0\xba\xd0').at(300) self.assertEqual(mp.packet_getter.get_bytes(p), b'xd0\xba\xd0') self.assertEqual(p.timestamp, 300) - def testIntArrayPacket(self): + def test_int_array_packet(self): p = mp.packet_creator.create_int_array([1, 2, 3]).at(100) self.assertEqual(p.timestamp, 100) - def testFloatArrayPacket(self): + def test_float_array_packet(self): p = mp.packet_creator.create_float_array([0.1, 0.2, 0.3]).at(100) self.assertEqual(p.timestamp, 100) - def testIntVectorPacket(self): + def test_int_vector_packet(self): p = mp.packet_creator.create_int_vector([1, 2, 3]).at(100) self.assertEqual(mp.packet_getter.get_int_list(p), [1, 2, 3]) self.assertEqual(p.timestamp, 100) - def testFloatVectorPacket(self): + def test_float_vector_packet(self): p = mp.packet_creator.create_float_vector([0.1, 0.2, 0.3]).at(100) output_list = mp.packet_getter.get_float_list(p) self.assertAlmostEqual(output_list[0], 0.1) @@ -199,7 +199,7 @@ class PacketTest(absltest.TestCase): self.assertAlmostEqual(output_list[2], 0.3) self.assertEqual(p.timestamp, 100) - def testStringVectorPacket(self): + def test_string_vector_packet(self): p = mp.packet_creator.create_string_vector(['a', 'b', 'c']).at(100) output_list = mp.packet_getter.get_str_list(p) self.assertEqual(output_list[0], 'a') @@ -207,7 +207,7 @@ class PacketTest(absltest.TestCase): self.assertEqual(output_list[2], 'c') self.assertEqual(p.timestamp, 100) - def testPacketVectorPacket(self): + def test_packet_vector_packet(self): p = mp.packet_creator.create_packet_vector([ mp.packet_creator.create_float(0.42), mp.packet_creator.create_int(42), @@ -219,7 +219,7 @@ class PacketTest(absltest.TestCase): 
self.assertEqual(mp.packet_getter.get_str(output_list[2]), '42') self.assertEqual(p.timestamp, 100) - def testStringToPacketMapPacket(self): + def test_string_to_packet_map_packet(self): p = mp.packet_creator.create_string_to_packet_map({ 'float': mp.packet_creator.create_float(0.42), 'int': mp.packet_creator.create_int(42), @@ -232,7 +232,7 @@ class PacketTest(absltest.TestCase): self.assertEqual(mp.packet_getter.get_str(output_list['string']), '42') self.assertEqual(p.timestamp, 100) - def testUint8ImageFramePacket(self): + def test_uint8_image_frame_packet(self): uint8_img = np.random.randint( 2**8 - 1, size=(random.randrange(3, 100), random.randrange(3, 100), 3), @@ -242,7 +242,7 @@ class PacketTest(absltest.TestCase): output_image_frame = mp.packet_getter.get_image_frame(p) self.assertTrue(np.array_equal(output_image_frame.numpy_view(), uint8_img)) - def testUint16ImageFramePacket(self): + def test_uint16_image_frame_packet(self): uint16_img = np.random.randint( 2**16 - 1, size=(random.randrange(3, 100), random.randrange(3, 100), 4), @@ -252,7 +252,7 @@ class PacketTest(absltest.TestCase): output_image_frame = mp.packet_getter.get_image_frame(p) self.assertTrue(np.array_equal(output_image_frame.numpy_view(), uint16_img)) - def testFloatImageFramePacket(self): + def test_float_image_frame_packet(self): float_img = np.float32( np.random.random_sample( (random.randrange(3, 100), random.randrange(3, 100), 2))) @@ -261,7 +261,7 @@ class PacketTest(absltest.TestCase): output_image_frame = mp.packet_getter.get_image_frame(p) self.assertTrue(np.allclose(output_image_frame.numpy_view(), float_img)) - def testImageFramePacketCreationCopyMode(self): + def test_image_frame_packet_creation_copy_mode(self): w, h, channels = random.randrange(3, 100), random.randrange(3, 100), 3 rgb_data = np.random.randint(255, size=(h, w, channels), dtype=np.uint8) # rgb_data is c_contiguous. @@ -294,7 +294,7 @@ class PacketTest(absltest.TestCase): # copy mode. self.assertEqual(sys.getrefcount(rgb_data), initial_ref_count) - def testImageFramePacketCreationReferenceMode(self): + def test_image_frame_packet_creation_reference_mode(self): w, h, channels = random.randrange(3, 100), random.randrange(3, 100), 3 rgb_data = np.random.randint(255, size=(h, w, channels), dtype=np.uint8) rgb_data.flags.writeable = False @@ -338,7 +338,7 @@ class PacketTest(absltest.TestCase): mp.packet_getter.get_image_frame(output_packet).numpy_view(), rgb_data_copy)) - def testImageFramePacketCopyCreationWithCropping(self): + def test_image_frame_packet_copy_creation_with_cropping(self): w, h, channels = random.randrange(40, 100), random.randrange(40, 100), 3 channels, offset = 3, 10 rgb_data = np.random.randint(255, size=(h, w, channels), dtype=np.uint8) @@ -362,7 +362,7 @@ class PacketTest(absltest.TestCase): # copy mode. self.assertEqual(sys.getrefcount(rgb_data), initial_ref_count) - def testMatrixPacket(self): + def test_matrix_packet(self): np_matrix = np.array([[.1, .2, .3], [.4, .5, .6]]) initial_ref_count = sys.getrefcount(np_matrix) p = mp.packet_creator.create_matrix(np_matrix) @@ -374,7 +374,7 @@ class PacketTest(absltest.TestCase): self.assertTrue( np.allclose(output_matrix, np.array([[.1, .2, .3], [.4, .5, .6]]))) - def testMatrixPacketWithNonCContiguousData(self): + def test_matrix_packet_with_non_c_contiguous_data(self): np_matrix = np.array([[.1, .2, .3], [.4, .5, .6]])[:, ::-1] # np_matrix is not c_contiguous. 
 self.assertFalse(np_matrix.flags.c_contiguous)
diff --git a/mediapipe/python/pybind/calculator_graph.cc b/mediapipe/python/pybind/calculator_graph.cc
index bf86366f0..1a4ddd325 100644
--- a/mediapipe/python/pybind/calculator_graph.cc
+++ b/mediapipe/python/pybind/calculator_graph.cc
@@ -284,7 +284,7 @@ void CalculatorGraphSubmodule(pybind11::module* module) {
     graph.close()
 )doc",
-      py::arg("input_side_packets") = (py::dict){});
+      py::arg("input_side_packets") = py::dict());
 
   calculator_graph.def(
       "wait_until_done",
@@ -376,7 +376,7 @@ void CalculatorGraphSubmodule(pybind11::module* module) {
   calculator_graph.def(
       "get_combined_error_message",
       [](CalculatorGraph* self) {
-        ::mediapipe::Status error_status;
+        mediapipe::Status error_status;
         if (self->GetCombinedErrors(&error_status) && !error_status.ok()) {
           return error_status.ToString();
         }
diff --git a/mediapipe/python/pybind/image_frame.cc b/mediapipe/python/pybind/image_frame.cc
index 0747f08f1..8bd79a8aa 100644
--- a/mediapipe/python/pybind/image_frame.cc
+++ b/mediapipe/python/pybind/image_frame.cc
@@ -367,5 +367,5 @@ void ImageFrameSubmodule(pybind11::module* module) {
 }  // namespace mediapipe
 
 #include "mediapipe/framework/type_map.h"
-MEDIAPIPE_REGISTER_TYPE(::mediapipe::ImageFrame, "::mediapipe::ImageFrame",
+MEDIAPIPE_REGISTER_TYPE(mediapipe::ImageFrame, "::mediapipe::ImageFrame",
                         nullptr, nullptr);
diff --git a/mediapipe/python/pybind/packet_creator.cc b/mediapipe/python/pybind/packet_creator.cc
index 579ecb608..d990b3791 100644
--- a/mediapipe/python/pybind/packet_creator.cc
+++ b/mediapipe/python/pybind/packet_creator.cc
@@ -426,6 +426,28 @@ void PublicPacketCreators(pybind11::module* m) {
 )doc",
       py::arg().noconvert(), py::return_value_policy::move);
 
+  m->def(
+      "create_bool_vector",
+      [](const std::vector<bool>& data) {
+        return MakePacket<std::vector<bool>>(data);
+      },
+      R"doc(Create a MediaPipe bool vector Packet from a list of booleans.
+
+  Args:
+    data: A list of booleans.
+
+  Returns:
+    A MediaPipe bool vector Packet.
+
+  Raises:
+    TypeError: If the input is not a list of booleans.
+
+  Examples:
+    packet = mp.packet_creator.create_bool_vector([True, True, False])
+    data = mp.packet_getter.get_bool_list(packet)
+)doc",
+      py::arg().noconvert(), py::return_value_policy::move);
+
   m->def(
       "create_float_vector",
       [](const std::vector<float>& data) {
diff --git a/mediapipe/python/pybind/packet_getter.cc b/mediapipe/python/pybind/packet_getter.cc
index 57411a54a..0287519e4 100644
--- a/mediapipe/python/pybind/packet_getter.cc
+++ b/mediapipe/python/pybind/packet_getter.cc
@@ -205,6 +205,24 @@ void PublicPacketGetters(pybind11::module* m) {
     data = mp.packet_getter.get_int_list(packet)
 )doc");
 
+  m->def(
+      "get_bool_list", &GetContent<std::vector<bool>>,
+      R"doc(Get the content of a MediaPipe bool vector Packet as a boolean list.
+
+  Args:
+    packet: A MediaPipe Packet that holds std::vector<bool>.
+
+  Returns:
+    A boolean list.
+
+  Raises:
+    ValueError: If the Packet doesn't contain std::vector<bool>.
+
+  Examples:
+    packet = mp.packet_creator.create_bool_vector([True, True, False])
+    data = mp.packet_getter.get_bool_list(packet)
+)doc");
+
   m->def(
       "get_float_list", &GetContent<std::vector<float>>,
      R"doc(Get the content of a MediaPipe float vector Packet as a float list.
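
Reviewer note: the two bindings above are symmetric, so a bool-vector packet can be round-tripped from Python. A minimal sketch, assuming a MediaPipe build that includes these new bindings; the .at() timestamp helper is the same one exercised in packet_test.py earlier in this change:

    import mediapipe as mp

    # Wrap a Python list of booleans in a std::vector<bool> packet and
    # stamp it with timestamp 100.
    packet = mp.packet_creator.create_bool_vector([True, True, False]).at(100)

    # get_bool_list is the matching getter; it raises ValueError if the
    # packet does not actually hold a std::vector<bool>.
    assert mp.packet_getter.get_bool_list(packet) == [True, True, False]
    assert packet.timestamp == 100
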
diff --git a/mediapipe/python/pybind/util.h b/mediapipe/python/pybind/util.h index d776d53b9..b84539e8f 100644 --- a/mediapipe/python/pybind/util.h +++ b/mediapipe/python/pybind/util.h @@ -97,7 +97,8 @@ inline ::mediapipe::CalculatorGraphConfig ReadCalculatorGraphConfigFromFile( throw RaisePyError(PyExc_FileNotFoundError, status.message().data()); } std::string graph_config_string; - RaisePyErrorIfNotOk(file::GetContents(file_name, &graph_config_string)); + RaisePyErrorIfNotOk(file::GetContents(file_name, &graph_config_string, + /*read_as_binary=*/true)); if (!graph_config_proto.ParseFromArray(graph_config_string.c_str(), graph_config_string.length())) { throw RaisePyError( diff --git a/mediapipe/python/solution_base.py b/mediapipe/python/solution_base.py index 2ebdb2cf8..90c36560c 100644 --- a/mediapipe/python/solution_base.py +++ b/mediapipe/python/solution_base.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Lint as: python3 """MediaPipe SolutionBase module. MediaPipe SolutionBase is the common base class for the high-level MediaPipe @@ -32,9 +31,12 @@ import numpy as np from google.protobuf import descriptor # resources dependency +# pylint: disable=unused-import +# pylint: enable=unused-import from mediapipe.framework import calculator_pb2 # pylint: disable=unused-import from mediapipe.framework.formats import detection_pb2 +from mediapipe.calculators.core import constant_side_packet_calculator_pb2 from mediapipe.calculators.image import image_transformation_calculator_pb2 from mediapipe.calculators.tensor import tensors_to_detections_calculator_pb2 from mediapipe.calculators.util import landmarks_smoothing_calculator_pb2 @@ -55,6 +57,8 @@ import mediapipe.python.packet_getter as packet_getter RGB_CHANNELS = 3 # TODO: Enable calculator options modification for more calculators. 
 CALCULATOR_TO_OPTIONS = {
+    'ConstantSidePacketCalculator':
+        constant_side_packet_calculator_pb2
+        .ConstantSidePacketCalculatorOptions,
     'ImageTransformationCalculator':
         image_transformation_calculator_pb2
         .ImageTransformationCalculatorOptions,
@@ -76,6 +80,7 @@ class _PacketDataType(enum.Enum):
   """The packet data types supported by the SolutionBase class."""
   STRING = 'string'
   BOOL = 'bool'
+  BOOL_LIST = 'bool_list'
   INT = 'int'
   FLOAT = 'float'
   AUDIO = 'matrix'
@@ -93,6 +98,8 @@ NAME_TO_TYPE: Mapping[str, '_PacketDataType'] = {
         _PacketDataType.STRING,
     'bool':
         _PacketDataType.BOOL,
+    '::std::vector<bool>':
+        _PacketDataType.BOOL_LIST,
     'int':
         _PacketDataType.INT,
     'float':
@@ -113,6 +120,8 @@ NAME_TO_TYPE: Mapping[str, '_PacketDataType'] = {
         _PacketDataType.PROTO,
     '::mediapipe::NormalizedLandmark':
         _PacketDataType.PROTO,
+    '::mediapipe::Trigger':
+        _PacketDataType.PROTO,
     '::mediapipe::Rect':
         _PacketDataType.PROTO,
     '::mediapipe::NormalizedRect':
@@ -198,7 +207,7 @@ class SolutionBase:
       raise ValueError(
           "Must provide exactly one of 'binary_graph_path' or 'graph_config'.")
     # MediaPipe package root path
-    root_path = os.sep.join( os.path.abspath(__file__).split(os.sep)[:-3])
+    root_path = os.sep.join(os.path.abspath(__file__).split(os.sep)[:-3])
     resource_util.set_resource_dir(root_path)
     validated_graph = validated_graph_config.ValidatedGraphConfig()
     if binary_graph_path:
diff --git a/mediapipe/python/solution_base_test.py b/mediapipe/python/solution_base_test.py
index e3e597c12..35abca9f1 100644
--- a/mediapipe/python/solution_base_test.py
+++ b/mediapipe/python/solution_base_test.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-# Lint as: python3
 """Tests for mediapipe.python.solution_base."""
 
 from absl.testing import absltest
diff --git a/mediapipe/python/solutions/__init__.py b/mediapipe/python/solutions/__init__.py
index bf50d9e43..64875c029 100644
--- a/mediapipe/python/solutions/__init__.py
+++ b/mediapipe/python/solutions/__init__.py
@@ -17,4 +17,5 @@
 import mediapipe.python.solutions.drawing_utils
 import mediapipe.python.solutions.face_mesh
 import mediapipe.python.solutions.hands
+import mediapipe.python.solutions.holistic
 import mediapipe.python.solutions.pose
diff --git a/mediapipe/python/solutions/drawing_utils.py b/mediapipe/python/solutions/drawing_utils.py
index 47fdd8419..ae1e0c401 100644
--- a/mediapipe/python/solutions/drawing_utils.py
+++ b/mediapipe/python/solutions/drawing_utils.py
@@ -12,7 +12,6 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
-# Lint as: python3 """MediaPipe solution drawing utils.""" import math @@ -24,8 +23,10 @@ import numpy as np from mediapipe.framework.formats import landmark_pb2 +PRESENCE_THRESHOLD = 0.5 RGB_CHANNELS = 3 RED_COLOR = (0, 0, 255) +VISIBILITY_THRESHOLD = 0.5 @dataclasses.dataclass @@ -88,7 +89,10 @@ def draw_landmarks( image_rows, image_cols, _ = image.shape idx_to_coordinates = {} for idx, landmark in enumerate(landmark_list.landmark): - if landmark.visibility < 0 or landmark.presence < 0: + if ((landmark.HasField('visibility') and + landmark.visibility < VISIBILITY_THRESHOLD) or + (landmark.HasField('presence') and + landmark.presence < PRESENCE_THRESHOLD)): continue landmark_px = _normalized_to_pixel_coordinates(landmark.x, landmark.y, image_cols, image_rows) diff --git a/mediapipe/python/solutions/drawing_utils_test.py b/mediapipe/python/solutions/drawing_utils_test.py index 2241d5873..0c5e21e01 100644 --- a/mediapipe/python/solutions/drawing_utils_test.py +++ b/mediapipe/python/solutions/drawing_utils_test.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Lint as: python3 """Tests for mediapipe.python.solutions.drawing_utils.""" from absl.testing import absltest @@ -48,7 +47,7 @@ class DrawingUtilTest(parameterized.TestCase): @parameterized.named_parameters( ('landmark_list_has_only_one_element', 'landmark {x: 0.1 y: 0.1}'), ('second_landmark_is_invisible', - 'landmark {x: 0.1 y: 0.1} landmark {x: 0.5 y: 0.5 visibility: -1.0}')) + 'landmark {x: 0.1 y: 0.1} landmark {x: 0.5 y: 0.5 visibility: 0.0}')) def test_draw_single_landmark_point(self, landmark_list_text): landmark_list = text_format.Parse(landmark_list_text, landmark_pb2.NormalizedLandmarkList()) @@ -65,8 +64,8 @@ class DrawingUtilTest(parameterized.TestCase): ('landmarks_have_x_and_y_only', 'landmark {x: 0.1 y: 0.5} landmark {x: 0.5 y: 0.1}'), ('landmark_zero_visibility_and_presence', - 'landmark {x: 0.1 y: 0.5 presence: 0.0}' - 'landmark {x: 0.5 y: 0.1 visibility: 0.0}')) + 'landmark {x: 0.1 y: 0.5 presence: 0.5}' + 'landmark {x: 0.5 y: 0.1 visibility: 0.5}')) def test_draw_landmarks_and_connections(self, landmark_list_text): landmark_list = text_format.Parse(landmark_list_text, landmark_pb2.NormalizedLandmarkList()) diff --git a/mediapipe/python/solutions/face_mesh.py b/mediapipe/python/solutions/face_mesh.py index 2afcbfdf3..b1b84aedd 100644 --- a/mediapipe/python/solutions/face_mesh.py +++ b/mediapipe/python/solutions/face_mesh.py @@ -12,16 +12,17 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Lint as: python3 """MediaPipe FaceMesh.""" from typing import NamedTuple import numpy as np +from mediapipe.calculators.core import constant_side_packet_calculator_pb2 # pylint: disable=unused-import from mediapipe.calculators.core import gate_calculator_pb2 from mediapipe.calculators.core import split_vector_calculator_pb2 +from mediapipe.calculators.tensor import image_to_tensor_calculator_pb2 from mediapipe.calculators.tensor import inference_calculator_pb2 from mediapipe.calculators.tensor import tensors_to_classification_calculator_pb2 from mediapipe.calculators.tensor import tensors_to_detections_calculator_pb2 @@ -80,32 +81,6 @@ FACE_CONNECTIONS = frozenset([ (310, 415), (415, 308), # Left eye. 
- (33, 7), - (7, 163), - (163, 144), - (144, 145), - (145, 153), - (153, 154), - (154, 155), - (155, 133), - (33, 246), - (246, 161), - (161, 160), - (160, 159), - (159, 158), - (158, 157), - (157, 173), - (173, 133), - # Left eyebrow. - (46, 53), - (53, 52), - (52, 65), - (65, 55), - (70, 63), - (63, 105), - (105, 66), - (66, 107), - # Right eye. (263, 249), (249, 390), (390, 373), @@ -122,7 +97,7 @@ FACE_CONNECTIONS = frozenset([ (385, 384), (384, 398), (398, 362), - # Right eyebrow. + # Left eyebrow. (276, 283), (283, 282), (282, 295), @@ -131,6 +106,32 @@ FACE_CONNECTIONS = frozenset([ (293, 334), (334, 296), (296, 336), + # Right eye. + (33, 7), + (7, 163), + (163, 144), + (144, 145), + (145, 153), + (153, 154), + (154, 155), + (155, 133), + (33, 246), + (246, 161), + (161, 160), + (160, 159), + (159, 158), + (158, 157), + (157, 173), + (173, 133), + # Right eyebrow. + (46, 53), + (53, 52), + (52, 65), + (65, 55), + (70, 63), + (63, 105), + (105, 66), + (66, 107), # Face oval. (10, 338), (338, 297), @@ -177,111 +178,41 @@ class FaceMesh(SolutionBase): MediaPipe FaceMesh processes an RGB image and returns the face landmarks on each detected face. - Usage examples: - import cv2 - import mediapipe as mp - mp_drawing = mp.solutions.drawing_utils - mp_face_mesh = mp.solutions.face_mesh - - # For static images: - face_mesh = mp_face_mesh.FaceMesh( - static_image_mode=True, - max_num_faces=1, - min_detection_confidence=0.5) - drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1) - for idx, file in enumerate(file_list): - image = cv2.imread(file) - # Convert the BGR image to RGB before processing. - results = face_mesh.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) - - # Print and draw face mesh landmarks on the image. - if not results.multi_face_landmarks: - continue - annotated_image = image.copy() - for face_landmarks in results.multi_face_landmarks: - print('face_landmarks:', face_landmarks) - mp_drawing.draw_landmarks( - image=annotated_image, - landmark_list=face_landmarks, - connections=mp_face_mesh.FACE_CONNECTIONS, - landmark_drawing_spec=drawing_spec, - connection_drawing_spec=drawing_spec) - cv2.imwrite('/tmp/annotated_image' + str(idx) + '.png', image) - face_mesh.close() - - # For webcam input: - face_mesh = mp_face_mesh.FaceMesh( - min_detection_confidence=0.5, min_tracking_confidence=0.5) - drawing_spec = mp_drawing.DrawingSpec(thickness=1, circle_radius=1) - cap = cv2.VideoCapture(0) - while cap.isOpened(): - success, image = cap.read() - if not success: - break - - # Flip the image horizontally for a later selfie-view display, and convert - # the BGR image to RGB. - image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB) - # To improve performance, optionally mark the image as not writeable to - # pass by reference. - image.flags.writeable = False - results = face_mesh.process(image) - - # Draw the face mesh annotations on the image. - image.flags.writeable = True - image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) - if results.multi_face_landmarks: - for face_landmarks in results.multi_face_landmarks: - mp_drawing.draw_landmarks( - image=image, - landmark_list=face_landmarks, - connections=mp_face_mesh.FACE_CONNECTIONS, - landmark_drawing_spec=drawing_spec, - connection_drawing_spec=drawing_spec) - cv2.imshow('MediaPipe FaceMesh', image) - if cv2.waitKey(5) & 0xFF == 27: - break - face_mesh.close() - cap.release() + Please refer to https://solutions.mediapipe.dev/face_mesh#python-solution-api + for usage examples. 
""" def __init__(self, static_image_mode=False, - max_num_faces=2, + max_num_faces=1, min_detection_confidence=0.5, min_tracking_confidence=0.5): """Initializes a MediaPipe FaceMesh object. Args: - static_image_mode: If set to False, the solution treats the input images - as a video stream. It will try to detect faces in the first input - images, and upon a successful detection further localizes the face - landmarks. In subsequent images, once all "max_num_faces" faces are - detected and the corresponding face landmarks are localized, it simply - tracks those landmarks without invoking another detection until it loses - track of any of the faces. This reduces latency and is ideal for - processing video frames. If set to True, face detection runs on every - input image, ideal for processing a batch of static, possibly unrelated, - images. Default to False. - max_num_faces: Maximum number of faces to detect. Default to 2. - min_detection_confidence: Minimum confidence value ([0.0, 1.0]) from the - face detection model for the detection to be considered successful. - Default to 0.5. - min_tracking_confidence: Minimum confidence value ([0.0, 1.0]) from the - landmark-tracking model for the face landmarks to be considered tracked - successfully, or otherwise face detection will be invoked automatically - on the next input image. Setting it to a higher value can increase - robustness of the solution, at the expense of a higher latency. Ignored - if "static_image_mode" is True, where face detection simply runs on - every image. Default to 0.5. + static_image_mode: Whether to treat the input images as a batch of static + and possibly unrelated images, or a video stream. See details in + https://solutions.mediapipe.dev/face_mesh#static-image-mode. + max_num_faces: Maximum number of faces to detect. See details in + https://solutions.mediapipe.dev/face_mesh#max-num-faces. + min_detection_confidence: Minimum confidence value ([0.0, 1.0]) for face + detection to be considered successful. See details in + https://solutions.mediapipe.dev/face_mesh#min-detection-confidence. + min_tracking_confidence: Minimum confidence value ([0.0, 1.0]) for the + face landmarks to be considered tracked successfully. See details in + https://solutions.mediapipe.dev/face_mesh#min-tracking-confidence. """ super().__init__( binary_graph_path=BINARYPB_FILE_PATH, side_inputs={ 'num_faces': max_num_faces, - 'can_skip_detection': not static_image_mode, }, calculator_params={ + 'ConstantSidePacketCalculator.packet': [ + constant_side_packet_calculator_pb2 + .ConstantSidePacketCalculatorOptions.ConstantSidePacket( + bool_value=not static_image_mode) + ], 'facedetectionfrontcpu__TensorsToDetectionsCalculator.min_score_thresh': min_detection_confidence, 'facelandmarkcpu__ThresholdingCalculator.threshold': diff --git a/mediapipe/python/solutions/face_mesh_test.py b/mediapipe/python/solutions/face_mesh_test.py new file mode 100644 index 000000000..6edb21b62 --- /dev/null +++ b/mediapipe/python/solutions/face_mesh_test.py @@ -0,0 +1,112 @@ +# Copyright 2020 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Tests for mediapipe.python.solutions.face_mesh.""" + +import os + +from absl.testing import absltest +from absl.testing import parameterized +import cv2 +import numpy as np +import numpy.testing as npt + +# resources dependency +from mediapipe.python.solutions import face_mesh as mp_faces + +TEST_IMAGE_PATH = 'mediapipe/python/solutions/testdata' +DIFF_THRESHOLOD = 20 +EYE_INDICES_TO_LANDMARKS = { + 33: [176, 350], + 7: [177, 353], + 163: [178, 357], + 144: [179, 362], + 145: [179, 369], + 153: [179, 376], + 154: [178, 382], + 155: [177, 386], + 133: [177, 388], + 246: [175, 352], + 161: [174, 355], + 160: [172, 360], + 159: [170, 367], + 158: [171, 374], + 157: [172, 381], + 173: [175, 386], + 263: [176, 475], + 249: [177, 471], + 390: [177, 467], + 373: [178, 462], + 374: [179, 454], + 380: [179, 448], + 381: [178, 441], + 382: [177, 437], + 362: [177, 435], + 466: [175, 473], + 388: [173, 469], + 387: [171, 464], + 386: [170, 457], + 385: [171, 450], + 384: [172, 443], + 398: [175, 438] +} + + +class FaceMeshTest(parameterized.TestCase): + + def test_invalid_image_shape(self): + faces = mp_faces.FaceMesh() + with self.assertRaisesRegex( + ValueError, 'Input image must contain three channel rgb data.'): + faces.process(np.arange(36, dtype=np.uint8).reshape(3, 3, 4)) + + def test_blank_image(self): + faces = mp_faces.FaceMesh() + image = np.zeros([100, 100, 3], dtype=np.uint8) + image.fill(255) + results = faces.process(image) + self.assertIsNone(results.multi_face_landmarks) + faces.close() + + @parameterized.named_parameters(('static_image_mode', True, 1), + ('video_mode', False, 5)) + def test_face(self, static_image_mode: bool, num_frames: int): + image_path = os.path.join(os.path.dirname(__file__), 'testdata/face.jpg') + faces = mp_faces.FaceMesh( + static_image_mode=static_image_mode, min_detection_confidence=0.5) + image = cv2.flip(cv2.imread(image_path), 1) + + def process_one_frame(): + results = faces.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) + multi_face_landmarks = [] + for landmarks in results.multi_face_landmarks: + self.assertLen(landmarks.landmark, 468) + x = [landmark.x for landmark in landmarks.landmark] + y = [landmark.y for landmark in landmarks.landmark] + face_landmarks = np.transpose(np.stack((y, x))) * image.shape[0:2] + multi_face_landmarks.append(face_landmarks) + self.assertLen(multi_face_landmarks, 1) + # Verify the eye landmarks are correct as sanity check. + for idx, gt_lds in EYE_INDICES_TO_LANDMARKS.items(): + prediction_error = np.abs( + np.asarray(multi_face_landmarks[0][idx]) - np.asarray(gt_lds)) + npt.assert_array_less(prediction_error, DIFF_THRESHOLOD) + + for _ in range(num_frames): + process_one_frame() + faces.close() + + +if __name__ == '__main__': + absltest.main() diff --git a/mediapipe/python/solutions/hands.py b/mediapipe/python/solutions/hands.py index 8253e344c..72738984e 100644 --- a/mediapipe/python/solutions/hands.py +++ b/mediapipe/python/solutions/hands.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-# Lint as: python3 """MediaPipe Hands.""" import enum @@ -20,6 +19,7 @@ from typing import NamedTuple import numpy as np +from mediapipe.calculators.core import constant_side_packet_calculator_pb2 # pylint: disable=unused-import from mediapipe.calculators.core import gate_calculator_pb2 from mediapipe.calculators.core import split_vector_calculator_pb2 @@ -102,105 +102,41 @@ class Hands(SolutionBase): horizontally. If that is not the case, use, for instance, cv2.flip(image, 1) to flip the image first for a correct handedness output. - Usage examples: - import cv2 - import mediapipe as mp - mp_drawing = mp.solutions.drawing_utils - mp_hands = mp.solutions.hands - - # For static images: - hands = mp_hands.Hands( - static_image_mode=True, - max_num_hands=2, - min_detection_confidence=0.7) - for idx, file in enumerate(file_list): - # Read an image, flip it around y-axis for correct handedness output (see - # above). - image = cv2.flip(cv2.imread(file), 1) - # Convert the BGR image to RGB before processing. - results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) - - # Print handedness and draw hand landmarks on the image. - print('handedness:', results.multi_handedness) - if not results.multi_hand_landmarks: - continue - annotated_image = image.copy() - for hand_landmarks in results.multi_hand_landmarks: - print('hand_landmarks:', hand_landmarks) - mp_drawing.draw_landmarks( - annotated_image, hand_landmarks, mp_hands.HAND_CONNECTIONS) - cv2.imwrite( - '/tmp/annotated_image' + str(idx) + '.png', cv2.flip(image, 1)) - hands.close() - - # For webcam input: - hands = mp_hands.Hands( - min_detection_confidence=0.7, min_tracking_confidence=0.5) - cap = cv2.VideoCapture(0) - while cap.isOpened(): - success, image = cap.read() - if not success: - break - - # Flip the image horizontally for a later selfie-view display, and convert - # the BGR image to RGB. - image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB) - # To improve performance, optionally mark the image as not writeable to - # pass by reference. - image.flags.writeable = False - results = hands.process(image) - - # Draw the hand annotations on the image. - image.flags.writeable = True - image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) - if results.multi_hand_landmarks: - for hand_landmarks in results.multi_hand_landmarks: - mp_drawing.draw_landmarks( - image, hand_landmarks, mp_hands.HAND_CONNECTIONS) - cv2.imshow('MediaPipe Hands', image) - if cv2.waitKey(5) & 0xFF == 27: - break - hands.close() - cap.release() + Please refer to https://solutions.mediapipe.dev/hands#python-solution-api for + usage examples. """ def __init__(self, static_image_mode=False, max_num_hands=2, - min_detection_confidence=0.7, + min_detection_confidence=0.5, min_tracking_confidence=0.5): """Initializes a MediaPipe Hand object. Args: - static_image_mode: If set to False, the solution treats the input images - as a video stream. It will try to detect hands in the first input - images, and upon a successful detection further localizes the hand - landmarks. In subsequent images, once all "max_num_hands" hands are - detected and the corresponding hand landmarks are localized, it simply - tracks those landmarks without invoking another detection until it loses - track of any of the hands. This reduces latency and is ideal for - processing video frames. If set to True, hand detection runs on every - input image, ideal for processing a batch of static, possibly unrelated, - images. Default to False. - max_num_hands: Maximum number of hands to detect. 
Default to 2. - min_detection_confidence: Minimum confidence value ([0.0, 1.0]) from the - hand detection model for the detection to be considered successful. - Default to 0.7. - min_tracking_confidence: Minimum confidence value ([0.0, 1.0]) from the - landmark-tracking model for the hand landmarks to be considered tracked - successfully, or otherwise hand detection will be invoked automatically - on the next input image. Setting it to a higher value can increase - robustness of the solution, at the expense of a higher latency. Ignored - if "static_image_mode" is True, where hand detection simply runs on - every image. Default to 0.5. + static_image_mode: Whether to treat the input images as a batch of static + and possibly unrelated images, or a video stream. See details in + https://solutions.mediapipe.dev/hands#static-image-mode. + max_num_hands: Maximum number of hands to detect. See details in + https://solutions.mediapipe.dev/hands#max-num-hands. + min_detection_confidence: Minimum confidence value ([0.0, 1.0]) for hand + detection to be considered successful. See details in + https://solutions.mediapipe.dev/hands#min-detection-confidence. + min_tracking_confidence: Minimum confidence value ([0.0, 1.0]) for the + hand landmarks to be considered tracked successfully. See details in + https://solutions.mediapipe.dev/hands#min-tracking-confidence. """ super().__init__( binary_graph_path=BINARYPB_FILE_PATH, side_inputs={ 'num_hands': max_num_hands, - 'can_skip_detection': not static_image_mode, }, calculator_params={ + 'ConstantSidePacketCalculator.packet': [ + constant_side_packet_calculator_pb2 + .ConstantSidePacketCalculatorOptions.ConstantSidePacket( + bool_value=not static_image_mode) + ], 'palmdetectioncpu__TensorsToDetectionsCalculator.min_score_thresh': min_detection_confidence, 'handlandmarkcpu__ThresholdingCalculator.threshold': diff --git a/mediapipe/python/solutions/hands_test.py b/mediapipe/python/solutions/hands_test.py new file mode 100644 index 000000000..b6c67ca85 --- /dev/null +++ b/mediapipe/python/solutions/hands_test.py @@ -0,0 +1,99 @@ +# Copyright 2020 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +"""Tests for mediapipe.python.solutions.hands.""" + +import os + +from absl.testing import absltest +from absl.testing import parameterized +import cv2 +import numpy as np +import numpy.testing as npt + +# resources dependency +from mediapipe.python.solutions import hands as mp_hands + +TEST_IMAGE_PATH = 'mediapipe/python/solutions/testdata' +DIFF_THRESHOLOD = 20 +EXPECTED_HAND_COORDINATES_PREDICTION = [[[332, 144], [323, 211], [286, 257], + [237, 289], [203, 322], [216, 219], + [138, 238], [90, 249], [51, 253], + [204, 177], [115, 184], [60, 187], + [19, 185], [208, 138], [127, 131], + [77, 124], [36, 117], [222, 106], + [159, 92], [124, 79], [93, 68]], + [[43, 570], [56, 504], [94, 459], + [146, 429], [182, 397], [167, 496], + [245, 479], [292, 469], [330, 464], + [177, 540], [265, 534], [319, 533], + [360, 536], [172, 581], [252, 587], + [304, 593], [346, 599], [157, 615], + [219, 628], [255, 638], [288, 648]]] + + +class HandsTest(parameterized.TestCase): + + def test_invalid_image_shape(self): + hands = mp_hands.Hands() + with self.assertRaisesRegex( + ValueError, 'Input image must contain three channel rgb data.'): + hands.process(np.arange(36, dtype=np.uint8).reshape(3, 3, 4)) + + def test_blank_image(self): + hands = mp_hands.Hands() + image = np.zeros([100, 100, 3], dtype=np.uint8) + image.fill(255) + results = hands.process(image) + self.assertIsNone(results.multi_hand_landmarks) + self.assertIsNone(results.multi_handedness) + hands.close() + + @parameterized.named_parameters(('static_image_mode', True, 1), + ('video_mode', False, 5)) + def test_multi_hands(self, static_image_mode, num_frames): + image_path = os.path.join(os.path.dirname(__file__), 'testdata/hands.jpg') + hands = mp_hands.Hands( + static_image_mode=static_image_mode, + max_num_hands=2, + min_detection_confidence=0.5) + image = cv2.flip(cv2.imread(image_path), 1) + + def process_one_frame(): + results = hands.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) + handedness = [ + handedness.classification[0].label + for handedness in results.multi_handedness + ] + self.assertLen(handedness, 2) + multi_hand_coordinates = [] + for landmarks in results.multi_hand_landmarks: + self.assertLen(landmarks.landmark, 21) + x = [landmark.x for landmark in landmarks.landmark] + y = [landmark.y for landmark in landmarks.landmark] + hand_coordinates = np.transpose(np.stack((y, x))) * image.shape[0:2] + multi_hand_coordinates.append(hand_coordinates) + self.assertLen(multi_hand_coordinates, 2) + prediction_error = np.abs( + np.asarray(multi_hand_coordinates) - + np.asarray(EXPECTED_HAND_COORDINATES_PREDICTION)) + npt.assert_array_less(prediction_error, DIFF_THRESHOLOD) + + for _ in range(num_frames): + process_one_frame() + hands.close() + + +if __name__ == '__main__': + absltest.main() diff --git a/mediapipe/python/solutions/holistic.py b/mediapipe/python/solutions/holistic.py new file mode 100644 index 000000000..8c4baa7d4 --- /dev/null +++ b/mediapipe/python/solutions/holistic.py @@ -0,0 +1,130 @@ +# Copyright 2020 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. +"""MediaPipe Holistic.""" + +from typing import NamedTuple + +import numpy as np + +from mediapipe.calculators.core import constant_side_packet_calculator_pb2 +# pylint: disable=unused-import +from mediapipe.calculators.core import gate_calculator_pb2 +from mediapipe.calculators.core import split_vector_calculator_pb2 +from mediapipe.calculators.tensor import image_to_tensor_calculator_pb2 +from mediapipe.calculators.tensor import inference_calculator_pb2 +from mediapipe.calculators.tensor import tensors_to_classification_calculator_pb2 +from mediapipe.calculators.tensor import tensors_to_floats_calculator_pb2 +from mediapipe.calculators.tensor import tensors_to_landmarks_calculator_pb2 +from mediapipe.calculators.tflite import ssd_anchors_calculator_pb2 +from mediapipe.calculators.util import detections_to_rects_calculator_pb2 +from mediapipe.calculators.util import landmark_projection_calculator_pb2 +from mediapipe.calculators.util import non_max_suppression_calculator_pb2 +from mediapipe.calculators.util import rect_transformation_calculator_pb2 +from mediapipe.modules.holistic_landmark.calculators import roi_tracking_calculator_pb2 +# pylint: enable=unused-import +from mediapipe.python.solution_base import SolutionBase +# pylint: disable=unused-import +from mediapipe.python.solutions.face_mesh import FACE_CONNECTIONS +from mediapipe.python.solutions.hands import HAND_CONNECTIONS +from mediapipe.python.solutions.hands import HandLandmark +from mediapipe.python.solutions.pose import POSE_CONNECTIONS +from mediapipe.python.solutions.pose import PoseLandmark +# pylint: enable=unused-import + +BINARYPB_FILE_PATH = 'mediapipe/modules/holistic_landmark/holistic_landmark_cpu.binarypb' + + +class Holistic(SolutionBase): + """MediaPipe Holistic. + + MediaPipe Holistic processes an RGB image and returns pose landmarks, left and + right hand landmarks, and face mesh landmarks on the most prominent person + detected. + + Please refer to https://solutions.mediapipe.dev/holistic#python-solution-api + for usage examples. + """ + + def __init__(self, + static_image_mode=False, + upper_body_only=False, + smooth_landmarks=True, + min_detection_confidence=0.5, + min_tracking_confidence=0.5): + """Initializes a MediaPipe Holistic object. + + Args: + static_image_mode: Whether to treat the input images as a batch of static + and possibly unrelated images, or a video stream. See details in + https://solutions.mediapipe.dev/holistic#static-image-mode. + upper_body_only: Whether to track the full set of 33 pose landmarks or + only the 25 upper-body pose landmarks. See details in + https://solutions.mediapipe.dev/holistic#upper-body-only. + smooth_landmarks: Whether to filter landmarks across different input + images to reduce jitter. See details in + https://solutions.mediapipe.dev/holistic#smooth_landmarks. + min_detection_confidence: Minimum confidence value ([0.0, 1.0]) for person + detection to be considered successful. See details in + https://solutions.mediapipe.dev/holistic#min-detection-confidence. + min_tracking_confidence: Minimum confidence value ([0.0, 1.0]) for the + pose landmarks to be considered tracked successfully. See details in + https://solutions.mediapipe.dev/holistic#min-tracking-confidence. 
+ """ + super().__init__( + binary_graph_path=BINARYPB_FILE_PATH, + side_inputs={ + 'upper_body_only': upper_body_only, + 'smooth_landmarks': smooth_landmarks and not static_image_mode, + }, + calculator_params={ + 'poselandmarkcpu__ConstantSidePacketCalculator.packet': [ + constant_side_packet_calculator_pb2 + .ConstantSidePacketCalculatorOptions.ConstantSidePacket( + bool_value=not static_image_mode) + ], + 'poselandmarkcpu__posedetectioncpu__TensorsToDetectionsCalculator.min_score_thresh': + min_detection_confidence, + 'poselandmarkcpu__poselandmarkbyroicpu__ThresholdingCalculator.threshold': + min_tracking_confidence, + }, + outputs=[ + 'pose_landmarks', 'left_hand_landmarks', 'right_hand_landmarks', + 'face_landmarks' + ]) + + def process(self, image: np.ndarray) -> NamedTuple: + """Processes an RGB image and returns the pose landmarks, left and right hand landmarks, and face landmarks on the most prominent person detected. + + Args: + image: An RGB image represented as a numpy ndarray. + + Raises: + RuntimeError: If the underlying graph occurs any error. + ValueError: If the input image is not three channel RGB. + + Returns: + A NamedTuple that has four fields: + 1) "pose_landmarks" field that contains the pose landmarks on the most + prominent person detected. + 2) "left_hand_landmarks" and "right_hand_landmarks" fields that contain + the left and right hand landmarks of the most prominent person detected. + 3) "face_landmarks" field that contains the face landmarks of the most + prominent person detected. + """ + + results = super().process(input_data={'image': image}) + if results.pose_landmarks: + for landmark in results.pose_landmarks.landmark: + landmark.ClearField('presence') + return results diff --git a/mediapipe/python/solutions/holistic_test.py b/mediapipe/python/solutions/holistic_test.py new file mode 100644 index 000000000..38c70abc7 --- /dev/null +++ b/mediapipe/python/solutions/holistic_test.py @@ -0,0 +1,143 @@ +# Copyright 2020 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+"""Tests for mediapipe.python.solutions.pose.""" + +import math +import os + +from absl.testing import absltest +from absl.testing import parameterized +import cv2 +import numpy as np +import numpy.testing as npt + +# resources dependency +from mediapipe.python.solutions import holistic as mp_holistic + +TEST_IMAGE_PATH = 'mediapipe/python/solutions/testdata' +POSE_DIFF_THRESHOLOD = 30 # pixels +HAND_DIFF_THRESHOLOD = 10 # pixels +EXPECTED_POSE_COORDINATES_PREDICTION = [[593, 645], [593, 626], [599, 621], + [605, 617], [575, 637], [569, 640], + [563, 643], [621, 616], [565, 652], + [617, 652], [595, 667], [714, 662], + [567, 749], [792, 559], [497, 844], + [844, 435], [407, 906], [866, 403], + [381, 921], [859, 392], [366, 922], + [850, 405], [381, 918], [707, 948], + [631, 940], [582, 1122], [599, 1097], + [495, 1277], [641, 1239], [485, 1300], + [658, 1257], [453, 1332], [626, 1308]] +EXPECTED_LEFT_HAND_COORDINATES_PREDICTION = [[843, 404], [862, 395], [876, 383], + [887, 369], [896, 359], [854, 367], + [868, 347], [879, 346], [885, 349], + [843, 362], [859, 341], [871, 340], + [878, 344], [837, 361], [849, 341], + [859, 338], [867, 339], [834, 361], + [841, 346], [848, 342], [854, 341]] +EXPECTED_RIGHT_HAND_COORDINATES_PREDICTION = [[391, 934], [371, + 930], [354, 930], + [340, 934], [328, + 939], [350, 938], + [339, 946], [347, + 951], [355, 952], + [356, 946], [346, + 955], [358, 956], + [366, 953], [361, + 952], [354, 959], + [364, 958], [372, + 954], [366, 957], + [359, 963], [364, 962], + [368, 960]] + + +class PoseTest(parameterized.TestCase): + + def _verify_output_landmarks(self, landmark_list, image_shape, num_landmarks, + expected_results, diff_thresholds): + self.assertLen(landmark_list.landmark, num_landmarks) + image_rows, image_cols, _ = image_shape + pose_coordinates = [(math.floor(landmark.x * image_cols), + math.floor(landmark.y * image_rows)) + for landmark in landmark_list.landmark] + prediction_error = np.abs( + np.asarray(pose_coordinates) - + np.asarray(expected_results[:num_landmarks])) + npt.assert_array_less(prediction_error, diff_thresholds) + + def test_invalid_image_shape(self): + holistic = mp_holistic.Holistic() + with self.assertRaisesRegex( + ValueError, 'Input image must contain three channel rgb data.'): + holistic.process(np.arange(36, dtype=np.uint8).reshape(3, 3, 4)) + + def test_blank_image(self): + holistic = mp_holistic.Holistic() + image = np.zeros([100, 100, 3], dtype=np.uint8) + image.fill(255) + results = holistic.process(image) + self.assertIsNone(results.pose_landmarks) + holistic.close() + + @parameterized.named_parameters(('static_image_mode', True, 3), + ('video_mode', False, 3)) + def test_upper_body_model(self, static_image_mode, num_frames): + image_path = os.path.join(os.path.dirname(__file__), 'testdata/pose.jpg') + holistic = mp_holistic.Holistic( + static_image_mode=static_image_mode, upper_body_only=True) + image = cv2.imread(image_path) + for _ in range(num_frames): + results = holistic.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) + self._verify_output_landmarks(results.pose_landmarks, image.shape, 25, + EXPECTED_POSE_COORDINATES_PREDICTION, + POSE_DIFF_THRESHOLOD) + self._verify_output_landmarks(results.left_hand_landmarks, image.shape, + 21, + EXPECTED_LEFT_HAND_COORDINATES_PREDICTION, + HAND_DIFF_THRESHOLOD) + self._verify_output_landmarks(results.right_hand_landmarks, image.shape, + 21, + EXPECTED_RIGHT_HAND_COORDINATES_PREDICTION, + HAND_DIFF_THRESHOLOD) + # TODO: Verify the correctness of the face landmarks. 
+ self.assertLen(results.face_landmarks.landmark, 468) + holistic.close() + + @parameterized.named_parameters(('static_image_mode', True, 3), + ('video_mode', False, 3)) + def test_full_body_model(self, static_image_mode, num_frames): + image_path = os.path.join(os.path.dirname(__file__), 'testdata/pose.jpg') + holistic = mp_holistic.Holistic(static_image_mode=static_image_mode) + image = cv2.imread(image_path) + + for _ in range(num_frames): + results = holistic.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) + self._verify_output_landmarks(results.pose_landmarks, image.shape, 33, + EXPECTED_POSE_COORDINATES_PREDICTION, + POSE_DIFF_THRESHOLOD) + self._verify_output_landmarks(results.left_hand_landmarks, image.shape, + 21, + EXPECTED_LEFT_HAND_COORDINATES_PREDICTION, + HAND_DIFF_THRESHOLOD) + self._verify_output_landmarks(results.right_hand_landmarks, image.shape, + 21, + EXPECTED_RIGHT_HAND_COORDINATES_PREDICTION, + HAND_DIFF_THRESHOLOD) + # TODO: Verify the correctness of the face landmarks. + self.assertLen(results.face_landmarks.landmark, 468) + holistic.close() + + +if __name__ == '__main__': + absltest.main() diff --git a/mediapipe/python/solutions/pose.py b/mediapipe/python/solutions/pose.py index 2f60be8b0..1fd1f3439 100644 --- a/mediapipe/python/solutions/pose.py +++ b/mediapipe/python/solutions/pose.py @@ -12,7 +12,6 @@ # See the License for the specific language governing permissions and # limitations under the License. -# Lint as: python3 """MediaPipe Pose.""" import enum @@ -20,6 +19,7 @@ from typing import NamedTuple import numpy as np +from mediapipe.calculators.core import constant_side_packet_calculator_pb2 # pylint: disable=unused-import from mediapipe.calculators.core import gate_calculator_pb2 from mediapipe.calculators.core import split_vector_calculator_pb2 @@ -28,6 +28,7 @@ from mediapipe.calculators.tensor import inference_calculator_pb2 from mediapipe.calculators.tensor import tensors_to_classification_calculator_pb2 from mediapipe.calculators.tensor import tensors_to_detections_calculator_pb2 from mediapipe.calculators.tensor import tensors_to_landmarks_calculator_pb2 +from mediapipe.calculators.tflite import ssd_anchors_calculator_pb2 from mediapipe.calculators.util import detections_to_rects_calculator_pb2 from mediapipe.calculators.util import landmarks_smoothing_calculator_pb2 from mediapipe.calculators.util import logic_calculator_pb2 @@ -41,33 +42,40 @@ from mediapipe.python.solution_base import SolutionBase class PoseLandmark(enum.IntEnum): """The 25 (upper-body) pose landmarks.""" NOSE = 0 - RIGHT_EYE_INNER = 1 - RIGHT_EYE = 2 - RIGHT_EYE_OUTER = 3 - LEFT_EYE_INNER = 4 - LEFT_EYE = 5 - LEFT_EYE_OUTER = 6 - RIGHT_EAR = 7 - LEFT_EAR = 8 - MOUTH_RIGHT = 9 - MOUTH_LEFT = 10 - RIGHT_SHOULDER = 11 - LEFT_SHOULDER = 12 - RIGHT_ELBOW = 13 - LEFT_ELBOW = 14 - RIGHT_WRIST = 15 - LEFT_WRIST = 16 - RIGHT_PINKY = 17 - LEFT_PINKY = 18 - RIGHT_INDEX = 19 - LEFT_INDEX = 20 - RIGHT_THUMB = 21 - LEFT_THUMB = 22 - RIGHT_HIP = 23 - LEFT_HIP = 24 + LEFT_EYE_INNER = 1 + LEFT_EYE = 2 + LEFT_EYE_OUTER = 3 + RIGHT_EYE_INNER = 4 + RIGHT_EYE = 5 + RIGHT_EYE_OUTER = 6 + LEFT_EAR = 7 + RIGHT_EAR = 8 + MOUTH_LEFT = 9 + MOUTH_RIGHT = 10 + LEFT_SHOULDER = 11 + RIGHT_SHOULDER = 12 + LEFT_ELBOW = 13 + RIGHT_ELBOW = 14 + LEFT_WRIST = 15 + RIGHT_WRIST = 16 + LEFT_PINKY = 17 + RIGHT_PINKY = 18 + LEFT_INDEX = 19 + RIGHT_INDEX = 20 + LEFT_THUMB = 21 + RIGHT_THUMB = 22 + LEFT_HIP = 23 + RIGHT_HIP = 24 + LEFT_KNEE = 25 + RIGHT_KNEE = 26 + LEFT_ANKLE = 27 + RIGHT_ANKLE = 28 + LEFT_HEEL = 29 + 
RIGHT_HEEL = 30 + LEFT_FOOT_INDEX = 31 + RIGHT_FOOT_INDEX = 32 - -BINARYPB_FILE_PATH = 'mediapipe/modules/pose_landmark/pose_landmark_upper_body_smoothed_cpu.binarypb' +BINARYPB_FILE_PATH = 'mediapipe/modules/pose_landmark/pose_landmark_cpu.binarypb' POSE_CONNECTIONS = frozenset([ (PoseLandmark.NOSE, PoseLandmark.RIGHT_EYE_INNER), (PoseLandmark.RIGHT_EYE_INNER, PoseLandmark.RIGHT_EYE), @@ -93,7 +101,18 @@ POSE_CONNECTIONS = frozenset([ (PoseLandmark.LEFT_PINKY, PoseLandmark.LEFT_INDEX), (PoseLandmark.RIGHT_SHOULDER, PoseLandmark.RIGHT_HIP), (PoseLandmark.LEFT_SHOULDER, PoseLandmark.LEFT_HIP), - (PoseLandmark.RIGHT_HIP, PoseLandmark.LEFT_HIP) + (PoseLandmark.RIGHT_HIP, PoseLandmark.LEFT_HIP), + (PoseLandmark.RIGHT_HIP, PoseLandmark.LEFT_HIP), + (PoseLandmark.RIGHT_HIP, PoseLandmark.RIGHT_KNEE), + (PoseLandmark.LEFT_HIP, PoseLandmark.LEFT_KNEE), + (PoseLandmark.RIGHT_KNEE, PoseLandmark.RIGHT_ANKLE), + (PoseLandmark.LEFT_KNEE, PoseLandmark.LEFT_ANKLE), + (PoseLandmark.RIGHT_ANKLE, PoseLandmark.RIGHT_HEEL), + (PoseLandmark.LEFT_ANKLE, PoseLandmark.LEFT_HEEL), + (PoseLandmark.RIGHT_HEEL, PoseLandmark.RIGHT_FOOT_INDEX), + (PoseLandmark.LEFT_HEEL, PoseLandmark.LEFT_FOOT_INDEX), + (PoseLandmark.RIGHT_ANKLE, PoseLandmark.RIGHT_FOOT_INDEX), + (PoseLandmark.LEFT_ANKLE, PoseLandmark.LEFT_FOOT_INDEX), ]) @@ -103,94 +122,50 @@ class Pose(SolutionBase): MediaPipe Pose processes an RGB image and returns pose landmarks on the most prominent person detected. - Usage examples: - import cv2 - import mediapipe as mp - mp_drawing = mp.solutions.drawing_utils - mp_pose = mp.solutions.pose - - # For static images: - pose = mp_pose.Pose( - static_image_mode=True, min_detection_confidence=0.5) - for idx, file in enumerate(file_list): - image = cv2.imread(file) - # Convert the BGR image to RGB before processing. - results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) - - # Print and draw pose landmarks on the image. - print( - 'nose landmark:', - results.pose_landmarks.landmark[mp_pose.PoseLandmark.NOSE]) - annotated_image = image.copy() - mp_drawing.draw_landmarks( - annotated_image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS) - cv2.imwrite('/tmp/annotated_image' + str(idx) + '.png', image) - pose.close() - - # For webcam input: - pose = mp_pose.Pose( - min_detection_confidence=0.5, min_tracking_confidence=0.5) - cap = cv2.VideoCapture(0) - while cap.isOpened(): - success, image = cap.read() - if not success: - break - - # Flip the image horizontally for a later selfie-view display, and convert - # the BGR image to RGB. - image = cv2.cvtColor(cv2.flip(image, 1), cv2.COLOR_BGR2RGB) - # To improve performance, optionally mark the image as not writeable to - # pass by reference. - image.flags.writeable = False - results = pose.process(image) - - # Draw the pose annotation on the image. - image.flags.writeable = True - image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR) - mp_drawing.draw_landmarks( - image, results.pose_landmarks, mp_pose.POSE_CONNECTIONS) - cv2.imshow('MediaPipe Pose', image) - if cv2.waitKey(5) & 0xFF == 27: - break - pose.close() - cap.release() + Please refer to https://solutions.mediapipe.dev/pose#python-solution-api for + usage examples. """ def __init__(self, static_image_mode=False, + upper_body_only=False, + smooth_landmarks=True, min_detection_confidence=0.5, min_tracking_confidence=0.5): """Initializes a MediaPipe Pose object. Args: - static_image_mode: If set to False, the solution treats the input images - as a video stream. 
It will try to detect the most prominent person in - the very first images, and upon a successful detection further localizes - the pose landmarks. In subsequent images, it then simply tracks those - landmarks without invoking another detection until it loses track, on - reducing computation and latency. If set to True, person detection runs - every input image, ideal for processing a batch of static, possibly - unrelated, images. Default to False. - min_detection_confidence: Minimum confidence value ([0.0, 1.0]) from the - person-detection model for the detection to be considered successful. - Default to 0.5. - min_tracking_confidence: Minimum confidence value ([0.0, 1.0]) from the - landmark-tracking model for the pose landmarks to be considered tracked - successfully, or otherwise person detection will be invoked - automatically on the next input image. Setting it to a higher value can - increase robustness of the solution, at the expense of a higher latency. - Ignored if "static_image_mode" is True, where person detection simply - runs on every image. Default to 0.5. + static_image_mode: Whether to treat the input images as a batch of static + and possibly unrelated images, or a video stream. See details in + https://solutions.mediapipe.dev/pose#static-image-mode. + upper_body_only: Whether to track the full set of 33 pose landmarks or + only the 25 upper-body pose landmarks. See details in + https://solutions.mediapipe.dev/pose#upper-body-only. + smooth_landmarks: Whether to filter landmarks across different input + images to reduce jitter. See details in + https://solutions.mediapipe.dev/pose#smooth_landmarks. + min_detection_confidence: Minimum confidence value ([0.0, 1.0]) for person + detection to be considered successful. See details in + https://solutions.mediapipe.dev/pose#min-detection-confidence. + min_tracking_confidence: Minimum confidence value ([0.0, 1.0]) for the + pose landmarks to be considered tracked successfully. See details in + https://solutions.mediapipe.dev/pose#min-tracking-confidence. """ super().__init__( binary_graph_path=BINARYPB_FILE_PATH, side_inputs={ - 'can_skip_detection': not static_image_mode, + 'upper_body_only': upper_body_only, + 'smooth_landmarks': smooth_landmarks and not static_image_mode, }, calculator_params={ - 'poselandmarkupperbodycpu__posedetectioncpu__TensorsToDetectionsCalculator.min_score_thresh': + 'ConstantSidePacketCalculator.packet': [ + constant_side_packet_calculator_pb2 + .ConstantSidePacketCalculatorOptions.ConstantSidePacket( + bool_value=not static_image_mode) + ], + 'poselandmarkcpu__posedetectioncpu__TensorsToDetectionsCalculator.min_score_thresh': min_detection_confidence, - 'poselandmarkupperbodycpu__poselandmarkupperbodybyroicpu__ThresholdingCalculator.threshold': + 'poselandmarkcpu__poselandmarkbyroicpu__ThresholdingCalculator.threshold': min_tracking_confidence, }, outputs=['pose_landmarks']) @@ -210,4 +185,8 @@ class Pose(SolutionBase): landmarks on the most prominent person detected. """ - return super().process(input_data={'image': image}) + results = super().process(input_data={'image': image}) + if results.pose_landmarks: + for landmark in results.pose_landmarks.landmark: + landmark.ClearField('presence') + return results diff --git a/mediapipe/python/solutions/pose_test.py b/mediapipe/python/solutions/pose_test.py new file mode 100644 index 000000000..38131c596 --- /dev/null +++ b/mediapipe/python/solutions/pose_test.py @@ -0,0 +1,97 @@ +# Copyright 2020 The MediaPipe Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. +"""Tests for mediapipe.python.solutions.pose.""" + +import math +import os + +from absl.testing import absltest +from absl.testing import parameterized +import cv2 +import numpy as np +import numpy.testing as npt + +# resources dependency +from mediapipe.python.solutions import pose as mp_pose + +TEST_IMAGE_PATH = 'mediapipe/python/solutions/testdata' +DIFF_THRESHOLD = 30 # pixels +EXPECTED_POSE_COORDINATES_PREDICTION = [[593, 645], [593, 626], [599, 621], + [605, 617], [575, 637], [569, 640], + [563, 643], [621, 616], [565, 652], + [617, 652], [595, 667], [714, 662], + [567, 749], [792, 559], [497, 844], + [844, 435], [407, 906], [866, 403], + [381, 921], [859, 392], [366, 922], + [850, 405], [381, 918], [707, 948], + [631, 940], [582, 1122], [599, 1097], + [495, 1277], [641, 1239], [485, 1300], + [658, 1257], [453, 1332], [626, 1308]] + + +class PoseTest(parameterized.TestCase): + + def _verify_output_landmarks(self, landmark_list, image_shape, num_landmarks): + self.assertLen(landmark_list.landmark, num_landmarks) + image_rows, image_cols, _ = image_shape + pose_coordinates = [(math.floor(landmark.x * image_cols), + math.floor(landmark.y * image_rows)) + for landmark in landmark_list.landmark] + prediction_error = np.abs( + np.asarray(pose_coordinates) - + np.asarray(EXPECTED_POSE_COORDINATES_PREDICTION[:num_landmarks])) + npt.assert_array_less(prediction_error, DIFF_THRESHOLD) + + def test_invalid_image_shape(self): + pose = mp_pose.Pose() + with self.assertRaisesRegex( + ValueError, 'Input image must contain three channel rgb data.'): + pose.process(np.arange(36, dtype=np.uint8).reshape(3, 3, 4)) + + def test_blank_image(self): + pose = mp_pose.Pose() + image = np.zeros([100, 100, 3], dtype=np.uint8) + image.fill(255) + results = pose.process(image) + self.assertIsNone(results.pose_landmarks) + pose.close() + + @parameterized.named_parameters(('static_image_mode', True, 3), + ('video_mode', False, 3)) + def test_upper_body_model(self, static_image_mode, num_frames): + image_path = os.path.join(os.path.dirname(__file__), 'testdata/pose.jpg') + pose = mp_pose.Pose(static_image_mode=static_image_mode, + upper_body_only=True) + image = cv2.imread(image_path) + + for _ in range(num_frames): + results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) + self._verify_output_landmarks(results.pose_landmarks, image.shape, 25) + pose.close() + + @parameterized.named_parameters(('static_image_mode', True, 3), + ('video_mode', False, 3)) + def test_full_body_model(self, static_image_mode, num_frames): + image_path = os.path.join(os.path.dirname(__file__), 'testdata/pose.jpg') + pose = mp_pose.Pose(static_image_mode=static_image_mode) + image = cv2.imread(image_path) + + for _ in range(num_frames): + results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB)) + self._verify_output_landmarks(results.pose_landmarks, image.shape, 33) + pose.close() + + +if __name__ == '__main__': + absltest.main()
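The pose.py hunk above drops the long inline usage example from the docstring in favor of a documentation link. For quick reference while reviewing, here is a condensed version of that removed example, updated for the new constructor and the 33-landmark, left-first index order; file paths are placeholders.

```python
# Condensed, updated form of the usage example removed from pose.py above.
import cv2
import mediapipe as mp

mp_drawing = mp.solutions.drawing_utils
mp_pose = mp.solutions.pose

pose = mp_pose.Pose(static_image_mode=True, min_detection_confidence=0.5)
image = cv2.imread('/tmp/pose.jpg')  # placeholder path
results = pose.process(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
if results.pose_landmarks:
  # Indices now follow the left-first ordering: NOSE=0, LEFT_EYE_INNER=1, ...
  print('left knee:',
        results.pose_landmarks.landmark[mp_pose.PoseLandmark.LEFT_KNEE])
  annotated = image.copy()
  mp_drawing.draw_landmarks(annotated, results.pose_landmarks,
                            mp_pose.POSE_CONNECTIONS)
  cv2.imwrite('/tmp/annotated.png', annotated)
pose.close()
```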
diff --git a/mediapipe/python/timestamp_test.py b/mediapipe/python/timestamp_test.py index bbcd21fa4..02fa760c3 100644 --- a/mediapipe/python/timestamp_test.py +++ b/mediapipe/python/timestamp_test.py @@ -22,25 +22,25 @@ import mediapipe as mp class TimestampTest(absltest.TestCase): - def testTimesatmp(self): + def test_timestamp(self): t = mp.Timestamp(100) self.assertEqual(t.value, 100) self.assertEqual(t, 100) self.assertEqual(str(t), '') - def testTimestampCopyConstructor(self): + def test_timestamp_copy_constructor(self): ts1 = mp.Timestamp(100) ts2 = mp.Timestamp(ts1) self.assertEqual(ts1, ts2) - def testTimesatmpComparsion(self): + def test_timestamp_comparison(self): ts1 = mp.Timestamp(100) ts2 = mp.Timestamp(100) self.assertEqual(ts1, ts2) ts3 = mp.Timestamp(200) self.assertNotEqual(ts1, ts3) - def testTimesatmpSpecialValues(self): + def test_timestamp_special_values(self): t1 = mp.Timestamp.UNSET self.assertEqual(str(t1), '') t2 = mp.Timestamp.UNSTARTED @@ -56,7 +56,7 @@ class TimestampTest(absltest.TestCase): t7 = mp.Timestamp.DONE self.assertEqual(str(t7), '') - def testTimestampComparisons(self): + def test_timestamp_comparisons(self): ts1 = mp.Timestamp(100) ts2 = mp.Timestamp(101) self.assertGreater(ts2, ts1) @@ -65,7 +65,7 @@ class TimestampTest(absltest.TestCase): self.assertLessEqual(ts1, ts2) self.assertNotEqual(ts1, ts2) - def testFromSeconds(self): + def test_from_seconds(self): now = time.time() ts = mp.Timestamp.from_seconds(now) self.assertAlmostEqual(now, ts.seconds(), delta=1) diff --git a/mediapipe/util/android/asset_manager_util.cc b/mediapipe/util/android/asset_manager_util.cc index 80a593cdd..315078137 100644 --- a/mediapipe/util/android/asset_manager_util.cc +++ b/mediapipe/util/android/asset_manager_util.cc @@ -143,7 +143,7 @@ bool AssetManager::ReadFile(const std::string& filename, std::string* output) { return true; } -::mediapipe::StatusOr<std::string> AssetManager::CachedFileFromAsset( +mediapipe::StatusOr<std::string> AssetManager::CachedFileFromAsset( const std::string& asset_path) { RET_CHECK(cache_dir_path_.size()) << "asset manager not initialized"; diff --git a/mediapipe/util/android/file/base/filesystem.cc b/mediapipe/util/android/file/base/filesystem.cc index b678ec5b1..53cdb16e1 100644 --- a/mediapipe/util/android/file/base/filesystem.cc +++ b/mediapipe/util/android/file/base/filesystem.cc @@ -25,10 +25,10 @@ static_assert(sizeof(off_t) == 8, "Large file support is required"); namespace mediapipe { namespace file { -::mediapipe::Status RecursivelyCreateDir(absl::string_view path, - const file::Options& options) { +mediapipe::Status RecursivelyCreateDir(absl::string_view path, + const file::Options& options) { if (path.empty()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } std::vector<std::string> path_comp = absl::StrSplit(path, '/'); @@ -45,45 +45,44 @@ namespace file { if (S_ISDIR(stat_buf.st_mode)) { continue; } - return ::mediapipe::Status(mediapipe::StatusCode::kInternal, - "Could not stat " + std::string(crpath)); + return mediapipe::Status(mediapipe::StatusCode::kInternal, + "Could not stat " + std::string(crpath)); } else { int mkval = mkdir(crpath, options.permissions()); if (mkval == -1) { - return ::mediapipe::Status(mediapipe::StatusCode::kInternal, - "Could not create " + std::string(crpath)); + return mediapipe::Status(mediapipe::StatusCode::kInternal, + "Could not create " + std::string(crpath)); } } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status Exists(absl::string_view path, - const file::Options& ignored) { +mediapipe::Status Exists(absl::string_view path, const file::Options& ignored) { struct stat64 stat_buf; int statval = lstat64(std::string(path).c_str(), &stat_buf); if (statval == 0) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } else { - return ::mediapipe::Status(mediapipe::StatusCode::kNotFound, - "Could not stat file."); + return mediapipe::Status(mediapipe::StatusCode::kNotFound, + "Could not stat file."); } } -::mediapipe::Status IsDirectory(absl::string_view path, - const file::Options& /*ignored*/) { +mediapipe::Status IsDirectory(absl::string_view path, + const file::Options& /*ignored*/) { struct stat64 stat_buf; int statval = lstat64(std::string(path).c_str(), &stat_buf); bool is_dir = (statval == 0 && S_ISREG(stat_buf.st_mode)); if (is_dir) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } else if (statval != 0) { - return ::mediapipe::Status(mediapipe::StatusCode::kNotFound, - "File does not exists"); + return mediapipe::Status(mediapipe::StatusCode::kNotFound, + "File does not exist"); } else { - return ::mediapipe::Status(mediapipe::StatusCode::kNotFound, - "Not a directory"); + return mediapipe::Status(mediapipe::StatusCode::kNotFound, + "Not a directory"); } } diff --git a/mediapipe/util/android/file/base/filesystem.h b/mediapipe/util/android/file/base/filesystem.h index 90f4bb7eb..af86de70c 100644 --- a/mediapipe/util/android/file/base/filesystem.h +++ b/mediapipe/util/android/file/base/filesystem.h @@ -22,14 +22,13 @@ namespace mediapipe { namespace file { -::mediapipe::Status RecursivelyCreateDir(absl::string_view path, - const file::Options& options); +mediapipe::Status RecursivelyCreateDir(absl::string_view path, + const file::Options& options); -::mediapipe::Status Exists(absl::string_view path, - const file::Options& options); +mediapipe::Status Exists(absl::string_view path, const file::Options& options); -::mediapipe::Status IsDirectory(absl::string_view path, - const file::Options& options); +mediapipe::Status IsDirectory(absl::string_view path, + const file::Options& options); } // namespace file. } // namespace mediapipe diff --git a/mediapipe/util/android/file/base/helpers.cc b/mediapipe/util/android/file/base/helpers.cc index 34411b272..417069069 100644 --- a/mediapipe/util/android/file/base/helpers.cc +++ b/mediapipe/util/android/file/base/helpers.cc @@ -42,16 +42,16 @@ class FdCloser { } // namespace // Read contents of a file to a std::string. -::mediapipe::Status GetContents(int fd, std::string* output) { +mediapipe::Status GetContents(int fd, std::string* output) { // Determine the length of the file. struct stat buf; if (fstat(fd, &buf) != 0) { - return ::mediapipe::Status(mediapipe::StatusCode::kUnknown, - "Failed to get file status"); + return mediapipe::Status(mediapipe::StatusCode::kUnknown, + "Failed to get file status"); } if (buf.st_size < 0 || buf.st_size > SIZE_MAX) { - return ::mediapipe::Status(mediapipe::StatusCode::kInternal, - "Invalid file size"); + return mediapipe::Status(mediapipe::StatusCode::kInternal, + "Invalid file size"); } size_t length = buf.st_size; @@ -61,46 +61,43 @@ class FdCloser { while (length != 0) { const ssize_t nread = read(fd, output_ptr, length); if (nread <= 0) { - return ::mediapipe::Status(mediapipe::StatusCode::kUnknown, - "Failed to read file"); + return mediapipe::Status(mediapipe::StatusCode::kUnknown, + "Failed to read file"); } output_ptr += nread; length -= nread; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Read contents of a file to a std::string.
-::mediapipe::Status GetContents(absl::string_view file_name, - std::string* output, - const file::Options& /*options*/) { +mediapipe::Status GetContents(absl::string_view file_name, std::string* output, + const file::Options& /*options*/) { int fd = open(std::string(file_name).c_str(), O_RDONLY); if (fd < 0) { - return ::mediapipe::Status( - mediapipe::StatusCode::kUnknown, - "Failed to open file: " + std::string(file_name)); + return mediapipe::Status(mediapipe::StatusCode::kUnknown, + "Failed to open file: " + std::string(file_name)); } FdCloser closer(fd); return GetContents(fd, output); } -::mediapipe::Status GetContents(absl::string_view file_name, - std::string* output) { +mediapipe::Status GetContents(absl::string_view file_name, + std::string* output) { return GetContents(file_name, output, file::Defaults()); } -::mediapipe::Status SetContents(absl::string_view file_name, - absl::string_view content, - const file::Options& options) { +mediapipe::Status SetContents(absl::string_view file_name, + absl::string_view content, + const file::Options& options) { // Mode -rw-r--r-- mode_t mode = S_IRUSR | S_IWUSR | S_IRGRP | S_IROTH; int fd = open(std::string(file_name).c_str(), O_WRONLY | O_CREAT | O_TRUNC, mode); if (fd < 0) { - return ::mediapipe::Status( - mediapipe::StatusCode::kUnknown, - "Failed to open file: " + std::string(file_name)); + return mediapipe::Status(mediapipe::StatusCode::kUnknown, + "Failed to open file: " + std::string(file_name)); } int bytes_written = 0; @@ -110,15 +107,15 @@ class FdCloser { close(fd); if (bytes_written == content.size()) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } else { - return ::mediapipe::Status(mediapipe::StatusCode::kUnknown, - "Failed to write file"); + return mediapipe::Status(mediapipe::StatusCode::kUnknown, + "Failed to write file"); } } -::mediapipe::Status SetContents(absl::string_view file_name, - absl::string_view content) { +mediapipe::Status SetContents(absl::string_view file_name, + absl::string_view content) { return SetContents(file_name, content, file::Defaults()); } diff --git a/mediapipe/util/android/file/base/helpers.h b/mediapipe/util/android/file/base/helpers.h index 4dc10d6e9..cd92cfc86 100644 --- a/mediapipe/util/android/file/base/helpers.h +++ b/mediapipe/util/android/file/base/helpers.h @@ -25,25 +25,23 @@ namespace mediapipe { namespace file { // Read contents of a file to a std::string. -::mediapipe::Status GetContents(absl::string_view file_name, - std::string* output, - const file::Options& options); +mediapipe::Status GetContents(absl::string_view file_name, std::string* output, + const file::Options& options); // Read contents of a file to a std::string with default file options. -::mediapipe::Status GetContents(absl::string_view file_name, - std::string* output); +mediapipe::Status GetContents(absl::string_view file_name, std::string* output); // Read contents of a file to a std::string from an open file descriptor. -::mediapipe::Status GetContents(int fd, std::string* output); +mediapipe::Status GetContents(int fd, std::string* output); // Write std::string to file. -::mediapipe::Status SetContents(absl::string_view file_name, - absl::string_view content, - const file::Options& options); +mediapipe::Status SetContents(absl::string_view file_name, + absl::string_view content, + const file::Options& options); // Write std::string to file with default file options. 
-::mediapipe::Status SetContents(absl::string_view file_name, - absl::string_view content); +mediapipe::Status SetContents(absl::string_view file_name, + absl::string_view content); } // namespace file } // namespace mediapipe diff --git a/mediapipe/util/annotation_renderer.cc b/mediapipe/util/annotation_renderer.cc index 7d0673908..d464995e2 100644 --- a/mediapipe/util/annotation_renderer.cc +++ b/mediapipe/util/annotation_renderer.cc @@ -16,6 +16,7 @@ #include +#include #include #include "mediapipe/framework/port/logging.h" @@ -37,6 +38,11 @@ using Rectangle = RenderAnnotation::Rectangle; using RoundedRectangle = RenderAnnotation::RoundedRectangle; using Text = RenderAnnotation::Text; +int ClampThickness(int thickness) { + constexpr int kMaxThickness = 32767; // OpenCV MAX_THICKNESS + return std::clamp(thickness, 1, kMaxThickness); +} + bool NormalizedtoPixelCoordinates(double normalized_x, double normalized_y, int image_width, int image_height, int* x_px, int* y_px) { @@ -152,7 +158,8 @@ void AnnotationRenderer::DrawRectangle(const RenderAnnotation& annotation) { } const cv::Scalar color = MediapipeColorToOpenCVColor(annotation.color()); - const int thickness = round(annotation.thickness() * scale_factor_); + const int thickness = + ClampThickness(round(annotation.thickness() * scale_factor_)); if (rectangle.rotation() != 0.0) { const auto& rect = RectangleToOpenCVRotatedRect(left, top, right, bottom, rectangle.rotation()); @@ -231,7 +238,8 @@ void AnnotationRenderer::DrawRoundedRectangle( } const cv::Scalar color = MediapipeColorToOpenCVColor(annotation.color()); - const int thickness = round(annotation.thickness() * scale_factor_); + const int thickness = + ClampThickness(round(annotation.thickness() * scale_factor_)); const int corner_radius = round(annotation.rounded_rectangle().corner_radius() * scale_factor_); const int line_type = annotation.rounded_rectangle().line_type(); @@ -336,9 +344,11 @@ void AnnotationRenderer::DrawOval(const RenderAnnotation& annotation) { cv::Point center((left + right) / 2, (top + bottom) / 2); cv::Size size((right - left) / 2, (bottom - top) / 2); + const double rotation = enclosing_rectangle.rotation() / M_PI * 180.f; const cv::Scalar color = MediapipeColorToOpenCVColor(annotation.color()); - const int thickness = round(annotation.thickness() * scale_factor_); - cv::ellipse(mat_image_, center, size, 0, 0, 360, color, thickness); + const int thickness = + ClampThickness(round(annotation.thickness() * scale_factor_)); + cv::ellipse(mat_image_, center, size, rotation, 0, 360, color, thickness); } void AnnotationRenderer::DrawFilledOval(const RenderAnnotation& annotation) { @@ -364,8 +374,9 @@ void AnnotationRenderer::DrawFilledOval(const RenderAnnotation& annotation) { cv::Point center((left + right) / 2, (top + bottom) / 2); cv::Size size(std::max(0, (right - left) / 2), std::max(0, (bottom - top) / 2)); + const double rotation = enclosing_rectangle.rotation() / M_PI * 180.f; const cv::Scalar color = MediapipeColorToOpenCVColor(annotation.color()); - cv::ellipse(mat_image_, center, size, 0, 0, 360, color, -1); + cv::ellipse(mat_image_, center, size, rotation, 0, 360, color, -1); } void AnnotationRenderer::DrawArrow(const RenderAnnotation& annotation) { @@ -392,7 +403,8 @@ void AnnotationRenderer::DrawArrow(const RenderAnnotation& annotation) { cv::Point arrow_start(x_start, y_start); cv::Point arrow_end(x_end, y_end); const cv::Scalar color = MediapipeColorToOpenCVColor(annotation.color()); - const int thickness = round(annotation.thickness() * 
scale_factor_); + const int thickness = + ClampThickness(round(annotation.thickness() * scale_factor_)); // Draw the main arrow line. cv::line(mat_image_, arrow_start, arrow_end, color, thickness); @@ -431,7 +443,8 @@ void AnnotationRenderer::DrawPoint(const RenderAnnotation& annotation) { cv::Point point_to_draw(x, y); const cv::Scalar color = MediapipeColorToOpenCVColor(annotation.color()); - const int thickness = round(annotation.thickness() * scale_factor_); + const int thickness = + ClampThickness(round(annotation.thickness() * scale_factor_)); cv::circle(mat_image_, point_to_draw, thickness, color, -1); } @@ -458,7 +471,8 @@ void AnnotationRenderer::DrawLine(const RenderAnnotation& annotation) { cv::Point start(x_start, y_start); cv::Point end(x_end, y_end); const cv::Scalar color = MediapipeColorToOpenCVColor(annotation.color()); - const int thickness = round(annotation.thickness() * scale_factor_); + const int thickness = + ClampThickness(round(annotation.thickness() * scale_factor_)); cv::line(mat_image_, start, end, color, thickness); } @@ -484,7 +498,8 @@ void AnnotationRenderer::DrawGradientLine(const RenderAnnotation& annotation) { const cv::Point start(x_start, y_start); const cv::Point end(x_end, y_end); - const int thickness = round(annotation.thickness() * scale_factor_); + const int thickness = + ClampThickness(round(annotation.thickness() * scale_factor_)); const cv::Scalar color1 = MediapipeColorToOpenCVColor(line.color1()); const cv::Scalar color2 = MediapipeColorToOpenCVColor(line.color2()); cv_line2(mat_image_, start, end, color1, color2, thickness); @@ -509,7 +524,8 @@ void AnnotationRenderer::DrawText(const RenderAnnotation& annotation) { cv::Point origin(left, baseline); const cv::Scalar color = MediapipeColorToOpenCVColor(annotation.color()); - const int thickness = round(annotation.thickness() * scale_factor_); + const int thickness = + ClampThickness(round(annotation.thickness() * scale_factor_)); const int font_face = text.font_face(); const double font_scale = ComputeFontScale(font_face, font_size, thickness); diff --git a/mediapipe/util/audio_decoder.cc b/mediapipe/util/audio_decoder.cc index b101e1c3e..1aab2baf7 100644 --- a/mediapipe/util/audio_decoder.cc +++ b/mediapipe/util/audio_decoder.cc @@ -344,7 +344,7 @@ mediapipe::Status AudioPacketProcessor::Open(int id, AVStream* stream) { id_ = id; avcodec_ = avcodec_find_decoder(stream->codecpar->codec_id); if (!avcodec_) { - return ::mediapipe::InvalidArgumentError("Failed to find codec"); + return mediapipe::InvalidArgumentError("Failed to find codec"); } avcodec_ctx_ = avcodec_alloc_context3(avcodec_); avcodec_parameters_to_context(avcodec_ctx_, stream->codecpar); @@ -588,18 +588,18 @@ int64 AudioPacketProcessor::MaybeCorrectPtsForRollover(int64 media_pts) { AudioDecoder::AudioDecoder() { av_register_all(); } AudioDecoder::~AudioDecoder() { - ::mediapipe::Status status = Close(); + mediapipe::Status status = Close(); if (!status.ok()) { LOG(ERROR) << "Encountered error while closing media file: " << status.message(); } } -::mediapipe::Status AudioDecoder::Initialize( +mediapipe::Status AudioDecoder::Initialize( const std::string& input_file, const mediapipe::AudioDecoderOptions options) { if (options.audio_stream().empty()) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( "At least one audio_stream must be defined in AudioDecoderOptions"); } std::map stream_index_to_audio_options_index; @@ -611,7 +611,7 @@ AudioDecoder::~AudioDecoder() { } Cleanup> 
decoder_closer([this]() { - ::mediapipe::Status status = Close(); + mediapipe::Status status = Close(); if (!status.ok()) { LOG(ERROR) << "Encountered error while closing media file: " << status.message(); @@ -620,12 +620,12 @@ AudioDecoder::~AudioDecoder() { avformat_ctx_ = avformat_alloc_context(); if (avformat_open_input(&avformat_ctx_, input_file.c_str(), NULL, NULL) < 0) { - return ::mediapipe::InvalidArgumentError( + return mediapipe::InvalidArgumentError( absl::StrCat("Could not open file: ", input_file)); } if (avformat_find_stream_info(avformat_ctx_, NULL) < 0) { - return ::mediapipe::InvalidArgumentError(absl::StrCat( + return mediapipe::InvalidArgumentError(absl::StrCat( "Could not find stream information of file: ", input_file)); } @@ -686,10 +686,10 @@ AudioDecoder::~AudioDecoder() { is_first_packet_.resize(avformat_ctx_->nb_streams, true); decoder_closer.release(); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status AudioDecoder::GetData(int* options_index, Packet* data) { +mediapipe::Status AudioDecoder::GetData(int* options_index, Packet* data) { while (true) { for (auto& item : audio_processor_) { while (item.second && item.second->HasData()) { @@ -697,7 +697,7 @@ AudioDecoder::~AudioDecoder() { is_first_packet_[item.first] = false; *options_index = FindOrDie(stream_id_to_audio_options_index_, item.first); - ::mediapipe::Status status = item.second->GetData(data); + mediapipe::Status status = item.second->GetData(data); // Ignore packets which are out of the requested timestamp range. if (start_time_ != Timestamp::Unset()) { if (is_first_packet && data->Timestamp() > start_time_) { @@ -735,10 +735,10 @@ AudioDecoder::~AudioDecoder() { } MP_RETURN_IF_ERROR(ProcessPacket()); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status AudioDecoder::Close() { +mediapipe::Status AudioDecoder::Close() { for (auto& item : audio_processor_) { if (item.second) { item.second->Close(); @@ -749,10 +749,10 @@ AudioDecoder::~AudioDecoder() { if (avformat_ctx_) { avformat_close_input(&avformat_ctx_); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status AudioDecoder::FillAudioHeader( +mediapipe::Status AudioDecoder::FillAudioHeader( const AudioStreamOptions& stream_option, TimeSeriesHeader* header) const { const std::unique_ptr* processor_ptr_ = FindOrNull( audio_processor_, @@ -760,10 +760,10 @@ AudioDecoder::~AudioDecoder() { RET_CHECK(processor_ptr_ && *processor_ptr_) << "audio stream is not open."; MP_RETURN_IF_ERROR((*processor_ptr_)->FillHeader(header)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status AudioDecoder::ProcessPacket() { +mediapipe::Status AudioDecoder::ProcessPacket() { std::unique_ptr av_packet(new AVPacket()); av_init_packet(av_packet.get()); av_packet->size = 0; @@ -785,14 +785,14 @@ AudioDecoder::~AudioDecoder() { } else { VLOG(3) << "Ignoring packet for stream " << stream_id; } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } VLOG(1) << "Demuxing returned error (or EOF): " << AvErrorToString(ret); if (ret == AVERROR(EAGAIN)) { // EAGAIN is used to signify that the av_packet should be skipped // (maybe the demuxer is trying to re-sync). This definitely // occurs in the FLV and MpegT demuxers. - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } // Unrecoverable demuxing error with details in avformat_ctx_->pb->error. 
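Looking back at the annotation_renderer.cc hunks earlier in this patch: ClampThickness caps the scaled stroke width at OpenCV's MAX_THICKNESS, and the oval routines now convert the rectangle rotation from radians to the degrees cv::ellipse expects. A Python/cv2 sketch of the same two pitfalls, with made-up values; this is illustrative and not part of the patch.

```python
# Sketch of the two annotation_renderer.cc fixes, using cv2 as the tests do.
import math
import cv2
import numpy as np

MAX_THICKNESS = 32767  # OpenCV rejects thickness values above this limit

canvas = np.zeros((200, 200, 3), dtype=np.uint8)
scale_factor = 5000.0  # made-up value large enough to overflow the limit
thickness = min(max(1, round(8 * scale_factor)), MAX_THICKNESS)  # clamp
cv2.line(canvas, (10, 10), (190, 190), (0, 255, 0), thickness)

rotation_rad = 0.5  # the renderer stores radians; cv2.ellipse wants degrees
cv2.ellipse(canvas, (100, 100), (60, 30), rotation_rad / math.pi * 180.0,
            0, 360, (255, 0, 0), 2)
```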
@@ -819,8 +819,8 @@ AudioDecoder::~AudioDecoder() { "Failed to read a frame: retval = $0 ($1)", ret, AvErrorToString(ret)); } -::mediapipe::Status AudioDecoder::Flush() { - std::vector<::mediapipe::Status> statuses; +mediapipe::Status AudioDecoder::Flush() { + std::vector statuses; for (auto& item : audio_processor_) { if (item.second) { statuses.push_back(item.second->Flush()); diff --git a/mediapipe/util/audio_decoder.h b/mediapipe/util/audio_decoder.h index 100b7f970..b0ae65e17 100644 --- a/mediapipe/util/audio_decoder.h +++ b/mediapipe/util/audio_decoder.h @@ -194,19 +194,19 @@ class AudioDecoder { AudioDecoder(); ~AudioDecoder(); - ::mediapipe::Status Initialize(const std::string& input_file, - const mediapipe::AudioDecoderOptions options); + mediapipe::Status Initialize(const std::string& input_file, + const mediapipe::AudioDecoderOptions options); - ::mediapipe::Status GetData(int* options_index, Packet* data); + mediapipe::Status GetData(int* options_index, Packet* data); - ::mediapipe::Status Close(); + mediapipe::Status Close(); - ::mediapipe::Status FillAudioHeader(const AudioStreamOptions& stream_option, - TimeSeriesHeader* header) const; + mediapipe::Status FillAudioHeader(const AudioStreamOptions& stream_option, + TimeSeriesHeader* header) const; private: - ::mediapipe::Status ProcessPacket(); - ::mediapipe::Status Flush(); + mediapipe::Status ProcessPacket(); + mediapipe::Status Flush(); std::map stream_id_to_audio_options_index_; std::map stream_index_to_stream_id_; diff --git a/mediapipe/util/cpu_util.cc b/mediapipe/util/cpu_util.cc index 984d7c576..99d1315dd 100644 --- a/mediapipe/util/cpu_util.cc +++ b/mediapipe/util/cpu_util.cc @@ -38,12 +38,12 @@ namespace { constexpr uint32 kBufferLength = 64; -::mediapipe::StatusOr GetFilePath(int cpu) { +mediapipe::StatusOr GetFilePath(int cpu) { return absl::Substitute( "/sys/devices/system/cpu/cpu$0/cpufreq/cpuinfo_max_freq", cpu); } -::mediapipe::StatusOr GetCpuMaxFrequency(int cpu) { +mediapipe::StatusOr GetCpuMaxFrequency(int cpu) { auto path_or_status = GetFilePath(cpu); if (!path_or_status.ok()) { return path_or_status.status(); diff --git a/mediapipe/util/filtering/BUILD b/mediapipe/util/filtering/BUILD index 0a9300ab3..e02842186 100644 --- a/mediapipe/util/filtering/BUILD +++ b/mediapipe/util/filtering/BUILD @@ -34,6 +34,7 @@ cc_test( deps = [ ":low_pass_filter", "//mediapipe/framework/port:gtest_main", + "@com_google_absl//absl/memory", ], ) @@ -56,6 +57,7 @@ cc_test( ":relative_velocity_filter", "//mediapipe/framework/port:gtest_main", "//mediapipe/framework/port:logging", + "@com_google_absl//absl/memory", "@com_google_absl//absl/time", ], ) diff --git a/mediapipe/util/filtering/low_pass_filter_test.cc b/mediapipe/util/filtering/low_pass_filter_test.cc index d8078624c..49c03285a 100644 --- a/mediapipe/util/filtering/low_pass_filter_test.cc +++ b/mediapipe/util/filtering/low_pass_filter_test.cc @@ -14,6 +14,7 @@ #include "mediapipe/util/filtering/low_pass_filter.h" +#include "absl/memory/memory.h" #include "mediapipe/framework/port/gtest.h" namespace mediapipe { diff --git a/mediapipe/util/filtering/relative_velocity_filter_test.cc b/mediapipe/util/filtering/relative_velocity_filter_test.cc index 793564721..717237bbe 100644 --- a/mediapipe/util/filtering/relative_velocity_filter_test.cc +++ b/mediapipe/util/filtering/relative_velocity_filter_test.cc @@ -18,6 +18,7 @@ #include #include +#include "absl/memory/memory.h" #include "absl/time/time.h" #include "mediapipe/framework/port/gtest.h" #include 
"mediapipe/framework/port/logging.h" @@ -25,7 +26,7 @@ namespace mediapipe { using DistanceEstimationMode = - ::mediapipe::RelativeVelocityFilter::DistanceEstimationMode; + mediapipe::RelativeVelocityFilter::DistanceEstimationMode; absl::Duration DurationFromNanos(int64_t nanos) { return absl::FromChrono(std::chrono::nanoseconds{nanos}); diff --git a/mediapipe/util/header_util.cc b/mediapipe/util/header_util.cc index 203f4cad0..a4db9ea5f 100644 --- a/mediapipe/util/header_util.cc +++ b/mediapipe/util/header_util.cc @@ -19,8 +19,8 @@ namespace mediapipe { -::mediapipe::Status CopyInputHeadersToOutputs(const InputStreamSet& inputs, - const OutputStreamSet& outputs) { +mediapipe::Status CopyInputHeadersToOutputs(const InputStreamSet& inputs, + const OutputStreamSet& outputs) { for (auto id = inputs.BeginId(); id < inputs.EndId(); ++id) { std::pair tag_index = inputs.TagAndIndexFromId(id); auto output_id = outputs.GetId(tag_index.first, tag_index.second); @@ -29,11 +29,11 @@ namespace mediapipe { } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } -::mediapipe::Status CopyInputHeadersToOutputs(const InputStreamShardSet& inputs, - OutputStreamShardSet* outputs) { +mediapipe::Status CopyInputHeadersToOutputs(const InputStreamShardSet& inputs, + OutputStreamShardSet* outputs) { for (auto id = inputs.BeginId(); id < inputs.EndId(); ++id) { std::pair tag_index = inputs.TagAndIndexFromId(id); auto output_id = outputs->GetId(tag_index.first, tag_index.second); @@ -42,7 +42,7 @@ namespace mediapipe { } } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/util/header_util.h b/mediapipe/util/header_util.h index d630e34de..a11dd78ef 100644 --- a/mediapipe/util/header_util.h +++ b/mediapipe/util/header_util.h @@ -22,11 +22,11 @@ namespace mediapipe { // Copies headers from |inputs| into |outputs| respectively. The size of // |inputs| and |outputs| must be equal. -::mediapipe::Status CopyInputHeadersToOutputs(const InputStreamSet& inputs, - const OutputStreamSet& outputs); +mediapipe::Status CopyInputHeadersToOutputs(const InputStreamSet& inputs, + const OutputStreamSet& outputs); -::mediapipe::Status CopyInputHeadersToOutputs(const InputStreamShardSet& inputs, - OutputStreamShardSet* outputs); +mediapipe::Status CopyInputHeadersToOutputs(const InputStreamShardSet& inputs, + OutputStreamShardSet* outputs); } // namespace mediapipe diff --git a/mediapipe/util/resource_util.cc b/mediapipe/util/resource_util.cc index 298217dbb..69555b17f 100644 --- a/mediapipe/util/resource_util.cc +++ b/mediapipe/util/resource_util.cc @@ -27,14 +27,13 @@ ABSL_FLAG( namespace mediapipe { -::mediapipe::StatusOr PathToResourceAsFile( - const std::string& path) { - return ::mediapipe::file::JoinPath(FLAGS_resource_root_dir.CurrentValue(), - path); +mediapipe::StatusOr PathToResourceAsFile(const std::string& path) { + return mediapipe::file::JoinPath(FLAGS_resource_root_dir.CurrentValue(), + path); } -::mediapipe::Status GetResourceContents(const std::string& path, - std::string* output) { +mediapipe::Status GetResourceContents(const std::string& path, + std::string* output) { return mediapipe::file::GetContents(path, output); } diff --git a/mediapipe/util/resource_util.h b/mediapipe/util/resource_util.h index d55706af9..a2af1c07b 100644 --- a/mediapipe/util/resource_util.h +++ b/mediapipe/util/resource_util.h @@ -39,13 +39,12 @@ namespace mediapipe { // accepts file paths. 
Code that can access data as a stream or as a buffer // should read from an asset directly on Android; an API for this will be // provided later. TODO. -::mediapipe::StatusOr PathToResourceAsFile( - const std::string& path); +mediapipe::StatusOr PathToResourceAsFile(const std::string& path); // Reads the entire contents of a resource. The search path is as in // PathToResourceAsFile. -::mediapipe::Status GetResourceContents(const std::string& path, - std::string* output); +mediapipe::Status GetResourceContents(const std::string& path, + std::string* output); } // namespace mediapipe diff --git a/mediapipe/util/resource_util_android.cc b/mediapipe/util/resource_util_android.cc index 04dde6e25..462c63414 100644 --- a/mediapipe/util/resource_util_android.cc +++ b/mediapipe/util/resource_util_android.cc @@ -24,14 +24,13 @@ namespace mediapipe { namespace { -::mediapipe::StatusOr PathToResourceAsFileInternal( +mediapipe::StatusOr PathToResourceAsFileInternal( const std::string& path) { return Singleton::get()->CachedFileFromAsset(path); } } // namespace -::mediapipe::StatusOr PathToResourceAsFile( - const std::string& path) { +mediapipe::StatusOr PathToResourceAsFile(const std::string& path) { // Return full path. if (absl::StartsWith(path, "/")) { return path; @@ -57,8 +56,8 @@ namespace { } } -::mediapipe::Status GetResourceContents(const std::string& path, - std::string* output) { +mediapipe::Status GetResourceContents(const std::string& path, + std::string* output) { if (absl::StartsWith(path, "/")) { return file::GetContents(path, output, file::Defaults()); } @@ -66,12 +65,12 @@ namespace { if (absl::StartsWith(path, "content://")) { MP_RETURN_IF_ERROR( Singleton::get()->ReadContentUri(path, output)); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } RET_CHECK(Singleton::get()->ReadFile(path, output)) << "could not read asset: " << path; - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/util/resource_util_apple.cc b/mediapipe/util/resource_util_apple.cc index 9b7677679..2d8e8ad78 100644 --- a/mediapipe/util/resource_util_apple.cc +++ b/mediapipe/util/resource_util_apple.cc @@ -24,7 +24,7 @@ namespace mediapipe { namespace { -::mediapipe::StatusOr PathToResourceAsFileInternal( +mediapipe::StatusOr PathToResourceAsFileInternal( const std::string& path) { NSString* ns_path = [NSString stringWithUTF8String:path.c_str()]; Class mediapipeGraphClass = NSClassFromString(@"MPPGraph"); @@ -39,8 +39,7 @@ namespace { } } // namespace -::mediapipe::StatusOr PathToResourceAsFile( - const std::string& path) { +mediapipe::StatusOr PathToResourceAsFile(const std::string& path) { // Return full path. 
if (absl::StartsWith(path, "/")) { return path; @@ -66,15 +65,15 @@ namespace { } } -::mediapipe::Status GetResourceContents(const std::string& path, - std::string* output) { +mediapipe::Status GetResourceContents(const std::string& path, + std::string* output) { ASSIGN_OR_RETURN(std::string full_path, PathToResourceAsFile(path)); std::ifstream input_file(full_path); std::stringstream buffer; buffer << input_file.rdbuf(); buffer.str().swap(*output); - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/util/tensor_to_detection.cc b/mediapipe/util/tensor_to_detection.cc index 3627674de..57f03e3b6 100644 --- a/mediapipe/util/tensor_to_detection.cc +++ b/mediapipe/util/tensor_to_detection.cc @@ -168,14 +168,14 @@ Status TensorsToDetections(const ::tensorflow::Tensor& num_detections, TensorToDetection(boxes_mat(i, 0), boxes_mat(i, 1), boxes_mat(i, 2), boxes_mat(i, 3), score, class_id); } else { - if (!::mediapipe::ContainsKey(label_map, class_id)) { + if (!mediapipe::ContainsKey(label_map, class_id)) { return InvalidArgumentError(StrFormat( "Input label_map does not contain entry for integer label: %d", class_id)); } - detection = TensorToDetection( - boxes_mat(i, 0), boxes_mat(i, 1), boxes_mat(i, 2), boxes_mat(i, 3), - score, ::mediapipe::FindOrDie(label_map, class_id)); + detection = TensorToDetection(boxes_mat(i, 0), boxes_mat(i, 1), + boxes_mat(i, 2), boxes_mat(i, 3), score, + mediapipe::FindOrDie(label_map, class_id)); } // Adding keypoints LocationData* location_data = detection.mutable_location_data(); @@ -201,7 +201,7 @@ Status TensorsToDetections(const ::tensorflow::Tensor& num_detections, } detections->emplace_back(detection); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } } // namespace mediapipe diff --git a/mediapipe/util/tensor_to_detection.h b/mediapipe/util/tensor_to_detection.h index e127f9e62..767e639b2 100644 --- a/mediapipe/util/tensor_to_detection.h +++ b/mediapipe/util/tensor_to_detection.h @@ -28,7 +28,7 @@ Detection TensorToDetection( const ::tensorflow::TTypes::Vec& box, float score, const ::absl::variant& class_label); -::mediapipe::Status TensorsToDetections( +mediapipe::Status TensorsToDetections( const ::tensorflow::Tensor& num_detections, const ::tensorflow::Tensor& boxes, const ::tensorflow::Tensor& scores, const ::tensorflow::Tensor& classes, @@ -36,7 +36,7 @@ Detection TensorToDetection( std::vector* detections); // Use this version if keypoints or masks are available. 
diff --git a/mediapipe/util/tensor_to_detection.cc b/mediapipe/util/tensor_to_detection.cc
index 3627674de..57f03e3b6 100644
--- a/mediapipe/util/tensor_to_detection.cc
+++ b/mediapipe/util/tensor_to_detection.cc
@@ -168,14 +168,14 @@ Status TensorsToDetections(const ::tensorflow::Tensor& num_detections,
           TensorToDetection(boxes_mat(i, 0), boxes_mat(i, 1), boxes_mat(i, 2),
                             boxes_mat(i, 3), score, class_id);
     } else {
-      if (!::mediapipe::ContainsKey(label_map, class_id)) {
+      if (!mediapipe::ContainsKey(label_map, class_id)) {
         return InvalidArgumentError(StrFormat(
             "Input label_map does not contain entry for integer label: %d",
             class_id));
       }
-      detection = TensorToDetection(
-          boxes_mat(i, 0), boxes_mat(i, 1), boxes_mat(i, 2), boxes_mat(i, 3),
-          score, ::mediapipe::FindOrDie(label_map, class_id));
+      detection = TensorToDetection(boxes_mat(i, 0), boxes_mat(i, 1),
+                                    boxes_mat(i, 2), boxes_mat(i, 3), score,
+                                    mediapipe::FindOrDie(label_map, class_id));
     }
     // Adding keypoints
     LocationData* location_data = detection.mutable_location_data();
@@ -201,7 +201,7 @@ Status TensorsToDetections(const ::tensorflow::Tensor& num_detections,
     }
     detections->emplace_back(detection);
   }
-  return ::mediapipe::OkStatus();
+  return mediapipe::OkStatus();
 }
 
 }  // namespace mediapipe
diff --git a/mediapipe/util/tensor_to_detection.h b/mediapipe/util/tensor_to_detection.h
index e127f9e62..767e639b2 100644
--- a/mediapipe/util/tensor_to_detection.h
+++ b/mediapipe/util/tensor_to_detection.h
@@ -28,7 +28,7 @@ Detection TensorToDetection(
     const ::tensorflow::TTypes<float>::Vec& box, float score,
     const ::absl::variant<int, std::string>& class_label);
 
-::mediapipe::Status TensorsToDetections(
+mediapipe::Status TensorsToDetections(
     const ::tensorflow::Tensor& num_detections,
     const ::tensorflow::Tensor& boxes, const ::tensorflow::Tensor& scores,
     const ::tensorflow::Tensor& classes,
@@ -36,7 +36,7 @@ Detection TensorToDetection(
     std::vector<Detection>* detections);
 
 // Use this version if keypoints or masks are available.
-::mediapipe::Status TensorsToDetections(
+mediapipe::Status TensorsToDetections(
     const ::tensorflow::Tensor& num_detections,
     const ::tensorflow::Tensor& boxes, const ::tensorflow::Tensor& scores,
     const ::tensorflow::Tensor& classes,
     const ::tensorflow::Tensor& keypoints,
diff --git a/mediapipe/util/tflite/BUILD b/mediapipe/util/tflite/BUILD
index 2d2229388..6e33cd181 100644
--- a/mediapipe/util/tflite/BUILD
+++ b/mediapipe/util/tflite/BUILD
@@ -98,3 +98,17 @@ cc_library(
         ],
     }) + ["@org_tensorflow//tensorflow/lite/core/api"],
 )
+
+cc_library(
+    name = "tflite_model_loader",
+    srcs = ["tflite_model_loader.cc"],
+    hdrs = ["tflite_model_loader.h"],
+    deps = [
+        "//mediapipe/framework:packet",
+        "//mediapipe/framework/port:ret_check",
+        "//mediapipe/framework/port:status",
+        "//mediapipe/framework/port:statusor",
+        "//mediapipe/util:resource_util",
+        "@org_tensorflow//tensorflow/lite:framework",
+    ],
+)
diff --git a/mediapipe/util/tflite/tflite_gpu_runner.cc b/mediapipe/util/tflite/tflite_gpu_runner.cc
index 2d72997e4..a4d97acbf 100644
--- a/mediapipe/util/tflite/tflite_gpu_runner.cc
+++ b/mediapipe/util/tflite/tflite_gpu_runner.cc
@@ -41,6 +41,34 @@ namespace tflite {
 namespace gpu {
 namespace {
 
+// TODO: Find a better place for these utility functions.
+void UpdateShapes(const tflite::Interpreter& interpreter,
+                  const std::vector<int>& indices,
+                  std::vector<std::vector<int>>* shapes) {
+  shapes->resize(indices.size());
+  for (int i = 0; i < indices.size(); ++i) {
+    const TfLiteTensor* tensor = interpreter.tensor(indices[i]);
+    shapes->at(i).resize(tensor->dims->size);
+    for (int j = 0; j < tensor->dims->size; ++j) {
+      shapes->at(i)[j] = tensor->dims->data[j];
+    }
+  }
+}
+
+absl::Status InitializeShapes(const tflite::FlatBufferModel& flatbuffer,
+                              const tflite::OpResolver& op_resolver,
+                              std::vector<std::vector<int>>* input_shapes,
+                              std::vector<std::vector<int>>* output_shapes) {
+  std::unique_ptr<tflite::Interpreter> interpreter;
+  tflite::InterpreterBuilder interpreter_builder(flatbuffer, op_resolver);
+  if (interpreter_builder(&interpreter) != kTfLiteOk || !interpreter) {
+    return absl::InternalError("Unable to prepare TfLite interpreter.");
+  }
+  UpdateShapes(*interpreter, interpreter->inputs(), input_shapes);
+  UpdateShapes(*interpreter, interpreter->outputs(), output_shapes);
+  return absl::OkStatus();
+}
+
 ObjectDef GetSSBOObjectDef(int channels) {
   ObjectDef gpu_object_def;
   gpu_object_def.data_type = DataType::FLOAT32;
@@ -77,12 +105,15 @@ mediapipe::Status TFLiteGPURunner::InitializeWithModel(
   for (const auto& output : graph_gl_->outputs()) {
     output_shapes_.push_back(output->tensor.shape);
   }
+  MP_RETURN_IF_ERROR(InitializeShapes(flatbuffer, op_resolver,
+                                      &input_shape_from_model_,
+                                      &output_shape_from_model_));
 
   return absl::OkStatus();
 }
 
 mediapipe::StatusOr<int64_t> TFLiteGPURunner::GetInputElements(int id) {
   if (id >= input_shapes_.size()) {
-    return ::mediapipe::InternalError("Wrong input tensor id.");
+    return mediapipe::InternalError("Wrong input tensor id.");
   } else {
     return input_shapes_[id].DimensionsProduct();
   }
@@ -90,7 +121,7 @@ mediapipe::StatusOr<int64_t> TFLiteGPURunner::GetOutputElements(int id) {
   if (id >= output_shapes_.size()) {
-    return ::mediapipe::InternalError("Wrong output tensor id.");
+    return mediapipe::InternalError("Wrong output tensor id.");
   } else {
     return output_shapes_[id].DimensionsProduct();
   }
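The `UpdateShapes()` helper added above relies on TfLite storing each tensor's shape in a C-style `TfLiteIntArray`. A standalone illustration of that dims-copying pattern (the function name here is hypothetical):

```cpp
#include <vector>

#include "tensorflow/lite/interpreter.h"

// Copies one tensor's dims out of the interpreter into a plain std::vector,
// the same per-tensor step UpdateShapes() performs in a loop.
std::vector<int> DimsOf(const tflite::Interpreter& interpreter, int index) {
  const TfLiteTensor* tensor = interpreter.tensor(index);
  return std::vector<int>(tensor->dims->data,
                          tensor->dims->data + tensor->dims->size);
}
```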
diff --git a/mediapipe/util/tflite/tflite_gpu_runner.h b/mediapipe/util/tflite/tflite_gpu_runner.h
index cc20c8ebf..389e33b94 100644
--- a/mediapipe/util/tflite/tflite_gpu_runner.h
+++ b/mediapipe/util/tflite/tflite_gpu_runner.h
@@ -75,6 +75,13 @@ class TFLiteGPURunner {
   std::vector<BHWC> GetInputShapes() { return input_shapes_; }
   std::vector<BHWC> GetOutputShapes() { return output_shapes_; }
 
+  std::vector<std::vector<int>> GetTFLiteInputShapes() {
+    return input_shape_from_model_;
+  }
+  std::vector<std::vector<int>> GetTFLiteOutputShapes() {
+    return output_shape_from_model_;
+  }
+
 #ifdef __ANDROID__
   void SetSerializedBinaryCache(std::vector<uint8_t>&& cache) {
     serialized_binary_cache_ = std::move(cache);
@@ -110,6 +117,12 @@ class TFLiteGPURunner {
   std::vector<BHWC> input_shapes_;
   std::vector<BHWC> output_shapes_;
 
+  // Input/output shapes above belong to the internal graph representation. It
+  // is handy in certain situations to have the original tflite model's
+  // input/output shapes, which differ conceptually.
+  std::vector<std::vector<int>> input_shape_from_model_;
+  std::vector<std::vector<int>> output_shape_from_model_;
+
   bool opencl_is_forced_ = false;
   bool opengl_is_forced_ = false;
 };
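The distinction these new accessors preserve: `input_shapes_` holds the GPU graph's normalized `BHWC` view, while `*_shape_from_model_` keeps the raw dimension lists from the TfLite FlatBuffer, whose ranks may differ. A hedged sketch of a call site (runner initialization elided; assumes `InitializeWithModel()` already succeeded):

```cpp
#include <vector>

#include "mediapipe/framework/port/logging.h"
#include "mediapipe/util/tflite/tflite_gpu_runner.h"

// Compares the two shape views exposed by the runner after initialization.
void LogShapeViews(tflite::gpu::TFLiteGPURunner* runner) {
  // Internal graph representation: normalized BHWC shapes.
  std::vector<tflite::gpu::BHWC> internal = runner->GetInputShapes();
  // Original model view: dims exactly as declared in the FlatBuffer.
  std::vector<std::vector<int>> original = runner->GetTFLiteInputShapes();
  LOG(INFO) << "inputs: " << internal.size() << ", rank of input 0 per model: "
            << (original.empty() ? 0 : original[0].size());
}
```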
diff --git a/mediapipe/util/tflite/tflite_model_loader.cc b/mediapipe/util/tflite/tflite_model_loader.cc
new file mode 100644
index 000000000..941d08ef4
--- /dev/null
+++ b/mediapipe/util/tflite/tflite_model_loader.cc
@@ -0,0 +1,34 @@
+// Copyright 2020 The MediaPipe Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "mediapipe/util/tflite/tflite_model_loader.h"
+
+#include "mediapipe/framework/port/ret_check.h"
+#include "mediapipe/util/resource_util.h"
+
+namespace mediapipe {
+
+mediapipe::StatusOr<Packet> TfLiteModelLoader::LoadFromPath(
+    const std::string& path) {
+  std::string model_path = path;
+
+  ASSIGN_OR_RETURN(model_path, mediapipe::PathToResourceAsFile(model_path));
+
+  auto model = tflite::FlatBufferModel::BuildFromFile(model_path.c_str());
+  RET_CHECK(model) << "Failed to load model from path " << model_path;
+  return MakePacket<TfLiteModelPtr>(TfLiteModelPtr(
+      model.release(), [](tflite::FlatBufferModel* model) { delete model; }));
+}
+
+}  // namespace mediapipe
diff --git a/mediapipe/util/tflite/tflite_model_loader.h b/mediapipe/util/tflite/tflite_model_loader.h
new file mode 100644
index 000000000..5e759649e
--- /dev/null
+++ b/mediapipe/util/tflite/tflite_model_loader.h
@@ -0,0 +1,38 @@
+// Copyright 2020 The MediaPipe Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef MEDIAPIPE_UTIL_TFLITE_TFLITE_MODEL_LOADER_H_
+#define MEDIAPIPE_UTIL_TFLITE_TFLITE_MODEL_LOADER_H_
+
+#include "mediapipe/framework/packet.h"
+#include "mediapipe/framework/port/status.h"
+#include "mediapipe/framework/port/statusor.h"
+#include "tensorflow/lite/model.h"
+
+namespace mediapipe {
+
+// Represents a TfLite model as a FlatBuffer.
+using TfLiteModelPtr =
+    std::unique_ptr<tflite::FlatBufferModel,
+                    std::function<void(tflite::FlatBufferModel*)>>;
+
+class TfLiteModelLoader {
+ public:
+  // Returns a Packet containing a TfLiteModelPtr, pointing to a model loaded
+  // from the specified file path.
+  static mediapipe::StatusOr<Packet> LoadFromPath(const std::string& path);
+};
+
+}  // namespace mediapipe
+
+#endif  // MEDIAPIPE_UTIL_TFLITE_TFLITE_MODEL_LOADER_H_
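Wrapping the model in a `Packet` lets one loaded FlatBuffer be shared across calculators without copying. A minimal sketch of a call site for the new loader (the function name and model path are illustrative only; `ASSIGN_OR_RETURN` is assumed from MediaPipe's status macros):

```cpp
#include "mediapipe/framework/packet.h"
#include "mediapipe/framework/port/ret_check.h"
#include "mediapipe/framework/port/status.h"
#include "mediapipe/util/tflite/tflite_model_loader.h"

// Loads a model once; the resulting packet can be fed in as a side packet.
mediapipe::Status LoadModel(mediapipe::Packet* model_packet) {
  ASSIGN_OR_RETURN(*model_packet, mediapipe::TfLiteModelLoader::LoadFromPath(
                                      "path/to/model.tflite"));
  // The packet owns the model through TfLiteModelPtr's custom deleter.
  RET_CHECK(model_packet->Get<mediapipe::TfLiteModelPtr>() != nullptr);
  return mediapipe::OkStatus();
}
```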
Expected ", header.num_channels(), " rows, but found ", matrix.rows())); } - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } int64 SecondsToSamples(double time_in_seconds, double sample_rate) { diff --git a/mediapipe/util/time_series_util.h b/mediapipe/util/time_series_util.h index f881dd144..292749552 100644 --- a/mediapipe/util/time_series_util.h +++ b/mediapipe/util/time_series_util.h @@ -45,25 +45,25 @@ bool LogWarningIfTimestampIsInconsistent(const Timestamp& current_timestamp, // Returns mediapipe::status::OK if the header is valid. Otherwise, returns a // Status object with an error message. -::mediapipe::Status IsTimeSeriesHeaderValid(const TimeSeriesHeader& header); +mediapipe::Status IsTimeSeriesHeaderValid(const TimeSeriesHeader& header); // Fills header and returns mediapipe::status::OK if the header is non-empty and // valid. Otherwise, returns a Status object with an error message. -::mediapipe::Status FillTimeSeriesHeaderIfValid(const Packet& header_packet, - TimeSeriesHeader* header); +mediapipe::Status FillTimeSeriesHeaderIfValid(const Packet& header_packet, + TimeSeriesHeader* header); // Fills header and returns mediapipe::status::OK if the header contains a // non-empty and valid TimeSeriesHeader. Otherwise, returns a Status object with // an error message. -::mediapipe::Status FillMultiStreamTimeSeriesHeaderIfValid( +mediapipe::Status FillMultiStreamTimeSeriesHeaderIfValid( const Packet& header_packet, MultiStreamTimeSeriesHeader* header); -// Returns::mediapipe::Status::OK iff options contains an extension of type +// Returnsmediapipe::Status::OK iff options contains an extension of type // OptionsClass. template -::mediapipe::Status HasOptionsExtension(const CalculatorOptions& options) { +mediapipe::Status HasOptionsExtension(const CalculatorOptions& options) { if (options.HasExtension(OptionsClass::ext)) { - return ::mediapipe::OkStatus(); + return mediapipe::OkStatus(); } std::string error_message = "Options proto does not contain extension "; absl::StrAppend(&error_message, @@ -72,15 +72,15 @@ template // Avoid lite proto APIs on mobile targets. absl::StrAppend(&error_message, " : ", options.DebugString()); #endif - return ::mediapipe::InvalidArgumentError(error_message); + return mediapipe::InvalidArgumentError(error_message); } -// Returns::mediapipe::Status::OK if the shape of 'matrix' is consistent +// Returnsmediapipe::Status::OK if the shape of 'matrix' is consistent // with the num_samples and num_channels fields present in 'header'. // The corresponding matrix dimensions of unset header fields are // ignored, so e.g. an empty header (which is not valid according to // FillTimeSeriesHeaderIfValid) is considered consistent with any matrix. 
-::mediapipe::Status IsMatrixShapeConsistentWithHeader( +mediapipe::Status IsMatrixShapeConsistentWithHeader( const Matrix& matrix, const TimeSeriesHeader& header); template diff --git a/mediapipe/util/tracking/BUILD b/mediapipe/util/tracking/BUILD index 0ff5e67eb..f1114e86f 100644 --- a/mediapipe/util/tracking/BUILD +++ b/mediapipe/util/tracking/BUILD @@ -522,6 +522,7 @@ cc_library( "//mediapipe/framework/port:opencv_imgproc", "//mediapipe/framework/port:opencv_video", "//mediapipe/framework/port:vector", + "@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:node_hash_set", "@com_google_absl//absl/memory", "@eigen_archive//:eigen", diff --git a/mediapipe/util/tracking/region_flow_computation.cc b/mediapipe/util/tracking/region_flow_computation.cc index 88a954f5f..9685c5e72 100644 --- a/mediapipe/util/tracking/region_flow_computation.cc +++ b/mediapipe/util/tracking/region_flow_computation.cc @@ -26,6 +26,7 @@ #include #include "Eigen/Core" +#include "absl/container/flat_hash_map.h" #include "absl/container/node_hash_set.h" #include "absl/memory/memory.h" #include "mediapipe/framework/port/logging.h" @@ -557,7 +558,7 @@ struct RegionFlowComputation::LongTrackData { float motion_mag = 0; // Smoothed average motion. -1 for unknown. }; - std::unordered_map track_info; + absl::flat_hash_map track_info; }; template diff --git a/mediapipe/util/tracking/tracked_detection_manager.cc b/mediapipe/util/tracking/tracked_detection_manager.cc index a5896bbf6..597827f3c 100644 --- a/mediapipe/util/tracking/tracked_detection_manager.cc +++ b/mediapipe/util/tracking/tracked_detection_manager.cc @@ -21,7 +21,7 @@ namespace { -using ::mediapipe::TrackedDetection; +using mediapipe::TrackedDetection; // Checks if a point is out of view. // x and y should both be in [0, 1] to be considered in view. diff --git a/requirements.txt b/requirements.txt index cbd753554..cee4e454a 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,7 +1,7 @@ absl-py dataclasses -numpy -opencv-python>=3.4.0,<4.0.0 +numpy == 1.19.3 +opencv-python protobuf>=3.11.4 six wheel diff --git a/setup.py b/setup.py index 32ccac55f..188312bc5 100644 --- a/setup.py +++ b/setup.py @@ -17,6 +17,7 @@ Setup for MediaPipe package with setuptools. 
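A hedged sketch of how the two validators above compose in an audio calculator, assuming (as in this file) that the helpers live in namespace `mediapipe::time_series_util` and that `header_packet` stands in for an input stream header:

```cpp
#include "mediapipe/framework/formats/matrix.h"
#include "mediapipe/framework/packet.h"
#include "mediapipe/util/time_series_util.h"

// Validates a header packet, then checks a sample matrix against it.
mediapipe::Status ValidateInput(const mediapipe::Packet& header_packet,
                                const mediapipe::Matrix& samples) {
  mediapipe::TimeSeriesHeader header;
  // Fails if the packet is empty or sample_rate/num_channels are missing.
  MP_RETURN_IF_ERROR(mediapipe::time_series_util::FillTimeSeriesHeaderIfValid(
      header_packet, &header));
  // Fails if the matrix dimensions contradict the header's declared shape;
  // unset header fields are ignored, per the comment above.
  return mediapipe::time_series_util::IsMatrixShapeConsistentWithHeader(
      samples, header);
}
```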
diff --git a/mediapipe/util/tracking/BUILD b/mediapipe/util/tracking/BUILD
index 0ff5e67eb..f1114e86f 100644
--- a/mediapipe/util/tracking/BUILD
+++ b/mediapipe/util/tracking/BUILD
@@ -522,6 +522,7 @@ cc_library(
         "//mediapipe/framework/port:opencv_imgproc",
         "//mediapipe/framework/port:opencv_video",
         "//mediapipe/framework/port:vector",
+        "@com_google_absl//absl/container:flat_hash_map",
         "@com_google_absl//absl/container:node_hash_set",
         "@com_google_absl//absl/memory",
         "@eigen_archive//:eigen",
diff --git a/mediapipe/util/tracking/region_flow_computation.cc b/mediapipe/util/tracking/region_flow_computation.cc
index 88a954f5f..9685c5e72 100644
--- a/mediapipe/util/tracking/region_flow_computation.cc
+++ b/mediapipe/util/tracking/region_flow_computation.cc
@@ -26,6 +26,7 @@
 #include <unordered_map>
 
 #include "Eigen/Core"
+#include "absl/container/flat_hash_map.h"
 #include "absl/container/node_hash_set.h"
 #include "absl/memory/memory.h"
 #include "mediapipe/framework/port/logging.h"
@@ -557,7 +558,7 @@ struct RegionFlowComputation::LongTrackData {
     float motion_mag = 0;  // Smoothed average motion. -1 for unknown.
   };
 
-  std::unordered_map<int, TrackInfo> track_info;
+  absl::flat_hash_map<int, TrackInfo> track_info;
 };
 
 template
diff --git a/mediapipe/util/tracking/tracked_detection_manager.cc b/mediapipe/util/tracking/tracked_detection_manager.cc
index a5896bbf6..597827f3c 100644
--- a/mediapipe/util/tracking/tracked_detection_manager.cc
+++ b/mediapipe/util/tracking/tracked_detection_manager.cc
@@ -21,7 +21,7 @@
 namespace {
 
-using ::mediapipe::TrackedDetection;
+using mediapipe::TrackedDetection;
 
 // Checks if a point is out of view.
 // x and y should both be in [0, 1] to be considered in view.
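The container swap above is behavior-preserving because the tracker only uses operations both map types share, while `absl::flat_hash_map`'s open addressing avoids `std::unordered_map`'s per-node allocations on this hot per-frame path. An illustrative sketch (`int`/`float` stand in for the real key and value types):

```cpp
#include "absl/container/flat_hash_map.h"

// The shared subset of the two APIs: insert-or-assign, find, erase.
void FlatHashMapExample() {
  absl::flat_hash_map<int, float> motion_by_track;
  motion_by_track[42] = 1.5f;  // insert or assign, as with std::unordered_map
  if (auto it = motion_by_track.find(42); it != motion_by_track.end()) {
    motion_by_track.erase(it);  // erase via iterator, same semantics
  }
}
```

One caveat worth noting: `flat_hash_map` does not guarantee pointer or iterator stability across rehashes, so this kind of swap is only safe when no code holds long-lived references into the map.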
diff --git a/requirements.txt b/requirements.txt
index cbd753554..cee4e454a 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,7 +1,7 @@
 absl-py
 dataclasses
-numpy
-opencv-python>=3.4.0,<4.0.0
+numpy == 1.19.3
+opencv-python
 protobuf>=3.11.4
 six
 wheel
diff --git a/setup.py b/setup.py
index 32ccac55f..188312bc5 100644
--- a/setup.py
+++ b/setup.py
@@ -17,6 +17,7 @@ Setup for MediaPipe package with setuptools.
 
 import glob
 import os
+import platform
 import posixpath
 import re
 import shutil
@@ -32,21 +33,30 @@ from distutils import spawn
 import distutils.command.build as build
 import distutils.command.clean as clean
 
-__version__ = '0.7'
+__version__ = '0.8'
+IS_WINDOWS = (platform.system() == 'Windows')
 MP_ROOT_PATH = os.path.dirname(os.path.abspath(__file__))
 ROOT_INIT_PY = os.path.join(MP_ROOT_PATH, '__init__.py')
 MP_DIR_INIT_PY = os.path.join(MP_ROOT_PATH, 'mediapipe/__init__.py')
-MP_DIR_INIT_PY_BACKUP = os.path.join(MP_ROOT_PATH,
-                                     'mediapipe/__init__.py.backup')
 MP_THIRD_PARTY_BUILD = os.path.join(MP_ROOT_PATH, 'third_party/BUILD')
-MP_THIRD_PARTY_BUILD_BACKUP = os.path.join(MP_ROOT_PATH,
-                                           'third_party/BUILD.backup')
-MP_CALCULATORS_DIR_INIT_PY = os.path.join(MP_ROOT_PATH,
-                                          'mediapipe/calculators/__init__.py')
+SUBDIR_INIT_PY_FILES = [
+    os.path.join(MP_ROOT_PATH, 'mediapipe/calculators/__init__.py'),
+    os.path.join(MP_ROOT_PATH, 'mediapipe/modules/__init__.py'),
+    os.path.join(MP_ROOT_PATH,
+                 'mediapipe/modules/holistic_landmark/__init__.py')
+]
 if not os.path.exists(ROOT_INIT_PY):
   open(ROOT_INIT_PY, 'w').close()
 
 
+def _normalize_path(path):
+  return path.replace('\\', '/') if IS_WINDOWS else path
+
+
+def _get_backup_file(path):
+  return path + '.backup'
+
+
 def _parse_requirements(path):
   with open(os.path.join(MP_ROOT_PATH, path)) as f:
     return [
@@ -61,7 +71,8 @@ def _get_long_description():
   return re.sub(
       r'(docs/images/|docs/images/mobile/)([A-Za-z0-9_]*\.(png|gif))',
       r'https://github.com/google/mediapipe/blob/master/\g<1>\g<2>?raw=true',
-      open(os.path.join(MP_ROOT_PATH, 'README.md')).read())
+      open(os.path.join(MP_ROOT_PATH, 'README.md'),
+           'rb').read().decode('utf-8'))
 
 
 def _check_bazel():
@@ -93,6 +104,25 @@ def _check_bazel():
     sys.exit(-1)
 
 
+def _modify_opencv_cmake_rule(link_opencv):
+  """Modify opencv_cmake rule to build the static opencv libraries."""
+
+  # Ask the opencv_cmake rule to build the static opencv libraries for the
+  # mediapipe python package. By doing this, we can avoid copying the opencv
+  # .so file into the package.
+  # On Windows, the opencv_cmake rule may need Visual Studio to compile OpenCV
+  # from source. For simplicity, we continue to link the prebuilt version of
+  # the OpenCV library through "@windows_opencv//:opencv".
+  if not link_opencv and not IS_WINDOWS:
+    content = open(MP_THIRD_PARTY_BUILD,
+                   'r').read().replace('OPENCV_SHARED_LIBS = True',
+                                       'OPENCV_SHARED_LIBS = False')
+    shutil.move(MP_THIRD_PARTY_BUILD, _get_backup_file(MP_THIRD_PARTY_BUILD))
+    build_file = open(MP_THIRD_PARTY_BUILD, 'w')
+    build_file.write(content)
+    build_file.close()
+
+
 class ModifyInitFiles(setuptools.Command):
   """Modify the init files for building MediaPipe Python package."""
 
@@ -106,7 +136,7 @@ class ModifyInitFiles(setuptools.Command):
 
   def run(self):
     # Save the original init file.
-    shutil.copyfile(MP_DIR_INIT_PY, MP_DIR_INIT_PY_BACKUP)
+    shutil.copyfile(MP_DIR_INIT_PY, _get_backup_file(MP_DIR_INIT_PY))
     mp_dir_init_file = open(MP_DIR_INIT_PY, 'a')
     mp_dir_init_file.writelines(
         ['\n', 'from mediapipe.python import *\n',
@@ -137,21 +167,23 @@ class GeneratePyProtos(setuptools.Command):
           '-compiler\' (linux) or \'brew install protobuf\'(macos) to install '
          'protobuf compiler binary.')
       sys.exit(-1)
+    # Add __init__.py to make the generated py proto files visible.
+    for init_py in SUBDIR_INIT_PY_FILES:
+      if not os.path.exists(init_py):
+        sys.stderr.write('adding __init__ file: %s\n' % init_py)
+        open(init_py, 'w').close()
     # Build framework and calculator protos.
-    if not os.path.exists(MP_CALCULATORS_DIR_INIT_PY):
-      sys.stderr.write('adding __init__ file: %s\n' %
-                       MP_CALCULATORS_DIR_INIT_PY)
-      open(MP_CALCULATORS_DIR_INIT_PY, 'w').close()
     for pattern in [
         'mediapipe/framework/**/*.proto', 'mediapipe/calculators/**/*.proto',
-        'mediapipe/gpu/**/*.proto', 'mediapipe/util/**/*.proto'
+        'mediapipe/gpu/**/*.proto', 'mediapipe/modules/**/*.proto',
+        'mediapipe/util/**/*.proto'
     ]:
       for proto_file in glob.glob(pattern, recursive=True):
         # Ignore test protos.
         if proto_file.endswith('test.proto'):
           continue
         # Ignore tensorflow protos.
-        if 'mediapipe/calculators/tensorflow' in proto_file:
+        if 'tensorflow' in proto_file:
          continue
         proto_dir = os.path.dirname(os.path.abspath(proto_file))
         # Ignore testdata dir.
@@ -189,7 +221,8 @@ class BuildBinaryGraphs(build.build):
     binary_graphs = [
         'face_landmark/face_landmark_front_cpu',
         'hand_landmark/hand_landmark_tracking_cpu',
-        'pose_landmark/pose_landmark_upper_body_smoothed_cpu'
+        'holistic_landmark/holistic_landmark_cpu',
+        'pose_landmark/pose_landmark_cpu'
     ]
     for binary_graph in binary_graphs:
       sys.stderr.write('generating binarypb: %s\n' %
@@ -204,9 +237,11 @@ class BuildBinaryGraphs(build.build):
         'build',
         '--compilation_mode=opt',
         '--define=MEDIAPIPE_DISABLE_GPU=1',
-        '--action_env=PYTHON_BIN_PATH=' + sys.executable,
+        '--action_env=PYTHON_BIN_PATH=' + _normalize_path(sys.executable),
         os.path.join('mediapipe/modules/', graph_path),
     ]
+    if not self.link_opencv and not IS_WINDOWS:
+      bazel_command.append('--define=OPENCV=source')
     if subprocess.call(bazel_command) != 0:
       sys.exit(-1)
     output_name = graph_path + '.binarypb'
@@ -253,27 +288,17 @@ class BuildBazelExtension(build_ext.build_ext):
   def bazel_build(self, ext):
     if not os.path.exists(self.build_temp):
       os.makedirs(self.build_temp)
-    bazel_argv = [
+    bazel_command = [
         'bazel',
         'build',
         '--compilation_mode=opt',
         '--define=MEDIAPIPE_DISABLE_GPU=1',
-        '--action_env=PYTHON_BIN_PATH=' + sys.executable,
+        '--action_env=PYTHON_BIN_PATH=' + _normalize_path(sys.executable),
         str(ext.bazel_target + '.so'),
     ]
-    if not self.link_opencv:
-      # Ask the opencv_cmake rule to build the static opencv libraries for
-      # mediapipe python package. By doing this, we can avoid copying the opencv
-      # .so file into the package.
-      content = open(MP_THIRD_PARTY_BUILD,
-                     'r').read().replace('OPENCV_SHARED_LIBS = True',
-                                         'OPENCV_SHARED_LIBS = False')
-      shutil.move(MP_THIRD_PARTY_BUILD, MP_THIRD_PARTY_BUILD_BACKUP)
-      build_file = open(MP_THIRD_PARTY_BUILD, 'w')
-      build_file.write(content)
-      build_file.close()
-      bazel_argv.append('--define=OPENCV=source')
-    self.spawn(bazel_argv)
+    if not self.link_opencv and not IS_WINDOWS:
+      bazel_command.append('--define=OPENCV=source')
+    self.spawn(bazel_command)
     ext_bazel_bin_path = os.path.join('bazel-bin', ext.relpath,
                                       ext.target_name + '.so')
     ext_dest_path = self.get_ext_fullpath(ext.name)
@@ -281,6 +306,10 @@ class BuildBazelExtension(build_ext.build_ext):
     if not os.path.exists(ext_dest_dir):
       os.makedirs(ext_dest_dir)
     shutil.copyfile(ext_bazel_bin_path, ext_dest_path)
+    if IS_WINDOWS:
+      for opencv_dll in glob.glob(
+          os.path.join('bazel-bin', ext.relpath, '*opencv*.dll')):
+        shutil.copyfile(opencv_dll, ext_dest_dir)
 
 
 class Build(build.build):
@@ -299,6 +328,10 @@ class Build(build.build):
     build.build.finalize_options(self)
 
   def run(self):
+    _modify_opencv_cmake_rule(self.link_opencv)
+    build_binary_graphs_obj = self.distribution.get_command_obj(
+        'build_binary_graphs')
+    build_binary_graphs_obj.link_opencv = self.link_opencv
     build_ext_obj = self.distribution.get_command_obj('build_ext')
     build_ext_obj.link_opencv = self.link_opencv
     self.run_command('build_binary_graphs')
@@ -324,6 +357,10 @@ class Install(install.install):
     install.install.finalize_options(self)
 
   def run(self):
+    _modify_opencv_cmake_rule(self.link_opencv)
+    build_binary_graphs_obj = self.distribution.get_command_obj(
+        'build_binary_graphs')
+    build_binary_graphs_obj.link_opencv = self.link_opencv
     build_ext_obj = self.distribution.get_command_obj('build_ext')
     build_ext_obj.link_opencv = self.link_opencv
     self.run_command('build_binary_graphs')
@@ -337,21 +374,27 @@ class RemoveGenerated(clean.clean):
   """Remove the generated files."""
 
   def run(self):
-    for py_file in glob.glob('mediapipe/framework/**/*.py', recursive=True):
-      sys.stderr.write('removing generated files: %s\n' % py_file)
-      os.remove(py_file)
+    for pattern in [
+        'mediapipe/framework/**/*pb2.py', 'mediapipe/calculators/**/*pb2.py',
+        'mediapipe/gpu/**/*pb2.py', 'mediapipe/util/**/*pb2.py'
+    ]:
+      for py_file in glob.glob(pattern, recursive=True):
+        sys.stderr.write('removing generated files: %s\n' % py_file)
+        os.remove(py_file)
     for binarypb_file in glob.glob(
         'mediapipe/modules/**/*.binarypb', recursive=True):
       sys.stderr.write('removing generated binary graphs: %s\n' %
                        binarypb_file)
      os.remove(binarypb_file)
     # Restore the original init file from the backup.
-    if os.path.exists(MP_DIR_INIT_PY_BACKUP):
+    if os.path.exists(_get_backup_file(MP_DIR_INIT_PY)):
       os.remove(MP_DIR_INIT_PY)
-      shutil.move(MP_DIR_INIT_PY_BACKUP, MP_DIR_INIT_PY)
+      shutil.move(_get_backup_file(MP_DIR_INIT_PY), MP_DIR_INIT_PY)
     # Restore the original BUILD file from the backup.
-    if os.path.exists(MP_THIRD_PARTY_BUILD_BACKUP):
+    if os.path.exists(_get_backup_file(MP_THIRD_PARTY_BUILD)):
       os.remove(MP_THIRD_PARTY_BUILD)
-      shutil.move(MP_THIRD_PARTY_BUILD_BACKUP, MP_THIRD_PARTY_BUILD)
+      shutil.move(_get_backup_file(MP_THIRD_PARTY_BUILD), MP_THIRD_PARTY_BUILD)
+    for init_py in SUBDIR_INIT_PY_FILES:
+      os.remove(init_py)
     clean.clean.run(self)
@@ -387,6 +430,7 @@ setuptools.setup(
         'Intended Audience :: Science/Research',
         'License :: OSI Approved :: Apache Software License',
         'Operating System :: MacOS :: MacOS X',
+        'Operating System :: Microsoft :: Windows',
         'Operating System :: POSIX :: Linux',
         'Programming Language :: Python :: 3.6',
         'Programming Language :: Python :: 3.7',
diff --git a/setup_android_sdk_and_ndk.sh b/setup_android_sdk_and_ndk.sh
index 4236d84d3..11f33555e 100644
--- a/setup_android_sdk_and_ndk.sh
+++ b/setup_android_sdk_and_ndk.sh
@@ -23,8 +23,10 @@ set -e
 
 if [ "$(uname)" == "Darwin" ]; then
   platform="darwin"
+  platform_android_sdk="mac"
 elif [ "$(expr substr $(uname -s) 1 5)" == "Linux" ]; then
   platform="linux"
+  platform_android_sdk="linux"
 fi
 
 if [[ $ANDROID_HOME ]] && [[ $ANDROID_NDK_HOME ]]
@@ -62,7 +64,7 @@ then
 else
   rm -rf /tmp/android_sdk/
   mkdir /tmp/android_sdk/
-  curl https://dl.google.com/android/repository/commandlinetools-${platform}-6609375_latest.zip -o /tmp/android_sdk/commandline_tools.zip
+  curl https://dl.google.com/android/repository/commandlinetools-${platform_android_sdk}-6609375_latest.zip -o /tmp/android_sdk/commandline_tools.zip
   unzip /tmp/android_sdk/commandline_tools.zip -d /tmp/android_sdk/
   mkdir -p $android_sdk_path
   /tmp/android_sdk/tools/bin/sdkmanager --update --sdk_root=${android_sdk_path}
diff --git a/third_party/ceres_solver_compatibility_fixes.diff b/third_party/ceres_solver_compatibility_fixes.diff
index 87077d06e..7b1abc9d6 100644
--- a/third_party/ceres_solver_compatibility_fixes.diff
+++ b/third_party/ceres_solver_compatibility_fixes.diff
@@ -1,22 +1,14 @@
 diff --git a/bazel/ceres.bzl b/bazel/ceres.bzl
-index d90e5a3..32a3fef 100644
+index ce170b2..bb5aa82 100644
 --- a/bazel/ceres.bzl
 +++ b/bazel/ceres.bzl
-@@ -180,7 +180,6 @@ def ceres_library(name,
-     ]),
-     copts = [
-         "-I" + internal,
--        "-Wno-sign-compare",
-     ] + schur_eliminator_copts,
-
-     # These include directories and defines are propagated to other targets
-@@ -202,7 +201,7 @@ def ceres_library(name,
+@@ -204,7 +204,7 @@ def ceres_library(name,
      ],
      visibility = ["//visibility:public"],
      deps = [
--        "@com_gitlab_libeigen_eigen//:eigen",
-        "@com_github_google_glog//:glog",
-+        "@eigen_archive//:eigen",
++        "@eigen_archive//:eigen",  # from TensorFlow
+        "@com_github_glog_glog//:glog",
      ],
-     )
\ No newline at end of file
+     )
diff --git a/third_party/com_github_glog_glog_f2cf2e1bd040fd15016af53598db0cb9b16a6655.diff b/third_party/com_github_glog_glog_f2cf2e1bd040fd15016af53598db0cb9b16a6655.diff
new file mode 100644
index 000000000..560e83ecc
--- /dev/null
+++ b/third_party/com_github_glog_glog_f2cf2e1bd040fd15016af53598db0cb9b16a6655.diff
@@ -0,0 +1,45 @@
+https://github.com/google/glog/pull/342
+
+diff --git a/CONTRIBUTORS b/CONTRIBUTORS
+index d63f62d1..aa0dd4a8 100644
+--- a/CONTRIBUTORS
++++ b/CONTRIBUTORS
+@@ -26,6 +26,7 @@ Abhishek Dasgupta
+ Abhishek Parmar
+ Andrew Schwartzmeyer
+ Andy Ying
++Bret McKee
+ Brian Silverman
+ Fumitoshi Ukai
+ Guillaume Dumont
+diff --git a/src/glog/logging.h.in b/src/glog/logging.h.in
+index 9968b96d..f6dccb29 100644
+--- a/src/glog/logging.h.in
++++ b/src/glog/logging.h.in
+@@ -649,6 +649,10 @@ void MakeCheckOpValueString(std::ostream* os, const signed char& v);
+ template <> GOOGLE_GLOG_DLL_DECL
+ void MakeCheckOpValueString(std::ostream* os, const unsigned char& v);
+ 
++// Provide printable value for nullptr_t
++template <> GOOGLE_GLOG_DLL_DECL
++void MakeCheckOpValueString(std::ostream* os, const std::nullptr_t& v);
++
+ // Build the error message string. Specify no inlining for code size.
+ template <typename T1, typename T2>
+ std::string* MakeCheckOpString(const T1& v1, const T2& v2, const char* exprtext)
+diff --git a/src/logging.cc b/src/logging.cc
+index 0c86cf62..256655e5 100644
+--- a/src/logging.cc
++++ b/src/logging.cc
+@@ -2163,6 +2163,11 @@ void MakeCheckOpValueString(std::ostream* os, const unsigned char& v) {
+   }
+ }
+ 
++template <>
++void MakeCheckOpValueString(std::ostream* os, const std::nullptr_t& v) {
++  (*os) << "nullptr";
++}
++
+ void InitGoogleLogging(const char* argv0) {
+   glog_internal_namespace_::InitGoogleLoggingUtilities(argv0);
+ }
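The vendored glog patch above (upstream PR google/glog#342) gives `MakeCheckOpValueString` a `std::nullptr_t` specialization, so `CHECK_EQ`-style comparisons against a literal `nullptr` can both compile and print a readable value on failure. A sketch of the usage it unblocks (hypothetical function, assuming the patched glog):

```cpp
#include <glog/logging.h>

// With the std::nullptr_t specialization in place, a failing check prints
// "nullptr" for the right-hand side instead of failing to build.
void MustBeUnset(const int* ptr) {
  CHECK_EQ(ptr, nullptr) << "expected ptr to be unset";
}
```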
diff --git a/third_party/org_tensorflow_compatibility_fixes.diff b/third_party/org_tensorflow_compatibility_fixes.diff
index 2f965cf41..502a994e8 100644
--- a/third_party/org_tensorflow_compatibility_fixes.diff
+++ b/third_party/org_tensorflow_compatibility_fixes.diff
@@ -34,4 +34,17 @@ index ba50783765..5de5ea01f0 100644
 -#include
  #include
  #include
- #endif
+ #include
+diff --git a/tensorflow/lite/delegates/gpu/cl/serialization.fbs b/tensorflow/lite/delegates/gpu/cl/serialization.fbs
+index 67bd587162e..2a3c6bd30dc 100644
+--- a/tensorflow/lite/delegates/gpu/cl/serialization.fbs
++++ b/tensorflow/lite/delegates/gpu/cl/serialization.fbs
+@@ -12,7 +12,7 @@
+ // See the License for the specific language governing permissions and
+ // limitations under the License.
+ 
+-include "tensorflow/lite/delegates/gpu/common/task/serialization_base.fbs";
++include "../common/task/serialization_base.fbs";
+ 
+ namespace tflite.gpu.cl.data;
+ 
diff --git a/third_party/org_tensorflow_objc_cxx17.diff b/third_party/org_tensorflow_objc_cxx17.diff
new file mode 100644
index 000000000..3242f65bd
--- /dev/null
+++ b/third_party/org_tensorflow_objc_cxx17.diff
@@ -0,0 +1,24 @@
+diff --git a/tensorflow/lite/delegates/gpu/BUILD b/tensorflow/lite/delegates/gpu/BUILD
+index 069230ebcf6..3924d7cced7 100644
+--- a/tensorflow/lite/delegates/gpu/BUILD
++++ b/tensorflow/lite/delegates/gpu/BUILD
+@@ -83,6 +83,7 @@ objc_library(
+     hdrs = ["metal_delegate.h"],
+     module_name = "TensorFlowLiteCMetal",
+     sdk_frameworks = ["Metal"],
++    copts = ["-std=c++17"],
+     deps = [
+         "//tensorflow/lite:kernel_api",
+         "//tensorflow/lite:minimal_logging",
+diff --git a/tensorflow/lite/delegates/gpu/metal/BUILD b/tensorflow/lite/delegates/gpu/metal/BUILD
+index 6dcde34a62f..1adfc28aad9 100644
+--- a/tensorflow/lite/delegates/gpu/metal/BUILD
++++ b/tensorflow/lite/delegates/gpu/metal/BUILD
+@@ -17,6 +17,7 @@ package(
+ 
+ DEFAULT_COPTS = [
+     "-Wno-shorten-64-to-32",
++    "-std=c++17",
+ ]
+ 
+ cc_library(