diff --git a/.bazelversion b/.bazelversion
index 91ff57278..f3b5af39e 100644
--- a/.bazelversion
+++ b/.bazelversion
@@ -1 +1 @@
-5.2.0
+6.1.1
diff --git a/Dockerfile b/Dockerfile
index 3df22cc04..03b335823 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -61,7 +61,7 @@ RUN pip3 install tf_slim
RUN ln -s /usr/bin/python3 /usr/bin/python
# Install bazel
-ARG BAZEL_VERSION=5.2.0
+ARG BAZEL_VERSION=6.1.1
RUN mkdir /bazel && \
wget --no-check-certificate -O /bazel/installer.sh "https://github.com/bazelbuild/bazel/releases/download/${BAZEL_VERSION}/b\
azel-${BAZEL_VERSION}-installer-linux-x86_64.sh" && \
diff --git a/README.md b/README.md
index 012ea3a27..a82c88ab1 100644
--- a/README.md
+++ b/README.md
@@ -6,6 +6,20 @@ nav_order: 1

+----
+
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+*This notice and web page will be removed on June 1, 2023.*
+
+----
+
+
+
+
+
--------------------------------------------------------------------------------
## Live ML anywhere
@@ -21,15 +35,6 @@ ML solutions for live and streaming media.
----
-**Attention:** *Thanks for your interest in MediaPipe! We are moving to
-[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
-as the primary developer documentation
-site for MediaPipe starting April 3, 2023.*
-
-*This notice and web page will be removed on April 3, 2023.*
-
-----
-
## ML solutions in MediaPipe
Face Detection | Face Mesh | Iris | Hands | Pose | Holistic
diff --git a/WORKSPACE b/WORKSPACE
index 17e96c0b2..199b6a000 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -54,6 +54,76 @@ load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependen
rules_foreign_cc_dependencies()
+http_archive(
+ name = "com_google_protobuf",
+ sha256 = "87407cd28e7a9c95d9f61a098a53cf031109d451a7763e7dd1253abf8b4df422",
+ strip_prefix = "protobuf-3.19.1",
+ urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.19.1.tar.gz"],
+ patches = [
+ "@//third_party:com_google_protobuf_fixes.diff"
+ ],
+ patch_args = [
+ "-p1",
+ ],
+)
+
+# Load Zlib before initializing TensorFlow and the iOS build rules to guarantee
+# that the target @zlib//:mini_zlib is available
+http_archive(
+ name = "zlib",
+ build_file = "@//third_party:zlib.BUILD",
+ sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
+ strip_prefix = "zlib-1.2.11",
+ urls = [
+ "http://mirror.bazel.build/zlib.net/fossils/zlib-1.2.11.tar.gz",
+ "http://zlib.net/fossils/zlib-1.2.11.tar.gz", # 2017-01-15
+ ],
+ patches = [
+ "@//third_party:zlib.diff",
+ ],
+ patch_args = [
+ "-p1",
+ ],
+)
+
+# iOS basic build deps.
+http_archive(
+ name = "build_bazel_rules_apple",
+ sha256 = "3e2c7ae0ddd181c4053b6491dad1d01ae29011bc322ca87eea45957c76d3a0c3",
+ url = "https://github.com/bazelbuild/rules_apple/releases/download/2.1.0/rules_apple.2.1.0.tar.gz",
+ patches = [
+ # Bypass checking ios unit test runner when building MP ios applications.
+ "@//third_party:build_bazel_rules_apple_bypass_test_runner_check.diff"
+ ],
+ patch_args = [
+ "-p1",
+ ],
+)
+
+load(
+ "@build_bazel_rules_apple//apple:repositories.bzl",
+ "apple_rules_dependencies",
+)
+apple_rules_dependencies()
+
+load(
+ "@build_bazel_rules_swift//swift:repositories.bzl",
+ "swift_rules_dependencies",
+)
+swift_rules_dependencies()
+
+load(
+ "@build_bazel_rules_swift//swift:extras.bzl",
+ "swift_rules_extra_dependencies",
+)
+swift_rules_extra_dependencies()
+
+load(
+ "@build_bazel_apple_support//lib:repositories.bzl",
+ "apple_support_dependencies",
+)
+apple_support_dependencies()
+
# This is used to select all contents of the archives for CMake-based packages to give CMake access to them.
all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""
@@ -133,19 +203,6 @@ http_archive(
urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.19.1.tar.gz"],
)
-http_archive(
- name = "com_google_protobuf",
- sha256 = "87407cd28e7a9c95d9f61a098a53cf031109d451a7763e7dd1253abf8b4df422",
- strip_prefix = "protobuf-3.19.1",
- urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.19.1.tar.gz"],
- patches = [
- "@//third_party:com_google_protobuf_fixes.diff"
- ],
- patch_args = [
- "-p1",
- ],
-)
-
load("@//third_party/flatbuffers:workspace.bzl", flatbuffers = "repo")
flatbuffers()
@@ -319,63 +376,6 @@ http_archive(
],
)
-# Load Zlib before initializing TensorFlow and the iOS build rules to guarantee
-# that the target @zlib//:mini_zlib is available
-http_archive(
- name = "zlib",
- build_file = "@//third_party:zlib.BUILD",
- sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
- strip_prefix = "zlib-1.2.11",
- urls = [
- "http://mirror.bazel.build/zlib.net/fossils/zlib-1.2.11.tar.gz",
- "http://zlib.net/fossils/zlib-1.2.11.tar.gz", # 2017-01-15
- ],
- patches = [
- "@//third_party:zlib.diff",
- ],
- patch_args = [
- "-p1",
- ],
-)
-
-# iOS basic build deps.
-http_archive(
- name = "build_bazel_rules_apple",
- sha256 = "f94e6dddf74739ef5cb30f000e13a2a613f6ebfa5e63588305a71fce8a8a9911",
- url = "https://github.com/bazelbuild/rules_apple/releases/download/1.1.3/rules_apple.1.1.3.tar.gz",
- patches = [
- # Bypass checking ios unit test runner when building MP ios applications.
- "@//third_party:build_bazel_rules_apple_bypass_test_runner_check.diff"
- ],
- patch_args = [
- "-p1",
- ],
-)
-
-load(
- "@build_bazel_rules_apple//apple:repositories.bzl",
- "apple_rules_dependencies",
-)
-apple_rules_dependencies()
-
-load(
- "@build_bazel_rules_swift//swift:repositories.bzl",
- "swift_rules_dependencies",
-)
-swift_rules_dependencies()
-
-load(
- "@build_bazel_rules_swift//swift:extras.bzl",
- "swift_rules_extra_dependencies",
-)
-swift_rules_extra_dependencies()
-
-load(
- "@build_bazel_apple_support//lib:repositories.bzl",
- "apple_support_dependencies",
-)
-apple_support_dependencies()
-
# More iOS deps.
http_archive(
diff --git a/docs/framework_concepts/building_graphs_cpp.md b/docs/framework_concepts/building_graphs_cpp.md
index 250cd89e2..26cdfe1e4 100644
--- a/docs/framework_concepts/building_graphs_cpp.md
+++ b/docs/framework_concepts/building_graphs_cpp.md
@@ -1,5 +1,6 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/framework/framework_concepts/graphs_cpp
title: Building Graphs in C++
parent: Graphs
nav_order: 1
@@ -12,6 +13,12 @@ nav_order: 1
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
+
C++ graph builder is a powerful tool for:
* Building complex graphs
diff --git a/docs/framework_concepts/calculators.md b/docs/framework_concepts/calculators.md
index 5c51a3ec5..3a3661dd4 100644
--- a/docs/framework_concepts/calculators.md
+++ b/docs/framework_concepts/calculators.md
@@ -13,6 +13,12 @@ nav_order: 1
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
+
Each calculator is a node of a graph. We describe how to create a new
calculator, how to initialize a calculator, how to perform its calculations,
input and output streams, timestamps, and options. Each node in the graph is
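The calculator authoring workflow referenced in the excerpt above boils down to subclassing `CalculatorBase`; a minimal pass-through sketch, assuming the standard `GetContract`/`Process` interface (the class name and stream indices here are illustrative only):

```cpp
#include "mediapipe/framework/calculator_framework.h"

namespace mediapipe {

// Minimal sketch: forwards every input packet unchanged.
class PassThroughExampleCalculator : public CalculatorBase {
 public:
  static absl::Status GetContract(CalculatorContract* cc) {
    cc->Inputs().Index(0).SetAny();  // accept packets of any type
    cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0));
    return absl::OkStatus();
  }

  absl::Status Process(CalculatorContext* cc) override {
    cc->Outputs().Index(0).AddPacket(cc->Inputs().Index(0).Value());
    return absl::OkStatus();
  }
};
REGISTER_CALCULATOR(PassThroughExampleCalculator);

}  // namespace mediapipe
```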
diff --git a/docs/framework_concepts/framework_concepts.md b/docs/framework_concepts/framework_concepts.md
index 5d953480e..004c75cff 100644
--- a/docs/framework_concepts/framework_concepts.md
+++ b/docs/framework_concepts/framework_concepts.md
@@ -14,6 +14,12 @@ has_toc: false
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
+
## The basics
### Packet
diff --git a/docs/framework_concepts/gpu.md b/docs/framework_concepts/gpu.md
index 3c411d55c..8900ab3b4 100644
--- a/docs/framework_concepts/gpu.md
+++ b/docs/framework_concepts/gpu.md
@@ -13,6 +13,12 @@ nav_order: 5
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
+
## Overview
MediaPipe supports calculator nodes for GPU compute and rendering, and allows combining multiple GPU nodes, as well as mixing them with CPU based calculator nodes. There exist several GPU APIs on mobile platforms (eg, OpenGL ES, Metal and Vulkan). MediaPipe does not attempt to offer a single cross-API GPU abstraction. Individual nodes can be written using different APIs, allowing them to take advantage of platform specific features when needed.
diff --git a/docs/framework_concepts/graphs.md b/docs/framework_concepts/graphs.md
index 0d38c75fc..5f9c68e08 100644
--- a/docs/framework_concepts/graphs.md
+++ b/docs/framework_concepts/graphs.md
@@ -13,6 +13,12 @@ nav_order: 2
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
+
## Graph
A `CalculatorGraphConfig` proto specifies the topology and functionality of a
diff --git a/docs/framework_concepts/packets.md b/docs/framework_concepts/packets.md
index 100bc6b01..1bfad376d 100644
--- a/docs/framework_concepts/packets.md
+++ b/docs/framework_concepts/packets.md
@@ -13,6 +13,12 @@ nav_order: 3
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
+
Calculators communicate by sending and receiving packets. Typically a single
packet is sent along each input stream at each input timestamp. A packet can
contain any kind of data, such as a single frame of video or a single integer
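The packet concept described in the excerpt above can be illustrated with the framework's `MakePacket` helper; a minimal sketch, with an arbitrary payload and timestamp:

```cpp
#include "mediapipe/framework/packet.h"
#include "mediapipe/framework/timestamp.h"

// Builds a packet holding an int payload, stamped at t = 1000 microseconds.
mediapipe::Packet MakeExamplePacket() {
  return mediapipe::MakePacket<int>(42).At(mediapipe::Timestamp(1000));
}
// MakeExamplePacket().Get<int>() == 42
```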
diff --git a/docs/framework_concepts/realtime_streams.md b/docs/framework_concepts/realtime_streams.md
index 43d147f55..60f586cb9 100644
--- a/docs/framework_concepts/realtime_streams.md
+++ b/docs/framework_concepts/realtime_streams.md
@@ -13,6 +13,12 @@ nav_order: 6
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
+
## Real-time timestamps
MediaPipe calculator graphs are often used to process streams of video or audio
diff --git a/docs/framework_concepts/synchronization.md b/docs/framework_concepts/synchronization.md
index e12d077a7..8a0a907c5 100644
--- a/docs/framework_concepts/synchronization.md
+++ b/docs/framework_concepts/synchronization.md
@@ -13,6 +13,12 @@ nav_order: 4
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
+
## Scheduling mechanics
Data processing in a MediaPipe graph occurs inside processing nodes defined as
diff --git a/docs/getting_started/android.md b/docs/getting_started/android.md
index cb99a6fef..83fbd1c93 100644
--- a/docs/getting_started/android.md
+++ b/docs/getting_started/android.md
@@ -15,6 +15,12 @@ nav_order: 1
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
+
Please follow instructions below to build Android example apps in the supported
MediaPipe [solutions](../solutions/solutions.md). To learn more about these
example apps, start from [Hello World! on Android](./hello_world_android.md).
diff --git a/docs/getting_started/android_archive_library.md b/docs/getting_started/android_archive_library.md
index 9b92ef498..7d98b32c5 100644
--- a/docs/getting_started/android_archive_library.md
+++ b/docs/getting_started/android_archive_library.md
@@ -14,6 +14,12 @@ nav_order: 3
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
+
***Experimental Only***
The MediaPipe Android Archive (AAR) library is a convenient way to use MediaPipe
diff --git a/docs/getting_started/android_solutions.md b/docs/getting_started/android_solutions.md
index 0c492c1bb..159d1358d 100644
--- a/docs/getting_started/android_solutions.md
+++ b/docs/getting_started/android_solutions.md
@@ -1,5 +1,6 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/
title: MediaPipe Android Solutions
parent: MediaPipe on Android
grand_parent: Getting Started
@@ -13,14 +14,9 @@ nav_order: 2
{:toc}
---
-**Attention:** *Thanks for your interest in MediaPipe! We are moving to
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
-as the primary developer documentation
-site for MediaPipe starting April 3, 2023. This content will not be moved to
-the new site, but will remain available in the source code repository on an
-as-is basis.*
-
-*This notice and web page will be removed on April 3, 2023.*
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
----
diff --git a/docs/getting_started/building_examples.md b/docs/getting_started/building_examples.md
index 20c30bef2..a77f6ea66 100644
--- a/docs/getting_started/building_examples.md
+++ b/docs/getting_started/building_examples.md
@@ -1,5 +1,6 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/
title: Building MediaPipe Examples
parent: Getting Started
nav_exclude: true
@@ -12,14 +13,9 @@ nav_exclude: true
{:toc}
---
-**Attention:** *Thanks for your interest in MediaPipe! We are moving to
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
-as the primary developer documentation
-site for MediaPipe starting April 3, 2023. This content will not be moved to
-the new site, but will remain available in the source code repository on an
-as-is basis.*
-
-*This notice and web page will be removed on April 3, 2023.*
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
----
diff --git a/docs/getting_started/cpp.md b/docs/getting_started/cpp.md
index 3afde767f..d708866a7 100644
--- a/docs/getting_started/cpp.md
+++ b/docs/getting_started/cpp.md
@@ -15,6 +15,12 @@ nav_order: 5
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
+
Please follow instructions below to build C++ command-line example apps in the
supported MediaPipe [solutions](../solutions/solutions.md). To learn more about
these example apps, start from [Hello World! in C++](./hello_world_cpp.md).
diff --git a/docs/getting_started/faq.md b/docs/getting_started/faq.md
index b7c24e6ec..ca50ae530 100644
--- a/docs/getting_started/faq.md
+++ b/docs/getting_started/faq.md
@@ -13,6 +13,12 @@ nav_order: 9
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
+
### How to convert ImageFrames and GpuBuffers
The Calculators [`ImageFrameToGpuBufferCalculator`] and
diff --git a/docs/getting_started/getting_started.md b/docs/getting_started/getting_started.md
index fea9cfa73..db605b4b4 100644
--- a/docs/getting_started/getting_started.md
+++ b/docs/getting_started/getting_started.md
@@ -1,5 +1,6 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/
title: Getting Started
nav_order: 2
has_children: true
@@ -12,13 +13,8 @@ has_children: true
{:toc}
---
-**Attention:** *Thanks for your interest in MediaPipe! We are moving to
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
-as the primary developer documentation
-site for MediaPipe starting April 3, 2023. This content will not be moved to
-the new site, but will remain available in the source code repository on an
-as-is basis.*
-
-*This notice and web page will be removed on April 3, 2023.*
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
----
diff --git a/docs/getting_started/gpu_support.md b/docs/getting_started/gpu_support.md
index 4bd1efeb8..6c0e8be0f 100644
--- a/docs/getting_started/gpu_support.md
+++ b/docs/getting_started/gpu_support.md
@@ -13,6 +13,12 @@ nav_order: 7
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
+
## OpenGL ES Support
MediaPipe supports OpenGL ES up to version 3.2 on Android/Linux and up to ES 3.0
diff --git a/docs/getting_started/hello_world_android.md b/docs/getting_started/hello_world_android.md
index 012743048..1148ff5a9 100644
--- a/docs/getting_started/hello_world_android.md
+++ b/docs/getting_started/hello_world_android.md
@@ -14,6 +14,12 @@ nav_order: 1
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
+
## Introduction
This codelab uses MediaPipe on an Android device.
diff --git a/docs/getting_started/hello_world_cpp.md b/docs/getting_started/hello_world_cpp.md
index 880248725..7c8f9be3e 100644
--- a/docs/getting_started/hello_world_cpp.md
+++ b/docs/getting_started/hello_world_cpp.md
@@ -14,6 +14,12 @@ nav_order: 1
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
+
1. Ensure you have a working version of MediaPipe. See
[installation instructions](./install.md).
diff --git a/docs/getting_started/hello_world_ios.md b/docs/getting_started/hello_world_ios.md
index 713dbc79a..4be097646 100644
--- a/docs/getting_started/hello_world_ios.md
+++ b/docs/getting_started/hello_world_ios.md
@@ -14,6 +14,12 @@ nav_order: 1
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
+
## Introduction
This codelab uses MediaPipe on an iOS device.
diff --git a/docs/getting_started/help.md b/docs/getting_started/help.md
index 3ba052741..2cb6b9e68 100644
--- a/docs/getting_started/help.md
+++ b/docs/getting_started/help.md
@@ -13,6 +13,12 @@ nav_order: 8
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
+
## Technical questions
For help with technical or algorithmic questions, visit
diff --git a/docs/getting_started/install.md b/docs/getting_started/install.md
index cc5c0241d..b30284779 100644
--- a/docs/getting_started/install.md
+++ b/docs/getting_started/install.md
@@ -13,6 +13,12 @@ nav_order: 6
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
+
Note: To interoperate with OpenCV, OpenCV 3.x to 4.1 are preferred. OpenCV
2.x currently works but interoperability support may be deprecated in the
future.
@@ -577,7 +583,7 @@ next section.
Option 1. Follow
[the official Bazel documentation](https://docs.bazel.build/versions/master/install-windows.html)
- to install Bazel 5.2.0 or higher.
+ to install Bazel 6.1.1 or higher.
Option 2. Follow the official
[Bazel documentation](https://docs.bazel.build/versions/master/install-bazelisk.html)
diff --git a/docs/getting_started/ios.md b/docs/getting_started/ios.md
index c4b8fb99e..798017aa3 100644
--- a/docs/getting_started/ios.md
+++ b/docs/getting_started/ios.md
@@ -15,6 +15,12 @@ nav_order: 2
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
+
Please follow instructions below to build iOS example apps in the supported
MediaPipe [solutions](../solutions/solutions.md). To learn more about these
example apps, start from
diff --git a/docs/getting_started/javascript.md b/docs/getting_started/javascript.md
index 79269827b..e68d40917 100644
--- a/docs/getting_started/javascript.md
+++ b/docs/getting_started/javascript.md
@@ -1,5 +1,6 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/
title: MediaPipe in JavaScript
parent: Getting Started
nav_order: 4
@@ -14,12 +15,7 @@ nav_order: 4
**Attention:** *Thanks for your interest in MediaPipe! We are moving to
[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
-as the primary developer documentation
-site for MediaPipe starting April 3, 2023. This content will not be moved to
-the new site, but will remain available in the source code repository on an
-as-is basis.*
-
-*This notice and web page will be removed on April 3, 2023.*
+as the primary developer documentation site for MediaPipe starting April 3, 2023.*
----
diff --git a/docs/getting_started/python.md b/docs/getting_started/python.md
index 880d5c85d..43f452a50 100644
--- a/docs/getting_started/python.md
+++ b/docs/getting_started/python.md
@@ -1,5 +1,6 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/
title: MediaPipe in Python
parent: Getting Started
has_children: true
@@ -14,6 +15,12 @@ nav_order: 3
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
+
## Ready-to-use Python Solutions
MediaPipe offers ready-to-use yet customizable Python solutions as a prebuilt
diff --git a/docs/getting_started/python_framework.md b/docs/getting_started/python_framework.md
index db5bc0cd4..6d4b7d450 100644
--- a/docs/getting_started/python_framework.md
+++ b/docs/getting_started/python_framework.md
@@ -12,6 +12,11 @@ nav_order: 1
1. TOC
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
The MediaPipe Python framework grants direct access to the core components of
the MediaPipe C++ framework such as Timestamp, Packet, and CalculatorGraph,
diff --git a/docs/getting_started/troubleshooting.md b/docs/getting_started/troubleshooting.md
index 0da25497d..e7dff332c 100644
--- a/docs/getting_started/troubleshooting.md
+++ b/docs/getting_started/troubleshooting.md
@@ -13,6 +13,12 @@ nav_order: 10
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
+
## Missing Python binary path
The error message:
diff --git a/docs/index.md b/docs/index.md
index 012ea3a27..a82c88ab1 100644
--- a/docs/index.md
+++ b/docs/index.md
@@ -6,6 +6,20 @@ nav_order: 1

+----
+
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+*This notice and web page will be removed on June 1, 2023.*
+
+----
+
+
+
+
+
--------------------------------------------------------------------------------
## Live ML anywhere
@@ -21,15 +35,6 @@ ML solutions for live and streaming media.
----
-**Attention:** *Thanks for your interest in MediaPipe! We are moving to
-[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
-as the primary developer documentation
-site for MediaPipe starting April 3, 2023.*
-
-*This notice and web page will be removed on April 3, 2023.*
-
-----
-
## ML solutions in MediaPipe
Face Detection | Face Mesh | Iris | Hands | Pose | Holistic
diff --git a/docs/index.rst b/docs/index.rst
index 4563284bd..fc7a6f50f 100644
--- a/docs/index.rst
+++ b/docs/index.rst
@@ -1,3 +1,3 @@
MediaPipe
=====================================
-Please see https://docs.mediapipe.dev.
+Please see https://developers.google.com/mediapipe/
diff --git a/docs/solutions/autoflip.md b/docs/solutions/autoflip.md
index d0a763436..a9e1e7052 100644
--- a/docs/solutions/autoflip.md
+++ b/docs/solutions/autoflip.md
@@ -1,7 +1,8 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/guide#legacy
title: AutoFlip (Saliency-aware Video Cropping)
-parent: Solutions
+parent: MediaPipe Legacy Solutions
nav_order: 14
---
@@ -20,12 +21,10 @@ nav_order: 14
**Attention:** *Thank you for your interest in MediaPipe Solutions.
We have ended support for this MediaPipe Legacy Solution as of March 1, 2023.
-For more information, see the new
+For more information, see the
[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
site.*
-*This notice and web page will be removed on April 3, 2023.*
-
----
## Overview
diff --git a/docs/solutions/box_tracking.md b/docs/solutions/box_tracking.md
index 4fecc5150..537916ac4 100644
--- a/docs/solutions/box_tracking.md
+++ b/docs/solutions/box_tracking.md
@@ -1,7 +1,8 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/guide#legacy
title: Box Tracking
-parent: Solutions
+parent: MediaPipe Legacy Solutions
nav_order: 10
---
@@ -20,12 +21,10 @@ nav_order: 10
**Attention:** *Thank you for your interest in MediaPipe Solutions.
We have ended support for this MediaPipe Legacy Solution as of March 1, 2023.
-For more information, see the new
+For more information, see the
[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
site.*
-*This notice and web page will be removed on April 3, 2023.*
-
----
## Overview
diff --git a/docs/solutions/face_detection.md b/docs/solutions/face_detection.md
index 789d9b3dd..f060d062c 100644
--- a/docs/solutions/face_detection.md
+++ b/docs/solutions/face_detection.md
@@ -1,7 +1,8 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/vision/face_detector/
title: Face Detection
-parent: Solutions
+parent: MediaPipe Legacy Solutions
nav_order: 1
---
@@ -20,12 +21,10 @@ nav_order: 1
**Attention:** *Thank you for your interest in MediaPipe Solutions.
As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
-Solution. For more information, see the new
+Solution. For more information, see the
[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
site.*
-*This notice and web page will be removed on April 3, 2023.*
-
----
## Overview
diff --git a/docs/solutions/face_mesh.md b/docs/solutions/face_mesh.md
index 84fbb22a5..ab34ba401 100644
--- a/docs/solutions/face_mesh.md
+++ b/docs/solutions/face_mesh.md
@@ -1,7 +1,8 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/vision/face_landmarker/
title: Face Mesh
-parent: Solutions
+parent: MediaPipe Legacy Solutions
nav_order: 2
---
@@ -20,12 +21,10 @@ nav_order: 2
**Attention:** *Thank you for your interest in MediaPipe Solutions.
As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
-Solution. For more information, see the new
+Solution. For more information, see the
[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
site.*
-*This notice and web page will be removed on April 3, 2023.*
-
----
## Overview
diff --git a/docs/solutions/hair_segmentation.md b/docs/solutions/hair_segmentation.md
index 481cd0058..feb40f9c0 100644
--- a/docs/solutions/hair_segmentation.md
+++ b/docs/solutions/hair_segmentation.md
@@ -1,7 +1,8 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/vision/image_segmenter/
title: Hair Segmentation
-parent: Solutions
+parent: MediaPipe Legacy Solutions
nav_order: 8
---
@@ -19,13 +20,11 @@ nav_order: 8
---
**Attention:** *Thank you for your interest in MediaPipe Solutions.
-As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
-Solution. For more information, see the new
-[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
+As of April 4, 2023, this solution was upgraded to a new MediaPipe
+Solution. For more information, see the
+[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/image_segmenter/)
site.*
-*This notice and web page will be removed on April 3, 2023.*
-
----

diff --git a/docs/solutions/hands.md b/docs/solutions/hands.md
index a4cd90baa..6cf2264ed 100644
--- a/docs/solutions/hands.md
+++ b/docs/solutions/hands.md
@@ -1,7 +1,8 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/vision/hand_landmarker
title: Hands
-parent: Solutions
+parent: MediaPipe Legacy Solutions
nav_order: 4
---
@@ -19,13 +20,11 @@ nav_order: 4
---
**Attention:** *Thank you for your interest in MediaPipe Solutions.
-As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
-Solution. For more information, see the new
-[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
+As of March 1, 2023, this solution was upgraded to a new MediaPipe
+Solution. For more information, see the
+[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/hand_landmarker)
site.*
-*This notice and web page will be removed on April 3, 2023.*
-
----
## Overview
diff --git a/docs/solutions/holistic.md b/docs/solutions/holistic.md
index 876a88572..25288ab55 100644
--- a/docs/solutions/holistic.md
+++ b/docs/solutions/holistic.md
@@ -1,7 +1,8 @@
---
-layout: default
+layout: forward
+target: https://github.com/google/mediapipe/blob/master/docs/solutions/holistic.md
title: Holistic
-parent: Solutions
+parent: MediaPipe Legacy Solutions
nav_order: 6
---
@@ -20,12 +21,10 @@ nav_order: 6
**Attention:** *Thank you for your interest in MediaPipe Solutions.
As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
-Solution. For more information, see the new
+Solution. For more information, see the
[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
site.*
-*This notice and web page will be removed on April 3, 2023.*
-
----
## Overview
diff --git a/docs/solutions/instant_motion_tracking.md b/docs/solutions/instant_motion_tracking.md
index 1e714bdc8..361bc91ff 100644
--- a/docs/solutions/instant_motion_tracking.md
+++ b/docs/solutions/instant_motion_tracking.md
@@ -1,7 +1,8 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/guide#legacy
title: Instant Motion Tracking
-parent: Solutions
+parent: MediaPipe Legacy Solutions
nav_order: 11
---
@@ -20,12 +21,10 @@ nav_order: 11
**Attention:** *Thank you for your interest in MediaPipe Solutions.
We have ended support for this MediaPipe Legacy Solution as of March 1, 2023.
-For more information, see the new
+For more information, see the
[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
site.*
-*This notice and web page will be removed on April 3, 2023.*
-
----
## Overview
diff --git a/docs/solutions/iris.md b/docs/solutions/iris.md
index b8459a0e3..eab3dedf6 100644
--- a/docs/solutions/iris.md
+++ b/docs/solutions/iris.md
@@ -1,7 +1,8 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/vision/face_landmarker/
title: Iris
-parent: Solutions
+parent: MediaPipe Legacy Solutions
nav_order: 3
---
@@ -20,12 +21,10 @@ nav_order: 3
**Attention:** *Thank you for your interest in MediaPipe Solutions.
As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
-Solution. For more information, see the new
+Solution. For more information, see the
[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
site.*
-*This notice and web page will be removed on April 3, 2023.*
-
----
## Overview
diff --git a/docs/solutions/knift.md b/docs/solutions/knift.md
index ad5d39f22..19e04cb5e 100644
--- a/docs/solutions/knift.md
+++ b/docs/solutions/knift.md
@@ -1,7 +1,8 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/guide#legacy
title: KNIFT (Template-based Feature Matching)
-parent: Solutions
+parent: MediaPipe Legacy Solutions
nav_order: 13
---
@@ -20,12 +21,10 @@ nav_order: 13
**Attention:** *Thank you for your interest in MediaPipe Solutions.
We have ended support for this MediaPipe Legacy Solution as of March 1, 2023.
-For more information, see the new
+For more information, see the
[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
site.*
-*This notice and web page will be removed on April 3, 2023.*
-
----
## Overview
diff --git a/docs/solutions/media_sequence.md b/docs/solutions/media_sequence.md
index 5c479ea4c..5224dd371 100644
--- a/docs/solutions/media_sequence.md
+++ b/docs/solutions/media_sequence.md
@@ -1,7 +1,8 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/guide#legacy
title: Dataset Preparation with MediaSequence
-parent: Solutions
+parent: MediaPipe Legacy Solutions
nav_order: 15
---
@@ -24,8 +25,6 @@ For more information, see the new
[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
site.*
-*This notice and web page will be removed on April 3, 2023.*
-
----
## Overview
diff --git a/docs/solutions/models.md b/docs/solutions/models.md
index c45aefa44..0af91eb48 100644
--- a/docs/solutions/models.md
+++ b/docs/solutions/models.md
@@ -1,7 +1,8 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/guide#legacy
title: Models and Model Cards
-parent: Solutions
+parent: MediaPipe Legacy Solutions
nav_order: 30
---
@@ -22,8 +23,6 @@ MediaPipe Legacy Solutions will continue to be provided on an as-is basis.
We encourage you to check out the new MediaPipe Solutions at:
[https://developers.google.com/mediapipe/solutions](https://developers.google.com/mediapipe/solutions)*
-*This notice and web page will be removed on April 3, 2023.*
-
----
### [Face Detection](https://google.github.io/mediapipe/solutions/face_detection)
diff --git a/docs/solutions/object_detection.md b/docs/solutions/object_detection.md
index ef7db8671..efa2e5266 100644
--- a/docs/solutions/object_detection.md
+++ b/docs/solutions/object_detection.md
@@ -1,7 +1,8 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/vision/object_detector/
title: Object Detection
-parent: Solutions
+parent: MediaPipe Legacy Solutions
nav_order: 9
---
@@ -19,13 +20,11 @@ nav_order: 9
---
**Attention:** *Thank you for your interest in MediaPipe Solutions.
-As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
-Solution. For more information, see the new
-[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
+As of March 1, 2023, this solution was upgraded to a new MediaPipe
+Solution. For more information, see the
+[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/object_detector/)
site.*
-*This notice and web page will be removed on April 3, 2023.*
-
----

diff --git a/docs/solutions/object_detection_saved_model.md b/docs/solutions/object_detection_saved_model.md
index 6acac0a1b..1c67bca68 100644
--- a/docs/solutions/object_detection_saved_model.md
+++ b/docs/solutions/object_detection_saved_model.md
@@ -1,4 +1,31 @@
-## TensorFlow/TFLite Object Detection Model
+---
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/vision/object_detector
+title: Object Detection
+parent: MediaPipe Legacy Solutions
+nav_order: 9
+---
+
+# MediaPipe Object Detection
+{: .no_toc }
+
+<details close markdown="block">
+  <summary>
+    Table of contents
+  </summary>
+  {: .text-delta }
+1. TOC
+{:toc}
+</details>
+---
+
+**Attention:** *Thank you for your interest in MediaPipe Solutions.
+As of March 1, 2023, this solution was upgraded to a new MediaPipe
+Solution. For more information, see the
+[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/object_detector)
+site.*
+
+----
### TensorFlow model
diff --git a/docs/solutions/objectron.md b/docs/solutions/objectron.md
index 4ffb27bd0..09f8028bc 100644
--- a/docs/solutions/objectron.md
+++ b/docs/solutions/objectron.md
@@ -1,7 +1,8 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/guide#legacy
title: Objectron (3D Object Detection)
-parent: Solutions
+parent: MediaPipe Legacy Solutions
nav_order: 12
---
@@ -20,12 +21,10 @@ nav_order: 12
**Attention:** *Thank you for your interest in MediaPipe Solutions.
We have ended support for this MediaPipe Legacy Solution as of March 1, 2023.
-For more information, see the new
+For more information, see the
[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
site.*
-*This notice and web page will be removed on April 3, 2023.*
-
----
## Overview
diff --git a/docs/solutions/pose.md b/docs/solutions/pose.md
index ce0197ebd..3c9f14f54 100644
--- a/docs/solutions/pose.md
+++ b/docs/solutions/pose.md
@@ -1,7 +1,8 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/vision/pose_landmarker/
title: Pose
-parent: Solutions
+parent: MediaPipe Legacy Solutions
has_children: true
has_toc: false
nav_order: 5
@@ -22,12 +23,10 @@ nav_order: 5
**Attention:** *Thank you for your interest in MediaPipe Solutions.
As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
-Solution. For more information, see the new
-[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
+Solution. For more information, see the
+[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/pose_landmarker/)
site.*
-*This notice and web page will be removed on April 3, 2023.*
-
----
## Overview
diff --git a/docs/solutions/pose_classification.md b/docs/solutions/pose_classification.md
index 24f20f727..8420e2d7c 100644
--- a/docs/solutions/pose_classification.md
+++ b/docs/solutions/pose_classification.md
@@ -1,8 +1,9 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/vision/pose_landmarker/
title: Pose Classification
parent: Pose
-grand_parent: Solutions
+grand_parent: MediaPipe Legacy Solutions
nav_order: 1
---
@@ -21,12 +22,10 @@ nav_order: 1
**Attention:** *Thank you for your interest in MediaPipe Solutions.
As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
-Solution. For more information, see the new
-[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
+Solution. For more information, see the
+[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/pose_landmarker/)
site.*
-*This notice and web page will be removed on April 3, 2023.*
-
----
## Overview
diff --git a/docs/solutions/selfie_segmentation.md b/docs/solutions/selfie_segmentation.md
index 5febf29f0..17e6fc252 100644
--- a/docs/solutions/selfie_segmentation.md
+++ b/docs/solutions/selfie_segmentation.md
@@ -1,7 +1,8 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/vision/image_segmenter/
title: Selfie Segmentation
-parent: Solutions
+parent: MediaPipe Legacy Solutions
nav_order: 7
---
@@ -19,13 +20,11 @@ nav_order: 7
---
**Attention:** *Thank you for your interest in MediaPipe Solutions.
-As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
-Solution. For more information, see the new
-[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
+As of April 4, 2023, this solution was upgraded to a new MediaPipe
+Solution. For more information, see the
+[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/image_segmenter/)
site.*
-*This notice and web page will be removed on April 3, 2023.*
-
----
## Overview
diff --git a/docs/solutions/solutions.md b/docs/solutions/solutions.md
index b65390af7..7bc32d169 100644
--- a/docs/solutions/solutions.md
+++ b/docs/solutions/solutions.md
@@ -1,12 +1,12 @@
---
layout: default
-title: Solutions
+title: MediaPipe Legacy Solutions
nav_order: 3
has_children: true
has_toc: false
---
-# Solutions
+# MediaPipe Legacy Solutions
{: .no_toc }
1. TOC
@@ -29,6 +29,12 @@ Solutions at:
----
+
+
+
+
+----
+
MediaPipe offers open source cross-platform, customizable ML solutions for live
and streaming media.
diff --git a/docs/solutions/youtube_8m.md b/docs/solutions/youtube_8m.md
index 2e82b85d3..80fb9d9a6 100644
--- a/docs/solutions/youtube_8m.md
+++ b/docs/solutions/youtube_8m.md
@@ -1,7 +1,8 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/guide#legacy
title: YouTube-8M Feature Extraction and Model Inference
-parent: Solutions
+parent: MediaPipe Legacy Solutions
nav_order: 16
---
@@ -20,12 +21,10 @@ nav_order: 16
**Attention:** *Thank you for your interest in MediaPipe Solutions.
We have ended support for this MediaPipe Legacy Solution as of March 1, 2023.
-For more information, see the new
+For more information, see the
[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
site.*
-*This notice and web page will be removed on April 3, 2023.*
-
----
MediaPipe is a useful and general framework for media processing that can assist
diff --git a/docs/tools/performance_benchmarking.md b/docs/tools/performance_benchmarking.md
index f0d334f58..fedbb6e8a 100644
--- a/docs/tools/performance_benchmarking.md
+++ b/docs/tools/performance_benchmarking.md
@@ -1,5 +1,6 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/
title: Performance Benchmarking
parent: Tools
nav_order: 3
@@ -12,6 +13,12 @@ nav_order: 3
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+---
+
*Coming soon.*
Future mediapipe releases will include tools for visualizing and analysing the
diff --git a/docs/tools/tools.md b/docs/tools/tools.md
index 568ba76a7..8e4c55db3 100644
--- a/docs/tools/tools.md
+++ b/docs/tools/tools.md
@@ -1,5 +1,6 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/
title: Tools
nav_order: 4
has_children: true
@@ -11,3 +12,9 @@ has_children: true
1. TOC
{:toc}
---
+
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
diff --git a/docs/tools/tracing_and_profiling.md b/docs/tools/tracing_and_profiling.md
index 861564c99..0ed6f57ab 100644
--- a/docs/tools/tracing_and_profiling.md
+++ b/docs/tools/tracing_and_profiling.md
@@ -1,5 +1,6 @@
---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/
title: Tracing and Profiling
parent: Tools
nav_order: 2
@@ -12,6 +13,12 @@ nav_order: 2
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+----
+
The MediaPipe framework includes a built-in tracer and profiler. The tracer
records various timing events related to packet processing, including the start
and end time of each Calculator::Process call. The tracer writes trace log files
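The tracer described in the excerpt above is switched on through the graph's `profiler_config`; a minimal sketch, assuming the `ProfilerConfig` fields documented on that page (the interval count is an arbitrary placeholder):

```cpp
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/port/parse_text_proto.h"

// Returns a graph config with trace logging enabled.
mediapipe::CalculatorGraphConfig MakeTracedConfig() {
  return mediapipe::ParseTextProtoOrDie<mediapipe::CalculatorGraphConfig>(R"pb(
    profiler_config {
      trace_enabled: true
      enable_profiler: true
      trace_log_interval_count: 200
    }
  )pb");
}
```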
diff --git a/docs/tools/visualizer.md b/docs/tools/visualizer.md
index 45111a36e..eb24a7fc5 100644
--- a/docs/tools/visualizer.md
+++ b/docs/tools/visualizer.md
@@ -13,6 +13,12 @@ nav_order: 1
{:toc}
---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+
+---
+
To help users understand the structure of their calculator graphs and to
understand the overall behavior of their machine learning inference pipelines,
we have built the [MediaPipe Visualizer](https://viz.mediapipe.dev/)
diff --git a/mediapipe/calculators/audio/basic_time_series_calculators.cc b/mediapipe/calculators/audio/basic_time_series_calculators.cc
index f7b24f6f6..5006a0b54 100644
--- a/mediapipe/calculators/audio/basic_time_series_calculators.cc
+++ b/mediapipe/calculators/audio/basic_time_series_calculators.cc
@@ -26,10 +26,11 @@
namespace mediapipe {
namespace {
static bool SafeMultiply(int x, int y, int* result) {
- static_assert(sizeof(int64) >= 2 * sizeof(int),
+ static_assert(sizeof(int64_t) >= 2 * sizeof(int),
"Unable to detect overflow after multiplication");
- const int64 big = static_cast<int64>(x) * static_cast<int64>(y);
- if (big > static_cast<int64>(INT_MIN) && big < static_cast<int64>(INT_MAX)) {
+ const int64_t big = static_cast<int64_t>(x) * static_cast<int64_t>(y);
+ if (big > static_cast<int64_t>(INT_MIN) &&
+ big < static_cast<int64_t>(INT_MAX)) {
if (result != nullptr) *result = static_cast<int>(big);
return true;
} else {
diff --git a/mediapipe/calculators/audio/spectrogram_calculator.cc b/mediapipe/calculators/audio/spectrogram_calculator.cc
index bd4d8f3bf..939e721ab 100644
--- a/mediapipe/calculators/audio/spectrogram_calculator.cc
+++ b/mediapipe/calculators/audio/spectrogram_calculator.cc
@@ -182,12 +182,12 @@ class SpectrogramCalculator : public CalculatorBase {
int frame_duration_samples_;
int frame_overlap_samples_;
// How many samples we've been passed, used for checking input time stamps.
- int64 cumulative_input_samples_;
+ int64_t cumulative_input_samples_;
// How many frames we've emitted, used for calculating output time stamps.
- int64 cumulative_completed_frames_;
+ int64_t cumulative_completed_frames_;
// How many frames were emitted last, used for estimating the timestamp on
// Close when use_local_timestamp_ is true;
- int64 last_completed_frames_;
+ int64_t last_completed_frames_;
Timestamp initial_input_timestamp_;
int num_input_channels_;
// How many frequency bins we emit (=N_FFT/2 + 1).
diff --git a/mediapipe/calculators/audio/spectrogram_calculator_test.cc b/mediapipe/calculators/audio/spectrogram_calculator_test.cc
index 3c2b8435d..b35f30583 100644
--- a/mediapipe/calculators/audio/spectrogram_calculator_test.cc
+++ b/mediapipe/calculators/audio/spectrogram_calculator_test.cc
@@ -92,8 +92,8 @@ class SpectrogramCalculatorTest
.cos()
.transpose();
}
- int64 input_timestamp = round(packet_start_time_seconds *
- Timestamp::kTimestampUnitsPerSecond);
+ int64_t input_timestamp = round(packet_start_time_seconds *
+ Timestamp::kTimestampUnitsPerSecond);
AppendInputPacket(packet_data, input_timestamp);
total_num_input_samples += packet_size_samples;
}
@@ -116,8 +116,8 @@ class SpectrogramCalculatorTest
double packet_start_time_seconds =
kInitialTimestampOffsetMicroseconds * 1e-6 +
total_num_input_samples / input_sample_rate_;
- int64 input_timestamp = round(packet_start_time_seconds *
- Timestamp::kTimestampUnitsPerSecond);
+ int64_t input_timestamp = round(packet_start_time_seconds *
+ Timestamp::kTimestampUnitsPerSecond);
std::unique_ptr<Matrix> impulse(
new Matrix(Matrix::Zero(1, packet_sizes_samples[i])));
(*impulse)(0, impulse_offsets_samples[i]) = 1.0;
@@ -157,8 +157,8 @@ class SpectrogramCalculatorTest
.cos()
.transpose();
}
- int64 input_timestamp = round(packet_start_time_seconds *
- Timestamp::kTimestampUnitsPerSecond);
+ int64_t input_timestamp = round(packet_start_time_seconds *
+ Timestamp::kTimestampUnitsPerSecond);
AppendInputPacket(packet_data, input_timestamp);
total_num_input_samples += packet_size_samples;
}
@@ -218,7 +218,7 @@ class SpectrogramCalculatorTest
const double expected_timestamp_seconds =
packet_timestamp_offset_seconds +
cumulative_output_frames * frame_step_seconds;
- const int64 expected_timestamp_ticks =
+ const int64_t expected_timestamp_ticks =
expected_timestamp_seconds * Timestamp::kTimestampUnitsPerSecond;
EXPECT_EQ(expected_timestamp_ticks, packet.Timestamp().Value());
// Accept the timestamp of the first packet as the baseline for checking
diff --git a/mediapipe/calculators/audio/stabilized_log_calculator_test.cc b/mediapipe/calculators/audio/stabilized_log_calculator_test.cc
index e6e0b5c6f..f04202676 100644
--- a/mediapipe/calculators/audio/stabilized_log_calculator_test.cc
+++ b/mediapipe/calculators/audio/stabilized_log_calculator_test.cc
@@ -54,7 +54,8 @@ TEST_F(StabilizedLogCalculatorTest, BasicOperation) {
std::vector<Matrix> input_data_matrices;
for (int input_packet = 0; input_packet < kNumPackets; ++input_packet) {
- const int64 timestamp = input_packet * Timestamp::kTimestampUnitsPerSecond;
+ const int64_t timestamp =
+ input_packet * Timestamp::kTimestampUnitsPerSecond;
Matrix input_data_matrix =
Matrix::Random(kNumChannels, kNumSamples).array().abs();
input_data_matrices.push_back(input_data_matrix);
@@ -80,7 +81,8 @@ TEST_F(StabilizedLogCalculatorTest, OutputScaleWorks) {
std::vector<Matrix> input_data_matrices;
for (int input_packet = 0; input_packet < kNumPackets; ++input_packet) {
- const int64 timestamp = input_packet * Timestamp::kTimestampUnitsPerSecond;
+ const int64_t timestamp =
+ input_packet * Timestamp::kTimestampUnitsPerSecond;
Matrix input_data_matrix =
Matrix::Random(kNumChannels, kNumSamples).array().abs();
input_data_matrices.push_back(input_data_matrix);
diff --git a/mediapipe/calculators/audio/time_series_framer_calculator.cc b/mediapipe/calculators/audio/time_series_framer_calculator.cc
index fbbf34226..a200b898a 100644
--- a/mediapipe/calculators/audio/time_series_framer_calculator.cc
+++ b/mediapipe/calculators/audio/time_series_framer_calculator.cc
@@ -109,7 +109,7 @@ class TimeSeriesFramerCalculator : public CalculatorBase {
// Returns the timestamp of a sample on a base, which is usually the time
// stamp of a packet.
Timestamp CurrentSampleTimestamp(const Timestamp& timestamp_base,
- int64 number_of_samples) {
+ int64_t number_of_samples) {
return timestamp_base + round(number_of_samples / sample_rate_ *
Timestamp::kTimestampUnitsPerSecond);
}
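`CurrentSampleTimestamp` above is plain microsecond arithmetic (MediaPipe timestamps use one-microsecond units); a worked instance, with a 16 kHz rate chosen purely for illustration:

```cpp
#include <cmath>
#include <cstdint>

// Offset of sample N from the packet's base timestamp, in microseconds.
int64_t SampleOffsetMicros(int64_t number_of_samples, double sample_rate) {
  return static_cast<int64_t>(
      std::round(number_of_samples / sample_rate * 1e6));
}
// SampleOffsetMicros(160, 16000.0) == 10000, i.e. 10 ms into the packet.
```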
@@ -118,10 +118,10 @@ class TimeSeriesFramerCalculator : public CalculatorBase {
// emitted.
int next_frame_step_samples() const {
// All numbers are in input samples.
- const int64 current_output_frame_start = static_cast<int64>(
+ const int64_t current_output_frame_start = static_cast<int64_t>(
round(cumulative_output_frames_ * average_frame_step_samples_));
CHECK_EQ(current_output_frame_start, cumulative_completed_samples_);
- const int64 next_output_frame_start = static_cast<int64>(
+ const int64_t next_output_frame_start = static_cast<int64_t>(
round((cumulative_output_frames_ + 1) * average_frame_step_samples_));
return next_output_frame_start - current_output_frame_start;
}
@@ -134,11 +134,11 @@ class TimeSeriesFramerCalculator : public CalculatorBase {
// emulate_fractional_frame_overlap is true.
double average_frame_step_samples_;
int samples_still_to_drop_;
- int64 cumulative_output_frames_;
+ int64_t cumulative_output_frames_;
// "Completed" samples are samples that are no longer needed because
// the framer has completely stepped past them (taking into account
// any overlap).
- int64 cumulative_completed_samples_;
+ int64_t cumulative_completed_samples_;
Timestamp initial_input_timestamp_;
// The current timestamp is updated along with the incoming packets.
Timestamp current_timestamp_;
diff --git a/mediapipe/calculators/audio/time_series_framer_calculator_test.cc b/mediapipe/calculators/audio/time_series_framer_calculator_test.cc
index ca88cebb5..72e9c88f7 100644
--- a/mediapipe/calculators/audio/time_series_framer_calculator_test.cc
+++ b/mediapipe/calculators/audio/time_series_framer_calculator_test.cc
@@ -49,7 +49,7 @@ class TimeSeriesFramerCalculatorTest
// Returns a float value with the channel and timestamp separated by
// an order of magnitude, for easy parsing by humans.
- float TestValue(int64 timestamp_in_microseconds, int channel) {
+ float TestValue(int64_t timestamp_in_microseconds, int channel) {
return timestamp_in_microseconds + channel / 10.0;
}
@@ -59,7 +59,7 @@ class TimeSeriesFramerCalculatorTest
auto matrix = new Matrix(num_channels, num_samples);
for (int c = 0; c < num_channels; ++c) {
for (int i = 0; i < num_samples; ++i) {
- int64 timestamp = time_series_util::SecondsToSamples(
+ int64_t timestamp = time_series_util::SecondsToSamples(
starting_timestamp_seconds + i / input_sample_rate_,
Timestamp::kTimestampUnitsPerSecond);
(*matrix)(c, i) = TestValue(timestamp, c);
@@ -429,7 +429,7 @@ class TimeSeriesFramerCalculatorTimestampingTest
num_full_packets -= 1;
}
- int64 num_samples = 0;
+ int64_t num_samples = 0;
for (int packet_num = 0; packet_num < num_full_packets; ++packet_num) {
const Packet& packet = output().packets[packet_num];
num_samples += FrameDurationSamples();
diff --git a/mediapipe/calculators/internal/BUILD b/mediapipe/calculators/internal/BUILD
index 8647e3f3f..a92a2f252 100644
--- a/mediapipe/calculators/internal/BUILD
+++ b/mediapipe/calculators/internal/BUILD
@@ -12,25 +12,20 @@
# See the License for the specific language governing permissions and
# limitations under the License.
-load("//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library")
+load("//mediapipe/framework/port:build_config.bzl", "mediapipe_proto_library")
licenses(["notice"])
package(default_visibility = ["//visibility:private"])
-proto_library(
+mediapipe_proto_library(
name = "callback_packet_calculator_proto",
srcs = ["callback_packet_calculator.proto"],
visibility = ["//mediapipe/framework:__subpackages__"],
- deps = ["//mediapipe/framework:calculator_proto"],
-)
-
-mediapipe_cc_proto_library(
- name = "callback_packet_calculator_cc_proto",
- srcs = ["callback_packet_calculator.proto"],
- cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
- visibility = ["//mediapipe/framework:__subpackages__"],
- deps = [":callback_packet_calculator_proto"],
+ deps = [
+ "//mediapipe/framework:calculator_options_proto",
+ "//mediapipe/framework:calculator_proto",
+ ],
)
cc_library(
diff --git a/mediapipe/calculators/tensor/BUILD b/mediapipe/calculators/tensor/BUILD
index fd926a8fe..9ae884253 100644
--- a/mediapipe/calculators/tensor/BUILD
+++ b/mediapipe/calculators/tensor/BUILD
@@ -467,10 +467,6 @@ cc_library(
"-x objective-c++",
"-fobjc-arc", # enable reference-counting
],
- linkopts = [
- "-framework CoreVideo",
- "-framework MetalKit",
- ],
tags = ["ios"],
deps = [
"inference_calculator_interface",
@@ -486,7 +482,13 @@ cc_library(
"@org_tensorflow//tensorflow/lite/delegates/gpu:metal_delegate_internal",
"@org_tensorflow//tensorflow/lite/delegates/gpu/common:shape",
"@org_tensorflow//tensorflow/lite/delegates/gpu/metal:buffer_convert",
- ],
+ ] + select({
+ "//mediapipe:apple": [
+ "//third_party/apple_frameworks:CoreVideo",
+ "//third_party/apple_frameworks:MetalKit",
+ ],
+ "//conditions:default": [],
+ }),
alwayslink = 1,
)
@@ -721,13 +723,6 @@ cc_library(
"//conditions:default": [],
}),
features = ["-layering_check"], # allow depending on tensors_to_detections_calculator_gpu_deps
- linkopts = select({
- "//mediapipe:apple": [
- "-framework CoreVideo",
- "-framework MetalKit",
- ],
- "//conditions:default": [],
- }),
deps = [
":tensors_to_detections_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@@ -744,6 +739,12 @@ cc_library(
] + selects.with_or({
":compute_shader_unavailable": [],
"//conditions:default": [":tensors_to_detections_calculator_gpu_deps"],
+ }) + select({
+ "//mediapipe:apple": [
+ "//third_party/apple_frameworks:CoreVideo",
+ "//third_party/apple_frameworks:MetalKit",
+ ],
+ "//conditions:default": [],
}),
alwayslink = 1,
)
@@ -1333,6 +1334,7 @@ cc_library(
"//mediapipe:ios": [
"//mediapipe/gpu:MPPMetalUtil",
"//mediapipe/gpu:MPPMetalHelper",
+ "//third_party/apple_frameworks:MetalKit",
],
"//conditions:default": [
"@org_tensorflow//tensorflow/lite/delegates/gpu:gl_delegate",
diff --git a/mediapipe/calculators/tensor/image_to_tensor_converter_opencv.cc b/mediapipe/calculators/tensor/image_to_tensor_converter_opencv.cc
index 95e38f89c..bb4c6de79 100644
--- a/mediapipe/calculators/tensor/image_to_tensor_converter_opencv.cc
+++ b/mediapipe/calculators/tensor/image_to_tensor_converter_opencv.cc
@@ -92,13 +92,14 @@ class OpenCvProcessor : public ImageToTensorConverter {
const int dst_data_type = output_channels == 1 ? mat_gray_type_ : mat_type_;
switch (tensor_type_) {
case Tensor::ElementType::kInt8:
- RET_CHECK_GE(output_shape.num_elements(),
- tensor_buffer_offset / sizeof(int8) + num_elements_per_img)
+ RET_CHECK_GE(
+ output_shape.num_elements(),
+ tensor_buffer_offset / sizeof(int8_t) + num_elements_per_img)
<< "The buffer offset + the input image size is larger than the "
"allocated tensor buffer.";
- dst = cv::Mat(
- output_height, output_width, dst_data_type,
- buffer_view.buffer() + tensor_buffer_offset / sizeof(int8));
+ dst = cv::Mat(output_height, output_width, dst_data_type,
+ buffer_view.buffer() +
+ tensor_buffer_offset / sizeof(int8_t));
break;
case Tensor::ElementType::kFloat32:
RET_CHECK_GE(
@@ -113,12 +114,12 @@ class OpenCvProcessor : public ImageToTensorConverter {
case Tensor::ElementType::kUInt8:
RET_CHECK_GE(
output_shape.num_elements(),
- tensor_buffer_offset / sizeof(uint8) + num_elements_per_img)
+ tensor_buffer_offset / sizeof(uint8_t) + num_elements_per_img)
<< "The buffer offset + the input image size is larger than the "
"allocated tensor buffer.";
- dst = cv::Mat(
- output_height, output_width, dst_data_type,
- buffer_view.buffer() + tensor_buffer_offset / sizeof(uint8));
+ dst = cv::Mat(output_height, output_width, dst_data_type,
+ buffer_view.buffer() +
+ tensor_buffer_offset / sizeof(uint8_t));
break;
default:
return InvalidArgumentError(
diff --git a/mediapipe/calculators/tensor/tensor_converter_calculator_test.cc b/mediapipe/calculators/tensor/tensor_converter_calculator_test.cc
index bdea0795e..2cfbd3d1e 100644
--- a/mediapipe/calculators/tensor/tensor_converter_calculator_test.cc
+++ b/mediapipe/calculators/tensor/tensor_converter_calculator_test.cc
@@ -41,7 +41,7 @@ constexpr char kTransposeOptionsString[] =
using RandomEngine = std::mt19937_64;
using testing::Eq;
-const uint32 kSeed = 1234;
+const uint32_t kSeed = 1234;
const int kNumSizes = 8;
const int sizes[kNumSizes][2] = {{1, 1}, {12, 1}, {1, 9}, {2, 2},
{5, 3}, {7, 13}, {16, 32}, {101, 2}};
@@ -49,7 +49,7 @@ const int sizes[kNumSizes][2] = {{1, 1}, {12, 1}, {1, 9}, {2, 2},
class TensorConverterCalculatorTest : public ::testing::Test {
protected:
// Adds a packet with a matrix filled with random values in [0,1].
- void AddRandomMatrix(int num_rows, int num_columns, uint32 seed,
+ void AddRandomMatrix(int num_rows, int num_columns, uint32_t seed,
bool row_major_matrix = false) {
RandomEngine random(kSeed);
std::uniform_real_distribution<> uniform_dist(0, 1.0);
@@ -229,7 +229,7 @@ TEST_F(TensorConverterCalculatorTest, CustomDivAndSub) {
MP_ASSERT_OK(graph.StartRun({}));
auto input_image = absl::make_unique(ImageFormat::GRAY8, 1, 1);
cv::Mat mat = mediapipe::formats::MatView(input_image.get());
- mat.at<uint8>(0, 0) = 200;
+ mat.at<uint8_t>(0, 0) = 200;
MP_ASSERT_OK(graph.AddPacketToInputStream(
"input_image", Adopt(input_image.release()).At(Timestamp(0))));
@@ -286,7 +286,7 @@ TEST_F(TensorConverterCalculatorTest, SetOutputRange) {
MP_ASSERT_OK(graph.StartRun({}));
auto input_image = absl::make_unique(ImageFormat::GRAY8, 1, 1);
cv::Mat mat = mediapipe::formats::MatView(input_image.get());
- mat.at<uint8>(0, 0) = 200;
+ mat.at<uint8_t>(0, 0) = 200;
MP_ASSERT_OK(graph.AddPacketToInputStream(
"input_image", Adopt(input_image.release()).At(Timestamp(0))));
diff --git a/mediapipe/calculators/tensor/tensors_to_classification_calculator.cc b/mediapipe/calculators/tensor/tensors_to_classification_calculator.cc
index 5bfc00ed7..7041c02e4 100644
--- a/mediapipe/calculators/tensor/tensors_to_classification_calculator.cc
+++ b/mediapipe/calculators/tensor/tensors_to_classification_calculator.cc
@@ -84,7 +84,7 @@ class TensorsToClassificationCalculator : public Node {
private:
int top_k_ = 0;
bool sort_by_descending_score_ = false;
- proto_ns::Map<int64, LabelMapItem> local_label_map_;
+ proto_ns::Map<int64_t, LabelMapItem> local_label_map_;
bool label_map_loaded_ = false;
bool is_binary_classification_ = false;
float min_score_threshold_ = std::numeric_limits::lowest();
@@ -98,7 +98,8 @@ class TensorsToClassificationCalculator : public Node {
// These are used to filter out the output classification results.
ClassIndexSet class_index_set_;
bool IsClassIndexAllowed(int class_index);
- const proto_ns::Map<int64, LabelMapItem>& GetLabelMap(CalculatorContext* cc);
+ const proto_ns::Map<int64_t, LabelMapItem>& GetLabelMap(
+ CalculatorContext* cc);
};
MEDIAPIPE_REGISTER_NODE(TensorsToClassificationCalculator);
@@ -252,7 +253,7 @@ bool TensorsToClassificationCalculator::IsClassIndexAllowed(int class_index) {
}
}
-const proto_ns::Map<int64, LabelMapItem>&
+const proto_ns::Map<int64_t, LabelMapItem>&
TensorsToClassificationCalculator::GetLabelMap(CalculatorContext* cc) {
return !local_label_map_.empty()
? local_label_map_
diff --git a/mediapipe/calculators/tensorflow/BUILD b/mediapipe/calculators/tensorflow/BUILD
index e7cc9cc94..0b30689eb 100644
--- a/mediapipe/calculators/tensorflow/BUILD
+++ b/mediapipe/calculators/tensorflow/BUILD
@@ -399,7 +399,7 @@ cc_library(
# On android, this calculator is configured to run with lite protos. Therefore,
# compile your binary with the flag TENSORFLOW_PROTOS=lite.
cc_library(
- name = "tensorflow_inference_calculator",
+ name = "tensorflow_inference_calculator_no_envelope_loader",
srcs = ["tensorflow_inference_calculator.cc"],
deps = [
":tensorflow_inference_calculator_cc_proto",
@@ -432,6 +432,19 @@ cc_library(
alwayslink = 1,
)
+cc_library(
+ name = "tensorflow_inference_calculator",
+ deps = [
+ ":tensorflow_inference_calculator_no_envelope_loader",
+ ] + select({
+        # Since "select" has an "exactly one match" rule, we need a default condition to avoid
+        # a "no matching conditions" error. Since all necessary dependencies are specified in the
+        # "tensorflow_inference_calculator_no_envelope_loader" dependency, it is empty here.
+ "//conditions:default": [],
+ }),
+ alwayslink = 1,
+)
+
cc_library(
name = "tensorflow_session",
hdrs = [
diff --git a/mediapipe/calculators/tflite/BUILD b/mediapipe/calculators/tflite/BUILD
index 435ea9fc1..333de2069 100644
--- a/mediapipe/calculators/tflite/BUILD
+++ b/mediapipe/calculators/tflite/BUILD
@@ -193,13 +193,6 @@ cc_library(
":edge_tpu_pci": ["MEDIAPIPE_EDGE_TPU=pci"],
":edge_tpu_all": ["MEDIAPIPE_EDGE_TPU=all"],
}),
- linkopts = select({
- "//mediapipe:ios": [
- "-framework CoreVideo",
- "-framework MetalKit",
- ],
- "//conditions:default": [],
- }),
deps = [
":tflite_inference_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@@ -222,6 +215,8 @@ cc_library(
"@org_tensorflow//tensorflow/lite/delegates/gpu/metal:buffer_convert",
"@org_tensorflow//tensorflow/lite/delegates/gpu:metal_delegate",
"@org_tensorflow//tensorflow/lite/delegates/gpu:metal_delegate_internal",
+ "//third_party/apple_frameworks:MetalKit",
+ "//third_party/apple_frameworks:CoreVideo",
],
"//conditions:default": [
"//mediapipe/util/tflite:tflite_gpu_runner",
@@ -271,13 +266,6 @@ cc_library(
],
"//conditions:default": [],
}),
- linkopts = select({
- "//mediapipe:ios": [
- "-framework CoreVideo",
- "-framework MetalKit",
- ],
- "//conditions:default": [],
- }),
deps = [
":tflite_converter_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@@ -296,6 +284,8 @@ cc_library(
"//mediapipe/gpu:MPPMetalHelper",
"//mediapipe/objc:mediapipe_framework_ios",
"@org_tensorflow//tensorflow/lite/delegates/gpu:metal_delegate",
+ "//third_party/apple_frameworks:MetalKit",
+ "//third_party/apple_frameworks:CoreVideo",
],
"//conditions:default": [
"//mediapipe/gpu:gl_calculator_helper",
@@ -393,13 +383,6 @@ cc_library(
],
"//conditions:default": [],
}),
- linkopts = select({
- "//mediapipe:ios": [
- "-framework CoreVideo",
- "-framework MetalKit",
- ],
- "//conditions:default": [],
- }),
deps = [
":tflite_tensors_to_detections_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@@ -420,6 +403,8 @@ cc_library(
"//mediapipe/gpu:MPPMetalHelper",
"//mediapipe/objc:mediapipe_framework_ios",
"@org_tensorflow//tensorflow/lite/delegates/gpu:metal_delegate",
+ "//third_party/apple_frameworks:MetalKit",
+ "//third_party/apple_frameworks:CoreVideo",
],
"//conditions:default": [
"//mediapipe/gpu:gl_calculator_helper",
diff --git a/mediapipe/calculators/util/clock_latency_calculator.cc b/mediapipe/calculators/util/clock_latency_calculator.cc
index 5c5711731..beaa41e66 100644
--- a/mediapipe/calculators/util/clock_latency_calculator.cc
+++ b/mediapipe/calculators/util/clock_latency_calculator.cc
@@ -66,17 +66,17 @@ class ClockLatencyCalculator : public CalculatorBase {
absl::Status Process(CalculatorContext* cc) override;
private:
- int64 num_packet_streams_ = -1;
+ int64_t num_packet_streams_ = -1;
};
REGISTER_CALCULATOR(ClockLatencyCalculator);
absl::Status ClockLatencyCalculator::GetContract(CalculatorContract* cc) {
RET_CHECK_GT(cc->Inputs().NumEntries(), 1);
- int64 num_packet_streams = cc->Inputs().NumEntries() - 1;
+ int64_t num_packet_streams = cc->Inputs().NumEntries() - 1;
RET_CHECK_EQ(cc->Outputs().NumEntries(), num_packet_streams);
- for (int64 i = 0; i < num_packet_streams; ++i) {
+ for (int64_t i = 0; i < num_packet_streams; ++i) {
cc->Inputs().Index(i).Set();
cc->Outputs().Index(i).Set();
}
@@ -99,7 +99,7 @@ absl::Status ClockLatencyCalculator::Process(CalculatorContext* cc) {
cc->Inputs().Tag(kReferenceTag).Get();
// Push Duration packets for every input stream we have.
- for (int64 i = 0; i < num_packet_streams_; ++i) {
+ for (int64_t i = 0; i < num_packet_streams_; ++i) {
if (!cc->Inputs().Index(i).IsEmpty()) {
const absl::Time& input_stream_time =
cc->Inputs().Index(i).Get();
diff --git a/mediapipe/calculators/util/collection_has_min_size_calculator_test.cc b/mediapipe/calculators/util/collection_has_min_size_calculator_test.cc
index 62eb1d8ae..71cba9430 100644
--- a/mediapipe/calculators/util/collection_has_min_size_calculator_test.cc
+++ b/mediapipe/calculators/util/collection_has_min_size_calculator_test.cc
@@ -33,7 +33,7 @@ typedef CollectionHasMinSizeCalculator>
TestIntCollectionHasMinSizeCalculator;
REGISTER_CALCULATOR(TestIntCollectionHasMinSizeCalculator);
-void AddInputVector(const std::vector& input, int64 timestamp,
+void AddInputVector(const std::vector& input, int64_t timestamp,
CalculatorRunner* runner) {
runner->MutableInputs()
->Tag(kIterableTag)
diff --git a/mediapipe/calculators/util/detection_label_id_to_text_calculator.cc b/mediapipe/calculators/util/detection_label_id_to_text_calculator.cc
index 0b8dde20d..0c1d6892e 100644
--- a/mediapipe/calculators/util/detection_label_id_to_text_calculator.cc
+++ b/mediapipe/calculators/util/detection_label_id_to_text_calculator.cc
@@ -57,9 +57,10 @@ class DetectionLabelIdToTextCalculator : public CalculatorBase {
private:
// Local label map built from the calculator options' `label_map_path` or
// `label` field.
- proto_ns::Map<int64, LabelMapItem> local_label_map_;
+ proto_ns::Map<int64_t, LabelMapItem> local_label_map_;
bool keep_label_id_;
- const proto_ns::Map<int64, LabelMapItem>& GetLabelMap(CalculatorContext* cc);
+ const proto_ns::Map<int64_t, LabelMapItem>& GetLabelMap(
+ CalculatorContext* cc);
};
REGISTER_CALCULATOR(DetectionLabelIdToTextCalculator);
@@ -115,7 +116,7 @@ absl::Status DetectionLabelIdToTextCalculator::Process(CalculatorContext* cc) {
output_detections.push_back(input_detection);
Detection& output_detection = output_detections.back();
bool has_text_label = false;
- for (const int32 label_id : output_detection.label_id()) {
+ for (const int32_t label_id : output_detection.label_id()) {
if (GetLabelMap(cc).contains(label_id)) {
auto item = GetLabelMap(cc).at(label_id);
output_detection.add_label(item.name());
@@ -136,7 +137,7 @@ absl::Status DetectionLabelIdToTextCalculator::Process(CalculatorContext* cc) {
return absl::OkStatus();
}
-const proto_ns::Map<int64, LabelMapItem>&
+const proto_ns::Map<int64_t, LabelMapItem>&
DetectionLabelIdToTextCalculator::GetLabelMap(CalculatorContext* cc) {
return !local_label_map_.empty()
? local_label_map_
diff --git a/mediapipe/calculators/util/detection_letterbox_removal_calculator_test.cc b/mediapipe/calculators/util/detection_letterbox_removal_calculator_test.cc
index c4f084363..75dd93cc3 100644
--- a/mediapipe/calculators/util/detection_letterbox_removal_calculator_test.cc
+++ b/mediapipe/calculators/util/detection_letterbox_removal_calculator_test.cc
@@ -40,7 +40,7 @@ LocationData CreateRelativeLocationData(double xmin, double ymin, double width,
}
Detection CreateDetection(const std::vector& labels,
- const std::vector<int32>& label_ids,
+ const std::vector<int32_t>& label_ids,
const std::vector& scores,
const LocationData& location_data,
const std::string& feature_tag) {
diff --git a/mediapipe/calculators/util/detection_transformation_calculator_test.cc b/mediapipe/calculators/util/detection_transformation_calculator_test.cc
index e280b5153..30d1bc64b 100644
--- a/mediapipe/calculators/util/detection_transformation_calculator_test.cc
+++ b/mediapipe/calculators/util/detection_transformation_calculator_test.cc
@@ -39,8 +39,8 @@ constexpr char kPixelDetectionsTag[] = "PIXEL_DETECTIONS";
constexpr char kRelativeDetectionListTag[] = "RELATIVE_DETECTION_LIST";
constexpr char kRelativeDetectionsTag[] = "RELATIVE_DETECTIONS";
-Detection DetectionWithBoundingBox(int32 xmin, int32 ymin, int32 width,
- int32 height) {
+Detection DetectionWithBoundingBox(int32_t xmin, int32_t ymin, int32_t width,
+ int32_t height) {
Detection detection;
LocationData* location_data = detection.mutable_location_data();
location_data->set_format(LocationData::BOUNDING_BOX);
diff --git a/mediapipe/calculators/util/detection_unique_id_calculator.cc b/mediapipe/calculators/util/detection_unique_id_calculator.cc
index ac8889ffb..d5b1cffa3 100644
--- a/mediapipe/calculators/util/detection_unique_id_calculator.cc
+++ b/mediapipe/calculators/util/detection_unique_id_calculator.cc
@@ -26,7 +26,7 @@ constexpr char kDetectionListTag[] = "DETECTION_LIST";
// Each detection processed by DetectionUniqueIDCalculator will be assigned a
// unique id that starts from 1. If a detection already has an ID other than 0,
// the ID will be overwritten.
-static int64 detection_id = 0;
+static int64_t detection_id = 0;
inline int GetNextDetectionId() { return ++detection_id; }
diff --git a/mediapipe/calculators/util/detections_to_rects_calculator_test.cc b/mediapipe/calculators/util/detections_to_rects_calculator_test.cc
index 63de60a60..95e18e90c 100644
--- a/mediapipe/calculators/util/detections_to_rects_calculator_test.cc
+++ b/mediapipe/calculators/util/detections_to_rects_calculator_test.cc
@@ -56,8 +56,8 @@ MATCHER_P4(NormRectEq, x_center, y_center, width, height, "") {
testing::Value(arg.height(), testing::FloatEq(height));
}
-Detection DetectionWithLocationData(int32 xmin, int32 ymin, int32 width,
- int32 height) {
+Detection DetectionWithLocationData(int32_t xmin, int32_t ymin, int32_t width,
+ int32_t height) {
Detection detection;
LocationData* location_data = detection.mutable_location_data();
location_data->set_format(LocationData::BOUNDING_BOX);
diff --git a/mediapipe/calculators/util/detections_to_render_data_calculator_test.cc b/mediapipe/calculators/util/detections_to_render_data_calculator_test.cc
index 0d0da2350..6da8c449a 100644
--- a/mediapipe/calculators/util/detections_to_render_data_calculator_test.cc
+++ b/mediapipe/calculators/util/detections_to_render_data_calculator_test.cc
@@ -43,8 +43,8 @@ void VerifyRenderAnnotationColorThickness(
EXPECT_EQ(annotation.thickness(), options.thickness());
}
-LocationData CreateLocationData(int32 xmin, int32 ymin, int32 width,
- int32 height) {
+LocationData CreateLocationData(int32_t xmin, int32_t ymin, int32_t width,
+ int32_t height) {
LocationData location_data;
location_data.set_format(LocationData::BOUNDING_BOX);
location_data.mutable_bounding_box()->set_xmin(xmin);
@@ -66,7 +66,7 @@ LocationData CreateRelativeLocationData(double xmin, double ymin, double width,
}
Detection CreateDetection(const std::vector& labels,
- const std::vector<int32>& label_ids,
+ const std::vector<int32_t>& label_ids,
const std::vector& scores,
const LocationData& location_data,
const std::string& feature_tag) {
diff --git a/mediapipe/calculators/util/filter_collection_calculator.cc b/mediapipe/calculators/util/filter_collection_calculator.cc
index ab361f450..2cf41ead8 100644
--- a/mediapipe/calculators/util/filter_collection_calculator.cc
+++ b/mediapipe/calculators/util/filter_collection_calculator.cc
@@ -24,7 +24,7 @@
namespace mediapipe {
-typedef FilterCollectionCalculator<std::vector<uint64>>
+typedef FilterCollectionCalculator<std::vector<uint64_t>>
FilterUInt64CollectionCalculator;
REGISTER_CALCULATOR(FilterUInt64CollectionCalculator);
diff --git a/mediapipe/calculators/util/from_image_calculator.cc b/mediapipe/calculators/util/from_image_calculator.cc
index 0ddb342eb..706f8727b 100644
--- a/mediapipe/calculators/util/from_image_calculator.cc
+++ b/mediapipe/calculators/util/from_image_calculator.cc
@@ -163,8 +163,8 @@ absl::Status FromImageCalculator::Process(CalculatorContext* cc) {
std::unique_ptr output =
std::make_unique(
input.image_format(), input.width(), input.height(), input.step(),
- const_cast<uint8*>(input.GetImageFrameSharedPtr()->PixelData()),
- [packet_copy_ptr](uint8*) { delete packet_copy_ptr; });
+ const_cast<uint8_t*>(input.GetImageFrameSharedPtr()->PixelData()),
+ [packet_copy_ptr](uint8_t*) { delete packet_copy_ptr; });
cc->Outputs()
.Tag(kImageFrameTag)
.Add(output.release(), cc->InputTimestamp());
diff --git a/mediapipe/calculators/util/packet_frequency_calculator.cc b/mediapipe/calculators/util/packet_frequency_calculator.cc
index 19ffae70e..f9c28f5ff 100644
--- a/mediapipe/calculators/util/packet_frequency_calculator.cc
+++ b/mediapipe/calculators/util/packet_frequency_calculator.cc
@@ -84,23 +84,24 @@ class PacketFrequencyCalculator : public CalculatorBase {
const Timestamp& input_timestamp);
// Adds the input timestamp in the particular stream's timestamp buffer.
- absl::Status AddPacketTimestampForStream(int stream_id, int64 timestamp);
+ absl::Status AddPacketTimestampForStream(int stream_id, int64_t timestamp);
// For the specified input stream, clears timestamps from buffer that are
// older than the configured time_window_sec.
- absl::Status ClearOldpacketTimestamps(int stream_id, int64 current_timestamp);
+ absl::Status ClearOldpacketTimestamps(int stream_id,
+ int64_t current_timestamp);
// Options for the calculator.
PacketFrequencyCalculatorOptions options_;
// Map where key is the input stream ID and value is the timestamp of the
// first packet received on that stream.
- std::map<int, int64> first_timestamp_for_stream_id_usec_;
+ std::map<int, int64_t> first_timestamp_for_stream_id_usec_;
// Map where key is the input stream ID and value is a vector that stores
// timestamps of recently received packets on the stream. Timestamps older
// than the time_window_sec are continuously deleted for all the streams.
- std::map<int, std::vector<int64>> previous_timestamps_for_stream_id_;
+ std::map<int, std::vector<int64_t>> previous_timestamps_for_stream_id_;
};
REGISTER_CALCULATOR(PacketFrequencyCalculator);
@@ -166,7 +167,7 @@ absl::Status PacketFrequencyCalculator::Process(CalculatorContext* cc) {
}
absl::Status PacketFrequencyCalculator::AddPacketTimestampForStream(
- int stream_id, int64 timestamp_usec) {
+ int stream_id, int64_t timestamp_usec) {
if (previous_timestamps_for_stream_id_.find(stream_id) ==
previous_timestamps_for_stream_id_.end()) {
return absl::InvalidArgumentError("Input stream id is invalid");
@@ -178,19 +179,20 @@ absl::Status PacketFrequencyCalculator::AddPacketTimestampForStream(
}
absl::Status PacketFrequencyCalculator::ClearOldpacketTimestamps(
- int stream_id, int64 current_timestamp_usec) {
+ int stream_id, int64_t current_timestamp_usec) {
if (previous_timestamps_for_stream_id_.find(stream_id) ==
previous_timestamps_for_stream_id_.end()) {
return absl::InvalidArgumentError("Input stream id is invalid");
}
auto& timestamps_buffer = previous_timestamps_for_stream_id_[stream_id];
- int64 time_window_usec = options_.time_window_sec() * kSecondsToMicroseconds;
+ int64_t time_window_usec =
+ options_.time_window_sec() * kSecondsToMicroseconds;
timestamps_buffer.erase(
std::remove_if(timestamps_buffer.begin(), timestamps_buffer.end(),
[&time_window_usec,
- &current_timestamp_usec](const int64 timestamp_usec) {
+ &current_timestamp_usec](const int64_t timestamp_usec) {
return current_timestamp_usec - timestamp_usec >
time_window_usec;
}),
diff --git a/mediapipe/calculators/util/packet_latency_calculator.cc b/mediapipe/calculators/util/packet_latency_calculator.cc
index 0e5b2e885..6509f016f 100644
--- a/mediapipe/calculators/util/packet_latency_calculator.cc
+++ b/mediapipe/calculators/util/packet_latency_calculator.cc
@@ -118,24 +118,24 @@ class PacketLatencyCalculator : public CalculatorBase {
std::shared_ptr<::mediapipe::Clock> clock_;
// Clock time when the first reference packet was received.
- int64 first_process_time_usec_ = -1;
+ int64_t first_process_time_usec_ = -1;
// Timestamp of the first reference packet received.
- int64 first_reference_timestamp_usec_ = -1;
+ int64_t first_reference_timestamp_usec_ = -1;
// Number of packet streams.
- int64 num_packet_streams_ = -1;
+ int64_t num_packet_streams_ = -1;
// Latency output for each packet stream.
std::vector packet_latencies_;
// Running sum and count of latencies for each packet stream. This is required
// to compute the average latency.
- std::vector<int64> sum_latencies_usec_;
- std::vector<int64> num_latencies_;
+ std::vector<int64_t> sum_latencies_usec_;
+ std::vector<int64_t> num_latencies_;
// Clock time when last reset was done for histogram and running average.
- int64 last_reset_time_usec_ = -1;
+ int64_t last_reset_time_usec_ = -1;
};
REGISTER_CALCULATOR(PacketLatencyCalculator);
@@ -143,9 +143,9 @@ absl::Status PacketLatencyCalculator::GetContract(CalculatorContract* cc) {
RET_CHECK_GT(cc->Inputs().NumEntries(), 1);
// Input and output streams.
- int64 num_packet_streams = cc->Inputs().NumEntries() - 1;
+ int64_t num_packet_streams = cc->Inputs().NumEntries() - 1;
RET_CHECK_EQ(cc->Outputs().NumEntries(), num_packet_streams);
- for (int64 i = 0; i < num_packet_streams; ++i) {
+ for (int64_t i = 0; i < num_packet_streams; ++i) {
cc->Inputs().Index(i).SetAny();
cc->Outputs().Index(i).Set();
}
@@ -165,8 +165,8 @@ absl::Status PacketLatencyCalculator::GetContract(CalculatorContract* cc) {
void PacketLatencyCalculator::ResetStatistics() {
// Initialize histogram with zero counts and set running average to zero.
- for (int64 i = 0; i < num_packet_streams_; ++i) {
- for (int64 interval_index = 0; interval_index < options_.num_intervals();
+ for (int64_t i = 0; i < num_packet_streams_; ++i) {
+ for (int64_t interval_index = 0; interval_index < options_.num_intervals();
++interval_index) {
packet_latencies_[i].set_counts(interval_index, 0);
}
@@ -196,7 +196,7 @@ absl::Status PacketLatencyCalculator::Open(CalculatorContext* cc) {
packet_latencies_.resize(num_packet_streams_);
sum_latencies_usec_.resize(num_packet_streams_);
num_latencies_.resize(num_packet_streams_);
- for (int64 i = 0; i < num_packet_streams_; ++i) {
+ for (int64_t i = 0; i < num_packet_streams_; ++i) {
// Initialize latency histograms with zero counts.
packet_latencies_[i].set_num_intervals(options_.num_intervals());
packet_latencies_[i].set_interval_size_usec(options_.interval_size_usec());
@@ -208,7 +208,7 @@ absl::Status PacketLatencyCalculator::Open(CalculatorContext* cc) {
if (labels_provided) {
packet_latencies_[i].set_label(options_.packet_labels(i));
} else {
- int64 input_stream_index = cc->Inputs().TagMap()->GetId("", i).value();
+ int64_t input_stream_index = cc->Inputs().TagMap()->GetId("", i).value();
packet_latencies_[i].set_label(
cc->Inputs().TagMap()->Names()[input_stream_index]);
}
@@ -242,7 +242,7 @@ absl::Status PacketLatencyCalculator::Process(CalculatorContext* cc) {
}
if (options_.reset_duration_usec() > 0) {
- const int64 time_now_usec = absl::ToUnixMicros(clock_->TimeNow());
+ const int64_t time_now_usec = absl::ToUnixMicros(clock_->TimeNow());
if (time_now_usec - last_reset_time_usec_ >=
options_.reset_duration_usec()) {
ResetStatistics();
@@ -251,16 +251,16 @@ absl::Status PacketLatencyCalculator::Process(CalculatorContext* cc) {
}
// Update latency info if there is any incoming packet.
- for (int64 i = 0; i < num_packet_streams_; ++i) {
+ for (int64_t i = 0; i < num_packet_streams_; ++i) {
if (!cc->Inputs().Index(i).IsEmpty()) {
const auto& packet_timestamp_usec = cc->InputTimestamp().Value();
// Update latency statistics for this stream.
- int64 current_clock_time_usec = absl::ToUnixMicros(clock_->TimeNow());
- int64 current_calibrated_timestamp_usec =
+ int64_t current_clock_time_usec = absl::ToUnixMicros(clock_->TimeNow());
+ int64_t current_calibrated_timestamp_usec =
(current_clock_time_usec - first_process_time_usec_) +
first_reference_timestamp_usec_;
- int64 packet_latency_usec =
+ int64_t packet_latency_usec =
current_calibrated_timestamp_usec - packet_timestamp_usec;
// Invalid timestamps in input signals could result in negative latencies.
@@ -270,7 +270,7 @@ absl::Status PacketLatencyCalculator::Process(CalculatorContext* cc) {
// Update the latency, running average and histogram for this stream.
packet_latencies_[i].set_current_latency_usec(packet_latency_usec);
- int64 interval_index =
+ int64_t interval_index =
packet_latency_usec / packet_latencies_[i].interval_size_usec();
if (interval_index >= packet_latencies_[i].num_intervals()) {
interval_index = packet_latencies_[i].num_intervals() - 1;
diff --git a/mediapipe/calculators/util/packet_latency_calculator_test.cc b/mediapipe/calculators/util/packet_latency_calculator_test.cc
index 6f03f2e75..d323a14f9 100644
--- a/mediapipe/calculators/util/packet_latency_calculator_test.cc
+++ b/mediapipe/calculators/util/packet_latency_calculator_test.cc
@@ -169,10 +169,10 @@ class PacketLatencyCalculatorTest : public ::testing::Test {
}
PacketLatency CreatePacketLatency(const double latency_usec,
- const int64 num_intervals,
- const int64 interval_size_usec,
+ const int64_t num_intervals,
+ const int64_t interval_size_usec,
const std::vector& counts,
- const int64 avg_latency_usec,
+ const int64_t avg_latency_usec,
const std::string& label) {
PacketLatency latency_info;
latency_info.set_current_latency_usec(latency_usec);
diff --git a/mediapipe/calculators/video/BUILD b/mediapipe/calculators/video/BUILD
index e4aa1bff8..7245b13c2 100644
--- a/mediapipe/calculators/video/BUILD
+++ b/mediapipe/calculators/video/BUILD
@@ -13,7 +13,7 @@
# limitations under the License.
#
-load("//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library")
+load("//mediapipe/framework/port:build_config.bzl", "mediapipe_proto_library")
load(
"//mediapipe/framework/tool:mediapipe_graph.bzl",
"mediapipe_binary_graph",
@@ -23,28 +23,35 @@ licenses(["notice"])
package(default_visibility = ["//visibility:public"])
-proto_library(
+mediapipe_proto_library(
name = "flow_to_image_calculator_proto",
srcs = ["flow_to_image_calculator.proto"],
- deps = ["//mediapipe/framework:calculator_proto"],
+ deps = [
+ "//mediapipe/framework:calculator_options_proto",
+ "//mediapipe/framework:calculator_proto",
+ ],
)
-proto_library(
+mediapipe_proto_library(
name = "opencv_video_encoder_calculator_proto",
srcs = ["opencv_video_encoder_calculator.proto"],
- deps = ["//mediapipe/framework:calculator_proto"],
+ deps = [
+ "//mediapipe/framework:calculator_options_proto",
+ "//mediapipe/framework:calculator_proto",
+ ],
)
-proto_library(
+mediapipe_proto_library(
name = "motion_analysis_calculator_proto",
srcs = ["motion_analysis_calculator.proto"],
deps = [
+ "//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
"//mediapipe/util/tracking:motion_analysis_proto",
],
)
-proto_library(
+mediapipe_proto_library(
name = "flow_packager_calculator_proto",
srcs = ["flow_packager_calculator.proto"],
deps = [
@@ -54,114 +61,45 @@ proto_library(
],
)
-proto_library(
+mediapipe_proto_library(
name = "box_tracker_calculator_proto",
srcs = ["box_tracker_calculator.proto"],
deps = [
+ "//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
"//mediapipe/util/tracking:box_tracker_proto",
],
)
-proto_library(
+mediapipe_proto_library(
name = "tracked_detection_manager_calculator_proto",
srcs = ["tracked_detection_manager_calculator.proto"],
deps = [
+ "//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
"//mediapipe/util/tracking:tracked_detection_manager_config_proto",
],
)
-proto_library(
+mediapipe_proto_library(
name = "box_detector_calculator_proto",
srcs = ["box_detector_calculator.proto"],
deps = [
+ "//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
"//mediapipe/util/tracking:box_detector_proto",
],
)
-proto_library(
+mediapipe_proto_library(
name = "video_pre_stream_calculator_proto",
srcs = ["video_pre_stream_calculator.proto"],
deps = [
+ "//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
],
)
-mediapipe_cc_proto_library(
- name = "motion_analysis_calculator_cc_proto",
- srcs = ["motion_analysis_calculator.proto"],
- cc_deps = [
- "//mediapipe/framework:calculator_cc_proto",
- "//mediapipe/util/tracking:motion_analysis_cc_proto",
- ],
- deps = [":motion_analysis_calculator_proto"],
-)
-
-mediapipe_cc_proto_library(
- name = "flow_packager_calculator_cc_proto",
- srcs = ["flow_packager_calculator.proto"],
- cc_deps = [
- "//mediapipe/framework:calculator_cc_proto",
- "//mediapipe/util/tracking:flow_packager_cc_proto",
- ],
- deps = [":flow_packager_calculator_proto"],
-)
-
-mediapipe_cc_proto_library(
- name = "box_tracker_calculator_cc_proto",
- srcs = ["box_tracker_calculator.proto"],
- cc_deps = [
- "//mediapipe/framework:calculator_cc_proto",
- "//mediapipe/util/tracking:box_tracker_cc_proto",
- ],
- deps = [":box_tracker_calculator_proto"],
-)
-
-mediapipe_cc_proto_library(
- name = "tracked_detection_manager_calculator_cc_proto",
- srcs = ["tracked_detection_manager_calculator.proto"],
- cc_deps = [
- "//mediapipe/framework:calculator_cc_proto",
- "//mediapipe/util/tracking:tracked_detection_manager_config_cc_proto",
- ],
- deps = [":tracked_detection_manager_calculator_proto"],
-)
-
-mediapipe_cc_proto_library(
- name = "box_detector_calculator_cc_proto",
- srcs = ["box_detector_calculator.proto"],
- cc_deps = [
- "//mediapipe/framework:calculator_cc_proto",
- "//mediapipe/util/tracking:box_detector_cc_proto",
- ],
- deps = [":box_detector_calculator_proto"],
-)
-
-mediapipe_cc_proto_library(
- name = "video_pre_stream_calculator_cc_proto",
- srcs = ["video_pre_stream_calculator.proto"],
- cc_deps = [
- "//mediapipe/framework:calculator_cc_proto",
- ],
- deps = [":video_pre_stream_calculator_proto"],
-)
-
-mediapipe_cc_proto_library(
- name = "flow_to_image_calculator_cc_proto",
- srcs = ["flow_to_image_calculator.proto"],
- cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
- deps = [":flow_to_image_calculator_proto"],
-)
-
-mediapipe_cc_proto_library(
- name = "opencv_video_encoder_calculator_cc_proto",
- srcs = ["opencv_video_encoder_calculator.proto"],
- cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
- deps = [":opencv_video_encoder_calculator_proto"],
-)
-
cc_library(
name = "flow_to_image_calculator",
srcs = ["flow_to_image_calculator.cc"],
diff --git a/mediapipe/examples/desktop/autoflip/BUILD b/mediapipe/examples/desktop/autoflip/BUILD
index 340205caa..fe994e2e0 100644
--- a/mediapipe/examples/desktop/autoflip/BUILD
+++ b/mediapipe/examples/desktop/autoflip/BUILD
@@ -1,4 +1,4 @@
-load("//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library")
+load("//mediapipe/framework/port:build_config.bzl", "mediapipe_proto_library")
# Copyright 2019 The MediaPipe Authors.
#
@@ -22,7 +22,7 @@ package(default_visibility = [
"//photos/editing/mobile/mediapipe/proto:__subpackages__",
])
-proto_library(
+mediapipe_proto_library(
name = "autoflip_messages_proto",
srcs = ["autoflip_messages.proto"],
deps = [
@@ -30,29 +30,6 @@ proto_library(
],
)
-java_lite_proto_library(
- name = "autoflip_messages_java_proto_lite",
- visibility = [
- "//java/com/google/android/apps/photos:__subpackages__",
- "//javatests/com/google/android/apps/photos:__subpackages__",
- ],
- deps = [
- ":autoflip_messages_proto",
- ],
-)
-
-mediapipe_cc_proto_library(
- name = "autoflip_messages_cc_proto",
- srcs = ["autoflip_messages.proto"],
- cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
- visibility = [
- "//mediapipe/examples:__subpackages__",
- "//photos/editing/mobile/mediapipe/calculators:__pkg__",
- "//photos/editing/mobile/mediapipe/calculators:__subpackages__",
- ],
- deps = [":autoflip_messages_proto"],
-)
-
cc_binary(
name = "run_autoflip",
data = [
diff --git a/mediapipe/examples/desktop/autoflip/calculators/BUILD b/mediapipe/examples/desktop/autoflip/calculators/BUILD
index 18f56cc4f..a3b2ace2a 100644
--- a/mediapipe/examples/desktop/autoflip/calculators/BUILD
+++ b/mediapipe/examples/desktop/autoflip/calculators/BUILD
@@ -1,4 +1,4 @@
-load("//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library")
+load("//mediapipe/framework/port:build_config.bzl", "mediapipe_proto_library")
# Copyright 2019 The MediaPipe Authors.
#
@@ -40,22 +40,16 @@ cc_library(
alwayslink = 1,
)
-proto_library(
+mediapipe_proto_library(
name = "border_detection_calculator_proto",
srcs = ["border_detection_calculator.proto"],
+ visibility = ["//mediapipe/examples:__subpackages__"],
deps = [
+ "//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
],
)
-mediapipe_cc_proto_library(
- name = "border_detection_calculator_cc_proto",
- srcs = ["border_detection_calculator.proto"],
- cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
- visibility = ["//mediapipe/examples:__subpackages__"],
- deps = [":border_detection_calculator_proto"],
-)
-
cc_library(
name = "content_zooming_calculator_state",
hdrs = ["content_zooming_calculator_state.h"],
@@ -85,27 +79,16 @@ cc_library(
alwayslink = 1,
)
-proto_library(
+mediapipe_proto_library(
name = "content_zooming_calculator_proto",
srcs = ["content_zooming_calculator.proto"],
- deps = [
- "//mediapipe/examples/desktop/autoflip/quality:kinematic_path_solver_proto",
- "//mediapipe/framework:calculator_proto",
- ],
-)
-
-mediapipe_cc_proto_library(
- name = "content_zooming_calculator_cc_proto",
- srcs = ["content_zooming_calculator.proto"],
- cc_deps = [
- "//mediapipe/examples/desktop/autoflip/quality:kinematic_path_solver_cc_proto",
- "//mediapipe/framework:calculator_cc_proto",
- ],
visibility = [
"//mediapipe/examples:__subpackages__",
],
deps = [
- ":content_zooming_calculator_proto",
+ "//mediapipe/examples/desktop/autoflip/quality:kinematic_path_solver_proto",
+ "//mediapipe/framework:calculator_options_proto",
+ "//mediapipe/framework:calculator_proto",
],
)
@@ -177,23 +160,16 @@ cc_library(
alwayslink = 1,
)
-proto_library(
+mediapipe_proto_library(
name = "video_filtering_calculator_proto",
srcs = ["video_filtering_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
+ "//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
],
)
-mediapipe_cc_proto_library(
- name = "video_filtering_calculator_cc_proto",
- srcs = ["video_filtering_calculator.proto"],
- cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
- visibility = ["//visibility:public"],
- deps = [":video_filtering_calculator_proto"],
-)
-
cc_test(
name = "video_filtering_calculator_test",
srcs = ["video_filtering_calculator_test.cc"],
@@ -209,27 +185,17 @@ cc_test(
],
)
-proto_library(
+mediapipe_proto_library(
name = "scene_cropping_calculator_proto",
srcs = ["scene_cropping_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/examples/desktop/autoflip/quality:cropping_proto",
+ "//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
],
)
-mediapipe_cc_proto_library(
- name = "scene_cropping_calculator_cc_proto",
- srcs = ["scene_cropping_calculator.proto"],
- cc_deps = [
- "//mediapipe/examples/desktop/autoflip/quality:cropping_cc_proto",
- "//mediapipe/framework:calculator_cc_proto",
- ],
- visibility = ["//visibility:public"],
- deps = [":scene_cropping_calculator_proto"],
-)
-
cc_library(
name = "scene_cropping_calculator",
srcs = ["scene_cropping_calculator.cc"],
@@ -296,26 +262,17 @@ cc_library(
alwayslink = 1,
)
-proto_library(
+mediapipe_proto_library(
name = "signal_fusing_calculator_proto",
srcs = ["signal_fusing_calculator.proto"],
+ visibility = ["//mediapipe/examples:__subpackages__"],
deps = [
"//mediapipe/examples/desktop/autoflip:autoflip_messages_proto",
+ "//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
],
)
-mediapipe_cc_proto_library(
- name = "signal_fusing_calculator_cc_proto",
- srcs = ["signal_fusing_calculator.proto"],
- cc_deps = [
- "//mediapipe/examples/desktop/autoflip:autoflip_messages_cc_proto",
- "//mediapipe/framework:calculator_cc_proto",
- ],
- visibility = ["//mediapipe/examples:__subpackages__"],
- deps = [":signal_fusing_calculator_proto"],
-)
-
cc_test(
name = "signal_fusing_calculator_test",
srcs = ["signal_fusing_calculator_test.cc"],
@@ -353,18 +310,14 @@ cc_library(
alwayslink = 1,
)
-proto_library(
+mediapipe_proto_library(
name = "shot_boundary_calculator_proto",
srcs = ["shot_boundary_calculator.proto"],
- deps = ["//mediapipe/framework:calculator_proto"],
-)
-
-mediapipe_cc_proto_library(
- name = "shot_boundary_calculator_cc_proto",
- srcs = ["shot_boundary_calculator.proto"],
- cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe/examples:__subpackages__"],
- deps = [":shot_boundary_calculator_proto"],
+ deps = [
+ "//mediapipe/framework:calculator_options_proto",
+ "//mediapipe/framework:calculator_proto",
+ ],
)
cc_test(
@@ -413,26 +366,17 @@ cc_library(
alwayslink = 1,
)
-proto_library(
+mediapipe_proto_library(
name = "face_to_region_calculator_proto",
srcs = ["face_to_region_calculator.proto"],
+ visibility = ["//mediapipe/examples:__subpackages__"],
deps = [
"//mediapipe/examples/desktop/autoflip/quality:visual_scorer_proto",
+ "//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
],
)
-mediapipe_cc_proto_library(
- name = "face_to_region_calculator_cc_proto",
- srcs = ["face_to_region_calculator.proto"],
- cc_deps = [
- "//mediapipe/examples/desktop/autoflip/quality:visual_scorer_cc_proto",
- "//mediapipe/framework:calculator_cc_proto",
- ],
- visibility = ["//mediapipe/examples:__subpackages__"],
- deps = [":face_to_region_calculator_proto"],
-)
-
cc_test(
name = "face_to_region_calculator_test",
srcs = ["face_to_region_calculator_test.cc"],
@@ -454,22 +398,16 @@ cc_test(
],
)
-proto_library(
+mediapipe_proto_library(
name = "localization_to_region_calculator_proto",
srcs = ["localization_to_region_calculator.proto"],
+ visibility = ["//mediapipe/examples:__subpackages__"],
deps = [
+ "//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
],
)
-mediapipe_cc_proto_library(
- name = "localization_to_region_calculator_cc_proto",
- srcs = ["localization_to_region_calculator.proto"],
- cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
- visibility = ["//mediapipe/examples:__subpackages__"],
- deps = [":localization_to_region_calculator_proto"],
-)
-
cc_library(
name = "localization_to_region_calculator",
srcs = ["localization_to_region_calculator.cc"],
diff --git a/mediapipe/examples/desktop/autoflip/calculators/border_detection_calculator.cc b/mediapipe/examples/desktop/autoflip/calculators/border_detection_calculator.cc
index caaa368a7..238bcf8be 100644
--- a/mediapipe/examples/desktop/autoflip/calculators/border_detection_calculator.cc
+++ b/mediapipe/examples/desktop/autoflip/calculators/border_detection_calculator.cc
@@ -214,7 +214,7 @@ double BorderDetectionCalculator::ColorCount(const Color& mask_color,
const cv::Mat& image) const {
int background_count = 0;
for (int i = 0; i < image.rows; i++) {
- const uint8* row_ptr = image.ptr(i);
+ const uint8_t* row_ptr = image.ptr(i);
for (int j = 0; j < image.cols * 3; j += 3) {
if (std::abs(mask_color.r() - static_cast(row_ptr[j + 2])) <=
options_.color_tolerance() &&
diff --git a/mediapipe/examples/desktop/autoflip/calculators/content_zooming_calculator.cc b/mediapipe/examples/desktop/autoflip/calculators/content_zooming_calculator.cc
index 823080786..5241f56e4 100644
--- a/mediapipe/examples/desktop/autoflip/calculators/content_zooming_calculator.cc
+++ b/mediapipe/examples/desktop/autoflip/calculators/content_zooming_calculator.cc
@@ -142,7 +142,7 @@ class ContentZoomingCalculator : public CalculatorBase {
// Stores the first crop rectangle.
mediapipe::NormalizedRect first_rect_;
// Stores the time of the last "only_required" input.
- int64 last_only_required_detection_;
+ int64_t last_only_required_detection_;
// Rect values of last message with detection(s).
int last_measured_height_;
int last_measured_x_offset_;
@@ -500,7 +500,7 @@ bool ContentZoomingCalculator::IsAnimatingToFirstRect(
return false;
}
- const int64 delta_us = (timestamp - first_rect_timestamp_).Value();
+ const int64_t delta_us = (timestamp - first_rect_timestamp_).Value();
return (0 <= delta_us && delta_us <= options_.us_to_first_rect());
}
@@ -522,8 +522,8 @@ absl::StatusOr ContentZoomingCalculator::GetAnimationRect(
RET_CHECK(IsAnimatingToFirstRect(timestamp))
<< "Must only be called if animating to first rect.";
- const int64 delta_us = (timestamp - first_rect_timestamp_).Value();
- const int64 delay = options_.us_to_first_rect_delay();
+ const int64_t delta_us = (timestamp - first_rect_timestamp_).Value();
+ const int64_t delay = options_.us_to_first_rect_delay();
const double interpolation = easeInOutQuad(std::max(
0.0, (delta_us - delay) /
static_cast(options_.us_to_first_rect() - delay)));
diff --git a/mediapipe/examples/desktop/autoflip/calculators/content_zooming_calculator_test.cc b/mediapipe/examples/desktop/autoflip/calculators/content_zooming_calculator_test.cc
index 48e4a28a8..0e817b260 100644
--- a/mediapipe/examples/desktop/autoflip/calculators/content_zooming_calculator_test.cc
+++ b/mediapipe/examples/desktop/autoflip/calculators/content_zooming_calculator_test.cc
@@ -226,7 +226,7 @@ struct AddDetectionFlags {
std::optional max_zoom_factor_percent;
};
-void AddDetectionFrameSize(const cv::Rect_& position, const int64 time,
+void AddDetectionFrameSize(const cv::Rect_& position, const int64_t time,
const int width, const int height,
CalculatorRunner* runner,
const AddDetectionFlags& flags = {}) {
@@ -275,7 +275,7 @@ void AddDetectionFrameSize(const cv::Rect_& position, const int64 time,
}
}
-void AddDetection(const cv::Rect_& position, const int64 time,
+void AddDetection(const cv::Rect_& position, const int64_t time,
CalculatorRunner* runner) {
AddDetectionFrameSize(position, time, 1000, 1000, runner);
}
diff --git a/mediapipe/examples/desktop/autoflip/calculators/scene_cropping_calculator.cc b/mediapipe/examples/desktop/autoflip/calculators/scene_cropping_calculator.cc
index 7e286b743..f4cc98674 100644
--- a/mediapipe/examples/desktop/autoflip/calculators/scene_cropping_calculator.cc
+++ b/mediapipe/examples/desktop/autoflip/calculators/scene_cropping_calculator.cc
@@ -200,7 +200,7 @@ absl::Status ParseAspectRatioString(const std::string& aspect_ratio_string,
}
void ConstructExternalRenderMessage(
const cv::Rect& crop_from_location, const cv::Rect& render_to_location,
- const cv::Scalar& padding_color, const uint64 timestamp_us,
+ const cv::Scalar& padding_color, const uint64_t timestamp_us,
ExternalRenderFrame* external_render_message, int frame_width,
int frame_height) {
auto crop_from_message =
@@ -717,7 +717,7 @@ absl::Status SceneCroppingCalculator::FormatAndOutputCroppedFrames(
for (int i = 0; i < num_frames; ++i) {
// Set default padding color to white.
cv::Scalar padding_color_to_add = cv::Scalar(255, 255, 255);
- const int64 time_ms = scene_frame_timestamps_[i];
+ const int64_t time_ms = scene_frame_timestamps_[i];
if (*apply_padding) {
if (has_solid_background_) {
double lab[3];
@@ -747,7 +747,7 @@ absl::Status SceneCroppingCalculator::FormatAndOutputCroppedFrames(
// Resizes cropped frames, pads frames, and output frames.
for (int i = 0; i < num_frames; ++i) {
- const int64 time_ms = scene_frame_timestamps_[i];
+ const int64_t time_ms = scene_frame_timestamps_[i];
const Timestamp timestamp(time_ms);
auto scaled_frame = absl::make_unique(
frame_format_, scaled_width, scaled_height);
diff --git a/mediapipe/examples/desktop/autoflip/calculators/scene_cropping_calculator_test.cc b/mediapipe/examples/desktop/autoflip/calculators/scene_cropping_calculator_test.cc
index c3285ea58..74535022d 100644
--- a/mediapipe/examples/desktop/autoflip/calculators/scene_cropping_calculator_test.cc
+++ b/mediapipe/examples/desktop/autoflip/calculators/scene_cropping_calculator_test.cc
@@ -175,7 +175,7 @@ constexpr int kMinNumDetections = 0;
constexpr int kMaxNumDetections = 10;
constexpr int kDownSampleRate = 4;
-constexpr int64 kTimestampDiff = 20000;
+constexpr int64_t kTimestampDiff = 20000;
// Returns a singleton random engine for generating random values. The seed is
// fixed for reproducibility.
@@ -254,7 +254,7 @@ std::unique_ptr MakeImageFrameFromColor(const cv::Scalar& color,
// Randomly generates a number of detections in the range of kMinNumDetections
// and kMaxNumDetections. Optionally add a key image frame of random solid color
// and given size.
-void AddKeyFrameFeatures(const int64 time_ms, const int key_frame_width,
+void AddKeyFrameFeatures(const int64_t time_ms, const int key_frame_width,
const int key_frame_height, bool randomize,
CalculatorRunner::StreamContentsSet* inputs) {
Timestamp timestamp(time_ms);
@@ -286,7 +286,7 @@ void AddScene(const int start_frame_index, const int num_scene_frames,
const int key_frame_width, const int key_frame_height,
const int DownSampleRate,
CalculatorRunner::StreamContentsSet* inputs) {
- int64 time_ms = start_frame_index * kTimestampDiff;
+ int64_t time_ms = start_frame_index * kTimestampDiff;
for (int i = 0; i < num_scene_frames; ++i) {
Timestamp timestamp(time_ms);
if (inputs->HasTag(kVideoFramesTag)) {
@@ -657,7 +657,7 @@ TEST(SceneCroppingCalculatorTest, PadsWithSolidColorFromStaticFeatures) {
// Add inputs.
auto* inputs = runner->MutableInputs();
- int64 time_ms = 0;
+ int64_t time_ms = 0;
int num_static_features = 0;
for (int i = 0; i < kSceneSize; ++i) {
Timestamp timestamp(time_ms);
diff --git a/mediapipe/examples/desktop/autoflip/quality/BUILD b/mediapipe/examples/desktop/autoflip/quality/BUILD
index 0b5970ee9..20e286107 100644
--- a/mediapipe/examples/desktop/autoflip/quality/BUILD
+++ b/mediapipe/examples/desktop/autoflip/quality/BUILD
@@ -1,4 +1,4 @@
-load("//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library")
+load("//mediapipe/framework/port:build_config.bzl", "mediapipe_proto_library")
# Copyright 2019 The MediaPipe Authors.
#
@@ -20,7 +20,7 @@ package(default_visibility = [
"//mediapipe/examples:__subpackages__",
])
-proto_library(
+mediapipe_proto_library(
name = "cropping_proto",
srcs = ["cropping.proto"],
deps = [
@@ -29,41 +29,18 @@ proto_library(
],
)
-mediapipe_cc_proto_library(
- name = "cropping_cc_proto",
- srcs = ["cropping.proto"],
- cc_deps = [
- ":kinematic_path_solver_cc_proto",
- "//mediapipe/examples/desktop/autoflip:autoflip_messages_cc_proto",
- ],
- visibility = ["//mediapipe/examples:__subpackages__"],
- deps = [":cropping_proto"],
-)
-
-proto_library(
+mediapipe_proto_library(
name = "kinematic_path_solver_proto",
srcs = ["kinematic_path_solver.proto"],
-)
-
-mediapipe_cc_proto_library(
- name = "kinematic_path_solver_cc_proto",
- srcs = ["kinematic_path_solver.proto"],
visibility = [
"//mediapipe/examples:__subpackages__",
],
- deps = [":kinematic_path_solver_proto"],
)
-proto_library(
+mediapipe_proto_library(
name = "focus_point_proto",
srcs = ["focus_point.proto"],
-)
-
-mediapipe_cc_proto_library(
- name = "focus_point_cc_proto",
- srcs = ["focus_point.proto"],
visibility = ["//mediapipe/examples:__subpackages__"],
- deps = [":focus_point_proto"],
)
cc_library(
@@ -333,16 +310,10 @@ cc_test(
],
)
-proto_library(
+mediapipe_proto_library(
name = "visual_scorer_proto",
srcs = ["visual_scorer.proto"],
-)
-
-mediapipe_cc_proto_library(
- name = "visual_scorer_cc_proto",
- srcs = ["visual_scorer.proto"],
visibility = ["//mediapipe/examples:__subpackages__"],
- deps = [":visual_scorer_proto"],
)
cc_library(
diff --git a/mediapipe/examples/desktop/autoflip/quality/scene_camera_motion_analyzer.cc b/mediapipe/examples/desktop/autoflip/quality/scene_camera_motion_analyzer.cc
index 0bfe72548..96fc5f888 100644
--- a/mediapipe/examples/desktop/autoflip/quality/scene_camera_motion_analyzer.cc
+++ b/mediapipe/examples/desktop/autoflip/quality/scene_camera_motion_analyzer.cc
@@ -34,7 +34,7 @@ absl::Status SceneCameraMotionAnalyzer::AnalyzeSceneAndPopulateFocusPointFrames(
const KeyFrameCropOptions& key_frame_crop_options,
const std::vector& key_frame_crop_results,
const int scene_frame_width, const int scene_frame_height,
- const std::vector<int64>& scene_frame_timestamps,
+ const std::vector<int64_t>& scene_frame_timestamps,
const bool has_solid_color_background,
SceneKeyFrameCropSummary* scene_summary,
std::vector* focus_point_frames,
@@ -45,7 +45,7 @@ absl::Status SceneCameraMotionAnalyzer::AnalyzeSceneAndPopulateFocusPointFrames(
key_frame_crop_options, key_frame_crop_results, scene_frame_width,
scene_frame_height, scene_summary));
- const int64 scene_span_ms =
+ const int64_t scene_span_ms =
scene_frame_timestamps.empty()
? 0
: scene_frame_timestamps.back() - scene_frame_timestamps.front();
@@ -103,7 +103,7 @@ absl::Status SceneCameraMotionAnalyzer::ToUseSweepingMotion(
absl::Status SceneCameraMotionAnalyzer::DecideCameraMotionType(
const KeyFrameCropOptions& key_frame_crop_options,
- const double scene_span_sec, const int64 end_time_us,
+ const double scene_span_sec, const int64_t end_time_us,
SceneKeyFrameCropSummary* scene_summary,
SceneCameraMotion* scene_camera_motion) const {
RET_CHECK_GE(scene_span_sec, 0.0) << "Scene time span is negative.";
@@ -298,7 +298,7 @@ absl::Status SceneCameraMotionAnalyzer::AddFocusPointsFromCenterTypeAndWeight(
absl::Status SceneCameraMotionAnalyzer::PopulateFocusPointFrames(
const SceneKeyFrameCropSummary& scene_summary,
const SceneCameraMotion& scene_camera_motion,
- const std::vector<int64>& scene_frame_timestamps,
+ const std::vector<int64_t>& scene_frame_timestamps,
std::vector* focus_point_frames) const {
RET_CHECK_NE(focus_point_frames, nullptr)
<< "Output vector of FocusPointFrame is null.";
@@ -380,7 +380,7 @@ absl::Status SceneCameraMotionAnalyzer::PopulateFocusPointFrames(
absl::Status SceneCameraMotionAnalyzer::PopulateFocusPointFramesForTracking(
const SceneKeyFrameCropSummary& scene_summary,
const FocusPointFrameType focus_point_frame_type,
- const std::vector<int64>& scene_frame_timestamps,
+ const std::vector<int64_t>& scene_frame_timestamps,
std::vector* focus_point_frames) const {
RET_CHECK_GE(scene_summary.key_frame_max_score(), 0.0)
<< "Maximum score is negative.";
@@ -392,7 +392,7 @@ absl::Status SceneCameraMotionAnalyzer::PopulateFocusPointFramesForTracking(
const int scene_frame_height = scene_summary.scene_frame_height();
PiecewiseLinearFunction center_x_function, center_y_function, score_function;
- const int64 timestamp_offset = key_frame_compact_infos[0].timestamp_ms();
+ const int64_t timestamp_offset = key_frame_compact_infos[0].timestamp_ms();
for (int i = 0; i < num_key_frames; ++i) {
const float center_x = key_frame_compact_infos[i].center_x();
const float center_y = key_frame_compact_infos[i].center_y();
diff --git a/mediapipe/framework/api2/builder.h b/mediapipe/framework/api2/builder.h
index da09acc83..ee9796e49 100644
--- a/mediapipe/framework/api2/builder.h
+++ b/mediapipe/framework/api2/builder.h
@@ -425,7 +425,10 @@ using GenericNode = Node;
template
class Node : public NodeBase {
public:
- Node() : NodeBase(std::string(Calc::kCalculatorName)) {}
+ Node()
+ : NodeBase(
+ FunctionRegistry::GetLookupName(Calc::kCalculatorName)) {}
+
// Overrides the built-in calculator type string with the provided argument.
// Can be used to create nodes from pure interfaces.
// TODO: only use this for pure interfaces
@@ -546,6 +549,7 @@ class Graph {
// Creates a node of a specific type. Should be used for pure interfaces,
// which do not have a built-in type string.
+ // `type` is a calculator type-name with dot-separated namespaces.
template
Node& AddNode(absl::string_view type) {
auto node =
@@ -557,6 +561,7 @@ class Graph {
// Creates a generic node, with no compile-time checking of inputs and
// outputs. This can be used for calculators whose contract is not visible.
+ // `type` is a calculator type-name with dot-separated namespaces.
GenericNode& AddNode(absl::string_view type) {
auto node =
std::make_unique(std::string(type.data(), type.size()));
diff --git a/mediapipe/framework/calculator_graph.cc b/mediapipe/framework/calculator_graph.cc
index b49930b7a..06a57fa6d 100644
--- a/mediapipe/framework/calculator_graph.cc
+++ b/mediapipe/framework/calculator_graph.cc
@@ -192,8 +192,7 @@ absl::Status CalculatorGraph::InitializeStreams() {
auto input_tag_map,
tool::TagMap::Create(validated_graph_->Config().input_stream()));
for (const auto& stream_name : input_tag_map->Names()) {
- RET_CHECK(!mediapipe::ContainsKey(graph_input_streams_, stream_name))
- .SetNoLogging()
+ RET_CHECK(!graph_input_streams_.contains(stream_name)).SetNoLogging()
<< "CalculatorGraph Initialization failed, graph input stream \""
<< stream_name << "\" was specified twice.";
int output_stream_index = validated_graph_->OutputStreamIndex(stream_name);
diff --git a/mediapipe/framework/calculator_graph_bounds_test.cc b/mediapipe/framework/calculator_graph_bounds_test.cc
index d149337cc..81ce9902c 100644
--- a/mediapipe/framework/calculator_graph_bounds_test.cc
+++ b/mediapipe/framework/calculator_graph_bounds_test.cc
@@ -679,7 +679,7 @@ REGISTER_CALCULATOR(BoundToPacketCalculator);
// A Calculator that produces packets at timestamps beyond the input timestamp.
class FuturePacketCalculator : public CalculatorBase {
public:
- static constexpr int64 kOutputFutureMicros = 3;
+ static constexpr int64_t kOutputFutureMicros = 3;
static absl::Status GetContract(CalculatorContract* cc) {
cc->Inputs().Index(0).Set();
diff --git a/mediapipe/framework/calculator_graph_side_packet_test.cc b/mediapipe/framework/calculator_graph_side_packet_test.cc
index 57fcff866..a9567c805 100644
--- a/mediapipe/framework/calculator_graph_side_packet_test.cc
+++ b/mediapipe/framework/calculator_graph_side_packet_test.cc
@@ -188,21 +188,21 @@ class Uint64PacketGenerator : public PacketGenerator {
static absl::Status FillExpectations(
const PacketGeneratorOptions& extendable_options,
PacketTypeSet* input_side_packets, PacketTypeSet* output_side_packets) {
- output_side_packets->Index(0).Set<uint64>();
+ output_side_packets->Index(0).Set<uint64_t>();
return absl::OkStatus();
}
static absl::Status Generate(const PacketGeneratorOptions& extendable_options,
const PacketSet& input_side_packets,
PacketSet* output_side_packets) {
- output_side_packets->Index(0) = Adopt(new uint64(15LL << 32 | 5));
+ output_side_packets->Index(0) = Adopt(new uint64_t(15LL << 32 | 5));
return absl::OkStatus();
}
};
REGISTER_PACKET_GENERATOR(Uint64PacketGenerator);
TEST(CalculatorGraph, OutputSidePacketInProcess) {
- const int64 offset = 100;
+ const int64_t offset = 100;
CalculatorGraphConfig config =
mediapipe::ParseTextProtoOrDie(R"pb(
input_stream: "offset"
@@ -400,7 +400,7 @@ TEST(CalculatorGraph, SharePacketGeneratorGraph) {
}
TEST(CalculatorGraph, OutputSidePacketAlreadySet) {
- const int64 offset = 100;
+ const int64_t offset = 100;
CalculatorGraphConfig config =
mediapipe::ParseTextProtoOrDie(R"pb(
input_stream: "offset"
@@ -427,7 +427,7 @@ TEST(CalculatorGraph, OutputSidePacketAlreadySet) {
}
TEST(CalculatorGraph, OutputSidePacketWithTimestamp) {
- const int64 offset = 100;
+ const int64_t offset = 100;
CalculatorGraphConfig config =
mediapipe::ParseTextProtoOrDie(R"pb(
input_stream: "offset"
@@ -716,7 +716,7 @@ TEST(CalculatorGraph, GetOutputSidePacket) {
// Run the graph twice.
int max_count = 100;
std::map<std::string, Packet> extra_side_packets;
- extra_side_packets.insert({"input_uint64", MakePacket<uint64>(1123)});
+ extra_side_packets.insert({"input_uint64", MakePacket<uint64_t>(1123)});
for (int run = 0; run < 1; ++run) {
MP_ASSERT_OK(graph.StartRun(extra_side_packets));
status_or_packet = graph.GetOutputSidePacket("output_uint32_pair");
diff --git a/mediapipe/framework/calculator_graph_test.cc b/mediapipe/framework/calculator_graph_test.cc
index 6ca206ab1..2e7d99ef6 100644
--- a/mediapipe/framework/calculator_graph_test.cc
+++ b/mediapipe/framework/calculator_graph_test.cc
@@ -439,7 +439,7 @@ class GlobalCountSourceCalculator : public CalculatorBase {
++local_count_;
}
- int64 local_count_ = 0;
+ int64_t local_count_ = 0;
};
const int GlobalCountSourceCalculator::kNumOutputPackets = 5;
REGISTER_CALCULATOR(GlobalCountSourceCalculator);
@@ -765,7 +765,7 @@ class TypedStatusHandler : public StatusHandler {
}
};
typedef TypedStatusHandler<std::string> StringStatusHandler;
-typedef TypedStatusHandler<uint32> Uint32StatusHandler;
+typedef TypedStatusHandler<uint32_t> Uint32StatusHandler;
REGISTER_STATUS_HANDLER(StringStatusHandler);
REGISTER_STATUS_HANDLER(Uint32StatusHandler);
@@ -1398,9 +1398,9 @@ void RunComprehensiveTest(CalculatorGraph* graph,
MP_ASSERT_OK(graph->Initialize(proto));
std::map<std::string, Packet> extra_side_packets;
- extra_side_packets.emplace("node_3", Adopt(new uint64((15LL << 32) | 3)));
+ extra_side_packets.emplace("node_3", Adopt(new uint64_t((15LL << 32) | 3)));
if (define_node_5) {
- extra_side_packets.emplace("node_5", Adopt(new uint64((15LL << 32) | 5)));
+ extra_side_packets.emplace("node_5", Adopt(new uint64_t((15LL << 32) | 5)));
}
// Call graph->Run() several times, to make sure that the appropriate
@@ -1452,9 +1452,9 @@ void RunComprehensiveTest(CalculatorGraph* graph,
// Verify that the graph can still run (but not successfully) when
// one of the nodes is caused to fail.
extra_side_packets.clear();
- extra_side_packets.emplace("node_3", Adopt(new uint64((15LL << 32) | 0)));
+ extra_side_packets.emplace("node_3", Adopt(new uint64_t((15LL << 32) | 0)));
if (define_node_5) {
- extra_side_packets.emplace("node_5", Adopt(new uint64((15LL << 32) | 5)));
+ extra_side_packets.emplace("node_5", Adopt(new uint64_t((15LL << 32) | 5)));
}
dumped_final_sum_packet = Packet();
dumped_final_stddev_packet = Packet();
@@ -1579,14 +1579,14 @@ class Uint64PacketGenerator : public PacketGenerator {
static absl::Status FillExpectations(
const PacketGeneratorOptions& extendable_options,
PacketTypeSet* input_side_packets, PacketTypeSet* output_side_packets) {
- output_side_packets->Index(0).Set<uint64>();
+ output_side_packets->Index(0).Set<uint64_t>();
return absl::OkStatus();
}
static absl::Status Generate(const PacketGeneratorOptions& extendable_options,
const PacketSet& input_side_packets,
PacketSet* output_side_packets) {
- output_side_packets->Index(0) = Adopt(new uint64(15LL << 32 | 5));
+ output_side_packets->Index(0) = Adopt(new uint64_t(15LL << 32 | 5));
return absl::OkStatus();
}
};
@@ -1759,7 +1759,7 @@ TEST(CalculatorGraph, StatusHandlerInputVerification) {
)pb");
MP_ASSERT_OK(graph->Initialize(config));
Packet extra_string = Adopt(new std::string("foo"));
- Packet a_uint64 = Adopt(new uint64(0));
+ Packet a_uint64 = Adopt(new uint64_t(0));
MP_EXPECT_OK(
graph->Run({{"extra_string", extra_string}, {"a_uint64", a_uint64}}));
@@ -1789,7 +1789,7 @@ TEST(CalculatorGraph, StatusHandlerInputVerification) {
testing::HasSubstr("string"),
// Expected type.
testing::HasSubstr(
- MediaPipeTypeStringOrDemangled<uint32>())));
+ MediaPipeTypeStringOrDemangled<uint32_t>())));
// Should fail verification when the type of a to-be-generated packet is
// wrong. The added handler now expects a string but will receive the uint32
@@ -1802,14 +1802,14 @@ TEST(CalculatorGraph, StatusHandlerInputVerification) {
status = graph->Initialize(config);
EXPECT_THAT(status.message(),
- testing::AllOf(
- testing::HasSubstr("StringStatusHandler"),
- // The problematic input side packet.
- testing::HasSubstr("generated_by_generator"),
- // Actual type.
- testing::HasSubstr(MediaPipeTypeStringOrDemangled<uint32>()),
- // Expected type.
- testing::HasSubstr("string")));
+ testing::AllOf(testing::HasSubstr("StringStatusHandler"),
+ // The problematic input side packet.
+ testing::HasSubstr("generated_by_generator"),
+ // Actual type.
+ testing::HasSubstr(
+ MediaPipeTypeStringOrDemangled<uint32_t>()),
+ // Expected type.
+ testing::HasSubstr("string")));
}
TEST(CalculatorGraph, GenerateInInitialize) {
diff --git a/mediapipe/framework/calculator_runner.cc b/mediapipe/framework/calculator_runner.cc
index 833797483..1bd3211ed 100644
--- a/mediapipe/framework/calculator_runner.cc
+++ b/mediapipe/framework/calculator_runner.cc
@@ -216,7 +216,7 @@ mediapipe::Counter* CalculatorRunner::GetCounter(const std::string& name) {
return graph_->GetCounterFactory()->GetCounter(name);
}
-std::map<std::string, int64> CalculatorRunner::GetCountersValues() {
+std::map<std::string, int64_t> CalculatorRunner::GetCountersValues() {
return graph_->GetCounterFactory()->GetCounterSet()->GetCountersValues();
}
diff --git a/mediapipe/framework/counter_factory.cc b/mediapipe/framework/counter_factory.cc
index 94a6a4213..895b44ea6 100644
--- a/mediapipe/framework/counter_factory.cc
+++ b/mediapipe/framework/counter_factory.cc
@@ -39,14 +39,14 @@ class BasicCounter : public Counter {
value_ += amount;
}
- int64 Get() ABSL_LOCKS_EXCLUDED(mu_) override {
+ int64_t Get() ABSL_LOCKS_EXCLUDED(mu_) override {
absl::ReaderMutexLock lock(&mu_);
return value_;
}
private:
absl::Mutex mu_;
- int64 value_ ABSL_GUARDED_BY(mu_);
+ int64_t value_ ABSL_GUARDED_BY(mu_);
};
} // namespace
@@ -73,10 +73,10 @@ Counter* CounterSet::Get(const std::string& name) ABSL_LOCKS_EXCLUDED(mu_) {
return counters_[name].get();
}
-std::map<std::string, int64> CounterSet::GetCountersValues()
+std::map<std::string, int64_t> CounterSet::GetCountersValues()
ABSL_LOCKS_EXCLUDED(mu_) {
absl::ReaderMutexLock lock(&mu_);
- std::map<std::string, int64> result;
+ std::map<std::string, int64_t> result;
for (const auto& it : counters_) {
result[it.first] = it.second->Get();
}
diff --git a/mediapipe/framework/deps/mathutil_unittest.cc b/mediapipe/framework/deps/mathutil_unittest.cc
index 7468e927a..b25b73306 100644
--- a/mediapipe/framework/deps/mathutil_unittest.cc
+++ b/mediapipe/framework/deps/mathutil_unittest.cc
@@ -75,17 +75,17 @@ BENCHMARK(BM_IntCast);
static void BM_Int64Cast(benchmark::State& state) {
double x = 0.1;
- int64 sum = 0;
+ int64_t sum = 0;
for (auto _ : state) {
- sum += static_cast<int64>(x);
+ sum += static_cast<int64_t>(x);
x += 0.1;
- sum += static_cast<int64>(x);
+ sum += static_cast<int64_t>(x);
x += 0.1;
- sum += static_cast<int64>(x);
+ sum += static_cast<int64_t>(x);
x += 0.1;
- sum += static_cast<int64>(x);
+ sum += static_cast<int64_t>(x);
x += 0.1;
- sum += static_cast<int64>(x);
+ sum += static_cast<int64_t>(x);
x += 0.1;
}
EXPECT_NE(sum, 0); // Don't let 'sum' get optimized away.
@@ -134,15 +134,15 @@ static void BM_Int64Round(benchmark::State& state) {
double x = 0.1;
int sum = 0;
for (auto _ : state) {
- sum += mediapipe::MathUtil::Round<int64>(x);
+ sum += mediapipe::MathUtil::Round<int64_t>(x);
x += 0.1;
- sum += mediapipe::MathUtil::Round<int64>(x);
+ sum += mediapipe::MathUtil::Round<int64_t>(x);
x += 0.1;
- sum += mediapipe::MathUtil::Round<int64>(x);
+ sum += mediapipe::MathUtil::Round<int64_t>(x);
x += 0.1;
- sum += mediapipe::MathUtil::Round<int64>(x);
+ sum += mediapipe::MathUtil::Round<int64_t>(x);
x += 0.1;
- sum += mediapipe::MathUtil::Round<int64>(x);
+ sum += mediapipe::MathUtil::Round<int64_t>(x);
x += 0.1;
}
EXPECT_NE(sum, 0); // Don't let 'sum' get optimized away.
@@ -153,15 +153,15 @@ static void BM_UintRound(benchmark::State& state) {
double x = 0.1;
int sum = 0;
for (auto _ : state) {
- sum += mediapipe::MathUtil::Round<uint32>(x);
+ sum += mediapipe::MathUtil::Round<uint32_t>(x);
x += 0.1;
- sum += mediapipe::MathUtil::Round<uint32>(x);
+ sum += mediapipe::MathUtil::Round<uint32_t>(x);
x += 0.1;
- sum += mediapipe::MathUtil::Round<uint32>(x);
+ sum += mediapipe::MathUtil::Round<uint32_t>(x);
x += 0.1;
- sum += mediapipe::MathUtil::Round<uint32>(x);
+ sum += mediapipe::MathUtil::Round<uint32_t>(x);
x += 0.1;
- sum += mediapipe::MathUtil::Round<uint32>(x);
+ sum += mediapipe::MathUtil::Round<uint32_t>(x);
x += 0.1;
}
EXPECT_NE(sum, 0); // Don't let 'sum' get optimized away.
@@ -191,15 +191,15 @@ static void BM_SafeInt64Cast(benchmark::State& state) {
double x = 0.1;
int sum = 0;
for (auto _ : state) {
- sum += mediapipe::MathUtil::SafeCast<int64>(x);
+ sum += mediapipe::MathUtil::SafeCast<int64_t>(x);
x += 0.1;
- sum += mediapipe::MathUtil::SafeCast<int64>(x);
+ sum += mediapipe::MathUtil::SafeCast<int64_t>(x);
x += 0.1;
- sum += mediapipe::MathUtil::SafeCast<int64>(x);
+ sum += mediapipe::MathUtil::SafeCast<int64_t>(x);
x += 0.1;
- sum += mediapipe::MathUtil::SafeCast<int64>(x);
+ sum += mediapipe::MathUtil::SafeCast<int64_t>(x);
x += 0.1;
- sum += mediapipe::MathUtil::SafeCast<int64>(x);
+ sum += mediapipe::MathUtil::SafeCast<int64_t>(x);
x += 0.1;
}
EXPECT_NE(sum, 0); // Don't let 'sum' get optimized away.
@@ -229,15 +229,15 @@ static void BM_SafeInt64Round(benchmark::State& state) {
double x = 0.1;
int sum = 0;
for (auto _ : state) {
- sum += mediapipe::MathUtil::SafeRound<int64>(x);
+ sum += mediapipe::MathUtil::SafeRound<int64_t>(x);
x += 0.1;
- sum += mediapipe::MathUtil::SafeRound<int64>(x);
+ sum += mediapipe::MathUtil::SafeRound<int64_t>(x);
x += 0.1;
- sum += mediapipe::MathUtil::SafeRound<int64>(x);
+ sum += mediapipe::MathUtil::SafeRound<int64_t>(x);
x += 0.1;
- sum += mediapipe::MathUtil::SafeRound<int64>(x);
+ sum += mediapipe::MathUtil::SafeRound<int64_t>(x);
x += 0.1;
- sum += mediapipe::MathUtil::SafeRound<int64>(x);
+ sum += mediapipe::MathUtil::SafeRound<int64_t>(x);
x += 0.1;
}
EXPECT_NE(sum, 0); // Don't let 'sum' get optimized away.
@@ -262,8 +262,8 @@ TEST(MathUtil, IntRound) {
// A double-precision number has a 53-bit mantissa (52 fraction bits),
// so the following value can be represented exactly.
- int64 value64 = static_cast<int64>(0x1234567890abcd00);
- EXPECT_EQ(mediapipe::MathUtil::Round<int64>(static_cast<double>(value64)),
+ int64_t value64 = static_cast<int64_t>(0x1234567890abcd00);
+ EXPECT_EQ(mediapipe::MathUtil::Round<int64_t>(static_cast<double>(value64)),
value64);
}
@@ -369,7 +369,7 @@ class SafeCastTester {
if (sizeof(FloatIn) >= 64) {
// A double-precision number has a 53-bit mantissa (52 fraction bits),
// so the following value can be represented exactly by a double.
- int64 value64 = static_cast<int64>(0x1234567890abcd00);
+ int64_t value64 = static_cast<int64_t>(0x1234567890abcd00);
const IntOut expected =
(sizeof(IntOut) >= 64) ? static_cast<IntOut>(value64) : imax;
EXPECT_EQ(
@@ -536,22 +536,22 @@ class SafeCastTester {
};
TEST(MathUtil, SafeCast) {
- SafeCastTester<float, int8>::Run();
- SafeCastTester<double, int8>::Run();
- SafeCastTester<float, int16>::Run();
- SafeCastTester<double, int16>::Run();
- SafeCastTester<float, int32>::Run();
- SafeCastTester<double, int32>::Run();
- SafeCastTester<float, int64>::Run();
- SafeCastTester<double, int64>::Run();
- SafeCastTester<float, uint8>::Run();
- SafeCastTester<double, uint8>::Run();
- SafeCastTester<float, uint16>::Run();
- SafeCastTester<double, uint16>::Run();
- SafeCastTester<float, uint32>::Run();
- SafeCastTester<double, uint32>::Run();
- SafeCastTester<float, uint64>::Run();
- SafeCastTester<double, uint64>::Run();
+ SafeCastTester<float, int8_t>::Run();
+ SafeCastTester<double, int8_t>::Run();
+ SafeCastTester<float, int16_t>::Run();
+ SafeCastTester<double, int16_t>::Run();
+ SafeCastTester<float, int32_t>::Run();
+ SafeCastTester<double, int32_t>::Run();
+ SafeCastTester<float, int64_t>::Run();
+ SafeCastTester<double, int64_t>::Run();
+ SafeCastTester<float, uint8_t>::Run();
+ SafeCastTester<double, uint8_t>::Run();
+ SafeCastTester<float, uint16_t>::Run();
+ SafeCastTester<double, uint16_t>::Run();
+ SafeCastTester<float, uint32_t>::Run();
+ SafeCastTester<double, uint32_t>::Run();
+ SafeCastTester<float, uint64_t>::Run();
+ SafeCastTester<double, uint64_t>::Run();
// Spot-check SafeCast
EXPECT_EQ(mediapipe::MathUtil::SafeCast<int>(static_cast<float>(12345.678)),
@@ -682,7 +682,7 @@ class SafeRoundTester {
if (sizeof(FloatIn) >= 64) {
// A double-precision number has a 53-bit mantissa (52 fraction bits),
// so the following value can be represented exactly by a double.
- int64 value64 = static_cast<int64>(0x1234567890abcd00);
+ int64_t value64 = static_cast<int64_t>(0x1234567890abcd00);
const IntOut expected =
(sizeof(IntOut) >= 64) ? static_cast<IntOut>(value64) : imax;
EXPECT_EQ(
@@ -843,22 +843,22 @@ class SafeRoundTester {
};
TEST(MathUtil, SafeRound) {
- SafeRoundTester<float, int8>::Run();
- SafeRoundTester<double, int8>::Run();
- SafeRoundTester<float, int16>::Run();
- SafeRoundTester<double, int16>::Run();
- SafeRoundTester<float, int32>::Run();
- SafeRoundTester<double, int32>::Run();
- SafeRoundTester<float, int64>::Run();
- SafeRoundTester<double, int64>::Run();
- SafeRoundTester<float, uint8>::Run();
- SafeRoundTester<double, uint8>::Run();
- SafeRoundTester<float, uint16>::Run();
- SafeRoundTester<double, uint16>::Run();
- SafeRoundTester<float, uint32>::Run();
- SafeRoundTester<double, uint32>::Run();
- SafeRoundTester<float, uint64>::Run();
- SafeRoundTester<double, uint64>::Run();
+ SafeRoundTester<float, int8_t>::Run();
+ SafeRoundTester<double, int8_t>::Run();
+ SafeRoundTester<float, int16_t>::Run();
+ SafeRoundTester<double, int16_t>::Run();
+ SafeRoundTester<float, int32_t>::Run();
+ SafeRoundTester<double, int32_t>::Run();
+ SafeRoundTester<float, int64_t>::Run();
+ SafeRoundTester<double, int64_t>::Run();
+ SafeRoundTester<float, uint8_t>::Run();
+ SafeRoundTester<double, uint8_t>::Run();
+ SafeRoundTester<float, uint16_t>::Run();
+ SafeRoundTester<double, uint16_t>::Run();
+ SafeRoundTester<float, uint32_t>::Run();
+ SafeRoundTester<double, uint32_t>::Run();
+ SafeRoundTester<float, uint64_t>::Run();
+ SafeRoundTester<double, uint64_t>::Run();
// Spot-check SafeRound