Merged master

commit 768824f850
Author: Prianka Liz Kariat
Date: 2023-04-06 13:02:26 +05:30

249 changed files with 4744 additions and 1993 deletions

View File

@@ -1 +1 @@
-5.2.0
+6.1.1

View File

@@ -61,7 +61,7 @@ RUN pip3 install tf_slim
 RUN ln -s /usr/bin/python3 /usr/bin/python
 # Install bazel
-ARG BAZEL_VERSION=5.2.0
+ARG BAZEL_VERSION=6.1.1
 RUN mkdir /bazel && \
     wget --no-check-certificate -O /bazel/installer.sh "https://github.com/bazelbuild/bazel/releases/download/${BAZEL_VERSION}/b\
 azel-${BAZEL_VERSION}-installer-linux-x86_64.sh" && \
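Because the version is declared with `ARG`, the image can be rebuilt against a different Bazel release without editing the Dockerfile. The override flag below is standard Docker; the alternate version number is only an example:

```sh
# Build with the Dockerfile's default Bazel (6.1.1).
docker build -t mediapipe .

# Or override the build argument at build time (example version).
docker build --build-arg BAZEL_VERSION=6.2.0 -t mediapipe .
```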

View File

@@ -6,6 +6,20 @@ nav_order: 1
 ![MediaPipe](https://mediapipe.dev/images/mediapipe_small.png)
+----
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+*This notice and web page will be removed on June 1, 2023.*
+----
+<br><br><br><br><br><br><br><br><br><br>
+<br><br><br><br><br><br><br><br><br><br>
+<br><br><br><br><br><br><br><br><br><br>
 --------------------------------------------------------------------------------
 ## Live ML anywhere
@@ -21,15 +35,6 @@ ML solutions for live and streaming media.
 ----
-**Attention:** *Thanks for your interest in MediaPipe! We are moving to
-[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
-as the primary developer documentation
-site for MediaPipe starting April 3, 2023.*
-*This notice and web page will be removed on April 3, 2023.*
-----
 ## ML solutions in MediaPipe
 Face Detection | Face Mesh | Iris | Hands | Pose | Holistic

WORKSPACE (140 changed lines)
View File

@@ -54,6 +54,76 @@ load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependen
 rules_foreign_cc_dependencies()
+http_archive(
+    name = "com_google_protobuf",
+    sha256 = "87407cd28e7a9c95d9f61a098a53cf031109d451a7763e7dd1253abf8b4df422",
+    strip_prefix = "protobuf-3.19.1",
+    urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.19.1.tar.gz"],
+    patches = [
+        "@//third_party:com_google_protobuf_fixes.diff"
+    ],
+    patch_args = [
+        "-p1",
+    ],
+)
+# Load Zlib before initializing TensorFlow and the iOS build rules to guarantee
+# that the target @zlib//:mini_zlib is available
+http_archive(
+    name = "zlib",
+    build_file = "@//third_party:zlib.BUILD",
+    sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
+    strip_prefix = "zlib-1.2.11",
+    urls = [
+        "http://mirror.bazel.build/zlib.net/fossils/zlib-1.2.11.tar.gz",
+        "http://zlib.net/fossils/zlib-1.2.11.tar.gz", # 2017-01-15
+    ],
+    patches = [
+        "@//third_party:zlib.diff",
+    ],
+    patch_args = [
+        "-p1",
+    ],
+)
+# iOS basic build deps.
+http_archive(
+    name = "build_bazel_rules_apple",
+    sha256 = "3e2c7ae0ddd181c4053b6491dad1d01ae29011bc322ca87eea45957c76d3a0c3",
+    url = "https://github.com/bazelbuild/rules_apple/releases/download/2.1.0/rules_apple.2.1.0.tar.gz",
+    patches = [
+        # Bypass checking ios unit test runner when building MP ios applications.
+        "@//third_party:build_bazel_rules_apple_bypass_test_runner_check.diff"
+    ],
+    patch_args = [
+        "-p1",
+    ],
+)
+load(
+    "@build_bazel_rules_apple//apple:repositories.bzl",
+    "apple_rules_dependencies",
+)
+apple_rules_dependencies()
+load(
+    "@build_bazel_rules_swift//swift:repositories.bzl",
+    "swift_rules_dependencies",
+)
+swift_rules_dependencies()
+load(
+    "@build_bazel_rules_swift//swift:extras.bzl",
+    "swift_rules_extra_dependencies",
+)
+swift_rules_extra_dependencies()
+load(
+    "@build_bazel_apple_support//lib:repositories.bzl",
+    "apple_support_dependencies",
+)
+apple_support_dependencies()
 # This is used to select all contents of the archives for CMake-based packages to give CMake access to them.
 all_content = """filegroup(name = "all", srcs = glob(["**"]), visibility = ["//visibility:public"])"""
@@ -133,19 +203,6 @@ http_archive(
     urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.19.1.tar.gz"],
 )
-http_archive(
-    name = "com_google_protobuf",
-    sha256 = "87407cd28e7a9c95d9f61a098a53cf031109d451a7763e7dd1253abf8b4df422",
-    strip_prefix = "protobuf-3.19.1",
-    urls = ["https://github.com/protocolbuffers/protobuf/archive/v3.19.1.tar.gz"],
-    patches = [
-        "@//third_party:com_google_protobuf_fixes.diff"
-    ],
-    patch_args = [
-        "-p1",
-    ],
-)
 load("@//third_party/flatbuffers:workspace.bzl", flatbuffers = "repo")
 flatbuffers()
@@ -319,63 +376,6 @@ http_archive(
     ],
 )
-# Load Zlib before initializing TensorFlow and the iOS build rules to guarantee
-# that the target @zlib//:mini_zlib is available
-http_archive(
-    name = "zlib",
-    build_file = "@//third_party:zlib.BUILD",
-    sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
-    strip_prefix = "zlib-1.2.11",
-    urls = [
-        "http://mirror.bazel.build/zlib.net/fossils/zlib-1.2.11.tar.gz",
-        "http://zlib.net/fossils/zlib-1.2.11.tar.gz", # 2017-01-15
-    ],
-    patches = [
-        "@//third_party:zlib.diff",
-    ],
-    patch_args = [
-        "-p1",
-    ],
-)
-# iOS basic build deps.
-http_archive(
-    name = "build_bazel_rules_apple",
-    sha256 = "f94e6dddf74739ef5cb30f000e13a2a613f6ebfa5e63588305a71fce8a8a9911",
-    url = "https://github.com/bazelbuild/rules_apple/releases/download/1.1.3/rules_apple.1.1.3.tar.gz",
-    patches = [
-        # Bypass checking ios unit test runner when building MP ios applications.
-        "@//third_party:build_bazel_rules_apple_bypass_test_runner_check.diff"
-    ],
-    patch_args = [
-        "-p1",
-    ],
-)
-load(
-    "@build_bazel_rules_apple//apple:repositories.bzl",
-    "apple_rules_dependencies",
-)
-apple_rules_dependencies()
-load(
-    "@build_bazel_rules_swift//swift:repositories.bzl",
-    "swift_rules_dependencies",
-)
-swift_rules_dependencies()
-load(
-    "@build_bazel_rules_swift//swift:extras.bzl",
-    "swift_rules_extra_dependencies",
-)
-swift_rules_extra_dependencies()
-load(
-    "@build_bazel_apple_support//lib:repositories.bzl",
-    "apple_support_dependencies",
-)
-apple_support_dependencies()
 # More iOS deps.
 http_archive(

View File

@@ -1,5 +1,6 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/framework/framework_concepts/graphs_cpp
 title: Building Graphs in C++
 parent: Graphs
 nav_order: 1
@@ -12,6 +13,12 @@ nav_order: 1
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 C++ graph builder is a powerful tool for:
 * Building complex graphs
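Many files in this commit switch their front matter from `layout: default` to `layout: forward` plus a `target` URL. The layout template itself is not part of this diff; a plausible minimal implementation of such a redirect layout in Jekyll is a meta-refresh page (the `_layouts/forward.html` path and markup below are assumptions, not taken from this commit):

```html
<!DOCTYPE html>
<html>
  <head>
    <!-- Forward immediately to the URL given in the page's `target` front matter. -->
    <meta http-equiv="refresh" content="0; url={{ page.target }}">
    <link rel="canonical" href="{{ page.target }}">
  </head>
  <body>
    <p>Redirecting to <a href="{{ page.target }}">{{ page.target }}</a>.</p>
  </body>
</html>
```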

View File

@@ -13,6 +13,12 @@ nav_order: 1
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 Each calculator is a node of a graph. We describe how to create a new
 calculator, how to initialize a calculator, how to perform its calculations,
 input and output streams, timestamps, and options. Each node in the graph is

View File

@@ -14,6 +14,12 @@ has_toc: false
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 ## The basics
 ### Packet

View File

@@ -13,6 +13,12 @@ nav_order: 5
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 ## Overview
 MediaPipe supports calculator nodes for GPU compute and rendering, and allows combining multiple GPU nodes, as well as mixing them with CPU based calculator nodes. There exist several GPU APIs on mobile platforms (eg, OpenGL ES, Metal and Vulkan). MediaPipe does not attempt to offer a single cross-API GPU abstraction. Individual nodes can be written using different APIs, allowing them to take advantage of platform specific features when needed.

View File

@@ -13,6 +13,12 @@ nav_order: 2
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 ## Graph
 A `CalculatorGraphConfig` proto specifies the topology and functionality of a

View File

@@ -13,6 +13,12 @@ nav_order: 3
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 Calculators communicate by sending and receiving packets. Typically a single
 packet is sent along each input stream at each input timestamp. A packet can
 contain any kind of data, such as a single frame of video or a single integer

View File

@@ -13,6 +13,12 @@ nav_order: 6
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 ## Real-time timestamps
 MediaPipe calculator graphs are often used to process streams of video or audio

View File

@@ -13,6 +13,12 @@ nav_order: 4
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 ## Scheduling mechanics
 Data processing in a MediaPipe graph occurs inside processing nodes defined as

View File

@@ -15,6 +15,12 @@ nav_order: 1
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 Please follow instructions below to build Android example apps in the supported
 MediaPipe [solutions](../solutions/solutions.md). To learn more about these
 example apps, start from [Hello World! on Android](./hello_world_android.md).

View File

@@ -14,6 +14,12 @@ nav_order: 3
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 ***Experimental Only***
 The MediaPipe Android Archive (AAR) library is a convenient way to use MediaPipe

View File

@@ -1,5 +1,6 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/
 title: MediaPipe Android Solutions
 parent: MediaPipe on Android
 grand_parent: Getting Started
@@ -13,14 +14,9 @@ nav_order: 2
 {:toc}
 ---
-**Attention:** *Thanks for your interest in MediaPipe! We are moving to
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
 [https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
-as the primary developer documentation
-site for MediaPipe starting April 3, 2023. This content will not be moved to
-the new site, but will remain available in the source code repository on an
-as-is basis.*
-*This notice and web page will be removed on April 3, 2023.*
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
 ----

View File

@@ -1,5 +1,6 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/
 title: Building MediaPipe Examples
 parent: Getting Started
 nav_exclude: true
@@ -12,14 +13,9 @@ nav_exclude: true
 {:toc}
 ---
-**Attention:** *Thanks for your interest in MediaPipe! We are moving to
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
 [https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
-as the primary developer documentation
-site for MediaPipe starting April 3, 2023. This content will not be moved to
-the new site, but will remain available in the source code repository on an
-as-is basis.*
-*This notice and web page will be removed on April 3, 2023.*
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
 ----

View File

@@ -15,6 +15,12 @@ nav_order: 5
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 Please follow instructions below to build C++ command-line example apps in the
 supported MediaPipe [solutions](../solutions/solutions.md). To learn more about
 these example apps, start from [Hello World! in C++](./hello_world_cpp.md).

View File

@@ -13,6 +13,12 @@ nav_order: 9
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 ### How to convert ImageFrames and GpuBuffers
 The Calculators [`ImageFrameToGpuBufferCalculator`] and

View File

@@ -1,5 +1,6 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/
 title: Getting Started
 nav_order: 2
 has_children: true
@@ -12,13 +13,8 @@ has_children: true
 {:toc}
 ---
-**Attention:** *Thanks for your interest in MediaPipe! We are moving to
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
 [https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
-as the primary developer documentation
-site for MediaPipe starting April 3, 2023. This content will not be moved to
-the new site, but will remain available in the source code repository on an
-as-is basis.*
-*This notice and web page will be removed on April 3, 2023.*
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
 ----

View File

@@ -13,6 +13,12 @@ nav_order: 7
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 ## OpenGL ES Support
 MediaPipe supports OpenGL ES up to version 3.2 on Android/Linux and up to ES 3.0

View File

@@ -14,6 +14,12 @@ nav_order: 1
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 ## Introduction
 This codelab uses MediaPipe on an Android device.

View File

@@ -14,6 +14,12 @@ nav_order: 1
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 1. Ensure you have a working version of MediaPipe. See
    [installation instructions](./install.md).

View File

@@ -14,6 +14,12 @@ nav_order: 1
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 ## Introduction
 This codelab uses MediaPipe on an iOS device.

View File

@@ -13,6 +13,12 @@ nav_order: 8
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 ## Technical questions
 For help with technical or algorithmic questions, visit

View File

@@ -13,6 +13,12 @@ nav_order: 6
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 Note: To interoperate with OpenCV, OpenCV 3.x to 4.1 are preferred. OpenCV
 2.x currently works but interoperability support may be deprecated in the
 future.
@@ -577,7 +583,7 @@ next section.
 Option 1. Follow
 [the official Bazel documentation](https://docs.bazel.build/versions/master/install-windows.html)
-to install Bazel 5.2.0 or higher.
+to install Bazel 6.1.1 or higher.
 Option 2. Follow the official
 [Bazel documentation](https://docs.bazel.build/versions/master/install-bazelisk.html)

View File

@@ -15,6 +15,12 @@ nav_order: 2
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 Please follow instructions below to build iOS example apps in the supported
 MediaPipe [solutions](../solutions/solutions.md). To learn more about these
 example apps, start from, start from

View File

@@ -1,5 +1,6 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/
 title: MediaPipe in JavaScript
 parent: Getting Started
 nav_order: 4
@@ -14,12 +15,7 @@ nav_order: 4
 **Attention:** *Thanks for your interest in MediaPipe! We are moving to
 [https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
-as the primary developer documentation
-site for MediaPipe starting April 3, 2023. This content will not be moved to
-the new site, but will remain available in the source code repository on an
-as-is basis.*
-*This notice and web page will be removed on April 3, 2023.*
+as the primary developer documentation site for MediaPipe starting April 3, 2023.*
 ----

View File

@@ -1,5 +1,6 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/
 title: MediaPipe in Python
 parent: Getting Started
 has_children: true
@@ -14,6 +15,12 @@ nav_order: 3
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 ## Ready-to-use Python Solutions
 MediaPipe offers ready-to-use yet customizable Python solutions as a prebuilt

View File

@@ -12,6 +12,11 @@ nav_order: 1
 1. TOC
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 The MediaPipe Python framework grants direct access to the core components of
 the MediaPipe C++ framework such as Timestamp, Packet, and CalculatorGraph,

View File

@@ -13,6 +13,12 @@ nav_order: 10
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 ## Missing Python binary path
 The error message:

View File

@@ -6,6 +6,20 @@ nav_order: 1
 ![MediaPipe](https://mediapipe.dev/images/mediapipe_small.png)
+----
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+*This notice and web page will be removed on June 1, 2023.*
+----
+<br><br><br><br><br><br><br><br><br><br>
+<br><br><br><br><br><br><br><br><br><br>
+<br><br><br><br><br><br><br><br><br><br>
 --------------------------------------------------------------------------------
 ## Live ML anywhere
@@ -21,15 +35,6 @@ ML solutions for live and streaming media.
 ----
-**Attention:** *Thanks for your interest in MediaPipe! We are moving to
-[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
-as the primary developer documentation
-site for MediaPipe starting April 3, 2023.*
-*This notice and web page will be removed on April 3, 2023.*
-----
 ## ML solutions in MediaPipe
 Face Detection | Face Mesh | Iris | Hands | Pose | Holistic

View File

@@ -1,3 +1,3 @@
 MediaPipe
 =====================================
-Please see https://docs.mediapipe.dev.
+Please see https://developers.google.com/mediapipe/

View File

@@ -1,7 +1,8 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/guide#legacy
 title: AutoFlip (Saliency-aware Video Cropping)
-parent: Solutions
+parent: MediaPipe Legacy Solutions
 nav_order: 14
 ---
@@ -20,12 +21,10 @@ nav_order: 14
 **Attention:** *Thank you for your interest in MediaPipe Solutions.
 We have ended support for this MediaPipe Legacy Solution as of March 1, 2023.
-For more information, see the new
+For more information, see the
 [MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
 site.*
-*This notice and web page will be removed on April 3, 2023.*
 ----
 ## Overview

View File

@@ -1,7 +1,8 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/guide#legacy
 title: Box Tracking
-parent: Solutions
+parent: MediaPipe Legacy Solutions
 nav_order: 10
 ---
@@ -20,12 +21,10 @@ nav_order: 10
 **Attention:** *Thank you for your interest in MediaPipe Solutions.
 We have ended support for this MediaPipe Legacy Solution as of March 1, 2023.
-For more information, see the new
+For more information, see the
 [MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
 site.*
-*This notice and web page will be removed on April 3, 2023.*
 ----
 ## Overview

View File

@@ -1,7 +1,8 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/vision/face_detector/
 title: Face Detection
-parent: Solutions
+parent: MediaPipe Legacy Solutions
 nav_order: 1
 ---
@@ -20,12 +21,10 @@ nav_order: 1
 **Attention:** *Thank you for your interest in MediaPipe Solutions.
 As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
-Solution. For more information, see the new
+Solution. For more information, see the
 [MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
 site.*
-*This notice and web page will be removed on April 3, 2023.*
 ----
 ## Overview

View File

@@ -1,7 +1,8 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/vision/face_landmarker/
 title: Face Mesh
-parent: Solutions
+parent: MediaPipe Legacy Solutions
 nav_order: 2
 ---
@@ -20,12 +21,10 @@ nav_order: 2
 **Attention:** *Thank you for your interest in MediaPipe Solutions.
 As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
-Solution. For more information, see the new
+Solution. For more information, see the
 [MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
 site.*
-*This notice and web page will be removed on April 3, 2023.*
 ----
 ## Overview

View File

@@ -1,7 +1,8 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/vision/image_segmenter/
 title: Hair Segmentation
-parent: Solutions
+parent: MediaPipe Legacy Solutions
 nav_order: 8
 ---
@@ -19,13 +20,11 @@ nav_order: 8
 ---
 **Attention:** *Thank you for your interest in MediaPipe Solutions.
-As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
-Solution. For more information, see the new
-[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
+As of April 4, 2023, this solution was upgraded to a new MediaPipe
+Solution. For more information, see the
+[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/image_segmenter/)
 site.*
-*This notice and web page will be removed on April 3, 2023.*
 ----
 ![hair_segmentation_android_gpu_gif](https://mediapipe.dev/images/mobile/hair_segmentation_android_gpu.gif)

View File

@@ -1,7 +1,8 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/vision/hand_landmarker
 title: Hands
-parent: Solutions
+parent: MediaPipe Legacy Solutions
 nav_order: 4
 ---
@@ -19,13 +20,11 @@ nav_order: 4
 ---
 **Attention:** *Thank you for your interest in MediaPipe Solutions.
-As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
-Solution. For more information, see the new
-[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
+As of March 1, 2023, this solution was upgraded to a new MediaPipe
+Solution. For more information, see the
+[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/hand_landmarker)
 site.*
-*This notice and web page will be removed on April 3, 2023.*
 ----
 ## Overview

View File

@@ -1,7 +1,8 @@
 ---
-layout: default
+layout: forward
+target: https://github.com/google/mediapipe/blob/master/docs/solutions/holistic.md
 title: Holistic
-parent: Solutions
+parent: MediaPipe Legacy Solutions
 nav_order: 6
 ---
@@ -20,12 +21,10 @@ nav_order: 6
 **Attention:** *Thank you for your interest in MediaPipe Solutions.
 As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
-Solution. For more information, see the new
+Solution. For more information, see the
 [MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
 site.*
-*This notice and web page will be removed on April 3, 2023.*
 ----
 ## Overview

View File

@@ -1,7 +1,8 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/guide#legacy
 title: Instant Motion Tracking
-parent: Solutions
+parent: MediaPipe Legacy Solutions
 nav_order: 11
 ---
@@ -20,12 +21,10 @@ nav_order: 11
 **Attention:** *Thank you for your interest in MediaPipe Solutions.
 We have ended support for this MediaPipe Legacy Solution as of March 1, 2023.
-For more information, see the new
+For more information, see the
 [MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
 site.*
-*This notice and web page will be removed on April 3, 2023.*
 ----
 ## Overview

View File

@@ -1,7 +1,8 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/vision/face_landmarker/
 title: Iris
-parent: Solutions
+parent: MediaPipe Legacy Solutions
 nav_order: 3
 ---
@@ -20,12 +21,10 @@ nav_order: 3
 **Attention:** *Thank you for your interest in MediaPipe Solutions.
 As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
-Solution. For more information, see the new
+Solution. For more information, see the
 [MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
 site.*
-*This notice and web page will be removed on April 3, 2023.*
 ----
 ## Overview

View File

@@ -1,7 +1,8 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/guide#legacy
 title: KNIFT (Template-based Feature Matching)
-parent: Solutions
+parent: MediaPipe Legacy Solutions
 nav_order: 13
 ---
@@ -20,12 +21,10 @@ nav_order: 13
 **Attention:** *Thank you for your interest in MediaPipe Solutions.
 We have ended support for this MediaPipe Legacy Solution as of March 1, 2023.
-For more information, see the new
+For more information, see the
 [MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
 site.*
-*This notice and web page will be removed on April 3, 2023.*
 ----
 ## Overview

View File

@@ -1,7 +1,8 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/guide#legacy
 title: Dataset Preparation with MediaSequence
-parent: Solutions
+parent: MediaPipe Legacy Solutions
 nav_order: 15
 ---
@@ -24,8 +25,6 @@ For more information, see the new
 [MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
 site.*
-*This notice and web page will be removed on April 3, 2023.*
 ----
 ## Overview

View File

@@ -1,7 +1,8 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/guide#legacy
 title: Models and Model Cards
-parent: Solutions
+parent: MediaPipe Legacy Solutions
 nav_order: 30
 ---
@@ -22,8 +23,6 @@ MediaPipe Legacy Solutions will continue to be provided on an as-is basis.
 We encourage you to check out the new MediaPipe Solutions at:
 [https://developers.google.com/mediapipe/solutions](https://developers.google.com/mediapipe/solutions)*
-*This notice and web page will be removed on April 3, 2023.*
 ----
 ### [Face Detection](https://google.github.io/mediapipe/solutions/face_detection)

View File

@@ -1,7 +1,8 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/vision/object_detector/
 title: Object Detection
-parent: Solutions
+parent: MediaPipe Legacy Solutions
 nav_order: 9
 ---
@@ -19,13 +20,11 @@ nav_order: 9
 ---
 **Attention:** *Thank you for your interest in MediaPipe Solutions.
-As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
-Solution. For more information, see the new
-[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
+As of March 1, 2023, this solution was upgraded to a new MediaPipe
+Solution. For more information, see the
+[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/object_detector/)
 site.*
-*This notice and web page will be removed on April 3, 2023.*
 ----
 ![object_detection_android_gpu.gif](https://mediapipe.dev/images/mobile/object_detection_android_gpu.gif)

View File

@@ -1,4 +1,31 @@
-## TensorFlow/TFLite Object Detection Model
+---
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/vision/object_detector
+title: Object Detection
+parent: MediaPipe Legacy Solutions
+nav_order: 9
+---
+# MediaPipe Object Detection
+{: .no_toc }
+<details close markdown="block">
+  <summary>
+    Table of contents
+  </summary>
+  {: .text-delta }
+1. TOC
+{:toc}
+</details>
+---
+**Attention:** *Thank you for your interest in MediaPipe Solutions.
+As of March 1, 2023, this solution was upgraded to a new MediaPipe
+Solution. For more information, see the
+[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/object_detector)
+site.*
+----
 ### TensorFlow model

View File

@@ -1,7 +1,8 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/guide#legacy
 title: Objectron (3D Object Detection)
-parent: Solutions
+parent: MediaPipe Legacy Solutions
 nav_order: 12
 ---
@@ -20,12 +21,10 @@ nav_order: 12
 **Attention:** *Thank you for your interest in MediaPipe Solutions.
 We have ended support for this MediaPipe Legacy Solution as of March 1, 2023.
-For more information, see the new
+For more information, see the
 [MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
 site.*
-*This notice and web page will be removed on April 3, 2023.*
 ----
 ## Overview

View File

@@ -1,7 +1,8 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/vision/pose_landmarker/
 title: Pose
-parent: Solutions
+parent: MediaPipe Legacy Solutions
 has_children: true
 has_toc: false
 nav_order: 5
@@ -22,12 +23,10 @@ nav_order: 5
 **Attention:** *Thank you for your interest in MediaPipe Solutions.
 As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
-Solution. For more information, see the new
-[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
+Solution. For more information, see the
+[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/pose_landmarker/)
 site.*
-*This notice and web page will be removed on April 3, 2023.*
 ----
 ## Overview

View File

@@ -1,8 +1,9 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/vision/pose_landmarker/
 title: Pose Classification
 parent: Pose
-grand_parent: Solutions
+grand_parent: MediaPipe Legacy Solutions
 nav_order: 1
 ---
@@ -21,12 +22,10 @@ nav_order: 1
 **Attention:** *Thank you for your interest in MediaPipe Solutions.
 As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
-Solution. For more information, see the new
-[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
+Solution. For more information, see the
+[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/pose_landmarker/)
 site.*
-*This notice and web page will be removed on April 3, 2023.*
 ----
 ## Overview

View File

@@ -1,7 +1,8 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/vision/image_segmenter/
 title: Selfie Segmentation
-parent: Solutions
+parent: MediaPipe Legacy Solutions
 nav_order: 7
 ---
@@ -19,13 +20,11 @@ nav_order: 7
 ---
 **Attention:** *Thank you for your interest in MediaPipe Solutions.
-As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
-Solution. For more information, see the new
-[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
+As of April 4, 2023, this solution was upgraded to a new MediaPipe
+Solution. For more information, see the
+[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/image_segmenter/)
 site.*
-*This notice and web page will be removed on April 3, 2023.*
 ----
 ## Overview

View File

@@ -1,12 +1,12 @@
 ---
 layout: default
-title: Solutions
+title: MediaPipe Legacy Solutions
 nav_order: 3
 has_children: true
 has_toc: false
 ---
-# Solutions
+# MediaPipe Legacy Solutions
 {: .no_toc }
 1. TOC
@@ -29,6 +29,12 @@ Solutions at:
 ----
+<br><br><br><br><br><br><br><br><br><br>
+<br><br><br><br><br><br><br><br><br><br>
+<br><br><br><br><br><br><br><br><br><br>
+----
 MediaPipe offers open source cross-platform, customizable ML solutions for live
 and streaming media.

View File

@@ -1,7 +1,8 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/guide#legacy
 title: YouTube-8M Feature Extraction and Model Inference
-parent: Solutions
+parent: MediaPipe Legacy Solutions
 nav_order: 16
 ---
@@ -20,12 +21,10 @@ nav_order: 16
 **Attention:** *Thank you for your interest in MediaPipe Solutions.
 We have ended support for this MediaPipe Legacy Solution as of March 1, 2023.
-For more information, see the new
+For more information, see the
 [MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
 site.*
-*This notice and web page will be removed on April 3, 2023.*
 ----
 MediaPipe is a useful and general framework for media processing that can assist

View File

@@ -1,5 +1,6 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/
 title: Performance Benchmarking
 parent: Tools
 nav_order: 3
@@ -12,6 +13,12 @@ nav_order: 3
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+---
 *Coming soon.*
 Future mediapipe releases will include tools for visualizing and analysing the

View File

@@ -1,5 +1,6 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/
 title: Tools
 nav_order: 4
 has_children: true
@@ -11,3 +12,9 @@ has_children: true
 1. TOC
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----

View File

@@ -1,5 +1,6 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/
 title: Tracing and Profiling
 parent: Tools
 nav_order: 2
@@ -12,6 +13,12 @@ nav_order: 2
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+----
 The MediaPipe framework includes a built-in tracer and profiler. The tracer
 records various timing events related to packet processing, including the start
 and end time of each Calculator::Process call. The tracer writes trace log files

View File

@@ -13,6 +13,12 @@ nav_order: 1
 {:toc}
 ---
+**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
+as the primary developer documentation site for MediaPipe as of April 3, 2023.*
+---
 To help users understand the structure of their calculator graphs and to
 understand the overall behavior of their machine learning inference pipelines,
 we have built the [MediaPipe Visualizer](https://viz.mediapipe.dev/)

View File

@@ -26,10 +26,11 @@
 namespace mediapipe {
 namespace {
 static bool SafeMultiply(int x, int y, int* result) {
-  static_assert(sizeof(int64) >= 2 * sizeof(int),
+  static_assert(sizeof(int64_t) >= 2 * sizeof(int),
                 "Unable to detect overflow after multiplication");
-  const int64 big = static_cast<int64>(x) * static_cast<int64>(y);
-  if (big > static_cast<int64>(INT_MIN) && big < static_cast<int64>(INT_MAX)) {
+  const int64_t big = static_cast<int64_t>(x) * static_cast<int64_t>(y);
+  if (big > static_cast<int64_t>(INT_MIN) &&
+      big < static_cast<int64_t>(INT_MAX)) {
     if (result != nullptr) *result = static_cast<int>(big);
     return true;
   } else {
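This hunk is part of the commit-wide migration from the legacy `int64` typedef to the standard `int64_t` from `<cstdint>`; the overflow check itself is unchanged. A self-contained sketch of the same technique, runnable outside MediaPipe:

```cpp
#include <climits>
#include <cstdint>
#include <iostream>

// Same technique as above: widen both operands to int64_t, multiply, and
// accept the product only if it fits back into a plain int.
static bool SafeMultiply(int x, int y, int* result) {
  static_assert(sizeof(int64_t) >= 2 * sizeof(int),
                "Unable to detect overflow after multiplication");
  const int64_t big = static_cast<int64_t>(x) * static_cast<int64_t>(y);
  if (big > static_cast<int64_t>(INT_MIN) &&
      big < static_cast<int64_t>(INT_MAX)) {
    if (result != nullptr) *result = static_cast<int>(big);
    return true;
  }
  return false;
}

int main() {
  int result = 0;
  std::cout << SafeMultiply(1 << 20, 1 << 20, &result) << "\n";  // 0: overflows int
  std::cout << SafeMultiply(1 << 10, 1 << 10, &result)           // 1 1048576
            << " " << result << "\n";
}
```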

View File

@@ -182,12 +182,12 @@ class SpectrogramCalculator : public CalculatorBase {
   int frame_duration_samples_;
   int frame_overlap_samples_;
   // How many samples we've been passed, used for checking input time stamps.
-  int64 cumulative_input_samples_;
+  int64_t cumulative_input_samples_;
   // How many frames we've emitted, used for calculating output time stamps.
-  int64 cumulative_completed_frames_;
+  int64_t cumulative_completed_frames_;
   // How many frames were emitted last, used for estimating the timestamp on
   // Close when use_local_timestamp_ is true;
-  int64 last_completed_frames_;
+  int64_t last_completed_frames_;
   Timestamp initial_input_timestamp_;
   int num_input_channels_;
   // How many frequency bins we emit (=N_FFT/2 + 1).

View File

@@ -92,7 +92,7 @@ class SpectrogramCalculatorTest
           .cos()
           .transpose();
     }
-    int64 input_timestamp = round(packet_start_time_seconds *
-                                  Timestamp::kTimestampUnitsPerSecond);
+    int64_t input_timestamp = round(packet_start_time_seconds *
+                                    Timestamp::kTimestampUnitsPerSecond);
     AppendInputPacket(packet_data, input_timestamp);
     total_num_input_samples += packet_size_samples;
@@ -116,7 +116,7 @@ class SpectrogramCalculatorTest
     double packet_start_time_seconds =
         kInitialTimestampOffsetMicroseconds * 1e-6 +
         total_num_input_samples / input_sample_rate_;
-    int64 input_timestamp = round(packet_start_time_seconds *
-                                  Timestamp::kTimestampUnitsPerSecond);
+    int64_t input_timestamp = round(packet_start_time_seconds *
+                                    Timestamp::kTimestampUnitsPerSecond);
     std::unique_ptr<Matrix> impulse(
         new Matrix(Matrix::Zero(1, packet_sizes_samples[i])));
@@ -157,7 +157,7 @@ class SpectrogramCalculatorTest
           .cos()
           .transpose();
     }
-    int64 input_timestamp = round(packet_start_time_seconds *
-                                  Timestamp::kTimestampUnitsPerSecond);
+    int64_t input_timestamp = round(packet_start_time_seconds *
+                                    Timestamp::kTimestampUnitsPerSecond);
     AppendInputPacket(packet_data, input_timestamp);
     total_num_input_samples += packet_size_samples;
@@ -218,7 +218,7 @@ class SpectrogramCalculatorTest
     const double expected_timestamp_seconds =
         packet_timestamp_offset_seconds +
         cumulative_output_frames * frame_step_seconds;
-    const int64 expected_timestamp_ticks =
+    const int64_t expected_timestamp_ticks =
         expected_timestamp_seconds * Timestamp::kTimestampUnitsPerSecond;
     EXPECT_EQ(expected_timestamp_ticks, packet.Timestamp().Value());
     // Accept the timestamp of the first packet as the baseline for checking

View File

@@ -54,7 +54,8 @@ TEST_F(StabilizedLogCalculatorTest, BasicOperation) {
   std::vector<Matrix> input_data_matrices;
   for (int input_packet = 0; input_packet < kNumPackets; ++input_packet) {
-    const int64 timestamp = input_packet * Timestamp::kTimestampUnitsPerSecond;
+    const int64_t timestamp =
+        input_packet * Timestamp::kTimestampUnitsPerSecond;
     Matrix input_data_matrix =
         Matrix::Random(kNumChannels, kNumSamples).array().abs();
     input_data_matrices.push_back(input_data_matrix);
@@ -80,7 +81,8 @@ TEST_F(StabilizedLogCalculatorTest, OutputScaleWorks) {
   std::vector<Matrix> input_data_matrices;
   for (int input_packet = 0; input_packet < kNumPackets; ++input_packet) {
-    const int64 timestamp = input_packet * Timestamp::kTimestampUnitsPerSecond;
+    const int64_t timestamp =
+        input_packet * Timestamp::kTimestampUnitsPerSecond;
     Matrix input_data_matrix =
         Matrix::Random(kNumChannels, kNumSamples).array().abs();
     input_data_matrices.push_back(input_data_matrix);

View File

@@ -109,7 +109,7 @@ class TimeSeriesFramerCalculator : public CalculatorBase {
   // Returns the timestamp of a sample on a base, which is usually the time
   // stamp of a packet.
   Timestamp CurrentSampleTimestamp(const Timestamp& timestamp_base,
-                                   int64 number_of_samples) {
+                                   int64_t number_of_samples) {
     return timestamp_base + round(number_of_samples / sample_rate_ *
                                   Timestamp::kTimestampUnitsPerSecond);
   }
@@ -118,10 +118,10 @@ class TimeSeriesFramerCalculator : public CalculatorBase {
   // emitted.
   int next_frame_step_samples() const {
     // All numbers are in input samples.
-    const int64 current_output_frame_start = static_cast<int64>(
+    const int64_t current_output_frame_start = static_cast<int64_t>(
        round(cumulative_output_frames_ * average_frame_step_samples_));
     CHECK_EQ(current_output_frame_start, cumulative_completed_samples_);
-    const int64 next_output_frame_start = static_cast<int64>(
+    const int64_t next_output_frame_start = static_cast<int64_t>(
        round((cumulative_output_frames_ + 1) * average_frame_step_samples_));
     return next_output_frame_start - current_output_frame_start;
   }
@@ -134,11 +134,11 @@ class TimeSeriesFramerCalculator : public CalculatorBase {
   // emulate_fractional_frame_overlap is true.
   double average_frame_step_samples_;
   int samples_still_to_drop_;
-  int64 cumulative_output_frames_;
+  int64_t cumulative_output_frames_;
   // "Completed" samples are samples that are no longer needed because
   // the framer has completely stepped past them (taking into account
   // any overlap).
-  int64 cumulative_completed_samples_;
+  int64_t cumulative_completed_samples_;
   Timestamp initial_input_timestamp_;
   // The current timestamp is updated along with the incoming packets.
   Timestamp current_timestamp_;
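The next_frame_step_samples() change above keeps an algorithmic detail intact that is easy to miss: with a fractional average frame step, each integer step is derived from cumulatively rounded frame positions rather than from a once-rounded step, so rounding error never accumulates. A standalone sketch of that technique (names are illustrative):

#include <cmath>
#include <cstdint>
#include <iostream>

// Emulate a fractional frame step (e.g. 2.5 samples) with integer steps that
// never drift: step k spans [round(k * step), round((k + 1) * step)).
int64_t NextFrameStepSamples(int64_t cumulative_output_frames,
                             double average_frame_step_samples) {
  const int64_t current_start = static_cast<int64_t>(
      std::round(cumulative_output_frames * average_frame_step_samples));
  const int64_t next_start = static_cast<int64_t>(
      std::round((cumulative_output_frames + 1) * average_frame_step_samples));
  return next_start - current_start;
}

int main() {
  // With a 2.5-sample step the integer steps alternate without drifting.
  for (int64_t k = 0; k < 4; ++k)
    std::cout << NextFrameStepSamples(k, 2.5) << " ";  // prints "3 2 3 2"
}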
View File
@@ -49,7 +49,7 @@ class TimeSeriesFramerCalculatorTest
   // Returns a float value with the channel and timestamp separated by
   // an order of magnitude, for easy parsing by humans.
-  float TestValue(int64 timestamp_in_microseconds, int channel) {
+  float TestValue(int64_t timestamp_in_microseconds, int channel) {
     return timestamp_in_microseconds + channel / 10.0;
   }
@@ -59,7 +59,7 @@ class TimeSeriesFramerCalculatorTest
     auto matrix = new Matrix(num_channels, num_samples);
     for (int c = 0; c < num_channels; ++c) {
       for (int i = 0; i < num_samples; ++i) {
-        int64 timestamp = time_series_util::SecondsToSamples(
+        int64_t timestamp = time_series_util::SecondsToSamples(
             starting_timestamp_seconds + i / input_sample_rate_,
             Timestamp::kTimestampUnitsPerSecond);
         (*matrix)(c, i) = TestValue(timestamp, c);
@@ -429,7 +429,7 @@ class TimeSeriesFramerCalculatorTimestampingTest
       num_full_packets -= 1;
     }
-    int64 num_samples = 0;
+    int64_t num_samples = 0;
     for (int packet_num = 0; packet_num < num_full_packets; ++packet_num) {
       const Packet& packet = output().packets[packet_num];
       num_samples += FrameDurationSamples();
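TestValue() above packs two values into one float so expected test samples are human-readable: the integer part is the timestamp, the first decimal is the channel. A sketch, valid only while the timestamp is small enough to be exact in a float:

#include <cstdint>
#include <iostream>

// Integer part = timestamp, first decimal digit = channel (assumes channel
// indices 0..9 and timestamps within float precision).
float TestValue(int64_t timestamp_in_microseconds, int channel) {
  return timestamp_in_microseconds + channel / 10.0;
}

int main() {
  std::cout << TestValue(42, 3) << "\n";  // prints 42.3
}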
View File
@@ -12,25 +12,20 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-load("//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library")
+load("//mediapipe/framework/port:build_config.bzl", "mediapipe_proto_library")
 
 licenses(["notice"])
 
 package(default_visibility = ["//visibility:private"])
 
-proto_library(
+mediapipe_proto_library(
     name = "callback_packet_calculator_proto",
     srcs = ["callback_packet_calculator.proto"],
     visibility = ["//mediapipe/framework:__subpackages__"],
-    deps = ["//mediapipe/framework:calculator_proto"],
-)
-
-mediapipe_cc_proto_library(
-    name = "callback_packet_calculator_cc_proto",
-    srcs = ["callback_packet_calculator.proto"],
-    cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
-    visibility = ["//mediapipe/framework:__subpackages__"],
-    deps = [":callback_packet_calculator_proto"],
+    deps = [
+        "//mediapipe/framework:calculator_options_proto",
+        "//mediapipe/framework:calculator_proto",
+    ],
 )
 
 cc_library(
View File
@@ -467,10 +467,6 @@ cc_library(
         "-x objective-c++",
         "-fobjc-arc",  # enable reference-counting
     ],
-    linkopts = [
-        "-framework CoreVideo",
-        "-framework MetalKit",
-    ],
     tags = ["ios"],
     deps = [
         "inference_calculator_interface",
@@ -486,7 +482,13 @@ cc_library(
         "@org_tensorflow//tensorflow/lite/delegates/gpu:metal_delegate_internal",
         "@org_tensorflow//tensorflow/lite/delegates/gpu/common:shape",
         "@org_tensorflow//tensorflow/lite/delegates/gpu/metal:buffer_convert",
-    ],
+    ] + select({
+        "//mediapipe:apple": [
+            "//third_party/apple_frameworks:CoreVideo",
+            "//third_party/apple_frameworks:MetalKit",
+        ],
+        "//conditions:default": [],
+    }),
     alwayslink = 1,
 )
@@ -721,13 +723,6 @@ cc_library(
         "//conditions:default": [],
     }),
     features = ["-layering_check"],  # allow depending on tensors_to_detections_calculator_gpu_deps
-    linkopts = select({
-        "//mediapipe:apple": [
-            "-framework CoreVideo",
-            "-framework MetalKit",
-        ],
-        "//conditions:default": [],
-    }),
     deps = [
         ":tensors_to_detections_calculator_cc_proto",
         "//mediapipe/framework:calculator_framework",
@@ -744,6 +739,12 @@ cc_library(
     ] + selects.with_or({
         ":compute_shader_unavailable": [],
         "//conditions:default": [":tensors_to_detections_calculator_gpu_deps"],
-    }),
+    }) + select({
+        "//mediapipe:apple": [
+            "//third_party/apple_frameworks:CoreVideo",
+            "//third_party/apple_frameworks:MetalKit",
+        ],
+        "//conditions:default": [],
+    }),
     alwayslink = 1,
 )
@@ -1333,6 +1334,7 @@ cc_library(
         "//mediapipe:ios": [
             "//mediapipe/gpu:MPPMetalUtil",
             "//mediapipe/gpu:MPPMetalHelper",
+            "//third_party/apple_frameworks:MetalKit",
         ],
         "//conditions:default": [
             "@org_tensorflow//tensorflow/lite/delegates/gpu:gl_delegate",
View File
@@ -92,13 +92,14 @@ class OpenCvProcessor : public ImageToTensorConverter {
     const int dst_data_type = output_channels == 1 ? mat_gray_type_ : mat_type_;
     switch (tensor_type_) {
       case Tensor::ElementType::kInt8:
-        RET_CHECK_GE(output_shape.num_elements(),
-                     tensor_buffer_offset / sizeof(int8) + num_elements_per_img)
+        RET_CHECK_GE(
+            output_shape.num_elements(),
+            tensor_buffer_offset / sizeof(int8_t) + num_elements_per_img)
             << "The buffer offset + the input image size is larger than the "
                "allocated tensor buffer.";
-        dst = cv::Mat(
-            output_height, output_width, dst_data_type,
-            buffer_view.buffer<int8>() + tensor_buffer_offset / sizeof(int8));
+        dst = cv::Mat(output_height, output_width, dst_data_type,
+                      buffer_view.buffer<int8_t>() +
+                          tensor_buffer_offset / sizeof(int8_t));
         break;
       case Tensor::ElementType::kFloat32:
         RET_CHECK_GE(
@@ -113,12 +114,12 @@ class OpenCvProcessor : public ImageToTensorConverter {
       case Tensor::ElementType::kUInt8:
         RET_CHECK_GE(
             output_shape.num_elements(),
-            tensor_buffer_offset / sizeof(uint8) + num_elements_per_img)
+            tensor_buffer_offset / sizeof(uint8_t) + num_elements_per_img)
             << "The buffer offset + the input image size is larger than the "
                "allocated tensor buffer.";
-        dst = cv::Mat(
-            output_height, output_width, dst_data_type,
-            buffer_view.buffer<uint8>() + tensor_buffer_offset / sizeof(uint8));
+        dst = cv::Mat(output_height, output_width, dst_data_type,
+                      buffer_view.buffer<uint8_t>() +
+                          tensor_buffer_offset / sizeof(uint8_t));
         break;
       default:
         return InvalidArgumentError(
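The RET_CHECK_GE assertions above guard a raw-pointer view into a shared tensor buffer: the byte offset is converted to an element count and the requested image slice is validated against the tensor's total element count before a cv::Mat header is wrapped around the memory. A dependency-free sketch of the same invariant (names are illustrative, not MediaPipe's API):

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Returns a pointer to the image that starts tensor_buffer_offset bytes into
// the tensor, after checking the slice actually fits inside the allocation.
const uint8_t* ImageView(const std::vector<uint8_t>& tensor,
                         size_t tensor_buffer_offset,
                         size_t num_elements_per_img) {
  const size_t start = tensor_buffer_offset / sizeof(uint8_t);
  if (tensor.size() < start + num_elements_per_img) return nullptr;
  return tensor.data() + start;
}

int main() {
  std::vector<uint8_t> tensor(64);
  std::cout << (ImageView(tensor, 32, 32) != nullptr) << "\n";  // 1: fits
  std::cout << (ImageView(tensor, 48, 32) != nullptr) << "\n";  // 0: overruns
}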
View File
@@ -41,7 +41,7 @@ constexpr char kTransposeOptionsString[] =
 using RandomEngine = std::mt19937_64;
 using testing::Eq;
-const uint32 kSeed = 1234;
+const uint32_t kSeed = 1234;
 const int kNumSizes = 8;
 const int sizes[kNumSizes][2] = {{1, 1}, {12, 1}, {1, 9}, {2, 2},
                                  {5, 3}, {7, 13}, {16, 32}, {101, 2}};
@@ -49,7 +49,7 @@ const int sizes[kNumSizes][2] = {{1, 1}, {12, 1}, {1, 9}, {2, 2},
 class TensorConverterCalculatorTest : public ::testing::Test {
  protected:
   // Adds a packet with a matrix filled with random values in [0,1].
-  void AddRandomMatrix(int num_rows, int num_columns, uint32 seed,
+  void AddRandomMatrix(int num_rows, int num_columns, uint32_t seed,
                        bool row_major_matrix = false) {
     RandomEngine random(kSeed);
     std::uniform_real_distribution<> uniform_dist(0, 1.0);
@@ -229,7 +229,7 @@ TEST_F(TensorConverterCalculatorTest, CustomDivAndSub) {
   MP_ASSERT_OK(graph.StartRun({}));
   auto input_image = absl::make_unique<ImageFrame>(ImageFormat::GRAY8, 1, 1);
   cv::Mat mat = mediapipe::formats::MatView(input_image.get());
-  mat.at<uint8>(0, 0) = 200;
+  mat.at<uint8_t>(0, 0) = 200;
   MP_ASSERT_OK(graph.AddPacketToInputStream(
       "input_image", Adopt(input_image.release()).At(Timestamp(0))));
@@ -286,7 +286,7 @@ TEST_F(TensorConverterCalculatorTest, SetOutputRange) {
   MP_ASSERT_OK(graph.StartRun({}));
   auto input_image = absl::make_unique<ImageFrame>(ImageFormat::GRAY8, 1, 1);
   cv::Mat mat = mediapipe::formats::MatView(input_image.get());
-  mat.at<uint8>(0, 0) = 200;
+  mat.at<uint8_t>(0, 0) = 200;
   MP_ASSERT_OK(graph.AddPacketToInputStream(
       "input_image", Adopt(input_image.release()).At(Timestamp(0))));
View File
@@ -84,7 +84,7 @@ class TensorsToClassificationCalculator : public Node {
  private:
   int top_k_ = 0;
   bool sort_by_descending_score_ = false;
-  proto_ns::Map<int64, LabelMapItem> local_label_map_;
+  proto_ns::Map<int64_t, LabelMapItem> local_label_map_;
   bool label_map_loaded_ = false;
   bool is_binary_classification_ = false;
   float min_score_threshold_ = std::numeric_limits<float>::lowest();
@@ -98,7 +98,8 @@ class TensorsToClassificationCalculator : public Node {
   // These are used to filter out the output classification results.
   ClassIndexSet class_index_set_;
   bool IsClassIndexAllowed(int class_index);
-  const proto_ns::Map<int64, LabelMapItem>& GetLabelMap(CalculatorContext* cc);
+  const proto_ns::Map<int64_t, LabelMapItem>& GetLabelMap(
+      CalculatorContext* cc);
 };
 MEDIAPIPE_REGISTER_NODE(TensorsToClassificationCalculator);
@@ -252,7 +253,7 @@ bool TensorsToClassificationCalculator::IsClassIndexAllowed(int class_index) {
   }
 }
-const proto_ns::Map<int64, LabelMapItem>&
+const proto_ns::Map<int64_t, LabelMapItem>&
 TensorsToClassificationCalculator::GetLabelMap(CalculatorContext* cc) {
   return !local_label_map_.empty()
              ? local_label_map_
View File
@@ -399,7 +399,7 @@ cc_library(
 # On android, this calculator is configured to run with lite protos. Therefore,
 # compile your binary with the flag TENSORFLOW_PROTOS=lite.
 cc_library(
-    name = "tensorflow_inference_calculator",
+    name = "tensorflow_inference_calculator_no_envelope_loader",
     srcs = ["tensorflow_inference_calculator.cc"],
     deps = [
         ":tensorflow_inference_calculator_cc_proto",
@@ -432,6 +432,19 @@ cc_library(
     alwayslink = 1,
 )
 
+cc_library(
+    name = "tensorflow_inference_calculator",
+    deps = [
+        ":tensorflow_inference_calculator_no_envelope_loader",
+    ] + select({
+        # Since "select" has "exactly one match" rule, we will need default condition to avoid
+        # "no matching conditions" error. Since all necessary dependencies are specified in
+        # "tensorflow_inference_calculator_no_envelope_loader" dependency, it is empty here.
+        "//conditions:default": [],
+    }),
+    alwayslink = 1,
+)
+
 cc_library(
     name = "tensorflow_session",
     hdrs = [
View File
@@ -193,13 +193,6 @@ cc_library(
         ":edge_tpu_pci": ["MEDIAPIPE_EDGE_TPU=pci"],
         ":edge_tpu_all": ["MEDIAPIPE_EDGE_TPU=all"],
     }),
-    linkopts = select({
-        "//mediapipe:ios": [
-            "-framework CoreVideo",
-            "-framework MetalKit",
-        ],
-        "//conditions:default": [],
-    }),
     deps = [
         ":tflite_inference_calculator_cc_proto",
         "//mediapipe/framework:calculator_framework",
@@ -222,6 +215,8 @@ cc_library(
             "@org_tensorflow//tensorflow/lite/delegates/gpu/metal:buffer_convert",
             "@org_tensorflow//tensorflow/lite/delegates/gpu:metal_delegate",
             "@org_tensorflow//tensorflow/lite/delegates/gpu:metal_delegate_internal",
+            "//third_party/apple_frameworks:MetalKit",
+            "//third_party/apple_frameworks:CoreVideo",
         ],
         "//conditions:default": [
             "//mediapipe/util/tflite:tflite_gpu_runner",
@@ -271,13 +266,6 @@ cc_library(
         ],
         "//conditions:default": [],
     }),
-    linkopts = select({
-        "//mediapipe:ios": [
-            "-framework CoreVideo",
-            "-framework MetalKit",
-        ],
-        "//conditions:default": [],
-    }),
     deps = [
         ":tflite_converter_calculator_cc_proto",
         "//mediapipe/framework:calculator_framework",
@@ -296,6 +284,8 @@ cc_library(
             "//mediapipe/gpu:MPPMetalHelper",
             "//mediapipe/objc:mediapipe_framework_ios",
             "@org_tensorflow//tensorflow/lite/delegates/gpu:metal_delegate",
+            "//third_party/apple_frameworks:MetalKit",
+            "//third_party/apple_frameworks:CoreVideo",
         ],
         "//conditions:default": [
             "//mediapipe/gpu:gl_calculator_helper",
@@ -393,13 +383,6 @@ cc_library(
         ],
         "//conditions:default": [],
     }),
-    linkopts = select({
-        "//mediapipe:ios": [
-            "-framework CoreVideo",
-            "-framework MetalKit",
-        ],
-        "//conditions:default": [],
-    }),
     deps = [
         ":tflite_tensors_to_detections_calculator_cc_proto",
         "//mediapipe/framework:calculator_framework",
@@ -420,6 +403,8 @@ cc_library(
             "//mediapipe/gpu:MPPMetalHelper",
             "//mediapipe/objc:mediapipe_framework_ios",
             "@org_tensorflow//tensorflow/lite/delegates/gpu:metal_delegate",
+            "//third_party/apple_frameworks:MetalKit",
+            "//third_party/apple_frameworks:CoreVideo",
         ],
         "//conditions:default": [
             "//mediapipe/gpu:gl_calculator_helper",
View File
@@ -66,17 +66,17 @@ class ClockLatencyCalculator : public CalculatorBase {
   absl::Status Process(CalculatorContext* cc) override;
 
  private:
-  int64 num_packet_streams_ = -1;
+  int64_t num_packet_streams_ = -1;
 };
 REGISTER_CALCULATOR(ClockLatencyCalculator);
 
 absl::Status ClockLatencyCalculator::GetContract(CalculatorContract* cc) {
   RET_CHECK_GT(cc->Inputs().NumEntries(), 1);
-  int64 num_packet_streams = cc->Inputs().NumEntries() - 1;
+  int64_t num_packet_streams = cc->Inputs().NumEntries() - 1;
   RET_CHECK_EQ(cc->Outputs().NumEntries(), num_packet_streams);
-  for (int64 i = 0; i < num_packet_streams; ++i) {
+  for (int64_t i = 0; i < num_packet_streams; ++i) {
     cc->Inputs().Index(i).Set<absl::Time>();
     cc->Outputs().Index(i).Set<absl::Duration>();
   }
@@ -99,7 +99,7 @@ absl::Status ClockLatencyCalculator::Process(CalculatorContext* cc) {
       cc->Inputs().Tag(kReferenceTag).Get<absl::Time>();
   // Push Duration packets for every input stream we have.
-  for (int64 i = 0; i < num_packet_streams_; ++i) {
+  for (int64_t i = 0; i < num_packet_streams_; ++i) {
     if (!cc->Inputs().Index(i).IsEmpty()) {
       const absl::Time& input_stream_time =
           cc->Inputs().Index(i).Get<absl::Time>();
View File
@@ -33,7 +33,7 @@ typedef CollectionHasMinSizeCalculator<std::vector<int>>
     TestIntCollectionHasMinSizeCalculator;
 REGISTER_CALCULATOR(TestIntCollectionHasMinSizeCalculator);
 
-void AddInputVector(const std::vector<int>& input, int64 timestamp,
+void AddInputVector(const std::vector<int>& input, int64_t timestamp,
                     CalculatorRunner* runner) {
   runner->MutableInputs()
       ->Tag(kIterableTag)
View File
@@ -57,9 +57,10 @@ class DetectionLabelIdToTextCalculator : public CalculatorBase {
  private:
   // Local label map built from the calculator options' `label_map_path` or
   // `label` field.
-  proto_ns::Map<int64, LabelMapItem> local_label_map_;
+  proto_ns::Map<int64_t, LabelMapItem> local_label_map_;
   bool keep_label_id_;
-  const proto_ns::Map<int64, LabelMapItem>& GetLabelMap(CalculatorContext* cc);
+  const proto_ns::Map<int64_t, LabelMapItem>& GetLabelMap(
+      CalculatorContext* cc);
 };
 REGISTER_CALCULATOR(DetectionLabelIdToTextCalculator);
@@ -115,7 +116,7 @@ absl::Status DetectionLabelIdToTextCalculator::Process(CalculatorContext* cc) {
     output_detections.push_back(input_detection);
     Detection& output_detection = output_detections.back();
     bool has_text_label = false;
-    for (const int32 label_id : output_detection.label_id()) {
+    for (const int32_t label_id : output_detection.label_id()) {
       if (GetLabelMap(cc).contains(label_id)) {
         auto item = GetLabelMap(cc).at(label_id);
         output_detection.add_label(item.name());
@@ -136,7 +137,7 @@ absl::Status DetectionLabelIdToTextCalculator::Process(CalculatorContext* cc) {
   return absl::OkStatus();
 }
 
-const proto_ns::Map<int64, LabelMapItem>&
+const proto_ns::Map<int64_t, LabelMapItem>&
 DetectionLabelIdToTextCalculator::GetLabelMap(CalculatorContext* cc) {
   return !local_label_map_.empty()
              ? local_label_map_
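GetLabelMap() above is a small fallback accessor: prefer the label map materialized locally from the calculator's options, otherwise fall back to the map owned elsewhere. A sketch with the proto map simplified to std::map (the real code uses proto_ns::Map and a CalculatorContext):

#include <cstdint>
#include <iostream>
#include <map>
#include <string>

using LabelMap = std::map<int64_t, std::string>;

// Prefer the locally-loaded map when it is non-empty; otherwise use the
// fallback map provided from elsewhere (options / side packet in MediaPipe).
const LabelMap& GetLabelMap(const LabelMap& local, const LabelMap& fallback) {
  return !local.empty() ? local : fallback;
}

int main() {
  const LabelMap local = {};  // nothing loaded locally
  const LabelMap fallback = {{0, "background"}, {1, "face"}};
  std::cout << GetLabelMap(local, fallback).at(1) << "\n";  // prints "face"
}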
View File
@@ -40,7 +40,7 @@ LocationData CreateRelativeLocationData(double xmin, double ymin, double width,
 }
 
 Detection CreateDetection(const std::vector<std::string>& labels,
-                          const std::vector<int32>& label_ids,
+                          const std::vector<int32_t>& label_ids,
                           const std::vector<float>& scores,
                           const LocationData& location_data,
                           const std::string& feature_tag) {
View File
@@ -39,8 +39,8 @@ constexpr char kPixelDetectionsTag[] = "PIXEL_DETECTIONS";
 constexpr char kRelativeDetectionListTag[] = "RELATIVE_DETECTION_LIST";
 constexpr char kRelativeDetectionsTag[] = "RELATIVE_DETECTIONS";
 
-Detection DetectionWithBoundingBox(int32 xmin, int32 ymin, int32 width,
-                                   int32 height) {
+Detection DetectionWithBoundingBox(int32_t xmin, int32_t ymin, int32_t width,
+                                   int32_t height) {
   Detection detection;
   LocationData* location_data = detection.mutable_location_data();
   location_data->set_format(LocationData::BOUNDING_BOX);
View File
@@ -26,7 +26,7 @@ constexpr char kDetectionListTag[] = "DETECTION_LIST";
 // Each detection processed by DetectionUniqueIDCalculator will be assigned an
 // unique id that starts from 1. If a detection already has an ID other than 0,
 // the ID will be overwritten.
-static int64 detection_id = 0;
+static int64_t detection_id = 0;
 
 inline int GetNextDetectionId() { return ++detection_id; }
View File
@@ -56,8 +56,8 @@ MATCHER_P4(NormRectEq, x_center, y_center, width, height, "") {
          testing::Value(arg.height(), testing::FloatEq(height));
 }
 
-Detection DetectionWithLocationData(int32 xmin, int32 ymin, int32 width,
-                                    int32 height) {
+Detection DetectionWithLocationData(int32_t xmin, int32_t ymin, int32_t width,
+                                    int32_t height) {
   Detection detection;
   LocationData* location_data = detection.mutable_location_data();
   location_data->set_format(LocationData::BOUNDING_BOX);
View File
@@ -43,8 +43,8 @@ void VerifyRenderAnnotationColorThickness(
   EXPECT_EQ(annotation.thickness(), options.thickness());
 }
 
-LocationData CreateLocationData(int32 xmin, int32 ymin, int32 width,
-                                int32 height) {
+LocationData CreateLocationData(int32_t xmin, int32_t ymin, int32_t width,
+                                int32_t height) {
   LocationData location_data;
   location_data.set_format(LocationData::BOUNDING_BOX);
   location_data.mutable_bounding_box()->set_xmin(xmin);
@@ -66,7 +66,7 @@ LocationData CreateRelativeLocationData(double xmin, double ymin, double width,
 }
 
 Detection CreateDetection(const std::vector<std::string>& labels,
-                          const std::vector<int32>& label_ids,
+                          const std::vector<int32_t>& label_ids,
                           const std::vector<float>& scores,
                           const LocationData& location_data,
                           const std::string& feature_tag) {
View File
@@ -24,7 +24,7 @@
 namespace mediapipe {
 
-typedef FilterCollectionCalculator<std::vector<uint64>>
+typedef FilterCollectionCalculator<std::vector<uint64_t>>
     FilterUInt64CollectionCalculator;
 REGISTER_CALCULATOR(FilterUInt64CollectionCalculator);
View File
@@ -163,8 +163,8 @@ absl::Status FromImageCalculator::Process(CalculatorContext* cc) {
     std::unique_ptr<mediapipe::ImageFrame> output =
         std::make_unique<mediapipe::ImageFrame>(
             input.image_format(), input.width(), input.height(), input.step(),
-            const_cast<uint8*>(input.GetImageFrameSharedPtr()->PixelData()),
-            [packet_copy_ptr](uint8*) { delete packet_copy_ptr; });
+            const_cast<uint8_t*>(input.GetImageFrameSharedPtr()->PixelData()),
+            [packet_copy_ptr](uint8_t*) { delete packet_copy_ptr; });
     cc->Outputs()
         .Tag(kImageFrameTag)
         .Add(output.release(), cc->InputTimestamp());
View File
@@ -84,23 +84,24 @@ class PacketFrequencyCalculator : public CalculatorBase {
                                     const Timestamp& input_timestamp);
 
   // Adds the input timestamp in the particular stream's timestamp buffer.
-  absl::Status AddPacketTimestampForStream(int stream_id, int64 timestamp);
+  absl::Status AddPacketTimestampForStream(int stream_id, int64_t timestamp);
 
   // For the specified input stream, clears timestamps from buffer that are
   // older than the configured time_window_sec.
-  absl::Status ClearOldpacketTimestamps(int stream_id, int64 current_timestamp);
+  absl::Status ClearOldpacketTimestamps(int stream_id,
+                                        int64_t current_timestamp);
 
   // Options for the calculator.
   PacketFrequencyCalculatorOptions options_;
 
   // Map where key is the input stream ID and value is the timestamp of the
   // first packet received on that stream.
-  std::map<int, int64> first_timestamp_for_stream_id_usec_;
+  std::map<int, int64_t> first_timestamp_for_stream_id_usec_;
 
   // Map where key is the input stream ID and value is a vector that stores
   // timestamps of recently received packets on the stream. Timestamps older
   // than the time_window_sec are continuously deleted for all the streams.
-  std::map<int, std::vector<int64>> previous_timestamps_for_stream_id_;
+  std::map<int, std::vector<int64_t>> previous_timestamps_for_stream_id_;
 };
 REGISTER_CALCULATOR(PacketFrequencyCalculator);
@@ -166,7 +167,7 @@ absl::Status PacketFrequencyCalculator::Process(CalculatorContext* cc) {
 }
 
 absl::Status PacketFrequencyCalculator::AddPacketTimestampForStream(
-    int stream_id, int64 timestamp_usec) {
+    int stream_id, int64_t timestamp_usec) {
   if (previous_timestamps_for_stream_id_.find(stream_id) ==
       previous_timestamps_for_stream_id_.end()) {
     return absl::InvalidArgumentError("Input stream id is invalid");
@@ -178,19 +179,20 @@ absl::Status PacketFrequencyCalculator::AddPacketTimestampForStream(
 }
 
 absl::Status PacketFrequencyCalculator::ClearOldpacketTimestamps(
-    int stream_id, int64 current_timestamp_usec) {
+    int stream_id, int64_t current_timestamp_usec) {
   if (previous_timestamps_for_stream_id_.find(stream_id) ==
       previous_timestamps_for_stream_id_.end()) {
     return absl::InvalidArgumentError("Input stream id is invalid");
   }
   auto& timestamps_buffer = previous_timestamps_for_stream_id_[stream_id];
-  int64 time_window_usec = options_.time_window_sec() * kSecondsToMicroseconds;
+  int64_t time_window_usec =
+      options_.time_window_sec() * kSecondsToMicroseconds;
   timestamps_buffer.erase(
       std::remove_if(timestamps_buffer.begin(), timestamps_buffer.end(),
                      [&time_window_usec,
-                      &current_timestamp_usec](const int64 timestamp_usec) {
+                      &current_timestamp_usec](const int64_t timestamp_usec) {
                        return current_timestamp_usec - timestamp_usec >
                               time_window_usec;
                      }),
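ClearOldpacketTimestamps() above is the classic erase-remove idiom applied to a sliding time window: everything older than the window relative to the current timestamp is compacted away in one pass. A minimal standalone sketch (window and timestamps are illustrative):

#include <algorithm>
#include <cstdint>
#include <iostream>
#include <vector>

// Drop timestamps older than time_window_usec relative to now_usec.
void ClearOldTimestamps(std::vector<int64_t>& buffer, int64_t now_usec,
                        int64_t time_window_usec) {
  buffer.erase(std::remove_if(buffer.begin(), buffer.end(),
                              [=](int64_t t) {
                                return now_usec - t > time_window_usec;
                              }),
               buffer.end());
}

int main() {
  std::vector<int64_t> buffer = {100, 900, 950, 1000};
  ClearOldTimestamps(buffer, /*now_usec=*/1000, /*time_window_usec=*/100);
  for (int64_t t : buffer) std::cout << t << " ";  // prints "900 950 1000"
}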
View File
@@ -118,24 +118,24 @@ class PacketLatencyCalculator : public CalculatorBase {
   std::shared_ptr<::mediapipe::Clock> clock_;
 
   // Clock time when the first reference packet was received.
-  int64 first_process_time_usec_ = -1;
+  int64_t first_process_time_usec_ = -1;
 
   // Timestamp of the first reference packet received.
-  int64 first_reference_timestamp_usec_ = -1;
+  int64_t first_reference_timestamp_usec_ = -1;
 
   // Number of packet streams.
-  int64 num_packet_streams_ = -1;
+  int64_t num_packet_streams_ = -1;
 
   // Latency output for each packet stream.
   std::vector<PacketLatency> packet_latencies_;
 
   // Running sum and count of latencies for each packet stream. This is required
   // to compute the average latency.
-  std::vector<int64> sum_latencies_usec_;
-  std::vector<int64> num_latencies_;
+  std::vector<int64_t> sum_latencies_usec_;
+  std::vector<int64_t> num_latencies_;
 
   // Clock time when last reset was done for histogram and running average.
-  int64 last_reset_time_usec_ = -1;
+  int64_t last_reset_time_usec_ = -1;
 };
 REGISTER_CALCULATOR(PacketLatencyCalculator);
@@ -143,9 +143,9 @@ absl::Status PacketLatencyCalculator::GetContract(CalculatorContract* cc) {
   RET_CHECK_GT(cc->Inputs().NumEntries(), 1);
 
   // Input and output streams.
-  int64 num_packet_streams = cc->Inputs().NumEntries() - 1;
+  int64_t num_packet_streams = cc->Inputs().NumEntries() - 1;
   RET_CHECK_EQ(cc->Outputs().NumEntries(), num_packet_streams);
-  for (int64 i = 0; i < num_packet_streams; ++i) {
+  for (int64_t i = 0; i < num_packet_streams; ++i) {
     cc->Inputs().Index(i).SetAny();
     cc->Outputs().Index(i).Set<PacketLatency>();
   }
@@ -165,8 +165,8 @@ absl::Status PacketLatencyCalculator::GetContract(CalculatorContract* cc) {
 void PacketLatencyCalculator::ResetStatistics() {
   // Initialize histogram with zero counts and set running average to zero.
-  for (int64 i = 0; i < num_packet_streams_; ++i) {
-    for (int64 interval_index = 0; interval_index < options_.num_intervals();
+  for (int64_t i = 0; i < num_packet_streams_; ++i) {
+    for (int64_t interval_index = 0; interval_index < options_.num_intervals();
          ++interval_index) {
       packet_latencies_[i].set_counts(interval_index, 0);
     }
@@ -196,7 +196,7 @@ absl::Status PacketLatencyCalculator::Open(CalculatorContext* cc) {
   packet_latencies_.resize(num_packet_streams_);
   sum_latencies_usec_.resize(num_packet_streams_);
   num_latencies_.resize(num_packet_streams_);
-  for (int64 i = 0; i < num_packet_streams_; ++i) {
+  for (int64_t i = 0; i < num_packet_streams_; ++i) {
     // Initialize latency histograms with zero counts.
     packet_latencies_[i].set_num_intervals(options_.num_intervals());
     packet_latencies_[i].set_interval_size_usec(options_.interval_size_usec());
@@ -208,7 +208,7 @@ absl::Status PacketLatencyCalculator::Open(CalculatorContext* cc) {
     if (labels_provided) {
       packet_latencies_[i].set_label(options_.packet_labels(i));
     } else {
-      int64 input_stream_index = cc->Inputs().TagMap()->GetId("", i).value();
+      int64_t input_stream_index = cc->Inputs().TagMap()->GetId("", i).value();
       packet_latencies_[i].set_label(
           cc->Inputs().TagMap()->Names()[input_stream_index]);
     }
@@ -242,7 +242,7 @@ absl::Status PacketLatencyCalculator::Process(CalculatorContext* cc) {
   }
 
   if (options_.reset_duration_usec() > 0) {
-    const int64 time_now_usec = absl::ToUnixMicros(clock_->TimeNow());
+    const int64_t time_now_usec = absl::ToUnixMicros(clock_->TimeNow());
     if (time_now_usec - last_reset_time_usec_ >=
         options_.reset_duration_usec()) {
       ResetStatistics();
@@ -251,16 +251,16 @@ absl::Status PacketLatencyCalculator::Process(CalculatorContext* cc) {
   }
 
   // Update latency info if there is any incoming packet.
-  for (int64 i = 0; i < num_packet_streams_; ++i) {
+  for (int64_t i = 0; i < num_packet_streams_; ++i) {
     if (!cc->Inputs().Index(i).IsEmpty()) {
       const auto& packet_timestamp_usec = cc->InputTimestamp().Value();
 
       // Update latency statistics for this stream.
-      int64 current_clock_time_usec = absl::ToUnixMicros(clock_->TimeNow());
-      int64 current_calibrated_timestamp_usec =
+      int64_t current_clock_time_usec = absl::ToUnixMicros(clock_->TimeNow());
+      int64_t current_calibrated_timestamp_usec =
           (current_clock_time_usec - first_process_time_usec_) +
           first_reference_timestamp_usec_;
-      int64 packet_latency_usec =
+      int64_t packet_latency_usec =
           current_calibrated_timestamp_usec - packet_timestamp_usec;
 
       // Invalid timestamps in input signals could result in negative latencies.
@@ -270,7 +270,7 @@ absl::Status PacketLatencyCalculator::Process(CalculatorContext* cc) {
 
       // Update the latency, running average and histogram for this stream.
       packet_latencies_[i].set_current_latency_usec(packet_latency_usec);
-      int64 interval_index =
+      int64_t interval_index =
           packet_latency_usec / packet_latencies_[i].interval_size_usec();
       if (interval_index >= packet_latencies_[i].num_intervals()) {
         interval_index = packet_latencies_[i].num_intervals() - 1;
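The latency computation above calibrates wall-clock time against the reference stream once (at the first reference packet), measures each packet against that calibrated clock, and clamps the histogram bucket index to the last interval. A condensed sketch of just the arithmetic (all values in microseconds; names are illustrative):

#include <algorithm>
#include <cstdint>
#include <iostream>

// Latency of a packet given wall-clock "now" and the calibration anchors
// captured when the first reference packet arrived.
int64_t PacketLatencyUsec(int64_t now_usec, int64_t first_process_time_usec,
                          int64_t first_reference_timestamp_usec,
                          int64_t packet_timestamp_usec) {
  const int64_t calibrated_now =
      (now_usec - first_process_time_usec) + first_reference_timestamp_usec;
  return calibrated_now - packet_timestamp_usec;
}

// Histogram bucket for a latency, clamped to the last interval.
int64_t IntervalIndex(int64_t latency_usec, int64_t interval_size_usec,
                      int64_t num_intervals) {
  return std::min(latency_usec / interval_size_usec, num_intervals - 1);
}

int main() {
  // First reference packet: wall clock 5'000'000, stream timestamp 0.
  const int64_t latency = PacketLatencyUsec(5'250'000, 5'000'000, 0, 200'000);
  std::cout << latency << "\n";                            // prints 50000
  std::cout << IntervalIndex(latency, 10'000, 4) << "\n";  // clamped: 3
}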
View File
@@ -169,10 +169,10 @@ class PacketLatencyCalculatorTest : public ::testing::Test {
   }
 
   PacketLatency CreatePacketLatency(const double latency_usec,
-                                    const int64 num_intervals,
-                                    const int64 interval_size_usec,
+                                    const int64_t num_intervals,
+                                    const int64_t interval_size_usec,
                                     const std::vector<int>& counts,
-                                    const int64 avg_latency_usec,
+                                    const int64_t avg_latency_usec,
                                     const std::string& label) {
     PacketLatency latency_info;
     latency_info.set_current_latency_usec(latency_usec);
View File
@@ -13,7 +13,7 @@
 # limitations under the License.
 #
 
-load("//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library")
+load("//mediapipe/framework/port:build_config.bzl", "mediapipe_proto_library")
 load(
     "//mediapipe/framework/tool:mediapipe_graph.bzl",
     "mediapipe_binary_graph",
@@ -23,28 +23,35 @@ licenses(["notice"])
 
 package(default_visibility = ["//visibility:public"])
 
-proto_library(
+mediapipe_proto_library(
     name = "flow_to_image_calculator_proto",
     srcs = ["flow_to_image_calculator.proto"],
-    deps = ["//mediapipe/framework:calculator_proto"],
+    deps = [
+        "//mediapipe/framework:calculator_options_proto",
+        "//mediapipe/framework:calculator_proto",
+    ],
 )
 
-proto_library(
+mediapipe_proto_library(
     name = "opencv_video_encoder_calculator_proto",
     srcs = ["opencv_video_encoder_calculator.proto"],
-    deps = ["//mediapipe/framework:calculator_proto"],
+    deps = [
+        "//mediapipe/framework:calculator_options_proto",
+        "//mediapipe/framework:calculator_proto",
+    ],
 )
 
-proto_library(
+mediapipe_proto_library(
    name = "motion_analysis_calculator_proto",
     srcs = ["motion_analysis_calculator.proto"],
     deps = [
+        "//mediapipe/framework:calculator_options_proto",
         "//mediapipe/framework:calculator_proto",
         "//mediapipe/util/tracking:motion_analysis_proto",
     ],
 )
 
-proto_library(
+mediapipe_proto_library(
     name = "flow_packager_calculator_proto",
     srcs = ["flow_packager_calculator.proto"],
     deps = [
@@ -54,114 +61,45 @@ proto_library(
     ],
 )
 
-proto_library(
+mediapipe_proto_library(
     name = "box_tracker_calculator_proto",
     srcs = ["box_tracker_calculator.proto"],
     deps = [
+        "//mediapipe/framework:calculator_options_proto",
         "//mediapipe/framework:calculator_proto",
         "//mediapipe/util/tracking:box_tracker_proto",
     ],
 )
 
-proto_library(
+mediapipe_proto_library(
     name = "tracked_detection_manager_calculator_proto",
     srcs = ["tracked_detection_manager_calculator.proto"],
     deps = [
+        "//mediapipe/framework:calculator_options_proto",
        "//mediapipe/framework:calculator_proto",
         "//mediapipe/util/tracking:tracked_detection_manager_config_proto",
     ],
 )
 
-proto_library(
+mediapipe_proto_library(
     name = "box_detector_calculator_proto",
     srcs = ["box_detector_calculator.proto"],
     deps = [
+        "//mediapipe/framework:calculator_options_proto",
         "//mediapipe/framework:calculator_proto",
         "//mediapipe/util/tracking:box_detector_proto",
     ],
 )
 
-proto_library(
+mediapipe_proto_library(
     name = "video_pre_stream_calculator_proto",
     srcs = ["video_pre_stream_calculator.proto"],
     deps = [
+        "//mediapipe/framework:calculator_options_proto",
         "//mediapipe/framework:calculator_proto",
     ],
 )
 
-mediapipe_cc_proto_library(
-    name = "motion_analysis_calculator_cc_proto",
-    srcs = ["motion_analysis_calculator.proto"],
-    cc_deps = [
-        "//mediapipe/framework:calculator_cc_proto",
-        "//mediapipe/util/tracking:motion_analysis_cc_proto",
-    ],
-    deps = [":motion_analysis_calculator_proto"],
-)
-
-mediapipe_cc_proto_library(
-    name = "flow_packager_calculator_cc_proto",
-    srcs = ["flow_packager_calculator.proto"],
-    cc_deps = [
-        "//mediapipe/framework:calculator_cc_proto",
-        "//mediapipe/util/tracking:flow_packager_cc_proto",
-    ],
-    deps = [":flow_packager_calculator_proto"],
-)
-
-mediapipe_cc_proto_library(
-    name = "box_tracker_calculator_cc_proto",
-    srcs = ["box_tracker_calculator.proto"],
-    cc_deps = [
-        "//mediapipe/framework:calculator_cc_proto",
-        "//mediapipe/util/tracking:box_tracker_cc_proto",
-    ],
-    deps = [":box_tracker_calculator_proto"],
-)
-
-mediapipe_cc_proto_library(
-    name = "tracked_detection_manager_calculator_cc_proto",
-    srcs = ["tracked_detection_manager_calculator.proto"],
-    cc_deps = [
-        "//mediapipe/framework:calculator_cc_proto",
-        "//mediapipe/util/tracking:tracked_detection_manager_config_cc_proto",
-    ],
-    deps = [":tracked_detection_manager_calculator_proto"],
-)
-
-mediapipe_cc_proto_library(
-    name = "box_detector_calculator_cc_proto",
-    srcs = ["box_detector_calculator.proto"],
-    cc_deps = [
-        "//mediapipe/framework:calculator_cc_proto",
-        "//mediapipe/util/tracking:box_detector_cc_proto",
-    ],
-    deps = [":box_detector_calculator_proto"],
-)
-
-mediapipe_cc_proto_library(
-    name = "video_pre_stream_calculator_cc_proto",
-    srcs = ["video_pre_stream_calculator.proto"],
-    cc_deps = [
-        "//mediapipe/framework:calculator_cc_proto",
-    ],
-    deps = [":video_pre_stream_calculator_proto"],
-)
-
-mediapipe_cc_proto_library(
-    name = "flow_to_image_calculator_cc_proto",
-    srcs = ["flow_to_image_calculator.proto"],
-    cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
-    deps = [":flow_to_image_calculator_proto"],
-)
-
-mediapipe_cc_proto_library(
-    name = "opencv_video_encoder_calculator_cc_proto",
-    srcs = ["opencv_video_encoder_calculator.proto"],
-    cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
-    deps = [":opencv_video_encoder_calculator_proto"],
-)
-
 cc_library(
     name = "flow_to_image_calculator",
     srcs = ["flow_to_image_calculator.cc"],
View File
@@ -1,4 +1,4 @@
-load("//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library")
+load("//mediapipe/framework/port:build_config.bzl", "mediapipe_proto_library")
 
 # Copyright 2019 The MediaPipe Authors.
 #
@@ -22,7 +22,7 @@ package(default_visibility = [
     "//photos/editing/mobile/mediapipe/proto:__subpackages__",
 ])
 
-proto_library(
+mediapipe_proto_library(
     name = "autoflip_messages_proto",
     srcs = ["autoflip_messages.proto"],
     deps = [
@@ -30,29 +30,6 @@ proto_library(
     ],
 )
 
-java_lite_proto_library(
-    name = "autoflip_messages_java_proto_lite",
-    visibility = [
-        "//java/com/google/android/apps/photos:__subpackages__",
-        "//javatests/com/google/android/apps/photos:__subpackages__",
-    ],
-    deps = [
-        ":autoflip_messages_proto",
-    ],
-)
-
-mediapipe_cc_proto_library(
-    name = "autoflip_messages_cc_proto",
-    srcs = ["autoflip_messages.proto"],
-    cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
-    visibility = [
-        "//mediapipe/examples:__subpackages__",
-        "//photos/editing/mobile/mediapipe/calculators:__pkg__",
-        "//photos/editing/mobile/mediapipe/calculators:__subpackages__",
-    ],
-    deps = [":autoflip_messages_proto"],
-)
-
 cc_binary(
     name = "run_autoflip",
     data = [
View File
@@ -1,4 +1,4 @@
-load("//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library")
+load("//mediapipe/framework/port:build_config.bzl", "mediapipe_proto_library")
 
 # Copyright 2019 The MediaPipe Authors.
 #
@@ -40,22 +40,16 @@ cc_library(
     alwayslink = 1,
 )
 
-proto_library(
+mediapipe_proto_library(
     name = "border_detection_calculator_proto",
     srcs = ["border_detection_calculator.proto"],
-    visibility = ["//mediapipe/examples:__subpackages__"],
     deps = [
+        "//mediapipe/framework:calculator_options_proto",
         "//mediapipe/framework:calculator_proto",
     ],
 )
 
-mediapipe_cc_proto_library(
-    name = "border_detection_calculator_cc_proto",
-    srcs = ["border_detection_calculator.proto"],
-    cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
-    visibility = ["//mediapipe/examples:__subpackages__"],
-    deps = [":border_detection_calculator_proto"],
-)
-
 cc_library(
     name = "content_zooming_calculator_state",
     hdrs = ["content_zooming_calculator_state.h"],
@@ -85,27 +79,16 @@ cc_library(
     alwayslink = 1,
 )
 
-proto_library(
+mediapipe_proto_library(
     name = "content_zooming_calculator_proto",
     srcs = ["content_zooming_calculator.proto"],
-    deps = [
-        "//mediapipe/examples/desktop/autoflip/quality:kinematic_path_solver_proto",
-        "//mediapipe/framework:calculator_proto",
-    ],
-)
-
-mediapipe_cc_proto_library(
-    name = "content_zooming_calculator_cc_proto",
-    srcs = ["content_zooming_calculator.proto"],
-    cc_deps = [
-        "//mediapipe/examples/desktop/autoflip/quality:kinematic_path_solver_cc_proto",
-        "//mediapipe/framework:calculator_cc_proto",
-    ],
     visibility = [
         "//mediapipe/examples:__subpackages__",
     ],
     deps = [
-        ":content_zooming_calculator_proto",
+        "//mediapipe/examples/desktop/autoflip/quality:kinematic_path_solver_proto",
+        "//mediapipe/framework:calculator_options_proto",
+        "//mediapipe/framework:calculator_proto",
     ],
 )
@@ -177,23 +160,16 @@ cc_library(
     alwayslink = 1,
 )
 
-proto_library(
+mediapipe_proto_library(
     name = "video_filtering_calculator_proto",
     srcs = ["video_filtering_calculator.proto"],
     visibility = ["//visibility:public"],
     deps = [
+        "//mediapipe/framework:calculator_options_proto",
         "//mediapipe/framework:calculator_proto",
     ],
 )
 
-mediapipe_cc_proto_library(
-    name = "video_filtering_calculator_cc_proto",
-    srcs = ["video_filtering_calculator.proto"],
-    cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
-    visibility = ["//visibility:public"],
-    deps = [":video_filtering_calculator_proto"],
-)
-
 cc_test(
     name = "video_filtering_calculator_test",
     srcs = ["video_filtering_calculator_test.cc"],
@@ -209,27 +185,17 @@ cc_test(
     ],
 )
 
-proto_library(
+mediapipe_proto_library(
     name = "scene_cropping_calculator_proto",
     srcs = ["scene_cropping_calculator.proto"],
     visibility = ["//visibility:public"],
     deps = [
         "//mediapipe/examples/desktop/autoflip/quality:cropping_proto",
+        "//mediapipe/framework:calculator_options_proto",
         "//mediapipe/framework:calculator_proto",
     ],
 )
 
-mediapipe_cc_proto_library(
-    name = "scene_cropping_calculator_cc_proto",
-    srcs = ["scene_cropping_calculator.proto"],
-    cc_deps = [
-        "//mediapipe/examples/desktop/autoflip/quality:cropping_cc_proto",
-        "//mediapipe/framework:calculator_cc_proto",
-    ],
-    visibility = ["//visibility:public"],
-    deps = [":scene_cropping_calculator_proto"],
-)
-
 cc_library(
     name = "scene_cropping_calculator",
     srcs = ["scene_cropping_calculator.cc"],
@@ -296,26 +262,17 @@ cc_library(
     alwayslink = 1,
 )
 
-proto_library(
+mediapipe_proto_library(
     name = "signal_fusing_calculator_proto",
     srcs = ["signal_fusing_calculator.proto"],
-    visibility = ["//mediapipe/examples:__subpackages__"],
     deps = [
         "//mediapipe/examples/desktop/autoflip:autoflip_messages_proto",
+        "//mediapipe/framework:calculator_options_proto",
         "//mediapipe/framework:calculator_proto",
     ],
 )
 
-mediapipe_cc_proto_library(
-    name = "signal_fusing_calculator_cc_proto",
-    srcs = ["signal_fusing_calculator.proto"],
-    cc_deps = [
-        "//mediapipe/examples/desktop/autoflip:autoflip_messages_cc_proto",
-        "//mediapipe/framework:calculator_cc_proto",
-    ],
-    visibility = ["//mediapipe/examples:__subpackages__"],
-    deps = [":signal_fusing_calculator_proto"],
-)
-
 cc_test(
     name = "signal_fusing_calculator_test",
     srcs = ["signal_fusing_calculator_test.cc"],
@@ -353,18 +310,14 @@ cc_library(
     alwayslink = 1,
 )
 
-proto_library(
+mediapipe_proto_library(
     name = "shot_boundary_calculator_proto",
     srcs = ["shot_boundary_calculator.proto"],
-    deps = ["//mediapipe/framework:calculator_proto"],
-)
-
-mediapipe_cc_proto_library(
-    name = "shot_boundary_calculator_cc_proto",
-    srcs = ["shot_boundary_calculator.proto"],
-    cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
     visibility = ["//mediapipe/examples:__subpackages__"],
-    deps = [":shot_boundary_calculator_proto"],
+    deps = [
+        "//mediapipe/framework:calculator_options_proto",
+        "//mediapipe/framework:calculator_proto",
+    ],
 )
 
 cc_test(
@@ -413,26 +366,17 @@ cc_library(
     alwayslink = 1,
 )
 
-proto_library(
+mediapipe_proto_library(
     name = "face_to_region_calculator_proto",
     srcs = ["face_to_region_calculator.proto"],
-    visibility = ["//mediapipe/examples:__subpackages__"],
     deps = [
         "//mediapipe/examples/desktop/autoflip/quality:visual_scorer_proto",
+        "//mediapipe/framework:calculator_options_proto",
         "//mediapipe/framework:calculator_proto",
     ],
 )
 
-mediapipe_cc_proto_library(
-    name = "face_to_region_calculator_cc_proto",
-    srcs = ["face_to_region_calculator.proto"],
-    cc_deps = [
-        "//mediapipe/examples/desktop/autoflip/quality:visual_scorer_cc_proto",
-        "//mediapipe/framework:calculator_cc_proto",
-    ],
-    visibility = ["//mediapipe/examples:__subpackages__"],
-    deps = [":face_to_region_calculator_proto"],
-)
-
 cc_test(
     name = "face_to_region_calculator_test",
     srcs = ["face_to_region_calculator_test.cc"],
@@ -454,22 +398,16 @@ cc_test(
     ],
 )
 
-proto_library(
+mediapipe_proto_library(
     name = "localization_to_region_calculator_proto",
     srcs = ["localization_to_region_calculator.proto"],
-    visibility = ["//mediapipe/examples:__subpackages__"],
     deps = [
+        "//mediapipe/framework:calculator_options_proto",
        "//mediapipe/framework:calculator_proto",
     ],
 )
 
-mediapipe_cc_proto_library(
-    name = "localization_to_region_calculator_cc_proto",
-    srcs = ["localization_to_region_calculator.proto"],
-    cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
-    visibility = ["//mediapipe/examples:__subpackages__"],
-    deps = [":localization_to_region_calculator_proto"],
-)
-
 cc_library(
     name = "localization_to_region_calculator",
     srcs = ["localization_to_region_calculator.cc"],
View File
@@ -214,7 +214,7 @@ double BorderDetectionCalculator::ColorCount(const Color& mask_color,
                                              const cv::Mat& image) const {
   int background_count = 0;
   for (int i = 0; i < image.rows; i++) {
-    const uint8* row_ptr = image.ptr<uint8>(i);
+    const uint8_t* row_ptr = image.ptr<uint8_t>(i);
     for (int j = 0; j < image.cols * 3; j += 3) {
       if (std::abs(mask_color.r() - static_cast<int>(row_ptr[j + 2])) <=
               options_.color_tolerance() &&
View File
@@ -142,7 +142,7 @@ class ContentZoomingCalculator : public CalculatorBase {
   // Stores the first crop rectangle.
   mediapipe::NormalizedRect first_rect_;
   // Stores the time of the last "only_required" input.
-  int64 last_only_required_detection_;
+  int64_t last_only_required_detection_;
   // Rect values of last message with detection(s).
   int last_measured_height_;
   int last_measured_x_offset_;
@@ -500,7 +500,7 @@ bool ContentZoomingCalculator::IsAnimatingToFirstRect(
     return false;
   }
 
-  const int64 delta_us = (timestamp - first_rect_timestamp_).Value();
+  const int64_t delta_us = (timestamp - first_rect_timestamp_).Value();
   return (0 <= delta_us && delta_us <= options_.us_to_first_rect());
 }
@@ -522,8 +522,8 @@ absl::StatusOr<mediapipe::Rect> ContentZoomingCalculator::GetAnimationRect(
   RET_CHECK(IsAnimatingToFirstRect(timestamp))
       << "Must only be called if animating to first rect.";
 
-  const int64 delta_us = (timestamp - first_rect_timestamp_).Value();
-  const int64 delay = options_.us_to_first_rect_delay();
+  const int64_t delta_us = (timestamp - first_rect_timestamp_).Value();
+  const int64_t delay = options_.us_to_first_rect_delay();
   const double interpolation = easeInOutQuad(std::max(
       0.0, (delta_us - delay) /
                static_cast<double>(options_.us_to_first_rect() - delay)));
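GetAnimationRect() above maps elapsed time into a [0, 1] interpolation parameter, holds at 0 during an initial delay, and shapes the motion with an ease-in-out curve. A standalone sketch of that timing math (easeInOutQuad is the standard quadratic easing; the constants are illustrative):

#include <algorithm>
#include <cstdint>
#include <iostream>

// Standard quadratic ease-in-out over t in [0, 1].
double EaseInOutQuad(double t) {
  return t < 0.5 ? 2.0 * t * t : 1.0 - 2.0 * (1.0 - t) * (1.0 - t);
}

// Fraction of the animation completed at delta_us, holding during the delay.
double AnimationProgress(int64_t delta_us, int64_t total_us, int64_t delay_us) {
  const double t = std::max(
      0.0, (delta_us - delay_us) / static_cast<double>(total_us - delay_us));
  return EaseInOutQuad(std::min(t, 1.0));
}

int main() {
  // 1 s animation with a 0.2 s delay: still 0 at 0.1 s, 1 at 1.0 s.
  std::cout << AnimationProgress(100'000, 1'000'000, 200'000) << "\n";    // 0
  std::cout << AnimationProgress(600'000, 1'000'000, 200'000) << "\n";    // 0.5
  std::cout << AnimationProgress(1'000'000, 1'000'000, 200'000) << "\n";  // 1
}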
View File
@@ -226,7 +226,7 @@ struct AddDetectionFlags {
   std::optional<int> max_zoom_factor_percent;
 };
 
-void AddDetectionFrameSize(const cv::Rect_<float>& position, const int64 time,
+void AddDetectionFrameSize(const cv::Rect_<float>& position, const int64_t time,
                            const int width, const int height,
                            CalculatorRunner* runner,
                            const AddDetectionFlags& flags = {}) {
@@ -275,7 +275,7 @@ void AddDetectionFrameSize(const cv::Rect_<float>& position, const int64 time,
   }
 }
 
-void AddDetection(const cv::Rect_<float>& position, const int64 time,
+void AddDetection(const cv::Rect_<float>& position, const int64_t time,
                   CalculatorRunner* runner) {
   AddDetectionFrameSize(position, time, 1000, 1000, runner);
 }

View File

@@ -200,7 +200,7 @@ absl::Status ParseAspectRatioString(const std::string& aspect_ratio_string,
 }

 void ConstructExternalRenderMessage(
     const cv::Rect& crop_from_location, const cv::Rect& render_to_location,
-    const cv::Scalar& padding_color, const uint64 timestamp_us,
+    const cv::Scalar& padding_color, const uint64_t timestamp_us,
     ExternalRenderFrame* external_render_message, int frame_width,
     int frame_height) {
   auto crop_from_message =

@@ -717,7 +717,7 @@ absl::Status SceneCroppingCalculator::FormatAndOutputCroppedFrames(
   for (int i = 0; i < num_frames; ++i) {
     // Set default padding color to white.
     cv::Scalar padding_color_to_add = cv::Scalar(255, 255, 255);
-    const int64 time_ms = scene_frame_timestamps_[i];
+    const int64_t time_ms = scene_frame_timestamps_[i];
     if (*apply_padding) {
       if (has_solid_background_) {
         double lab[3];

@@ -747,7 +747,7 @@ absl::Status SceneCroppingCalculator::FormatAndOutputCroppedFrames(
   // Resizes cropped frames, pads frames, and output frames.
   for (int i = 0; i < num_frames; ++i) {
-    const int64 time_ms = scene_frame_timestamps_[i];
+    const int64_t time_ms = scene_frame_timestamps_[i];
     const Timestamp timestamp(time_ms);
     auto scaled_frame = absl::make_unique<ImageFrame>(
         frame_format_, scaled_width, scaled_height);

View File

@@ -175,7 +175,7 @@ constexpr int kMinNumDetections = 0;
 constexpr int kMaxNumDetections = 10;
 constexpr int kDownSampleRate = 4;
-constexpr int64 kTimestampDiff = 20000;
+constexpr int64_t kTimestampDiff = 20000;

 // Returns a singleton random engine for generating random values. The seed is
 // fixed for reproducibility.

@@ -254,7 +254,7 @@ std::unique_ptr<ImageFrame> MakeImageFrameFromColor(const cv::Scalar& color,
 // Randomly generates a number of detections in the range of kMinNumDetections
 // and kMaxNumDetections. Optionally add a key image frame of random solid color
 // and given size.
-void AddKeyFrameFeatures(const int64 time_ms, const int key_frame_width,
+void AddKeyFrameFeatures(const int64_t time_ms, const int key_frame_width,
                          const int key_frame_height, bool randomize,
                          CalculatorRunner::StreamContentsSet* inputs) {
   Timestamp timestamp(time_ms);

@@ -286,7 +286,7 @@ void AddScene(const int start_frame_index, const int num_scene_frames,
               const int key_frame_width, const int key_frame_height,
               const int DownSampleRate,
               CalculatorRunner::StreamContentsSet* inputs) {
-  int64 time_ms = start_frame_index * kTimestampDiff;
+  int64_t time_ms = start_frame_index * kTimestampDiff;
   for (int i = 0; i < num_scene_frames; ++i) {
     Timestamp timestamp(time_ms);
     if (inputs->HasTag(kVideoFramesTag)) {

@@ -657,7 +657,7 @@ TEST(SceneCroppingCalculatorTest, PadsWithSolidColorFromStaticFeatures) {
   // Add inputs.
   auto* inputs = runner->MutableInputs();
-  int64 time_ms = 0;
+  int64_t time_ms = 0;
   int num_static_features = 0;
   for (int i = 0; i < kSceneSize; ++i) {
     Timestamp timestamp(time_ms);

View File

@@ -1,4 +1,4 @@
-load("//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library")
+load("//mediapipe/framework/port:build_config.bzl", "mediapipe_proto_library")

 # Copyright 2019 The MediaPipe Authors.
 #

@@ -20,7 +20,7 @@ package(default_visibility = [
     "//mediapipe/examples:__subpackages__",
 ])

-proto_library(
+mediapipe_proto_library(
     name = "cropping_proto",
     srcs = ["cropping.proto"],
     deps = [

@@ -29,41 +29,18 @@ proto_library(
     ],
 )

-mediapipe_cc_proto_library(
-    name = "cropping_cc_proto",
-    srcs = ["cropping.proto"],
-    cc_deps = [
-        ":kinematic_path_solver_cc_proto",
-        "//mediapipe/examples/desktop/autoflip:autoflip_messages_cc_proto",
-    ],
-    visibility = ["//mediapipe/examples:__subpackages__"],
-    deps = [":cropping_proto"],
-)
-
-proto_library(
+mediapipe_proto_library(
     name = "kinematic_path_solver_proto",
     srcs = ["kinematic_path_solver.proto"],
-)
-
-mediapipe_cc_proto_library(
-    name = "kinematic_path_solver_cc_proto",
-    srcs = ["kinematic_path_solver.proto"],
     visibility = [
         "//mediapipe/examples:__subpackages__",
     ],
-    deps = [":kinematic_path_solver_proto"],
 )

-proto_library(
+mediapipe_proto_library(
     name = "focus_point_proto",
     srcs = ["focus_point.proto"],
-)
-
-mediapipe_cc_proto_library(
-    name = "focus_point_cc_proto",
-    srcs = ["focus_point.proto"],
     visibility = ["//mediapipe/examples:__subpackages__"],
-    deps = [":focus_point_proto"],
 )

 cc_library(

@@ -333,16 +310,10 @@ cc_test(
     ],
 )

-proto_library(
+mediapipe_proto_library(
     name = "visual_scorer_proto",
     srcs = ["visual_scorer.proto"],
-)
-
-mediapipe_cc_proto_library(
-    name = "visual_scorer_cc_proto",
-    srcs = ["visual_scorer.proto"],
     visibility = ["//mediapipe/examples:__subpackages__"],
-    deps = [":visual_scorer_proto"],
 )

 cc_library(

View File

@@ -34,7 +34,7 @@ absl::Status SceneCameraMotionAnalyzer::AnalyzeSceneAndPopulateFocusPointFrames(
     const KeyFrameCropOptions& key_frame_crop_options,
     const std::vector<KeyFrameCropResult>& key_frame_crop_results,
     const int scene_frame_width, const int scene_frame_height,
-    const std::vector<int64>& scene_frame_timestamps,
+    const std::vector<int64_t>& scene_frame_timestamps,
     const bool has_solid_color_background,
     SceneKeyFrameCropSummary* scene_summary,
     std::vector<FocusPointFrame>* focus_point_frames,

@@ -45,7 +45,7 @@ absl::Status SceneCameraMotionAnalyzer::AnalyzeSceneAndPopulateFocusPointFrames(
       key_frame_crop_options, key_frame_crop_results, scene_frame_width,
       scene_frame_height, scene_summary));

-  const int64 scene_span_ms =
+  const int64_t scene_span_ms =
       scene_frame_timestamps.empty()
           ? 0
           : scene_frame_timestamps.back() - scene_frame_timestamps.front();

@@ -103,7 +103,7 @@ absl::Status SceneCameraMotionAnalyzer::ToUseSweepingMotion(

 absl::Status SceneCameraMotionAnalyzer::DecideCameraMotionType(
     const KeyFrameCropOptions& key_frame_crop_options,
-    const double scene_span_sec, const int64 end_time_us,
+    const double scene_span_sec, const int64_t end_time_us,
     SceneKeyFrameCropSummary* scene_summary,
     SceneCameraMotion* scene_camera_motion) const {
   RET_CHECK_GE(scene_span_sec, 0.0) << "Scene time span is negative.";

@@ -298,7 +298,7 @@ absl::Status SceneCameraMotionAnalyzer::AddFocusPointsFromCenterTypeAndWeight(
 absl::Status SceneCameraMotionAnalyzer::PopulateFocusPointFrames(
     const SceneKeyFrameCropSummary& scene_summary,
     const SceneCameraMotion& scene_camera_motion,
-    const std::vector<int64>& scene_frame_timestamps,
+    const std::vector<int64_t>& scene_frame_timestamps,
     std::vector<FocusPointFrame>* focus_point_frames) const {
   RET_CHECK_NE(focus_point_frames, nullptr)
       << "Output vector of FocusPointFrame is null.";

@@ -380,7 +380,7 @@ absl::Status SceneCameraMotionAnalyzer::PopulateFocusPointFrames(
 absl::Status SceneCameraMotionAnalyzer::PopulateFocusPointFramesForTracking(
     const SceneKeyFrameCropSummary& scene_summary,
     const FocusPointFrameType focus_point_frame_type,
-    const std::vector<int64>& scene_frame_timestamps,
+    const std::vector<int64_t>& scene_frame_timestamps,
     std::vector<FocusPointFrame>* focus_point_frames) const {
   RET_CHECK_GE(scene_summary.key_frame_max_score(), 0.0)
       << "Maximum score is negative.";

@@ -392,7 +392,7 @@ absl::Status SceneCameraMotionAnalyzer::PopulateFocusPointFramesForTracking(
   const int scene_frame_height = scene_summary.scene_frame_height();

   PiecewiseLinearFunction center_x_function, center_y_function, score_function;
-  const int64 timestamp_offset = key_frame_compact_infos[0].timestamp_ms();
+  const int64_t timestamp_offset = key_frame_compact_infos[0].timestamp_ms();
   for (int i = 0; i < num_key_frames; ++i) {
     const float center_x = key_frame_compact_infos[i].center_x();
     const float center_y = key_frame_compact_infos[i].center_y();
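
Note: PopulateFocusPointFramesForTracking feeds the per-key-frame centers and scores into PiecewiseLinearFunction objects keyed by timestamp offsets. That class's API is not visible in this diff; as an illustration only, piecewise-linear interpolation over (time, value) samples can be sketched like this:

    #include <cstdint>
    #include <vector>

    // Interpolates linearly between consecutive (time, value) samples and
    // clamps outside the sampled range. Times must be strictly increasing.
    class PiecewiseLinearSketch {
     public:
      void AddPoint(int64_t t, double v) { pts_.push_back({t, v}); }

      double Eval(int64_t t) const {
        if (pts_.empty()) return 0.0;
        if (t <= pts_.front().t) return pts_.front().v;
        if (t >= pts_.back().t) return pts_.back().v;
        for (size_t i = 1; i < pts_.size(); ++i) {
          if (t <= pts_[i].t) {
            const double frac = static_cast<double>(t - pts_[i - 1].t) /
                                static_cast<double>(pts_[i].t - pts_[i - 1].t);
            return pts_[i - 1].v + frac * (pts_[i].v - pts_[i - 1].v);
          }
        }
        return pts_.back().v;
      }

     private:
      struct Point {
        int64_t t;
        double v;
      };
      std::vector<Point> pts_;
    };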

View File

@@ -425,7 +425,10 @@ using GenericNode = Node<internal::Generic>;
 template <class Calc>
 class Node : public NodeBase {
  public:
-  Node() : NodeBase(std::string(Calc::kCalculatorName)) {}
+  Node()
+      : NodeBase(
+            FunctionRegistry<NodeBase>::GetLookupName(Calc::kCalculatorName)) {}
   // Overrides the built-in calculator type string with the provided argument.
   // Can be used to create nodes from pure interfaces.
   // TODO: only use this for pure interfaces

@@ -546,6 +549,7 @@ class Graph {
   // Creates a node of a specific type. Should be used for pure interfaces,
   // which do not have a built-in type string.
+  // `type` is a calculator type-name with dot-separated namespaces.
   template <class Calc>
   Node<Calc>& AddNode(absl::string_view type) {
     auto node =

@@ -557,6 +561,7 @@ class Graph {
   // Creates a generic node, with no compile-time checking of inputs and
   // outputs. This can be used for calculators whose contract is not visible.
+  // `type` is a calculator type-name with dot-separated namespaces.
   GenericNode& AddNode(absl::string_view type) {
     auto node =
         std::make_unique<GenericNode>(std::string(type.data(), type.size()));
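
Note: both AddNode overloads accept a calculator type name with dot-separated namespaces, per the comments added above. A hedged usage sketch, assuming this hunk is from the api2 graph builder header; the calculator type name below is made up:

    #include "mediapipe/framework/api2/builder.h"

    void BuildExampleGraph() {
      mediapipe::api2::builder::Graph graph;
      // Generic node: no compile-time contract checking, for calculators
      // whose contract is not visible. "foo.bar.MyCalculator" is hypothetical.
      auto& node = graph.AddNode("foo.bar.MyCalculator");
      (void)node;
    }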

View File

@@ -192,8 +192,7 @@ absl::Status CalculatorGraph::InitializeStreams() {
       auto input_tag_map,
       tool::TagMap::Create(validated_graph_->Config().input_stream()));
   for (const auto& stream_name : input_tag_map->Names()) {
-    RET_CHECK(!mediapipe::ContainsKey(graph_input_streams_, stream_name))
-        .SetNoLogging()
+    RET_CHECK(!graph_input_streams_.contains(stream_name)).SetNoLogging()
         << "CalculatorGraph Initialization failed, graph input stream \""
         << stream_name << "\" was specified twice.";
     int output_stream_index = validated_graph_->OutputStreamIndex(stream_name);
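
Note: the ContainsKey-to-contains change works because Abseil's hash containers expose a member contains(), which makes the standalone lookup helper unnecessary. A minimal standalone sketch; the set contents and helper name are illustrative:

    #include <string>

    #include "absl/container/flat_hash_set.h"

    bool IsDuplicateStream(const absl::flat_hash_set<std::string>& seen,
                           const std::string& stream_name) {
      // Member lookup replaces mediapipe::ContainsKey(seen, stream_name).
      return seen.contains(stream_name);
    }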

View File

@@ -679,7 +679,7 @@ REGISTER_CALCULATOR(BoundToPacketCalculator);
 // A Calculator that produces packets at timestamps beyond the input timestamp.
 class FuturePacketCalculator : public CalculatorBase {
  public:
-  static constexpr int64 kOutputFutureMicros = 3;
+  static constexpr int64_t kOutputFutureMicros = 3;

   static absl::Status GetContract(CalculatorContract* cc) {
     cc->Inputs().Index(0).Set<int>();

View File

@@ -188,21 +188,21 @@ class Uint64PacketGenerator : public PacketGenerator {
   static absl::Status FillExpectations(
       const PacketGeneratorOptions& extendable_options,
       PacketTypeSet* input_side_packets, PacketTypeSet* output_side_packets) {
-    output_side_packets->Index(0).Set<uint64>();
+    output_side_packets->Index(0).Set<uint64_t>();
     return absl::OkStatus();
   }

   static absl::Status Generate(const PacketGeneratorOptions& extendable_options,
                                const PacketSet& input_side_packets,
                                PacketSet* output_side_packets) {
-    output_side_packets->Index(0) = Adopt(new uint64(15LL << 32 | 5));
+    output_side_packets->Index(0) = Adopt(new uint64_t(15LL << 32 | 5));
     return absl::OkStatus();
   }
 };
 REGISTER_PACKET_GENERATOR(Uint64PacketGenerator);

 TEST(CalculatorGraph, OutputSidePacketInProcess) {
-  const int64 offset = 100;
+  const int64_t offset = 100;
   CalculatorGraphConfig config =
       mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"pb(
         input_stream: "offset"

@@ -400,7 +400,7 @@ TEST(CalculatorGraph, SharePacketGeneratorGraph) {
 }

 TEST(CalculatorGraph, OutputSidePacketAlreadySet) {
-  const int64 offset = 100;
+  const int64_t offset = 100;
   CalculatorGraphConfig config =
       mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"pb(
         input_stream: "offset"

@@ -427,7 +427,7 @@ TEST(CalculatorGraph, OutputSidePacketAlreadySet) {
 }

 TEST(CalculatorGraph, OutputSidePacketWithTimestamp) {
-  const int64 offset = 100;
+  const int64_t offset = 100;
   CalculatorGraphConfig config =
       mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(R"pb(
         input_stream: "offset"

@@ -716,7 +716,7 @@ TEST(CalculatorGraph, GetOutputSidePacket) {
   // Run the graph twice.
   int max_count = 100;
   std::map<std::string, Packet> extra_side_packets;
-  extra_side_packets.insert({"input_uint64", MakePacket<uint64>(1123)});
+  extra_side_packets.insert({"input_uint64", MakePacket<uint64_t>(1123)});
   for (int run = 0; run < 1; ++run) {
     MP_ASSERT_OK(graph.StartRun(extra_side_packets));
     status_or_packet = graph.GetOutputSidePacket("output_uint32_pair");
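
Note: the side packet above packs two 32-bit values into one uint64_t (15LL << 32 | 5 puts 15 in the high word and 5 in the low word), which the test later reads back as the "output_uint32_pair". A standalone sketch of the packing arithmetic:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Same arithmetic as Adopt(new uint64_t(15LL << 32 | 5)) in the test.
      const uint64_t packed = (uint64_t{15} << 32) | uint64_t{5};
      const uint32_t high = static_cast<uint32_t>(packed >> 32);
      const uint32_t low = static_cast<uint32_t>(packed & 0xffffffffu);
      assert(high == 15 && low == 5);
      return 0;
    }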

View File

@@ -439,7 +439,7 @@ class GlobalCountSourceCalculator : public CalculatorBase {
     ++local_count_;
   }

-  int64 local_count_ = 0;
+  int64_t local_count_ = 0;
 };
 const int GlobalCountSourceCalculator::kNumOutputPackets = 5;
 REGISTER_CALCULATOR(GlobalCountSourceCalculator);

@@ -765,7 +765,7 @@ class TypedStatusHandler : public StatusHandler {
   }
 };

 typedef TypedStatusHandler<std::string> StringStatusHandler;
-typedef TypedStatusHandler<uint32> Uint32StatusHandler;
+typedef TypedStatusHandler<uint32_t> Uint32StatusHandler;
 REGISTER_STATUS_HANDLER(StringStatusHandler);
 REGISTER_STATUS_HANDLER(Uint32StatusHandler);

@@ -1398,9 +1398,9 @@ void RunComprehensiveTest(CalculatorGraph* graph,
   MP_ASSERT_OK(graph->Initialize(proto));
   std::map<std::string, Packet> extra_side_packets;
-  extra_side_packets.emplace("node_3", Adopt(new uint64((15LL << 32) | 3)));
+  extra_side_packets.emplace("node_3", Adopt(new uint64_t((15LL << 32) | 3)));
   if (define_node_5) {
-    extra_side_packets.emplace("node_5", Adopt(new uint64((15LL << 32) | 5)));
+    extra_side_packets.emplace("node_5", Adopt(new uint64_t((15LL << 32) | 5)));
   }

   // Call graph->Run() several times, to make sure that the appropriate

@@ -1452,9 +1452,9 @@ void RunComprehensiveTest(CalculatorGraph* graph,
   // Verify that the graph can still run (but not successfully) when
   // one of the nodes is caused to fail.
   extra_side_packets.clear();
-  extra_side_packets.emplace("node_3", Adopt(new uint64((15LL << 32) | 0)));
+  extra_side_packets.emplace("node_3", Adopt(new uint64_t((15LL << 32) | 0)));
   if (define_node_5) {
-    extra_side_packets.emplace("node_5", Adopt(new uint64((15LL << 32) | 5)));
+    extra_side_packets.emplace("node_5", Adopt(new uint64_t((15LL << 32) | 5)));
   }
   dumped_final_sum_packet = Packet();
   dumped_final_stddev_packet = Packet();

@@ -1579,14 +1579,14 @@ class Uint64PacketGenerator : public PacketGenerator {
   static absl::Status FillExpectations(
       const PacketGeneratorOptions& extendable_options,
       PacketTypeSet* input_side_packets, PacketTypeSet* output_side_packets) {
-    output_side_packets->Index(0).Set<uint64>();
+    output_side_packets->Index(0).Set<uint64_t>();
     return absl::OkStatus();
   }

   static absl::Status Generate(const PacketGeneratorOptions& extendable_options,
                                const PacketSet& input_side_packets,
                                PacketSet* output_side_packets) {
-    output_side_packets->Index(0) = Adopt(new uint64(15LL << 32 | 5));
+    output_side_packets->Index(0) = Adopt(new uint64_t(15LL << 32 | 5));
     return absl::OkStatus();
   }
 };

@@ -1759,7 +1759,7 @@ TEST(CalculatorGraph, StatusHandlerInputVerification) {
   )pb");
   MP_ASSERT_OK(graph->Initialize(config));
   Packet extra_string = Adopt(new std::string("foo"));
-  Packet a_uint64 = Adopt(new uint64(0));
+  Packet a_uint64 = Adopt(new uint64_t(0));
   MP_EXPECT_OK(
       graph->Run({{"extra_string", extra_string}, {"a_uint64", a_uint64}}));

@@ -1789,7 +1789,7 @@ TEST(CalculatorGraph, StatusHandlerInputVerification) {
                   testing::HasSubstr("string"),
                   // Expected type.
                   testing::HasSubstr(
-                      MediaPipeTypeStringOrDemangled<uint32>())));
+                      MediaPipeTypeStringOrDemangled<uint32_t>())));

   // Should fail verification when the type of a to-be-generated packet is
   // wrong. The added handler now expects a string but will receive the uint32

@@ -1802,12 +1802,12 @@ TEST(CalculatorGraph, StatusHandlerInputVerification) {
   status = graph->Initialize(config);
   EXPECT_THAT(status.message(),
-              testing::AllOf(
-                  testing::HasSubstr("StringStatusHandler"),
+              testing::AllOf(testing::HasSubstr("StringStatusHandler"),
                              // The problematic input side packet.
                              testing::HasSubstr("generated_by_generator"),
                              // Actual type.
-                  testing::HasSubstr(MediaPipeTypeStringOrDemangled<uint32>()),
+                             testing::HasSubstr(
+                                 MediaPipeTypeStringOrDemangled<uint32_t>()),
                              // Expected type.
                              testing::HasSubstr("string")));
 }

View File

@@ -216,7 +216,7 @@ mediapipe::Counter* CalculatorRunner::GetCounter(const std::string& name) {
   return graph_->GetCounterFactory()->GetCounter(name);
 }

-std::map<std::string, int64> CalculatorRunner::GetCountersValues() {
+std::map<std::string, int64_t> CalculatorRunner::GetCountersValues() {
   return graph_->GetCounterFactory()->GetCounterSet()->GetCountersValues();
 }

View File

@@ -39,14 +39,14 @@ class BasicCounter : public Counter {
     value_ += amount;
   }

-  int64 Get() ABSL_LOCKS_EXCLUDED(mu_) override {
+  int64_t Get() ABSL_LOCKS_EXCLUDED(mu_) override {
     absl::ReaderMutexLock lock(&mu_);
     return value_;
   }

  private:
   absl::Mutex mu_;
-  int64 value_ ABSL_GUARDED_BY(mu_);
+  int64_t value_ ABSL_GUARDED_BY(mu_);
 };

 }  // namespace

@@ -73,10 +73,10 @@ Counter* CounterSet::Get(const std::string& name) ABSL_LOCKS_EXCLUDED(mu_) {
   return counters_[name].get();
 }

-std::map<std::string, int64> CounterSet::GetCountersValues()
+std::map<std::string, int64_t> CounterSet::GetCountersValues()
     ABSL_LOCKS_EXCLUDED(mu_) {
   absl::ReaderMutexLock lock(&mu_);
-  std::map<std::string, int64> result;
+  std::map<std::string, int64_t> result;
   for (const auto& it : counters_) {
     result[it.first] = it.second->Get();
   }

View File

@@ -75,17 +75,17 @@ BENCHMARK(BM_IntCast);

 static void BM_Int64Cast(benchmark::State& state) {
   double x = 0.1;
-  int64 sum = 0;
+  int64_t sum = 0;
   for (auto _ : state) {
-    sum += static_cast<int64>(x);
+    sum += static_cast<int64_t>(x);
     x += 0.1;
-    sum += static_cast<int64>(x);
+    sum += static_cast<int64_t>(x);
     x += 0.1;
-    sum += static_cast<int64>(x);
+    sum += static_cast<int64_t>(x);
     x += 0.1;
-    sum += static_cast<int64>(x);
+    sum += static_cast<int64_t>(x);
     x += 0.1;
-    sum += static_cast<int64>(x);
+    sum += static_cast<int64_t>(x);
     x += 0.1;
   }
   EXPECT_NE(sum, 0);  // Don't let 'sum' get optimized away.

@@ -134,15 +134,15 @@ static void BM_Int64Round(benchmark::State& state) {
   double x = 0.1;
   int sum = 0;
   for (auto _ : state) {
-    sum += mediapipe::MathUtil::Round<int64>(x);
+    sum += mediapipe::MathUtil::Round<int64_t>(x);
     x += 0.1;
-    sum += mediapipe::MathUtil::Round<int64>(x);
+    sum += mediapipe::MathUtil::Round<int64_t>(x);
     x += 0.1;
-    sum += mediapipe::MathUtil::Round<int64>(x);
+    sum += mediapipe::MathUtil::Round<int64_t>(x);
     x += 0.1;
-    sum += mediapipe::MathUtil::Round<int64>(x);
+    sum += mediapipe::MathUtil::Round<int64_t>(x);
     x += 0.1;
-    sum += mediapipe::MathUtil::Round<int64>(x);
+    sum += mediapipe::MathUtil::Round<int64_t>(x);
     x += 0.1;
   }
   EXPECT_NE(sum, 0);  // Don't let 'sum' get optimized away.

@@ -153,15 +153,15 @@ static void BM_UintRound(benchmark::State& state) {
   double x = 0.1;
   int sum = 0;
   for (auto _ : state) {
-    sum += mediapipe::MathUtil::Round<uint32>(x);
+    sum += mediapipe::MathUtil::Round<uint32_t>(x);
     x += 0.1;
-    sum += mediapipe::MathUtil::Round<uint32>(x);
+    sum += mediapipe::MathUtil::Round<uint32_t>(x);
     x += 0.1;
-    sum += mediapipe::MathUtil::Round<uint32>(x);
+    sum += mediapipe::MathUtil::Round<uint32_t>(x);
     x += 0.1;
-    sum += mediapipe::MathUtil::Round<uint32>(x);
+    sum += mediapipe::MathUtil::Round<uint32_t>(x);
     x += 0.1;
-    sum += mediapipe::MathUtil::Round<uint32>(x);
+    sum += mediapipe::MathUtil::Round<uint32_t>(x);
     x += 0.1;
   }
   EXPECT_NE(sum, 0);  // Don't let 'sum' get optimized away.

@@ -191,15 +191,15 @@ static void BM_SafeInt64Cast(benchmark::State& state) {
   double x = 0.1;
   int sum = 0;
   for (auto _ : state) {
-    sum += mediapipe::MathUtil::SafeCast<int64>(x);
+    sum += mediapipe::MathUtil::SafeCast<int64_t>(x);
     x += 0.1;
-    sum += mediapipe::MathUtil::SafeCast<int64>(x);
+    sum += mediapipe::MathUtil::SafeCast<int64_t>(x);
     x += 0.1;
-    sum += mediapipe::MathUtil::SafeCast<int64>(x);
+    sum += mediapipe::MathUtil::SafeCast<int64_t>(x);
     x += 0.1;
-    sum += mediapipe::MathUtil::SafeCast<int64>(x);
+    sum += mediapipe::MathUtil::SafeCast<int64_t>(x);
     x += 0.1;
-    sum += mediapipe::MathUtil::SafeCast<int64>(x);
+    sum += mediapipe::MathUtil::SafeCast<int64_t>(x);
     x += 0.1;
   }
   EXPECT_NE(sum, 0);  // Don't let 'sum' get optimized away.

@@ -229,15 +229,15 @@ static void BM_SafeInt64Round(benchmark::State& state) {
   double x = 0.1;
   int sum = 0;
   for (auto _ : state) {
-    sum += mediapipe::MathUtil::SafeRound<int64>(x);
+    sum += mediapipe::MathUtil::SafeRound<int64_t>(x);
     x += 0.1;
-    sum += mediapipe::MathUtil::SafeRound<int64>(x);
+    sum += mediapipe::MathUtil::SafeRound<int64_t>(x);
     x += 0.1;
-    sum += mediapipe::MathUtil::SafeRound<int64>(x);
+    sum += mediapipe::MathUtil::SafeRound<int64_t>(x);
     x += 0.1;
-    sum += mediapipe::MathUtil::SafeRound<int64>(x);
+    sum += mediapipe::MathUtil::SafeRound<int64_t>(x);
     x += 0.1;
-    sum += mediapipe::MathUtil::SafeRound<int64>(x);
+    sum += mediapipe::MathUtil::SafeRound<int64_t>(x);
     x += 0.1;
   }
   EXPECT_NE(sum, 0);  // Don't let 'sum' get optimized away.

@@ -262,8 +262,8 @@ TEST(MathUtil, IntRound) {
   // A double-precision number has a 53-bit mantissa (52 fraction bits),
   // so the following value can be represented exactly.
-  int64 value64 = static_cast<int64_t>(0x1234567890abcd00);
-  EXPECT_EQ(mediapipe::MathUtil::Round<int64>(static_cast<double>(value64)),
+  int64_t value64 = static_cast<int64_t>(0x1234567890abcd00);
+  EXPECT_EQ(mediapipe::MathUtil::Round<int64_t>(static_cast<double>(value64)),
             value64);
 }

@@ -369,7 +369,7 @@ class SafeCastTester {
     if (sizeof(FloatIn) >= 64) {
       // A double-precision number has a 53-bit mantissa (52 fraction bits),
       // so the following value can be represented exactly by a double.
-      int64 value64 = static_cast<int64_t>(0x1234567890abcd00);
+      int64_t value64 = static_cast<int64_t>(0x1234567890abcd00);
       const IntOut expected =
           (sizeof(IntOut) >= 64) ? static_cast<IntOut>(value64) : imax;
       EXPECT_EQ(

@@ -536,22 +536,22 @@ class SafeCastTester {
 };

 TEST(MathUtil, SafeCast) {
-  SafeCastTester<float, int8>::Run();
-  SafeCastTester<double, int8>::Run();
-  SafeCastTester<float, int16>::Run();
-  SafeCastTester<double, int16>::Run();
-  SafeCastTester<float, int32>::Run();
-  SafeCastTester<double, int32>::Run();
-  SafeCastTester<float, int64>::Run();
-  SafeCastTester<double, int64>::Run();
-  SafeCastTester<float, uint8>::Run();
-  SafeCastTester<double, uint8>::Run();
-  SafeCastTester<float, uint16>::Run();
-  SafeCastTester<double, uint16>::Run();
-  SafeCastTester<float, uint32>::Run();
-  SafeCastTester<double, uint32>::Run();
-  SafeCastTester<float, uint64>::Run();
-  SafeCastTester<double, uint64>::Run();
+  SafeCastTester<float, int8_t>::Run();
+  SafeCastTester<double, int8_t>::Run();
+  SafeCastTester<float, int16_t>::Run();
+  SafeCastTester<double, int16_t>::Run();
+  SafeCastTester<float, int32_t>::Run();
+  SafeCastTester<double, int32_t>::Run();
+  SafeCastTester<float, int64_t>::Run();
+  SafeCastTester<double, int64_t>::Run();
+  SafeCastTester<float, uint8_t>::Run();
+  SafeCastTester<double, uint8_t>::Run();
+  SafeCastTester<float, uint16_t>::Run();
+  SafeCastTester<double, uint16_t>::Run();
+  SafeCastTester<float, uint32_t>::Run();
+  SafeCastTester<double, uint32_t>::Run();
+  SafeCastTester<float, uint64_t>::Run();
+  SafeCastTester<double, uint64_t>::Run();

   // Spot-check SafeCast<int>
   EXPECT_EQ(mediapipe::MathUtil::SafeCast<int>(static_cast<float>(12345.678)),

@@ -682,7 +682,7 @@ class SafeRoundTester {
     if (sizeof(FloatIn) >= 64) {
       // A double-precision number has a 53-bit mantissa (52 fraction bits),
       // so the following value can be represented exactly by a double.
-      int64 value64 = static_cast<int64_t>(0x1234567890abcd00);
+      int64_t value64 = static_cast<int64_t>(0x1234567890abcd00);
       const IntOut expected =
           (sizeof(IntOut) >= 64) ? static_cast<IntOut>(value64) : imax;
       EXPECT_EQ(

@@ -843,22 +843,22 @@ class SafeRoundTester {
 };

 TEST(MathUtil, SafeRound) {
-  SafeRoundTester<float, int8>::Run();
-  SafeRoundTester<double, int8>::Run();
-  SafeRoundTester<float, int16>::Run();
-  SafeRoundTester<double, int16>::Run();
-  SafeRoundTester<float, int32>::Run();
-  SafeRoundTester<double, int32>::Run();
-  SafeRoundTester<float, int64>::Run();
-  SafeRoundTester<double, int64>::Run();
-  SafeRoundTester<float, uint8>::Run();
-  SafeRoundTester<double, uint8>::Run();
-  SafeRoundTester<float, uint16>::Run();
-  SafeRoundTester<double, uint16>::Run();
-  SafeRoundTester<float, uint32>::Run();
-  SafeRoundTester<double, uint32>::Run();
-  SafeRoundTester<float, uint64>::Run();
-  SafeRoundTester<double, uint64>::Run();
+  SafeRoundTester<float, int8_t>::Run();
+  SafeRoundTester<double, int8_t>::Run();
+  SafeRoundTester<float, int16_t>::Run();
+  SafeRoundTester<double, int16_t>::Run();
+  SafeRoundTester<float, int32_t>::Run();
+  SafeRoundTester<double, int32_t>::Run();
+  SafeRoundTester<float, int64_t>::Run();
+  SafeRoundTester<double, int64_t>::Run();
+  SafeRoundTester<float, uint8_t>::Run();
+  SafeRoundTester<double, uint8_t>::Run();
+  SafeRoundTester<float, uint16_t>::Run();
+  SafeRoundTester<double, uint16_t>::Run();
+  SafeRoundTester<float, uint32_t>::Run();
+  SafeRoundTester<double, uint32_t>::Run();
+  SafeRoundTester<float, uint64_t>::Run();
+  SafeRoundTester<double, uint64_t>::Run();

   // Spot-check SafeRound<int>
   EXPECT_EQ(mediapipe::MathUtil::SafeRound<int>(static_cast<float>(12345.678)),
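
Note: the `(sizeof(IntOut) >= 64) ? static_cast<IntOut>(value64) : imax` expectation above implies saturating semantics: inputs outside the target integer's range clamp to its bounds rather than invoking undefined behavior. A minimal sketch in that spirit, assuming NaN maps to zero (the tester's exact NaN policy is not shown in this hunk):

    #include <cmath>
    #include <limits>

    // Saturating float-to-int conversion: clamp out-of-range values to the
    // target type's min/max; map NaN to 0 (an assumption, see note above).
    template <typename IntOut, typename FloatIn>
    IntOut SaturatingCast(FloatIn value) {
      if (std::isnan(value)) return 0;
      if (value >= static_cast<FloatIn>(std::numeric_limits<IntOut>::max()))
        return std::numeric_limits<IntOut>::max();
      if (value <= static_cast<FloatIn>(std::numeric_limits<IntOut>::min()))
        return std::numeric_limits<IntOut>::min();
      return static_cast<IntOut>(value);
    }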

Some files were not shown because too many files have changed in this diff.