Compare commits
master...revert-428 (1 commit)
SHA1: 7d05709f62
.bazelrc (6 changed lines)
@@ -87,9 +87,6 @@ build:ios_fat --config=ios
build:ios_fat --ios_multi_cpus=armv7,arm64
build:ios_fat --watchos_cpus=armv7k

build:ios_sim_fat --config=ios
build:ios_sim_fat --ios_multi_cpus=x86_64,sim_arm64

build:darwin_x86_64 --apple_platform_type=macos
build:darwin_x86_64 --macos_minimum_os=10.12
build:darwin_x86_64 --cpu=darwin_x86_64

@@ -98,9 +95,6 @@ build:darwin_arm64 --apple_platform_type=macos
build:darwin_arm64 --macos_minimum_os=10.16
build:darwin_arm64 --cpu=darwin_arm64

# Turn off maximum stdout size
build --experimental_ui_max_stdouterr_bytes=-1

# This bazelrc file is meant to be written by a setup script.
try-import %workspace%/.configure.bazelrc
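The `try-import` on the last line silently skips `.configure.bazelrc` when the file is absent, so the checked-in `.bazelrc` works before any setup has run. As a rough illustration of the setup-script idea mentioned in the comment (the helper name and the emitted flag are assumptions, not part of this change), such a script could generate the file along these lines:

```python
# Hypothetical sketch of a setup script that writes .configure.bazelrc, the
# machine-local file picked up by `try-import %workspace%/.configure.bazelrc`.
import os
import pathlib


def write_configure_bazelrc(workspace: pathlib.Path) -> None:
    lines = ["# Generated by the setup script; do not edit by hand."]
    android_home = os.environ.get("ANDROID_HOME")
    if android_home:
        # Assumed flag for illustration: forward the SDK location to build actions.
        lines.append(f"build --action_env=ANDROID_HOME={android_home}")
    (workspace / ".configure.bazelrc").write_text("\n".join(lines) + "\n")


if __name__ == "__main__":
    write_configure_bazelrc(pathlib.Path("."))
```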
@ -48,16 +48,18 @@ body:
|
|||
placeholder: e.g. C++, Python, Java
|
||||
validations:
|
||||
required: false
|
||||
- type: input
|
||||
- type: textarea
|
||||
id: current_model
|
||||
attributes:
|
||||
label: Describe the actual behavior
|
||||
render: shell
|
||||
validations:
|
||||
required: false
|
||||
- type: input
|
||||
- type: textarea
|
||||
id: expected_model
|
||||
attributes:
|
||||
label: Describe the expected behaviour
|
||||
render: shell
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
|
@ -80,16 +80,18 @@ body:
|
|||
label: Xcode & Tulsi version (if issue is related to building for iOS)
|
||||
validations:
|
||||
required: false
|
||||
- type: input
|
||||
- type: textarea
|
||||
id: current_model
|
||||
attributes:
|
||||
label: Describe the actual behavior
|
||||
render: shell
|
||||
validations:
|
||||
required: true
|
||||
- type: input
|
||||
- type: textarea
|
||||
id: expected_model
|
||||
attributes:
|
||||
label: Describe the expected behaviour
|
||||
render: shell
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
|
@ -87,13 +87,14 @@ body:
|
|||
placeholder:
|
||||
validations:
|
||||
required: false
|
||||
- type: input
|
||||
- type: textarea
|
||||
id: what-happened
|
||||
attributes:
|
||||
label: Describe the problem
|
||||
description: Provide the exact sequence of commands / steps that you executed before running into the [problem](https://google.github.io/mediapipe/getting_started/getting_started.html)
|
||||
placeholder: Tell us what you see!
|
||||
value: "A bug happened!"
|
||||
render: shell
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
|
@ -28,33 +28,37 @@ body:
|
|||
- 'No'
|
||||
validations:
|
||||
required: false
|
||||
- type: input
|
||||
- type: textarea
|
||||
id: behaviour
|
||||
attributes:
|
||||
label: Describe the feature and the current behaviour/state
|
||||
render: shell
|
||||
validations:
|
||||
required: true
|
||||
- type: input
|
||||
- type: textarea
|
||||
id: api_change
|
||||
attributes:
|
||||
label: Will this change the current API? How?
|
||||
render: shell
|
||||
validations:
|
||||
required: false
|
||||
- type: input
|
||||
- type: textarea
|
||||
id: benifit
|
||||
attributes:
|
||||
label: Who will benefit with this feature?
|
||||
validations:
|
||||
required: false
|
||||
- type: input
|
||||
- type: textarea
|
||||
id: use_case
|
||||
attributes:
|
||||
label: Please specify the use cases for this feature
|
||||
render: shell
|
||||
validations:
|
||||
required: true
|
||||
- type: input
|
||||
- type: textarea
|
||||
id: info_other
|
||||
attributes:
|
||||
label: Any Other info
|
||||
render: shell
|
||||
validations:
|
||||
required: false
|
|
@ -41,16 +41,18 @@ body:
|
|||
label: Task name (e.g. Image classification, Gesture recognition etc.)
|
||||
validations:
|
||||
required: true
|
||||
- type: input
|
||||
- type: textarea
|
||||
id: current_model
|
||||
attributes:
|
||||
label: Describe the actual behavior
|
||||
render: shell
|
||||
validations:
|
||||
required: true
|
||||
- type: input
|
||||
- type: textarea
|
||||
id: expected_model
|
||||
attributes:
|
||||
label: Describe the expected behaviour
|
||||
render: shell
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
|
@ -31,16 +31,18 @@ body:
|
|||
label: URL that shows the problem
|
||||
validations:
|
||||
required: false
|
||||
- type: input
|
||||
- type: textarea
|
||||
id: current_model
|
||||
attributes:
|
||||
label: Describe the actual behavior
|
||||
render: shell
|
||||
validations:
|
||||
required: false
|
||||
- type: input
|
||||
- type: textarea
|
||||
id: expected_model
|
||||
attributes:
|
||||
label: Describe the expected behaviour
|
||||
render: shell
|
||||
validations:
|
||||
required: false
|
||||
- type: textarea
|
|
@ -40,16 +40,18 @@ body:
|
|||
label: Programming Language and version (e.g. C++, Python, Java)
|
||||
validations:
|
||||
required: true
|
||||
- type: input
|
||||
- type: textarea
|
||||
id: current_model
|
||||
attributes:
|
||||
label: Describe the actual behavior
|
||||
render: shell
|
||||
validations:
|
||||
required: true
|
||||
- type: input
|
||||
- type: textarea
|
||||
id: expected_model
|
||||
attributes:
|
||||
label: Describe the expected behaviour
|
||||
render: shell
|
||||
validations:
|
||||
required: true
|
||||
- type: textarea
|
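The repeated change across the issue-template hunks above is `- type: input` becoming `- type: textarea`, usually paired with a `render: shell` attribute. As a quick sanity check (an illustration only, parsed with PyYAML rather than GitHub's own validator), one updated element resolves to the nested structure GitHub issue forms expect:

```python
# Illustration: parse one updated issue-form element with PyYAML
# (pip install pyyaml) and check the fields shown in the hunks above.
import yaml

element = yaml.safe_load("""
- type: textarea
  id: current_model
  attributes:
    label: Describe the actual behavior
    render: shell
  validations:
    required: true
""")[0]

assert element["type"] == "textarea"
assert element["attributes"]["render"] == "shell"
print(element["attributes"]["label"])  # Describe the actual behavior
```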
.github/stale.yml (34 changed lines, new file, vendored)
@@ -0,0 +1,34 @@
# Copyright 2021 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
#
# This file was assembled from multiple pieces, whose use is documented
# throughout. Please refer to the TensorFlow dockerfiles documentation
# for more information.

# Number of days of inactivity before an Issue or Pull Request becomes stale
daysUntilStale: 7
# Number of days of inactivity before a stale Issue or Pull Request is closed
daysUntilClose: 7
# Only issues or pull requests with all of these labels are checked if stale. Defaults to `[]` (disabled)
onlyLabels:
  - stat:awaiting response
# Comment to post when marking as stale. Set to `false` to disable
markComment: >
  This issue has been automatically marked as stale because it has not had
  recent activity. It will be closed if no further activity occurs. Thank you.
# Comment to post when removing the stale label. Set to `false` to disable
unmarkComment: false
closeComment: >
  Closing as stale. Please reopen if you'd like to work on this further.
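Under this probot-stale configuration, only issues and PRs labeled `stat:awaiting response` are considered; they are marked stale after 7 days without activity and closed 7 days after that. A small sketch of that arithmetic (illustration only, not part of the bot itself):

```python
# Timeline implied by daysUntilStale: 7 and daysUntilClose: 7 above.
from datetime import datetime, timedelta

DAYS_UNTIL_STALE = 7
DAYS_UNTIL_CLOSE = 7


def stale_timeline(last_activity: datetime) -> tuple[datetime, datetime]:
    """Return (marked_stale_at, closed_at) for an issue with no new activity."""
    marked_stale = last_activity + timedelta(days=DAYS_UNTIL_STALE)
    closed = marked_stale + timedelta(days=DAYS_UNTIL_CLOSE)
    return marked_stale, closed


marked, closed = stale_timeline(datetime(2023, 1, 1))
print(marked.date(), closed.date())  # 2023-01-08 2023-01-15
```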
.github/workflows/stale.yaml (68 changed lines, vendored)
@@ -1,68 +0,0 @@
# Copyright 2023 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================

# This workflow alerts and then closes the stale issues/PRs after specific time
# You can adjust the behavior by modifying this file.
# For more information, see:
# https://github.com/actions/stale

name: 'Close stale issues and PRs'
"on":
  schedule:
    - cron: "30 1 * * *"
permissions:
  contents: read
  issues: write
  pull-requests: write
jobs:
  stale:
    runs-on: ubuntu-latest
    steps:
      - uses: 'actions/stale@v7'
        with:
          # Comma separated list of labels that can be assigned to issues to exclude them from being marked as stale.
          exempt-issue-labels: 'override-stale'
          # Comma separated list of labels that can be assigned to PRs to exclude them from being marked as stale.
          exempt-pr-labels: "override-stale"
          # Limit the No. of API calls in one run default value is 30.
          operations-per-run: 500
          # Prevent to remove stale label when PRs or issues are updated.
          remove-stale-when-updated: true
          # List of labels to remove when issues/PRs unstale.
          labels-to-remove-when-unstale: 'stat:awaiting response'
          # comment on issue if not active for more then 7 days.
          stale-issue-message: 'This issue has been marked stale because it has no recent activity since 7 days. It will be closed if no further activity occurs. Thank you.'
          # comment on PR if not active for more then 14 days.
          stale-pr-message: 'This PR has been marked stale because it has no recent activity since 14 days. It will be closed if no further activity occurs. Thank you.'
          # comment on issue if stale for more then 7 days.
          close-issue-message: This issue was closed due to lack of activity after being marked stale for past 7 days.
          # comment on PR if stale for more then 14 days.
          close-pr-message: This PR was closed due to lack of activity after being marked stale for past 14 days.
          # Number of days of inactivity before an Issue Request becomes stale
          days-before-issue-stale: 7
          # Number of days of inactivity before a stale Issue is closed
          days-before-issue-close: 7
          # reason for closed the issue default value is not_planned
          close-issue-reason: completed
          # Number of days of inactivity before a stale PR is closed
          days-before-pr-close: 14
          # Number of days of inactivity before an PR Request becomes stale
          days-before-pr-stale: 14
          # Check for label to stale or close the issue/PR
          any-of-labels: 'stat:awaiting response'
          # override stale to stalled for PR
          stale-pr-label: 'stale'
          # override stale to stalled for Issue
          stale-issue-label: "stale"
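One detail worth noting in the removed workflow is the quoted `"on":` key. GitHub Actions accepts the key either way, but YAML 1.1 parsers treat a bare `on` as the boolean `True`, so the quotes keep the key a string for generic tooling. A quick check (illustration only, using PyYAML):

```python
# Why the workflow quotes "on": YAML 1.1 loaders read a bare `on` as a boolean.
import yaml  # pip install pyyaml

unquoted = yaml.safe_load("on:\n  schedule:\n    - cron: '30 1 * * *'\n")
quoted = yaml.safe_load('"on":\n  schedule:\n    - cron: \'30 1 * * *\'\n')

print(list(unquoted))  # [True]   - the key was resolved to a boolean
print(list(quoted))    # ['on']   - the key stays a string
```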
README.md (200 changed lines)
@ -1,121 +1,99 @@
|
|||
---
|
||||
layout: forward
|
||||
target: https://developers.google.com/mediapipe
|
||||
layout: default
|
||||
title: Home
|
||||
nav_order: 1
|
||||
---
|
||||
|
||||
![MediaPipe](https://mediapipe.dev/images/mediapipe_small.png)
|
||||
|
||||
----
|
||||
|
||||
**Attention:** *We have moved to
|
||||
**Attention:** *Thanks for your interest in MediaPipe! We have moved to
|
||||
[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
|
||||
as the primary developer documentation site for MediaPipe as of April 3, 2023.*
|
||||
|
||||
![MediaPipe](https://developers.google.com/static/mediapipe/images/home/hero_01_1920.png)
|
||||
*This notice and web page will be removed on June 1, 2023.*
|
||||
|
||||
**Attention**: MediaPipe Solutions Preview is an early release. [Learn
|
||||
more](https://developers.google.com/mediapipe/solutions/about#notice).
|
||||
----
|
||||
|
||||
**On-device machine learning for everyone**
|
||||
<br><br><br><br><br><br><br><br><br><br>
|
||||
<br><br><br><br><br><br><br><br><br><br>
|
||||
<br><br><br><br><br><br><br><br><br><br>
|
||||
|
||||
Delight your customers with innovative machine learning features. MediaPipe
|
||||
contains everything that you need to customize and deploy to mobile (Android,
|
||||
iOS), web, desktop, edge devices, and IoT, effortlessly.
|
||||
--------------------------------------------------------------------------------
|
||||
|
||||
* [See demos](https://goo.gle/mediapipe-studio)
|
||||
* [Learn more](https://developers.google.com/mediapipe/solutions)
|
||||
## Live ML anywhere
|
||||
|
||||
## Get started
|
||||
[MediaPipe](https://google.github.io/mediapipe/) offers cross-platform, customizable
|
||||
ML solutions for live and streaming media.
|
||||
|
||||
You can get started with MediaPipe Solutions by checking out any of the
|
||||
developer guides for
|
||||
[vision](https://developers.google.com/mediapipe/solutions/vision/object_detector),
|
||||
[text](https://developers.google.com/mediapipe/solutions/text/text_classifier),
|
||||
and
|
||||
[audio](https://developers.google.com/mediapipe/solutions/audio/audio_classifier)
|
||||
tasks. If you need help setting up a development environment for use with
|
||||
MediaPipe Tasks, check out the setup guides for
|
||||
[Android](https://developers.google.com/mediapipe/solutions/setup_android), [web
|
||||
apps](https://developers.google.com/mediapipe/solutions/setup_web), and
|
||||
[Python](https://developers.google.com/mediapipe/solutions/setup_python).
|
||||
![accelerated.png](https://mediapipe.dev/images/accelerated_small.png) | ![cross_platform.png](https://mediapipe.dev/images/cross_platform_small.png)
|
||||
:------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------:
|
||||
***End-to-End acceleration***: *Built-in fast ML inference and processing accelerated even on common hardware* | ***Build once, deploy anywhere***: *Unified solution works across Android, iOS, desktop/cloud, web and IoT*
|
||||
![ready_to_use.png](https://mediapipe.dev/images/ready_to_use_small.png) | ![open_source.png](https://mediapipe.dev/images/open_source_small.png)
|
||||
***Ready-to-use solutions***: *Cutting-edge ML solutions demonstrating full power of the framework* | ***Free and open source***: *Framework and solutions both under Apache 2.0, fully extensible and customizable*
|
||||
|
||||
## Solutions
|
||||
----
|
||||
|
||||
MediaPipe Solutions provides a suite of libraries and tools for you to quickly
|
||||
apply artificial intelligence (AI) and machine learning (ML) techniques in your
|
||||
applications. You can plug these solutions into your applications immediately,
|
||||
customize them to your needs, and use them across multiple development
|
||||
platforms. MediaPipe Solutions is part of the MediaPipe [open source
|
||||
project](https://github.com/google/mediapipe), so you can further customize the
|
||||
solutions code to meet your application needs.
|
||||
## ML solutions in MediaPipe
|
||||
|
||||
These libraries and resources provide the core functionality for each MediaPipe
|
||||
Solution:
|
||||
Face Detection | Face Mesh | Iris | Hands | Pose | Holistic
|
||||
:----------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------: | :------:
|
||||
[![face_detection](https://mediapipe.dev/images/mobile/face_detection_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/face_detection) | [![face_mesh](https://mediapipe.dev/images/mobile/face_mesh_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/face_mesh) | [![iris](https://mediapipe.dev/images/mobile/iris_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/iris) | [![hand](https://mediapipe.dev/images/mobile/hand_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/hands) | [![pose](https://mediapipe.dev/images/mobile/pose_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/pose) | [![hair_segmentation](https://mediapipe.dev/images/mobile/holistic_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/holistic)
|
||||
|
||||
* **MediaPipe Tasks**: Cross-platform APIs and libraries for deploying
|
||||
solutions. [Learn
|
||||
more](https://developers.google.com/mediapipe/solutions/tasks).
|
||||
* **MediaPipe models**: Pre-trained, ready-to-run models for use with each
|
||||
solution.
|
||||
Hair Segmentation | Object Detection | Box Tracking | Instant Motion Tracking | Objectron | KNIFT
|
||||
:-------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------: | :---:
|
||||
[![hair_segmentation](https://mediapipe.dev/images/mobile/hair_segmentation_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/hair_segmentation) | [![object_detection](https://mediapipe.dev/images/mobile/object_detection_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/object_detection) | [![box_tracking](https://mediapipe.dev/images/mobile/object_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/box_tracking) | [![instant_motion_tracking](https://mediapipe.dev/images/mobile/instant_motion_tracking_android_small.gif)](https://google.github.io/mediapipe/solutions/instant_motion_tracking) | [![objectron](https://mediapipe.dev/images/mobile/objectron_chair_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/objectron) | [![knift](https://mediapipe.dev/images/mobile/template_matching_android_cpu_small.gif)](https://google.github.io/mediapipe/solutions/knift)
|
||||
|
||||
These tools let you customize and evaluate solutions:
|
||||
<!-- []() in the first cell is needed to preserve table formatting in GitHub Pages. -->
|
||||
<!-- Whenever this table is updated, paste a copy to solutions/solutions.md. -->
|
||||
|
||||
* **MediaPipe Model Maker**: Customize models for solutions with your data.
|
||||
[Learn more](https://developers.google.com/mediapipe/solutions/model_maker).
|
||||
* **MediaPipe Studio**: Visualize, evaluate, and benchmark solutions in your
|
||||
browser. [Learn
|
||||
more](https://developers.google.com/mediapipe/solutions/studio).
|
||||
[]() | [Android](https://google.github.io/mediapipe/getting_started/android) | [iOS](https://google.github.io/mediapipe/getting_started/ios) | [C++](https://google.github.io/mediapipe/getting_started/cpp) | [Python](https://google.github.io/mediapipe/getting_started/python) | [JS](https://google.github.io/mediapipe/getting_started/javascript) | [Coral](https://github.com/google/mediapipe/tree/master/mediapipe/examples/coral/README.md)
|
||||
:---------------------------------------------------------------------------------------- | :-------------------------------------------------------------: | :-----------------------------------------------------: | :-----------------------------------------------------: | :-----------------------------------------------------------: | :-----------------------------------------------------------: | :--------------------------------------------------------------------:
|
||||
[Face Detection](https://google.github.io/mediapipe/solutions/face_detection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅
|
||||
[Face Mesh](https://google.github.io/mediapipe/solutions/face_mesh) | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
[Iris](https://google.github.io/mediapipe/solutions/iris) | ✅ | ✅ | ✅ | | |
|
||||
[Hands](https://google.github.io/mediapipe/solutions/hands) | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
[Pose](https://google.github.io/mediapipe/solutions/pose) | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
[Holistic](https://google.github.io/mediapipe/solutions/holistic) | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
[Selfie Segmentation](https://google.github.io/mediapipe/solutions/selfie_segmentation) | ✅ | ✅ | ✅ | ✅ | ✅ |
|
||||
[Hair Segmentation](https://google.github.io/mediapipe/solutions/hair_segmentation) | ✅ | | ✅ | | |
|
||||
[Object Detection](https://google.github.io/mediapipe/solutions/object_detection) | ✅ | ✅ | ✅ | | | ✅
|
||||
[Box Tracking](https://google.github.io/mediapipe/solutions/box_tracking) | ✅ | ✅ | ✅ | | |
|
||||
[Instant Motion Tracking](https://google.github.io/mediapipe/solutions/instant_motion_tracking) | ✅ | | | | |
|
||||
[Objectron](https://google.github.io/mediapipe/solutions/objectron) | ✅ | | ✅ | ✅ | ✅ |
|
||||
[KNIFT](https://google.github.io/mediapipe/solutions/knift) | ✅ | | | | |
|
||||
[AutoFlip](https://google.github.io/mediapipe/solutions/autoflip) | | | ✅ | | |
|
||||
[MediaSequence](https://google.github.io/mediapipe/solutions/media_sequence) | | | ✅ | | |
|
||||
[YouTube 8M](https://google.github.io/mediapipe/solutions/youtube_8m) | | | ✅ | | |
|
||||
|
||||
### Legacy solutions
|
||||
See also
|
||||
[MediaPipe Models and Model Cards](https://google.github.io/mediapipe/solutions/models)
|
||||
for ML models released in MediaPipe.
|
||||
|
||||
We have ended support for [these MediaPipe Legacy Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
|
||||
as of March 1, 2023. All other MediaPipe Legacy Solutions will be upgraded to
|
||||
a new MediaPipe Solution. See the [Solutions guide](https://developers.google.com/mediapipe/solutions/guide#legacy)
|
||||
for details. The [code repository](https://github.com/google/mediapipe/tree/master/mediapipe)
|
||||
and prebuilt binaries for all MediaPipe Legacy Solutions will continue to be
|
||||
provided on an as-is basis.
|
||||
## Getting started
|
||||
|
||||
For more on the legacy solutions, see the [documentation](https://github.com/google/mediapipe/tree/master/docs/solutions).
|
||||
To start using MediaPipe
|
||||
[solutions](https://google.github.io/mediapipe/solutions/solutions) with only a few
|
||||
lines of code, see example code and demos in
|
||||
[MediaPipe in Python](https://google.github.io/mediapipe/getting_started/python) and
|
||||
[MediaPipe in JavaScript](https://google.github.io/mediapipe/getting_started/javascript).
|
||||
|
||||
## Framework
|
||||
To use MediaPipe in C++, Android and iOS, which allow further customization of
|
||||
the [solutions](https://google.github.io/mediapipe/solutions/solutions) as well as
|
||||
building your own, learn how to
|
||||
[install](https://google.github.io/mediapipe/getting_started/install) MediaPipe and
|
||||
start building example applications in
|
||||
[C++](https://google.github.io/mediapipe/getting_started/cpp),
|
||||
[Android](https://google.github.io/mediapipe/getting_started/android) and
|
||||
[iOS](https://google.github.io/mediapipe/getting_started/ios).
|
||||
|
||||
To start using MediaPipe Framework, [install MediaPipe
|
||||
Framework](https://developers.google.com/mediapipe/framework/getting_started/install)
|
||||
and start building example applications in C++, Android, and iOS.
|
||||
The source code is hosted in the
|
||||
[MediaPipe Github repository](https://github.com/google/mediapipe), and you can
|
||||
run code search using
|
||||
[Google Open Source Code Search](https://cs.opensource.google/mediapipe/mediapipe).
|
||||
|
||||
[MediaPipe Framework](https://developers.google.com/mediapipe/framework) is the
|
||||
low-level component used to build efficient on-device machine learning
|
||||
pipelines, similar to the premade MediaPipe Solutions.
|
||||
|
||||
Before using MediaPipe Framework, familiarize yourself with the following key
|
||||
[Framework
|
||||
concepts](https://developers.google.com/mediapipe/framework/framework_concepts/overview.md):
|
||||
|
||||
* [Packets](https://developers.google.com/mediapipe/framework/framework_concepts/packets.md)
|
||||
* [Graphs](https://developers.google.com/mediapipe/framework/framework_concepts/graphs.md)
|
||||
* [Calculators](https://developers.google.com/mediapipe/framework/framework_concepts/calculators.md)
|
||||
|
||||
## Community
|
||||
|
||||
* [Slack community](https://mediapipe.page.link/joinslack) for MediaPipe
|
||||
users.
|
||||
* [Discuss](https://groups.google.com/forum/#!forum/mediapipe) - General
|
||||
community discussion around MediaPipe.
|
||||
* [Awesome MediaPipe](https://mediapipe.page.link/awesome-mediapipe) - A
|
||||
curated list of awesome MediaPipe related frameworks, libraries and
|
||||
software.
|
||||
|
||||
## Contributing
|
||||
|
||||
We welcome contributions. Please follow these
|
||||
[guidelines](https://github.com/google/mediapipe/blob/master/CONTRIBUTING.md).
|
||||
|
||||
We use GitHub issues for tracking requests and bugs. Please post questions to
|
||||
the MediaPipe Stack Overflow with a `mediapipe` tag.
|
||||
|
||||
## Resources
|
||||
|
||||
### Publications
|
||||
## Publications
|
||||
|
||||
* [Bringing artworks to life with AR](https://developers.googleblog.com/2021/07/bringing-artworks-to-life-with-ar.html)
|
||||
in Google Developers Blog
|
||||
|
@ -124,8 +102,7 @@ the MediaPipe Stack Overflow with a `mediapipe` tag.
|
|||
* [SignAll SDK: Sign language interface using MediaPipe is now available for
|
||||
developers](https://developers.googleblog.com/2021/04/signall-sdk-sign-language-interface-using-mediapipe-now-available.html)
|
||||
in Google Developers Blog
|
||||
* [MediaPipe Holistic - Simultaneous Face, Hand and Pose Prediction, on
|
||||
Device](https://ai.googleblog.com/2020/12/mediapipe-holistic-simultaneous-face.html)
|
||||
* [MediaPipe Holistic - Simultaneous Face, Hand and Pose Prediction, on Device](https://ai.googleblog.com/2020/12/mediapipe-holistic-simultaneous-face.html)
|
||||
in Google AI Blog
|
||||
* [Background Features in Google Meet, Powered by Web ML](https://ai.googleblog.com/2020/10/background-features-in-google-meet.html)
|
||||
in Google AI Blog
|
||||
|
@ -153,6 +130,43 @@ the MediaPipe Stack Overflow with a `mediapipe` tag.
|
|||
in Google AI Blog
|
||||
* [MediaPipe: A Framework for Building Perception Pipelines](https://arxiv.org/abs/1906.08172)
|
||||
|
||||
### Videos
|
||||
## Videos
|
||||
|
||||
* [YouTube Channel](https://www.youtube.com/c/MediaPipe)
|
||||
|
||||
## Events
|
||||
|
||||
* [MediaPipe Seattle Meetup, Google Building Waterside, 13 Feb 2020](https://mediapipe.page.link/seattle2020)
|
||||
* [AI Nextcon 2020, 12-16 Feb 2020, Seattle](http://aisea20.xnextcon.com/)
|
||||
* [MediaPipe Madrid Meetup, 16 Dec 2019](https://www.meetup.com/Madrid-AI-Developers-Group/events/266329088/)
|
||||
* [MediaPipe London Meetup, Google 123 Building, 12 Dec 2019](https://www.meetup.com/London-AI-Tech-Talk/events/266329038)
|
||||
* [ML Conference, Berlin, 11 Dec 2019](https://mlconference.ai/machine-learning-advanced-development/mediapipe-building-real-time-cross-platform-mobile-web-edge-desktop-video-audio-ml-pipelines/)
|
||||
* [MediaPipe Berlin Meetup, Google Berlin, 11 Dec 2019](https://www.meetup.com/Berlin-AI-Tech-Talk/events/266328794/)
|
||||
* [The 3rd Workshop on YouTube-8M Large Scale Video Understanding Workshop,
|
||||
Seoul, Korea ICCV
|
||||
2019](https://research.google.com/youtube8m/workshop2019/index.html)
|
||||
* [AI DevWorld 2019, 10 Oct 2019, San Jose, CA](https://aidevworld.com)
|
||||
* [Google Industry Workshop at ICIP 2019, 24 Sept 2019, Taipei, Taiwan](http://2019.ieeeicip.org/?action=page4&id=14#Google)
|
||||
([presentation](https://docs.google.com/presentation/d/e/2PACX-1vRIBBbO_LO9v2YmvbHHEt1cwyqH6EjDxiILjuT0foXy1E7g6uyh4CesB2DkkEwlRDO9_lWfuKMZx98T/pub?start=false&loop=false&delayms=3000&slide=id.g556cc1a659_0_5))
|
||||
* [Open sourced at CVPR 2019, 17~20 June, Long Beach, CA](https://sites.google.com/corp/view/perception-cv4arvr/mediapipe)
|
||||
|
||||
## Community
|
||||
|
||||
* [Awesome MediaPipe](https://mediapipe.page.link/awesome-mediapipe) - A
|
||||
curated list of awesome MediaPipe related frameworks, libraries and software
|
||||
* [Slack community](https://mediapipe.page.link/joinslack) for MediaPipe users
|
||||
* [Discuss](https://groups.google.com/forum/#!forum/mediapipe) - General
|
||||
community discussion around MediaPipe
|
||||
|
||||
## Alpha disclaimer
|
||||
|
||||
MediaPipe is currently in alpha at v0.7. We may be still making breaking API
|
||||
changes and expect to get to stable APIs by v1.0.
|
||||
|
||||
## Contributing
|
||||
|
||||
We welcome contributions. Please follow these
|
||||
[guidelines](https://github.com/google/mediapipe/blob/master/CONTRIBUTING.md).
|
||||
|
||||
We use GitHub issues for tracking requests and bugs. Please post questions to
|
||||
the MediaPipe Stack Overflow with a `mediapipe` tag.
|
||||
|
|
WORKSPACE (106 changed lines)
@@ -45,13 +45,12 @@ http_archive(
)

http_archive(
    name = "rules_foreign_cc",
    sha256 = "2a4d07cd64b0719b39a7c12218a3e507672b82a97b98c6a89d38565894cf7c51",
    strip_prefix = "rules_foreign_cc-0.9.0",
    url = "https://github.com/bazelbuild/rules_foreign_cc/archive/refs/tags/0.9.0.tar.gz",
    name = "rules_foreign_cc",
    strip_prefix = "rules_foreign_cc-0.1.0",
    url = "https://github.com/bazelbuild/rules_foreign_cc/archive/0.1.0.zip",
)

load("@rules_foreign_cc//foreign_cc:repositories.bzl", "rules_foreign_cc_dependencies")
load("@rules_foreign_cc//:workspace_definitions.bzl", "rules_foreign_cc_dependencies")

rules_foreign_cc_dependencies()

@@ -73,9 +72,12 @@ http_archive(
http_archive(
    name = "zlib",
    build_file = "@//third_party:zlib.BUILD",
    sha256 = "b3a24de97a8fdbc835b9833169501030b8977031bcb54b3b3ac13740f846ab30",
    strip_prefix = "zlib-1.2.13",
    url = "http://zlib.net/fossils/zlib-1.2.13.tar.gz",
    sha256 = "c3e5e9fdd5004dcb542feda5ee4f0ff0744628baf8ed2dd5d66f8ca1197cb1a1",
    strip_prefix = "zlib-1.2.11",
    urls = [
        "http://mirror.bazel.build/zlib.net/fossils/zlib-1.2.11.tar.gz",
        "http://zlib.net/fossils/zlib-1.2.11.tar.gz",  # 2017-01-15
    ],
    patches = [
        "@//third_party:zlib.diff",
    ],

@@ -154,41 +156,22 @@ http_archive(
# 2020-08-21
http_archive(
    name = "com_github_glog_glog",
    strip_prefix = "glog-0.6.0",
    sha256 = "8a83bf982f37bb70825df71a9709fa90ea9f4447fb3c099e1d720a439d88bad6",
    strip_prefix = "glog-0a2e5931bd5ff22fd3bf8999eb8ce776f159cda6",
    sha256 = "58c9b3b6aaa4dd8b836c0fd8f65d0f941441fb95e27212c5eeb9979cfd3592ab",
    urls = [
        "https://github.com/google/glog/archive/v0.6.0.tar.gz",
        "https://github.com/google/glog/archive/0a2e5931bd5ff22fd3bf8999eb8ce776f159cda6.zip",
    ],
)
http_archive(
    name = "com_github_glog_glog_no_gflags",
    strip_prefix = "glog-0.6.0",
    sha256 = "8a83bf982f37bb70825df71a9709fa90ea9f4447fb3c099e1d720a439d88bad6",
    strip_prefix = "glog-0a2e5931bd5ff22fd3bf8999eb8ce776f159cda6",
    sha256 = "58c9b3b6aaa4dd8b836c0fd8f65d0f941441fb95e27212c5eeb9979cfd3592ab",
    build_file = "@//third_party:glog_no_gflags.BUILD",
    urls = [
        "https://github.com/google/glog/archive/v0.6.0.tar.gz",
        "https://github.com/google/glog/archive/0a2e5931bd5ff22fd3bf8999eb8ce776f159cda6.zip",
    ],
    patches = [
        "@//third_party:com_github_glog_glog.diff",
    ],
    patch_args = [
        "-p1",
    ],
)

# 2023-06-05
# This version of Glog is required for Windows support, but currently causes
# crashes on some Android devices.
http_archive(
    name = "com_github_glog_glog_windows",
    strip_prefix = "glog-3a0d4d22c5ae0b9a2216988411cfa6bf860cc372",
    sha256 = "170d08f80210b82d95563f4723a15095eff1aad1863000e8eeb569c96a98fefb",
    urls = [
        "https://github.com/google/glog/archive/3a0d4d22c5ae0b9a2216988411cfa6bf860cc372.zip",
    ],
    patches = [
        "@//third_party:com_github_glog_glog.diff",
        "@//third_party:com_github_glog_glog_windows_patch.diff",
        "@//third_party:com_github_glog_glog_9779e5ea6ef59562b030248947f787d1256132ae.diff",
    ],
    patch_args = [
        "-p1",

@@ -244,24 +227,16 @@ http_archive(
# sentencepiece
http_archive(
    name = "com_google_sentencepiece",
    strip_prefix = "sentencepiece-0.1.96",
    sha256 = "8409b0126ebd62b256c685d5757150cf7fcb2b92a2f2b98efb3f38fc36719754",
    strip_prefix = "sentencepiece-1.0.0",
    sha256 = "c05901f30a1d0ed64cbcf40eba08e48894e1b0e985777217b7c9036cac631346",
    urls = [
        "https://github.com/google/sentencepiece/archive/refs/tags/v0.1.96.zip"
        "https://github.com/google/sentencepiece/archive/1.0.0.zip",
    ],
    patches = [
        "@//third_party:com_google_sentencepiece_no_gflag_no_gtest.diff",
    ],
    build_file = "@//third_party:sentencepiece.BUILD",
    patches = ["@//third_party:com_google_sentencepiece.diff"],
    patch_args = ["-p1"],
)

http_archive(
    name = "darts_clone",
    build_file = "@//third_party:darts_clone.BUILD",
    sha256 = "c97f55d05c98da6fcaf7f9ecc6a6dc6bc5b18b8564465f77abff8879d446491c",
    strip_prefix = "darts-clone-e40ce4627526985a7767444b6ed6893ab6ff8983",
    urls = [
        "https://github.com/s-yata/darts-clone/archive/e40ce4627526985a7767444b6ed6893ab6ff8983.zip",
    ],
    repo_mapping = {"@com_google_glog" : "@com_github_glog_glog_no_gflags"},
)

http_archive(

@@ -281,10 +256,10 @@ http_archive(

http_archive(
    name = "com_googlesource_code_re2",
    sha256 = "ef516fb84824a597c4d5d0d6d330daedb18363b5a99eda87d027e6bdd9cba299",
    strip_prefix = "re2-03da4fc0857c285e3a26782f6bc8931c4c950df4",
    sha256 = "e06b718c129f4019d6e7aa8b7631bee38d3d450dd980246bfaf493eb7db67868",
    strip_prefix = "re2-fe4a310131c37f9a7e7f7816fa6ce2a8b27d65a8",
    urls = [
        "https://github.com/google/re2/archive/03da4fc0857c285e3a26782f6bc8931c4c950df4.tar.gz",
        "https://github.com/google/re2/archive/fe4a310131c37f9a7e7f7816fa6ce2a8b27d65a8.tar.gz",
    ],
)

@@ -390,22 +365,6 @@ http_archive(
    url = "https://github.com/opencv/opencv/releases/download/3.2.0/opencv-3.2.0-ios-framework.zip",
)

# Building an opencv.xcframework from the OpenCV 4.5.3 sources is necessary for
# MediaPipe iOS Task Libraries to be supported on arm64(M1) Macs. An
# `opencv.xcframework` archive has not been released and it is recommended to
# build the same from source using a script provided in OpenCV 4.5.0 upwards.
# OpenCV is fixed to version to 4.5.3 since swift support can only be disabled
# from 4.5.3 upwards. This is needed to avoid errors when the library is linked
# in Xcode. Swift support will be added in when the final binary MediaPipe iOS
# Task libraries are built.
http_archive(
    name = "ios_opencv_source",
    sha256 = "a61e7a4618d353140c857f25843f39b2abe5f451b018aab1604ef0bc34cd23d5",
    build_file = "@//third_party:opencv_ios_source.BUILD",
    type = "zip",
    url = "https://github.com/opencv/opencv/archive/refs/tags/4.5.3.zip",
)

http_archive(
    name = "stblib",
    strip_prefix = "stb-b42009b3b9d4ca35bc703f5310eedc74f584be58",

@@ -499,10 +458,9 @@ http_archive(
)

# TensorFlow repo should always go after the other external dependencies.
# TF on 2023-07-26.
_TENSORFLOW_GIT_COMMIT = "e92261fd4cec0b726692081c4d2966b75abf31dd"
# curl -L https://github.com/tensorflow/tensorflow/archive/<TENSORFLOW_GIT_COMMIT>.tar.gz | shasum -a 256
_TENSORFLOW_SHA256 = "478a229bd4ec70a5b568ac23b5ea013d9fca46a47d6c43e30365a0412b9febf4"
# TF on 2023-03-08.
_TENSORFLOW_GIT_COMMIT = "24f7ee636d62e1f8d8330357f8bbd65956dfb84d"
_TENSORFLOW_SHA256 = "7f8a96dd99215c0cdc77230d3dbce43e60102b64a89203ad04aa09b0a187a4bd"
http_archive(
    name = "org_tensorflow",
    urls = [

@@ -510,12 +468,8 @@ http_archive(
    ],
    patches = [
        "@//third_party:org_tensorflow_compatibility_fixes.diff",
        "@//third_party:org_tensorflow_system_python.diff",
        # Diff is generated with a script, don't update it manually.
        "@//third_party:org_tensorflow_custom_ops.diff",
        # Works around Bazel issue with objc_library.
        # See https://github.com/bazelbuild/bazel/issues/19912
        "@//third_party:org_tensorflow_objc_build_fixes.diff",
    ],
    patch_args = [
        "-p1",
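The `_TENSORFLOW_SHA256` constant is the checksum of the pinned archive, and the inline comment shows how to recompute it with `curl -L ... | shasum -a 256`. A rough Python equivalent of that step (a sketch only; it downloads the tarball, so it needs network access) whose output can be compared against the pinned value:

```python
# Sketch: recompute the sha256 of a pinned GitHub archive, mirroring the
# `curl -L .../<TENSORFLOW_GIT_COMMIT>.tar.gz | shasum -a 256` comment above.
import hashlib
import urllib.request


def archive_sha256(commit: str) -> str:
    url = f"https://github.com/tensorflow/tensorflow/archive/{commit}.tar.gz"
    digest = hashlib.sha256()
    with urllib.request.urlopen(url) as response:
        for chunk in iter(lambda: response.read(1 << 20), b""):
            digest.update(chunk)
    return digest.hexdigest()


# Compare against _TENSORFLOW_SHA256 for the pinned commit, e.g.:
# archive_sha256("24f7ee636d62e1f8d8330357f8bbd65956dfb84d")
```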
|
|
@ -1,342 +0,0 @@
|
|||
// !$*UTF8*$!
|
||||
{
|
||||
archiveVersion = 1;
|
||||
classes = {
|
||||
};
|
||||
objectVersion = 56;
|
||||
objects = {
|
||||
|
||||
/* Begin PBXBuildFile section */
|
||||
8566B55D2ABABF9A00AAB22A /* MediaPipeTasksDocGen.h in Headers */ = {isa = PBXBuildFile; fileRef = 8566B55C2ABABF9A00AAB22A /* MediaPipeTasksDocGen.h */; settings = {ATTRIBUTES = (Public, ); }; };
|
||||
/* End PBXBuildFile section */
|
||||
|
||||
/* Begin PBXFileReference section */
|
||||
8566B5592ABABF9A00AAB22A /* MediaPipeTasksDocGen.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = MediaPipeTasksDocGen.framework; sourceTree = BUILT_PRODUCTS_DIR; };
|
||||
8566B55C2ABABF9A00AAB22A /* MediaPipeTasksDocGen.h */ = {isa = PBXFileReference; lastKnownFileType = sourcecode.c.h; path = MediaPipeTasksDocGen.h; sourceTree = "<group>"; };
|
||||
/* End PBXFileReference section */
|
||||
|
||||
/* Begin PBXFrameworksBuildPhase section */
|
||||
8566B5562ABABF9A00AAB22A /* Frameworks */ = {
|
||||
isa = PBXFrameworksBuildPhase;
|
||||
buildActionMask = 2147483647;
|
||||
files = (
|
||||
);
|
||||
runOnlyForDeploymentPostprocessing = 0;
|
||||
};
|
||||
/* End PBXFrameworksBuildPhase section */
|
||||
|
||||
/* Begin PBXGroup section */
|
||||
8566B54F2ABABF9A00AAB22A = {
|
||||
isa = PBXGroup;
|
||||
children = (
|
||||
8566B55B2ABABF9A00AAB22A /* MediaPipeTasksDocGen */,
|
||||
8566B55A2ABABF9A00AAB22A /* Products */,
|
||||
);
|
||||
sourceTree = "<group>";
|
||||
};
|
||||
8566B55A2ABABF9A00AAB22A /* Products */ = {
|
||||
isa = PBXGroup;
|
||||
children = (
|
||||
8566B5592ABABF9A00AAB22A /* MediaPipeTasksDocGen.framework */,
|
||||
);
|
||||
name = Products;
|
||||
sourceTree = "<group>";
|
||||
};
|
||||
8566B55B2ABABF9A00AAB22A /* MediaPipeTasksDocGen */ = {
|
||||
isa = PBXGroup;
|
||||
children = (
|
||||
8566B55C2ABABF9A00AAB22A /* MediaPipeTasksDocGen.h */,
|
||||
);
|
||||
path = MediaPipeTasksDocGen;
|
||||
sourceTree = "<group>";
|
||||
};
|
||||
/* End PBXGroup section */
|
||||
|
||||
/* Begin PBXHeadersBuildPhase section */
|
||||
8566B5542ABABF9A00AAB22A /* Headers */ = {
|
||||
isa = PBXHeadersBuildPhase;
|
||||
buildActionMask = 2147483647;
|
||||
files = (
|
||||
8566B55D2ABABF9A00AAB22A /* MediaPipeTasksDocGen.h in Headers */,
|
||||
);
|
||||
runOnlyForDeploymentPostprocessing = 0;
|
||||
};
|
||||
/* End PBXHeadersBuildPhase section */
|
||||
|
||||
/* Begin PBXNativeTarget section */
|
||||
8566B5582ABABF9A00AAB22A /* MediaPipeTasksDocGen */ = {
|
||||
isa = PBXNativeTarget;
|
||||
buildConfigurationList = 8566B5602ABABF9A00AAB22A /* Build configuration list for PBXNativeTarget "MediaPipeTasksDocGen" */;
|
||||
buildPhases = (
|
||||
8566B5542ABABF9A00AAB22A /* Headers */,
|
||||
8566B5552ABABF9A00AAB22A /* Sources */,
|
||||
8566B5562ABABF9A00AAB22A /* Frameworks */,
|
||||
8566B5572ABABF9A00AAB22A /* Resources */,
|
||||
);
|
||||
buildRules = (
|
||||
);
|
||||
dependencies = (
|
||||
);
|
||||
name = MediaPipeTasksDocGen;
|
||||
productName = MediaPipeTasksDocGen;
|
||||
productReference = 8566B5592ABABF9A00AAB22A /* MediaPipeTasksDocGen.framework */;
|
||||
productType = "com.apple.product-type.framework";
|
||||
};
|
||||
/* End PBXNativeTarget section */
|
||||
|
||||
/* Begin PBXProject section */
|
||||
8566B5502ABABF9A00AAB22A /* Project object */ = {
|
||||
isa = PBXProject;
|
||||
attributes = {
|
||||
BuildIndependentTargetsInParallel = 1;
|
||||
LastUpgradeCheck = 1430;
|
||||
TargetAttributes = {
|
||||
8566B5582ABABF9A00AAB22A = {
|
||||
CreatedOnToolsVersion = 14.3.1;
|
||||
};
|
||||
};
|
||||
};
|
||||
buildConfigurationList = 8566B5532ABABF9A00AAB22A /* Build configuration list for PBXProject "MediaPipeTasksDocGen" */;
|
||||
compatibilityVersion = "Xcode 14.0";
|
||||
developmentRegion = en;
|
||||
hasScannedForEncodings = 0;
|
||||
knownRegions = (
|
||||
en,
|
||||
Base,
|
||||
);
|
||||
mainGroup = 8566B54F2ABABF9A00AAB22A;
|
||||
productRefGroup = 8566B55A2ABABF9A00AAB22A /* Products */;
|
||||
projectDirPath = "";
|
||||
projectRoot = "";
|
||||
targets = (
|
||||
8566B5582ABABF9A00AAB22A /* MediaPipeTasksDocGen */,
|
||||
);
|
||||
};
|
||||
/* End PBXProject section */
|
||||
|
||||
/* Begin PBXResourcesBuildPhase section */
|
||||
8566B5572ABABF9A00AAB22A /* Resources */ = {
|
||||
isa = PBXResourcesBuildPhase;
|
||||
buildActionMask = 2147483647;
|
||||
files = (
|
||||
);
|
||||
runOnlyForDeploymentPostprocessing = 0;
|
||||
};
|
||||
/* End PBXResourcesBuildPhase section */
|
||||
|
||||
/* Begin PBXSourcesBuildPhase section */
|
||||
8566B5552ABABF9A00AAB22A /* Sources */ = {
|
||||
isa = PBXSourcesBuildPhase;
|
||||
buildActionMask = 2147483647;
|
||||
files = (
|
||||
);
|
||||
runOnlyForDeploymentPostprocessing = 0;
|
||||
};
|
||||
/* End PBXSourcesBuildPhase section */
|
||||
|
||||
/* Begin XCBuildConfiguration section */
|
||||
8566B55E2ABABF9A00AAB22A /* Debug */ = {
|
||||
isa = XCBuildConfiguration;
|
||||
buildSettings = {
|
||||
ALWAYS_SEARCH_USER_PATHS = NO;
|
||||
CLANG_ANALYZER_NONNULL = YES;
|
||||
CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;
|
||||
CLANG_CXX_LANGUAGE_STANDARD = "gnu++20";
|
||||
CLANG_ENABLE_MODULES = YES;
|
||||
CLANG_ENABLE_OBJC_ARC = YES;
|
||||
CLANG_ENABLE_OBJC_WEAK = YES;
|
||||
CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;
|
||||
CLANG_WARN_BOOL_CONVERSION = YES;
|
||||
CLANG_WARN_COMMA = YES;
|
||||
CLANG_WARN_CONSTANT_CONVERSION = YES;
|
||||
CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;
|
||||
CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
|
||||
CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
|
||||
CLANG_WARN_EMPTY_BODY = YES;
|
||||
CLANG_WARN_ENUM_CONVERSION = YES;
|
||||
CLANG_WARN_INFINITE_RECURSION = YES;
|
||||
CLANG_WARN_INT_CONVERSION = YES;
|
||||
CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;
|
||||
CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;
|
||||
CLANG_WARN_OBJC_LITERAL_CONVERSION = YES;
|
||||
CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
|
||||
CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES;
|
||||
CLANG_WARN_RANGE_LOOP_ANALYSIS = YES;
|
||||
CLANG_WARN_STRICT_PROTOTYPES = YES;
|
||||
CLANG_WARN_SUSPICIOUS_MOVE = YES;
|
||||
CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE;
|
||||
CLANG_WARN_UNREACHABLE_CODE = YES;
|
||||
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
|
||||
COPY_PHASE_STRIP = NO;
|
||||
CURRENT_PROJECT_VERSION = 1;
|
||||
DEBUG_INFORMATION_FORMAT = dwarf;
|
||||
ENABLE_STRICT_OBJC_MSGSEND = YES;
|
||||
ENABLE_TESTABILITY = YES;
|
||||
GCC_C_LANGUAGE_STANDARD = gnu11;
|
||||
GCC_DYNAMIC_NO_PIC = NO;
|
||||
GCC_NO_COMMON_BLOCKS = YES;
|
||||
GCC_OPTIMIZATION_LEVEL = 0;
|
||||
GCC_PREPROCESSOR_DEFINITIONS = (
|
||||
"DEBUG=1",
|
||||
"$(inherited)",
|
||||
);
|
||||
GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
|
||||
GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
|
||||
GCC_WARN_UNDECLARED_SELECTOR = YES;
|
||||
GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
|
||||
GCC_WARN_UNUSED_FUNCTION = YES;
|
||||
GCC_WARN_UNUSED_VARIABLE = YES;
|
||||
IPHONEOS_DEPLOYMENT_TARGET = 16.4;
|
||||
MTL_ENABLE_DEBUG_INFO = INCLUDE_SOURCE;
|
||||
MTL_FAST_MATH = YES;
|
||||
ONLY_ACTIVE_ARCH = YES;
|
||||
SDKROOT = iphoneos;
|
||||
SWIFT_ACTIVE_COMPILATION_CONDITIONS = DEBUG;
|
||||
SWIFT_OPTIMIZATION_LEVEL = "-Onone";
|
||||
VERSIONING_SYSTEM = "apple-generic";
|
||||
VERSION_INFO_PREFIX = "";
|
||||
};
|
||||
name = Debug;
|
||||
};
|
||||
8566B55F2ABABF9A00AAB22A /* Release */ = {
|
||||
isa = XCBuildConfiguration;
|
||||
buildSettings = {
|
||||
ALWAYS_SEARCH_USER_PATHS = NO;
|
||||
CLANG_ANALYZER_NONNULL = YES;
|
||||
CLANG_ANALYZER_NUMBER_OBJECT_CONVERSION = YES_AGGRESSIVE;
|
||||
CLANG_CXX_LANGUAGE_STANDARD = "gnu++20";
|
||||
CLANG_ENABLE_MODULES = YES;
|
||||
CLANG_ENABLE_OBJC_ARC = YES;
|
||||
CLANG_ENABLE_OBJC_WEAK = YES;
|
||||
CLANG_WARN_BLOCK_CAPTURE_AUTORELEASING = YES;
|
||||
CLANG_WARN_BOOL_CONVERSION = YES;
|
||||
CLANG_WARN_COMMA = YES;
|
||||
CLANG_WARN_CONSTANT_CONVERSION = YES;
|
||||
CLANG_WARN_DEPRECATED_OBJC_IMPLEMENTATIONS = YES;
|
||||
CLANG_WARN_DIRECT_OBJC_ISA_USAGE = YES_ERROR;
|
||||
CLANG_WARN_DOCUMENTATION_COMMENTS = YES;
|
||||
CLANG_WARN_EMPTY_BODY = YES;
|
||||
CLANG_WARN_ENUM_CONVERSION = YES;
|
||||
CLANG_WARN_INFINITE_RECURSION = YES;
|
||||
CLANG_WARN_INT_CONVERSION = YES;
|
||||
CLANG_WARN_NON_LITERAL_NULL_CONVERSION = YES;
|
||||
CLANG_WARN_OBJC_IMPLICIT_RETAIN_SELF = YES;
|
||||
CLANG_WARN_OBJC_LITERAL_CONVERSION = YES;
|
||||
CLANG_WARN_OBJC_ROOT_CLASS = YES_ERROR;
|
||||
CLANG_WARN_QUOTED_INCLUDE_IN_FRAMEWORK_HEADER = YES;
|
||||
CLANG_WARN_RANGE_LOOP_ANALYSIS = YES;
|
||||
CLANG_WARN_STRICT_PROTOTYPES = YES;
|
||||
CLANG_WARN_SUSPICIOUS_MOVE = YES;
|
||||
CLANG_WARN_UNGUARDED_AVAILABILITY = YES_AGGRESSIVE;
|
||||
CLANG_WARN_UNREACHABLE_CODE = YES;
|
||||
CLANG_WARN__DUPLICATE_METHOD_MATCH = YES;
|
||||
COPY_PHASE_STRIP = NO;
|
||||
CURRENT_PROJECT_VERSION = 1;
|
||||
DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym";
|
||||
ENABLE_NS_ASSERTIONS = NO;
|
||||
ENABLE_STRICT_OBJC_MSGSEND = YES;
|
||||
GCC_C_LANGUAGE_STANDARD = gnu11;
|
||||
GCC_NO_COMMON_BLOCKS = YES;
|
||||
GCC_WARN_64_TO_32_BIT_CONVERSION = YES;
|
||||
GCC_WARN_ABOUT_RETURN_TYPE = YES_ERROR;
|
||||
GCC_WARN_UNDECLARED_SELECTOR = YES;
|
||||
GCC_WARN_UNINITIALIZED_AUTOS = YES_AGGRESSIVE;
|
||||
GCC_WARN_UNUSED_FUNCTION = YES;
|
||||
GCC_WARN_UNUSED_VARIABLE = YES;
|
||||
IPHONEOS_DEPLOYMENT_TARGET = 16.4;
|
||||
MTL_ENABLE_DEBUG_INFO = NO;
|
||||
MTL_FAST_MATH = YES;
|
||||
SDKROOT = iphoneos;
|
||||
SWIFT_COMPILATION_MODE = wholemodule;
|
||||
SWIFT_OPTIMIZATION_LEVEL = "-O";
|
||||
VALIDATE_PRODUCT = YES;
|
||||
VERSIONING_SYSTEM = "apple-generic";
|
||||
VERSION_INFO_PREFIX = "";
|
||||
};
|
||||
name = Release;
|
||||
};
|
||||
8566B5612ABABF9A00AAB22A /* Debug */ = {
|
||||
isa = XCBuildConfiguration;
|
||||
buildSettings = {
|
||||
CODE_SIGN_STYLE = Automatic;
|
||||
CURRENT_PROJECT_VERSION = 1;
|
||||
DEFINES_MODULE = YES;
|
||||
DYLIB_COMPATIBILITY_VERSION = 1;
|
||||
DYLIB_CURRENT_VERSION = 1;
|
||||
DYLIB_INSTALL_NAME_BASE = "@rpath";
|
||||
ENABLE_MODULE_VERIFIER = YES;
|
||||
GENERATE_INFOPLIST_FILE = YES;
|
||||
INFOPLIST_KEY_NSHumanReadableCopyright = "";
|
||||
INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks";
|
||||
LD_RUNPATH_SEARCH_PATHS = (
|
||||
"$(inherited)",
|
||||
"@executable_path/Frameworks",
|
||||
"@loader_path/Frameworks",
|
||||
);
|
||||
MARKETING_VERSION = 1.0;
|
||||
MODULE_VERIFIER_SUPPORTED_LANGUAGES = "objective-c objective-c++";
|
||||
MODULE_VERIFIER_SUPPORTED_LANGUAGE_STANDARDS = "gnu11 gnu++20";
|
||||
PRODUCT_BUNDLE_IDENTIFIER = com.google.mediapipe.MediaPipeTasksDocGen;
|
||||
PRODUCT_NAME = "$(TARGET_NAME:c99extidentifier)";
|
||||
SKIP_INSTALL = YES;
|
||||
SWIFT_EMIT_LOC_STRINGS = YES;
|
||||
SWIFT_VERSION = 5.0;
|
||||
TARGETED_DEVICE_FAMILY = "1,2";
|
||||
};
|
||||
name = Debug;
|
||||
};
|
||||
8566B5622ABABF9A00AAB22A /* Release */ = {
|
||||
isa = XCBuildConfiguration;
|
||||
buildSettings = {
|
||||
CODE_SIGN_STYLE = Automatic;
|
||||
CURRENT_PROJECT_VERSION = 1;
|
||||
DEFINES_MODULE = YES;
|
||||
DYLIB_COMPATIBILITY_VERSION = 1;
|
||||
DYLIB_CURRENT_VERSION = 1;
|
||||
DYLIB_INSTALL_NAME_BASE = "@rpath";
|
||||
ENABLE_MODULE_VERIFIER = YES;
|
||||
GENERATE_INFOPLIST_FILE = YES;
|
||||
INFOPLIST_KEY_NSHumanReadableCopyright = "";
|
||||
INSTALL_PATH = "$(LOCAL_LIBRARY_DIR)/Frameworks";
|
||||
LD_RUNPATH_SEARCH_PATHS = (
|
||||
"$(inherited)",
|
||||
"@executable_path/Frameworks",
|
||||
"@loader_path/Frameworks",
|
||||
);
|
||||
MARKETING_VERSION = 1.0;
|
||||
MODULE_VERIFIER_SUPPORTED_LANGUAGES = "objective-c objective-c++";
|
||||
MODULE_VERIFIER_SUPPORTED_LANGUAGE_STANDARDS = "gnu11 gnu++20";
|
||||
PRODUCT_BUNDLE_IDENTIFIER = com.google.mediapipe.MediaPipeTasksDocGen;
|
||||
PRODUCT_NAME = "$(TARGET_NAME:c99extidentifier)";
|
||||
SKIP_INSTALL = YES;
|
||||
SWIFT_EMIT_LOC_STRINGS = YES;
|
||||
SWIFT_VERSION = 5.0;
|
||||
TARGETED_DEVICE_FAMILY = "1,2";
|
||||
};
|
||||
name = Release;
|
||||
};
|
||||
/* End XCBuildConfiguration section */
|
||||
|
||||
/* Begin XCConfigurationList section */
|
||||
8566B5532ABABF9A00AAB22A /* Build configuration list for PBXProject "MediaPipeTasksDocGen" */ = {
|
||||
isa = XCConfigurationList;
|
||||
buildConfigurations = (
|
||||
8566B55E2ABABF9A00AAB22A /* Debug */,
|
||||
8566B55F2ABABF9A00AAB22A /* Release */,
|
||||
);
|
||||
defaultConfigurationIsVisible = 0;
|
||||
defaultConfigurationName = Release;
|
||||
};
|
||||
8566B5602ABABF9A00AAB22A /* Build configuration list for PBXNativeTarget "MediaPipeTasksDocGen" */ = {
|
||||
isa = XCConfigurationList;
|
||||
buildConfigurations = (
|
||||
8566B5612ABABF9A00AAB22A /* Debug */,
|
||||
8566B5622ABABF9A00AAB22A /* Release */,
|
||||
);
|
||||
defaultConfigurationIsVisible = 0;
|
||||
defaultConfigurationName = Release;
|
||||
};
|
||||
/* End XCConfigurationList section */
|
||||
};
|
||||
rootObject = 8566B5502ABABF9A00AAB22A /* Project object */;
|
||||
}
|
|
@ -1,7 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<Workspace
|
||||
version = "1.0">
|
||||
<FileRef
|
||||
location = "self:">
|
||||
</FileRef>
|
||||
</Workspace>
|
|
@ -1,8 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>IDEDidComputeMac32BitWarning</key>
|
||||
<true/>
|
||||
</dict>
|
||||
</plist>
|
Binary file not shown.
|
@ -1,14 +0,0 @@
|
|||
<?xml version="1.0" encoding="UTF-8"?>
|
||||
<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
|
||||
<plist version="1.0">
|
||||
<dict>
|
||||
<key>SchemeUserState</key>
|
||||
<dict>
|
||||
<key>MediaPipeTasksDocGen.xcscheme_^#shared#^_</key>
|
||||
<dict>
|
||||
<key>orderHint</key>
|
||||
<integer>0</integer>
|
||||
</dict>
|
||||
</dict>
|
||||
</dict>
|
||||
</plist>
|
|
@ -1,17 +0,0 @@
|
|||
//
|
||||
// MediaPipeTasksDocGen.h
|
||||
// MediaPipeTasksDocGen
|
||||
//
|
||||
// Created by Mark McDonald on 20/9/2023.
|
||||
//
|
||||
|
||||
#import <Foundation/Foundation.h>
|
||||
|
||||
//! Project version number for MediaPipeTasksDocGen.
|
||||
FOUNDATION_EXPORT double MediaPipeTasksDocGenVersionNumber;
|
||||
|
||||
//! Project version string for MediaPipeTasksDocGen.
|
||||
FOUNDATION_EXPORT const unsigned char MediaPipeTasksDocGenVersionString[];
|
||||
|
||||
// In this header, you should import all the public headers of your framework using statements like
|
||||
// #import <MediaPipeTasksDocGen/PublicHeader.h>
|
|
@@ -1,11 +0,0 @@
# Uncomment the next line to define a global platform for your project
platform :ios, '15.0'

target 'MediaPipeTasksDocGen' do
  # Comment the next line if you don't want to use dynamic frameworks
  use_frameworks!

  # Pods for MediaPipeTasksDocGen
  pod 'MediaPipeTasksText'
  pod 'MediaPipeTasksVision'
end

@@ -1,9 +0,0 @@
# MediaPipeTasksDocGen

This empty project is used to generate reference documentation for the
ObjectiveC and Swift libraries.

Docs are generated using [Jazzy](https://github.com/realm/jazzy) and published
to [the developer site](https://developers.google.com/mediapipe/solutions/).

To bump the API version used, edit [`Podfile`](./Podfile).
@@ -1,4 +1,4 @@
# Copyright 2022 The MediaPipe Authors.
# Copyright 2022 The MediaPipe Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@@ -14,7 +14,6 @@
# ==============================================================================
"""Generate Java reference docs for MediaPipe."""
import pathlib
import shutil

from absl import app
from absl import flags
@@ -42,9 +41,7 @@ def main(_) -> None:
  mp_root = pathlib.Path(__file__)
  while (mp_root := mp_root.parent).name != 'mediapipe':
    # Find the nearest `mediapipe` dir.
    if not mp_root.name:
      # We've hit the filesystem root - abort.
      raise FileNotFoundError('"mediapipe" root not found')
    pass

  # Find the root from which all packages are relative.
  root = mp_root.parent
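The loop above walks parent directories with the walrus operator until it reaches a directory named `mediapipe`, and bails out at the filesystem root instead of spinning forever. A standalone sketch of the same pattern (illustrative; the helper name is not from the script):

```python
# Sketch of the parent-directory walk used above: climb from a starting path
# until a directory named `mediapipe` is found, or fail at the filesystem root.
import pathlib


def find_mediapipe_root(start: pathlib.Path) -> pathlib.Path:
    current = start
    while (current := current.parent).name != 'mediapipe':
        if not current.name:  # pathlib.Path('/').name == '', so we hit the root.
            raise FileNotFoundError('"mediapipe" root not found')
    return current


# e.g. find_mediapipe_root(pathlib.Path(__file__))
```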
@@ -54,14 +51,6 @@ def main(_) -> None:
  if (mp_root / 'mediapipe').exists():
    mp_root = mp_root / 'mediapipe'

  # We need to copy this into the tasks dir to ensure we don't leave broken
  # links in the generated docs.
  old_api_dir = 'java/com/google/mediapipe/framework/image'
  shutil.copytree(
      mp_root / old_api_dir,
      mp_root / 'tasks' / old_api_dir,
      dirs_exist_ok=True)

  gen_java.gen_java_docs(
      package='com.google.mediapipe',
      source_path=mp_root / 'tasks/java',
|
|
|
@ -1,4 +1,4 @@
|
|||
# Copyright 2022 The MediaPipe Authors.
|
||||
# Copyright 2022 The MediaPipe Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
# Copyright 2022 The MediaPipe Authors.
|
||||
# Copyright 2022 The MediaPipe Authors. All Rights Reserved.
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
|
|
|
@@ -50,7 +50,7 @@ as the primary developer documentation site for MediaPipe as of April 3, 2023.*
3. The [`hello world`] example uses a simple MediaPipe graph in the
   `PrintHelloWorld()` function, defined in a [`CalculatorGraphConfig`] proto.

   ```c++
   ```C++
   absl::Status PrintHelloWorld() {
     // Configures a simple graph, which concatenates 2 PassThroughCalculators.
     CalculatorGraphConfig config = ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
@@ -126,7 +126,7 @@ as the primary developer documentation site for MediaPipe as of April 3, 2023.*
   ```c++
   mediapipe::Packet packet;
   while (poller.Next(&packet)) {
     ABSL_LOG(INFO) << packet.Get<string>();
     LOG(INFO) << packet.Get<string>();
   }
   ```
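The hunks above belong to the C++ framework quickstart, where a graph's output stream is polled packet by packet. For comparison, the "few lines of code" route the README points to for Python uses the released `mediapipe` PyPI package and its legacy Solutions API; a hedged sketch (illustration only, not part of this diff, and requiring `pip install mediapipe`):

```python
# Minimal MediaPipe-in-Python sketch using the legacy Solutions API that the
# "MediaPipe in Python" getting-started guide refers to. Illustration only.
import mediapipe as mp
import numpy as np

# A blank RGB frame stands in for a real image here.
frame = np.zeros((480, 640, 3), dtype=np.uint8)

with mp.solutions.hands.Hands(static_image_mode=True) as hands:
    results = hands.process(frame)
    print(results.multi_hand_landmarks)  # None for a blank frame
```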
@@ -138,7 +138,7 @@ Create a `BUILD` file in the `$APPLICATION_PATH` and add the following build
rules:

```
MIN_IOS_VERSION = "12.0"
MIN_IOS_VERSION = "11.0"

load(
    "@build_bazel_rules_apple//apple:ios.bzl",
docs/index.md (200 changed lines)
@ -1,121 +1,99 @@
---
layout: forward
target: https://developers.google.com/mediapipe
layout: default
title: Home
nav_order: 1
---

![MediaPipe](https://mediapipe.dev/images/mediapipe_small.png)

----

**Attention:** *We have moved to
**Attention:** *Thanks for your interest in MediaPipe! We have moved to
[https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
as the primary developer documentation site for MediaPipe as of April 3, 2023.*

![MediaPipe](https://developers.google.com/static/mediapipe/images/home/hero_01_1920.png)
*This notice and web page will be removed on June 1, 2023.*

**Attention**: MediaPipe Solutions Preview is an early release. [Learn
more](https://developers.google.com/mediapipe/solutions/about#notice).
----

**On-device machine learning for everyone**
<br><br><br><br><br><br><br><br><br><br>
<br><br><br><br><br><br><br><br><br><br>
<br><br><br><br><br><br><br><br><br><br>

Delight your customers with innovative machine learning features. MediaPipe
contains everything that you need to customize and deploy to mobile (Android,
iOS), web, desktop, edge devices, and IoT, effortlessly.
--------------------------------------------------------------------------------

* [See demos](https://goo.gle/mediapipe-studio)
* [Learn more](https://developers.google.com/mediapipe/solutions)
## Live ML anywhere

## Get started
[MediaPipe](https://google.github.io/mediapipe/) offers cross-platform, customizable
ML solutions for live and streaming media.

You can get started with MediaPipe Solutions by by checking out any of the
developer guides for
[vision](https://developers.google.com/mediapipe/solutions/vision/object_detector),
[text](https://developers.google.com/mediapipe/solutions/text/text_classifier),
and
[audio](https://developers.google.com/mediapipe/solutions/audio/audio_classifier)
tasks. If you need help setting up a development environment for use with
MediaPipe Tasks, check out the setup guides for
[Android](https://developers.google.com/mediapipe/solutions/setup_android), [web
apps](https://developers.google.com/mediapipe/solutions/setup_web), and
[Python](https://developers.google.com/mediapipe/solutions/setup_python).
![accelerated.png](https://mediapipe.dev/images/accelerated_small.png) | ![cross_platform.png](https://mediapipe.dev/images/cross_platform_small.png)
:------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------:
***End-to-End acceleration***: *Built-in fast ML inference and processing accelerated even on common hardware* | ***Build once, deploy anywhere***: *Unified solution works across Android, iOS, desktop/cloud, web and IoT*
![ready_to_use.png](https://mediapipe.dev/images/ready_to_use_small.png) | ![open_source.png](https://mediapipe.dev/images/open_source_small.png)
***Ready-to-use solutions***: *Cutting-edge ML solutions demonstrating full power of the framework* | ***Free and open source***: *Framework and solutions both under Apache 2.0, fully extensible and customizable*

## Solutions
----

MediaPipe Solutions provides a suite of libraries and tools for you to quickly
apply artificial intelligence (AI) and machine learning (ML) techniques in your
applications. You can plug these solutions into your applications immediately,
customize them to your needs, and use them across multiple development
platforms. MediaPipe Solutions is part of the MediaPipe [open source
project](https://github.com/google/mediapipe), so you can further customize the
solutions code to meet your application needs.
## ML solutions in MediaPipe

These libraries and resources provide the core functionality for each MediaPipe
Solution:
Face Detection | Face Mesh | Iris | Hands | Pose | Holistic
:----------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------: | :--------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------: | :------:
[![face_detection](https://mediapipe.dev/images/mobile/face_detection_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/face_detection) | [![face_mesh](https://mediapipe.dev/images/mobile/face_mesh_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/face_mesh) | [![iris](https://mediapipe.dev/images/mobile/iris_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/iris) | [![hand](https://mediapipe.dev/images/mobile/hand_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/hands) | [![pose](https://mediapipe.dev/images/mobile/pose_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/pose) | [![hair_segmentation](https://mediapipe.dev/images/mobile/holistic_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/holistic)

* **MediaPipe Tasks**: Cross-platform APIs and libraries for deploying
  solutions. [Learn
  more](https://developers.google.com/mediapipe/solutions/tasks).
* **MediaPipe models**: Pre-trained, ready-to-run models for use with each
  solution.
Hair Segmentation | Object Detection | Box Tracking | Instant Motion Tracking | Objectron | KNIFT
:-------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------------: | :---------------------------------------------------------------------------------------------------------------------------------------------------: | :-------------------------------------------------------------------------------------------------------------------: | :---:
[![hair_segmentation](https://mediapipe.dev/images/mobile/hair_segmentation_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/hair_segmentation) | [![object_detection](https://mediapipe.dev/images/mobile/object_detection_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/object_detection) | [![box_tracking](https://mediapipe.dev/images/mobile/object_tracking_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/box_tracking) | [![instant_motion_tracking](https://mediapipe.dev/images/mobile/instant_motion_tracking_android_small.gif)](https://google.github.io/mediapipe/solutions/instant_motion_tracking) | [![objectron](https://mediapipe.dev/images/mobile/objectron_chair_android_gpu_small.gif)](https://google.github.io/mediapipe/solutions/objectron) | [![knift](https://mediapipe.dev/images/mobile/template_matching_android_cpu_small.gif)](https://google.github.io/mediapipe/solutions/knift)

These tools let you customize and evaluate solutions:
<!-- []() in the first cell is needed to preserve table formatting in GitHub Pages. -->
<!-- Whenever this table is updated, paste a copy to solutions/solutions.md. -->

* **MediaPipe Model Maker**: Customize models for solutions with your data.
  [Learn more](https://developers.google.com/mediapipe/solutions/model_maker).
* **MediaPipe Studio**: Visualize, evaluate, and benchmark solutions in your
  browser. [Learn
  more](https://developers.google.com/mediapipe/solutions/studio).
[]() | [Android](https://google.github.io/mediapipe/getting_started/android) | [iOS](https://google.github.io/mediapipe/getting_started/ios) | [C++](https://google.github.io/mediapipe/getting_started/cpp) | [Python](https://google.github.io/mediapipe/getting_started/python) | [JS](https://google.github.io/mediapipe/getting_started/javascript) | [Coral](https://github.com/google/mediapipe/tree/master/mediapipe/examples/coral/README.md)
:---------------------------------------------------------------------------------------- | :-------------------------------------------------------------: | :-----------------------------------------------------: | :-----------------------------------------------------: | :-----------------------------------------------------------: | :-----------------------------------------------------------: | :--------------------------------------------------------------------:
[Face Detection](https://google.github.io/mediapipe/solutions/face_detection) | ✅ | ✅ | ✅ | ✅ | ✅ | ✅
[Face Mesh](https://google.github.io/mediapipe/solutions/face_mesh) | ✅ | ✅ | ✅ | ✅ | ✅ |
[Iris](https://google.github.io/mediapipe/solutions/iris) | ✅ | ✅ | ✅ | | |
[Hands](https://google.github.io/mediapipe/solutions/hands) | ✅ | ✅ | ✅ | ✅ | ✅ |
[Pose](https://google.github.io/mediapipe/solutions/pose) | ✅ | ✅ | ✅ | ✅ | ✅ |
[Holistic](https://google.github.io/mediapipe/solutions/holistic) | ✅ | ✅ | ✅ | ✅ | ✅ |
[Selfie Segmentation](https://google.github.io/mediapipe/solutions/selfie_segmentation) | ✅ | ✅ | ✅ | ✅ | ✅ |
[Hair Segmentation](https://google.github.io/mediapipe/solutions/hair_segmentation) | ✅ | | ✅ | | |
[Object Detection](https://google.github.io/mediapipe/solutions/object_detection) | ✅ | ✅ | ✅ | | | ✅
[Box Tracking](https://google.github.io/mediapipe/solutions/box_tracking) | ✅ | ✅ | ✅ | | |
[Instant Motion Tracking](https://google.github.io/mediapipe/solutions/instant_motion_tracking) | ✅ | | | | |
[Objectron](https://google.github.io/mediapipe/solutions/objectron) | ✅ | | ✅ | ✅ | ✅ |
[KNIFT](https://google.github.io/mediapipe/solutions/knift) | ✅ | | | | |
[AutoFlip](https://google.github.io/mediapipe/solutions/autoflip) | | | ✅ | | |
[MediaSequence](https://google.github.io/mediapipe/solutions/media_sequence) | | | ✅ | | |
[YouTube 8M](https://google.github.io/mediapipe/solutions/youtube_8m) | | | ✅ | | |

### Legacy solutions
See also
[MediaPipe Models and Model Cards](https://google.github.io/mediapipe/solutions/models)
for ML models released in MediaPipe.

We have ended support for [these MediaPipe Legacy Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
as of March 1, 2023. All other MediaPipe Legacy Solutions will be upgraded to
a new MediaPipe Solution. See the [Solutions guide](https://developers.google.com/mediapipe/solutions/guide#legacy)
for details. The [code repository](https://github.com/google/mediapipe/tree/master/mediapipe)
and prebuilt binaries for all MediaPipe Legacy Solutions will continue to be
provided on an as-is basis.
## Getting started

For more on the legacy solutions, see the [documentation](https://github.com/google/mediapipe/tree/master/docs/solutions).
To start using MediaPipe
[solutions](https://google.github.io/mediapipe/solutions/solutions) with only a few
lines code, see example code and demos in
[MediaPipe in Python](https://google.github.io/mediapipe/getting_started/python) and
[MediaPipe in JavaScript](https://google.github.io/mediapipe/getting_started/javascript).

## Framework
To use MediaPipe in C++, Android and iOS, which allow further customization of
the [solutions](https://google.github.io/mediapipe/solutions/solutions) as well as
building your own, learn how to
[install](https://google.github.io/mediapipe/getting_started/install) MediaPipe and
start building example applications in
[C++](https://google.github.io/mediapipe/getting_started/cpp),
[Android](https://google.github.io/mediapipe/getting_started/android) and
[iOS](https://google.github.io/mediapipe/getting_started/ios).

To start using MediaPipe Framework, [install MediaPipe
Framework](https://developers.google.com/mediapipe/framework/getting_started/install)
and start building example applications in C++, Android, and iOS.
The source code is hosted in the
[MediaPipe Github repository](https://github.com/google/mediapipe), and you can
run code search using
[Google Open Source Code Search](https://cs.opensource.google/mediapipe/mediapipe).

[MediaPipe Framework](https://developers.google.com/mediapipe/framework) is the
low-level component used to build efficient on-device machine learning
pipelines, similar to the premade MediaPipe Solutions.

Before using MediaPipe Framework, familiarize yourself with the following key
[Framework
concepts](https://developers.google.com/mediapipe/framework/framework_concepts/overview.md):

* [Packets](https://developers.google.com/mediapipe/framework/framework_concepts/packets.md)
* [Graphs](https://developers.google.com/mediapipe/framework/framework_concepts/graphs.md)
* [Calculators](https://developers.google.com/mediapipe/framework/framework_concepts/calculators.md)

## Community

* [Slack community](https://mediapipe.page.link/joinslack) for MediaPipe
  users.
* [Discuss](https://groups.google.com/forum/#!forum/mediapipe) - General
  community discussion around MediaPipe.
* [Awesome MediaPipe](https://mediapipe.page.link/awesome-mediapipe) - A
  curated list of awesome MediaPipe related frameworks, libraries and
  software.

## Contributing

We welcome contributions. Please follow these
[guidelines](https://github.com/google/mediapipe/blob/master/CONTRIBUTING.md).

We use GitHub issues for tracking requests and bugs. Please post questions to
the MediaPipe Stack Overflow with a `mediapipe` tag.

## Resources

### Publications
## Publications

* [Bringing artworks to life with AR](https://developers.googleblog.com/2021/07/bringing-artworks-to-life-with-ar.html)
  in Google Developers Blog

@ -124,8 +102,7 @@ the MediaPipe Stack Overflow with a `mediapipe` tag.
* [SignAll SDK: Sign language interface using MediaPipe is now available for
  developers](https://developers.googleblog.com/2021/04/signall-sdk-sign-language-interface-using-mediapipe-now-available.html)
  in Google Developers Blog
* [MediaPipe Holistic - Simultaneous Face, Hand and Pose Prediction, on
  Device](https://ai.googleblog.com/2020/12/mediapipe-holistic-simultaneous-face.html)
* [MediaPipe Holistic - Simultaneous Face, Hand and Pose Prediction, on Device](https://ai.googleblog.com/2020/12/mediapipe-holistic-simultaneous-face.html)
  in Google AI Blog
* [Background Features in Google Meet, Powered by Web ML](https://ai.googleblog.com/2020/10/background-features-in-google-meet.html)
  in Google AI Blog

@ -153,6 +130,43 @@ the MediaPipe Stack Overflow with a `mediapipe` tag.
  in Google AI Blog
* [MediaPipe: A Framework for Building Perception Pipelines](https://arxiv.org/abs/1906.08172)

### Videos
## Videos

* [YouTube Channel](https://www.youtube.com/c/MediaPipe)

## Events

* [MediaPipe Seattle Meetup, Google Building Waterside, 13 Feb 2020](https://mediapipe.page.link/seattle2020)
* [AI Nextcon 2020, 12-16 Feb 2020, Seattle](http://aisea20.xnextcon.com/)
* [MediaPipe Madrid Meetup, 16 Dec 2019](https://www.meetup.com/Madrid-AI-Developers-Group/events/266329088/)
* [MediaPipe London Meetup, Google 123 Building, 12 Dec 2019](https://www.meetup.com/London-AI-Tech-Talk/events/266329038)
* [ML Conference, Berlin, 11 Dec 2019](https://mlconference.ai/machine-learning-advanced-development/mediapipe-building-real-time-cross-platform-mobile-web-edge-desktop-video-audio-ml-pipelines/)
* [MediaPipe Berlin Meetup, Google Berlin, 11 Dec 2019](https://www.meetup.com/Berlin-AI-Tech-Talk/events/266328794/)
* [The 3rd Workshop on YouTube-8M Large Scale Video Understanding Workshop,
  Seoul, Korea ICCV
  2019](https://research.google.com/youtube8m/workshop2019/index.html)
* [AI DevWorld 2019, 10 Oct 2019, San Jose, CA](https://aidevworld.com)
* [Google Industry Workshop at ICIP 2019, 24 Sept 2019, Taipei, Taiwan](http://2019.ieeeicip.org/?action=page4&id=14#Google)
  ([presentation](https://docs.google.com/presentation/d/e/2PACX-1vRIBBbO_LO9v2YmvbHHEt1cwyqH6EjDxiILjuT0foXy1E7g6uyh4CesB2DkkEwlRDO9_lWfuKMZx98T/pub?start=false&loop=false&delayms=3000&slide=id.g556cc1a659_0_5))
* [Open sourced at CVPR 2019, 17~20 June, Long Beach, CA](https://sites.google.com/corp/view/perception-cv4arvr/mediapipe)

## Community

* [Awesome MediaPipe](https://mediapipe.page.link/awesome-mediapipe) - A
  curated list of awesome MediaPipe related frameworks, libraries and software
* [Slack community](https://mediapipe.page.link/joinslack) for MediaPipe users
* [Discuss](https://groups.google.com/forum/#!forum/mediapipe) - General
  community discussion around MediaPipe

## Alpha disclaimer

MediaPipe is currently in alpha at v0.7. We may be still making breaking API
changes and expect to get to stable APIs by v1.0.

## Contributing

We welcome contributions. Please follow these
[guidelines](https://github.com/google/mediapipe/blob/master/CONTRIBUTING.md).

We use GitHub issues for tracking requests and bugs. Please post questions to
the MediaPipe Stack Overflow with a `mediapipe` tag.

@ -20,9 +20,9 @@ nav_order: 1
---

**Attention:** *Thank you for your interest in MediaPipe Solutions.
As of May 10, 2023, this solution was upgraded to a new MediaPipe
As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
Solution. For more information, see the
[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/face_detector)
[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
site.*

----

@ -20,9 +20,9 @@ nav_order: 2
---

**Attention:** *Thank you for your interest in MediaPipe Solutions.
As of May 10, 2023, this solution was upgraded to a new MediaPipe
As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
Solution. For more information, see the
[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/face_landmarker)
[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
site.*

----

@ -20,9 +20,9 @@ nav_order: 3
---

**Attention:** *Thank you for your interest in MediaPipe Solutions.
As of May 10, 2023, this solution was upgraded to a new MediaPipe
As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
Solution. For more information, see the
[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/face_landmarker)
[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
site.*

----

@ -22,9 +22,9 @@ nav_order: 5
---

**Attention:** *Thank you for your interest in MediaPipe Solutions.
As of May 10, 2023, this solution was upgraded to a new MediaPipe
As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
Solution. For more information, see the
[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/pose_landmarker)
[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/pose_landmarker/)
site.*

----

@ -21,7 +21,7 @@ nav_order: 1
---

**Attention:** *Thank you for your interest in MediaPipe Solutions.
As of May 10, 2023, this solution was upgraded to a new MediaPipe
As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
Solution. For more information, see the
[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/pose_landmarker/)
site.*

@ -1,6 +1,5 @@
---
layout: forward
target: https://developers.google.com/mediapipe/solutions/guide#legacy
layout: default
title: MediaPipe Legacy Solutions
nav_order: 3
has_children: true

@ -14,7 +13,8 @@ has_toc: false
{:toc}
---

**Attention:** *We have ended support for
**Attention:** *Thank you for your interest in MediaPipe Solutions. We have
ended support for
[these MediaPipe Legacy Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
as of March 1, 2023. All other
[MediaPipe Legacy Solutions will be upgraded](https://developers.google.com/mediapipe/solutions/guide#legacy)

@ -25,6 +25,14 @@ be provided on an as-is basis. We encourage you to check out the new MediaPipe
Solutions at:
[https://developers.google.com/mediapipe/solutions](https://developers.google.com/mediapipe/solutions)*

*This notice and web page will be removed on June 1, 2023.*

----

<br><br><br><br><br><br><br><br><br><br>
<br><br><br><br><br><br><br><br><br><br>
<br><br><br><br><br><br><br><br><br><br>

----

MediaPipe offers open source cross-platform, customizable ML solutions for live

253  mediapipe/BUILD
@ -14,155 +14,81 @@

licenses(["notice"])  # Apache 2.0

load("@mediapipe//mediapipe:platforms.bzl", "config_setting_and_platform")
# Note: yes, these need to use "//external:android/crosstool", not
# @androidndk//:default_crosstool.

# Generic Android
config_setting(
    name = "android",
    constraint_values = [
        "@platforms//os:android",
    ],
    values = {"crosstool_top": "//external:android/crosstool"},
    visibility = ["//visibility:public"],
)

# Android x86 32-bit.
config_setting_and_platform(
    name = "android_x86",
    constraint_values = [
        "@platforms//os:android",
        "@platforms//cpu:x86_32",
    ],
    visibility = ["//visibility:public"],
)

# Android x86 64-bit.
config_setting_and_platform(
    name = "android_x86_64",
    constraint_values = [
        "@platforms//os:android",
        "@platforms//cpu:x86_64",
    ],
    visibility = ["//visibility:public"],
)

# Android ARMv7.
config_setting_and_platform(
    name = "android_arm",
    constraint_values = [
        "@platforms//os:android",
        "@platforms//cpu:armv7",
    ],
    visibility = ["//visibility:public"],
)

# Android ARM64.
config_setting_and_platform(
    name = "android_arm64",
    constraint_values = [
        "@platforms//os:android",
        "@platforms//cpu:arm64",
    ],
    visibility = ["//visibility:public"],
)

# Generic MacOS.
config_setting(
    name = "android_x86",
    values = {
        "crosstool_top": "//external:android/crosstool",
        "cpu": "x86",
    },
    visibility = ["//visibility:public"],
)

config_setting(
    name = "android_x86_64",
    values = {
        "crosstool_top": "//external:android/crosstool",
        "cpu": "x86_64",
    },
    visibility = ["//visibility:public"],
)

config_setting(
    name = "android_armeabi",
    values = {
        "crosstool_top": "//external:android/crosstool",
        "cpu": "armeabi",
    },
    visibility = ["//visibility:public"],
)

config_setting(
    name = "android_arm",
    values = {
        "crosstool_top": "//external:android/crosstool",
        "cpu": "armeabi-v7a",
    },
    visibility = ["//visibility:public"],
)

config_setting(
    name = "android_arm64",
    values = {
        "crosstool_top": "//external:android/crosstool",
        "cpu": "arm64-v8a",
    },
    visibility = ["//visibility:public"],
)

# Note: this cannot just match "apple_platform_type": "macos" because that option
# defaults to "macos" even when building on Linux!
alias(
    name = "macos",
    constraint_values = [
        "@platforms//os:macos",
    ],
    actual = select({
        ":macos_i386": ":macos_i386",
        ":macos_x86_64": ":macos_x86_64",
        ":macos_arm64": ":macos_arm64",
        "//conditions:default": ":macos_i386",  # Arbitrarily chosen from above.
    }),
    visibility = ["//visibility:public"],
)

# MacOS x86 64-bit.
config_setting_and_platform(
    name = "macos_x86_64",
    constraint_values = [
        "@platforms//os:macos",
        "@platforms//cpu:x86_64",
    ],
    visibility = ["//visibility:public"],
)

# MacOS ARM64.
config_setting_and_platform(
    name = "macos_arm64",
    constraint_values = [
        "@platforms//os:macos",
        "@platforms//cpu:arm64",
    ],
    visibility = ["//visibility:public"],
)

# Generic iOS.
# Note: this also matches on crosstool_top so that it does not produce ambiguous
# selectors when used together with "android".
config_setting(
    name = "ios",
    constraint_values = [
        "@platforms//os:ios",
    ],
    visibility = ["//visibility:public"],
)

# iOS device ARM32.
config_setting_and_platform(
    name = "ios_armv7",
    constraint_values = [
        "@platforms//os:ios",
        "@platforms//cpu:arm",
    ],
    visibility = ["//visibility:public"],
)

# iOS device ARM64.
config_setting_and_platform(
    name = "ios_arm64",
    constraint_values = [
        "@platforms//os:ios",
        "@platforms//cpu:arm64",
    ],
    visibility = ["//visibility:public"],
)

# iOS device ARM64E.
config_setting_and_platform(
    name = "ios_arm64e",
    constraint_values = [
        "@platforms//os:ios",
        "@platforms//cpu:arm64e",
    ],
    visibility = ["//visibility:public"],
)

# iOS simulator x86 32-bit.
config_setting_and_platform(
    name = "ios_i386",
    constraint_values = [
        "@platforms//os:ios",
        "@platforms//cpu:x86_32",
        "@build_bazel_apple_support//constraints:simulator",
    ],
    visibility = ["//visibility:public"],
)

# iOS simulator x86 64-bit.
config_setting_and_platform(
    name = "ios_x86_64",
    constraint_values = [
        "@platforms//os:ios",
        "@platforms//cpu:x86_64",
        "@build_bazel_apple_support//constraints:simulator",
    ],
    visibility = ["//visibility:public"],
)

# iOS simulator ARM64.
config_setting_and_platform(
    name = "ios_sim_arm64",
    constraint_values = [
        "@platforms//os:ios",
        "@platforms//cpu:arm64",
        "@build_bazel_apple_support//constraints:simulator",
    ],
    values = {
        "crosstool_top": "@bazel_tools//tools/cpp:toolchain",
        "apple_platform_type": "ios",
    },
    visibility = ["//visibility:public"],
)

@ -176,26 +102,53 @@ alias(
    visibility = ["//visibility:public"],
)

# Windows 64-bit.
config_setting_and_platform(
    name = "windows",
    constraint_values = [
        "@platforms//os:windows",
        "@platforms//cpu:x86_64",
    ],
config_setting(
    name = "macos_i386",
    values = {
        "apple_platform_type": "macos",
        "cpu": "darwin",
    },
    visibility = ["//visibility:public"],
)

# Linux 64-bit.
config_setting_and_platform(
    name = "linux",
    constraint_values = [
        "@platforms//os:linux",
        "@platforms//cpu:x86_64",
    ],
config_setting(
    name = "macos_x86_64",
    values = {
        "apple_platform_type": "macos",
        "cpu": "darwin_x86_64",
    },
    visibility = ["//visibility:public"],
)

config_setting(
    name = "macos_arm64",
    values = {
        "apple_platform_type": "macos",
        "cpu": "darwin_arm64",
    },
    visibility = ["//visibility:public"],
)

[
    config_setting(
        name = arch,
        values = {"cpu": arch},
        visibility = ["//visibility:public"],
    )
    for arch in [
        "ios_i386",
        "ios_x86_64",
        "ios_armv7",
        "ios_arm64",
        "ios_arm64e",
    ]
]

config_setting(
    name = "windows",
    values = {"cpu": "x64_windows"},
)

exports_files(
    ["provisioning_profile.mobileprovision"],
    visibility = ["//visibility:public"],

@ -12,7 +12,6 @@
# See the License for the specific language governing permissions and
# limitations under the License.

# Placeholder: load py_proto_library
load("//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library")

licenses(["notice"])

@ -146,7 +145,6 @@ cc_library(
        "//mediapipe/framework/port:logging",
        "//mediapipe/framework/port:status",
        "//mediapipe/util:time_series_util",
        "@com_google_absl//absl/log:absl_check",
        "@com_google_absl//absl/strings",
        "@com_google_audio_tools//audio/dsp/mfcc",
        "@eigen_archive//:eigen3",

@ -165,9 +163,8 @@ cc_library(
        "//mediapipe/framework/formats:matrix",
        "//mediapipe/framework/formats:time_series_header_cc_proto",
        "//mediapipe/framework/port:integral_types",
        "//mediapipe/framework/port:logging",
        "//mediapipe/util:time_series_util",
        "@com_google_absl//absl/log:absl_check",
        "@com_google_absl//absl/log:absl_log",
        "@com_google_absl//absl/strings",
        "@com_google_audio_tools//audio/dsp:resampler",
        "@com_google_audio_tools//audio/dsp:resampler_q",

@ -188,7 +185,6 @@ cc_library(
        "//mediapipe/framework/port:core_proto",
        "//mediapipe/framework/port:status",
        "//mediapipe/util:time_series_util",
        "@com_google_absl//absl/log:absl_check",
    ],
    alwayslink = 1,
)

@ -223,12 +219,13 @@ cc_library(
    deps = [
        ":time_series_framer_calculator_cc_proto",
        "//mediapipe/framework:calculator_framework",
        "//mediapipe/framework:timestamp",
        "//mediapipe/framework/formats:matrix",
        "//mediapipe/framework/formats:time_series_header_cc_proto",
        "//mediapipe/framework/port:integral_types",
        "//mediapipe/framework/port:logging",
        "//mediapipe/framework/port:ret_check",
        "//mediapipe/framework/port:status",
        "//mediapipe/util:time_series_util",
        "@com_google_absl//absl/log:absl_check",
        "@com_google_audio_tools//audio/dsp:window_functions",
        "@eigen_archive//:eigen3",
    ],

@ -299,7 +296,6 @@ cc_test(
        "//mediapipe/framework/port:integral_types",
        "//mediapipe/framework/port:status",
        "//mediapipe/util:time_series_test_util",
        "@com_google_absl//absl/log:absl_log",
        "@com_google_audio_tools//audio/dsp:number_util",
        "@eigen_archive//:eigen3",
    ],

@ -323,21 +319,6 @@ cc_test(
    ],
)

cc_binary(
    name = "time_series_framer_calculator_benchmark",
    srcs = ["time_series_framer_calculator_benchmark.cc"],
    deps = [
        ":time_series_framer_calculator",
        ":time_series_framer_calculator_cc_proto",
        "//mediapipe/framework:calculator_framework",
        "//mediapipe/framework:packet",
        "//mediapipe/framework/formats:matrix",
        "//mediapipe/framework/formats:time_series_header_cc_proto",
        "@com_google_absl//absl/log:absl_check",
        "@com_google_benchmark//:benchmark",
    ],
)

cc_test(
    name = "time_series_framer_calculator_test",
    srcs = ["time_series_framer_calculator_test.cc"],

@ -352,7 +333,6 @@ cc_test(
        "//mediapipe/framework/port:integral_types",
        "//mediapipe/framework/port:status",
        "//mediapipe/util:time_series_test_util",
        "@com_google_absl//absl/log:absl_log",
        "@com_google_audio_tools//audio/dsp:window_functions",
        "@eigen_archive//:eigen3",
    ],

@ -23,7 +23,6 @@
#include <vector>

#include "Eigen/Core"
#include "absl/log/absl_check.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"
#include "absl/strings/substitute.h"

@ -139,7 +138,7 @@ absl::Status FramewiseTransformCalculatorBase::Process(CalculatorContext* cc) {
    TransformFrame(input_frame, &output_frame);

    // Copy output from vector<float> to Eigen::Vector.
    ABSL_CHECK_EQ(output_frame.size(), num_output_channels_);
    CHECK_EQ(output_frame.size(), num_output_channels_);
    Eigen::Map<const Eigen::MatrixXd> output_frame_map(&output_frame[0],
                                                       output_frame.size(), 1);
    output->col(frame) = output_frame_map.cast<float>();

@ -16,8 +16,6 @@

#include "mediapipe/calculators/audio/rational_factor_resample_calculator.h"

#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "audio/dsp/resampler_q.h"

using audio_dsp::Resampler;

@ -47,9 +45,9 @@ void CopyVectorToChannel(const std::vector<float>& vec, Matrix* matrix,
  if (matrix->cols() == 0) {
    matrix->resize(matrix->rows(), vec.size());
  } else {
    ABSL_CHECK_EQ(vec.size(), matrix->cols());
    CHECK_EQ(vec.size(), matrix->cols());
  }
  ABSL_CHECK_LT(channel, matrix->rows());
  CHECK_LT(channel, matrix->rows());
  matrix->row(channel) =
      Eigen::Map<const Eigen::ArrayXf>(vec.data(), vec.size());
}

@ -79,7 +77,7 @@ absl::Status RationalFactorResampleCalculator::Open(CalculatorContext* cc) {
    r = ResamplerFromOptions(source_sample_rate_, target_sample_rate_,
                             resample_options);
    if (!r) {
      ABSL_LOG(ERROR) << "Failed to initialize resampler.";
      LOG(ERROR) << "Failed to initialize resampler.";
      return absl::UnknownError("Failed to initialize resampler.");
    }
  }

@ -27,6 +27,7 @@
#include "mediapipe/framework/formats/matrix.h"
#include "mediapipe/framework/formats/time_series_header.pb.h"
#include "mediapipe/framework/port/integral_types.h"
#include "mediapipe/framework/port/logging.h"
#include "mediapipe/util/time_series_util.h"

namespace mediapipe {

@ -210,23 +210,6 @@ REGISTER_CALCULATOR(SpectrogramCalculator);
// Factor to convert ln(SQUARED_MAGNITUDE) to deciBels = 10.0/ln(10.0).
const float SpectrogramCalculator::kLnSquaredMagnitudeToDb = 4.342944819032518;

namespace {
std::unique_ptr<audio_dsp::WindowFunction> MakeWindowFun(
    const SpectrogramCalculatorOptions::WindowType window_type) {
  switch (window_type) {
    // The cosine window and square root of Hann are equivalent.
    case SpectrogramCalculatorOptions::COSINE:
    case SpectrogramCalculatorOptions::SQRT_HANN:
      return std::make_unique<audio_dsp::CosineWindow>();
    case SpectrogramCalculatorOptions::HANN:
      return std::make_unique<audio_dsp::HannWindow>();
    case SpectrogramCalculatorOptions::HAMMING:
      return std::make_unique<audio_dsp::HammingWindow>();
  }
  return nullptr;
}
}  // namespace

absl::Status SpectrogramCalculator::Open(CalculatorContext* cc) {
  SpectrogramCalculatorOptions spectrogram_options =
      cc->Options<SpectrogramCalculatorOptions>();

@ -283,14 +266,28 @@ absl::Status SpectrogramCalculator::Open(CalculatorContext* cc) {

  output_scale_ = spectrogram_options.output_scale();

  auto window_fun = MakeWindowFun(spectrogram_options.window_type());
  if (window_fun == nullptr) {
    return absl::Status(absl::StatusCode::kInvalidArgument,
                        absl::StrCat("Invalid window type ",
                                     spectrogram_options.window_type()));
  }
  std::vector<double> window;
  window_fun->GetPeriodicSamples(frame_duration_samples_, &window);
  switch (spectrogram_options.window_type()) {
    case SpectrogramCalculatorOptions::COSINE:
      audio_dsp::CosineWindow().GetPeriodicSamples(frame_duration_samples_,
                                                   &window);
      break;
    case SpectrogramCalculatorOptions::HANN:
      audio_dsp::HannWindow().GetPeriodicSamples(frame_duration_samples_,
                                                 &window);
      break;
    case SpectrogramCalculatorOptions::HAMMING:
      audio_dsp::HammingWindow().GetPeriodicSamples(frame_duration_samples_,
                                                    &window);
      break;
    case SpectrogramCalculatorOptions::SQRT_HANN: {
      audio_dsp::HannWindow().GetPeriodicSamples(frame_duration_samples_,
                                                 &window);
      absl::c_transform(window, window.begin(),
                        [](double x) { return std::sqrt(x); });
      break;
    }
  }

  // Propagate settings down to the actual Spectrogram object.
  spectrogram_generators_.clear();

@ -436,9 +433,9 @@ absl::Status SpectrogramCalculator::ProcessVectorToOutput(
absl::Status SpectrogramCalculator::ProcessVector(const Matrix& input_stream,
                                                  CalculatorContext* cc) {
  switch (output_type_) {
    // These blocks deliberately ignore clang-format to preserve the
    // "silhouette" of the different cases.
    // clang-format off
    // These blocks deliberately ignore clang-format to preserve the
    // "silhouette" of the different cases.
    // clang-format off
    case SpectrogramCalculatorOptions::COMPLEX: {
      return ProcessVectorToOutput(
          input_stream,

@ -68,7 +68,7 @@ message SpectrogramCalculatorOptions {
    HANN = 0;
    HAMMING = 1;
    COSINE = 2;
    SQRT_HANN = 4;  // Alias of COSINE.
    SQRT_HANN = 4;
  }
  optional WindowType window_type = 6 [default = HANN];

@ -80,7 +80,7 @@ message SpectrogramCalculatorOptions {
  // If use_local_timestamp is true, the output packet's timestamp is based on
  // the last sample of the packet and it's inferred from the latest input
  // packet's timestamp. If false, the output packet's timestamp is based on
  // the cumulative timestamping, which is inferred from the initial input
  // the cumulative timestamping, which is inferred from the intial input
  // timestamp and the cumulative number of samples.
  optional bool use_local_timestamp = 8 [default = false];
}

@ -22,7 +22,6 @@
#include <vector>

#include "Eigen/Core"
#include "absl/log/absl_log.h"
#include "audio/dsp/number_util.h"
#include "mediapipe/calculators/audio/spectrogram_calculator.pb.h"
#include "mediapipe/framework/calculator_framework.h"

@ -883,11 +882,11 @@ void BM_ProcessDC(benchmark::State& state) {

  const CalculatorRunner::StreamContents& output = runner.Outputs().Index(0);
  const Matrix& output_matrix = output.packets[0].Get<Matrix>();
  ABSL_LOG(INFO) << "Output matrix=" << output_matrix.rows() << "x"
                 << output_matrix.cols();
  ABSL_LOG(INFO) << "First values=" << output_matrix(0, 0) << ", "
                 << output_matrix(1, 0) << ", " << output_matrix(2, 0) << ", "
                 << output_matrix(3, 0);
  LOG(INFO) << "Output matrix=" << output_matrix.rows() << "x"
            << output_matrix.cols();
  LOG(INFO) << "First values=" << output_matrix(0, 0) << ", "
            << output_matrix(1, 0) << ", " << output_matrix(2, 0) << ", "
            << output_matrix(3, 0);
}

BENCHMARK(BM_ProcessDC);

@ -18,7 +18,6 @@
#include <memory>
#include <string>

#include "absl/log/absl_check.h"
#include "mediapipe/calculators/audio/stabilized_log_calculator.pb.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/formats/matrix.h"

@ -60,7 +59,7 @@ class StabilizedLogCalculator : public CalculatorBase {
    output_scale_ = stabilized_log_calculator_options.output_scale();
    check_nonnegativity_ =
        stabilized_log_calculator_options.check_nonnegativity();
    ABSL_CHECK_GE(stabilizer_, 0.0)
    CHECK_GE(stabilizer_, 0.0)
        << "stabilizer must be >= 0.0, received a value of " << stabilizer_;

    // If the input packets have a header, propagate the header to the output.

@ -15,17 +15,19 @@
// Defines TimeSeriesFramerCalculator.
#include <math.h>

#include <vector>
#include <deque>
#include <memory>
#include <string>

#include "Eigen/Core"
#include "absl/log/absl_check.h"
#include "audio/dsp/window_functions.h"
#include "mediapipe/calculators/audio/time_series_framer_calculator.pb.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/formats/matrix.h"
#include "mediapipe/framework/formats/time_series_header.pb.h"
#include "mediapipe/framework/port/integral_types.h"
#include "mediapipe/framework/port/logging.h"
#include "mediapipe/framework/port/ret_check.h"
#include "mediapipe/framework/timestamp.h"
#include "mediapipe/util/time_series_util.h"

namespace mediapipe {

@ -86,6 +88,11 @@ class TimeSeriesFramerCalculator : public CalculatorBase {
  absl::Status Close(CalculatorContext* cc) override;

 private:
  // Adds input data to the internal buffer.
  void EnqueueInput(CalculatorContext* cc);
  // Constructs and emits framed output packets.
  void FrameOutput(CalculatorContext* cc);

  Timestamp CurrentOutputTimestamp() {
    if (use_local_timestamp_) {
      return current_timestamp_;

@ -99,13 +106,21 @@ class TimeSeriesFramerCalculator : public CalculatorBase {
        Timestamp::kTimestampUnitsPerSecond);
  }

  // Returns the timestamp of a sample on a base, which is usually the time
  // stamp of a packet.
  Timestamp CurrentSampleTimestamp(const Timestamp& timestamp_base,
                                   int64_t number_of_samples) {
    return timestamp_base + round(number_of_samples / sample_rate_ *
                                  Timestamp::kTimestampUnitsPerSecond);
  }

  // The number of input samples to advance after the current output frame is
  // emitted.
  int next_frame_step_samples() const {
    // All numbers are in input samples.
    const int64_t current_output_frame_start = static_cast<int64_t>(
        round(cumulative_output_frames_ * average_frame_step_samples_));
    ABSL_CHECK_EQ(current_output_frame_start, cumulative_completed_samples_);
    CHECK_EQ(current_output_frame_start, cumulative_completed_samples_);
    const int64_t next_output_frame_start = static_cast<int64_t>(
        round((cumulative_output_frames_ + 1) * average_frame_step_samples_));
    return next_output_frame_start - current_output_frame_start;

@ -127,174 +142,61 @@ class TimeSeriesFramerCalculator : public CalculatorBase {
  Timestamp initial_input_timestamp_;
  // The current timestamp is updated along with the incoming packets.
  Timestamp current_timestamp_;
  int num_channels_;

  // Samples are buffered in a vector of sample blocks.
  class SampleBlockBuffer {
   public:
    // Initializes the buffer.
    void Init(double sample_rate, int num_channels) {
      ts_units_per_sample_ = Timestamp::kTimestampUnitsPerSecond / sample_rate;
      num_channels_ = num_channels;
      num_samples_ = 0;
      first_block_offset_ = 0;
    }

    // Number of channels, equal to the number of rows in each Matrix.
    int num_channels() const { return num_channels_; }
    // Total number of available samples over all blocks.
    int num_samples() const { return num_samples_; }

    // Pushes a new block of samples on the back of the buffer with `timestamp`
    // being the input timestamp of the packet containing the Matrix.
    void Push(const Matrix& samples, Timestamp timestamp);
    // Copies `count` samples from the front of the buffer. If there are fewer
    // samples than this, the result is zero padded to have `count` samples.
    // The timestamp of the last copied sample is written to *last_timestamp.
    // This output is used below to update `current_timestamp_`, which is only
    // used when `use_local_timestamp` is true.
    Matrix CopySamples(int count, Timestamp* last_timestamp) const;
    // Drops `count` samples from the front of the buffer. If `count` exceeds
    // `num_samples()`, the buffer is emptied. Returns how many samples were
    // dropped.
    int DropSamples(int count);

   private:
    struct Block {
      // Matrix of num_channels rows by num_samples columns, a block of possibly
      // multiple samples.
      Matrix samples;
      // Timestamp of the first sample in the Block. This comes from the input
      // packet's timestamp that contains this Matrix.
      Timestamp timestamp;

      Block() : timestamp(Timestamp::Unstarted()) {}
      Block(const Matrix& samples, Timestamp timestamp)
          : samples(samples), timestamp(timestamp) {}
      int num_samples() const { return samples.cols(); }
    };
    std::vector<Block> blocks_;
    // Number of timestamp units per sample. Used to compute timestamps as
    // nth sample timestamp = base_timestamp + round(ts_units_per_sample_ * n).
    double ts_units_per_sample_;
    // Number of rows in each Matrix.
    int num_channels_;
    // The total number of samples over all blocks, equal to
    // (sum_i blocks_[i].num_samples()) - first_block_offset_.
    int num_samples_;
    // The number of samples in the first block that have been discarded. This
    // way we can cheaply represent "partially discarding" a block.
    int first_block_offset_;
  } sample_buffer_;
  // Each entry in this deque consists of a single sample, i.e. a
  // single column vector, and its timestamp.
  std::deque<std::pair<Matrix, Timestamp>> sample_buffer_;

  bool use_window_;
  Eigen::RowVectorXf window_;
  Matrix window_;

  bool use_local_timestamp_;
};
REGISTER_CALCULATOR(TimeSeriesFramerCalculator);

void TimeSeriesFramerCalculator::SampleBlockBuffer::Push(const Matrix& samples,
                                                          Timestamp timestamp) {
  num_samples_ += samples.cols();
  blocks_.emplace_back(samples, timestamp);
void TimeSeriesFramerCalculator::EnqueueInput(CalculatorContext* cc) {
  const Matrix& input_frame = cc->Inputs().Index(0).Get<Matrix>();

  for (int i = 0; i < input_frame.cols(); ++i) {
    sample_buffer_.emplace_back(std::make_pair(
        input_frame.col(i), CurrentSampleTimestamp(cc->InputTimestamp(), i)));
  }
}

Matrix TimeSeriesFramerCalculator::SampleBlockBuffer::CopySamples(
    int count, Timestamp* last_timestamp) const {
  Matrix copied(num_channels_, count);

  if (!blocks_.empty()) {
    int num_copied = 0;
    // First block has an offset for samples that have been discarded.
    int offset = first_block_offset_;
    int n;
    Timestamp last_block_ts;
    int last_sample_index;

    for (auto it = blocks_.begin(); it != blocks_.end() && count > 0; ++it) {
      n = std::min(it->num_samples() - offset, count);
      // Copy `n` samples from the next block.
      copied.middleCols(num_copied, n) = it->samples.middleCols(offset, n);
      count -= n;
      num_copied += n;
      last_block_ts = it->timestamp;
      last_sample_index = offset + n - 1;
      offset = 0;  // No samples have been discarded in subsequent blocks.
    }

    // Compute the timestamp of the last copied sample.
    *last_timestamp =
        last_block_ts + std::round(ts_units_per_sample_ * last_sample_index);
  }

  if (count > 0) {
    copied.rightCols(count).setZero();  // Zero pad if needed.
  }

  return copied;
}

int TimeSeriesFramerCalculator::SampleBlockBuffer::DropSamples(int count) {
  if (blocks_.empty()) {
    return 0;
  }

  auto block_it = blocks_.begin();
  if (first_block_offset_ + count < block_it->num_samples()) {
    // `count` is less than the remaining samples in the first block.
    first_block_offset_ += count;
    num_samples_ -= count;
    return count;
  }

  int num_samples_dropped = block_it->num_samples() - first_block_offset_;
  count -= num_samples_dropped;
  first_block_offset_ = 0;

  for (++block_it; block_it != blocks_.end(); ++block_it) {
    if (block_it->num_samples() > count) {
      break;
    }
    num_samples_dropped += block_it->num_samples();
    count -= block_it->num_samples();
  }

  blocks_.erase(blocks_.begin(), block_it);  // Drop whole blocks.
  if (!blocks_.empty()) {
    first_block_offset_ = count;  // Drop part of the next block.
    num_samples_dropped += count;
  }

  num_samples_ -= num_samples_dropped;
  return num_samples_dropped;
}

absl::Status TimeSeriesFramerCalculator::Process(CalculatorContext* cc) {
  if (initial_input_timestamp_ == Timestamp::Unstarted()) {
    initial_input_timestamp_ = cc->InputTimestamp();
    current_timestamp_ = initial_input_timestamp_;
  }

  // Add input data to the internal buffer.
  sample_buffer_.Push(cc->Inputs().Index(0).Get<Matrix>(),
                      cc->InputTimestamp());

  // Construct and emit framed output packets.
  while (sample_buffer_.num_samples() >=
void TimeSeriesFramerCalculator::FrameOutput(CalculatorContext* cc) {
  while (sample_buffer_.size() >=
         frame_duration_samples_ + samples_still_to_drop_) {
    sample_buffer_.DropSamples(samples_still_to_drop_);
    Matrix output_frame = sample_buffer_.CopySamples(frame_duration_samples_,
                                                     &current_timestamp_);
    while (samples_still_to_drop_ > 0) {
      sample_buffer_.pop_front();
      --samples_still_to_drop_;
    }
    const int frame_step_samples = next_frame_step_samples();
    samples_still_to_drop_ = frame_step_samples;
    std::unique_ptr<Matrix> output_frame(
        new Matrix(num_channels_, frame_duration_samples_));
    for (int i = 0; i < std::min(frame_step_samples, frame_duration_samples_);
         ++i) {
      output_frame->col(i) = sample_buffer_.front().first;
      current_timestamp_ = sample_buffer_.front().second;
      sample_buffer_.pop_front();
    }
    const int frame_overlap_samples =
        frame_duration_samples_ - frame_step_samples;
    if (frame_overlap_samples > 0) {
      for (int i = 0; i < frame_overlap_samples; ++i) {
        output_frame->col(i + frame_step_samples) = sample_buffer_[i].first;
        current_timestamp_ = sample_buffer_[i].second;
      }
    } else {
      samples_still_to_drop_ = -frame_overlap_samples;
    }

    if (use_window_) {
      // Apply the window to each row of output_frame.
      output_frame.array().rowwise() *= window_.array();
      *output_frame = (output_frame->array() * window_.array()).matrix();
    }

    cc->Outputs().Index(0).AddPacket(MakePacket<Matrix>(std::move(output_frame))
                                         .At(CurrentOutputTimestamp()));
    cc->Outputs().Index(0).Add(output_frame.release(),
                               CurrentOutputTimestamp());
    ++cumulative_output_frames_;
    cumulative_completed_samples_ += frame_step_samples;
  }

@ -304,18 +206,35 @@ absl::Status TimeSeriesFramerCalculator::Process(CalculatorContext* cc) {
    // fact to enable packet queueing optimizations.
    cc->Outputs().Index(0).SetNextTimestampBound(CumulativeOutputTimestamp());
  }
}

absl::Status TimeSeriesFramerCalculator::Process(CalculatorContext* cc) {
  if (initial_input_timestamp_ == Timestamp::Unstarted()) {
    initial_input_timestamp_ = cc->InputTimestamp();
    current_timestamp_ = initial_input_timestamp_;
  }

  EnqueueInput(cc);
  FrameOutput(cc);

  return absl::OkStatus();
}

absl::Status TimeSeriesFramerCalculator::Close(CalculatorContext* cc) {
  sample_buffer_.DropSamples(samples_still_to_drop_);
  while (samples_still_to_drop_ > 0 && !sample_buffer_.empty()) {
    sample_buffer_.pop_front();
    --samples_still_to_drop_;
  }
  if (!sample_buffer_.empty() && pad_final_packet_) {
    std::unique_ptr<Matrix> output_frame(new Matrix);
    output_frame->setZero(num_channels_, frame_duration_samples_);
    for (int i = 0; i < sample_buffer_.size(); ++i) {
      output_frame->col(i) = sample_buffer_[i].first;
      current_timestamp_ = sample_buffer_[i].second;
    }

  if (sample_buffer_.num_samples() > 0 && pad_final_packet_) {
    Matrix output_frame = sample_buffer_.CopySamples(frame_duration_samples_,
                                                     &current_timestamp_);
    cc->Outputs().Index(0).AddPacket(MakePacket<Matrix>(std::move(output_frame))
                                         .At(CurrentOutputTimestamp()));
    cc->Outputs().Index(0).Add(output_frame.release(),
                               CurrentOutputTimestamp());
  }

  return absl::OkStatus();

@ -339,7 +258,7 @@ absl::Status TimeSeriesFramerCalculator::Open(CalculatorContext* cc) {
      cc->Inputs().Index(0).Header(), &input_header));

  sample_rate_ = input_header.sample_rate();
  sample_buffer_.Init(sample_rate_, input_header.num_channels());
  num_channels_ = input_header.num_channels();
  frame_duration_samples_ = time_series_util::SecondsToSamples(
      framer_options.frame_duration_seconds(), sample_rate_);
  RET_CHECK_GT(frame_duration_samples_, 0)

@ -393,8 +312,9 @@ absl::Status TimeSeriesFramerCalculator::Open(CalculatorContext* cc) {
  }

  if (use_window_) {
    window_ = Eigen::Map<Eigen::RowVectorXd>(window_vector.data(),
                                             frame_duration_samples_)
    window_ = Matrix::Ones(num_channels_, 1) *
              Eigen::Map<Eigen::MatrixXd>(window_vector.data(), 1,
                                          frame_duration_samples_)
                  .cast<float>();
  }
  use_local_timestamp_ = framer_options.use_local_timestamp();

@ -66,7 +66,7 @@ message TimeSeriesFramerCalculatorOptions {
  // If use_local_timestamp is true, the output packet's timestamp is based on
  // the last sample of the packet and it's inferred from the latest input
  // packet's timestamp. If false, the output packet's timestamp is based on
  // the cumulative timestamping, which is inferred from the initial input
  // the cumulative timestamping, which is inferred from the intial input
  // timestamp and the cumulative number of samples.
  optional bool use_local_timestamp = 6 [default = false];
}

@@ -1,93 +0,0 @@
// Copyright 2023 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// Benchmark for TimeSeriesFramerCalculator.
#include <memory>
#include <random>
#include <vector>

#include "absl/log/absl_check.h"
#include "benchmark/benchmark.h"
#include "mediapipe/calculators/audio/time_series_framer_calculator.pb.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/formats/matrix.h"
#include "mediapipe/framework/formats/time_series_header.pb.h"
#include "mediapipe/framework/packet.h"

using ::mediapipe::Matrix;

void BM_TimeSeriesFramerCalculator(benchmark::State& state) {
  constexpr float kSampleRate = 32000.0;
  constexpr int kNumChannels = 2;
  constexpr int kFrameDurationSeconds = 5.0;
  std::mt19937 rng(0 /*seed*/);
  // Input around a half second's worth of samples at a time.
  std::uniform_int_distribution<int> input_size_dist(15000, 17000);
  // Generate a pool of random blocks of samples up front.
  std::vector<Matrix> sample_pool;
  sample_pool.reserve(20);
  for (int i = 0; i < 20; ++i) {
    sample_pool.push_back(Matrix::Random(kNumChannels, input_size_dist(rng)));
  }
  std::uniform_int_distribution<int> pool_index_dist(0, sample_pool.size() - 1);

  mediapipe::CalculatorGraphConfig config;
  config.add_input_stream("input");
  config.add_output_stream("output");
  auto* node = config.add_node();
  node->set_calculator("TimeSeriesFramerCalculator");
  node->add_input_stream("input");
  node->add_output_stream("output");
  mediapipe::TimeSeriesFramerCalculatorOptions* options =
      node->mutable_options()->MutableExtension(
          mediapipe::TimeSeriesFramerCalculatorOptions::ext);
  options->set_frame_duration_seconds(kFrameDurationSeconds);

  for (auto _ : state) {
    state.PauseTiming();  // Pause benchmark timing.

    // Prepare input packets of random blocks of samples.
    std::vector<mediapipe::Packet> input_packets;
    input_packets.reserve(32);
    float t = 0;
    for (int i = 0; i < 32; ++i) {
      auto samples =
          std::make_unique<Matrix>(sample_pool[pool_index_dist(rng)]);
      const int num_samples = samples->cols();
      input_packets.push_back(mediapipe::Adopt(samples.release())
                                  .At(mediapipe::Timestamp::FromSeconds(t)));
      t += num_samples / kSampleRate;
    }
    // Initialize graph.
    mediapipe::CalculatorGraph graph;
    ABSL_CHECK_OK(graph.Initialize(config));
    // Prepare input header.
    auto header = std::make_unique<mediapipe::TimeSeriesHeader>();
    header->set_sample_rate(kSampleRate);
    header->set_num_channels(kNumChannels);

    state.ResumeTiming();  // Resume benchmark timing.

    ABSL_CHECK_OK(graph.StartRun({}, {{"input", Adopt(header.release())}}));
    for (auto& packet : input_packets) {
      ABSL_CHECK_OK(graph.AddPacketToInputStream("input", packet));
    }
    ABSL_CHECK(!graph.HasError());
    ABSL_CHECK_OK(graph.CloseAllInputStreams());
    ABSL_CHECK_OK(graph.WaitUntilIdle());
  }
}
BENCHMARK(BM_TimeSeriesFramerCalculator);

BENCHMARK_MAIN();
@@ -19,7 +19,6 @@
#include <vector>

#include "Eigen/Core"
#include "absl/log/absl_log.h"
#include "audio/dsp/window_functions.h"
#include "mediapipe/calculators/audio/time_series_framer_calculator.pb.h"
#include "mediapipe/framework/calculator_framework.h"

@@ -187,12 +186,11 @@ class TimeSeriesFramerCalculatorTest
    const int num_unique_output_samples =
        round((output().packets.size() - 1) * frame_step_samples) +
        frame_duration_samples;
    ABSL_LOG(INFO) << "packets.size()=" << output().packets.size()
                   << " frame_duration_samples=" << frame_duration_samples
                   << " frame_step_samples=" << frame_step_samples
                   << " num_input_samples_=" << num_input_samples_
                   << " num_unique_output_samples="
                   << num_unique_output_samples;
    LOG(INFO) << "packets.size()=" << output().packets.size()
              << " frame_duration_samples=" << frame_duration_samples
              << " frame_step_samples=" << frame_step_samples
              << " num_input_samples_=" << num_input_samples_
              << " num_unique_output_samples=" << num_unique_output_samples;
    const int num_padding_samples =
        num_unique_output_samples - num_input_samples_;
    if (options_.pad_final_packet()) {
|
|
@ -21,10 +21,10 @@ licenses(["notice"])
|
|||
package(default_visibility = ["//visibility:public"])
|
||||
|
||||
selects.config_setting_group(
|
||||
name = "apple_or_disable_gpu",
|
||||
name = "ios_or_disable_gpu",
|
||||
match_any = [
|
||||
"//mediapipe/gpu:disable_gpu",
|
||||
"//mediapipe:apple",
|
||||
"//mediapipe:ios",
|
||||
],
|
||||
)
|
||||
|
||||
|
@ -117,7 +117,6 @@ mediapipe_proto_library(
|
|||
"//mediapipe/framework:calculator_proto",
|
||||
"//mediapipe/framework/formats:classification_proto",
|
||||
"//mediapipe/framework/formats:landmark_proto",
|
||||
"//mediapipe/framework/formats:matrix_data_proto",
|
||||
"//mediapipe/framework/formats:time_series_header_proto",
|
||||
],
|
||||
)
|
||||
|
@ -193,19 +192,17 @@ cc_library(
|
|||
"//mediapipe/framework:calculator_context",
|
||||
"//mediapipe/framework:calculator_contract",
|
||||
"//mediapipe/framework:calculator_framework",
|
||||
"//mediapipe/framework:collection_item_id",
|
||||
"//mediapipe/framework:packet",
|
||||
"//mediapipe/framework/formats:detection_cc_proto",
|
||||
"//mediapipe/framework/formats:image",
|
||||
"//mediapipe/framework/formats:image_frame",
|
||||
"//mediapipe/framework/formats:landmark_cc_proto",
|
||||
"//mediapipe/framework/formats:matrix",
|
||||
"//mediapipe/framework/formats:rect_cc_proto",
|
||||
"//mediapipe/framework/formats:tensor",
|
||||
"//mediapipe/framework/port:integral_types",
|
||||
"//mediapipe/framework/port:ret_check",
|
||||
"//mediapipe/framework/port:status",
|
||||
"//mediapipe/gpu:gpu_buffer",
|
||||
"@com_google_absl//absl/memory",
|
||||
"@com_google_absl//absl/status",
|
||||
],
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
@ -218,18 +215,18 @@ cc_library(
|
|||
"//mediapipe/framework:calculator_context",
|
||||
"//mediapipe/framework:calculator_contract",
|
||||
"//mediapipe/framework:calculator_framework",
|
||||
"//mediapipe/framework:collection_item_id",
|
||||
"//mediapipe/framework/formats:classification_cc_proto",
|
||||
"//mediapipe/framework/formats:detection_cc_proto",
|
||||
"//mediapipe/framework/formats:image",
|
||||
"//mediapipe/framework/formats:image_frame",
|
||||
"//mediapipe/framework/formats:landmark_cc_proto",
|
||||
"//mediapipe/framework/formats:matrix",
|
||||
"//mediapipe/framework/formats:rect_cc_proto",
|
||||
"//mediapipe/framework/formats:tensor",
|
||||
"//mediapipe/framework/port:integral_types",
|
||||
"//mediapipe/framework/port:ret_check",
|
||||
"//mediapipe/gpu:gpu_buffer",
|
||||
"//mediapipe/framework/port:status",
|
||||
"//mediapipe/util:render_data_cc_proto",
|
||||
"@com_google_absl//absl/status",
|
||||
"@org_tensorflow//tensorflow/lite:framework",
|
||||
],
|
||||
alwayslink = 1,
|
||||
|
@ -290,7 +287,6 @@ cc_library(
|
|||
"//mediapipe/framework/api2:node",
|
||||
"//mediapipe/framework/api2:port",
|
||||
"//mediapipe/framework/formats:classification_cc_proto",
|
||||
"//mediapipe/framework/formats:image",
|
||||
"//mediapipe/framework/formats:landmark_cc_proto",
|
||||
"//mediapipe/framework/formats:tensor",
|
||||
"//mediapipe/framework/port:integral_types",
|
||||
|
@ -299,7 +295,8 @@ cc_library(
|
|||
"//mediapipe/util:render_data_cc_proto",
|
||||
"@org_tensorflow//tensorflow/lite:framework",
|
||||
] + select({
|
||||
":apple_or_disable_gpu": [],
|
||||
"//mediapipe/gpu:disable_gpu": [],
|
||||
"//mediapipe:ios": [],
|
||||
"//conditions:default": [
|
||||
"@org_tensorflow//tensorflow/lite/delegates/gpu/gl:gl_buffer",
|
||||
],
|
||||
|
@ -325,7 +322,6 @@ cc_library(
|
|||
":concatenate_vector_calculator_cc_proto",
|
||||
"//mediapipe/framework:calculator_framework",
|
||||
"//mediapipe/framework/api2:node",
|
||||
"//mediapipe/framework/formats:body_rig_cc_proto",
|
||||
"//mediapipe/framework/formats:classification_cc_proto",
|
||||
"//mediapipe/framework/formats:landmark_cc_proto",
|
||||
"//mediapipe/framework/port:ret_check",
|
||||
|
@ -382,6 +378,17 @@ cc_library(
|
|||
alwayslink = 1,
|
||||
)
|
||||
|
||||
cc_library(
|
||||
name = "clip_detection_vector_size_calculator",
|
||||
srcs = ["clip_detection_vector_size_calculator.cc"],
|
||||
deps = [
|
||||
":clip_vector_size_calculator",
|
||||
"//mediapipe/framework:calculator_framework",
|
||||
"//mediapipe/framework/formats:detection_cc_proto",
|
||||
],
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
||||
cc_test(
|
||||
name = "clip_vector_size_calculator_test",
|
||||
srcs = ["clip_vector_size_calculator_test.cc"],
|
||||
|
@ -583,7 +590,6 @@ cc_library(
|
|||
"//mediapipe/framework/port:logging",
|
||||
"//mediapipe/framework/port:status",
|
||||
"//mediapipe/framework/tool:options_util",
|
||||
"@com_google_absl//absl/log:absl_check",
|
||||
],
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
@ -599,7 +605,6 @@ cc_test(
|
|||
"//mediapipe/framework/formats:video_stream_header",
|
||||
"//mediapipe/framework/port:gtest_main",
|
||||
"//mediapipe/framework/port:integral_types",
|
||||
"@com_google_absl//absl/log:absl_check",
|
||||
"@com_google_absl//absl/strings",
|
||||
],
|
||||
)
|
||||
|
@ -632,7 +637,6 @@ cc_library(
|
|||
"//mediapipe/framework:calculator_framework",
|
||||
"//mediapipe/framework/port:ret_check",
|
||||
"//mediapipe/framework/port:status",
|
||||
"@com_google_absl//absl/log:absl_log",
|
||||
],
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
@ -727,7 +731,6 @@ cc_library(
|
|||
"//mediapipe/framework/port:logging",
|
||||
"//mediapipe/framework/port:ret_check",
|
||||
"//mediapipe/framework/port:status",
|
||||
"@com_google_absl//absl/status",
|
||||
],
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
@ -743,7 +746,6 @@ cc_test(
|
|||
"//mediapipe/framework/port:parse_text_proto",
|
||||
"//mediapipe/framework/port:status",
|
||||
"//mediapipe/framework/tool:options_util",
|
||||
"//mediapipe/util:packet_test_util",
|
||||
"@com_google_absl//absl/memory",
|
||||
"@com_google_absl//absl/strings",
|
||||
],
|
||||
|
@ -782,11 +784,10 @@ cc_library(
|
|||
"//mediapipe/framework/deps:random",
|
||||
"//mediapipe/framework/formats:video_stream_header",
|
||||
"//mediapipe/framework/port:integral_types",
|
||||
"//mediapipe/framework/port:logging",
|
||||
"//mediapipe/framework/port:ret_check",
|
||||
"//mediapipe/framework/port:status",
|
||||
"//mediapipe/framework/tool:options_util",
|
||||
"@com_google_absl//absl/log:absl_check",
|
||||
"@com_google_absl//absl/log:absl_log",
|
||||
"@com_google_absl//absl/strings",
|
||||
],
|
||||
alwayslink = 1,
|
||||
|
@ -842,7 +843,6 @@ cc_test(
|
|||
"//mediapipe/framework/port:gtest_main",
|
||||
"//mediapipe/framework/port:logging",
|
||||
"//mediapipe/framework/tool:validate_type",
|
||||
"@com_google_absl//absl/log:absl_check",
|
||||
"@eigen_archive//:eigen3",
|
||||
],
|
||||
)
|
||||
|
@ -904,7 +904,6 @@ cc_library(
|
|||
"//mediapipe/framework:calculator_framework",
|
||||
"//mediapipe/framework/formats:classification_cc_proto",
|
||||
"//mediapipe/framework/formats:detection_cc_proto",
|
||||
"//mediapipe/framework/formats:image",
|
||||
"//mediapipe/framework/formats:landmark_cc_proto",
|
||||
"//mediapipe/framework/formats:matrix",
|
||||
"//mediapipe/framework/formats:rect_cc_proto",
|
||||
|
@ -915,7 +914,7 @@ cc_library(
|
|||
"@org_tensorflow//tensorflow/lite:framework",
|
||||
"@org_tensorflow//tensorflow/lite/kernels:builtin_ops",
|
||||
] + select({
|
||||
":apple_or_disable_gpu": [],
|
||||
":ios_or_disable_gpu": [],
|
||||
"//conditions:default": [
|
||||
"@org_tensorflow//tensorflow/lite/delegates/gpu/gl:gl_buffer",
|
||||
],
|
||||
|
@ -947,7 +946,6 @@ cc_library(
|
|||
deps = [
|
||||
":split_vector_calculator_cc_proto",
|
||||
"//mediapipe/framework:calculator_framework",
|
||||
"//mediapipe/framework/formats:body_rig_cc_proto",
|
||||
"//mediapipe/framework/formats:landmark_cc_proto",
|
||||
"//mediapipe/framework/port:ret_check",
|
||||
"//mediapipe/framework/port:status",
|
||||
|
@ -1031,7 +1029,6 @@ cc_library(
|
|||
"//mediapipe/framework:calculator_framework",
|
||||
"//mediapipe/framework/api2:node",
|
||||
"//mediapipe/framework/port:status",
|
||||
"@com_google_absl//absl/log:absl_log",
|
||||
],
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
@ -1070,7 +1067,6 @@ cc_test(
|
|||
"//mediapipe/framework:calculator_runner",
|
||||
"//mediapipe/framework/port:gtest_main",
|
||||
"//mediapipe/framework/port:parse_text_proto",
|
||||
"@com_google_absl//absl/log:absl_log",
|
||||
],
|
||||
)
|
||||
|
||||
|
@ -1117,7 +1113,6 @@ cc_library(
|
|||
"//mediapipe/framework/api2:node",
|
||||
"//mediapipe/framework/port:ret_check",
|
||||
"//mediapipe/framework/port:status",
|
||||
"@com_google_absl//absl/log:absl_log",
|
||||
],
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
@ -1141,7 +1136,6 @@ cc_library(
|
|||
deps = [
|
||||
"//mediapipe/framework:calculator_framework",
|
||||
"//mediapipe/framework:timestamp",
|
||||
"//mediapipe/framework/api2:node",
|
||||
"//mediapipe/framework/port:status",
|
||||
],
|
||||
alwayslink = 1,
|
||||
|
@ -1170,7 +1164,6 @@ cc_library(
|
|||
"//mediapipe/framework:collection_item_id",
|
||||
"//mediapipe/framework/formats:classification_cc_proto",
|
||||
"//mediapipe/framework/formats:landmark_cc_proto",
|
||||
"//mediapipe/framework/formats:matrix_data_cc_proto",
|
||||
"//mediapipe/framework/formats:time_series_header_cc_proto",
|
||||
"//mediapipe/framework/port:integral_types",
|
||||
"//mediapipe/framework/port:ret_check",
|
||||
|
@ -1245,7 +1238,6 @@ cc_library(
|
|||
"//mediapipe/framework/formats:classification_cc_proto",
|
||||
"//mediapipe/framework/formats:detection_cc_proto",
|
||||
"//mediapipe/framework/formats:landmark_cc_proto",
|
||||
"//mediapipe/framework/formats:rect_cc_proto",
|
||||
"//mediapipe/framework/port:ret_check",
|
||||
"//mediapipe/framework/port:status",
|
||||
],
|
||||
|
@ -1393,26 +1385,3 @@ cc_test(
|
|||
"@com_google_absl//absl/types:optional",
|
||||
],
|
||||
)
|
||||
|
||||
cc_library(
|
||||
name = "value_or_default_calculator",
|
||||
srcs = ["value_or_default_calculator.cc"],
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//mediapipe/framework:calculator_framework",
|
||||
"//mediapipe/framework/port:status",
|
||||
],
|
||||
alwayslink = True,
|
||||
)
|
||||
|
||||
cc_test(
|
||||
name = "value_or_default_calculator_test",
|
||||
srcs = ["value_or_default_calculator_test.cc"],
|
||||
deps = [
|
||||
":value_or_default_calculator",
|
||||
"//mediapipe/framework:calculator_framework",
|
||||
"//mediapipe/framework:calculator_runner",
|
||||
"//mediapipe/framework:packet",
|
||||
"//mediapipe/framework/port:gtest_main",
|
||||
],
|
||||
)
|
||||
|
|
|
@ -13,7 +13,6 @@
|
|||
// limitations under the License.
|
||||
|
||||
#include <string>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "absl/memory/memory.h"
|
||||
|
@ -164,75 +163,6 @@ TEST_F(BeginEndLoopCalculatorGraphTest, MultipleVectors) {
|
|||
PacketOfIntsEq(input_timestamp2, std::vector<int>{3, 4})));
|
||||
}
|
||||
|
||||
TEST(BeginEndLoopCalculatorPossibleDataRaceTest,
|
||||
EndLoopForIntegersDoesNotRace) {
|
||||
auto graph_config = ParseTextProtoOrDie<CalculatorGraphConfig>(
|
||||
R"pb(
|
||||
num_threads: 4
|
||||
input_stream: "ints"
|
||||
node {
|
||||
calculator: "BeginLoopIntegerCalculator"
|
||||
input_stream: "ITERABLE:ints"
|
||||
output_stream: "ITEM:int"
|
||||
output_stream: "BATCH_END:timestamp"
|
||||
}
|
||||
node {
|
||||
calculator: "IncrementCalculator"
|
||||
input_stream: "int"
|
||||
output_stream: "int_plus_one"
|
||||
}
|
||||
# BEGIN: Data race possibility
|
||||
# EndLoop###Calculator and another calculator using the same input
|
||||
# may introduce race due to EndLoop###Calculator possibly consuming
|
||||
# packet.
|
||||
node {
|
||||
calculator: "EndLoopIntegersCalculator"
|
||||
input_stream: "ITEM:int_plus_one"
|
||||
input_stream: "BATCH_END:timestamp"
|
||||
output_stream: "ITERABLE:ints_plus_one"
|
||||
}
|
||||
node {
|
||||
calculator: "IncrementCalculator"
|
||||
input_stream: "int_plus_one"
|
||||
output_stream: "int_plus_two"
|
||||
}
|
||||
# END: Data race possibility
|
||||
node {
|
||||
calculator: "EndLoopIntegersCalculator"
|
||||
input_stream: "ITEM:int_plus_two"
|
||||
input_stream: "BATCH_END:timestamp"
|
||||
output_stream: "ITERABLE:ints_plus_two"
|
||||
}
|
||||
)pb");
|
||||
std::vector<Packet> int_plus_one_packets;
|
||||
tool::AddVectorSink("ints_plus_one", &graph_config, &int_plus_one_packets);
|
||||
std::vector<Packet> int_original_packets;
|
||||
tool::AddVectorSink("ints_plus_two", &graph_config, &int_original_packets);
|
||||
|
||||
CalculatorGraph graph;
|
||||
MP_ASSERT_OK(graph.Initialize(graph_config));
|
||||
MP_ASSERT_OK(graph.StartRun({}));
|
||||
for (int i = 0; i < 100; ++i) {
|
||||
std::vector<int> ints = {i, i + 1, i + 2};
|
||||
Timestamp ts = Timestamp(i);
|
||||
MP_ASSERT_OK(graph.AddPacketToInputStream(
|
||||
"ints", MakePacket<std::vector<int>>(std::move(ints)).At(ts)));
|
||||
MP_ASSERT_OK(graph.WaitUntilIdle());
|
||||
EXPECT_THAT(int_plus_one_packets,
|
||||
testing::ElementsAre(
|
||||
PacketOfIntsEq(ts, std::vector<int>{i + 1, i + 2, i + 3})));
|
||||
EXPECT_THAT(int_original_packets,
|
||||
testing::ElementsAre(
|
||||
PacketOfIntsEq(ts, std::vector<int>{i + 2, i + 3, i + 4})));
|
||||
|
||||
int_plus_one_packets.clear();
|
||||
int_original_packets.clear();
|
||||
}
|
||||
|
||||
MP_ASSERT_OK(graph.CloseAllPacketSources());
|
||||
MP_ASSERT_OK(graph.WaitUntilDone());
|
||||
}
|
||||
|
||||
// Passes non empty vector through or outputs empty vector in case of timestamp
|
||||
// bound update.
|
||||
class PassThroughOrEmptyVectorCalculator : public CalculatorBase {
|
||||
|
|
|
@ -17,13 +17,10 @@
|
|||
#include <vector>
|
||||
|
||||
#include "mediapipe/framework/formats/detection.pb.h"
|
||||
#include "mediapipe/framework/formats/image.h"
|
||||
#include "mediapipe/framework/formats/image_frame.h"
|
||||
#include "mediapipe/framework/formats/landmark.pb.h"
|
||||
#include "mediapipe/framework/formats/matrix.h"
|
||||
#include "mediapipe/framework/formats/rect.pb.h"
|
||||
#include "mediapipe/framework/formats/tensor.h"
|
||||
#include "mediapipe/gpu/gpu_buffer.h"
|
||||
|
||||
namespace mediapipe {
|
||||
|
||||
|
@ -63,22 +60,4 @@ REGISTER_CALCULATOR(BeginLoopUint64tCalculator);
|
|||
typedef BeginLoopCalculator<std::vector<Tensor>> BeginLoopTensorCalculator;
|
||||
REGISTER_CALCULATOR(BeginLoopTensorCalculator);
|
||||
|
||||
// A calculator to process std::vector<mediapipe::ImageFrame>.
|
||||
typedef BeginLoopCalculator<std::vector<ImageFrame>>
|
||||
BeginLoopImageFrameCalculator;
|
||||
REGISTER_CALCULATOR(BeginLoopImageFrameCalculator);
|
||||
|
||||
// A calculator to process std::vector<mediapipe::GpuBuffer>.
|
||||
typedef BeginLoopCalculator<std::vector<GpuBuffer>>
|
||||
BeginLoopGpuBufferCalculator;
|
||||
REGISTER_CALCULATOR(BeginLoopGpuBufferCalculator);
|
||||
|
||||
// A calculator to process std::vector<mediapipe::Image>.
|
||||
typedef BeginLoopCalculator<std::vector<Image>> BeginLoopImageCalculator;
|
||||
REGISTER_CALCULATOR(BeginLoopImageCalculator);
|
||||
|
||||
// A calculator to process std::vector<float>.
|
||||
typedef BeginLoopCalculator<std::vector<float>> BeginLoopFloatCalculator;
|
||||
REGISTER_CALCULATOR(BeginLoopFloatCalculator);
|
||||
|
||||
} // namespace mediapipe
|
||||
|
|
|
@@ -15,57 +15,47 @@
#ifndef MEDIAPIPE_CALCULATORS_CORE_BEGIN_LOOP_CALCULATOR_H_
#define MEDIAPIPE_CALCULATORS_CORE_BEGIN_LOOP_CALCULATOR_H_

#include "absl/status/status.h"
#include "mediapipe/framework/calculator_context.h"
#include "mediapipe/framework/calculator_contract.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/collection_item_id.h"
#include "mediapipe/framework/packet.h"
#include "mediapipe/framework/port/integral_types.h"
#include "mediapipe/framework/port/ret_check.h"
#include "mediapipe/framework/port/status.h"
#include "mediapipe/framework/port/status_macros.h"

namespace mediapipe {

// Calculator for implementing loops on iterable collections inside a MediaPipe
// graph. Assume InputIterT is an iterable for type InputT, and OutputIterT is
// an iterable for type OutputT, e.g. vector<InputT> and vector<OutputT>.
// First, instantiate specializations in the loop calculators' implementations
// if missing:
//   BeginLoopInputTCalculator = BeginLoopCalculator<InputIterT>
//   EndLoopOutputTCalculator = EndLoopCalculator<OutputIterT>
// Then, the following graph transforms an item of type InputIterT to an
// OutputIterT by applying InputToOutputConverter to every element:
// graph.
//
// node {                                        # Type        @timestamp
//   calculator:    "BeginLoopInputTCalculator"
//   input_stream:  "ITERABLE:input_iterable"    # InputIterT  @iterable_ts
//   input_stream:  "CLONE:extra_input"          # ExtraT      @extra_ts
//   output_stream: "ITEM:input_iterator"        # InputT      @loop_internal_ts
//   output_stream: "CLONE:cloned_extra_input"   # ExtraT      @loop_internal_ts
//   output_stream: "BATCH_END:iterable_ts"      # Timestamp   @loop_internal_ts
// It is designed to be used like:
//
// node {
//   calculator:    "BeginLoopWithIterableCalculator"
//   input_stream:  "ITERABLE:input_iterable"    # IterableT   @ext_ts
//   output_stream: "ITEM:input_element"         # ItemT       @loop_internal_ts
//   output_stream: "BATCH_END:ext_ts"           # Timestamp   @loop_internal_ts
// }
//
// node {
//   calculator:    "InputToOutputConverter"
//   input_stream:  "INPUT:input_iterator"       # InputT      @loop_internal_ts
//   input_stream:  "EXTRA:cloned_extra_input"   # ExtraT      @loop_internal_ts
//   output_stream: "OUTPUT:output_iterator"     # OutputT     @loop_internal_ts
//   calculator:    "ElementToBlaConverterSubgraph"
//   input_stream:  "ITEM:input_to_loop_body"    # ItemT       @loop_internal_ts
//   output_stream: "BLA:output_of_loop_body"    # ItemU       @loop_internal_ts
// }
//
// node {
//   calculator:    "EndLoopOutputTCalculator"
//   input_stream:  "ITEM:output_iterator"       # OutputT     @loop_internal_ts
//   input_stream:  "BATCH_END:iterable_ts"      # Timestamp   @loop_internal_ts
//   output_stream: "ITERABLE:output_iterable"   # OutputIterT @iterable_ts
//   calculator:    "EndLoopWithOutputCalculator"
//   input_stream:  "ITEM:output_of_loop_body"   # ItemU       @loop_internal_ts
//   input_stream:  "BATCH_END:ext_ts"           # Timestamp   @loop_internal_ts
//   output_stream: "ITERABLE:aggregated_result" # IterableU   @ext_ts
// }
//
// The resulting 'output_iterable' has the same timestamp as 'input_iterable'.
// The output packets of this calculator are part of the loop body and have
// loop-internal timestamps that are unrelated to the input iterator timestamp.
//
// Input streams tagged with "CLONE" are cloned to the corresponding output
// streams at loop-internal timestamps. This ensures that a MediaPipe graph or
// sub-graph can run multiple times, once per element in the "ITERABLE" for each
// packet clone of the packets in the "CLONE" input streams. Think of CLONEd
// inputs as loop-wide constants.
// streams at loop timestamps. This ensures that a MediaPipe graph or sub-graph
// can run multiple times, once per element in the "ITERABLE" for each packet
// clone of the packets in the "CLONE" input streams.
template <typename IterableT>
class BeginLoopCalculator : public CalculatorBase {
  using ItemT = typename IterableT::value_type;
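As the comment above says, looping over a new iterable type requires instantiating and registering a specialization. A minimal sketch following the typedef/REGISTER_CALCULATOR pattern used for the other element types in this file; the Detection specialization here is illustrative:

// Illustrative registration of a BeginLoopCalculator specialization for
// std::vector<mediapipe::Detection>, mirroring the registrations shown above.
typedef BeginLoopCalculator<std::vector<::mediapipe::Detection>>
    BeginLoopDetectionCalculator;
REGISTER_CALCULATOR(BeginLoopDetectionCalculator);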
@ -92,7 +92,7 @@ class BypassCalculator : public Node {
|
|||
auto options = cc->Options<BypassCalculatorOptions>();
|
||||
RET_CHECK_EQ(options.pass_input_stream().size(),
|
||||
options.pass_output_stream().size());
|
||||
MP_ASSIGN_OR_RETURN(
|
||||
ASSIGN_OR_RETURN(
|
||||
auto pass_streams,
|
||||
GetPassMap(options, *cc->Inputs().TagMap(), *cc->Outputs().TagMap()));
|
||||
std::set<CollectionItemId> pass_out;
|
||||
|
@ -121,9 +121,8 @@ class BypassCalculator : public Node {
|
|||
// Saves the map of passthrough input and output stream ids.
|
||||
absl::Status Open(CalculatorContext* cc) override {
|
||||
auto options = cc->Options<BypassCalculatorOptions>();
|
||||
MP_ASSIGN_OR_RETURN(
|
||||
pass_streams_,
|
||||
GetPassMap(options, *cc->Inputs().TagMap(), *cc->Outputs().TagMap()));
|
||||
ASSIGN_OR_RETURN(pass_streams_, GetPassMap(options, *cc->Inputs().TagMap(),
|
||||
*cc->Outputs().TagMap()));
|
||||
return absl::OkStatus();
|
||||
}
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright 2023 The MediaPipe Authors.
|
||||
// Copyright 2019 The MediaPipe Authors.
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
|
@ -12,16 +12,15 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#import <Foundation/Foundation.h>
|
||||
#include <vector>
|
||||
|
||||
#import "mediapipe/tasks/ios/core/sources/MPPTaskOptions.h"
|
||||
#include "mediapipe/calculators/core/clip_vector_size_calculator.h"
|
||||
#include "mediapipe/framework/formats/detection.pb.h"
|
||||
|
||||
NS_ASSUME_NONNULL_BEGIN
|
||||
namespace mediapipe {
|
||||
|
||||
/** Options for setting up a `FaceStylizer`. */
|
||||
NS_SWIFT_NAME(FaceStylizerOptions)
|
||||
@interface MPPFaceStylizerOptions : MPPTaskOptions <NSCopying>
|
||||
typedef ClipVectorSizeCalculator<::mediapipe::Detection>
|
||||
ClipDetectionVectorSizeCalculator;
|
||||
REGISTER_CALCULATOR(ClipDetectionVectorSizeCalculator);
|
||||
|
||||
@end
|
||||
|
||||
NS_ASSUME_NONNULL_END
|
||||
} // namespace mediapipe
|
|
@ -18,7 +18,6 @@
|
|||
#include "mediapipe/calculators/core/concatenate_vector_calculator.pb.h"
|
||||
#include "mediapipe/framework/api2/node.h"
|
||||
#include "mediapipe/framework/calculator_framework.h"
|
||||
#include "mediapipe/framework/formats/body_rig.pb.h"
|
||||
#include "mediapipe/framework/formats/classification.pb.h"
|
||||
#include "mediapipe/framework/formats/landmark.pb.h"
|
||||
#include "mediapipe/framework/port/canonical_errors.h"
|
||||
|
@ -129,19 +128,6 @@ class ConcatenateClassificationListCalculator
|
|||
};
|
||||
MEDIAPIPE_REGISTER_NODE(ConcatenateClassificationListCalculator);
|
||||
|
||||
class ConcatenateJointListCalculator
|
||||
: public ConcatenateListsCalculator<Joint, JointList> {
|
||||
protected:
|
||||
int ListSize(const JointList& list) const override {
|
||||
return list.joint_size();
|
||||
}
|
||||
const Joint GetItem(const JointList& list, int idx) const override {
|
||||
return list.joint(idx);
|
||||
}
|
||||
Joint* AddItem(JointList& list) const override { return list.add_joint(); }
|
||||
};
|
||||
MEDIAPIPE_REGISTER_NODE(ConcatenateJointListCalculator);
|
||||
|
||||
} // namespace api2
|
||||
} // namespace mediapipe
|
||||
|
||||
|
|
|
@ -17,7 +17,6 @@
|
|||
#include <vector>
|
||||
|
||||
#include "mediapipe/framework/formats/classification.pb.h"
|
||||
#include "mediapipe/framework/formats/image.h"
|
||||
#include "mediapipe/framework/formats/landmark.pb.h"
|
||||
#include "mediapipe/framework/formats/tensor.h"
|
||||
#include "mediapipe/framework/port/integral_types.h"
|
||||
|
@ -56,10 +55,6 @@ MEDIAPIPE_REGISTER_NODE(ConcatenateUInt64VectorCalculator);
|
|||
typedef ConcatenateVectorCalculator<bool> ConcatenateBoolVectorCalculator;
|
||||
MEDIAPIPE_REGISTER_NODE(ConcatenateBoolVectorCalculator);
|
||||
|
||||
typedef ConcatenateVectorCalculator<std::string>
|
||||
ConcatenateStringVectorCalculator;
|
||||
MEDIAPIPE_REGISTER_NODE(ConcatenateStringVectorCalculator);
|
||||
|
||||
// Example config:
|
||||
// node {
|
||||
// calculator: "ConcatenateTfLiteTensorVectorCalculator"
|
||||
|
@ -105,7 +100,4 @@ typedef ConcatenateVectorCalculator<mediapipe::RenderData>
|
|||
ConcatenateRenderDataVectorCalculator;
|
||||
MEDIAPIPE_REGISTER_NODE(ConcatenateRenderDataVectorCalculator);
|
||||
|
||||
typedef ConcatenateVectorCalculator<mediapipe::Image>
|
||||
ConcatenateImageVectorCalculator;
|
||||
MEDIAPIPE_REGISTER_NODE(ConcatenateImageVectorCalculator);
|
||||
} // namespace mediapipe
|
||||
|
|
|
@ -30,15 +30,13 @@ namespace mediapipe {
|
|||
typedef ConcatenateVectorCalculator<int> TestConcatenateIntVectorCalculator;
|
||||
MEDIAPIPE_REGISTER_NODE(TestConcatenateIntVectorCalculator);
|
||||
|
||||
template <typename T>
|
||||
void AddInputVector(int index, const std::vector<T>& input, int64_t timestamp,
|
||||
void AddInputVector(int index, const std::vector<int>& input, int64_t timestamp,
|
||||
CalculatorRunner* runner) {
|
||||
runner->MutableInputs()->Index(index).packets.push_back(
|
||||
MakePacket<std::vector<T>>(input).At(Timestamp(timestamp)));
|
||||
MakePacket<std::vector<int>>(input).At(Timestamp(timestamp)));
|
||||
}
|
||||
|
||||
template <typename T>
|
||||
void AddInputVectors(const std::vector<std::vector<T>>& inputs,
|
||||
void AddInputVectors(const std::vector<std::vector<int>>& inputs,
|
||||
int64_t timestamp, CalculatorRunner* runner) {
|
||||
for (int i = 0; i < inputs.size(); ++i) {
|
||||
AddInputVector(i, inputs[i], timestamp, runner);
|
||||
|
@ -384,23 +382,6 @@ TEST(ConcatenateFloatVectorCalculatorTest, OneEmptyStreamNoOutput) {
|
|||
EXPECT_EQ(0, outputs.size());
|
||||
}
|
||||
|
||||
TEST(ConcatenateStringVectorCalculatorTest, OneTimestamp) {
|
||||
CalculatorRunner runner("ConcatenateStringVectorCalculator",
|
||||
/*options_string=*/"", /*num_inputs=*/3,
|
||||
/*num_outputs=*/1, /*num_side_packets=*/0);
|
||||
|
||||
std::vector<std::vector<std::string>> inputs = {
|
||||
{"a", "b"}, {"c"}, {"d", "e", "f"}};
|
||||
AddInputVectors(inputs, /*timestamp=*/1, &runner);
|
||||
MP_ASSERT_OK(runner.Run());
|
||||
|
||||
const std::vector<Packet>& outputs = runner.Outputs().Index(0).packets;
|
||||
EXPECT_EQ(1, outputs.size());
|
||||
EXPECT_EQ(Timestamp(1), outputs[0].Timestamp());
|
||||
std::vector<std::string> expected_vector = {"a", "b", "c", "d", "e", "f"};
|
||||
EXPECT_EQ(expected_vector, outputs[0].Get<std::vector<std::string>>());
|
||||
}
|
||||
|
||||
typedef ConcatenateVectorCalculator<std::unique_ptr<int>>
|
||||
TestConcatenateUniqueIntPtrCalculator;
|
||||
MEDIAPIPE_REGISTER_NODE(TestConcatenateUniqueIntPtrCalculator);
|
||||
|
|
|
@ -19,7 +19,6 @@
|
|||
#include "mediapipe/framework/collection_item_id.h"
|
||||
#include "mediapipe/framework/formats/classification.pb.h"
|
||||
#include "mediapipe/framework/formats/landmark.pb.h"
|
||||
#include "mediapipe/framework/formats/matrix_data.pb.h"
|
||||
#include "mediapipe/framework/formats/time_series_header.pb.h"
|
||||
#include "mediapipe/framework/port/canonical_errors.h"
|
||||
#include "mediapipe/framework/port/integral_types.h"
|
||||
|
@ -79,19 +78,15 @@ class ConstantSidePacketCalculator : public CalculatorBase {
|
|||
} else if (packet_options.has_string_value()) {
|
||||
packet.Set<std::string>();
|
||||
} else if (packet_options.has_uint64_value()) {
|
||||
packet.Set<uint64_t>();
|
||||
packet.Set<uint64>();
|
||||
} else if (packet_options.has_classification_list_value()) {
|
||||
packet.Set<ClassificationList>();
|
||||
} else if (packet_options.has_landmark_list_value()) {
|
||||
packet.Set<LandmarkList>();
|
||||
} else if (packet_options.has_double_value()) {
|
||||
packet.Set<double>();
|
||||
} else if (packet_options.has_matrix_data_value()) {
|
||||
packet.Set<MatrixData>();
|
||||
} else if (packet_options.has_time_series_header_value()) {
|
||||
packet.Set<TimeSeriesHeader>();
|
||||
} else if (packet_options.has_int64_value()) {
|
||||
packet.Set<int64_t>();
|
||||
} else {
|
||||
return absl::InvalidArgumentError(
|
||||
"None of supported values were specified in options.");
|
||||
|
@ -117,7 +112,7 @@ class ConstantSidePacketCalculator : public CalculatorBase {
|
|||
} else if (packet_options.has_string_value()) {
|
||||
packet.Set(MakePacket<std::string>(packet_options.string_value()));
|
||||
} else if (packet_options.has_uint64_value()) {
|
||||
packet.Set(MakePacket<uint64_t>(packet_options.uint64_value()));
|
||||
packet.Set(MakePacket<uint64>(packet_options.uint64_value()));
|
||||
} else if (packet_options.has_classification_list_value()) {
|
||||
packet.Set(MakePacket<ClassificationList>(
|
||||
packet_options.classification_list_value()));
|
||||
|
@ -126,13 +121,9 @@ class ConstantSidePacketCalculator : public CalculatorBase {
|
|||
MakePacket<LandmarkList>(packet_options.landmark_list_value()));
|
||||
} else if (packet_options.has_double_value()) {
|
||||
packet.Set(MakePacket<double>(packet_options.double_value()));
|
||||
} else if (packet_options.has_matrix_data_value()) {
|
||||
packet.Set(MakePacket<MatrixData>(packet_options.matrix_data_value()));
|
||||
} else if (packet_options.has_time_series_header_value()) {
|
||||
packet.Set(MakePacket<TimeSeriesHeader>(
|
||||
packet_options.time_series_header_value()));
|
||||
} else if (packet_options.has_int64_value()) {
|
||||
packet.Set(MakePacket<int64_t>(packet_options.int64_value()));
|
||||
} else {
|
||||
return absl::InvalidArgumentError(
|
||||
"None of supported values were specified in options.");
|
||||
|
|
|
@@ -19,7 +19,6 @@ package mediapipe;
import "mediapipe/framework/calculator.proto";
import "mediapipe/framework/formats/classification.proto";
import "mediapipe/framework/formats/landmark.proto";
import "mediapipe/framework/formats/matrix_data.proto";
import "mediapipe/framework/formats/time_series_header.proto";

message ConstantSidePacketCalculatorOptions {

@@ -30,16 +29,14 @@ message ConstantSidePacketCalculatorOptions {
  message ConstantSidePacket {
    oneof value {
      int32 int_value = 1;
      uint64 uint64_value = 5;
      int64 int64_value = 11;
      float float_value = 2;
      double double_value = 9;
      bool bool_value = 3;
      string string_value = 4;
      uint64 uint64_value = 5;
      ClassificationList classification_list_value = 6;
      LandmarkList landmark_list_value = 7;
      double double_value = 9;
      TimeSeriesHeader time_series_header_value = 10;
      MatrixData matrix_data_value = 12;
    }
  }
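For context, a ConstantSidePacketCalculator node supplies one packet option per output side packet, and the oneof above is chosen once per packet. A small illustrative config; the side packet names and values are made up for this sketch:

node {
  calculator: "ConstantSidePacketCalculator"
  output_side_packet: "PACKET:0:num_hands"
  output_side_packet: "PACKET:1:label"
  options: {
    [mediapipe.ConstantSidePacketCalculatorOptions.ext] {
      packet { int_value: 2 }
      packet { string_value: "left" }
    }
  }
}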
|
@ -12,7 +12,6 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include <cstdint>
|
||||
#include <string>
|
||||
|
||||
#include "absl/strings/string_view.h"
|
||||
|
@ -59,7 +58,6 @@ TEST(ConstantSidePacketCalculatorTest, EveryPossibleType) {
|
|||
DoTestSingleSidePacket("{ float_value: 6.5f }", 6.5f);
|
||||
DoTestSingleSidePacket("{ bool_value: true }", true);
|
||||
DoTestSingleSidePacket<std::string>(R"({ string_value: "str" })", "str");
|
||||
DoTestSingleSidePacket<int64_t>("{ int64_value: 63 }", 63);
|
||||
}
|
||||
|
||||
TEST(ConstantSidePacketCalculatorTest, MultiplePackets) {
|
||||
|
|
|
@ -14,19 +14,15 @@
|
|||
|
||||
#include "mediapipe/calculators/core/end_loop_calculator.h"
|
||||
|
||||
#include <array>
|
||||
#include <utility>
|
||||
#include <vector>
|
||||
|
||||
#include "mediapipe/framework/formats/classification.pb.h"
|
||||
#include "mediapipe/framework/formats/detection.pb.h"
|
||||
#include "mediapipe/framework/formats/image.h"
|
||||
#include "mediapipe/framework/formats/image_frame.h"
|
||||
#include "mediapipe/framework/formats/landmark.pb.h"
|
||||
#include "mediapipe/framework/formats/matrix.h"
|
||||
#include "mediapipe/framework/formats/rect.pb.h"
|
||||
#include "mediapipe/framework/formats/tensor.h"
|
||||
#include "mediapipe/gpu/gpu_buffer.h"
|
||||
#include "mediapipe/util/render_data.pb.h"
|
||||
#include "tensorflow/lite/interpreter.h"
|
||||
|
||||
|
@ -72,22 +68,8 @@ REGISTER_CALCULATOR(EndLoopMatrixCalculator);
|
|||
typedef EndLoopCalculator<std::vector<Tensor>> EndLoopTensorCalculator;
|
||||
REGISTER_CALCULATOR(EndLoopTensorCalculator);
|
||||
|
||||
typedef EndLoopCalculator<std::vector<ImageFrame>> EndLoopImageFrameCalculator;
|
||||
REGISTER_CALCULATOR(EndLoopImageFrameCalculator);
|
||||
|
||||
typedef EndLoopCalculator<std::vector<GpuBuffer>> EndLoopGpuBufferCalculator;
|
||||
REGISTER_CALCULATOR(EndLoopGpuBufferCalculator);
|
||||
|
||||
typedef EndLoopCalculator<std::vector<::mediapipe::Image>>
|
||||
EndLoopImageCalculator;
|
||||
REGISTER_CALCULATOR(EndLoopImageCalculator);
|
||||
|
||||
typedef EndLoopCalculator<std::vector<std::array<float, 16>>>
|
||||
EndLoopAffineMatrixCalculator;
|
||||
REGISTER_CALCULATOR(EndLoopAffineMatrixCalculator);
|
||||
|
||||
typedef EndLoopCalculator<std::vector<std::pair<int, int>>>
|
||||
EndLoopImageSizeCalculator;
|
||||
REGISTER_CALCULATOR(EndLoopImageSizeCalculator);
|
||||
|
||||
} // namespace mediapipe
|
||||
|
|
|
@@ -17,11 +17,13 @@

#include <type_traits>

#include "absl/status/status.h"
#include "mediapipe/framework/calculator_context.h"
#include "mediapipe/framework/calculator_contract.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/collection_item_id.h"
#include "mediapipe/framework/port/integral_types.h"
#include "mediapipe/framework/port/ret_check.h"
#include "mediapipe/framework/port/status.h"

namespace mediapipe {

@@ -31,7 +33,27 @@ namespace mediapipe {
// from the "BATCH_END" tagged input stream, it emits the aggregated results
// at the original timestamp contained in the "BATCH_END" input stream.
//
// See BeginLoopCalculator for a usage example.
// It is designed to be used like:
//
// node {
//   calculator:    "BeginLoopWithIterableCalculator"
//   input_stream:  "ITERABLE:input_iterable"    # IterableT @ext_ts
//   output_stream: "ITEM:input_element"         # ItemT     @loop_internal_ts
//   output_stream: "BATCH_END:ext_ts"           # Timestamp @loop_internal_ts
// }
//
// node {
//   calculator:    "ElementToBlaConverterSubgraph"
//   input_stream:  "ITEM:input_to_loop_body"    # ItemT     @loop_internal_ts
//   output_stream: "BLA:output_of_loop_body"    # ItemU     @loop_internal_ts
// }
//
// node {
//   calculator:    "EndLoopWithOutputCalculator"
//   input_stream:  "ITEM:output_of_loop_body"   # ItemU     @loop_internal_ts
//   input_stream:  "BATCH_END:ext_ts"           # Timestamp @loop_internal_ts
//   output_stream: "ITERABLE:aggregated_result" # IterableU @ext_ts
// }
template <typename IterableT>
class EndLoopCalculator : public CalculatorBase {
  using ItemT = typename IterableT::value_type;

@@ -55,16 +77,16 @@ class EndLoopCalculator : public CalculatorBase {
      if (!input_stream_collection_) {
        input_stream_collection_.reset(new IterableT);
      }

      if constexpr (std::is_copy_constructible_v<ItemT>) {
        input_stream_collection_->push_back(
            cc->Inputs().Tag("ITEM").Get<ItemT>());
      // Try to consume the item and move it into the collection. If the items
      // are not consumable, then try to copy them instead. If the items are
      // not copiable, then an error will be returned.
      auto item_ptr_or = cc->Inputs().Tag("ITEM").Value().Consume<ItemT>();
      if (item_ptr_or.ok()) {
        input_stream_collection_->push_back(std::move(*item_ptr_or.value()));
      } else {
        // Try to consume the item and move it into the collection. Return an
        // error if the items are not consumable.
        auto item_ptr_or = cc->Inputs().Tag("ITEM").Value().Consume<ItemT>();
        if (item_ptr_or.ok()) {
          input_stream_collection_->push_back(std::move(*item_ptr_or.value()));
        if constexpr (std::is_copy_constructible_v<ItemT>) {
          input_stream_collection_->push_back(
              cc->Inputs().Tag("ITEM").template Get<ItemT>());
        } else {
          return absl::InternalError(
              "The item type is not copiable. Consider making the "
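For completeness, EndLoopCalculator is likewise used through registered specializations; end_loop_calculator.cc above lists several, and a new element type follows the same pattern. A minimal sketch, with the Detection specialization chosen purely for illustration:

// Illustrative registration of an EndLoopCalculator specialization for
// std::vector<mediapipe::Detection>, mirroring the registrations shown above.
typedef EndLoopCalculator<std::vector<::mediapipe::Detection>>
    EndLoopDetectionCalculator;
REGISTER_CALCULATOR(EndLoopDetectionCalculator);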
|
@@ -42,7 +42,7 @@ constexpr char kOptionsTag[] = "OPTIONS";
//
// Increasing `max_in_flight` to 2 or more can yield better throughput
// when the graph exhibits a high degree of pipeline parallelism. Decreasing
// `max_in_queue` to 0 can yield a better average latency, but at the cost of
// `max_in_flight` to 0 can yield a better average latency, but at the cost of
// lower throughput (lower framerate) due to the time during which the graph
// is idle awaiting the next input frame.
//
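To make the trade-off above concrete, a typical FlowLimiterCalculator setup feeds the graph output back on a FINISHED back edge and tunes the two limits in the options. A minimal sketch, with illustrative stream names:

node {
  calculator: "FlowLimiterCalculator"
  input_stream: "input_video"
  input_stream: "FINISHED:output_video"
  input_stream_info: { tag_index: "FINISHED" back_edge: true }
  output_stream: "throttled_input_video"
  options: {
    [mediapipe.FlowLimiterCalculatorOptions.ext] {
      max_in_flight: 2  # favor throughput
      max_in_queue: 1
    }
  }
}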
|
@ -26,15 +26,19 @@ constexpr char kStateChangeTag[] = "STATE_CHANGE";
|
|||
constexpr char kDisallowTag[] = "DISALLOW";
|
||||
constexpr char kAllowTag[] = "ALLOW";
|
||||
|
||||
std::string ToString(GateCalculatorOptions::GateState state) {
|
||||
enum GateState {
|
||||
GATE_UNINITIALIZED,
|
||||
GATE_ALLOW,
|
||||
GATE_DISALLOW,
|
||||
};
|
||||
|
||||
std::string ToString(GateState state) {
|
||||
switch (state) {
|
||||
case GateCalculatorOptions::UNSPECIFIED:
|
||||
return "UNSPECIFIED";
|
||||
case GateCalculatorOptions::GATE_UNINITIALIZED:
|
||||
case GATE_UNINITIALIZED:
|
||||
return "UNINITIALIZED";
|
||||
case GateCalculatorOptions::GATE_ALLOW:
|
||||
case GATE_ALLOW:
|
||||
return "ALLOW";
|
||||
case GateCalculatorOptions::GATE_DISALLOW:
|
||||
case GATE_DISALLOW:
|
||||
return "DISALLOW";
|
||||
}
|
||||
DLOG(FATAL) << "Unknown GateState";
|
||||
|
@ -149,12 +153,10 @@ class GateCalculator : public CalculatorBase {
|
|||
|
||||
cc->SetOffset(TimestampDiff(0));
|
||||
num_data_streams_ = cc->Inputs().NumEntries("");
|
||||
|
||||
const auto& options = cc->Options<::mediapipe::GateCalculatorOptions>();
|
||||
last_gate_state_ = options.initial_gate_state();
|
||||
|
||||
last_gate_state_ = GATE_UNINITIALIZED;
|
||||
RET_CHECK_OK(CopyInputHeadersToOutputs(cc->Inputs(), &cc->Outputs()));
|
||||
|
||||
const auto& options = cc->Options<::mediapipe::GateCalculatorOptions>();
|
||||
empty_packets_as_allow_ = options.empty_packets_as_allow();
|
||||
|
||||
if (!use_side_packet_for_allow_disallow_ &&
|
||||
|
@ -182,12 +184,10 @@ class GateCalculator : public CalculatorBase {
|
|||
allow = !cc->Inputs().Tag(kDisallowTag).Get<bool>();
|
||||
}
|
||||
}
|
||||
const GateCalculatorOptions::GateState new_gate_state =
|
||||
allow ? GateCalculatorOptions::GATE_ALLOW
|
||||
: GateCalculatorOptions::GATE_DISALLOW;
|
||||
const GateState new_gate_state = allow ? GATE_ALLOW : GATE_DISALLOW;
|
||||
|
||||
if (cc->Outputs().HasTag(kStateChangeTag)) {
|
||||
if (last_gate_state_ != GateCalculatorOptions::GATE_UNINITIALIZED &&
|
||||
if (last_gate_state_ != GATE_UNINITIALIZED &&
|
||||
last_gate_state_ != new_gate_state) {
|
||||
VLOG(2) << "State transition in " << cc->NodeName() << " @ "
|
||||
<< cc->InputTimestamp().Value() << " from "
|
||||
|
@ -223,8 +223,7 @@ class GateCalculator : public CalculatorBase {
|
|||
}
|
||||
|
||||
private:
|
||||
GateCalculatorOptions::GateState last_gate_state_ =
|
||||
GateCalculatorOptions::GATE_UNINITIALIZED;
|
||||
GateState last_gate_state_ = GATE_UNINITIALIZED;
|
||||
int num_data_streams_;
|
||||
bool empty_packets_as_allow_;
|
||||
bool use_side_packet_for_allow_disallow_ = false;
|
||||
|
|
|
@@ -31,13 +31,4 @@ message GateCalculatorOptions {
  // Whether to allow or disallow the input streams to pass when no
  // ALLOW/DISALLOW input or side input is specified.
  optional bool allow = 2 [default = false];

  enum GateState {
    UNSPECIFIED = 0;
    GATE_UNINITIALIZED = 1;
    GATE_ALLOW = 2;
    GATE_DISALLOW = 3;
  }

  optional GateState initial_gate_state = 3 [default = GATE_UNINITIALIZED];
}
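The new initial_gate_state option is exercised by the StateChangeTriggeredWithInitialGateStateOption test later in this diff; in a graph it is set like this (stream names are illustrative):

node {
  calculator: "GateCalculator"
  input_stream: "frames"
  input_stream: "ALLOW:allow"
  output_stream: "gated_frames"
  output_stream: "STATE_CHANGE:state_change"
  options: {
    [mediapipe.GateCalculatorOptions.ext] {
      initial_gate_state: GATE_DISALLOW
    }
  }
}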
|
@ -12,7 +12,6 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "absl/log/absl_log.h"
|
||||
#include "mediapipe/framework/calculator_framework.h"
|
||||
#include "mediapipe/framework/calculator_runner.h"
|
||||
#include "mediapipe/framework/port/gtest.h"
|
||||
|
@ -36,14 +35,14 @@ class GateCalculatorTest : public ::testing::Test {
|
|||
}
|
||||
|
||||
// Use this when ALLOW/DISALLOW input is provided as a side packet.
|
||||
void RunTimeStep(int64_t timestamp, bool stream_payload) {
|
||||
void RunTimeStep(int64 timestamp, bool stream_payload) {
|
||||
runner_->MutableInputs()->Get("", 0).packets.push_back(
|
||||
MakePacket<bool>(stream_payload).At(Timestamp(timestamp)));
|
||||
MP_ASSERT_OK(runner_->Run()) << "Calculator execution failed.";
|
||||
}
|
||||
|
||||
// Use this when ALLOW/DISALLOW input is provided as an input stream.
|
||||
void RunTimeStep(int64_t timestamp, const std::string& control_tag,
|
||||
void RunTimeStep(int64 timestamp, const std::string& control_tag,
|
||||
bool control) {
|
||||
runner_->MutableInputs()->Get("", 0).packets.push_back(
|
||||
MakePacket<bool>(true).At(Timestamp(timestamp)));
|
||||
|
@ -135,9 +134,9 @@ TEST_F(GateCalculatorTest, AllowByALLOWOptionToTrue) {
|
|||
}
|
||||
)");
|
||||
|
||||
constexpr int64_t kTimestampValue0 = 42;
|
||||
constexpr int64 kTimestampValue0 = 42;
|
||||
RunTimeStep(kTimestampValue0, true);
|
||||
constexpr int64_t kTimestampValue1 = 43;
|
||||
constexpr int64 kTimestampValue1 = 43;
|
||||
RunTimeStep(kTimestampValue1, false);
|
||||
|
||||
const std::vector<Packet>& output = runner()->Outputs().Get("", 0).packets;
|
||||
|
@ -160,9 +159,9 @@ TEST_F(GateCalculatorTest, DisallowByALLOWOptionSetToFalse) {
|
|||
}
|
||||
)");
|
||||
|
||||
constexpr int64_t kTimestampValue0 = 42;
|
||||
constexpr int64 kTimestampValue0 = 42;
|
||||
RunTimeStep(kTimestampValue0, true);
|
||||
constexpr int64_t kTimestampValue1 = 43;
|
||||
constexpr int64 kTimestampValue1 = 43;
|
||||
RunTimeStep(kTimestampValue1, false);
|
||||
|
||||
const std::vector<Packet>& output = runner()->Outputs().Get("", 0).packets;
|
||||
|
@ -176,9 +175,9 @@ TEST_F(GateCalculatorTest, DisallowByALLOWOptionNotSet) {
|
|||
output_stream: "test_output"
|
||||
)");
|
||||
|
||||
constexpr int64_t kTimestampValue0 = 42;
|
||||
constexpr int64 kTimestampValue0 = 42;
|
||||
RunTimeStep(kTimestampValue0, true);
|
||||
constexpr int64_t kTimestampValue1 = 43;
|
||||
constexpr int64 kTimestampValue1 = 43;
|
||||
RunTimeStep(kTimestampValue1, false);
|
||||
|
||||
const std::vector<Packet>& output = runner()->Outputs().Get("", 0).packets;
|
||||
|
@ -194,9 +193,9 @@ TEST_F(GateCalculatorTest, AllowByALLOWSidePacketSetToTrue) {
|
|||
)");
|
||||
runner()->MutableSidePackets()->Tag(kAllowTag) = Adopt(new bool(true));
|
||||
|
||||
constexpr int64_t kTimestampValue0 = 42;
|
||||
constexpr int64 kTimestampValue0 = 42;
|
||||
RunTimeStep(kTimestampValue0, true);
|
||||
constexpr int64_t kTimestampValue1 = 43;
|
||||
constexpr int64 kTimestampValue1 = 43;
|
||||
RunTimeStep(kTimestampValue1, false);
|
||||
|
||||
const std::vector<Packet>& output = runner()->Outputs().Get("", 0).packets;
|
||||
|
@ -216,9 +215,9 @@ TEST_F(GateCalculatorTest, AllowByDisallowSidePacketSetToFalse) {
|
|||
)");
|
||||
runner()->MutableSidePackets()->Tag(kDisallowTag) = Adopt(new bool(false));
|
||||
|
||||
constexpr int64_t kTimestampValue0 = 42;
|
||||
constexpr int64 kTimestampValue0 = 42;
|
||||
RunTimeStep(kTimestampValue0, true);
|
||||
constexpr int64_t kTimestampValue1 = 43;
|
||||
constexpr int64 kTimestampValue1 = 43;
|
||||
RunTimeStep(kTimestampValue1, false);
|
||||
|
||||
const std::vector<Packet>& output = runner()->Outputs().Get("", 0).packets;
|
||||
|
@ -238,9 +237,9 @@ TEST_F(GateCalculatorTest, DisallowByALLOWSidePacketSetToFalse) {
|
|||
)");
|
||||
runner()->MutableSidePackets()->Tag(kAllowTag) = Adopt(new bool(false));
|
||||
|
||||
constexpr int64_t kTimestampValue0 = 42;
|
||||
constexpr int64 kTimestampValue0 = 42;
|
||||
RunTimeStep(kTimestampValue0, true);
|
||||
constexpr int64_t kTimestampValue1 = 43;
|
||||
constexpr int64 kTimestampValue1 = 43;
|
||||
RunTimeStep(kTimestampValue1, false);
|
||||
|
||||
const std::vector<Packet>& output = runner()->Outputs().Get("", 0).packets;
|
||||
|
@ -256,9 +255,9 @@ TEST_F(GateCalculatorTest, DisallowByDISALLOWSidePacketSetToTrue) {
|
|||
)");
|
||||
runner()->MutableSidePackets()->Tag(kDisallowTag) = Adopt(new bool(true));
|
||||
|
||||
constexpr int64_t kTimestampValue0 = 42;
|
||||
constexpr int64 kTimestampValue0 = 42;
|
||||
RunTimeStep(kTimestampValue0, true);
|
||||
constexpr int64_t kTimestampValue1 = 43;
|
||||
constexpr int64 kTimestampValue1 = 43;
|
||||
RunTimeStep(kTimestampValue1, false);
|
||||
|
||||
const std::vector<Packet>& output = runner()->Outputs().Get("", 0).packets;
|
||||
|
@ -273,13 +272,13 @@ TEST_F(GateCalculatorTest, Allow) {
|
|||
output_stream: "test_output"
|
||||
)");
|
||||
|
||||
constexpr int64_t kTimestampValue0 = 42;
|
||||
constexpr int64 kTimestampValue0 = 42;
|
||||
RunTimeStep(kTimestampValue0, "ALLOW", true);
|
||||
constexpr int64_t kTimestampValue1 = 43;
|
||||
constexpr int64 kTimestampValue1 = 43;
|
||||
RunTimeStep(kTimestampValue1, "ALLOW", false);
|
||||
constexpr int64_t kTimestampValue2 = 44;
|
||||
constexpr int64 kTimestampValue2 = 44;
|
||||
RunTimeStep(kTimestampValue2, "ALLOW", true);
|
||||
constexpr int64_t kTimestampValue3 = 45;
|
||||
constexpr int64 kTimestampValue3 = 45;
|
||||
RunTimeStep(kTimestampValue3, "ALLOW", false);
|
||||
|
||||
const std::vector<Packet>& output = runner()->Outputs().Get("", 0).packets;
|
||||
|
@ -298,13 +297,13 @@ TEST_F(GateCalculatorTest, Disallow) {
|
|||
output_stream: "test_output"
|
||||
)");
|
||||
|
||||
constexpr int64_t kTimestampValue0 = 42;
|
||||
constexpr int64 kTimestampValue0 = 42;
|
||||
RunTimeStep(kTimestampValue0, "DISALLOW", true);
|
||||
constexpr int64_t kTimestampValue1 = 43;
|
||||
constexpr int64 kTimestampValue1 = 43;
|
||||
RunTimeStep(kTimestampValue1, "DISALLOW", false);
|
||||
constexpr int64_t kTimestampValue2 = 44;
|
||||
constexpr int64 kTimestampValue2 = 44;
|
||||
RunTimeStep(kTimestampValue2, "DISALLOW", true);
|
||||
constexpr int64_t kTimestampValue3 = 45;
|
||||
constexpr int64 kTimestampValue3 = 45;
|
||||
RunTimeStep(kTimestampValue3, "DISALLOW", false);
|
||||
|
||||
const std::vector<Packet>& output = runner()->Outputs().Get("", 0).packets;
|
||||
|
@ -324,13 +323,13 @@ TEST_F(GateCalculatorTest, AllowWithStateChange) {
|
|||
output_stream: "STATE_CHANGE:state_changed"
|
||||
)");
|
||||
|
||||
constexpr int64_t kTimestampValue0 = 42;
|
||||
constexpr int64 kTimestampValue0 = 42;
|
||||
RunTimeStep(kTimestampValue0, "ALLOW", false);
|
||||
constexpr int64_t kTimestampValue1 = 43;
|
||||
constexpr int64 kTimestampValue1 = 43;
|
||||
RunTimeStep(kTimestampValue1, "ALLOW", true);
|
||||
constexpr int64_t kTimestampValue2 = 44;
|
||||
constexpr int64 kTimestampValue2 = 44;
|
||||
RunTimeStep(kTimestampValue2, "ALLOW", true);
|
||||
constexpr int64_t kTimestampValue3 = 45;
|
||||
constexpr int64 kTimestampValue3 = 45;
|
||||
RunTimeStep(kTimestampValue3, "ALLOW", false);
|
||||
|
||||
const std::vector<Packet>& output =
|
||||
|
@ -357,18 +356,18 @@ TEST_F(GateCalculatorTest, AllowWithStateChangeNoDataStreams) {
|
|||
RunTimeStepWithoutDataStream(kTimestampValue2, "ALLOW", true);
|
||||
constexpr int64_t kTimestampValue3 = 45;
|
||||
RunTimeStepWithoutDataStream(kTimestampValue3, "ALLOW", false);
|
||||
ABSL_LOG(INFO) << "a";
|
||||
LOG(INFO) << "a";
|
||||
const std::vector<Packet>& output =
|
||||
runner()->Outputs().Get("STATE_CHANGE", 0).packets;
|
||||
ABSL_LOG(INFO) << "s";
|
||||
LOG(INFO) << "s";
|
||||
ASSERT_EQ(2, output.size());
|
||||
ABSL_LOG(INFO) << "d";
|
||||
LOG(INFO) << "d";
|
||||
EXPECT_EQ(kTimestampValue1, output[0].Timestamp().Value());
|
||||
EXPECT_EQ(kTimestampValue3, output[1].Timestamp().Value());
|
||||
ABSL_LOG(INFO) << "f";
|
||||
LOG(INFO) << "f";
|
||||
EXPECT_EQ(true, output[0].Get<bool>()); // Allow.
|
||||
EXPECT_EQ(false, output[1].Get<bool>()); // Disallow.
|
||||
ABSL_LOG(INFO) << "g";
|
||||
LOG(INFO) << "g";
|
||||
}
|
||||
|
||||
TEST_F(GateCalculatorTest, DisallowWithStateChange) {
|
||||
|
@ -380,13 +379,13 @@ TEST_F(GateCalculatorTest, DisallowWithStateChange) {
|
|||
output_stream: "STATE_CHANGE:state_changed"
|
||||
)");
|
||||
|
||||
constexpr int64_t kTimestampValue0 = 42;
|
||||
constexpr int64 kTimestampValue0 = 42;
|
||||
RunTimeStep(kTimestampValue0, "DISALLOW", true);
|
||||
constexpr int64_t kTimestampValue1 = 43;
|
||||
constexpr int64 kTimestampValue1 = 43;
|
||||
RunTimeStep(kTimestampValue1, "DISALLOW", false);
|
||||
constexpr int64_t kTimestampValue2 = 44;
|
||||
constexpr int64 kTimestampValue2 = 44;
|
||||
RunTimeStep(kTimestampValue2, "DISALLOW", false);
|
||||
constexpr int64_t kTimestampValue3 = 45;
|
||||
constexpr int64 kTimestampValue3 = 45;
|
||||
RunTimeStep(kTimestampValue3, "DISALLOW", true);
|
||||
|
||||
const std::vector<Packet>& output =
|
||||
|
@ -433,7 +432,7 @@ TEST_F(GateCalculatorTest, DisallowInitialNoStateTransition) {
|
|||
output_stream: "STATE_CHANGE:state_changed"
|
||||
)");
|
||||
|
||||
constexpr int64_t kTimestampValue0 = 42;
|
||||
constexpr int64 kTimestampValue0 = 42;
|
||||
RunTimeStep(kTimestampValue0, "DISALLOW", false);
|
||||
|
||||
const std::vector<Packet>& output =
|
||||
|
@ -451,7 +450,7 @@ TEST_F(GateCalculatorTest, AllowInitialNoStateTransition) {
|
|||
output_stream: "STATE_CHANGE:state_changed"
|
||||
)");
|
||||
|
||||
constexpr int64_t kTimestampValue0 = 42;
|
||||
constexpr int64 kTimestampValue0 = 42;
|
||||
RunTimeStep(kTimestampValue0, "ALLOW", true);
|
||||
|
||||
const std::vector<Packet>& output =
|
||||
|
@ -459,29 +458,5 @@ TEST_F(GateCalculatorTest, AllowInitialNoStateTransition) {
|
|||
ASSERT_EQ(0, output.size());
|
||||
}
|
||||
|
||||
// Must detect allow value for first timestamp as a state change when the
|
||||
// initial state is set to GATE_DISALLOW.
|
||||
TEST_F(GateCalculatorTest, StateChangeTriggeredWithInitialGateStateOption) {
|
||||
SetRunner(R"(
|
||||
calculator: "GateCalculator"
|
||||
input_stream: "test_input"
|
||||
input_stream: "ALLOW:allow"
|
||||
output_stream: "test_output"
|
||||
output_stream: "STATE_CHANGE:state_change"
|
||||
options: {
|
||||
[mediapipe.GateCalculatorOptions.ext] {
|
||||
initial_gate_state: GATE_DISALLOW
|
||||
}
|
||||
}
|
||||
)");
|
||||
|
||||
constexpr int64_t kTimestampValue0 = 42;
|
||||
RunTimeStep(kTimestampValue0, "ALLOW", true);
|
||||
|
||||
const std::vector<Packet>& output =
|
||||
runner()->Outputs().Get("STATE_CHANGE", 0).packets;
|
||||
ASSERT_EQ(1, output.size());
|
||||
}
|
||||
|
||||
} // namespace
|
||||
} // namespace mediapipe
|
||||
|
|
|
@ -17,7 +17,6 @@
|
|||
#include "mediapipe/framework/formats/classification.pb.h"
|
||||
#include "mediapipe/framework/formats/detection.pb.h"
|
||||
#include "mediapipe/framework/formats/landmark.pb.h"
|
||||
#include "mediapipe/framework/formats/rect.pb.h"
|
||||
|
||||
namespace mediapipe {
|
||||
namespace api2 {
|
||||
|
@ -38,12 +37,5 @@ using GetDetectionVectorItemCalculator =
|
|||
GetVectorItemCalculator<mediapipe::Detection>;
|
||||
REGISTER_CALCULATOR(GetDetectionVectorItemCalculator);
|
||||
|
||||
using GetNormalizedRectVectorItemCalculator =
|
||||
GetVectorItemCalculator<NormalizedRect>;
|
||||
REGISTER_CALCULATOR(GetNormalizedRectVectorItemCalculator);
|
||||
|
||||
using GetRectVectorItemCalculator = GetVectorItemCalculator<Rect>;
|
||||
REGISTER_CALCULATOR(GetRectVectorItemCalculator);
|
||||
|
||||
} // namespace api2
|
||||
} // namespace mediapipe
|
||||
|
|
|
@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include "absl/log/absl_log.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/port/ret_check.h"
#include "mediapipe/framework/port/status.h"

@@ -79,7 +78,7 @@ absl::Status ImmediateMuxCalculator::Process(CalculatorContext* cc) {
if (packet.Timestamp() >= cc->Outputs().Index(0).NextTimestampBound()) {
cc->Outputs().Index(0).AddPacket(packet);
} else {
ABSL_LOG_FIRST_N(WARNING, 5)
LOG_FIRST_N(WARNING, 5)
<< "Dropping a packet with timestamp " << packet.Timestamp();
}
if (cc->Outputs().NumEntries() >= 2) {
@@ -16,7 +16,6 @@
#include <vector>

#include "Eigen/Core"
#include "absl/log/absl_check.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/calculator_runner.h"
#include "mediapipe/framework/formats/matrix.h"

@@ -210,7 +209,7 @@ TEST(MatrixMultiplyCalculatorTest, Multiply) {
MatrixFromTextProto(kSamplesText, &samples);
Matrix expected;
MatrixFromTextProto(kExpectedText, &expected);
ABSL_CHECK_EQ(samples.cols(), expected.cols());
CHECK_EQ(samples.cols(), expected.cols());

for (int i = 0; i < samples.cols(); ++i) {
// Take a column from samples and produce a packet with just that
@@ -35,7 +35,7 @@ class MatrixToVectorCalculatorTest
void SetUp() override { calculator_name_ = "MatrixToVectorCalculator"; }

void AppendInput(const std::vector<float>& column_major_data,
int64_t timestamp) {
int64 timestamp) {
ASSERT_EQ(num_input_samples_ * num_input_channels_,
column_major_data.size());
Eigen::Map<const Matrix> data_map(&column_major_data[0],
@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include "absl/log/absl_log.h"
#include "mediapipe/framework/api2/node.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/port/ret_check.h"

@@ -54,7 +53,7 @@ class MergeCalculator : public Node {
static absl::Status UpdateContract(CalculatorContract* cc) {
RET_CHECK_GT(kIn(cc).Count(), 0) << "Needs at least one input stream";
if (kIn(cc).Count() == 1) {
ABSL_LOG(WARNING)
LOG(WARNING)
<< "MergeCalculator expects multiple input streams to merge but is "
"receiving only one. Make sure the calculator is configured "
"correctly or consider removing this calculator to reduce "

@@ -73,8 +72,8 @@ class MergeCalculator : public Node {
}
}

ABSL_LOG(WARNING) << "Empty input packets at timestamp "
<< cc->InputTimestamp().Value();
LOG(WARNING) << "Empty input packets at timestamp "
<< cc->InputTimestamp().Value();

return absl::OkStatus();
}
@@ -1,4 +1,4 @@
/* Copyright 2022 The MediaPipe Authors.
/* Copyright 2022 The MediaPipe Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
/* Copyright 2022 The MediaPipe Authors.
/* Copyright 2022 The MediaPipe Authors. All Rights Reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@@ -16,12 +16,9 @@

#include <memory>

#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"

namespace {
// Reflect an integer against the lower and upper bound of an interval.
int64_t ReflectBetween(int64_t ts, int64_t ts_min, int64_t ts_max) {
int64 ReflectBetween(int64 ts, int64 ts_min, int64 ts_max) {
if (ts < ts_min) return 2 * ts_min - ts - 1;
if (ts >= ts_max) return 2 * ts_max - ts - 1;
return ts;

@@ -50,7 +47,7 @@ constexpr char kOptionsTag[] = "OPTIONS";
// Returns a TimestampDiff (assuming microseconds) corresponding to the
// given time in seconds.
TimestampDiff TimestampDiffFromSeconds(double seconds) {
return TimestampDiff(MathUtil::SafeRound<int64_t, double>(
return TimestampDiff(MathUtil::SafeRound<int64, double>(
seconds * Timestamp::kTimestampUnitsPerSecond));
}
} // namespace

@@ -120,8 +117,8 @@ absl::Status PacketResamplerCalculator::Open(CalculatorContext* cc) {
<< "The output frame rate must be smaller than "
<< Timestamp::kTimestampUnitsPerSecond;

frame_time_usec_ = static_cast<int64_t>(1000000.0 / frame_rate_);
jitter_usec_ = static_cast<int64_t>(1000000.0 * jitter_ / frame_rate_);
frame_time_usec_ = static_cast<int64>(1000000.0 / frame_rate_);
jitter_usec_ = static_cast<int64>(1000000.0 * jitter_ / frame_rate_);
RET_CHECK_LE(jitter_usec_, frame_time_usec_);

video_header_.frame_rate = frame_rate_;

@@ -180,7 +177,7 @@ PacketResamplerCalculator::GetSamplingStrategy(
const PacketResamplerCalculatorOptions& options) {
if (options.reproducible_sampling()) {
if (!options.jitter_with_reflection()) {
ABSL_LOG(WARNING)
LOG(WARNING)
<< "reproducible_sampling enabled w/ jitter_with_reflection "
"disabled. "
<< "reproducible_sampling always uses jitter with reflection, "

@@ -201,18 +198,17 @@ PacketResamplerCalculator::GetSamplingStrategy(
return absl::make_unique<JitterWithoutReflectionStrategy>(this);
}

Timestamp PacketResamplerCalculator::PeriodIndexToTimestamp(
int64_t index) const {
ABSL_CHECK_EQ(jitter_, 0.0);
ABSL_CHECK_NE(first_timestamp_, Timestamp::Unset());
Timestamp PacketResamplerCalculator::PeriodIndexToTimestamp(int64 index) const {
CHECK_EQ(jitter_, 0.0);
CHECK_NE(first_timestamp_, Timestamp::Unset());
return first_timestamp_ + TimestampDiffFromSeconds(index / frame_rate_);
}

int64_t PacketResamplerCalculator::TimestampToPeriodIndex(
int64 PacketResamplerCalculator::TimestampToPeriodIndex(
Timestamp timestamp) const {
ABSL_CHECK_EQ(jitter_, 0.0);
ABSL_CHECK_NE(first_timestamp_, Timestamp::Unset());
return MathUtil::SafeRound<int64_t, double>(
CHECK_EQ(jitter_, 0.0);
CHECK_NE(first_timestamp_, Timestamp::Unset());
return MathUtil::SafeRound<int64, double>(
(timestamp - first_timestamp_).Seconds() * frame_rate_);
}

@@ -232,15 +228,13 @@ absl::Status LegacyJitterWithReflectionStrategy::Open(CalculatorContext* cc) {

if (resampler_options.output_header() !=
PacketResamplerCalculatorOptions::NONE) {
ABSL_LOG(WARNING)
<< "VideoHeader::frame_rate holds the target value and not "
"the actual value.";
LOG(WARNING) << "VideoHeader::frame_rate holds the target value and not "
"the actual value.";
}

if (calculator_->flush_last_packet_) {
ABSL_LOG(WARNING)
<< "PacketResamplerCalculatorOptions.flush_last_packet is "
"ignored, because we are adding jitter.";
LOG(WARNING) << "PacketResamplerCalculatorOptions.flush_last_packet is "
"ignored, because we are adding jitter.";
}

const auto& seed = cc->InputSidePackets().Tag(kSeedTag).Get<std::string>();

@@ -259,7 +253,7 @@ absl::Status LegacyJitterWithReflectionStrategy::Open(CalculatorContext* cc) {
}
absl::Status LegacyJitterWithReflectionStrategy::Close(CalculatorContext* cc) {
if (!packet_reservoir_->IsEmpty()) {
ABSL_LOG(INFO) << "Emitting pack from reservoir.";
LOG(INFO) << "Emitting pack from reservoir.";
calculator_->OutputWithinLimits(cc, packet_reservoir_->GetSample());
}
return absl::OkStatus();
@@ -290,16 +284,16 @@ absl::Status LegacyJitterWithReflectionStrategy::Process(

if (calculator_->frame_time_usec_ <
(cc->InputTimestamp() - calculator_->last_packet_.Timestamp()).Value()) {
ABSL_LOG_FIRST_N(WARNING, 2)
LOG_FIRST_N(WARNING, 2)
<< "Adding jitter is not very useful when upsampling.";
}

while (true) {
const int64_t last_diff =
const int64 last_diff =
(next_output_timestamp_ - calculator_->last_packet_.Timestamp())
.Value();
RET_CHECK_GT(last_diff, 0);
const int64_t curr_diff =
const int64 curr_diff =
(next_output_timestamp_ - cc->InputTimestamp()).Value();
if (curr_diff > 0) {
break;

@@ -345,8 +339,8 @@ void LegacyJitterWithReflectionStrategy::UpdateNextOutputTimestampWithJitter() {
next_output_timestamp_ = Timestamp(ReflectBetween(
next_output_timestamp_.Value(), next_output_timestamp_min_.Value(),
next_output_timestamp_max_.Value()));
ABSL_CHECK_GE(next_output_timestamp_, next_output_timestamp_min_);
ABSL_CHECK_LT(next_output_timestamp_, next_output_timestamp_max_);
CHECK_GE(next_output_timestamp_, next_output_timestamp_min_);
CHECK_LT(next_output_timestamp_, next_output_timestamp_max_);
}

absl::Status ReproducibleJitterWithReflectionStrategy::Open(

@@ -357,15 +351,13 @@ absl::Status ReproducibleJitterWithReflectionStrategy::Open(

if (resampler_options.output_header() !=
PacketResamplerCalculatorOptions::NONE) {
ABSL_LOG(WARNING)
<< "VideoHeader::frame_rate holds the target value and not "
"the actual value.";
LOG(WARNING) << "VideoHeader::frame_rate holds the target value and not "
"the actual value.";
}

if (calculator_->flush_last_packet_) {
ABSL_LOG(WARNING)
<< "PacketResamplerCalculatorOptions.flush_last_packet is "
"ignored, because we are adding jitter.";
LOG(WARNING) << "PacketResamplerCalculatorOptions.flush_last_packet is "
"ignored, because we are adding jitter.";
}

const auto& seed = cc->InputSidePackets().Tag(kSeedTag).Get<std::string>();

@@ -418,7 +410,7 @@ absl::Status ReproducibleJitterWithReflectionStrategy::Process(
// Note, if the stream is upsampling, this could lead to the same packet
// being emitted twice. Upsampling and jitter doesn't make much sense
// but does technically work.
ABSL_LOG_FIRST_N(WARNING, 2)
LOG_FIRST_N(WARNING, 2)
<< "Adding jitter is not very useful when upsampling.";
}

@@ -506,15 +498,13 @@ absl::Status JitterWithoutReflectionStrategy::Open(CalculatorContext* cc) {

if (resampler_options.output_header() !=
PacketResamplerCalculatorOptions::NONE) {
ABSL_LOG(WARNING)
<< "VideoHeader::frame_rate holds the target value and not "
"the actual value.";
LOG(WARNING) << "VideoHeader::frame_rate holds the target value and not "
"the actual value.";
}

if (calculator_->flush_last_packet_) {
ABSL_LOG(WARNING)
<< "PacketResamplerCalculatorOptions.flush_last_packet is "
"ignored, because we are adding jitter.";
LOG(WARNING) << "PacketResamplerCalculatorOptions.flush_last_packet is "
"ignored, because we are adding jitter.";
}

const auto& seed = cc->InputSidePackets().Tag(kSeedTag).Get<std::string>();

@@ -564,16 +554,16 @@ absl::Status JitterWithoutReflectionStrategy::Process(CalculatorContext* cc) {

if (calculator_->frame_time_usec_ <
(cc->InputTimestamp() - calculator_->last_packet_.Timestamp()).Value()) {
ABSL_LOG_FIRST_N(WARNING, 2)
LOG_FIRST_N(WARNING, 2)
<< "Adding jitter is not very useful when upsampling.";
}

while (true) {
const int64_t last_diff =
const int64 last_diff =
(next_output_timestamp_ - calculator_->last_packet_.Timestamp())
.Value();
RET_CHECK_GT(last_diff, 0);
const int64_t curr_diff =
const int64 curr_diff =
(next_output_timestamp_ - cc->InputTimestamp()).Value();
if (curr_diff > 0) {
break;

@@ -641,7 +631,7 @@ absl::Status NoJitterStrategy::Process(CalculatorContext* cc) {
} else {
// Initialize first_timestamp_ with the first packet timestamp
// aligned to the base_timestamp_.
int64_t first_index = MathUtil::SafeRound<int64_t, double>(
int64 first_index = MathUtil::SafeRound<int64, double>(
(cc->InputTimestamp() - base_timestamp_).Seconds() *
calculator_->frame_rate_);
calculator_->first_timestamp_ =

@@ -656,7 +646,7 @@ absl::Status NoJitterStrategy::Process(CalculatorContext* cc) {
}
}
const Timestamp received_timestamp = cc->InputTimestamp();
const int64_t received_timestamp_idx =
const int64 received_timestamp_idx =
calculator_->TimestampToPeriodIndex(received_timestamp);
// Only consider the received packet if it belongs to the current period
// (== period_count_) or to a newer one (> period_count_).
@@ -13,6 +13,7 @@
#include "mediapipe/framework/deps/random_base.h"
#include "mediapipe/framework/formats/video_stream_header.h"
#include "mediapipe/framework/port/integral_types.h"
#include "mediapipe/framework/port/logging.h"
#include "mediapipe/framework/port/ret_check.h"
#include "mediapipe/framework/port/status.h"
#include "mediapipe/framework/port/status_macros.h"

@@ -51,9 +51,9 @@ class SimpleRunner : public CalculatorRunner {

virtual ~SimpleRunner() {}

void SetInput(const std::vector<int64_t>& timestamp_list) {
void SetInput(const std::vector<int64>& timestamp_list) {
MutableInputs()->Index(0).packets.clear();
for (const int64_t ts : timestamp_list) {
for (const int64 ts : timestamp_list) {
MutableInputs()->Index(0).packets.push_back(
Adopt(new std::string(absl::StrCat("Frame #", ts)))
.At(Timestamp(ts)));

@@ -72,8 +72,8 @@ class SimpleRunner : public CalculatorRunner {
}

void CheckOutputTimestamps(
const std::vector<int64_t>& expected_frames,
const std::vector<int64_t>& expected_timestamps) const {
const std::vector<int64>& expected_frames,
const std::vector<int64>& expected_timestamps) const {
EXPECT_EQ(expected_frames.size(), Outputs().Index(0).packets.size());
EXPECT_EQ(expected_timestamps.size(), Outputs().Index(0).packets.size());
int count = 0;

@@ -112,7 +112,7 @@ MATCHER_P2(PacketAtTimestamp, payload, timestamp,
*result_listener << "at incorrect timestamp = " << arg.Timestamp().Value();
return false;
}
int64_t actual_payload = arg.template Get<int64_t>();
int64 actual_payload = arg.template Get<int64>();
if (actual_payload != payload) {
*result_listener << "with incorrect payload = " << actual_payload;
return false;

@@ -137,18 +137,18 @@ class ReproducibleJitterWithReflectionStrategyForTesting
//
// An EXPECT will fail if sequence is less than the number requested during
// processing.
static std::vector<uint64_t> random_sequence;
static std::vector<uint64> random_sequence;

protected:
virtual uint64_t GetNextRandom(uint64_t n) {
virtual uint64 GetNextRandom(uint64 n) {
EXPECT_LT(sequence_index_, random_sequence.size());
return random_sequence[sequence_index_++] % n;
}

private:
int32_t sequence_index_ = 0;
int32 sequence_index_ = 0;
};
std::vector<uint64_t>
std::vector<uint64>
ReproducibleJitterWithReflectionStrategyForTesting::random_sequence;

// PacketResamplerCalculator child class which injects a specified stream

@@ -469,7 +469,7 @@ TEST(PacketResamplerCalculatorTest, SetVideoHeader) {
}
)pb"));

for (const int64_t ts : {0, 5000, 10010, 15001, 19990}) {
for (const int64 ts : {0, 5000, 10010, 15001, 19990}) {
runner.MutableInputs()->Tag(kDataTag).packets.push_back(
Adopt(new std::string(absl::StrCat("Frame #", ts))).At(Timestamp(ts)));
}
@@ -71,7 +71,7 @@ TEST_F(PacketSequencerCalculatorTest, IsRegistered) {
CalculatorBaseRegistry::IsRegistered("PacketSequencerCalculator"));
}

// Shows how control packets receive timestamps before and after frame packets
// Shows how control packets recieve timestamps before and after frame packets
// have arrived.
TEST_F(PacketSequencerCalculatorTest, ChannelEarly) {
CalculatorGraphConfig::Node node_config = BuildNodeConfig();
@@ -17,7 +17,6 @@
#include <cmath> // for ceil
#include <memory>

#include "absl/log/absl_check.h"
#include "mediapipe/calculators/core/packet_thinner_calculator.pb.h"
#include "mediapipe/framework/calculator_context.h"
#include "mediapipe/framework/calculator_framework.h"

@@ -98,7 +97,7 @@ class PacketThinnerCalculator : public CalculatorBase {
cc->Inputs().Index(0).SetAny();
cc->Outputs().Index(0).SetSameAs(&cc->Inputs().Index(0));
if (cc->InputSidePackets().HasTag(kPeriodTag)) {
cc->InputSidePackets().Tag(kPeriodTag).Set<int64_t>();
cc->InputSidePackets().Tag(kPeriodTag).Set<int64>();
}
return absl::OkStatus();
}

@@ -161,8 +160,8 @@ absl::Status PacketThinnerCalculator::Open(CalculatorContext* cc) {

thinner_type_ = options.thinner_type();
// This check enables us to assume only two thinner types exist in Process()
ABSL_CHECK(thinner_type_ == PacketThinnerCalculatorOptions::ASYNC ||
thinner_type_ == PacketThinnerCalculatorOptions::SYNC)
CHECK(thinner_type_ == PacketThinnerCalculatorOptions::ASYNC ||
thinner_type_ == PacketThinnerCalculatorOptions::SYNC)
<< "Unsupported thinner type.";

if (thinner_type_ == PacketThinnerCalculatorOptions::ASYNC) {

@@ -174,12 +173,11 @@ absl::Status PacketThinnerCalculator::Open(CalculatorContext* cc) {

if (cc->InputSidePackets().HasTag(kPeriodTag)) {
period_ =
TimestampDiff(cc->InputSidePackets().Tag(kPeriodTag).Get<int64_t>());
TimestampDiff(cc->InputSidePackets().Tag(kPeriodTag).Get<int64>());
} else {
period_ = TimestampDiff(options.period());
}
ABSL_CHECK_LT(TimestampDiff(0), period_)
<< "Specified period must be positive.";
CHECK_LT(TimestampDiff(0), period_) << "Specified period must be positive.";

if (options.has_start_time()) {
start_time_ = Timestamp(options.start_time());

@@ -191,7 +189,7 @@ absl::Status PacketThinnerCalculator::Open(CalculatorContext* cc) {

end_time_ =
options.has_end_time() ? Timestamp(options.end_time()) : Timestamp::Max();
ABSL_CHECK_LT(start_time_, end_time_)
CHECK_LT(start_time_, end_time_)
<< "Invalid PacketThinner: start_time must be earlier than end_time";

sync_output_timestamps_ = options.sync_output_timestamps();

@@ -234,7 +232,7 @@ absl::Status PacketThinnerCalculator::Close(CalculatorContext* cc) {
// Emit any saved packets before quitting.
if (!saved_packet_.IsEmpty()) {
// Only sync thinner should have saved packets.
ABSL_CHECK_EQ(PacketThinnerCalculatorOptions::SYNC, thinner_type_);
CHECK_EQ(PacketThinnerCalculatorOptions::SYNC, thinner_type_);
if (sync_output_timestamps_) {
cc->Outputs().Index(0).AddPacket(
saved_packet_.At(NearestSyncTimestamp(saved_packet_.Timestamp())));

@@ -271,7 +269,7 @@ absl::Status PacketThinnerCalculator::SyncThinnerProcess(
const Timestamp saved_sync = NearestSyncTimestamp(saved);
const Timestamp now = cc->InputTimestamp();
const Timestamp now_sync = NearestSyncTimestamp(now);
ABSL_CHECK_LE(saved_sync, now_sync);
CHECK_LE(saved_sync, now_sync);
if (saved_sync == now_sync) {
// Saved Packet is in same interval as current packet.
// Replace saved packet with current if it is at least as

@@ -297,20 +295,20 @@ absl::Status PacketThinnerCalculator::SyncThinnerProcess(
}

Timestamp PacketThinnerCalculator::NearestSyncTimestamp(Timestamp now) const {
ABSL_CHECK_NE(start_time_, Timestamp::Unset())
CHECK_NE(start_time_, Timestamp::Unset())
<< "Method only valid for sync thinner calculator.";

// Computation is done using int64 arithmetic. No easy way to avoid
// since Timestamps don't support div and multiply.
const int64_t now64 = now.Value();
const int64_t start64 = start_time_.Value();
const int64_t period64 = period_.Value();
ABSL_CHECK_LE(0, period64);
const int64 now64 = now.Value();
const int64 start64 = start_time_.Value();
const int64 period64 = period_.Value();
CHECK_LE(0, period64);

// Round now64 to its closest interval (units of period64).
int64_t sync64 =
int64 sync64 =
(now64 - start64 + period64 / 2) / period64 * period64 + start64;
ABSL_CHECK_LE(abs(now64 - sync64), period64 / 2)
CHECK_LE(abs(now64 - sync64), period64 / 2)
<< "start64: " << start64 << "; now64: " << now64
<< "; sync64: " << sync64;
@@ -16,7 +16,6 @@
#include <string>
#include <vector>

#include "absl/log/absl_check.h"
#include "absl/strings/str_cat.h"
#include "mediapipe/calculators/core/packet_thinner_calculator.pb.h"
#include "mediapipe/framework/calculator_framework.h"

@@ -71,7 +70,7 @@ class SimpleRunner : public CalculatorRunner {
}

double GetFrameRate() const {
ABSL_CHECK(!Outputs().Index(0).header.IsEmpty());
CHECK(!Outputs().Index(0).header.IsEmpty());
return Outputs().Index(0).header.Get<VideoHeader>().frame_rate;
}
};
@@ -123,10 +123,7 @@ class PreviousLoopbackCalculator : public Node {
// However, LOOP packet is empty.
kPrevLoop(cc).SetNextTimestampBound(main_spec.timestamp + 1);
} else {
// Avoids sending leftovers to a stream that's already closed.
if (!kPrevLoop(cc).IsClosed()) {
kPrevLoop(cc).Send(loop_candidate.At(main_spec.timestamp));
}
kPrevLoop(cc).Send(loop_candidate.At(main_spec.timestamp));
}
loop_packets_.pop_front();
main_packet_specs_.pop_front();
@@ -43,8 +43,8 @@ constexpr char kDisallowTag[] = "DISALLOW";

// Returns the timestamp values for a vector of Packets.
// TODO: puth this kind of test util in a common place.
std::vector<int64_t> TimestampValues(const std::vector<Packet>& packets) {
std::vector<int64_t> result;
std::vector<int64> TimestampValues(const std::vector<Packet>& packets) {
std::vector<int64> result;
for (const Packet& packet : packets) {
result.push_back(packet.Timestamp().Value());
}

@@ -371,7 +371,7 @@ TEST(PreviousLoopbackCalculator, EmptyLoopForever) {
for (int main_ts = 0; main_ts < 50; ++main_ts) {
send_packet("in", main_ts);
MP_EXPECT_OK(graph_.WaitUntilIdle());
std::vector<int64_t> ts_values = TimestampValues(outputs);
std::vector<int64> ts_values = TimestampValues(outputs);
EXPECT_EQ(ts_values.size(), main_ts + 1);
for (int j = 0; j < main_ts + 1; ++j) {
EXPECT_EQ(ts_values[j], j);
@@ -14,7 +14,6 @@

#include <deque>

#include "absl/log/absl_log.h"
#include "mediapipe/calculators/core/sequence_shift_calculator.pb.h"
#include "mediapipe/framework/api2/node.h"
#include "mediapipe/framework/calculator_framework.h"

@@ -102,7 +101,7 @@ void SequenceShiftCalculator::ProcessPositiveOffset(CalculatorContext* cc) {
kOut(cc).Send(packet_cache_.front().At(cc->InputTimestamp()));
packet_cache_.pop_front();
} else if (emit_empty_packets_before_first_packet_) {
ABSL_LOG(FATAL) << "Not supported yet";
LOG(FATAL) << "Not supported yet";
}
// Store current packet for later output.
packet_cache_.push_back(kIn(cc).packet());
@@ -17,7 +17,6 @@
#include <set>
#include <string>

#include "absl/status/status.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/port/logging.h"
#include "mediapipe/framework/port/ret_check.h"

@@ -33,7 +32,6 @@ namespace {
constexpr char kTagAtPreStream[] = "AT_PRESTREAM";
constexpr char kTagAtPostStream[] = "AT_POSTSTREAM";
constexpr char kTagAtZero[] = "AT_ZERO";
constexpr char kTagAtFirstTick[] = "AT_FIRST_TICK";
constexpr char kTagAtTick[] = "AT_TICK";
constexpr char kTagTick[] = "TICK";
constexpr char kTagAtTimestamp[] = "AT_TIMESTAMP";

@@ -45,7 +43,6 @@ static std::map<std::string, Timestamp>* kTimestampMap = []() {
res->emplace(kTagAtPostStream, Timestamp::PostStream());
res->emplace(kTagAtZero, Timestamp(0));
res->emplace(kTagAtTick, Timestamp::Unset());
res->emplace(kTagAtFirstTick, Timestamp::Unset());
res->emplace(kTagAtTimestamp, Timestamp::Unset());
return res;
}();

@@ -62,8 +59,8 @@ std::string GetOutputTag(const CC& cc) {
// timestamp, depending on the tag used to define output stream(s). (One tag can
// be used only.)
//
// Valid tags are AT_PRESTREAM, AT_POSTSTREAM, AT_ZERO, AT_TICK, AT_FIRST_TICK,
// AT_TIMESTAMP and corresponding timestamps are Timestamp::PreStream(),
// Valid tags are AT_PRESTREAM, AT_POSTSTREAM, AT_ZERO, AT_TICK, AT_TIMESTAMP
// and corresponding timestamps are Timestamp::PreStream(),
// Timestamp::PostStream(), Timestamp(0), timestamp of a packet received in TICK
// input, and timestamp received from a side input.
//

@@ -99,7 +96,6 @@ class SidePacketToStreamCalculator : public CalculatorBase {

private:
bool is_tick_processing_ = false;
bool close_on_first_tick_ = false;
std::string output_tag_;
};
REGISTER_CALCULATOR(SidePacketToStreamCalculator);

@@ -107,16 +103,13 @@ REGISTER_CALCULATOR(SidePacketToStreamCalculator);
absl::Status SidePacketToStreamCalculator::GetContract(CalculatorContract* cc) {
const auto& tags = cc->Outputs().GetTags();
RET_CHECK(tags.size() == 1 && kTimestampMap->count(*tags.begin()) == 1)
<< "Only one of AT_PRESTREAM, AT_POSTSTREAM, AT_ZERO, AT_TICK, "
"AT_FIRST_TICK and AT_TIMESTAMP tags is allowed and required to "
"specify output stream(s).";
const bool has_tick_output =
cc->Outputs().HasTag(kTagAtTick) || cc->Outputs().HasTag(kTagAtFirstTick);
const bool has_tick_input = cc->Inputs().HasTag(kTagTick);
RET_CHECK((has_tick_output && has_tick_input) ||
(!has_tick_output && !has_tick_input))
<< "Either both TICK input and tick (AT_TICK/AT_FIRST_TICK) output "
"should be used or none of them.";
<< "Only one of AT_PRESTREAM, AT_POSTSTREAM, AT_ZERO, AT_TICK and "
"AT_TIMESTAMP tags is allowed and required to specify output "
"stream(s).";
RET_CHECK(
(cc->Outputs().HasTag(kTagAtTick) && cc->Inputs().HasTag(kTagTick)) ||
(!cc->Outputs().HasTag(kTagAtTick) && !cc->Inputs().HasTag(kTagTick)))
<< "Either both of TICK and AT_TICK should be used or none of them.";
RET_CHECK((cc->Outputs().HasTag(kTagAtTimestamp) &&
cc->InputSidePackets().HasTag(kTagSideInputTimestamp)) ||
(!cc->Outputs().HasTag(kTagAtTimestamp) &&
@@ -128,7 +121,7 @@ absl::Status SidePacketToStreamCalculator::GetContract(CalculatorContract* cc) {
if (cc->Outputs().HasTag(kTagAtTimestamp)) {
RET_CHECK_EQ(num_entries + 1, cc->InputSidePackets().NumEntries())
<< "For AT_TIMESTAMP tag, 2 input side packets are required.";
cc->InputSidePackets().Tag(kTagSideInputTimestamp).Set<int64_t>();
cc->InputSidePackets().Tag(kTagSideInputTimestamp).Set<int64>();
} else {
RET_CHECK_EQ(num_entries, cc->InputSidePackets().NumEntries())
<< "Same number of input side packets and output streams is required.";

@@ -155,17 +148,11 @@ absl::Status SidePacketToStreamCalculator::Open(CalculatorContext* cc) {
// timestamp bound update.
cc->SetOffset(TimestampDiff(0));
}
if (output_tag_ == kTagAtFirstTick) {
close_on_first_tick_ = true;
}
return absl::OkStatus();
}

absl::Status SidePacketToStreamCalculator::Process(CalculatorContext* cc) {
if (is_tick_processing_) {
if (cc->Outputs().Get(output_tag_, 0).IsClosed()) {
return absl::OkStatus();
}
// TICK input is guaranteed to be non-empty, as it's the only input stream
// for this calculator.
const auto& timestamp = cc->Inputs().Tag(kTagTick).Value().Timestamp();

@@ -173,9 +160,6 @@ absl::Status SidePacketToStreamCalculator::Process(CalculatorContext* cc) {
cc->Outputs()
.Get(output_tag_, i)
.AddPacket(cc->InputSidePackets().Index(i).At(timestamp));
if (close_on_first_tick_) {
cc->Outputs().Get(output_tag_, i).Close();
}
}

return absl::OkStatus();

@@ -186,7 +170,6 @@ absl::Status SidePacketToStreamCalculator::Process(CalculatorContext* cc) {

absl::Status SidePacketToStreamCalculator::Close(CalculatorContext* cc) {
if (!cc->Outputs().HasTag(kTagAtTick) &&
!cc->Outputs().HasTag(kTagAtFirstTick) &&
!cc->Outputs().HasTag(kTagAtTimestamp)) {
const auto& timestamp = kTimestampMap->at(output_tag_);
for (int i = 0; i < cc->Outputs().NumEntries(output_tag_); ++i) {

@@ -195,8 +178,8 @@ absl::Status SidePacketToStreamCalculator::Close(CalculatorContext* cc) {
.AddPacket(cc->InputSidePackets().Index(i).At(timestamp));
}
} else if (cc->Outputs().HasTag(kTagAtTimestamp)) {
int64_t timestamp =
cc->InputSidePackets().Tag(kTagSideInputTimestamp).Get<int64_t>();
int64 timestamp =
cc->InputSidePackets().Tag(kTagSideInputTimestamp).Get<int64>();
for (int i = 0; i < cc->Outputs().NumEntries(output_tag_); ++i) {
cc->Outputs()
.Get(output_tag_, i)
@@ -27,17 +27,13 @@
#include "mediapipe/framework/port/status.h"
#include "mediapipe/framework/port/status_matchers.h"
#include "mediapipe/framework/tool/options_util.h"
#include "mediapipe/util/packet_test_util.h"

namespace mediapipe {
namespace {

using ::testing::ElementsAre;
using ::testing::Eq;
using ::testing::HasSubstr;
using ::testing::IsEmpty;
using testing::HasSubstr;

TEST(SidePacketToStreamCalculator, WrongConfigWithMissingTick) {
TEST(SidePacketToStreamCalculator, WrongConfig_MissingTick) {
CalculatorGraphConfig graph_config =
ParseTextProtoOrDie<CalculatorGraphConfig>(
R"pb(

@@ -56,35 +52,10 @@ TEST(SidePacketToStreamCalculator, WrongConfigWithMissingTick) {
EXPECT_THAT(
status.message(),
HasSubstr(
"Either both TICK input and tick (AT_TICK/AT_FIRST_TICK) output "
"should be used or none of them."));
"Either both of TICK and AT_TICK should be used or none of them."));
}

TEST(SidePacketToStreamCalculator,
WrongConfigWithMissingTickForFirstTickProcessing) {
CalculatorGraphConfig graph_config =
ParseTextProtoOrDie<CalculatorGraphConfig>(
R"pb(
input_stream: "tick"
input_side_packet: "side_packet"
output_stream: "packet"
node {
calculator: "SidePacketToStreamCalculator"
input_side_packet: "side_packet"
output_stream: "AT_FIRST_TICK:packet"
}
)pb");
CalculatorGraph graph;
auto status = graph.Initialize(graph_config);
EXPECT_FALSE(status.ok());
EXPECT_THAT(
status.message(),
HasSubstr(
"Either both TICK input and tick (AT_TICK/AT_FIRST_TICK) output "
"should be used or none of them."));
}

TEST(SidePacketToStreamCalculator, WrongConfigWithMissingTimestampSideInput) {
TEST(SidePacketToStreamCalculator, WrongConfig_MissingTimestampSideInput) {
CalculatorGraphConfig graph_config =
ParseTextProtoOrDie<CalculatorGraphConfig>(
R"pb(

@@ -105,7 +76,7 @@ TEST(SidePacketToStreamCalculator, WrongConfigWithMissingTimestampSideInput) {
"or none of them."));
}

TEST(SidePacketToStreamCalculator, WrongConfigWithNonExistentTag) {
TEST(SidePacketToStreamCalculator, WrongConfig_NonExistentTag) {
CalculatorGraphConfig graph_config =
ParseTextProtoOrDie<CalculatorGraphConfig>(
R"pb(

@@ -121,13 +92,14 @@ TEST(SidePacketToStreamCalculator, WrongConfigWithNonExistentTag) {
CalculatorGraph graph;
auto status = graph.Initialize(graph_config);
EXPECT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Only one of AT_PRESTREAM, AT_POSTSTREAM, AT_ZERO, "
"AT_TICK, AT_FIRST_TICK and AT_TIMESTAMP tags is "
"allowed and required to specify output stream(s)."));
EXPECT_THAT(
status.message(),
HasSubstr("Only one of AT_PRESTREAM, AT_POSTSTREAM, AT_ZERO, AT_TICK and "
"AT_TIMESTAMP tags is allowed and required to specify output "
"stream(s)."));
}

TEST(SidePacketToStreamCalculator, WrongConfigWithMixedTags) {
TEST(SidePacketToStreamCalculator, WrongConfig_MixedTags) {
CalculatorGraphConfig graph_config =
ParseTextProtoOrDie<CalculatorGraphConfig>(
R"pb(

@@ -145,13 +117,14 @@ TEST(SidePacketToStreamCalculator, WrongConfigWithMixedTags) {
CalculatorGraph graph;
auto status = graph.Initialize(graph_config);
EXPECT_FALSE(status.ok());
EXPECT_THAT(status.message(),
HasSubstr("Only one of AT_PRESTREAM, AT_POSTSTREAM, AT_ZERO, "
"AT_TICK, AT_FIRST_TICK and AT_TIMESTAMP tags is "
"allowed and required to specify output stream(s)."));
EXPECT_THAT(
status.message(),
HasSubstr("Only one of AT_PRESTREAM, AT_POSTSTREAM, AT_ZERO, AT_TICK and "
"AT_TIMESTAMP tags is allowed and required to specify output "
"stream(s)."));
}

TEST(SidePacketToStreamCalculator, WrongConfigWithNotEnoughSidePackets) {
TEST(SidePacketToStreamCalculator, WrongConfig_NotEnoughSidePackets) {
CalculatorGraphConfig graph_config =
ParseTextProtoOrDie<CalculatorGraphConfig>(
R"pb(

@@ -173,7 +146,7 @@ TEST(SidePacketToStreamCalculator, WrongConfigWithNotEnoughSidePackets) {
"Same number of input side packets and output streams is required."));
}

TEST(SidePacketToStreamCalculator, WrongConfigWithNotEnoughOutputStreams) {
TEST(SidePacketToStreamCalculator, WrongConfig_NotEnoughOutputStreams) {
CalculatorGraphConfig graph_config =
ParseTextProtoOrDie<CalculatorGraphConfig>(
R"pb(
@@ -275,50 +248,7 @@ TEST(SidePacketToStreamCalculator, AtTick) {
tick_and_verify(/*at_timestamp=*/1025);
}

TEST(SidePacketToStreamCalculator, AtFirstTick) {
CalculatorGraphConfig graph_config =
ParseTextProtoOrDie<CalculatorGraphConfig>(
R"pb(
input_stream: "tick"
input_side_packet: "side_packet"
output_stream: "packet"
node {
calculator: "SidePacketToStreamCalculator"
input_stream: "TICK:tick"
input_side_packet: "side_packet"
output_stream: "AT_FIRST_TICK:packet"
}
)pb");
std::vector<Packet> output_packets;
tool::AddVectorSink("packet", &graph_config, &output_packets);
CalculatorGraph graph;

MP_ASSERT_OK(graph.Initialize(graph_config));
const int expected_value = 20;
const Timestamp kTestTimestamp(1234);
MP_ASSERT_OK(
graph.StartRun({{"side_packet", MakePacket<int>(expected_value)}}));

auto insert_tick = [&graph](Timestamp at_timestamp) {
MP_ASSERT_OK(graph.AddPacketToInputStream(
"tick", MakePacket<int>(/*doesn't matter*/ 1).At(at_timestamp)));
MP_ASSERT_OK(graph.WaitUntilIdle());
};

insert_tick(kTestTimestamp);

EXPECT_THAT(output_packets,
ElementsAre(PacketContainsTimestampAndPayload<int>(
Eq(kTestTimestamp), Eq(expected_value))));

output_packets.clear();

// Should not result in an additional output.
insert_tick(kTestTimestamp + 1);
EXPECT_THAT(output_packets, IsEmpty());
}

TEST(SidePacketToStreamCalculator, AtTickWithMultipleSidePackets) {
TEST(SidePacketToStreamCalculator, AtTick_MultipleSidePackets) {
CalculatorGraphConfig graph_config =
ParseTextProtoOrDie<CalculatorGraphConfig>(
R"pb(
@@ -372,62 +302,6 @@ TEST(SidePacketToStreamCalculator, AtTickWithMultipleSidePackets) {
tick_and_verify(/*at_timestamp=*/1025);
}

TEST(SidePacketToStreamCalculator, AtFirstTickWithMultipleSidePackets) {
CalculatorGraphConfig graph_config =
ParseTextProtoOrDie<CalculatorGraphConfig>(
R"pb(
input_stream: "tick"
input_side_packet: "side_packet0"
input_side_packet: "side_packet1"
output_stream: "packet0"
output_stream: "packet1"
node {
calculator: "SidePacketToStreamCalculator"
input_stream: "TICK:tick"
input_side_packet: "side_packet0"
input_side_packet: "side_packet1"
output_stream: "AT_FIRST_TICK:0:packet0"
output_stream: "AT_FIRST_TICK:1:packet1"
}
)pb");
std::vector<Packet> output_packets0;
tool::AddVectorSink("packet0", &graph_config, &output_packets0);
std::vector<Packet> output_packets1;
tool::AddVectorSink("packet1", &graph_config, &output_packets1);
CalculatorGraph graph;

MP_ASSERT_OK(graph.Initialize(graph_config));
const int expected_value0 = 20;
const int expected_value1 = 128;
const Timestamp kTestTimestamp(1234);
MP_ASSERT_OK(
graph.StartRun({{"side_packet0", MakePacket<int>(expected_value0)},
{"side_packet1", MakePacket<int>(expected_value1)}}));

auto insert_tick = [&graph](Timestamp at_timestamp) {
MP_ASSERT_OK(graph.AddPacketToInputStream(
"tick", MakePacket<int>(/*doesn't matter*/ 1).At(at_timestamp)));
MP_ASSERT_OK(graph.WaitUntilIdle());
};

insert_tick(kTestTimestamp);

EXPECT_THAT(output_packets0,
ElementsAre(PacketContainsTimestampAndPayload<int>(
Eq(kTestTimestamp), Eq(expected_value0))));
EXPECT_THAT(output_packets1,
ElementsAre(PacketContainsTimestampAndPayload<int>(
Eq(kTestTimestamp), Eq(expected_value1))));

output_packets0.clear();
output_packets1.clear();

// Should not result in an additional output.
insert_tick(kTestTimestamp + 1);
EXPECT_THAT(output_packets0, IsEmpty());
EXPECT_THAT(output_packets1, IsEmpty());
}

TEST(SidePacketToStreamCalculator, AtTimestamp) {
CalculatorGraphConfig graph_config =
ParseTextProtoOrDie<CalculatorGraphConfig>(

@@ -460,7 +334,7 @@ TEST(SidePacketToStreamCalculator, AtTimestamp) {
EXPECT_EQ(expected_value, output_packets.back().Get<int>());
}

TEST(SidePacketToStreamCalculator, AtTimestampWithMultipleOutputs) {
TEST(SidePacketToStreamCalculator, AtTimestamp_MultipleOutputs) {
CalculatorGraphConfig graph_config =
ParseTextProtoOrDie<CalculatorGraphConfig>(
R"pb(
@@ -17,7 +17,6 @@

#include "mediapipe/calculators/core/split_vector_calculator.pb.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/formats/body_rig.pb.h"
#include "mediapipe/framework/formats/landmark.pb.h"
#include "mediapipe/framework/port/canonical_errors.h"
#include "mediapipe/framework/port/ret_check.h"

@@ -197,18 +196,6 @@ class SplitLandmarkListCalculator
};
REGISTER_CALCULATOR(SplitLandmarkListCalculator);

class SplitJointListCalculator : public SplitListsCalculator<Joint, JointList> {
protected:
int ListSize(const JointList& list) const override {
return list.joint_size();
}
const Joint GetItem(const JointList& list, int idx) const override {
return list.joint(idx);
}
Joint* AddItem(JointList& list) const override { return list.add_joint(); }
};
REGISTER_CALCULATOR(SplitJointListCalculator);

} // namespace mediapipe

// NOLINTNEXTLINE
@@ -18,7 +18,6 @@

#include "mediapipe/framework/formats/classification.pb.h"
#include "mediapipe/framework/formats/detection.pb.h"
#include "mediapipe/framework/formats/image.h"
#include "mediapipe/framework/formats/landmark.pb.h"
#include "mediapipe/framework/formats/matrix.h"
#include "mediapipe/framework/formats/rect.pb.h"

@@ -87,12 +86,4 @@ REGISTER_CALCULATOR(SplitUint64tVectorCalculator);
typedef SplitVectorCalculator<float, false> SplitFloatVectorCalculator;
REGISTER_CALCULATOR(SplitFloatVectorCalculator);

typedef SplitVectorCalculator<mediapipe::Image, false>
SplitImageVectorCalculator;
REGISTER_CALCULATOR(SplitImageVectorCalculator);

typedef SplitVectorCalculator<std::array<float, 16>, false>
SplitAffineMatrixVectorCalculator;
REGISTER_CALCULATOR(SplitAffineMatrixVectorCalculator);

} // namespace mediapipe
@@ -12,13 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#include "mediapipe/framework/api2/node.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/port/status.h"
#include "mediapipe/framework/timestamp.h"

namespace mediapipe {
namespace api2 {

// A calculator that takes a packet of an input stream and converts it to an
// output side packet. This calculator only works under the assumption that the

@@ -30,21 +28,21 @@ namespace api2 {
// input_stream: "stream"
// output_side_packet: "side_packet"
// }
class StreamToSidePacketCalculator : public Node {
class StreamToSidePacketCalculator : public mediapipe::CalculatorBase {
public:
static constexpr Input<AnyType>::Optional kIn{""};
static constexpr SideOutput<SameType<kIn>> kOut{""};

MEDIAPIPE_NODE_CONTRACT(kIn, kOut);
static absl::Status GetContract(mediapipe::CalculatorContract* cc) {
cc->Inputs().Index(0).SetAny();
cc->OutputSidePackets().Index(0).SetAny();
return absl::OkStatus();
}

absl::Status Process(mediapipe::CalculatorContext* cc) override {
kOut(cc).Set(
kIn(cc).packet().As<AnyType>().At(mediapipe::Timestamp::Unset()));
mediapipe::Packet& packet = cc->Inputs().Index(0).Value();
cc->OutputSidePackets().Index(0).Set(
packet.At(mediapipe::Timestamp::Unset()));
return absl::OkStatus();
}
};
REGISTER_CALCULATOR(StreamToSidePacketCalculator);

MEDIAPIPE_REGISTER_NODE(StreamToSidePacketCalculator);

} // namespace api2
} // namespace mediapipe
@@ -64,16 +64,16 @@ REGISTER_CALCULATOR(StringToIntCalculator);
using StringToUintCalculator = StringToIntCalculatorTemplate<unsigned int>;
REGISTER_CALCULATOR(StringToUintCalculator);

using StringToInt32Calculator = StringToIntCalculatorTemplate<int32_t>;
using StringToInt32Calculator = StringToIntCalculatorTemplate<int32>;
REGISTER_CALCULATOR(StringToInt32Calculator);

using StringToUint32Calculator = StringToIntCalculatorTemplate<uint32_t>;
using StringToUint32Calculator = StringToIntCalculatorTemplate<uint32>;
REGISTER_CALCULATOR(StringToUint32Calculator);

using StringToInt64Calculator = StringToIntCalculatorTemplate<int64_t>;
using StringToInt64Calculator = StringToIntCalculatorTemplate<int64>;
REGISTER_CALCULATOR(StringToInt64Calculator);

using StringToUint64Calculator = StringToIntCalculatorTemplate<uint64_t>;
using StringToUint64Calculator = StringToIntCalculatorTemplate<uint64>;
REGISTER_CALCULATOR(StringToUint64Calculator);

} // namespace mediapipe
@@ -1,90 +0,0 @@
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/port/status.h"

namespace mediapipe {
namespace {

constexpr char kInputValueTag[] = "IN";
constexpr char kTickerTag[] = "TICK";
constexpr char kOutputTag[] = "OUT";
constexpr char kIndicationTag[] = "FLAG";

} // namespace
// For every packet received on the TICK stream, if the IN stream is not
// empty - emit its value as is as OUT. Otherwise output a default packet.
// FLAG outputs true every time the default value has been used. It does not
// output anything when IN has a value.
//
// Example config:
// node {
// calculator: "ValueOrDefaultCalculator"
// input_stream: "IN:sometimes_missing_value"
// input_stream: "TICK:clock"
// output_stream: "OUT:value_or_default"
// output_stream: "FLAG:used_default"
// input_side_packet: "default"
// }
//
// TODO: Consider adding an option for a default value as a input-stream
// instead of a side-packet, so it will enable using standard calculators
// instead of creating a new packet-generators. It will also allow a dynamic
// default value.
class ValueOrDefaultCalculator : public mediapipe::CalculatorBase {
public:
ValueOrDefaultCalculator() {}

ValueOrDefaultCalculator(const ValueOrDefaultCalculator&) = delete;
ValueOrDefaultCalculator& operator=(const ValueOrDefaultCalculator&) = delete;

static mediapipe::Status GetContract(mediapipe::CalculatorContract* cc) {
cc->Inputs().Tag(kInputValueTag).SetAny();
cc->Inputs().Tag(kTickerTag).SetAny();
cc->Outputs().Tag(kOutputTag).SetSameAs(&cc->Inputs().Tag(kInputValueTag));
cc->Outputs().Tag(kIndicationTag).Set<bool>();
cc->InputSidePackets().Index(0).SetSameAs(
&cc->Inputs().Tag(kInputValueTag));

return mediapipe::OkStatus();
}

mediapipe::Status Open(mediapipe::CalculatorContext* cc) override {
if (!cc->Inputs().Tag(kInputValueTag).Header().IsEmpty()) {
cc->Outputs()
.Tag(kOutputTag)
.SetHeader(cc->Inputs().Tag(kInputValueTag).Header());
}
default_ = cc->InputSidePackets().Index(0);
cc->SetOffset(mediapipe::TimestampDiff(0));
return mediapipe::OkStatus();
}

mediapipe::Status Process(mediapipe::CalculatorContext* cc) override {
// Output according to the TICK signal.
if (cc->Inputs().Tag(kTickerTag).IsEmpty()) {
return mediapipe::OkStatus();
}
if (!cc->Inputs().Tag(kInputValueTag).IsEmpty()) {
// Output the input as is:
cc->Outputs()
.Tag(kOutputTag)
.AddPacket(cc->Inputs().Tag(kInputValueTag).Value());
} else {
// Output default:
cc->Outputs()
.Tag(kOutputTag)
.AddPacket(default_.At(cc->InputTimestamp()));
cc->Outputs()
.Tag(kIndicationTag)
.Add(new bool(true), cc->InputTimestamp());
}
return mediapipe::OkStatus();
}

private:
// The default value to replicate every time there is no new value.
mediapipe::Packet default_;
};

REGISTER_CALCULATOR(ValueOrDefaultCalculator);

} // namespace mediapipe
@@ -1,240 +0,0 @@
#include <algorithm>
#include <cstdint>
#include <vector>

#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/calculator_runner.h"
#include "mediapipe/framework/packet.h"
#include "mediapipe/framework/port/gmock.h"
#include "mediapipe/framework/port/gtest.h"
#include "mediapipe/framework/port/status_matchers.h"

namespace mediapipe {
namespace {

using ::testing::AllOf;
using ::testing::ContainerEq;
using ::testing::Each;
using ::testing::ElementsAre;
using ::testing::IsEmpty;
using ::testing::SizeIs;
using ::testing::Test;

const int kDefaultValue = 0;

// Utility to a create a mediapipe graph runner with the tested calculator and a
// default value, for all the tests.
class ValueOrDefaultRunner : public mediapipe::CalculatorRunner {
public:
ValueOrDefaultRunner()
: mediapipe::CalculatorRunner(R"pb(
calculator: "ValueOrDefaultCalculator"
input_stream: "IN:in"
input_stream: "TICK:tick"
input_side_packet: "default"
output_stream: "OUT:out"
output_stream: "FLAG:used_default"
)pb") {
MutableSidePackets()->Index(0) = mediapipe::MakePacket<int>(kDefaultValue);
}

// Utility to push inputs to the runner to the TICK stream, so we could easily
// tick.
void TickAt(int64_t time) {
// The type or value of the stream isn't relevant, we use just a bool.
MutableInputs()->Tag("TICK").packets.push_back(
mediapipe::Adopt(new bool(false)).At(mediapipe::Timestamp(time)));
}

// Utility to push the real inputs to the runner (IN stream).
void ProvideInput(int64_t time, int value) {
MutableInputs()->Tag("IN").packets.push_back(
mediapipe::Adopt(new int(value)).At(mediapipe::Timestamp(time)));
}

// Extracts the timestamps (as int64) of the output stream of the calculator.
std::vector<int64_t> GetOutputTimestamps() const {
std::vector<int64_t> timestamps;
for (const mediapipe::Packet& packet : Outputs().Tag("OUT").packets) {
timestamps.emplace_back(packet.Timestamp().Value());
}
return timestamps;
}

// Extracts the values from the output stream of the calculator.
std::vector<int> GetOutputValues() const {
std::vector<int> values;
for (const mediapipe::Packet& packet : Outputs().Tag("OUT").packets) {
values.emplace_back(packet.Get<int>());
}
return values;
}

// Extracts the timestamps (as int64) of the flag stream, which indicates on
// times without an input value (i.e. using the default value).
std::vector<int64_t> GetFlagTimestamps() const {
std::vector<int64_t> timestamps;
for (const mediapipe::Packet& packet : Outputs().Tag("FLAG").packets) {
timestamps.emplace_back(packet.Timestamp().Value());
}
return timestamps;
}

// Extracts the output from the flags stream (which should always be true).
std::vector<bool> GetFlagValues() const {
std::vector<bool> flags;
for (const mediapipe::Packet& packet : Outputs().Tag("FLAG").packets) {
flags.emplace_back(packet.Get<bool>());
}
return flags;
}
};

// To be used as input values:
std::vector<int> GetIntegersRange(int size) {
std::vector<int> result;
for (int i = 0; i < size; ++i) {
// We start with default-value+1 so it won't contain the default value.
result.push_back(kDefaultValue + 1 + i);
}
return result;
}

TEST(ValueOrDefaultCalculatorTest, NoInputs) {
// Check that when no real inputs are provided - we get the default value over
// and over, with the correct timestamps.
ValueOrDefaultRunner runner;
const std::vector<int64_t> ticks = {0, 1, 2, 5, 8, 12, 33, 231};

for (int tick : ticks) {
runner.TickAt(tick);
}

MP_EXPECT_OK(runner.Run());

// Make sure we get the right timestamps:
EXPECT_THAT(runner.GetOutputTimestamps(), ContainerEq(ticks));
// All should be default value:
EXPECT_THAT(runner.GetOutputValues(),
AllOf(Each(kDefaultValue), SizeIs(ticks.size())));
// We should get the default indication all the time:
EXPECT_THAT(runner.GetFlagTimestamps(), ContainerEq(ticks));
}

TEST(ValueOrDefaultCalculatorTest, NeverDefault) {
// Check that when we provide the inputs on time - we get them as outputs.
ValueOrDefaultRunner runner;
const std::vector<int64_t> ticks = {0, 1, 2, 5, 8, 12, 33, 231};
const std::vector<int> values = GetIntegersRange(ticks.size());

for (int i = 0; i < ticks.size(); ++i) {
runner.TickAt(ticks[i]);
runner.ProvideInput(ticks[i], values[i]);
}

MP_EXPECT_OK(runner.Run());

// Make sure we get the right timestamps:
EXPECT_THAT(runner.GetOutputTimestamps(), ContainerEq(ticks));
// Should get the inputs values:
EXPECT_THAT(runner.GetOutputValues(), ContainerEq(values));
// We should never get the default indication:
EXPECT_THAT(runner.GetFlagTimestamps(), IsEmpty());
}

TEST(ValueOrDefaultCalculatorTest, DefaultAndValues) {
// Check that when we provide inputs only part of the time - we get them, but
// defaults at the missing times.
// That's the usual use case for this calculator.
ValueOrDefaultRunner runner;
const std::vector<int64_t> ticks = {0, 1, 5, 8, 12, 231};
// Provide inputs only part of the ticks.
// Chosen so there will be defaults before the first input, between the
// inputs and after the last input.
const std::vector<int64_t> in_ticks = {/*0,*/ 1, 5, /*8,*/ 12, /*, 231*/};
const std::vector<int> in_values = GetIntegersRange(in_ticks.size());

for (int tick : ticks) {
runner.TickAt(tick);
}
for (int i = 0; i < in_ticks.size(); ++i) {
runner.ProvideInput(in_ticks[i], in_values[i]);
}

MP_EXPECT_OK(runner.Run());

// Make sure we get all the timestamps:
EXPECT_THAT(runner.GetOutputTimestamps(), ContainerEq(ticks));
// The timestamps of the flag should be exactly the ones not in in_ticks.
EXPECT_THAT(runner.GetFlagTimestamps(), ElementsAre(0, 8, 231));
// And the values are default in these times, and the input values for
// in_ticks.
EXPECT_THAT(
runner.GetOutputValues(),
ElementsAre(kDefaultValue, 1, 2, kDefaultValue, 3, kDefaultValue));
}

TEST(ValueOrDefaultCalculatorTest, TimestampsMismatch) {
|
||||
// Check that when we provide the inputs not on time - we don't get them.
|
||||
ValueOrDefaultRunner runner;
|
||||
const std::vector<int64_t> ticks = {1, 2, 5, 8, 12, 33, 231};
|
||||
// The timestamps chosen so it will be before the first tick, in between ticks
|
||||
// and after the last one. Also - more inputs than ticks.
|
||||
const std::vector<int64_t> in_ticks = {0, 3, 4, 6, 7, 9, 10,
|
||||
11, 13, 14, 15, 16, 232};
|
||||
const std::vector<int> in_values = GetIntegersRange(in_ticks.size());
|
||||
for (int tick : ticks) {
|
||||
runner.TickAt(tick);
|
||||
}
|
||||
for (int i = 0; i < in_ticks.size(); ++i) {
|
||||
runner.ProvideInput(in_ticks[i], in_values[i]);
|
||||
}
|
||||
|
||||
MP_EXPECT_OK(runner.Run());
|
||||
|
||||
// None of the in_ticks should be inserted:
|
||||
EXPECT_THAT(runner.GetOutputTimestamps(), ContainerEq(ticks));
|
||||
EXPECT_THAT(runner.GetOutputValues(),
|
||||
AllOf(Each(kDefaultValue), SizeIs(ticks.size())));
|
||||
// All (and only) ticks should get the default.
|
||||
EXPECT_THAT(runner.GetFlagTimestamps(), ContainerEq(ticks));
|
||||
}
|
||||
|
||||
TEST(ValueOrDefaultCalculatorTest, FlagValue) {
|
||||
// Since the flag is assumed to be a bool anyway, there is not much to
|
||||
// check, but we should verify once that its value is the expected (true)
|
||||
// one.
|
||||
ValueOrDefaultRunner runner;
|
||||
runner.TickAt(0);
|
||||
MP_EXPECT_OK(runner.Run());
|
||||
EXPECT_THAT(runner.GetFlagValues(), ElementsAre(true));
|
||||
}
|
||||
|
||||
TEST(ValueOrDefaultCalculatorTest, FullTest) {
|
||||
// Make sure that nothing goes wrong with an input that has both matching and
|
||||
// mismatching timestamps, some defaults, etc.
|
||||
ValueOrDefaultRunner runner;
|
||||
const std::vector<int64_t> ticks = {1, 2, 5, 8, 12, 33, 231};
|
||||
const std::vector<int64_t> in_ticks = {0, 2, 4, 6, 8, 9, 12, 33, 54, 232};
|
||||
const std::vector<int> in_values = GetIntegersRange(in_ticks.size());
|
||||
|
||||
for (int tick : ticks) {
|
||||
runner.TickAt(tick);
|
||||
}
|
||||
for (int i = 0; i < in_ticks.size(); ++i) {
|
||||
runner.ProvideInput(in_ticks[i], in_values[i]);
|
||||
}
|
||||
|
||||
MP_EXPECT_OK(runner.Run());
|
||||
|
||||
EXPECT_THAT(runner.GetOutputTimestamps(), ContainerEq(ticks));
|
||||
// Calculated by hand:
|
||||
EXPECT_THAT(
|
||||
runner.GetOutputValues(),
|
||||
ElementsAre(kDefaultValue, 2, kDefaultValue, 5, 7, 8, kDefaultValue));
|
||||
EXPECT_THAT(runner.GetFlagTimestamps(), ElementsAre(1, 5, 231));
|
||||
EXPECT_THAT(runner.GetFlagValues(), AllOf(Each(true), SizeIs(3)));
|
||||
}
|
||||
|
||||
} // namespace
|
||||
} // namespace mediapipe
|
|
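The tests above exercise a calculator that, at every TICK timestamp, forwards the value from its input stream when one is present and otherwise emits a preconfigured default value together with a flag. The calculator's implementation is not part of this excerpt, so the following is only a minimal sketch of that behaviour, assuming hypothetical stream tags (TICK, IN, OUT, FLAG) and a DEFAULT input side packet; the real ValueOrDefaultCalculator may use different tags and types.

#include "absl/status/status.h"
#include "mediapipe/framework/calculator_framework.h"

namespace mediapipe {

// Sketch only: emits the IN packet at each TICK, or the DEFAULT side-packet
// value plus a FLAG packet when no IN packet arrived at that timestamp.
class ValueOrDefaultSketchCalculator : public CalculatorBase {
 public:
  static absl::Status GetContract(CalculatorContract* cc) {
    cc->Inputs().Tag("TICK").SetAny();
    cc->Inputs().Tag("IN").Set<int>();
    cc->Outputs().Tag("OUT").Set<int>();
    cc->Outputs().Tag("FLAG").Set<bool>();
    cc->InputSidePackets().Tag("DEFAULT").Set<int>();
    return absl::OkStatus();
  }

  absl::Status Open(CalculatorContext* cc) override {
    default_value_ = cc->InputSidePackets().Tag("DEFAULT").Get<int>();
    return absl::OkStatus();
  }

  absl::Status Process(CalculatorContext* cc) override {
    // Only produce output at tick timestamps.
    if (cc->Inputs().Tag("TICK").IsEmpty()) return absl::OkStatus();
    if (!cc->Inputs().Tag("IN").IsEmpty()) {
      // An input arrived exactly at this tick: forward it as-is.
      cc->Outputs().Tag("OUT").AddPacket(cc->Inputs().Tag("IN").Value());
    } else {
      // No input at this tick: emit the default and signal it on FLAG.
      cc->Outputs().Tag("OUT").AddPacket(
          MakePacket<int>(default_value_).At(cc->InputTimestamp()));
      cc->Outputs().Tag("FLAG").AddPacket(
          MakePacket<bool>(true).At(cc->InputTimestamp()));
    }
    return absl::OkStatus();
  }

 private:
  int default_value_ = 0;
};
REGISTER_CALCULATOR(ValueOrDefaultSketchCalculator);

}  // namespace mediapipe

With such a node, the ValueOrDefaultRunner used in the tests only needs to feed the TICK and IN streams and read back OUT and FLAG, which is what the expectations above assert on.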
@ -97,7 +97,6 @@ cc_library(
|
|||
"//mediapipe/framework/port:ret_check",
|
||||
"//mediapipe/framework/port:source_location",
|
||||
"//mediapipe/framework/port:status",
|
||||
"@com_google_absl//absl/log:absl_check",
|
||||
],
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
@ -126,7 +125,6 @@ cc_library(
|
|||
"//mediapipe/framework/port:opencv_imgcodecs",
|
||||
"//mediapipe/framework/port:opencv_imgproc",
|
||||
"//mediapipe/framework/port:status",
|
||||
"@com_google_absl//absl/log:absl_check",
|
||||
],
|
||||
alwayslink = 1,
|
||||
)
|
||||
|
@ -137,6 +135,7 @@ cc_library(
|
|||
deps = [
|
||||
"//mediapipe/framework:calculator_framework",
|
||||
"//mediapipe/framework/formats:image_frame_opencv",
|
||||
"//mediapipe/framework/port:opencv_imgcodecs",
|
||||
"//mediapipe/framework/port:opencv_imgproc",
|
||||
"//mediapipe/framework/port:status",
|
||||
],
|
||||
|
@ -153,11 +152,11 @@ cc_library(
|
|||
"//mediapipe/framework/formats:image_format_cc_proto",
|
||||
"//mediapipe/framework/formats:image_frame",
|
||||
"//mediapipe/framework/formats:image_frame_opencv",
|
||||
"//mediapipe/framework/port:logging",
|
||||
"//mediapipe/framework/port:opencv_core",
|
||||
"//mediapipe/framework/port:opencv_imgproc",
|
||||
"//mediapipe/framework/port:status",
|
||||
"//mediapipe/framework/port:vector",
|
||||
"@com_google_absl//absl/log:absl_log",
|
||||
] + select({
|
||||
"//mediapipe/gpu:disable_gpu": [],
|
||||
"//conditions:default": [
|
||||
|
@ -204,7 +203,6 @@ cc_library(
|
|||
"//mediapipe/framework/port:opencv_imgproc",
|
||||
"//mediapipe/framework/port:status",
|
||||
"//mediapipe/framework/port:vector",
|
||||
"@com_google_absl//absl/log:absl_check",
|
||||
"@com_google_absl//absl/strings",
|
||||
] + select({
|
||||
"//mediapipe/gpu:disable_gpu": [],
|
||||
|
@ -264,12 +262,9 @@ cc_library(
|
|||
"//mediapipe/framework/port:ret_check",
|
||||
"//mediapipe/framework/port:status",
|
||||
"//mediapipe/gpu:scale_mode_cc_proto",
|
||||
"@com_google_absl//absl/status",
|
||||
"@com_google_absl//absl/strings",
|
||||
] + select({
|
||||
"//mediapipe/gpu:disable_gpu": [],
|
||||
"//conditions:default": [
|
||||
"//mediapipe/gpu:gl_base_hdr",
|
||||
"//mediapipe/gpu:gl_calculator_helper",
|
||||
"//mediapipe/gpu:gl_quad_renderer",
|
||||
"//mediapipe/gpu:gl_simple_shaders",
|
||||
|
@ -279,38 +274,6 @@ cc_library(
|
|||
alwayslink = 1,
|
||||
)
|
||||
|
||||
cc_test(
|
||||
name = "image_transformation_calculator_test",
|
||||
srcs = ["image_transformation_calculator_test.cc"],
|
||||
data = ["//mediapipe/calculators/image/testdata:test_images"],
|
||||
tags = [
|
||||
"desktop_only_test",
|
||||
],
|
||||
deps = [
|
||||
":image_transformation_calculator",
|
||||
"//mediapipe/framework:calculator_cc_proto",
|
||||
"//mediapipe/framework:calculator_framework",
|
||||
"//mediapipe/framework:calculator_runner",
|
||||
"//mediapipe/framework/deps:file_path",
|
||||
"//mediapipe/framework/formats:image_format_cc_proto",
|
||||
"//mediapipe/framework/formats:image_frame",
|
||||
"//mediapipe/framework/formats:image_frame_opencv",
|
||||
"//mediapipe/framework/port:gtest",
|
||||
"//mediapipe/framework/port:opencv_imgcodecs",
|
||||
"//mediapipe/framework/port:opencv_imgproc",
|
||||
"//mediapipe/framework/port:parse_text_proto",
|
||||
"//mediapipe/gpu:gpu_buffer_to_image_frame_calculator",
|
||||
"//mediapipe/gpu:image_frame_to_gpu_buffer_calculator",
|
||||
"//mediapipe/gpu:multi_pool",
|
||||
"//third_party:opencv",
|
||||
"@com_google_absl//absl/container:flat_hash_set",
|
||||
"@com_google_absl//absl/flags:flag",
|
||||
"@com_google_absl//absl/log:absl_check",
|
||||
"@com_google_absl//absl/strings",
|
||||
"@com_google_googletest//:gtest_main",
|
||||
],
|
||||
)
|
||||
|
||||
cc_library(
|
||||
name = "image_cropping_calculator",
|
||||
srcs = ["image_cropping_calculator.cc"],
|
||||
|
@ -338,7 +301,6 @@ cc_library(
|
|||
"//mediapipe/framework/port:opencv_imgproc",
|
||||
"//mediapipe/framework/port:ret_check",
|
||||
"//mediapipe/framework/port:status",
|
||||
"@com_google_absl//absl/log:absl_log",
|
||||
] + select({
|
||||
"//mediapipe/gpu:disable_gpu": [],
|
||||
"//conditions:default": [
|
||||
|
@ -355,7 +317,6 @@ cc_library(
|
|||
cc_test(
|
||||
name = "image_cropping_calculator_test",
|
||||
srcs = ["image_cropping_calculator_test.cc"],
|
||||
tags = ["not_run:arm"],
|
||||
deps = [
|
||||
":image_cropping_calculator",
|
||||
":image_cropping_calculator_cc_proto",
|
||||
|
@ -435,7 +396,6 @@ cc_library(
|
|||
"//mediapipe/framework/port:logging",
|
||||
"//mediapipe/framework/port:ret_check",
|
||||
"//mediapipe/framework/port:status",
|
||||
"@com_google_absl//absl/log:absl_check",
|
||||
"@com_google_absl//absl/strings",
|
||||
],
|
||||
)
|
||||
|
@ -460,8 +420,6 @@ cc_library(
|
|||
"//mediapipe/framework/port:ret_check",
|
||||
"//mediapipe/framework/port:status",
|
||||
"//mediapipe/util:image_frame_util",
|
||||
"@com_google_absl//absl/log:absl_check",
|
||||
"@com_google_absl//absl/log:absl_log",
|
||||
"@com_google_absl//absl/strings",
|
||||
"@libyuv",
|
||||
],
|
||||
|
@ -667,9 +625,9 @@ cc_library(
|
|||
"//mediapipe/framework/formats:image",
|
||||
"//mediapipe/framework/formats:image_format_cc_proto",
|
||||
"//mediapipe/framework/formats:image_frame",
|
||||
"//mediapipe/framework/port:logging",
|
||||
"//mediapipe/framework/port:status",
|
||||
"//mediapipe/framework/port:vector",
|
||||
"@com_google_absl//absl/log:absl_log",
|
||||
] + select({
|
||||
"//mediapipe/gpu:disable_gpu": [],
|
||||
"//conditions:default": [
|
||||
|
@ -692,7 +650,6 @@ cc_library(
|
|||
cc_test(
|
||||
name = "segmentation_smoothing_calculator_test",
|
||||
srcs = ["segmentation_smoothing_calculator_test.cc"],
|
||||
tags = ["not_run:arm"],
|
||||
deps = [
|
||||
":image_clone_calculator",
|
||||
":image_clone_calculator_cc_proto",
|
||||
|
@ -707,7 +664,6 @@ cc_test(
|
|||
"//mediapipe/framework/port:opencv_imgcodecs",
|
||||
"//mediapipe/framework/port:opencv_imgproc",
|
||||
"//mediapipe/framework/port:parse_text_proto",
|
||||
"@com_google_absl//absl/log:absl_log",
|
||||
],
|
||||
)
|
||||
|
||||
|
@ -730,7 +686,6 @@ cc_library(
|
|||
"//mediapipe/gpu:gpu_buffer",
|
||||
"//mediapipe/gpu:gpu_origin_cc_proto",
|
||||
"//mediapipe/gpu:shader_util",
|
||||
"@com_google_absl//absl/log:absl_log",
|
||||
"@com_google_absl//absl/memory",
|
||||
"@com_google_absl//absl/status",
|
||||
"@com_google_absl//absl/status:statusor",
|
||||
|
@ -788,7 +743,6 @@ cc_library(
|
|||
":affine_transformation_runner_gl",
|
||||
"//mediapipe/gpu:gl_calculator_helper",
|
||||
"//mediapipe/gpu:gpu_buffer",
|
||||
"//mediapipe/gpu:gpu_service",
|
||||
],
|
||||
}) + select({
|
||||
"//mediapipe/framework/port:disable_opencv": [],
|
||||
|
@ -817,10 +771,7 @@ cc_test(
|
|||
"//mediapipe/calculators/tensor:testdata/image_to_tensor/medium_sub_rect_with_rotation_border_zero_interp_cubic.png",
|
||||
"//mediapipe/calculators/tensor:testdata/image_to_tensor/noop_except_range.png",
|
||||
],
|
||||
tags = [
|
||||
"desktop_only_test",
|
||||
"not_run:arm",
|
||||
],
|
||||
tags = ["desktop_only_test"],
|
||||
deps = [
|
||||
":affine_transformation",
|
||||
":image_transformation_calculator",
|
||||
|
|
|
@ -20,7 +20,6 @@
|
|||
#include "Eigen/Core"
|
||||
#include "Eigen/Geometry"
|
||||
#include "Eigen/LU"
|
||||
#include "absl/log/absl_log.h"
|
||||
#include "absl/memory/memory.h"
|
||||
#include "absl/status/status.h"
|
||||
#include "absl/status/statusor.h"
|
||||
|
@ -54,10 +53,6 @@ bool IsMatrixVerticalFlipNeeded(GpuOrigin::Mode gpu_origin) {
|
|||
#endif // __APPLE__
|
||||
case GpuOrigin::TOP_LEFT:
|
||||
return false;
|
||||
default:
|
||||
ABSL_LOG(ERROR) << "Incorrect GpuOrigin: "
|
||||
<< static_cast<int>(gpu_origin);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -223,7 +218,7 @@ class GlTextureWarpAffineRunner
|
|||
absl::StrCat(mediapipe::kMediaPipeFragmentShaderPreamble,
|
||||
interpolation_def, kFragShader);
|
||||
|
||||
MP_ASSIGN_OR_RETURN(program_, create_fn(vert_src, frag_src));
|
||||
ASSIGN_OR_RETURN(program_, create_fn(vert_src, frag_src));
|
||||
|
||||
auto create_custom_zero_fn = [&]() -> absl::StatusOr<Program> {
|
||||
std::string custom_zero_border_mode_def = R"(
|
||||
|
@ -236,10 +231,10 @@ class GlTextureWarpAffineRunner
|
|||
};
|
||||
#if GL_CLAMP_TO_BORDER_MAY_BE_SUPPORTED
|
||||
if (!IsGlClampToBorderSupported(gl_helper_->GetGlContext())) {
|
||||
MP_ASSIGN_OR_RETURN(program_custom_zero_, create_custom_zero_fn());
|
||||
ASSIGN_OR_RETURN(program_custom_zero_, create_custom_zero_fn());
|
||||
}
|
||||
#else
|
||||
MP_ASSIGN_OR_RETURN(program_custom_zero_, create_custom_zero_fn());
|
||||
ASSIGN_OR_RETURN(program_custom_zero_, create_custom_zero_fn());
|
||||
#endif // GL_CLAMP_TO_BORDER_MAY_BE_SUPPORTED
|
||||
|
||||
glGenFramebuffers(1, &framebuffer_);
|
||||
|
@ -389,8 +384,6 @@ class GlTextureWarpAffineRunner
|
|||
glActiveTexture(GL_TEXTURE0);
|
||||
glBindTexture(GL_TEXTURE_2D, 0);
|
||||
|
||||
glFlush();
|
||||
|
||||
return absl::OkStatus();
|
||||
}
|
||||
|
||||
|
|
|
@ -59,7 +59,7 @@ class OpenCvRunner
|
|||
const ImageFrame& input, const std::array<float, 16>& matrix,
|
||||
const AffineTransformation::Size& size,
|
||||
AffineTransformation::BorderMode border_mode) override {
|
||||
// OpenCV warpAffine works in absolute coordinates, so the transform (which
|
||||
// OpenCV warpAffine works in absolute coordinates, so the transfom (which
|
||||
// accepts and produces relative coordinates) should be adjusted to first
|
||||
// normalize coordinates and then scale them.
|
||||
// clang-format off
|
||||
|
|
|
@ -15,7 +15,6 @@
|
|||
#include <memory>
|
||||
#include <string>
|
||||
|
||||
#include "absl/log/absl_check.h"
|
||||
#include "absl/strings/str_replace.h"
|
||||
#include "mediapipe/calculators/image/bilateral_filter_calculator.pb.h"
|
||||
#include "mediapipe/framework/calculator_framework.h"
|
||||
|
@ -113,7 +112,7 @@ class BilateralFilterCalculator : public CalculatorBase {
|
|||
REGISTER_CALCULATOR(BilateralFilterCalculator);
|
||||
|
||||
absl::Status BilateralFilterCalculator::GetContract(CalculatorContract* cc) {
|
||||
RET_CHECK_GE(cc->Inputs().NumEntries(), 1);
|
||||
CHECK_GE(cc->Inputs().NumEntries(), 1);
|
||||
|
||||
if (cc->Inputs().HasTag(kInputFrameTag) &&
|
||||
cc->Inputs().HasTag(kInputFrameTagGpu)) {
|
||||
|
@ -184,8 +183,8 @@ absl::Status BilateralFilterCalculator::Open(CalculatorContext* cc) {
|
|||
|
||||
sigma_color_ = options_.sigma_color();
|
||||
sigma_space_ = options_.sigma_space();
|
||||
ABSL_CHECK_GE(sigma_color_, 0.0);
|
||||
ABSL_CHECK_GE(sigma_space_, 0.0);
|
||||
CHECK_GE(sigma_color_, 0.0);
|
||||
CHECK_GE(sigma_space_, 0.0);
|
||||
if (!use_gpu_) sigma_color_ *= 255.0;
|
||||
|
||||
if (use_gpu_) {
|
||||
|
|
|
@ -12,7 +12,6 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "absl/log/absl_check.h"
|
||||
#include "mediapipe/framework/calculator_framework.h"
|
||||
#include "mediapipe/framework/formats/image_frame.h"
|
||||
#include "mediapipe/framework/formats/image_frame_opencv.h"
|
||||
|
@ -26,8 +25,8 @@
|
|||
namespace mediapipe {
|
||||
namespace {
|
||||
void SetColorChannel(int channel, uint8 value, cv::Mat* mat) {
|
||||
ABSL_CHECK(mat->depth() == CV_8U);
|
||||
ABSL_CHECK(channel < mat->channels());
|
||||
CHECK(mat->depth() == CV_8U);
|
||||
CHECK(channel < mat->channels());
|
||||
const int step = mat->channels();
|
||||
for (int r = 0; r < mat->rows; ++r) {
|
||||
uint8* row_ptr = mat->ptr<uint8>(r);
|
||||
|
|
|
@ -64,8 +64,7 @@ class ImageCloneCalculator : public Node {
|
|||
"GPU processing is disabled in build flags");
|
||||
}
|
||||
#else
|
||||
MP_RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(
|
||||
cc, /*request_gpu_as_optional=*/true));
|
||||
MP_RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
|
||||
#endif // MEDIAPIPE_DISABLE_GPU
|
||||
return absl::OkStatus();
|
||||
}
|
||||
|
@ -73,6 +72,9 @@ class ImageCloneCalculator : public Node {
|
|||
absl::Status Open(CalculatorContext* cc) override {
|
||||
const auto& options = cc->Options<mediapipe::ImageCloneCalculatorOptions>();
|
||||
output_on_gpu_ = options.output_on_gpu();
|
||||
#if !MEDIAPIPE_DISABLE_GPU
|
||||
MP_RETURN_IF_ERROR(gpu_helper_.Open(cc));
|
||||
#endif // !MEDIAPIPE_DISABLE_GPU
|
||||
return absl::OkStatus();
|
||||
}
|
||||
|
||||
|
@ -102,10 +104,6 @@ class ImageCloneCalculator : public Node {
|
|||
|
||||
if (output_on_gpu_ && !input_on_gpu) {
|
||||
#if !MEDIAPIPE_DISABLE_GPU
|
||||
if (!gpu_initialized_) {
|
||||
MP_RETURN_IF_ERROR(gpu_helper_.Open(cc));
|
||||
gpu_initialized_ = true;
|
||||
}
|
||||
gpu_helper_.RunInGlContext([&output]() { output->ConvertToGpu(); });
|
||||
#endif // !MEDIAPIPE_DISABLE_GPU
|
||||
} else if (!output_on_gpu_ && input_on_gpu) {
|
||||
|
@ -120,7 +118,6 @@ class ImageCloneCalculator : public Node {
|
|||
bool output_on_gpu_;
|
||||
#if !MEDIAPIPE_DISABLE_GPU
|
||||
mediapipe::GlCalculatorHelper gpu_helper_;
|
||||
bool gpu_initialized_ = false;
|
||||
#endif // !MEDIAPIPE_DISABLE_GPU
|
||||
};
|
||||
MEDIAPIPE_REGISTER_NODE(ImageCloneCalculator);
|
||||
|
|
|
@ -16,7 +16,6 @@
|
|||
|
||||
#include <cmath>
|
||||
|
||||
#include "absl/log/absl_log.h"
|
||||
#include "mediapipe/framework/formats/image_frame.h"
|
||||
#include "mediapipe/framework/formats/image_frame_opencv.h"
|
||||
#include "mediapipe/framework/formats/rect.pb.h"
|
||||
|
@ -203,9 +202,8 @@ absl::Status ImageCroppingCalculator::ValidateBorderModeForGPU(
|
|||
|
||||
switch (options.border_mode()) {
|
||||
case mediapipe::ImageCroppingCalculatorOptions::BORDER_ZERO:
|
||||
ABSL_LOG(WARNING)
|
||||
<< "BORDER_ZERO mode is not supported by GPU "
|
||||
<< "implementation and will fall back into BORDER_REPLICATE";
|
||||
LOG(WARNING) << "BORDER_ZERO mode is not supported by GPU "
|
||||
<< "implementation and will fall back into BORDER_REPLICATE";
|
||||
break;
|
||||
case mediapipe::ImageCroppingCalculatorOptions::BORDER_REPLICATE:
|
||||
break;
|
||||
|
|
|
@ -24,7 +24,7 @@ message ImageCroppingCalculatorOptions {
|
|||
}
|
||||
|
||||
// Output texture buffer dimensions. The values defined in the options will be
|
||||
// overridden by the WIDTH and HEIGHT input streams if they exist.
|
||||
// overriden by the WIDTH and HEIGHT input streams if they exist.
|
||||
optional int32 width = 1;
|
||||
optional int32 height = 2;
|
||||
|
||||
|
|
|
@ -77,7 +77,7 @@ absl::StatusOr<double> ComputeFocalLengthInPixels(int image_width,
|
|||
return focal_length_pixels;
|
||||
}
|
||||
|
||||
absl::StatusOr<ImageFileProperties> GetImageFileProperties(
|
||||
absl::StatusOr<ImageFileProperties> GetImageFileProperites(
|
||||
const std::string& image_bytes) {
|
||||
easyexif::EXIFInfo result;
|
||||
int code = result.parseFrom(image_bytes);
|
||||
|
@ -92,11 +92,11 @@ absl::StatusOr<ImageFileProperties> GetImageFileProperties(
|
|||
properties.set_focal_length_mm(result.FocalLength);
|
||||
properties.set_focal_length_35mm(result.FocalLengthIn35mm);
|
||||
|
||||
MP_ASSIGN_OR_RETURN(auto focal_length_pixels,
|
||||
ComputeFocalLengthInPixels(properties.image_width(),
|
||||
properties.image_height(),
|
||||
properties.focal_length_35mm(),
|
||||
properties.focal_length_mm()));
|
||||
ASSIGN_OR_RETURN(auto focal_length_pixels,
|
||||
ComputeFocalLengthInPixels(properties.image_width(),
|
||||
properties.image_height(),
|
||||
properties.focal_length_35mm(),
|
||||
properties.focal_length_mm()));
|
||||
properties.set_focal_length_pixels(focal_length_pixels);
|
||||
|
||||
return properties;
|
||||
|
@ -151,7 +151,7 @@ class ImageFilePropertiesCalculator : public CalculatorBase {
|
|||
if (cc->InputSidePackets().NumEntries() == 1) {
|
||||
const std::string& image_bytes =
|
||||
cc->InputSidePackets().Index(0).Get<std::string>();
|
||||
MP_ASSIGN_OR_RETURN(properties_, GetImageFileProperties(image_bytes));
|
||||
ASSIGN_OR_RETURN(properties_, GetImageFileProperites(image_bytes));
|
||||
read_properties_ = true;
|
||||
}
|
||||
|
||||
|
@ -169,7 +169,7 @@ class ImageFilePropertiesCalculator : public CalculatorBase {
|
|||
return absl::OkStatus();
|
||||
}
|
||||
const std::string& image_bytes = cc->Inputs().Index(0).Get<std::string>();
|
||||
MP_ASSIGN_OR_RETURN(properties_, GetImageFileProperties(image_bytes));
|
||||
ASSIGN_OR_RETURN(properties_, GetImageFileProperites(image_bytes));
|
||||
read_properties_ = true;
|
||||
}
|
||||
if (read_properties_) {
|
||||
|
|
|
@ -12,7 +12,6 @@
|
|||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#include "absl/status/status.h"
|
||||
#include "mediapipe/calculators/image/image_transformation_calculator.pb.h"
|
||||
#include "mediapipe/calculators/image/rotation_mode.pb.h"
|
||||
#include "mediapipe/framework/calculator_framework.h"
|
||||
|
@ -28,7 +27,6 @@
|
|||
#include "mediapipe/gpu/scale_mode.pb.h"
|
||||
|
||||
#if !MEDIAPIPE_DISABLE_GPU
|
||||
#include "mediapipe/gpu/gl_base.h"
|
||||
#include "mediapipe/gpu/gl_calculator_helper.h"
|
||||
#include "mediapipe/gpu/gl_quad_renderer.h"
|
||||
#include "mediapipe/gpu/gl_simple_shaders.h"
|
||||
|
@ -62,42 +60,42 @@ constexpr char kVideoPrestreamTag[] = "VIDEO_PRESTREAM";
|
|||
|
||||
int RotationModeToDegrees(mediapipe::RotationMode_Mode rotation) {
|
||||
switch (rotation) {
|
||||
case mediapipe::RotationMode::UNKNOWN:
|
||||
case mediapipe::RotationMode::ROTATION_0:
|
||||
case mediapipe::RotationMode_Mode_UNKNOWN:
|
||||
case mediapipe::RotationMode_Mode_ROTATION_0:
|
||||
return 0;
|
||||
case mediapipe::RotationMode::ROTATION_90:
|
||||
case mediapipe::RotationMode_Mode_ROTATION_90:
|
||||
return 90;
|
||||
case mediapipe::RotationMode::ROTATION_180:
|
||||
case mediapipe::RotationMode_Mode_ROTATION_180:
|
||||
return 180;
|
||||
case mediapipe::RotationMode::ROTATION_270:
|
||||
case mediapipe::RotationMode_Mode_ROTATION_270:
|
||||
return 270;
|
||||
}
|
||||
}
|
||||
mediapipe::RotationMode_Mode DegreesToRotationMode(int degrees) {
|
||||
switch (degrees) {
|
||||
case 0:
|
||||
return mediapipe::RotationMode::ROTATION_0;
|
||||
return mediapipe::RotationMode_Mode_ROTATION_0;
|
||||
case 90:
|
||||
return mediapipe::RotationMode::ROTATION_90;
|
||||
return mediapipe::RotationMode_Mode_ROTATION_90;
|
||||
case 180:
|
||||
return mediapipe::RotationMode::ROTATION_180;
|
||||
return mediapipe::RotationMode_Mode_ROTATION_180;
|
||||
case 270:
|
||||
return mediapipe::RotationMode::ROTATION_270;
|
||||
return mediapipe::RotationMode_Mode_ROTATION_270;
|
||||
default:
|
||||
return mediapipe::RotationMode::UNKNOWN;
|
||||
return mediapipe::RotationMode_Mode_UNKNOWN;
|
||||
}
|
||||
}
|
||||
mediapipe::ScaleMode_Mode ParseScaleMode(
|
||||
mediapipe::ScaleMode_Mode scale_mode,
|
||||
mediapipe::ScaleMode_Mode default_mode) {
|
||||
switch (scale_mode) {
|
||||
case mediapipe::ScaleMode::DEFAULT:
|
||||
case mediapipe::ScaleMode_Mode_DEFAULT:
|
||||
return default_mode;
|
||||
case mediapipe::ScaleMode::STRETCH:
|
||||
case mediapipe::ScaleMode_Mode_STRETCH:
|
||||
return scale_mode;
|
||||
case mediapipe::ScaleMode::FIT:
|
||||
case mediapipe::ScaleMode_Mode_FIT:
|
||||
return scale_mode;
|
||||
case mediapipe::ScaleMode::FILL_AND_CROP:
|
||||
case mediapipe::ScaleMode_Mode_FILL_AND_CROP:
|
||||
return scale_mode;
|
||||
default:
|
||||
return default_mode;
|
||||
|
@ -210,8 +208,6 @@ class ImageTransformationCalculator : public CalculatorBase {
|
|||
|
||||
bool use_gpu_ = false;
|
||||
cv::Scalar padding_color_;
|
||||
ImageTransformationCalculatorOptions::InterpolationMode interpolation_mode_;
|
||||
|
||||
#if !MEDIAPIPE_DISABLE_GPU
|
||||
GlCalculatorHelper gpu_helper_;
|
||||
std::unique_ptr<QuadRenderer> rgb_renderer_;
|
||||
|
@ -347,11 +343,6 @@ absl::Status ImageTransformationCalculator::Open(CalculatorContext* cc) {
|
|||
options_.padding_color().green(),
|
||||
options_.padding_color().blue());
|
||||
|
||||
interpolation_mode_ = options_.interpolation_mode();
|
||||
if (options_.interpolation_mode() ==
|
||||
ImageTransformationCalculatorOptions::DEFAULT) {
|
||||
interpolation_mode_ = ImageTransformationCalculatorOptions::LINEAR;
|
||||
}
|
||||
if (use_gpu_) {
|
||||
#if !MEDIAPIPE_DISABLE_GPU
|
||||
// Let the helper access the GL context information.
|
||||
|
@ -466,48 +457,26 @@ absl::Status ImageTransformationCalculator::RenderCpu(CalculatorContext* cc) {
|
|||
ComputeOutputDimensions(input_width, input_height, &output_width,
|
||||
&output_height);
|
||||
|
||||
int opencv_interpolation_mode = cv::INTER_LINEAR;
|
||||
if (output_width_ > 0 && output_height_ > 0) {
|
||||
cv::Mat scaled_mat;
|
||||
if (scale_mode_ == mediapipe::ScaleMode::STRETCH) {
|
||||
if (interpolation_mode_ == ImageTransformationCalculatorOptions::LINEAR) {
|
||||
// Use INTER_AREA for downscaling if interpolation mode is set to
|
||||
// LINEAR.
|
||||
if (input_mat.cols > output_width_ && input_mat.rows > output_height_) {
|
||||
opencv_interpolation_mode = cv::INTER_AREA;
|
||||
|
||||
} else {
|
||||
opencv_interpolation_mode = cv::INTER_LINEAR;
|
||||
}
|
||||
} else {
|
||||
opencv_interpolation_mode = cv::INTER_NEAREST;
|
||||
}
|
||||
if (scale_mode_ == mediapipe::ScaleMode_Mode_STRETCH) {
|
||||
int scale_flag =
|
||||
input_mat.cols > output_width_ && input_mat.rows > output_height_
|
||||
? cv::INTER_AREA
|
||||
: cv::INTER_LINEAR;
|
||||
cv::resize(input_mat, scaled_mat, cv::Size(output_width_, output_height_),
|
||||
0, 0, opencv_interpolation_mode);
|
||||
0, 0, scale_flag);
|
||||
} else {
|
||||
const float scale =
|
||||
std::min(static_cast<float>(output_width_) / input_width,
|
||||
static_cast<float>(output_height_) / input_height);
|
||||
const int target_width = std::round(input_width * scale);
|
||||
const int target_height = std::round(input_height * scale);
|
||||
|
||||
if (interpolation_mode_ == ImageTransformationCalculatorOptions::LINEAR) {
|
||||
// Use INTER_AREA for downscaling if interpolation mode is set to
|
||||
// LINEAR.
|
||||
if (scale < 1.0f) {
|
||||
opencv_interpolation_mode = cv::INTER_AREA;
|
||||
} else {
|
||||
opencv_interpolation_mode = cv::INTER_LINEAR;
|
||||
}
|
||||
} else {
|
||||
opencv_interpolation_mode = cv::INTER_NEAREST;
|
||||
}
|
||||
|
||||
if (scale_mode_ == mediapipe::ScaleMode::FIT) {
|
||||
int scale_flag = scale < 1.0f ? cv::INTER_AREA : cv::INTER_LINEAR;
|
||||
if (scale_mode_ == mediapipe::ScaleMode_Mode_FIT) {
|
||||
cv::Mat intermediate_mat;
|
||||
cv::resize(input_mat, intermediate_mat,
|
||||
cv::Size(target_width, target_height), 0, 0,
|
||||
opencv_interpolation_mode);
|
||||
cv::Size(target_width, target_height), 0, 0, scale_flag);
|
||||
const int top = (output_height_ - target_height) / 2;
|
||||
const int bottom = output_height_ - target_height - top;
|
||||
const int left = (output_width_ - target_width) / 2;
|
||||
|
@ -519,7 +488,7 @@ absl::Status ImageTransformationCalculator::RenderCpu(CalculatorContext* cc) {
|
|||
padding_color_);
|
||||
} else {
|
||||
cv::resize(input_mat, scaled_mat, cv::Size(target_width, target_height),
|
||||
0, 0, opencv_interpolation_mode);
|
||||
0, 0, scale_flag);
|
||||
output_width = target_width;
|
||||
output_height = target_height;
|
||||
}
|
||||
|
@ -545,17 +514,17 @@ absl::Status ImageTransformationCalculator::RenderCpu(CalculatorContext* cc) {
|
|||
cv::warpAffine(input_mat, rotated_mat, rotation_mat, rotated_size);
|
||||
} else {
|
||||
switch (rotation_) {
|
||||
case mediapipe::RotationMode::UNKNOWN:
|
||||
case mediapipe::RotationMode::ROTATION_0:
|
||||
case mediapipe::RotationMode_Mode_UNKNOWN:
|
||||
case mediapipe::RotationMode_Mode_ROTATION_0:
|
||||
rotated_mat = input_mat;
|
||||
break;
|
||||
case mediapipe::RotationMode::ROTATION_90:
|
||||
case mediapipe::RotationMode_Mode_ROTATION_90:
|
||||
cv::rotate(input_mat, rotated_mat, cv::ROTATE_90_COUNTERCLOCKWISE);
|
||||
break;
|
||||
case mediapipe::RotationMode::ROTATION_180:
|
||||
case mediapipe::RotationMode_Mode_ROTATION_180:
|
||||
cv::rotate(input_mat, rotated_mat, cv::ROTATE_180);
|
||||
break;
|
||||
case mediapipe::RotationMode::ROTATION_270:
|
||||
case mediapipe::RotationMode_Mode_ROTATION_270:
|
||||
cv::rotate(input_mat, rotated_mat, cv::ROTATE_90_CLOCKWISE);
|
||||
break;
|
||||
}
|
||||
|
@ -592,7 +561,7 @@ absl::Status ImageTransformationCalculator::RenderGpu(CalculatorContext* cc) {
|
|||
ComputeOutputDimensions(input_width, input_height, &output_width,
|
||||
&output_height);
|
||||
|
||||
if (scale_mode_ == mediapipe::ScaleMode::FILL_AND_CROP) {
|
||||
if (scale_mode_ == mediapipe::ScaleMode_Mode_FILL_AND_CROP) {
|
||||
const float scale =
|
||||
std::min(static_cast<float>(output_width_) / input_width,
|
||||
static_cast<float>(output_height_) / input_height);
|
||||
|
@ -656,24 +625,9 @@ absl::Status ImageTransformationCalculator::RenderGpu(CalculatorContext* cc) {
|
|||
input.format());
|
||||
|
||||
gpu_helper_.BindFramebuffer(dst);
|
||||
|
||||
if (scale_mode_ == mediapipe::ScaleMode::FIT) {
|
||||
// In kFit scale mode, the rendered quad does not fill the whole
|
||||
// framebuffer, so clear it beforehand.
|
||||
glClearColor(padding_color_[0] / 255.0f, padding_color_[1] / 255.0f,
|
||||
padding_color_[2] / 255.0f, 1.0f);
|
||||
glClear(GL_COLOR_BUFFER_BIT);
|
||||
}
|
||||
|
||||
glActiveTexture(GL_TEXTURE1);
|
||||
glBindTexture(src1.target(), src1.name());
|
||||
|
||||
if (interpolation_mode_ == ImageTransformationCalculatorOptions::NEAREST) {
|
||||
// TODO: revert texture params.
|
||||
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
|
||||
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
|
||||
}
|
||||
|
||||
MP_RETURN_IF_ERROR(renderer->GlRender(
|
||||
src1.width(), src1.height(), dst.width(), dst.height(), scale_mode,
|
||||
rotation, flip_horizontally_, flip_vertically_,
|
||||
|
@ -698,8 +652,8 @@ void ImageTransformationCalculator::ComputeOutputDimensions(
|
|||
if (output_width_ > 0 && output_height_ > 0) {
|
||||
*output_width = output_width_;
|
||||
*output_height = output_height_;
|
||||
} else if (rotation_ == mediapipe::RotationMode::ROTATION_90 ||
|
||||
rotation_ == mediapipe::RotationMode::ROTATION_270) {
|
||||
} else if (rotation_ == mediapipe::RotationMode_Mode_ROTATION_90 ||
|
||||
rotation_ == mediapipe::RotationMode_Mode_ROTATION_270) {
|
||||
*output_width = input_height;
|
||||
*output_height = input_width;
|
||||
} else {
|
||||
|
@ -712,9 +666,9 @@ void ImageTransformationCalculator::ComputeOutputLetterboxPadding(
|
|||
int input_width, int input_height, int output_width, int output_height,
|
||||
std::array<float, 4>* padding) {
|
||||
padding->fill(0.f);
|
||||
if (scale_mode_ == mediapipe::ScaleMode::FIT) {
|
||||
if (rotation_ == mediapipe::RotationMode::ROTATION_90 ||
|
||||
rotation_ == mediapipe::RotationMode::ROTATION_270) {
|
||||
if (scale_mode_ == mediapipe::ScaleMode_Mode_FIT) {
|
||||
if (rotation_ == mediapipe::RotationMode_Mode_ROTATION_90 ||
|
||||
rotation_ == mediapipe::RotationMode_Mode_ROTATION_270) {
|
||||
std::swap(input_width, input_height);
|
||||
}
|
||||
const float input_aspect_ratio =
|
||||
|
|
|
@ -46,24 +46,12 @@ message ImageTransformationCalculatorOptions {
|
|||
optional bool flip_horizontally = 5 [default = false];
|
||||
// Scale mode.
|
||||
optional ScaleMode.Mode scale_mode = 6;
|
||||
// Padding type. This option is only used when the scale mode is FIT. If set
|
||||
// to true (default), a constant border is added with color specified by
|
||||
// padding_color. If set to false, a border is added by replicating edge
|
||||
// pixels (only supported for CPU).
|
||||
// Padding type. This option is only used when the scale mode is FIT.
|
||||
// Default is to use BORDER_CONSTANT. If set to false, it will use
|
||||
// BORDER_REPLICATE instead.
|
||||
optional bool constant_padding = 7 [default = true];
|
||||
|
||||
// The color for the padding. This option is only used when the scale mode is
|
||||
// FIT. Default is black.
|
||||
// FIT. Default is black. This is for CPU only.
|
||||
optional Color padding_color = 8;
|
||||
|
||||
// Interpolation method to use. Note that on CPU when LINEAR is specified,
|
||||
// INTER_LINEAR is used for upscaling and INTER_AREA is used for downscaling.
|
||||
enum InterpolationMode {
|
||||
DEFAULT = 0;
|
||||
LINEAR = 1;
|
||||
NEAREST = 2;
|
||||
}
|
||||
|
||||
// Mode DEFAULT will use LINEAR interpolation.
|
||||
optional InterpolationMode interpolation_mode = 9;
|
||||
}
|
||||
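The comment on interpolation_mode above encodes the same rule that the reverted RenderCpu changes implement: with LINEAR (or DEFAULT), downscaling uses cv::INTER_AREA and upscaling uses cv::INTER_LINEAR, while NEAREST always maps to cv::INTER_NEAREST. A minimal sketch of that mapping follows; the helper name and the boolean downscaling parameter are illustrative only and not part of the calculator's API.

#include "mediapipe/calculators/image/image_transformation_calculator.pb.h"
#include "mediapipe/framework/port/opencv_imgproc_inc.h"

namespace mediapipe {

// Illustrative helper (not part of the calculator): pick the OpenCV
// interpolation flag for a given InterpolationMode and scaling direction.
int ToOpenCvInterpolation(
    ImageTransformationCalculatorOptions::InterpolationMode mode,
    bool downscaling) {
  if (mode == ImageTransformationCalculatorOptions::NEAREST) {
    return cv::INTER_NEAREST;
  }
  // DEFAULT behaves like LINEAR: INTER_AREA preserves detail when shrinking,
  // INTER_LINEAR is used otherwise.
  return downscaling ? cv::INTER_AREA : cv::INTER_LINEAR;
}

}  // namespace mediapipe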
|
|