Project import generated by Copybara.

GitOrigin-RevId: 3864d8399e063cfcacc9e3f6e7ba653ea9045c52
MediaPipe Team 2020-06-09 16:59:11 -04:00 committed by chuoling
parent b09cc090d4
commit 59ee17c1f3
19 changed files with 130 additions and 685 deletions

BUILD

@@ -1,4 +1,4 @@
# Copyright 2019 The MediaPipe Authors.
# Copyright 2019-2020 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.


@@ -127,7 +127,7 @@ run code search using
* [Discuss](https://groups.google.com/forum/#!forum/mediapipe) - General
community discussion around MediaPipe
## Alpha Disclaimer
## Alpha disclaimer
MediaPipe is currently in alpha at v0.7. We may still be making breaking API
changes and expect to get to stable APIs by v1.0.


@@ -19,7 +19,7 @@ project = 'MediaPipe'
author = 'Google LLC'
# The full version, including alpha/beta/rc tags
release = 'v0.5'
release = 'v0.7.5'
# -- General configuration ---------------------------------------------------


@@ -1,96 +0,0 @@
---
nav_exclude: true
---
# Examples
Below are code samples showing how to run MediaPipe on both mobile and desktop. We
currently support MediaPipe APIs on mobile for Android only, but will add support
for Objective-C shortly.
## Mobile
### [Hello World! on Android](./getting_started/hello_world_android.md)
This should be the first mobile Android example users go through in detail. It
teaches the following:
* Introduction of a simple MediaPipe graph running on mobile GPUs for
[Sobel edge detection](https://en.wikipedia.org/wiki/Sobel_operator).
* Building a simple baseline Android application that displays "Hello World!".
* Adding camera preview support into the baseline application using the
Android [CameraX] API.
* Incorporating the Sobel edge detection graph to process the live camera
preview and display the processed video in real-time.
[Sobel edge detection]:https://en.wikipedia.org/wiki/Sobel_operator
[CameraX]:https://developer.android.com/training/camerax
### [Hello World! on iOS](./getting_started/hello_world_ios.md)
This is the iOS version of the Sobel edge detection example.
### [Face Detection](./solutions/face_detection.md)
### [Face Mesh](./solutions/face_mesh.md)
### [Hand](./solutions/hand.md)
### [Hair Segmentation](./solutions/hair_segmentation.md)
### [Object Detection](./solutions/object_detection.md)
### [Box Tracking](./solutions/box_tracking.md)
### [Objectron: 3D Object Detection](./solutions/objectron.md)
### [KNIFT: Template-based Feature Matching](./solutions/knift.md)
## Desktop
### [Hello World for C++](./getting_started/hello_world_desktop.md)
This shows how to run a simple graph using the MediaPipe C++ APIs.
### [Face Detection](./solutions/face_detection.md)
### [Face Mesh](./solutions/face_mesh.md)
### [Hand](./solutions/hand.md)
### [Hair Segmentation](./solutions/hair_segmentation.md)
### [Object Detection](./solutions/object_detection.md)
### [Box Tracking](./solutions/box_tracking.md)
### [AutoFlip - Semantic-aware Video Cropping](./solutions/autoflip.md)
### [Preparing Data Sets with MediaSequence](./solutions/media_sequence.md)
This shows how to use MediaPipe for media processing to prepare video data sets
for training a TensorFlow model.
### [Feature Extraction and Model Inference for YouTube-8M Challenge](./solutions/youtube_8m.md)
This shows how to use MediaPipe to prepare training data for the YouTube-8M
Challenge and do the model inference with the baseline model.
## Google Coral (ML acceleration with Google EdgeTPU)
### [Face Detection](./solutions/face_detection.md)
### [Object Detection](./solutions/object_detection.md)
## Web Browser
See more details [here](./getting_started/web.md) and
[Google Developer blog post](https://mediapipe.page.link/webdevblog).
### [Face Detection in Browser](https://viz.mediapipe.dev/demo/face_detection)
### [Hand Detection in Browser](https://viz.mediapipe.dev/demo/hand_detection)
### [Hand Tracking in Browser](https://viz.mediapipe.dev/demo/hand_tracking)
### [Hair Segmentation in Browser](https://viz.mediapipe.dev/demo/hair_segmentation)


@@ -127,7 +127,7 @@ run code search using
* [Discuss](https://groups.google.com/forum/#!forum/mediapipe) - General
community discussion around MediaPipe
## Alpha Disclaimer
## Alpha disclaimer
MediaPipe is currently in alpha at v0.7. We may still be making breaking API
changes and expect to get to stable APIs by v1.0.


@@ -1,66 +1,3 @@
MediaPipe
=====================================
`MediaPipe <http://github.com/google/mediapipe>`_ is a graph-based framework for
building multimodal (video, audio, and sensor) applied machine learning pipelines.
MediaPipe is cross-platform, running on mobile devices, workstations and servers,
and supports mobile GPU acceleration. With MediaPipe, an applied
machine learning pipeline can be built as a graph of modular components,
including, for instance, inference models and media processing functions. Sensory
data such as audio and video streams enter the graph, and perceived descriptions
such as object-localization and face-landmark streams exit the graph. An example
graph that performs real-time hand tracking on mobile GPU is shown below.
.. image:: images/mobile/hand_tracking_mobile.png
:width: 400
:alt: Example MediaPipe graph
MediaPipe is designed for machine learning (ML) practitioners, including
researchers, students, and software developers, who implement production-ready
ML applications, publish code accompanying research work, and build technology
prototypes. The main use case for MediaPipe is rapid prototyping of applied
machine learning pipelines with inference models and other reusable components.
MediaPipe also facilitates the deployment of machine learning technology into
demos and applications on a wide variety of different hardware platforms
(e.g., Android, iOS, workstations).
APIs for MediaPipe
* Calculator API in C++
* Graph Construction API in ProtoBuf
* (Coming Soon) Graph Construction API in C++
* Graph Execution API in C++
* Graph Execution API in Java (Android)
* Graph Execution API in Objective-C (iOS)
Alpha Disclaimer
==================
MediaPipe is currently in alpha at v0.6. We are still making breaking API changes and expect to get to stable APIs by v1.0. We recommend that you target a specific version of MediaPipe, and periodically bump to the latest release. That way you have control over when a breaking change affects you.
User Documentation
==================
.. toctree::
:maxdepth: 3
getting_started/install
Examples <examples>
tools/visualizer
tools/tracing_and_profiling
tools/performance_benchmarking
getting_started/help
getting_started/faq
getting_started/troubleshooting
framework_concepts/framework_concepts
framework_concepts/calculators
framework_concepts/graphs
framework_concepts/packets
framework_concepts/synchronization
framework_concepts/gpu
license
Indices and tables
==================
* :ref:`genindex`
* :ref:`modindex`
* :ref:`search`
Please see https://docs.mediapipe.dev.


@@ -1,209 +0,0 @@
---
nav_exclude: true
---
License
===============
Copyright 2019 The MediaPipe Authors. All rights reserved.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2017, The MediaPipe Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -144,6 +144,10 @@ public class MainActivity extends AppCompatActivity {
previewDisplayView.setVisibility(View.VISIBLE);
}
protected Size cameraTargetResolution() {
return null; // No preference; let the camera (helper) decide.
}
public void startCamera() {
cameraHelper = new CameraXPreviewHelper();
cameraHelper.setOnCameraStartedListener(
@@ -154,7 +158,30 @@ public class MainActivity extends AppCompatActivity {
applicationInfo.metaData.getBoolean("cameraFacingFront", false)
? CameraHelper.CameraFacing.FRONT
: CameraHelper.CameraFacing.BACK;
cameraHelper.startCamera(this, cameraFacing, /*surfaceTexture=*/ null);
cameraHelper.startCamera(
this, cameraFacing, /*surfaceTexture=*/ null, cameraTargetResolution());
}
protected Size computeViewSize(int width, int height) {
return new Size(width, height);
}
protected void onPreviewDisplaySurfaceChanged(
SurfaceHolder holder, int format, int width, int height) {
// (Re-)Compute the ideal size of the camera-preview display (the area that the
// camera-preview frames get rendered onto, potentially with scaling and rotation)
// based on the size of the SurfaceView that contains the display.
Size viewSize = computeViewSize(width, height);
Size displaySize = cameraHelper.computeDisplaySizeFromViewSize(viewSize);
boolean isCameraRotated = cameraHelper.isCameraRotated();
// Connect the converter to the camera-preview frames as its input (via
// previewFrameTexture), and configure the output width and height as the computed
// display size.
converter.setSurfaceTextureAndAttachToGLContext(
previewFrameTexture,
isCameraRotated ? displaySize.getHeight() : displaySize.getWidth(),
isCameraRotated ? displaySize.getWidth() : displaySize.getHeight());
}
private void setupPreviewDisplayView() {
@@ -173,20 +200,7 @@ public class MainActivity extends AppCompatActivity {
@Override
public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
// (Re-)Compute the ideal size of the camera-preview display (the area that the
// camera-preview frames get rendered onto, potentially with scaling and rotation)
// based on the size of the SurfaceView that contains the display.
Size viewSize = new Size(width, height);
Size displaySize = cameraHelper.computeDisplaySizeFromViewSize(viewSize);
boolean isCameraRotated = cameraHelper.isCameraRotated();
// Connect the converter to the camera-preview frames as its input (via
// previewFrameTexture), and configure the output width and height as the computed
// display size.
converter.setSurfaceTextureAndAttachToGLContext(
previewFrameTexture,
isCameraRotated ? displaySize.getHeight() : displaySize.getWidth(),
isCameraRotated ? displaySize.getWidth() : displaySize.getHeight());
onPreviewDisplaySurfaceChanged(holder, format, width, height);
}
@Override
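The hunk above turns the basic MainActivity into an extensible base class: the target camera resolution and the preview view size are now protected hooks instead of hard-coded values. A minimal sketch of a derived activity using the new hooks (the class name here is hypothetical; the object-detection-3D activity later in this commit overrides them the same way):

    import android.util.Size;

    // Hypothetical subclass of the shared base activity; only the two new
    // hooks are overridden, everything else is inherited.
    public class MyDemoActivity extends com.google.mediapipe.apps.basic.MainActivity {
      @Override
      protected Size cameraTargetResolution() {
        return new Size(1280, 960); // Request a specific capture size instead of null.
      }

      @Override
      protected Size computeViewSize(int width, int height) {
        return new Size(height, height * 3 / 4); // Letterbox the preview to 3:4.
      }
    }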


@@ -1,33 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<manifest xmlns:android="http://schemas.android.com/apk/res/android"
package="com.google.mediapipe.apps.objectdetection3d">
<uses-sdk
android:minSdkVersion="21"
android:targetSdkVersion="27" />
<!-- For using the camera -->
<uses-permission android:name="android.permission.CAMERA" />
<uses-feature android:name="android.hardware.camera" />
<uses-feature android:name="android.hardware.camera.autofocus" />
<!-- For MediaPipe -->
<uses-feature android:glEsVersion="0x00020000" android:required="true" />
<application
android:allowBackup="true"
android:label="@string/app_name"
android:supportsRtl="true"
android:theme="@style/AppTheme">
<activity
android:name=".MainActivity"
android:exported="true"
android:screenOrientation="portrait">
<intent-filter>
<action android:name="android.intent.action.MAIN" />
<category android:name="android.intent.category.LAUNCHER" />
</intent-filter>
</activity>
</application>
</manifest>


@@ -41,16 +41,13 @@ config_setting(
},
)
# Maps the binary graph to an alias (e.g., the app name) for convenience so that the alias can be
# easily incorporated into the app via, for example,
# MainActivity.BINARY_GRAPH_NAME = "appname.binarypb".
genrule(
name = "binary_graph",
srcs = select({
"//conditions:default": ["//mediapipe/graphs/object_detection_3d:mobile_gpu_binary_graph_shoe"],
":use_chair_model": ["//mediapipe/graphs/object_detection_3d:mobile_gpu_binary_graph_chair"],
}),
outs = ["objectdetection3d.binarypb"],
outs = ["object_detection_3d.binarypb"],
cmd = "cp $< $@",
)
@@ -64,8 +61,8 @@ genrule(
cmd = "cp $< $@",
)
android_library(
name = "mediapipe_lib",
android_binary(
name = "objectdetection3d",
srcs = glob(["*.java"]),
assets = [
":binary_graph",
@@ -83,33 +80,22 @@ android_library(
],
}),
assets_dir = "",
manifest = "AndroidManifest.xml",
resource_files = glob(["res/**"]),
deps = [
":mediapipe_jni_lib",
"//mediapipe/framework/formats:landmark_java_proto_lite",
"//mediapipe/java/com/google/mediapipe/components:android_camerax_helper",
"//mediapipe/java/com/google/mediapipe/components:android_components",
"//mediapipe/java/com/google/mediapipe/framework:android_framework",
"//mediapipe/java/com/google/mediapipe/glutil",
"//third_party:androidx_appcompat",
"//third_party:androidx_constraint_layout",
"//third_party:androidx_legacy_support_v4",
"//third_party:androidx_recyclerview",
"//third_party:opencv",
"@maven//:androidx_concurrent_concurrent_futures",
"@maven//:androidx_lifecycle_lifecycle_common",
"@maven//:com_google_code_findbugs_jsr305",
"@maven//:com_google_guava_guava",
],
)
android_binary(
name = "objectdetection3d",
manifest = "AndroidManifest.xml",
manifest_values = {"applicationId": "com.google.mediapipe.apps.objectdetection3d"},
manifest = "//mediapipe/examples/android/src/java/com/google/mediapipe/apps/basic:AndroidManifest.xml",
manifest_values = {
"applicationId": "com.google.mediapipe.apps.objectdetection3d",
"appName": "Object Detection 3D",
"mainActivity": ".MainActivity",
"cameraFacingFront": "False",
"binaryGraphName": "object_detection_3d.binarypb",
"inputVideoStreamName": "input_video",
"outputVideoStreamName": "output_video",
"flipFramesVertically": "True",
},
multidex = "native",
deps = [
":mediapipe_lib",
":mediapipe_jni_lib",
"//mediapipe/examples/android/src/java/com/google/mediapipe/apps/basic:basic_lib",
"//mediapipe/framework/formats:landmark_java_proto_lite",
"//mediapipe/java/com/google/mediapipe/framework:android_framework",
],
)
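The manifest_values above replace the per-app AndroidManifest.xml, resources, and Java constants: each setting is injected as manifest metadata and read by the shared base activity at runtime. A rough sketch of that lookup (the base activity already reads cameraFacingFront this way, as shown earlier; the helper method below is illustrative, not part of this commit):

    import android.content.pm.ApplicationInfo;
    import android.content.pm.PackageManager;
    import android.util.Log;

    // Illustrative only: fetch the settings injected via manifest_values.
    private void loadManifestMetadata() {
      try {
        ApplicationInfo info =
            getPackageManager()
                .getApplicationInfo(getPackageName(), PackageManager.GET_META_DATA);
        String binaryGraphName = info.metaData.getString("binaryGraphName");
        boolean flipFramesVertically =
            info.metaData.getBoolean("flipFramesVertically", false);
        Log.i("MainActivity", "Graph: " + binaryGraphName + ", flip: " + flipFramesVertically);
      } catch (PackageManager.NameNotFoundException e) {
        Log.e("MainActivity", "Cannot find application info.", e);
      }
    }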


@@ -16,102 +16,33 @@ package com.google.mediapipe.apps.objectdetection3d;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.graphics.SurfaceTexture;
import android.os.Bundle;
import androidx.appcompat.app.AppCompatActivity;
import android.util.Log;
import android.util.Size;
import android.view.SurfaceHolder;
import android.view.SurfaceView;
import android.view.View;
import android.view.ViewGroup;
import com.google.mediapipe.components.CameraHelper;
import com.google.mediapipe.components.CameraXPreviewHelper;
import com.google.mediapipe.components.ExternalTextureConverter;
import com.google.mediapipe.components.FrameProcessor;
import com.google.mediapipe.components.PermissionHelper;
import com.google.mediapipe.framework.AndroidAssetUtil;
import com.google.mediapipe.framework.AndroidPacketCreator;
import com.google.mediapipe.framework.Packet;
import com.google.mediapipe.glutil.EglManager;
import java.io.InputStream;
import java.util.HashMap;
import java.util.Map;
/** Main activity of MediaPipe example apps. */
public class MainActivity extends AppCompatActivity {
/** Main activity of MediaPipe object detection 3D app. */
public class MainActivity extends com.google.mediapipe.apps.basic.MainActivity {
private static final String TAG = "MainActivity";
private static final String BINARY_GRAPH_NAME = "objectdetection3d.binarypb";
private static final String INPUT_VIDEO_STREAM_NAME = "input_video";
private static final String OUTPUT_VIDEO_STREAM_NAME = "output_video";
private static final String OBJ_TEXTURE = "texture.bmp";
private static final String OBJ_FILE = "model.obj.uuu";
private static final String BOX_TEXTURE = "classic_colors.png";
private static final String BOX_FILE = "box.obj.uuu";
private static final CameraHelper.CameraFacing CAMERA_FACING = CameraHelper.CameraFacing.BACK;
// Flips the camera-preview frames vertically before sending them into FrameProcessor to be
// processed in a MediaPipe graph, and flips the processed frames back when they are displayed.
// This is needed because OpenGL represents images assuming the image origin is at the bottom-left
// corner, whereas MediaPipe in general assumes the image origin is at top-left.
private static final boolean FLIP_FRAMES_VERTICALLY = true;
// Target resolution should be 4:3 for this application, as expected by the model and tracker.
private static final Size TARGET_RESOLUTION = new Size(1280, 960);
static {
// Load all native libraries needed by the app.
System.loadLibrary("mediapipe_jni");
System.loadLibrary("opencv_java3");
}
// {@link SurfaceTexture} where the camera-preview frames can be accessed.
private SurfaceTexture previewFrameTexture;
// {@link SurfaceView} that displays the camera-preview frames processed by a MediaPipe graph.
private SurfaceView previewDisplayView;
// Creates and manages an {@link EGLContext}.
private EglManager eglManager;
// Sends camera-preview frames into a MediaPipe graph for processing, and displays the processed
// frames onto a {@link Surface}.
private FrameProcessor processor;
// Converts the GL_TEXTURE_EXTERNAL_OES texture from Android camera into a regular texture to be
// consumed by {@link FrameProcessor} and the underlying MediaPipe graph.
private ExternalTextureConverter converter;
// Handles camera access via the {@link CameraX} Jetpack support library.
private CameraXPreviewHelper cameraHelper;
// Assets.
private Bitmap objTexture = null;
private Bitmap boxTexture = null;
Size cameraImageSize;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
previewDisplayView = new SurfaceView(this);
setupPreviewDisplayView();
// Initialize asset manager so that MediaPipe native libraries can access the app assets, e.g.,
// binary graphs.
AndroidAssetUtil.initializeNativeAssetManager(this);
eglManager = new EglManager(null);
processor =
new FrameProcessor(
this,
eglManager.getNativeContext(),
BINARY_GRAPH_NAME,
INPUT_VIDEO_STREAM_NAME,
OUTPUT_VIDEO_STREAM_NAME);
processor.getVideoSurfaceOutput().setFlipY(FLIP_FRAMES_VERTICALLY);
prepareDemoAssets();
AndroidPacketCreator packetCreator = processor.getPacketCreator();
@@ -121,134 +52,60 @@ public class MainActivity extends AppCompatActivity {
inputSidePackets.put("obj_texture", packetCreator.createRgbaImageFrame(objTexture));
inputSidePackets.put("box_texture", packetCreator.createRgbaImageFrame(boxTexture));
processor.setInputSidePackets(inputSidePackets);
PermissionHelper.checkAndRequestCameraPermissions(this);
}
@Override
protected void onResume() {
super.onResume();
converter = new ExternalTextureConverter(eglManager.getContext());
converter.setFlipY(FLIP_FRAMES_VERTICALLY);
converter.setConsumer(processor);
if (PermissionHelper.cameraPermissionsGranted(this)) {
startCamera();
}
protected Size cameraTargetResolution() {
return new Size(1280, 960); // Prefer 4:3 aspect ratio (camera size is in landscape).
}
@Override
protected void onPause() {
super.onPause();
converter.close();
protected Size computeViewSize(int width, int height) {
return new Size(height, height * 3 / 4); // Prefer 3:4 aspect ratio.
}
@Override
public void onRequestPermissionsResult(
int requestCode, String[] permissions, int[] grantResults) {
super.onRequestPermissionsResult(requestCode, permissions, grantResults);
PermissionHelper.onRequestPermissionsResult(requestCode, permissions, grantResults);
}
protected void onPreviewDisplaySurfaceChanged(
SurfaceHolder holder, int format, int width, int height) {
super.onPreviewDisplaySurfaceChanged(holder, format, width, height);
private void setupPreviewDisplayView() {
previewDisplayView.setVisibility(View.GONE);
ViewGroup viewGroup = findViewById(R.id.preview_display_layout);
viewGroup.addView(previewDisplayView);
boolean isCameraRotated = cameraHelper.isCameraRotated();
Size cameraImageSize = cameraHelper.getFrameSize();
processor.setOnWillAddFrameListener(
(timestamp) -> {
try {
int cameraTextureWidth =
isCameraRotated ? cameraImageSize.getHeight() : cameraImageSize.getWidth();
int cameraTextureHeight =
isCameraRotated ? cameraImageSize.getWidth() : cameraImageSize.getHeight();
previewDisplayView
.getHolder()
.addCallback(
new SurfaceHolder.Callback() {
@Override
public void surfaceCreated(SurfaceHolder holder) {
processor.getVideoSurfaceOutput().setSurface(holder.getSurface());
}
// Find limiting side and scale to 3:4 aspect ratio
float aspectRatio = (float) cameraTextureWidth / (float) cameraTextureHeight;
if (aspectRatio > 3.0 / 4.0) {
// width too big
cameraTextureWidth = (int) ((float) cameraTextureHeight * 3.0 / 4.0);
} else {
// height too big
cameraTextureHeight = (int) ((float) cameraTextureWidth * 4.0 / 3.0);
}
Packet widthPacket = processor.getPacketCreator().createInt32(cameraTextureWidth);
Packet heightPacket = processor.getPacketCreator().createInt32(cameraTextureHeight);
@Override
public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
// (Re-)Compute the ideal size of the camera-preview display (the area that the
// camera-preview frames get rendered onto, potentially with scaling and rotation)
// based on the size of the SurfaceView that contains the display.
Size viewSize = new Size(height, height * 3 / 4); // Prefer 3:4 aspect ratio.
Size displaySize = cameraHelper.computeDisplaySizeFromViewSize(viewSize);
boolean isCameraRotated = cameraHelper.isCameraRotated();
cameraImageSize = cameraHelper.getFrameSize();
// Connect the converter to the camera-preview frames as its input (via
// previewFrameTexture), and configure the output width and height as the computed
// display size.
converter.setSurfaceTextureAndAttachToGLContext(
previewFrameTexture,
isCameraRotated ? displaySize.getHeight() : displaySize.getWidth(),
isCameraRotated ? displaySize.getWidth() : displaySize.getHeight());
processor.setOnWillAddFrameListener(
(timestamp) -> {
try {
int cameraTextureWidth =
isCameraRotated
? cameraImageSize.getHeight()
: cameraImageSize.getWidth();
int cameraTextureHeight =
isCameraRotated
? cameraImageSize.getWidth()
: cameraImageSize.getHeight();
// Find limiting side and scale to 3:4 aspect ratio
float aspectRatio =
(float) cameraTextureWidth / (float) cameraTextureHeight;
if (aspectRatio > 3.0 / 4.0) {
// width too big
cameraTextureWidth = (int) ((float) cameraTextureHeight * 3.0 / 4.0);
} else {
// height too big
cameraTextureHeight = (int) ((float) cameraTextureWidth * 4.0 / 3.0);
}
Packet widthPacket =
processor.getPacketCreator().createInt32(cameraTextureWidth);
Packet heightPacket =
processor.getPacketCreator().createInt32(cameraTextureHeight);
try {
processor
.getGraph()
.addPacketToInputStream("input_width", widthPacket, timestamp);
processor
.getGraph()
.addPacketToInputStream("input_height", heightPacket, timestamp);
} catch (Exception e) {
Log.e(
TAG,
"MediaPipeException encountered adding packets to width and height"
+ " input streams.");
}
widthPacket.release();
heightPacket.release();
} catch (IllegalStateException ise) {
Log.e(
TAG,
"Exception while adding packets to width and height input streams.");
}
});
}
@Override
public void surfaceDestroyed(SurfaceHolder holder) {
processor.getVideoSurfaceOutput().setSurface(null);
}
});
}
private void startCamera() {
cameraHelper = new CameraXPreviewHelper();
cameraHelper.setOnCameraStartedListener(
surfaceTexture -> {
previewFrameTexture = surfaceTexture;
// Make the display view visible to start showing the preview. This triggers the
// SurfaceHolder.Callback added to (the holder of) previewDisplayView.
previewDisplayView.setVisibility(View.VISIBLE);
try {
processor.getGraph().addPacketToInputStream("input_width", widthPacket, timestamp);
processor.getGraph().addPacketToInputStream("input_height", heightPacket, timestamp);
} catch (RuntimeException e) {
Log.e(
TAG,
"MediaPipeException encountered adding packets to width and height"
+ " input streams.");
}
widthPacket.release();
heightPacket.release();
} catch (IllegalStateException ise) {
Log.e(TAG, "Exception while adding packets to width and height input streams.");
}
});
cameraHelper.startCamera(
this, CAMERA_FACING, /*surfaceTexture=*/ null, /*targetSize=*/ TARGET_RESOLUTION);
cameraImageSize = cameraHelper.getFrameSize();
}
private void prepareDemoAssets() {
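The interleaved hunk above is hard to read in this view; the net effect of the new code is roughly the per-frame flow below, sketched with the processor, cameraHelper, and TAG members inherited from the base activity (an illustrative consolidation, not a verbatim copy of the new file):

    processor.setOnWillAddFrameListener(
        (timestamp) -> {
          try {
            Size frame = cameraHelper.getFrameSize();
            boolean rotated = cameraHelper.isCameraRotated();
            int w = rotated ? frame.getHeight() : frame.getWidth();
            int h = rotated ? frame.getWidth() : frame.getHeight();
            // Clamp the limiting side so the reported texture size is 3:4.
            if ((float) w / (float) h > 3.0f / 4.0f) {
              w = (int) (h * 3.0 / 4.0); // Width too big.
            } else {
              h = (int) (w * 4.0 / 3.0); // Height too big.
            }
            Packet widthPacket = processor.getPacketCreator().createInt32(w);
            Packet heightPacket = processor.getPacketCreator().createInt32(h);
            try {
              processor.getGraph().addPacketToInputStream("input_width", widthPacket, timestamp);
              processor.getGraph().addPacketToInputStream("input_height", heightPacket, timestamp);
            } catch (RuntimeException e) {
              Log.e(TAG, "Exception adding packets to width/height input streams.", e);
            }
            widthPacket.release();
            heightPacket.release();
          } catch (IllegalStateException ise) {
            Log.e(TAG, "Exception while computing the camera frame size.", ise);
          }
        });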


@@ -1,20 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<androidx.constraintlayout.widget.ConstraintLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:app="http://schemas.android.com/apk/res-auto"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent">
<FrameLayout
android:id="@+id/preview_display_layout"
android:layout_width="fill_parent"
android:layout_height="fill_parent"
android:layout_weight="1">
<TextView
android:id="@+id/no_camera_access_view"
android:layout_height="fill_parent"
android:layout_width="fill_parent"
android:gravity="center"
android:text="@string/no_camera_access" />
</FrameLayout>
</androidx.constraintlayout.widget.ConstraintLayout>


@@ -1,6 +0,0 @@
<?xml version="1.0" encoding="utf-8"?>
<resources>
<color name="colorPrimary">#008577</color>
<color name="colorPrimaryDark">#00574B</color>
<color name="colorAccent">#D81B60</color>
</resources>


@@ -1,4 +0,0 @@
<resources>
<string name="app_name" translatable="false">Object Detection 3D</string>
<string name="no_camera_access" translatable="false">Please grant camera permissions.</string>
</resources>


@@ -1,11 +0,0 @@
<resources>
<!-- Base application theme. -->
<style name="AppTheme" parent="Theme.AppCompat.Light.DarkActionBar">
<!-- Customize your theme here. -->
<item name="colorPrimary">@color/colorPrimary</item>
<item name="colorPrimaryDark">@color/colorPrimaryDark</item>
<item name="colorAccent">@color/colorAccent</item>
</style>
</resources>


@@ -84,6 +84,8 @@ FrameRotation FrameRotationFromDegrees(int degrees_ccw) {
scale_unif_ = glGetUniformLocation(program_, "scale");
RET_CHECK(scale_unif_ != -1) << "could not find uniform 'scale'";
glGenBuffers(2, vbo_);
return ::mediapipe::OkStatus();
}
@@ -92,6 +94,11 @@ void QuadRenderer::GlTeardown() {
glDeleteProgram(program_);
program_ = 0;
}
if (vbo_[0]) {
glDeleteBuffers(2, vbo_);
vbo_[0] = 0;
vbo_[1] = 0;
}
}
::mediapipe::Status QuadRenderer::GlRender(
@@ -155,13 +162,31 @@ void QuadRenderer::GlTeardown() {
}
// Draw.
glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, 0, 0, vertices);
// TODO: In practice, our vertex attributes almost never change, so
// convert this to being actually static, with initialization done in the
// GLSetup.
glEnableVertexAttribArray(ATTRIB_VERTEX);
glVertexAttribPointer(
ATTRIB_TEXTURE_POSITION, 2, GL_FLOAT, 0, 0,
flip_texture ? kBasicTextureVerticesFlipY : kBasicTextureVertices);
glBindBuffer(GL_ARRAY_BUFFER, vbo_[0]);
glBufferData(GL_ARRAY_BUFFER, sizeof(mediapipe::kBasicSquareVertices),
mediapipe::kBasicSquareVertices, GL_STATIC_DRAW);
glVertexAttribPointer(ATTRIB_VERTEX, 2, GL_FLOAT, 0, 0, nullptr);
glEnableVertexAttribArray(ATTRIB_TEXTURE_POSITION);
glBindBuffer(GL_ARRAY_BUFFER, vbo_[1]);
glBufferData(
GL_ARRAY_BUFFER,
flip_texture ? sizeof(mediapipe::kBasicTextureVerticesFlipY)
: sizeof(mediapipe::kBasicTextureVertices),
flip_texture ? kBasicTextureVerticesFlipY : kBasicTextureVertices,
GL_STATIC_DRAW);
glVertexAttribPointer(ATTRIB_TEXTURE_POSITION, 2, GL_FLOAT, 0, 0, nullptr);
glDrawArrays(GL_TRIANGLE_STRIP, 0, 4);
glDisableVertexAttribArray(ATTRIB_VERTEX);
glDisableVertexAttribArray(ATTRIB_TEXTURE_POSITION);
glBindBuffer(GL_ARRAY_BUFFER, 0);
return ::mediapipe::OkStatus();
}
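The renderer change above replaces client-side vertex arrays with two vertex buffer objects: generated in GlSetup, filled and pointed at during GlRender, and deleted in GlTeardown. For readers following along in the Java GLES bindings, the same upload-then-point-at-offset pattern looks roughly like this (a standalone sketch, not MediaPipe code):

    import android.opengl.GLES20;
    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;
    import java.nio.FloatBuffer;

    // Sketch: draw a full-screen quad from a VBO instead of a client-side array.
    static void drawQuadFromVbo(int attribVertex) {
      float[] squareVertices = {-1f, -1f, 1f, -1f, -1f, 1f, 1f, 1f};
      int[] vbo = new int[1];
      GLES20.glGenBuffers(1, vbo, 0);

      FloatBuffer vertexData =
          ByteBuffer.allocateDirect(squareVertices.length * 4)
              .order(ByteOrder.nativeOrder())
              .asFloatBuffer()
              .put(squareVertices);
      vertexData.position(0);

      GLES20.glEnableVertexAttribArray(attribVertex);
      GLES20.glBindBuffer(GLES20.GL_ARRAY_BUFFER, vbo[0]);
      GLES20.glBufferData(GLES20.GL_ARRAY_BUFFER, squareVertices.length * 4,
          vertexData, GLES20.GL_STATIC_DRAW);
      // With a buffer bound, the last argument is a byte offset into the VBO.
      GLES20.glVertexAttribPointer(attribVertex, 2, GLES20.GL_FLOAT, false, 0, 0);

      GLES20.glDrawArrays(GLES20.GL_TRIANGLE_STRIP, 0, 4);
      GLES20.glDisableVertexAttribArray(attribVertex);
      GLES20.glBindBuffer(GLES20.GL_ARRAY_BUFFER, 0);
      GLES20.glDeleteBuffers(1, vbo, 0); // In real code this belongs in teardown.
    }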


@@ -83,6 +83,7 @@ class QuadRenderer {
GLuint program_ = 0;
GLint scale_unif_ = -1;
std::vector<GLint> frame_unifs_;
GLuint vbo_[2] = {0, 0}; // for vertex buffer storage
};
::mediapipe::Status FrameRotationFromInt(FrameRotation* rotation,


@@ -57,7 +57,7 @@ class GlThreadCollector {
return active_threads_ == 0;
};
absl::MutexLock l(&mutex_);
mutex_.Await(Condition(&done));
mutex_.Await(absl::Condition(&done));
}
absl::Mutex mutex_;


@@ -74,6 +74,10 @@ public class CameraXPreviewHelper extends CameraHelper {
public void startCamera(
Activity context, CameraFacing cameraFacing, SurfaceTexture surfaceTexture, Size targetSize) {
if (targetSize == null) {
targetSize = TARGET_SIZE;
}
LensFacing cameraLensFacing =
cameraFacing == CameraHelper.CameraFacing.FRONT ? LensFacing.FRONT : LensFacing.BACK;
PreviewConfig previewConfig =