Project import generated by Copybara.
PiperOrigin-RevId: 264105834
This commit is contained in:
parent 71a47bb18b
commit f5df228d9b
@@ -8,33 +8,24 @@ that performs face detection with TensorFlow Lite on GPU.

## Android

-Please see [Hello World! in MediaPipe on Android](hello_world_android.md) for
-general instructions to develop an Android application that uses MediaPipe.
+[Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectiongpu)

-The graph below is used in the
-[Face Detection GPU Android example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectiongpu).
-To build the app, run:
+To build and install the app:

```bash
bazel build -c opt --config=android_arm64 mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectiongpu
-```
-
-To further install the app on an Android device, run:
-
-```bash
adb install bazel-bin/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectiongpu/facedetectiongpu.apk
```

## iOS

-Please see [Hello World! in MediaPipe on iOS](hello_world_ios.md) for general
-instructions to develop an iOS application that uses MediaPipe.
+[Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/facedetectiongpu).

-The graph below is used in the
-[Face Detection GPU iOS example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/facedetectiongpu).
-To build the app, please see the general
-[MediaPipe iOS app building and setup instructions](./mediapipe_ios_setup.md).
-Specific to this example, run:
+See the general [instructions](./mediapipe_ios_setup.md) for building iOS
+examples and generating an Xcode project. This will be the FaceDetectionGpuApp
+target.
+
+To build on the command line:

```bash
bazel build -c opt --config=ios_arm64 mediapipe/examples/ios/facedetectiongpu:FaceDetectionGpuApp
@@ -51,7 +42,7 @@ below and paste it into [MediaPipe Visualizer](https://viz.mediapipe.dev/).

```bash
# MediaPipe graph that performs face detection with TensorFlow Lite on GPU.
-# Used in the example in
+# Used in the examples in
# mediapipie/examples/android/src/java/com/mediapipe/apps/facedetectiongpu and
# mediapipie/examples/ios/facedetectiongpu.

@@ -227,9 +218,7 @@ node {
}
}

-# Draws annotations and overlays them on top of a GPU copy of the original
-# image coming into the graph. The calculator assumes that image origin is
-# always at the top-left corner and renders text accordingly.
+# Draws annotations and overlays them on top of the input images.
node {
calculator: "AnnotationOverlayCalculator"
input_stream: "INPUT_FRAME_GPU:throttled_input_video"
@@ -8,20 +8,12 @@ that performs hair segmentation with TensorFlow Lite on GPU.

## Android

-Please see [Hello World! in MediaPipe on Android](hello_world_android.md) for
-general instructions to develop an Android application that uses MediaPipe.
+[Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/hairsegmentationgpu)

-The graph below is used in the
-[Hair Segmentation GPU Android example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/hairsegmentationgpu).
-To build the app, run:
+To build and install the app:

```bash
bazel build -c opt --config=android_arm64 mediapipe/examples/android/src/java/com/google/mediapipe/apps/hairsegmentationgpu
-```
-
-To further install the app on an Android device, run:
-
-```bash
adb install bazel-bin/mediapipe/examples/android/src/java/com/google/mediapipe/apps/hairsegmentationgpu/hairsegmentationgpu.apk
```

@@ -37,7 +29,7 @@ below and paste it into [MediaPipe Visualizer](https://viz.mediapipe.dev/).

```bash
# MediaPipe graph that performs hair segmentation with TensorFlow Lite on GPU.
# Used in the example in
-# mediapipie/examples/ios/hairsegmentationgpu.
+# mediapipie/examples/android/src/java/com/mediapipe/apps/hairsegmentationgpu.

# Images on GPU coming into and out of the graph.
input_stream: "input_video"

@@ -84,14 +76,11 @@ node: {
}
}

-# Waits for a mask from the previous round of hair segmentation to be fed back
-# as an input, and caches it. Upon the arrival of an input image, it checks if
-# there is a mask cached, and sends out the mask with the timestamp replaced by
-# that of the input image. This is needed so that the "current image" and the
-# "previous mask" share the same timestamp, and as a result can be synchronized
-# and combined in the subsequent calculator. Note that upon the arrival of the
-# very first input frame, an empty packet is sent out to jump start the feedback
-# loop.
+# Caches a mask fed back from the previous round of hair segmentation, and upon
+# the arrival of the next input image sends out the cached mask with the
+# timestamp replaced by that of the input image, essentially generating a packet
+# that carries the previous mask. Note that upon the arrival of the very first
+# input image, an empty packet is sent out to jump start the feedback loop.
node {
calculator: "PreviousLoopbackCalculator"
input_stream: "MAIN:throttled_input_video"
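For reference, a PreviousLoopbackCalculator node of this kind closes a cycle in the graph with a back edge. The sketch below illustrates the usual wiring; the LOOP stream name and the input_stream_info block are assumptions based on how MediaPipe back edges are typically declared, not lines quoted from this commit.

```
node {
  calculator: "PreviousLoopbackCalculator"
  input_stream: "MAIN:throttled_input_video"
  # Back edge: the mask produced later in the graph is fed back here.
  input_stream: "LOOP:hair_mask"
  input_stream_info: {
    tag_index: "LOOP"
    back_edge: true
  }
  # Carries the previous round's mask, timestamped to match the current image.
  output_stream: "PREV_LOOP:previous_hair_mask"
}
```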
@@ -114,9 +103,9 @@ node {

# Converts the transformed input image on GPU into an image tensor stored in
# tflite::gpu::GlBuffer. The zero_center option is set to false to normalize the
-# pixel values to [0.f, 1.f] as opposed to [-1.f, 1.f].
-# With the max_num_channels option set to 4, all 4 RGBA channels are contained
-# in the image tensor.
+# pixel values to [0.f, 1.f] as opposed to [-1.f, 1.f]. With the
+# max_num_channels option set to 4, all 4 RGBA channels are contained in the
+# image tensor.
node {
calculator: "TfLiteConverterCalculator"
input_stream: "IMAGE_GPU:mask_embedded_input_video"
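The two options named in that comment would be set on the node roughly as in the sketch below; the node_options block is a hedged reconstruction (the output stream name is taken from the inference node in the next hunk), not text copied from the diff.

```
node {
  calculator: "TfLiteConverterCalculator"
  input_stream: "IMAGE_GPU:mask_embedded_input_video"
  output_stream: "TENSORS_GPU:image_tensor"
  node_options: {
    [type.googleapis.com/mediapipe.TfLiteConverterCalculatorOptions] {
      # Normalize pixel values to [0.f, 1.f] instead of [-1.f, 1.f].
      zero_center: false
      # Keep all four RGBA channels in the image tensor.
      max_num_channels: 4
    }
  }
}
```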
@@ -147,7 +136,7 @@ node {
node {
calculator: "TfLiteInferenceCalculator"
input_stream: "TENSORS_GPU:image_tensor"
-output_stream: "TENSORS:segmentation_tensor"
+output_stream: "TENSORS_GPU:segmentation_tensor"
input_side_packet: "CUSTOM_OP_RESOLVER:op_resolver"
node_options: {
[type.googleapis.com/mediapipe.TfLiteInferenceCalculatorOptions] {

@@ -157,23 +146,15 @@ node {
}
}

-# The next step (tensors to segmentation) is not yet supported on iOS GPU.
-# Convert the previous segmentation mask to CPU for processing.
-node: {
-  calculator: "GpuBufferToImageFrameCalculator"
-  input_stream: "previous_hair_mask"
-  output_stream: "previous_hair_mask_cpu"
-}
-
# Decodes the segmentation tensor generated by the TensorFlow Lite model into a
-# mask of values in [0.f, 1.f], stored in the R channel of a CPU buffer. It also
+# mask of values in [0.f, 1.f], stored in the R channel of a GPU buffer. It also
# takes the mask generated previously as another input to improve the temporal
# consistency.
node {
calculator: "TfLiteTensorsToSegmentationCalculator"
-input_stream: "TENSORS:segmentation_tensor"
-input_stream: "PREV_MASK:previous_hair_mask_cpu"
-output_stream: "MASK:hair_mask_cpu"
+input_stream: "TENSORS_GPU:segmentation_tensor"
+input_stream: "PREV_MASK_GPU:previous_hair_mask"
+output_stream: "MASK_GPU:hair_mask"
node_options: {
[type.googleapis.com/mediapipe.TfLiteTensorsToSegmentationCalculatorOptions] {
tensor_width: 512

@@ -185,13 +166,6 @@ node {
}
}

-# Send the current segmentation mask to GPU for the last step, blending.
-node: {
-  calculator: "ImageFrameToGpuBufferCalculator"
-  input_stream: "hair_mask_cpu"
-  output_stream: "hair_mask"
-}
-
# Colors the hair segmentation with the color specified in the option.
node {
calculator: "RecolorCalculator"
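For context, the RecolorCalculator that this comment introduces carries its color in node_options; the sketch below is illustrative only, and the stream names, the mask_channel value, and the blue overlay color are assumptions rather than content of this commit.

```
node {
  calculator: "RecolorCalculator"
  input_stream: "IMAGE_GPU:throttled_input_video"
  input_stream: "MASK_GPU:hair_mask"
  output_stream: "IMAGE_GPU:output_video"
  node_options: {
    [type.googleapis.com/mediapipe.RecolorCalculatorOptions] {
      # Read the mask from the red channel and tint masked pixels blue.
      mask_channel: RED
      color { r: 0 g: 0 b: 255 }
    }
  }
}
```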
@@ -20,18 +20,18 @@ confidence score to generate the hand rectangle, to be further utilized in the

## Android

-Please see [Hello World! in MediaPipe on Android](hello_world_android.md) for
-general instructions to develop an Android application that uses MediaPipe.
+[Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handdetectiongpu)

-The graph below is used in the
-[Hand Detection GPU Android example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handdetectiongpu).
-To build the app, run:
+An arm64 APK can be
+[downloaded here](https://drive.google.com/open?id=1qUlTtH7Ydg-wl_H6VVL8vueu2UCTu37E).
+
+To build the app yourself:

```bash
bazel build -c opt --config=android_arm64 mediapipe/examples/android/src/java/com/google/mediapipe/apps/handdetectiongpu
```

-To further install the app on an Android device, run:
+Once the app is built, install it on Android device with:

```bash
adb install bazel-bin/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handdetectiongpu/handdetectiongpu.apk

@@ -39,14 +39,13 @@ adb install bazel-bin/mediapipe/examples/android/src/java/com/google/mediapipe/a

## iOS

-Please see [Hello World! in MediaPipe on iOS](hello_world_ios.md) for general
-instructions to develop an iOS application that uses MediaPipe.
+[Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/handdetectiongpu).

-The graph below is used in the
-[Hand Detection GPU iOS example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/handdetectiongpu).
-To build the app, please see the general
-[MediaPipe iOS app building and setup instructions](./mediapipe_ios_setup.md).
-Specific to this example, run:
+See the general [instructions](./mediapipe_ios_setup.md) for building iOS
+examples and generating an Xcode project. This will be the HandDetectionGpuApp
+target.
+
+To build on the command line:

```bash
bazel build -c opt --config=ios_arm64 mediapipe/examples/ios/handdetectiongpu:HandDetectionGpuApp

@@ -70,14 +69,24 @@ Visualizing Subgraphs section in the

```bash
# MediaPipe graph that performs hand detection with TensorFlow Lite on GPU.
-# Used in the example in
-# mediapipie/examples/android/src/java/com/mediapipe/apps/handdetectiongpu.
+# Used in the examples in
+# mediapipie/examples/android/src/java/com/mediapipe/apps/handdetectiongpu and
# mediapipie/examples/ios/handdetectiongpu.

# Images coming into and out of the graph.
input_stream: "input_video"
output_stream: "output_video"

+# Throttles the images flowing downstream for flow control. It passes through
+# the very first incoming image unaltered, and waits for HandDetectionSubgraph
+# downstream in the graph to finish its tasks before it passes through another
+# image. All images that come in while waiting are dropped, limiting the number
+# of in-flight images in HandDetectionSubgraph to 1. This prevents the nodes in
+# HandDetectionSubgraph from queuing up incoming images and data excessively,
+# which leads to increased latency and memory usage, unwanted in real-time
+# mobile applications. It also eliminates unnecessarily computation, e.g., the
+# output produced by a node in the subgraph may get dropped downstream if the
+# subsequent nodes are still busy processing previous inputs.
node {
calculator: "FlowLimiterCalculator"
input_stream: "input_video"
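For reference, the FlowLimiterCalculator node described by that new comment is normally wired with a back edge from the downstream result so it knows when to admit the next image. The sketch below shows that wiring; the FINISHED stream name and the input_stream_info block are assumptions for illustration, not lines from this commit.

```
node {
  calculator: "FlowLimiterCalculator"
  input_stream: "input_video"
  # Back edge: a packet here signals that the previous image has been processed.
  input_stream: "FINISHED:hand_rect"
  input_stream_info: {
    tag_index: "FINISHED"
    back_edge: true
  }
  output_stream: "throttled_input_video"
}
```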
@@ -89,6 +98,7 @@ node {
output_stream: "throttled_input_video"
}

+# Subgraph that detections hands (see hand_detection_gpu.pbtxt).
node {
calculator: "HandDetectionSubgraph"
input_stream: "throttled_input_video"

@@ -123,7 +133,7 @@ node {
}
}

-# Draws annotations and overlays them on top of the input image into the graph.
+# Draws annotations and overlays them on top of the input images.
node {
calculator: "AnnotationOverlayCalculator"
input_stream: "INPUT_FRAME_GPU:throttled_input_video"

@@ -271,8 +281,8 @@ node {
}
}

-# Maps detection label IDs to the corresponding label text. The label map is
-# provided in the label_map_path option.
+# Maps detection label IDs to the corresponding label text ("Palm"). The label
+# map is provided in the label_map_path option.
node {
calculator: "DetectionLabelIdToTextCalculator"
input_stream: "filtered_detections"
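The label_map_path option mentioned in that comment is supplied through the node's options, roughly as in the sketch below; the output stream name and the label map file path here are illustrative assumptions, not part of this commit.

```
node {
  calculator: "DetectionLabelIdToTextCalculator"
  input_stream: "filtered_detections"
  output_stream: "labeled_detections"
  node_options: {
    [type.googleapis.com/mediapipe.DetectionLabelIdToTextCalculatorOptions] {
      # Maps the single palm-detection label ID to the text "Palm".
      label_map_path: "mediapipe/models/palm_detection_labelmap.txt"
    }
  }
}
```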
@@ -22,8 +22,8 @@ performed only within the hand rectangle for computational efficiency and
accuracy, and hand detection is only invoked when landmark localization could
not identify hand presence in the previous iteration.

-The example also comes with an experimental mode that localizes hand landmarks
-in 3D (i.e., estimating an extra z coordinate):
+The example can also run in a mode that localizes hand landmarks in 3D (i.e.,
+estimating an extra z coordinate):

![hand_tracking_3d_android_gpu.gif](images/mobile/hand_tracking_3d_android_gpu.gif)

@@ -33,24 +33,26 @@ camera.

## Android

-Please see [Hello World! in MediaPipe on Android](hello_world_android.md) for
-general instructions to develop an Android application that uses MediaPipe.
+[Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu)

-The graph below is used in the
-[Hand Tracking GPU Android example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu).
-To build the app, run:
+An arm64 APK can be
+[downloaded here](https://drive.google.com/open?id=1uCjS0y0O0dTDItsMh8x2cf4-l3uHW1vE),
+and a version running the 3D mode can be
+[downloaded here](https://drive.google.com/open?id=1tGgzOGkcZglJO2i7e8NKSxJgVtJYS3ka).
+
+To build the app yourself, run:

```bash
bazel build -c opt --config=android_arm64 mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu
```

-To build for the experimental mode that localizes hand landmarks in 3D, run:
+To build for the 3D mode, run:

```bash
bazel build -c opt --config=android_arm64 --define 3D=true mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu
```

-To further install the app on an Android device, run:
+Once the app is built, install it on Android device with:

```bash
adb install bazel-bin/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu/handtrackinggpu.apk

@@ -58,20 +60,19 @@ adb install bazel-bin/mediapipe/examples/android/src/java/com/google/mediapipe/a

## iOS

-Please see [Hello World! in MediaPipe on iOS](hello_world_ios.md) for general
-instructions to develop an iOS application that uses MediaPipe.
+[Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/handtrackinggpu).

-The graph below is used in the
-[Hand Tracking GPU iOS example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/handtrackinggpu).
-To build the app, please see the general
-[MediaPipe iOS app building and setup instructions](./mediapipe_ios_setup.md).
-Specific to this example, run:
+See the general [instructions](./mediapipe_ios_setup.md) for building iOS
+examples and generating an Xcode project. This will be the HandDetectionGpuApp
+target.
+
+To build on the command line:

```bash
bazel build -c opt --config=ios_arm64 mediapipe/examples/ios/handtrackinggpu:HandTrackingGpuApp
```

-To build for the experimental mode that localizes hand landmarks in 3D, run:
+To build for the 3D mode, run:

```bash
bazel build -c opt --config=ios_arm64 --define 3D=true mediapipe/examples/ios/handtrackinggpu:HandTrackingGpuApp

@@ -98,13 +99,24 @@ see the Visualizing Subgraphs section in the

```bash
# MediaPipe graph that performs hand tracking with TensorFlow Lite on GPU.
-# Used in the example in
-# mediapipie/examples/android/src/java/com/mediapipe/apps/handtrackinggpu.
+# Used in the examples in
+# mediapipie/examples/android/src/java/com/mediapipe/apps/handtrackinggpu and
+# mediapipie/examples/ios/handtrackinggpu.

# Images coming into and out of the graph.
input_stream: "input_video"
output_stream: "output_video"

+# Throttles the images flowing downstream for flow control. It passes through
+# the very first incoming image unaltered, and waits for downstream nodes
+# (calculators and subgraphs) in the graph to finish their tasks before it
+# passes through another image. All images that come in while waiting are
+# dropped, limiting the number of in-flight images in most part of the graph to
+# 1. This prevents the downstream nodes from queuing up incoming images and data
+# excessively, which leads to increased latency and memory usage, unwanted in
+# real-time mobile applications. It also eliminates unnecessarily computation,
+# e.g., the output produced by a node may get dropped downstream if the
+# subsequent nodes are still busy processing previous inputs.
node {
calculator: "FlowLimiterCalculator"
input_stream: "input_video"

@@ -116,6 +128,12 @@ node {
output_stream: "throttled_input_video"
}

+# Caches a hand-presence decision fed back from HandLandmarkSubgraph, and upon
+# the arrival of the next input image sends out the cached decision with the
+# timestamp replaced by that of the input image, essentially generating a packet
+# that carries the previous hand-presence decision. Note that upon the arrival
+# of the very first input image, an empty packet is sent out to jump start the
+# feedback loop.
node {
calculator: "PreviousLoopbackCalculator"
input_stream: "MAIN:throttled_input_video"
@@ -127,6 +145,9 @@ node {
output_stream: "PREV_LOOP:prev_hand_presence"
}

+# Drops the incoming image if HandLandmarkSubgraph was able to identify hand
+# presence in the previous image. Otherwise, passes the incoming image through
+# to trigger a new round of hand detection in HandDetectionSubgraph.
node {
calculator: "GateCalculator"
input_stream: "throttled_input_video"
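For context, a GateCalculator used this way gates the image stream on the previous hand-presence packet. The sketch below shows the typical wiring; the DISALLOW input tag, the empty_packets_as_allow option, and the exact output stream name are assumptions for illustration (the output name is taken from the HandDetectionSubgraph hunk that follows), not lines from this commit.

```
node {
  calculator: "GateCalculator"
  input_stream: "throttled_input_video"
  # When prev_hand_presence is true, the image is dropped (no new detection).
  input_stream: "DISALLOW:prev_hand_presence"
  output_stream: "hand_detection_input_video"
  node_options: {
    [type.googleapis.com/mediapipe.GateCalculatorOptions] {
      # An empty packet (the very first iteration) lets the image through.
      empty_packets_as_allow: true
    }
  }
}
```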
@@ -140,6 +161,7 @@ node {
}
}

+# Subgraph that detections hands (see hand_detection_gpu.pbtxt).
node {
calculator: "HandDetectionSubgraph"
input_stream: "hand_detection_input_video"

@@ -147,6 +169,7 @@ node {
output_stream: "NORM_RECT:hand_rect_from_palm_detections"
}

+# Subgraph that localizes hand landmarks (see hand_landmark_gpu.pbtxt).
node {
calculator: "HandLandmarkSubgraph"
input_stream: "IMAGE:throttled_input_video"

@@ -156,6 +179,12 @@ node {
output_stream: "PRESENCE:hand_presence"
}

+# Caches a hand rectangle fed back from HandLandmarkSubgraph, and upon the
+# arrival of the next input image sends out the cached rectangle with the
+# timestamp replaced by that of the input image, essentially generating a packet
+# that carries the previous hand rectangle. Note that upon the arrival of the
+# very first input image, an empty packet is sent out to jump start the
+# feedback loop.
node {
calculator: "PreviousLoopbackCalculator"
input_stream: "MAIN:throttled_input_video"

@@ -167,6 +196,14 @@ node {
output_stream: "PREV_LOOP:prev_hand_rect_from_landmarks"
}

+# Merges a stream of hand rectangles generated by HandDetectionSubgraph and that
+# generated by HandLandmarkSubgraph into a single output stream by selecting
+# between one of the two streams. The formal is selected if the incoming packet
+# is not empty, i.e., hand detection is performed on the current image by
+# HandDetectionSubgraph (because HandLandmarkSubgraph could not identify hand
+# presence in the previous image). Otherwise, the latter is selected, which is
+# never empty because HandLandmarkSubgraphs processes all images (that went
+# through FlowLimiterCaculator).
node {
calculator: "MergeCalculator"
input_stream: "hand_rect_from_palm_detections"
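Putting the pieces named in that comment together, the merge step looks roughly like the sketch below. The first input and the output stream appear in the surrounding hunks and the second input comes from the PreviousLoopbackCalculator hunk above, so only the overall wiring (the order of the two inputs) is an assumption.

```
node {
  calculator: "MergeCalculator"
  # Non-empty only when hand detection ran on the current image.
  input_stream: "hand_rect_from_palm_detections"
  # Carries the rectangle from the previous round of landmark localization.
  input_stream: "prev_hand_rect_from_landmarks"
  output_stream: "hand_rect"
}
```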
@@ -174,6 +211,8 @@ node {
output_stream: "hand_rect"
}

+# Subgraph that renders annotations and overlays them on top of the input
+# images (see renderer_gpu.pbtxt).
node {
calculator: "RendererSubgraph"
input_stream: "IMAGE:throttled_input_video"

@@ -322,8 +361,8 @@ node {
}
}

-# Maps detection label IDs to the corresponding label text. The label map is
-# provided in the label_map_path option.
+# Maps detection label IDs to the corresponding label text ("Palm"). The label
+# map is provided in the label_map_path option.
node {
calculator: "DetectionLabelIdToTextCalculator"
input_stream: "filtered_detections"

@@ -655,7 +694,7 @@ node {
landmark_connections: 20
landmark_color { r: 255 g: 0 b: 0 }
connection_color { r: 0 g: 255 b: 0 }
-thickness: 5.0
+thickness: 4.0
}
}
}

@@ -302,7 +302,7 @@ initialize the `_renderer` object:
_renderer = [[MPPLayerRenderer alloc] init];
_renderer.layer.frame = _liveView.layer.bounds;
[_liveView.layer addSublayer:_renderer.layer];
-_renderer.frameScaleMode = MediaPipeFrameScaleFillAndCrop;
+_renderer.frameScaleMode = MPPFrameScaleModeFillAndCrop;
```

To get frames from the camera, we will implement the following method:

@@ -444,7 +444,7 @@ using the following function:

// Create MediaPipe graph with mediapipe::CalculatorGraphConfig proto object.
MPPGraph* newGraph = [[MPPGraph alloc] initWithGraphConfig:config];
-[newGraph addFrameOutputStream:kOutputStream outputPacketType:MediaPipePacketPixelBuffer];
+[newGraph addFrameOutputStream:kOutputStream outputPacketType:MPPPacketTypePixelBuffer];
return newGraph;
}
```

@@ -508,12 +508,12 @@ this function's implementation to do the following:
}
[self.mediapipeGraph sendPixelBuffer:imageBuffer
intoStream:kInputStream
-packetType:MediaPipePacketPixelBuffer];
+packetType:MPPPacketTypePixelBuffer];
}
```

We send the `imageBuffer` to `self.mediapipeGraph` as a packet of type
-`MediaPipePacketPixelBuffer` into the input stream `kInputStream`, i.e.
+`MPPPacketTypePixelBuffer` into the input stream `kInputStream`, i.e.
"input_video".

The graph will run with this input packet and output a result in
@@ -28,14 +28,31 @@
ln -s ~/Downloads/MyProvisioningProfile.mobileprovision mediapipe/provisioning_profile.mobileprovision
```

+Tip: You can use this command to see the provisioning profiles you have
+previously downloaded using Xcode: `open ~/Library/MobileDevice/"Provisioning Profiles"`.
+If there are none, generate and download a profile on [Apple's developer site](https://developer.apple.com/account/resources/).
+
## Creating an Xcode project

+Note: This workflow requires a separate tool in addition to Bazel. If it fails
+to work for any reason, you can always use the command-line build instructions
+in the next section.
+
1. We will use a tool called [Tulsi](https://tulsi.bazel.build/) for generating Xcode projects from Bazel
build configurations.

+IMPORTANT: At the time of this writing, Tulsi has a small [issue](https://github.com/bazelbuild/tulsi/issues/98)
+that keeps it from building with Xcode 10.3. The instructions below apply a
+fix from a [pull request](https://github.com/bazelbuild/tulsi/pull/99).
+
```bash
+# cd out of the mediapipe directory, then:
git clone https://github.com/bazelbuild/tulsi.git
cd tulsi
+# Apply the fix for Xcode 10.3 compatibility:
+git fetch origin pull/99/head:xcodefix
+git checkout xcodefix
+# Now we can build Tulsi.
sh build_and_run.sh
```

@@ -51,12 +68,21 @@
4. You can now select any of the MediaPipe demos in the target menu, and build
and run them as normal.

+Note: When you ask Xcode to run an app, by default it will use the Debug
+configuration. Some of our demos are computationally heavy; you may want to use
+the Release configuration for better performance.
+
+Tip: To switch build configuration in Xcode, click on the target menu, choose
+"Edit Scheme...", select the Run action, and switch the Build Configuration from
+Debug to Release. Note that this is set independently for each target.
+
## Building an iOS app from the command line

1. Build one of the example apps for iOS. We will be using the
[Face Detection GPU App example](./face_detection_mobile_gpu.md)

```bash
+cd mediapipe
bazel build --config=ios_arm64 mediapipe/examples/ios/facedetectiongpu:FaceDetectionGpuApp
```
@@ -16,33 +16,24 @@ CPU.

## Android

-Please see [Hello World! in MediaPipe on Android](hello_world_android.md) for
-general instructions to develop an Android application that uses MediaPipe.
+[Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectioncpu)

-The graph below is used in the
-[Object Detection CPU Android example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectioncpu).
-To build the app, run:
+To build and install the app:

```bash
bazel build -c opt --config=android_arm64 mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectioncpu
-```
-
-To further install the app on an Android device, run:
-
-```bash
adb install bazel-bin/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectioncpu/objectdetectioncpu.apk
```

## iOS

-Please see [Hello World! in MediaPipe on iOS](hello_world_ios.md) for general
-instructions to develop an iOS application that uses MediaPipe.
+[Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/handdetectiongpu).

-The graph below is used in the
-[Object Detection GPU iOS example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/objectdetectioncpu).
-To build the app, please see the general
-[MediaPipe iOS app building and setup instructions](./mediapipe_ios_setup.md).
-Specific to this example, run:
+See the general [instructions](./mediapipe_ios_setup.md) for building iOS
+examples and generating an Xcode project. This will be the ObjectDetectionCpuApp
+target.
+
+To build on the command line:

```bash
bazel build -c opt --config=ios_arm64 mediapipe/examples/ios/objectdetectioncpu:ObjectDetectionCpuApp

@@ -59,7 +50,7 @@ below and paste it into [MediaPipe Visualizer](https://viz.mediapipe.dev/).

```bash
# MediaPipe graph that performs object detection with TensorFlow Lite on CPU.
-# Used in the example in
+# Used in the examples in
# mediapipie/examples/android/src/java/com/mediapipe/apps/objectdetectioncpu and
# mediapipie/examples/ios/objectdetectioncpu.

@@ -236,9 +227,7 @@ node {
}
}

-# Draws annotations and overlays them on top of the CPU copy of the original
-# image coming into the graph. The calculator assumes that image origin is
-# always at the top-left corner and renders text accordingly.
+# Draws annotations and overlays them on top of the input images.
node {
calculator: "AnnotationOverlayCalculator"
input_stream: "INPUT_FRAME:throttled_input_video_cpu"

@@ -8,33 +8,24 @@ that performs object detection with TensorFlow Lite on GPU.

## Android

-Please see [Hello World! in MediaPipe on Android](hello_world_android.md) for
-general instructions to develop an Android application that uses MediaPipe.
+[Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectiongpu)

-The graph below is used in the
-[Object Detection GPU Android example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectiongpu).
-To build the app, run:
+To build and install the app:

```bash
bazel build -c opt --config=android_arm64 mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectiongpu
-```
-
-To further install the app on an Android device, run:
-
-```bash
adb install bazel-bin/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectiongpu/objectdetectiongpu.apk
```

## iOS

-Please see [Hello World! in MediaPipe on iOS](hello_world_ios.md) for general
-instructions to develop an iOS application that uses MediaPipe.
+[Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/objectdetectiongpu).

-The graph below is used in the
-[Object Detection GPU iOS example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/objectdetectiongpu).
-To build the app, please see the general
-[MediaPipe iOS app building and setup instructions](./mediapipe_ios_setup.md).
-Specific to this example, run:
+See the general [instructions](./mediapipe_ios_setup.md) for building iOS
+examples and generating an Xcode project. This will be the ObjectDetectionGpuApp
+target.
+
+To build on the command line:

```bash
bazel build -c opt --config=ios_arm64 mediapipe/examples/ios/objectdetectiongpu:ObjectDetectionGpuApp

@@ -51,7 +42,7 @@ below and paste it into [MediaPipe Visualizer](https://viz.mediapipe.dev/).

```bash
# MediaPipe graph that performs object detection with TensorFlow Lite on GPU.
-# Used in the example in
+# Used in the examples in
# mediapipie/examples/android/src/java/com/mediapipe/apps/objectdetectiongpu and
# mediapipie/examples/ios/objectdetectiongpu.

@@ -218,9 +209,7 @@ node {
}
}

-# Draws annotations and overlays them on top of a GPU copy of the original
-# image coming into the graph. The calculator assumes that image origin is
-# always at the top-left corner and renders text accordingly.
+# Draws annotations and overlays them on top of the input images.
node {
calculator: "AnnotationOverlayCalculator"
input_stream: "INPUT_FRAME_GPU:throttled_input_video"
@@ -79,7 +79,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";

// Create MediaPipe graph with mediapipe::CalculatorGraphConfig proto object.
MPPGraph* newGraph = [[MPPGraph alloc] initWithGraphConfig:config];
-[newGraph addFrameOutputStream:kOutputStream outputPacketType:MediaPipePacketPixelBuffer];
+[newGraph addFrameOutputStream:kOutputStream outputPacketType:MPPPacketTypePixelBuffer];
return newGraph;
}

@@ -91,7 +91,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
_renderer = [[MPPLayerRenderer alloc] init];
_renderer.layer.frame = _liveView.layer.bounds;
[_liveView.layer addSublayer:_renderer.layer];
-_renderer.frameScaleMode = MediaPipeFrameScaleFillAndCrop;
+_renderer.frameScaleMode = MPPFrameScaleModeFillAndCrop;

dispatch_queue_attr_t qosAttribute = dispatch_queue_attr_make_with_qos_class(
DISPATCH_QUEUE_SERIAL, QOS_CLASS_USER_INTERACTIVE, /*relative_priority=*/0);

@@ -170,7 +170,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
}
[self.mediapipeGraph sendPixelBuffer:imageBuffer
intoStream:kInputStream
-packetType:MediaPipePacketPixelBuffer];
+packetType:MPPPacketTypePixelBuffer];
}

@end

@@ -79,7 +79,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";

// Create MediaPipe graph with mediapipe::CalculatorGraphConfig proto object.
MPPGraph* newGraph = [[MPPGraph alloc] initWithGraphConfig:config];
-[newGraph addFrameOutputStream:kOutputStream outputPacketType:MediaPipePacketPixelBuffer];
+[newGraph addFrameOutputStream:kOutputStream outputPacketType:MPPPacketTypePixelBuffer];
return newGraph;
}

@@ -91,7 +91,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
_renderer = [[MPPLayerRenderer alloc] init];
_renderer.layer.frame = _liveView.layer.bounds;
[_liveView.layer addSublayer:_renderer.layer];
-_renderer.frameScaleMode = MediaPipeFrameScaleFillAndCrop;
+_renderer.frameScaleMode = MPPFrameScaleModeFillAndCrop;
// When using the front camera, mirror the input for a more natural look.
_renderer.mirrored = YES;

@@ -172,7 +172,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
}
[self.mediapipeGraph sendPixelBuffer:imageBuffer
intoStream:kInputStream
-packetType:MediaPipePacketPixelBuffer];
+packetType:MPPPacketTypePixelBuffer];
}

@end

@@ -79,7 +79,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";

// Create MediaPipe graph with mediapipe::CalculatorGraphConfig proto object.
MPPGraph* newGraph = [[MPPGraph alloc] initWithGraphConfig:config];
-[newGraph addFrameOutputStream:kOutputStream outputPacketType:MediaPipePacketPixelBuffer];
+[newGraph addFrameOutputStream:kOutputStream outputPacketType:MPPPacketTypePixelBuffer];
return newGraph;
}

@@ -91,7 +91,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
_renderer = [[MPPLayerRenderer alloc] init];
_renderer.layer.frame = _liveView.layer.bounds;
[_liveView.layer addSublayer:_renderer.layer];
-_renderer.frameScaleMode = MediaPipeFrameScaleFillAndCrop;
+_renderer.frameScaleMode = MPPFrameScaleModeFillAndCrop;
// When using the front camera, mirror the input for a more natural look.
_renderer.mirrored = YES;

@@ -172,7 +172,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
}
[self.mediapipeGraph sendPixelBuffer:imageBuffer
intoStream:kInputStream
-packetType:MediaPipePacketPixelBuffer];
+packetType:MPPPacketTypePixelBuffer];
}

@end

@@ -79,7 +79,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";

// Create MediaPipe graph with mediapipe::CalculatorGraphConfig proto object.
MPPGraph* newGraph = [[MPPGraph alloc] initWithGraphConfig:config];
-[newGraph addFrameOutputStream:kOutputStream outputPacketType:MediaPipePacketPixelBuffer];
+[newGraph addFrameOutputStream:kOutputStream outputPacketType:MPPPacketTypePixelBuffer];
return newGraph;
}

@@ -91,7 +91,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
_renderer = [[MPPLayerRenderer alloc] init];
_renderer.layer.frame = _liveView.layer.bounds;
[_liveView.layer addSublayer:_renderer.layer];
-_renderer.frameScaleMode = MediaPipeFrameScaleFillAndCrop;
+_renderer.frameScaleMode = MPPFrameScaleModeFillAndCrop;
// When using the front camera, mirror the input for a more natural look.
_renderer.mirrored = YES;

@@ -172,7 +172,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
}
[self.mediapipeGraph sendPixelBuffer:imageBuffer
intoStream:kInputStream
-packetType:MediaPipePacketPixelBuffer];
+packetType:MPPPacketTypePixelBuffer];
}

@end

@@ -79,7 +79,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";

// Create MediaPipe graph with mediapipe::CalculatorGraphConfig proto object.
MPPGraph* newGraph = [[MPPGraph alloc] initWithGraphConfig:config];
-[newGraph addFrameOutputStream:kOutputStream outputPacketType:MediaPipePacketPixelBuffer];
+[newGraph addFrameOutputStream:kOutputStream outputPacketType:MPPPacketTypePixelBuffer];
return newGraph;
}

@@ -91,7 +91,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
_renderer = [[MPPLayerRenderer alloc] init];
_renderer.layer.frame = _liveView.layer.bounds;
[_liveView.layer addSublayer:_renderer.layer];
-_renderer.frameScaleMode = MediaPipeFrameScaleFillAndCrop;
+_renderer.frameScaleMode = MPPFrameScaleModeFillAndCrop;
// When using the front camera, mirror the input for a more natural look.
_renderer.mirrored = YES;

@@ -172,7 +172,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
}
[self.mediapipeGraph sendPixelBuffer:imageBuffer
intoStream:kInputStream
-packetType:MediaPipePacketPixelBuffer];
+packetType:MPPPacketTypePixelBuffer];
}

@end

@@ -79,7 +79,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";

// Create MediaPipe graph with mediapipe::CalculatorGraphConfig proto object.
MPPGraph* newGraph = [[MPPGraph alloc] initWithGraphConfig:config];
-[newGraph addFrameOutputStream:kOutputStream outputPacketType:MediaPipePacketPixelBuffer];
+[newGraph addFrameOutputStream:kOutputStream outputPacketType:MPPPacketTypePixelBuffer];
return newGraph;
}

@@ -91,7 +91,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
_renderer = [[MPPLayerRenderer alloc] init];
_renderer.layer.frame = _liveView.layer.bounds;
[_liveView.layer addSublayer:_renderer.layer];
-_renderer.frameScaleMode = MediaPipeFrameScaleFillAndCrop;
+_renderer.frameScaleMode = MPPFrameScaleModeFillAndCrop;

dispatch_queue_attr_t qosAttribute = dispatch_queue_attr_make_with_qos_class(
DISPATCH_QUEUE_SERIAL, QOS_CLASS_USER_INTERACTIVE, /*relative_priority=*/0);

@@ -170,7 +170,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
}
[self.mediapipeGraph sendPixelBuffer:imageBuffer
intoStream:kInputStream
-packetType:MediaPipePacketPixelBuffer];
+packetType:MPPPacketTypePixelBuffer];
}

@end

@@ -79,7 +79,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";

// Create MediaPipe graph with mediapipe::CalculatorGraphConfig proto object.
MPPGraph* newGraph = [[MPPGraph alloc] initWithGraphConfig:config];
-[newGraph addFrameOutputStream:kOutputStream outputPacketType:MediaPipePacketPixelBuffer];
+[newGraph addFrameOutputStream:kOutputStream outputPacketType:MPPPacketTypePixelBuffer];
return newGraph;
}

@@ -91,7 +91,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
_renderer = [[MPPLayerRenderer alloc] init];
_renderer.layer.frame = _liveView.layer.bounds;
[_liveView.layer addSublayer:_renderer.layer];
-_renderer.frameScaleMode = MediaPipeFrameScaleFillAndCrop;
+_renderer.frameScaleMode = MPPFrameScaleModeFillAndCrop;

dispatch_queue_attr_t qosAttribute = dispatch_queue_attr_make_with_qos_class(
DISPATCH_QUEUE_SERIAL, QOS_CLASS_USER_INTERACTIVE, /*relative_priority=*/0);

@@ -170,7 +170,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
}
[self.mediapipeGraph sendPixelBuffer:imageBuffer
intoStream:kInputStream
-packetType:MediaPipePacketPixelBuffer];
+packetType:MPPPacketTypePixelBuffer];
}

@end
@@ -50,7 +50,7 @@ static const char* kOutputStream = "counter";
   profilerConfig->set_trace_log_disabled(false);

   MPPGraph* graph = [[MPPGraph alloc] initWithGraphConfig:graphConfig];
-  [graph addFrameOutputStream:kOutputStream outputPacketType:MediaPipePacketRaw];
+  [graph addFrameOutputStream:kOutputStream outputPacketType:MPPPacketTypeRaw];
   graph.delegate = self;

   NSError* error;

@@ -115,7 +115,7 @@

   _graph = [[MPPGraph alloc] initWithGraphConfig:config];
   [_graph addFrameOutputStream:"output_frames"
-              outputPacketType:MediaPipePacketPixelBuffer];
+              outputPacketType:MPPPacketTypePixelBuffer];
   [self testGraph:_graph input:*originalPixelBuffer expectedOutput:*originalPixelBuffer];
 }

@@ -143,7 +143,7 @@

   _graph = [[MPPGraph alloc] initWithGraphConfig:config];
   [_graph addFrameOutputStream:"output_frames"
-              outputPacketType:MediaPipePacketPixelBuffer];
+              outputPacketType:MPPPacketTypePixelBuffer];
   [self testGraph:_graph input:*originalPixelBuffer expectedOutput:*originalPixelBuffer];
 }

@@ -166,7 +166,7 @@

   _graph = [[MPPGraph alloc] initWithGraphConfig:config];
   [_graph addFrameOutputStream:"output_frames"
-              outputPacketType:MediaPipePacketPixelBuffer];
+              outputPacketType:MPPPacketTypePixelBuffer];
   [self testGraph:_graph input:*originalPixelBuffer expectedOutput:*originalPixelBuffer];
 }

@@ -210,7 +210,7 @@

   _graph = [[MPPGraph alloc] initWithGraphConfig:config];
   [_graph addFrameOutputStream:"output_frames"
-              outputPacketType:MediaPipePacketPixelBuffer];
+              outputPacketType:MPPPacketTypePixelBuffer];
   [self testGraph:_graph input:convertedPixelBuffer expectedOutput:bgraPixelBuffer];
   CFRelease(convertedPixelBuffer);
   CFRelease(bgraPixelBuffer);

@@ -240,7 +240,7 @@

   _graph = [[MPPGraph alloc] initWithGraphConfig:config];
   [_graph addFrameOutputStream:"output_frames"
-              outputPacketType:MediaPipePacketPixelBuffer];
+              outputPacketType:MPPPacketTypePixelBuffer];
   [_graph setSidePacket:(mediapipe::MakePacket<float[3]>(1.0, 0.0, 0.0))
                   named:"rgb_weights"];

@@ -15,20 +15,20 @@
 #import <Foundation/Foundation.h>
 #import <GLKit/GLKit.h>

-/// Modes of clockwise rotation for input frames.
+/// Modes of rotation (clockwise) for input frames.
-typedef enum {
+typedef NS_ENUM(int, MPPFrameRotation) {
-  MediaPipeFrameRotationNone,
+  MPPFrameRotationNone,
-  MediaPipeFrameRotation90,
+  MPPFrameRotationCw90,
-  MediaPipeFrameRotation180,
+  MPPFrameRotationCw180,
-  MediaPipeFrameRotation270
+  MPPFrameRotationCw270,
-} MediaPipeFrameRotationMode;
+};

-typedef enum {
+typedef NS_ENUM(int, MPPFrameScaleMode) {
   // Scale the frame up to fit the drawing area, preserving aspect ratio; may letterbox.
-  MediaPipeFrameScaleFit,
+  MPPFrameScaleModeFit,
   // Scale the frame up to fill the drawing area, preserving aspect ratio; may crop.
-  MediaPipeFrameScaleFillAndCrop,
+  MPPFrameScaleModeFillAndCrop,
-} MediaPipeFrameScaleMode;
+};

 /// Renders frames in a GLKView.
 @interface MPPGLViewRenderer : NSObject <GLKViewDelegate>

@@ -47,15 +47,15 @@ typedef enum {
 @property(nonatomic, assign) BOOL retainsLastPixelBuffer;

 /// Sets which way to rotate input frames before rendering them.
-/// Default value is MediaPipeFrameRotationNone.
+/// Default value is MPPFrameRotationNone.
 /// Note that changing the transform property of a GLKView once rendering has
 /// started causes problems inside GLKView. Instead, we perform the rotation
 /// in our rendering code.
-@property(nonatomic) MediaPipeFrameRotationMode frameRotationMode;
+@property(nonatomic) MPPFrameRotation frameRotationMode;

 /// Sets how to scale the frame within the view.
-/// Default value is MediaPipeFrameScaleScaleToFit.
+/// Default value is MPPFrameScaleModeFit.
-@property(nonatomic) MediaPipeFrameScaleMode frameScaleMode;
+@property(nonatomic) MPPFrameScaleMode frameScaleMode;

 /// If YES, swap left and right. Useful for the front camera.
 @property(nonatomic) BOOL mirrored;

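As a quick before/after reference, here is a minimal sketch of client code written against the renamed NS_ENUM values declared above. The properties are the ones in this header; the surrounding setup is illustrative only.

```objectivec
// Illustrative sketch, not part of this commit: configuring an MPPGLViewRenderer
// with the renamed enum values.
MPPGLViewRenderer* renderer = [[MPPGLViewRenderer alloc] init];
renderer.frameRotationMode = MPPFrameRotationCw90;   // was MediaPipeFrameRotation90
renderer.frameScaleMode = MPPFrameScaleModeFit;      // was MediaPipeFrameScaleFit
renderer.mirrored = YES;                             // swap left/right, e.g. for the front camera
```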
@@ -46,8 +46,8 @@
   if (self) {
     _glContext = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
     _bufferLock = OS_SPINLOCK_INIT;
-    _frameRotationMode = MediaPipeFrameRotationNone;
+    _frameRotationMode = MPPFrameRotationNone;
-    _frameScaleMode = MediaPipeFrameScaleFit;
+    _frameScaleMode = MPPFrameScaleModeFit;
   }
   return self;
 }

@@ -90,24 +90,24 @@
              @"renderer setup failed: %@", [NSError gus_errorWithStatus:status]);
 }

-mediapipe::FrameScaleMode InternalScaleMode(MediaPipeFrameScaleMode mode) {
+mediapipe::FrameScaleMode InternalScaleMode(MPPFrameScaleMode mode) {
   switch (mode) {
-    case MediaPipeFrameScaleFit:
+    case MPPFrameScaleModeFit:
       return mediapipe::FrameScaleMode::kFit;
-    case MediaPipeFrameScaleFillAndCrop:
+    case MPPFrameScaleModeFillAndCrop:
       return mediapipe::FrameScaleMode::kFillAndCrop;
   }
 }

-mediapipe::FrameRotation InternalRotationMode(MediaPipeFrameRotationMode rot) {
+mediapipe::FrameRotation InternalRotationMode(MPPFrameRotation rot) {
   switch (rot) {
-    case MediaPipeFrameRotationNone:
+    case MPPFrameRotationNone:
       return mediapipe::FrameRotation::kNone;
-    case MediaPipeFrameRotation90:
+    case MPPFrameRotationCw90:
       return mediapipe::FrameRotation::k90;
-    case MediaPipeFrameRotation180:
+    case MPPFrameRotationCw180:
       return mediapipe::FrameRotation::k180;
-    case MediaPipeFrameRotation270:
+    case MPPFrameRotationCw270:
       return mediapipe::FrameRotation::k270;
   }
 }

@@ -50,7 +50,7 @@
   MPPGraph* mediapipeGraph = [[MPPGraph alloc] initWithGraphConfig:config];
   // We receive output by setting ourselves as the delegate.
   mediapipeGraph.delegate = self;
-  [mediapipeGraph addFrameOutputStream:"output_video" outputPacketType:MediaPipePacketPixelBuffer];
+  [mediapipeGraph addFrameOutputStream:"output_video" outputPacketType:MPPPacketTypePixelBuffer];

   // Start running the graph.
   NSError *error;

@@ -60,7 +60,7 @@
   // Send a frame.
   XCTAssertTrue([mediapipeGraph sendPixelBuffer:*_inputPixelBuffer
                                      intoStream:"input_video"
-                                     packetType:MediaPipePacketPixelBuffer
+                                     packetType:MPPPacketTypePixelBuffer
                                       timestamp:mediapipe::Timestamp(0)]);

   // Shut down the graph.

@@ -54,26 +54,26 @@ struct GpuSharedData;

 /// Chooses the packet type used by MPPGraph to send and receive packets
 /// from the graph.
-typedef NS_ENUM(int, MediaPipePacketType) {
+typedef NS_ENUM(int, MPPPacketType) {
   /// Any packet type.
   /// Calls mediapipeGraph:didOutputPacket:fromStream:
-  MediaPipePacketRaw,
+  MPPPacketTypeRaw,

   /// CFHolder<CVPixelBufferRef>.
   /// Calls mediapipeGraph:didOutputPixelBuffer:fromStream:
   /// Use this packet type to pass GPU frames to calculators.
-  MediaPipePacketPixelBuffer,
+  MPPPacketTypePixelBuffer,

   /// ImageFrame.
   /// Calls mediapipeGraph:didOutputPixelBuffer:fromStream:
-  MediaPipePacketImageFrame,
+  MPPPacketTypeImageFrame,

   /// RGBA ImageFrame, but do not swap the channels if the input pixel buffer
   /// is BGRA. This is useful when the graph needs RGBA ImageFrames, but the
   /// calculators do not care about the order of the channels, so BGRA data can
   /// be used as-is.
   /// Calls mediapipeGraph:didOutputPixelBuffer:fromStream:
-  MediaPipePacketImageFrameBGRANoSwap,
+  MPPPacketTypeImageFrameBGRANoSwap,
 };

 /// This class is an Objective-C wrapper around a MediaPipe graph object, and

@@ -129,7 +129,7 @@ typedef NS_ENUM(int, MediaPipePacketType) {
 /// the delegate will receive frames.
 /// @param packetType The type of packet provided by the output streams.
 - (void)addFrameOutputStream:(const std::string&)outputStreamName
-            outputPacketType:(MediaPipePacketType)packetType;
+            outputPacketType:(MPPPacketType)packetType;

 /// Starts running the graph.
 /// @return YES if successful.

@@ -155,7 +155,7 @@ typedef NS_ENUM(int, MediaPipePacketType) {

 /// Creates a MediaPipe packet wrapping the given pixelBuffer;
 - (mediapipe::Packet)packetWithPixelBuffer:(CVPixelBufferRef)pixelBuffer
-                                packetType:(MediaPipePacketType)packetType;
+                                packetType:(MPPPacketType)packetType;

 /// Sends a pixel buffer into a graph input stream, using the specified packet
 /// type. The graph must have been started before calling this. Drops frames and

@@ -164,7 +164,7 @@ typedef NS_ENUM(int, MediaPipePacketType) {
 /// possibly increased efficiency. Returns YES if the packet was successfully sent.
 - (BOOL)sendPixelBuffer:(CVPixelBufferRef)imageBuffer
              intoStream:(const std::string&)inputName
-             packetType:(MediaPipePacketType)packetType
+             packetType:(MPPPacketType)packetType
               timestamp:(const mediapipe::Timestamp&)timestamp
          allowOverwrite:(BOOL)allowOverwrite;

@@ -174,7 +174,7 @@ typedef NS_ENUM(int, MediaPipePacketType) {
 /// successfully sent.
 - (BOOL)sendPixelBuffer:(CVPixelBufferRef)pixelBuffer
              intoStream:(const std::string&)inputName
-             packetType:(MediaPipePacketType)packetType
+             packetType:(MPPPacketType)packetType
               timestamp:(const mediapipe::Timestamp&)timestamp;

 /// Sends a pixel buffer into a graph input stream, using the specified packet

@@ -184,7 +184,7 @@ typedef NS_ENUM(int, MediaPipePacketType) {
 /// packet was successfully sent.
 - (BOOL)sendPixelBuffer:(CVPixelBufferRef)pixelBuffer
              intoStream:(const std::string&)inputName
-             packetType:(MediaPipePacketType)packetType;
+             packetType:(MPPPacketType)packetType;

 /// Cancels a graph run. You must still call waitUntilDoneWithError: after this.
 - (void)cancel;

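Putting the renamed MPPGraph API together, a hypothetical caller might look like the sketch below. Every selector and constant used here appears in the header hunks above; the graph config, stream names, and error handling are placeholders.

```objectivec
// Sketch only; "config", "input_video", and "output_video" are illustrative.
MPPGraph* graph = [[MPPGraph alloc] initWithGraphConfig:config];
[graph addFrameOutputStream:"output_video"
           outputPacketType:MPPPacketTypePixelBuffer];  // was MediaPipePacketPixelBuffer
graph.delegate = self;

NSError* error;
if (![graph startWithError:&error]) {
  NSLog(@"Failed to start graph: %@", error);
}

// Feed a camera frame into the graph as a GPU packet.
[graph sendPixelBuffer:pixelBuffer
            intoStream:"input_video"
            packetType:MPPPacketTypePixelBuffer
             timestamp:mediapipe::Timestamp(0)];
```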
@@ -77,7 +77,7 @@
 }

 - (void)addFrameOutputStream:(const std::string&)outputStreamName
-            outputPacketType:(MediaPipePacketType)packetType {
+            outputPacketType:(MPPPacketType)packetType {
   std::string callbackInputName;
   mediapipe::tool::AddCallbackCalculator(outputStreamName, &_config, &callbackInputName,
                                          /*use_std_function=*/true);

@@ -99,14 +99,14 @@
 /// This is the function that gets called by the CallbackCalculator that
 /// receives the graph's output.
 void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,
-                       MediaPipePacketType packetType, const mediapipe::Packet& packet) {
+                       MPPPacketType packetType, const mediapipe::Packet& packet) {
   MPPGraph* wrapper = (__bridge MPPGraph*)wrapperVoid;
   @autoreleasepool {
-    if (packetType == MediaPipePacketRaw) {
+    if (packetType == MPPPacketTypeRaw) {
       [wrapper.delegate mediapipeGraph:wrapper
                        didOutputPacket:packet
                             fromStream:streamName];
-    } else if (packetType == MediaPipePacketImageFrame) {
+    } else if (packetType == MPPPacketTypeImageFrame) {
       const auto& frame = packet.Get<mediapipe::ImageFrame>();
       mediapipe::ImageFormat::Format format = frame.Format();

@@ -162,7 +162,7 @@ void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,
       _GTMDevLog(@"unsupported ImageFormat: %d", format);
     }
 #if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
-    } else if (packetType == MediaPipePacketPixelBuffer) {
+    } else if (packetType == MPPPacketTypePixelBuffer) {
       CVPixelBufferRef pixelBuffer = packet.Get<mediapipe::GpuBuffer>().GetCVPixelBufferRef();
       if ([wrapper.delegate
               respondsToSelector:@selector

@@ -283,15 +283,15 @@ void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,
 }

 - (mediapipe::Packet)packetWithPixelBuffer:(CVPixelBufferRef)imageBuffer
-                                packetType:(MediaPipePacketType)packetType {
+                                packetType:(MPPPacketType)packetType {
   mediapipe::Packet packet;
-  if (packetType == MediaPipePacketImageFrame || packetType == MediaPipePacketImageFrameBGRANoSwap) {
+  if (packetType == MPPPacketTypeImageFrame || packetType == MPPPacketTypeImageFrameBGRANoSwap) {
     auto frame = CreateImageFrameForCVPixelBuffer(
         imageBuffer, /* canOverwrite = */ false,
-        /* bgrAsRgb = */ packetType == MediaPipePacketImageFrameBGRANoSwap);
+        /* bgrAsRgb = */ packetType == MPPPacketTypeImageFrameBGRANoSwap);
     packet = mediapipe::Adopt(frame.release());
 #if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
-  } else if (packetType == MediaPipePacketPixelBuffer) {
+  } else if (packetType == MPPPacketTypePixelBuffer) {
     packet = mediapipe::MakePacket<mediapipe::GpuBuffer>(imageBuffer);
 #endif  // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
   } else {

@@ -302,7 +302,7 @@ void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,

 - (BOOL)sendPixelBuffer:(CVPixelBufferRef)imageBuffer
              intoStream:(const std::string&)inputName
-             packetType:(MediaPipePacketType)packetType
+             packetType:(MPPPacketType)packetType
              timestamp:(const mediapipe::Timestamp&)timestamp
          allowOverwrite:(BOOL)allowOverwrite {
   if (_maxFramesInFlight && _framesInFlight >= _maxFramesInFlight) return NO;

@@ -326,7 +326,7 @@ void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,

 - (BOOL)sendPixelBuffer:(CVPixelBufferRef)imageBuffer
              intoStream:(const std::string&)inputName
-             packetType:(MediaPipePacketType)packetType
+             packetType:(MPPPacketType)packetType
              timestamp:(const mediapipe::Timestamp&)timestamp {
   return [self sendPixelBuffer:imageBuffer
                     intoStream:inputName

@@ -337,7 +337,7 @@ void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,

 - (BOOL)sendPixelBuffer:(CVPixelBufferRef)imageBuffer
              intoStream:(const std::string&)inputName
-             packetType:(MediaPipePacketType)packetType {
+             packetType:(MPPPacketType)packetType {
   _GTMDevAssert(_frameTimestamp < mediapipe::Timestamp::Done(),
                 @"Trying to send frame after stream is done.");
   if (_frameTimestamp < mediapipe::Timestamp::Min()) {

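The dispatch in CallFrameDelegate above decides which delegate callback fires for each MPPPacketType. A minimal, hypothetical delegate implementation is sketched below; the selector names are the ones referenced in this diff, and the parameter types are assumed to match the MPPGraphDelegate protocol.

```objectivec
// Sketch of a delegate receiving graph output; assumes the class conforms to
// MPPGraphDelegate and that an output stream was registered with
// MPPPacketTypePixelBuffer via addFrameOutputStream:outputPacketType:.
- (void)mediapipeGraph:(MPPGraph*)graph
    didOutputPixelBuffer:(CVPixelBufferRef)pixelBuffer
              fromStream:(const std::string&)streamName {
  if (streamName == "output_video") {
    // Render the GPU frame, e.g. with an MPPLayerRenderer (renderPixelBuffer: is
    // declared in the layer renderer header later in this diff).
    [_renderer renderPixelBuffer:pixelBuffer];
  }
}

// Raw packets (MPPPacketTypeRaw) arrive through this callback instead.
- (void)mediapipeGraph:(MPPGraph*)graph
       didOutputPacket:(const mediapipe::Packet&)packet
            fromStream:(const std::string&)streamName {
  NSLog(@"Got packet at timestamp %lld from %s",
        packet.Timestamp().Value(), streamName.c_str());
}
```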
@@ -43,7 +43,7 @@
 /// completes the run, and returns the output frame.
 - (CVPixelBufferRef)runGraph:(MPPGraph*)graph
              withPixelBuffer:(CVPixelBufferRef)inputBuffer
-                  packetType:(MediaPipePacketType)inputPacketType;
+                  packetType:(MPPPacketType)inputPacketType;

 /// Runs a simple graph, providing a single frame to zero or more inputs. Input images are wrapped
 /// in packets each with timestamp mediapipe::Timestamp(1). Those packets are added to the

@@ -53,7 +53,7 @@
        withInputPixelBuffers:
            (const std::unordered_map<std::string, CFHolder<CVPixelBufferRef>>&)inputBuffers
                 outputStream:(const std::string&)output
-                  packetType:(MediaPipePacketType)inputPacketType;
+                  packetType:(MPPPacketType)inputPacketType;

 /// Loads a data file from the test bundle.
 - (NSData*)testDataNamed:(NSString*)name extension:(NSString*)extension;

@@ -81,7 +81,7 @@ static void EnsureOutputDirFor(NSString *outputFile) {
                 inputPackets:(const std::map<std::string, mediapipe::Packet>&)inputPackets
                    timestamp:(mediapipe::Timestamp)timestamp
                 outputStream:(const std::string&)outputStream
-                  packetType:(MediaPipePacketType)inputPacketType {
+                  packetType:(MPPPacketType)inputPacketType {
   __block CVPixelBufferRef output;
   graph.delegate = self;

@@ -143,7 +143,7 @@ static void EnsureOutputDirFor(NSString *outputFile) {

 - (CVPixelBufferRef)runGraph:(MPPGraph*)graph
              withPixelBuffer:(CVPixelBufferRef)inputBuffer
-                  packetType:(MediaPipePacketType)inputPacketType {
+                  packetType:(MPPPacketType)inputPacketType {
   return [self runGraph:graph
       withInputPixelBuffers:{{"input_frames", MakeCFHolder(inputBuffer)}}
            inputPackets:{}

@@ -156,7 +156,7 @@ static void EnsureOutputDirFor(NSString *outputFile) {
        withInputPixelBuffers:
            (const std::unordered_map<std::string, CFHolder<CVPixelBufferRef>>&)inputBuffers
                 outputStream:(const std::string&)output
-                  packetType:(MediaPipePacketType)inputPacketType {
+                  packetType:(MPPPacketType)inputPacketType {
   return [self runGraph:graph
       withInputPixelBuffers:inputBuffers
            inputPackets:{}

@@ -362,7 +362,7 @@ static void EnsureOutputDirFor(NSString *outputFile) {
   {
     CVPixelBufferRef outputBuffer = [self runGraph:graph
                                    withPixelBuffer:inputBuffer
-                                        packetType:MediaPipePacketPixelBuffer];
+                                        packetType:MPPPacketTypePixelBuffer];
 #if DEBUG
     // Xcode can display UIImage objects right in the debugger. It is handy to
     // have these variables defined if the test fails.

@@ -405,7 +405,7 @@ static void EnsureOutputDirFor(NSString *outputFile) {
   MPPGraph* graph = [[MPPGraph alloc] initWithGraphConfig:config];
   [graph addSidePackets:sidePackets];
   [graph addFrameOutputStream:outputStream.UTF8String
-           outputPacketType:MediaPipePacketPixelBuffer];
+           outputPacketType:MPPPacketTypePixelBuffer];

   std::unordered_map<std::string, CFHolder<CVPixelBufferRef>> inputBuffers;
   for (NSString* inputStream in fileInputs) {

@@ -428,7 +428,7 @@ static void EnsureOutputDirFor(NSString *outputFile) {
                  inputPackets:packetInputs
                     timestamp:timestamp
                  outputStream:outputStream.UTF8String
-                   packetType:MediaPipePacketPixelBuffer];
+                   packetType:MPPPacketTypePixelBuffer];

   UIImage* output = UIImageWithPixelBuffer(outputBuffer);
   XCTAssertNotNil(output);

@@ -125,13 +125,13 @@ REGISTER_CALCULATOR(ErrorCalculator);
   node->add_output_stream("output_frames");

   _graph = [[MPPGraph alloc] initWithGraphConfig:config];
-  [_graph addFrameOutputStream:"output_frames" outputPacketType:MediaPipePacketPixelBuffer];
+  [_graph addFrameOutputStream:"output_frames" outputPacketType:MPPPacketTypePixelBuffer];
   CFHolder<CVPixelBufferRef> inputBuffer;
   ::mediapipe::Status status = CreateCVPixelBufferFromCGImage(_sourceImage.CGImage, &inputBuffer);
   XCTAssert(status.ok());
   CVPixelBufferRef outputBuffer = [self runGraph:_graph
                                  withPixelBuffer:*inputBuffer
-                                      packetType:MediaPipePacketPixelBuffer];
+                                      packetType:MPPPacketTypePixelBuffer];
   XCTAssert([self pixelBuffer:outputBuffer isEqualTo:*inputBuffer]);
 }

@@ -162,8 +162,8 @@ REGISTER_CALCULATOR(ErrorCalculator);
   grayNode->add_output_stream("gray_frames");

   _graph = [[MPPGraph alloc] initWithGraphConfig:config];
-  [_graph addFrameOutputStream:"pass_frames" outputPacketType:MediaPipePacketImageFrame];
+  [_graph addFrameOutputStream:"pass_frames" outputPacketType:MPPPacketTypeImageFrame];
-  [_graph addFrameOutputStream:"gray_frames" outputPacketType:MediaPipePacketImageFrame];
+  [_graph addFrameOutputStream:"gray_frames" outputPacketType:MPPPacketTypeImageFrame];

   CFHolder<CVPixelBufferRef> inputBuffer;
   ::mediapipe::Status status = CreateCVPixelBufferFromCGImage(_sourceImage.CGImage, &inputBuffer);

@@ -185,7 +185,7 @@ REGISTER_CALCULATOR(ErrorCalculator);
     }
   };

-  [self runGraph:_graph withPixelBuffer:*inputBuffer packetType:MediaPipePacketImageFrame];
+  [self runGraph:_graph withPixelBuffer:*inputBuffer packetType:MPPPacketTypeImageFrame];
 }

 - (void)testGrayscaleOutput {

@@ -202,13 +202,13 @@ REGISTER_CALCULATOR(ErrorCalculator);
   node->add_output_stream("output_frames");

   _graph = [[MPPGraph alloc] initWithGraphConfig:config];
-  [_graph addFrameOutputStream:"output_frames" outputPacketType:MediaPipePacketImageFrame];
+  [_graph addFrameOutputStream:"output_frames" outputPacketType:MPPPacketTypeImageFrame];
   CFHolder<CVPixelBufferRef> inputBuffer;
   ::mediapipe::Status status = CreateCVPixelBufferFromCGImage(grayImage.CGImage, &inputBuffer);
   XCTAssert(status.ok());
   CVPixelBufferRef outputBuffer = [self runGraph:_graph
                                  withPixelBuffer:*inputBuffer
-                                      packetType:MediaPipePacketImageFrame];
+                                      packetType:MPPPacketTypeImageFrame];
   // We accept a small difference due to gamma correction and whatnot.
   XCTAssert([self pixelBuffer:outputBuffer isCloseTo:*inputBuffer
              maxLocalDifference:5 maxAverageDifference:FLT_MAX]);

@@ -226,13 +226,13 @@ REGISTER_CALCULATOR(ErrorCalculator);
       CreateCVPixelBufferFromCGImage(_sourceImage.CGImage, &srcPixelBuffer);
   XCTAssert(status.ok());
   _graph = [[MPPGraph alloc] initWithGraphConfig:config];
-  [_graph addFrameOutputStream:"output_frames" outputPacketType:MediaPipePacketImageFrame];
+  [_graph addFrameOutputStream:"output_frames" outputPacketType:MPPPacketTypeImageFrame];
   _graph.delegate = self;

   XCTAssert([_graph startWithError:nil]);
   [_graph sendPixelBuffer:*srcPixelBuffer
               intoStream:"input_frames"
-              packetType:MediaPipePacketImageFrame];
+              packetType:MPPPacketTypeImageFrame];
   XCTAssert([_graph closeInputStream:"input_frames" error:nil]);

   __block NSError* error = nil;

@@ -259,7 +259,7 @@ REGISTER_CALCULATOR(ErrorCalculator);
   node->add_output_stream("output_frames");

   _graph = [[MPPGraph alloc] initWithGraphConfig:config];
-  [_graph addFrameOutputStream:"output_frames" outputPacketType:MediaPipePacketImageFrame];
+  [_graph addFrameOutputStream:"output_frames" outputPacketType:MPPPacketTypeImageFrame];

   // We're no longer using video headers, let's just use an int as the header.
   auto header_packet = mediapipe::MakePacket<int>(0xDEADBEEF);

@@ -288,13 +288,13 @@ REGISTER_CALCULATOR(ErrorCalculator);
   node->add_output_stream("output_frames");

   _graph = [[MPPGraph alloc] initWithGraphConfig:config];
-  [_graph addFrameOutputStream:"output_frames" outputPacketType:MediaPipePacketPixelBuffer];
+  [_graph addFrameOutputStream:"output_frames" outputPacketType:MPPPacketTypePixelBuffer];
   CFHolder<CVPixelBufferRef> inputBuffer;
   ::mediapipe::Status status = CreateCVPixelBufferFromCGImage(_sourceImage.CGImage, &inputBuffer);
   XCTAssert(status.ok());
   CVPixelBufferRef outputBuffer = [self runGraph:_graph
                                  withPixelBuffer:*inputBuffer
-                                      packetType:MediaPipePacketPixelBuffer];
+                                      packetType:MPPPacketTypePixelBuffer];
   __weak MPPGraph* weakGraph = _graph;
   _graph = nil;
   XCTAssertNil(weakGraph);

@@ -311,7 +311,7 @@ REGISTER_CALCULATOR(ErrorCalculator);
   const int kTestValue = 10;

   _graph = [[MPPGraph alloc] initWithGraphConfig:config];
-  [_graph addFrameOutputStream:"output_ints" outputPacketType:MediaPipePacketRaw];
+  [_graph addFrameOutputStream:"output_ints" outputPacketType:MPPPacketTypeRaw];
   _graph.delegate = self;

   WEAKIFY(self);

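For completeness, a full test of the shape used throughout this file, written against the renamed constants, might read as follows. PassThroughCalculator and the stream names are illustrative, and the helpers (runGraph:withPixelBuffer:packetType: and pixelBuffer:isEqualTo:) are the MPPGraphTestBase ones shown earlier in this diff.

```objectivec
// Hypothetical end-to-end sketch of a pass-through test using MPPPacketTypePixelBuffer.
- (void)testPixelBufferPassThrough {
  mediapipe::CalculatorGraphConfig config;
  config.add_input_stream("input_frames");
  auto* node = config.add_node();
  node->set_calculator("PassThroughCalculator");
  node->add_input_stream("input_frames");
  node->add_output_stream("output_frames");

  _graph = [[MPPGraph alloc] initWithGraphConfig:config];
  [_graph addFrameOutputStream:"output_frames" outputPacketType:MPPPacketTypePixelBuffer];

  CFHolder<CVPixelBufferRef> inputBuffer;
  ::mediapipe::Status status = CreateCVPixelBufferFromCGImage(_sourceImage.CGImage, &inputBuffer);
  XCTAssert(status.ok());

  CVPixelBufferRef outputBuffer = [self runGraph:_graph
                                 withPixelBuffer:*inputBuffer
                                      packetType:MPPPacketTypePixelBuffer];
  XCTAssert([self pixelBuffer:outputBuffer isEqualTo:*inputBuffer]);
}
```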
@@ -26,12 +26,12 @@
 - (void)renderPixelBuffer:(CVPixelBufferRef)pixelBuffer;

 /// Sets which way to rotate input frames before rendering them.
-/// Default value is MediaPipeFrameRotationNone.
+/// Default value is MPPFrameRotationNone.
-@property(nonatomic) MediaPipeFrameRotationMode frameRotationMode;
+@property(nonatomic) MPPFrameRotation frameRotationMode;

 /// Sets how to scale the frame within the layer.
 /// Default value is MediaPipeFrameScaleScaleToFit.
-@property(nonatomic) MediaPipeFrameScaleMode frameScaleMode;
+@property(nonatomic) MPPFrameScaleMode frameScaleMode;

 /// If YES, swap left and right. Useful for the front camera.
 @property(nonatomic) BOOL mirrored;

@@ -73,19 +73,19 @@
   if (!success) NSLog(@"presentRenderbuffer failed");
 }

-- (MediaPipeFrameRotationMode)frameRotationMode {
+- (MPPFrameRotation)frameRotationMode {
   return _glRenderer.frameRotationMode;
 }

-- (void)setFrameRotationMode:(MediaPipeFrameRotationMode)frameRotationMode {
+- (void)setFrameRotationMode:(MPPFrameRotation)frameRotationMode {
   _glRenderer.frameRotationMode = frameRotationMode;
 }

-- (MediaPipeFrameScaleMode)frameScaleMode {
+- (MPPFrameScaleMode)frameScaleMode {
   return _glRenderer.frameScaleMode;
 }

-- (void)setFrameScaleMode:(MediaPipeFrameScaleMode)frameScaleMode {
+- (void)setFrameScaleMode:(MPPFrameScaleMode)frameScaleMode {
   _glRenderer.frameScaleMode = frameScaleMode;
 }

third_party/glog.BUILD (vendored)

@@ -170,8 +170,11 @@ genrule(
     name = "logging_h",
     srcs = select({
         "//conditions:default": ["src/glog/logging.h.tmp"],
-        ":android_arm": ["src/glog/logging.h.android_arm"],
+        ":android_arm": ["src/glog/logging.h.arm"],
-        ":android_arm64": ["src/glog/logging.h.android_arm"],
+        ":android_arm64": ["src/glog/logging.h.arm"],
+        ":ios_armv7": ["src/glog/logging.h.arm"],
+        ":ios_arm64": ["src/glog/logging.h.arm"],
+        ":ios_arm64e": ["src/glog/logging.h.arm"],
     }),
     outs = ["src/glog/logging.h"],
     cmd = "echo select $< to be the glog logging.h file. && cp $< $@",

@@ -371,9 +374,9 @@ genrule(
 )

 genrule(
-    name = "generate_android_arm_glog_logging_h",
+    name = "generate_arm_glog_logging_h",
     srcs = ["src/glog/logging.h.in"],
-    outs = ["src/glog/logging.h.android_arm"],
+    outs = ["src/glog/logging.h.arm"],
     cmd = ("sed -e 's/@ac_cv___attribute___noinline@/__attribute__((__noinline__))/g'" +
            " -e 's/@ac_cv___attribute___noreturn@/__attribute__((__noreturn__))/g'" +
            " -e 's/@ac_cv_have___builtin_expect@/1/g'" +