Project import generated by Copybara.
PiperOrigin-RevId: 264105834
This commit is contained in:
parent 71a47bb18b
commit f5df228d9b
@@ -8,33 +8,24 @@ that performs face detection with TensorFlow Lite on GPU.

## Android

Please see [Hello World! in MediaPipe on Android](hello_world_android.md) for
general instructions to develop an Android application that uses MediaPipe.
[Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectiongpu)

The graph below is used in the
[Face Detection GPU Android example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectiongpu).
To build the app, run:
To build and install the app:

```bash
bazel build -c opt --config=android_arm64 mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectiongpu
```

To further install the app on an Android device, run:

```bash
adb install bazel-bin/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectiongpu/facedetectiongpu.apk
```

## iOS

Please see [Hello World! in MediaPipe on iOS](hello_world_ios.md) for general
instructions to develop an iOS application that uses MediaPipe.
[Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/facedetectiongpu).

The graph below is used in the
[Face Detection GPU iOS example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/facedetectiongpu).
To build the app, please see the general
[MediaPipe iOS app building and setup instructions](./mediapipe_ios_setup.md).
Specific to this example, run:
See the general [instructions](./mediapipe_ios_setup.md) for building iOS
examples and generating an Xcode project. This will be the FaceDetectionGpuApp
target.

To build on the command line:

```bash
bazel build -c opt --config=ios_arm64 mediapipe/examples/ios/facedetectiongpu:FaceDetectionGpuApp

@@ -51,7 +42,7 @@ below and paste it into [MediaPipe Visualizer](https://viz.mediapipe.dev/).

```bash
# MediaPipe graph that performs face detection with TensorFlow Lite on GPU.
# Used in the example in
# Used in the examples in
# mediapipe/examples/android/src/java/com/mediapipe/apps/facedetectiongpu and
# mediapipe/examples/ios/facedetectiongpu.
@@ -227,9 +218,7 @@ node {
}
}

# Draws annotations and overlays them on top of a GPU copy of the original
# image coming into the graph. The calculator assumes that image origin is
# always at the top-left corner and renders text accordingly.
# Draws annotations and overlays them on top of the input images.
node {
calculator: "AnnotationOverlayCalculator"
input_stream: "INPUT_FRAME_GPU:throttled_input_video"
@@ -8,20 +8,12 @@ that performs hair segmentation with TensorFlow Lite on GPU.

## Android

Please see [Hello World! in MediaPipe on Android](hello_world_android.md) for
general instructions to develop an Android application that uses MediaPipe.
[Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/hairsegmentationgpu)

The graph below is used in the
[Hair Segmentation GPU Android example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/hairsegmentationgpu).
To build the app, run:
To build and install the app:

```bash
bazel build -c opt --config=android_arm64 mediapipe/examples/android/src/java/com/google/mediapipe/apps/hairsegmentationgpu
```

To further install the app on an Android device, run:

```bash
adb install bazel-bin/mediapipe/examples/android/src/java/com/google/mediapipe/apps/hairsegmentationgpu/hairsegmentationgpu.apk
```

@@ -37,7 +29,7 @@ below and paste it into [MediaPipe Visualizer](https://viz.mediapipe.dev/).

```bash
# MediaPipe graph that performs hair segmentation with TensorFlow Lite on GPU.
# Used in the example in
# mediapipe/examples/ios/hairsegmentationgpu.
# mediapipe/examples/android/src/java/com/mediapipe/apps/hairsegmentationgpu.

# Images on GPU coming into and out of the graph.
input_stream: "input_video"
@@ -84,14 +76,11 @@ node: {
}
}

# Waits for a mask from the previous round of hair segmentation to be fed back
# as an input, and caches it. Upon the arrival of an input image, it checks if
# there is a mask cached, and sends out the mask with the timestamp replaced by
# that of the input image. This is needed so that the "current image" and the
# "previous mask" share the same timestamp, and as a result can be synchronized
# and combined in the subsequent calculator. Note that upon the arrival of the
# very first input frame, an empty packet is sent out to jump start the feedback
# loop.
# Caches a mask fed back from the previous round of hair segmentation, and upon
# the arrival of the next input image sends out the cached mask with the
# timestamp replaced by that of the input image, essentially generating a packet
# that carries the previous mask. Note that upon the arrival of the very first
# input image, an empty packet is sent out to jump start the feedback loop.
node {
calculator: "PreviousLoopbackCalculator"
input_stream: "MAIN:throttled_input_video"

@@ -114,9 +103,9 @@ node {

# Converts the transformed input image on GPU into an image tensor stored in
# tflite::gpu::GlBuffer. The zero_center option is set to false to normalize the
# pixel values to [0.f, 1.f] as opposed to [-1.f, 1.f].
# With the max_num_channels option set to 4, all 4 RGBA channels are contained
# in the image tensor.
# pixel values to [0.f, 1.f] as opposed to [-1.f, 1.f]. With the
# max_num_channels option set to 4, all 4 RGBA channels are contained in the
# image tensor.
node {
calculator: "TfLiteConverterCalculator"
input_stream: "IMAGE_GPU:mask_embedded_input_video"
@@ -147,7 +136,7 @@ node {
node {
calculator: "TfLiteInferenceCalculator"
input_stream: "TENSORS_GPU:image_tensor"
output_stream: "TENSORS:segmentation_tensor"
output_stream: "TENSORS_GPU:segmentation_tensor"
input_side_packet: "CUSTOM_OP_RESOLVER:op_resolver"
node_options: {
[type.googleapis.com/mediapipe.TfLiteInferenceCalculatorOptions] {

@@ -157,23 +146,15 @@ node {
}
}

# The next step (tensors to segmentation) is not yet supported on iOS GPU.
# Convert the previous segmentation mask to CPU for processing.
node: {
calculator: "GpuBufferToImageFrameCalculator"
input_stream: "previous_hair_mask"
output_stream: "previous_hair_mask_cpu"
}

# Decodes the segmentation tensor generated by the TensorFlow Lite model into a
# mask of values in [0.f, 1.f], stored in the R channel of a CPU buffer. It also
# mask of values in [0.f, 1.f], stored in the R channel of a GPU buffer. It also
# takes the mask generated previously as another input to improve the temporal
# consistency.
node {
calculator: "TfLiteTensorsToSegmentationCalculator"
input_stream: "TENSORS:segmentation_tensor"
input_stream: "PREV_MASK:previous_hair_mask_cpu"
output_stream: "MASK:hair_mask_cpu"
input_stream: "TENSORS_GPU:segmentation_tensor"
input_stream: "PREV_MASK_GPU:previous_hair_mask"
output_stream: "MASK_GPU:hair_mask"
node_options: {
[type.googleapis.com/mediapipe.TfLiteTensorsToSegmentationCalculatorOptions] {
tensor_width: 512

@@ -185,13 +166,6 @@ node {
}
}

# Send the current segmentation mask to GPU for the last step, blending.
node: {
calculator: "ImageFrameToGpuBufferCalculator"
input_stream: "hair_mask_cpu"
output_stream: "hair_mask"
}

# Colors the hair segmentation with the color specified in the option.
node {
calculator: "RecolorCalculator"
@@ -20,18 +20,18 @@ confidence score to generate the hand rectangle, to be further utilized in the

## Android

Please see [Hello World! in MediaPipe on Android](hello_world_android.md) for
general instructions to develop an Android application that uses MediaPipe.
[Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handdetectiongpu)

The graph below is used in the
[Hand Detection GPU Android example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handdetectiongpu).
To build the app, run:
An arm64 APK can be
[downloaded here](https://drive.google.com/open?id=1qUlTtH7Ydg-wl_H6VVL8vueu2UCTu37E).

To build the app yourself:

```bash
bazel build -c opt --config=android_arm64 mediapipe/examples/android/src/java/com/google/mediapipe/apps/handdetectiongpu
```

To further install the app on an Android device, run:
Once the app is built, install it on an Android device with:

```bash
adb install bazel-bin/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handdetectiongpu/handdetectiongpu.apk

@@ -39,14 +39,13 @@ adb install bazel-bin/mediapipe/examples/android/src/java/com/google/mediapipe/a

## iOS

Please see [Hello World! in MediaPipe on iOS](hello_world_ios.md) for general
instructions to develop an iOS application that uses MediaPipe.
[Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/handdetectiongpu).

The graph below is used in the
[Hand Detection GPU iOS example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/handdetectiongpu).
To build the app, please see the general
[MediaPipe iOS app building and setup instructions](./mediapipe_ios_setup.md).
Specific to this example, run:
See the general [instructions](./mediapipe_ios_setup.md) for building iOS
examples and generating an Xcode project. This will be the HandDetectionGpuApp
target.

To build on the command line:

```bash
bazel build -c opt --config=ios_arm64 mediapipe/examples/ios/handdetectiongpu:HandDetectionGpuApp
@@ -70,14 +69,24 @@ Visualizing Subgraphs section in the

```bash
# MediaPipe graph that performs hand detection with TensorFlow Lite on GPU.
# Used in the example in
# mediapipe/examples/android/src/java/com/mediapipe/apps/handdetectiongpu.
# Used in the examples in
# mediapipe/examples/android/src/java/com/mediapipe/apps/handdetectiongpu and
# mediapipe/examples/ios/handdetectiongpu.

# Images coming into and out of the graph.
input_stream: "input_video"
output_stream: "output_video"

# Throttles the images flowing downstream for flow control. It passes through
# the very first incoming image unaltered, and waits for HandDetectionSubgraph
# downstream in the graph to finish its tasks before it passes through another
# image. All images that come in while waiting are dropped, limiting the number
# of in-flight images in HandDetectionSubgraph to 1. This prevents the nodes in
# HandDetectionSubgraph from queuing up incoming images and data excessively,
# which leads to increased latency and memory usage, unwanted in real-time
# mobile applications. It also eliminates unnecessary computation, e.g., the
# output produced by a node in the subgraph may get dropped downstream if the
# subsequent nodes are still busy processing previous inputs.
node {
calculator: "FlowLimiterCalculator"
input_stream: "input_video"

@@ -89,6 +98,7 @@ node {
output_stream: "throttled_input_video"
}
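The flow-limiter node described above is split across the two hunks. For readability, a minimal sketch of a complete node of this kind follows; the FINISHED stream name and the back-edge block are assumptions, not taken from this diff.

```bash
# Illustrative sketch only: a complete flow-limiter node. The FINISHED stream
# name and the input_stream_info back edge are assumptions.
node {
  calculator: "FlowLimiterCalculator"
  input_stream: "input_video"
  input_stream: "FINISHED:hand_rect"
  input_stream_info: {
    tag_index: "FINISHED"
    back_edge: true
  }
  output_stream: "throttled_input_video"
}
```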
# Subgraph that detects hands (see hand_detection_gpu.pbtxt).
node {
calculator: "HandDetectionSubgraph"
input_stream: "throttled_input_video"

@@ -123,7 +133,7 @@ node {
}
}

# Draws annotations and overlays them on top of the input image into the graph.
# Draws annotations and overlays them on top of the input images.
node {
calculator: "AnnotationOverlayCalculator"
input_stream: "INPUT_FRAME_GPU:throttled_input_video"

@@ -271,8 +281,8 @@ node {
}
}

# Maps detection label IDs to the corresponding label text. The label map is
# provided in the label_map_path option.
# Maps detection label IDs to the corresponding label text ("Palm"). The label
# map is provided in the label_map_path option.
node {
calculator: "DetectionLabelIdToTextCalculator"
input_stream: "filtered_detections"
@@ -22,8 +22,8 @@ performed only within the hand rectangle for computational efficiency and
accuracy, and hand detection is only invoked when landmark localization could
not identify hand presence in the previous iteration.

The example also comes with an experimental mode that localizes hand landmarks
in 3D (i.e., estimating an extra z coordinate):
The example can also run in a mode that localizes hand landmarks in 3D (i.e.,
estimating an extra z coordinate):

![hand_tracking_3d_android_gpu.gif](images/mobile/hand_tracking_3d_android_gpu.gif)

@@ -33,24 +33,26 @@ camera.

## Android

Please see [Hello World! in MediaPipe on Android](hello_world_android.md) for
general instructions to develop an Android application that uses MediaPipe.
[Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu)

The graph below is used in the
[Hand Tracking GPU Android example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu).
To build the app, run:
An arm64 APK can be
[downloaded here](https://drive.google.com/open?id=1uCjS0y0O0dTDItsMh8x2cf4-l3uHW1vE),
and a version running the 3D mode can be
[downloaded here](https://drive.google.com/open?id=1tGgzOGkcZglJO2i7e8NKSxJgVtJYS3ka).

To build the app yourself, run:

```bash
bazel build -c opt --config=android_arm64 mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu
```

To build for the experimental mode that localizes hand landmarks in 3D, run:
To build for the 3D mode, run:

```bash
bazel build -c opt --config=android_arm64 --define 3D=true mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu
```

To further install the app on an Android device, run:
Once the app is built, install it on an Android device with:

```bash
adb install bazel-bin/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu/handtrackinggpu.apk
@@ -58,20 +60,19 @@ adb install bazel-bin/mediapipe/examples/android/src/java/com/google/mediapipe/a

## iOS

Please see [Hello World! in MediaPipe on iOS](hello_world_ios.md) for general
instructions to develop an iOS application that uses MediaPipe.
[Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/handtrackinggpu).

The graph below is used in the
[Hand Tracking GPU iOS example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/handtrackinggpu).
To build the app, please see the general
[MediaPipe iOS app building and setup instructions](./mediapipe_ios_setup.md).
Specific to this example, run:
See the general [instructions](./mediapipe_ios_setup.md) for building iOS
examples and generating an Xcode project. This will be the HandTrackingGpuApp
target.

To build on the command line:

```bash
bazel build -c opt --config=ios_arm64 mediapipe/examples/ios/handtrackinggpu:HandTrackingGpuApp
```

To build for the experimental mode that localizes hand landmarks in 3D, run:
To build for the 3D mode, run:

```bash
bazel build -c opt --config=ios_arm64 --define 3D=true mediapipe/examples/ios/handtrackinggpu:HandTrackingGpuApp
@@ -98,13 +99,24 @@ see the Visualizing Subgraphs section in the

```bash
# MediaPipe graph that performs hand tracking with TensorFlow Lite on GPU.
# Used in the example in
# mediapipe/examples/android/src/java/com/mediapipe/apps/handtrackinggpu.
# Used in the examples in
# mediapipe/examples/android/src/java/com/mediapipe/apps/handtrackinggpu and
# mediapipe/examples/ios/handtrackinggpu.

# Images coming into and out of the graph.
input_stream: "input_video"
output_stream: "output_video"

# Throttles the images flowing downstream for flow control. It passes through
# the very first incoming image unaltered, and waits for downstream nodes
# (calculators and subgraphs) in the graph to finish their tasks before it
# passes through another image. All images that come in while waiting are
# dropped, limiting the number of in-flight images in most parts of the graph to
# 1. This prevents the downstream nodes from queuing up incoming images and data
# excessively, which leads to increased latency and memory usage, unwanted in
# real-time mobile applications. It also eliminates unnecessary computation,
# e.g., the output produced by a node may get dropped downstream if the
# subsequent nodes are still busy processing previous inputs.
node {
calculator: "FlowLimiterCalculator"
input_stream: "input_video"

@@ -116,6 +128,12 @@ node {
output_stream: "throttled_input_video"
}

# Caches a hand-presence decision fed back from HandLandmarkSubgraph, and upon
# the arrival of the next input image sends out the cached decision with the
# timestamp replaced by that of the input image, essentially generating a packet
# that carries the previous hand-presence decision. Note that upon the arrival
# of the very first input image, an empty packet is sent out to jump start the
# feedback loop.
node {
calculator: "PreviousLoopbackCalculator"
input_stream: "MAIN:throttled_input_video"

@@ -127,6 +145,9 @@ node {
output_stream: "PREV_LOOP:prev_hand_presence"
}
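The loopback node described above is split across hunks. A minimal sketch of the complete feedback node follows, for illustration only; the LOOP stream name and the back-edge block are assumptions.

```bash
# Illustrative sketch only: a complete feedback (loopback) node. The LOOP
# stream name and the back edge are assumptions.
node {
  calculator: "PreviousLoopbackCalculator"
  input_stream: "MAIN:throttled_input_video"
  input_stream: "LOOP:hand_presence"
  input_stream_info: {
    tag_index: "LOOP"
    back_edge: true
  }
  output_stream: "PREV_LOOP:prev_hand_presence"
}
```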
# Drops the incoming image if HandLandmarkSubgraph was able to identify hand
# presence in the previous image. Otherwise, passes the incoming image through
# to trigger a new round of hand detection in HandDetectionSubgraph.
node {
calculator: "GateCalculator"
input_stream: "throttled_input_video"

@@ -140,6 +161,7 @@ node {
}
}
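To make the gating step concrete, a hedged sketch of a complete gate node follows; the DISALLOW input, the output stream, and the empty_packets_as_allow option are assumptions consistent with the comment above, not taken from this diff.

```bash
# Illustrative sketch only: a gate that drops images when hand presence was
# already established. The DISALLOW stream, output stream, and option are
# assumptions.
node {
  calculator: "GateCalculator"
  input_stream: "throttled_input_video"
  input_stream: "DISALLOW:prev_hand_presence"
  output_stream: "hand_detection_input_video"
  node_options: {
    [type.googleapis.com/mediapipe.GateCalculatorOptions] {
      empty_packets_as_allow: true
    }
  }
}
```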
# Subgraph that detects hands (see hand_detection_gpu.pbtxt).
node {
calculator: "HandDetectionSubgraph"
input_stream: "hand_detection_input_video"

@@ -147,6 +169,7 @@ node {
output_stream: "NORM_RECT:hand_rect_from_palm_detections"
}

# Subgraph that localizes hand landmarks (see hand_landmark_gpu.pbtxt).
node {
calculator: "HandLandmarkSubgraph"
input_stream: "IMAGE:throttled_input_video"

@@ -156,6 +179,12 @@ node {
output_stream: "PRESENCE:hand_presence"
}

# Caches a hand rectangle fed back from HandLandmarkSubgraph, and upon the
# arrival of the next input image sends out the cached rectangle with the
# timestamp replaced by that of the input image, essentially generating a packet
# that carries the previous hand rectangle. Note that upon the arrival of the
# very first input image, an empty packet is sent out to jump start the
# feedback loop.
node {
calculator: "PreviousLoopbackCalculator"
input_stream: "MAIN:throttled_input_video"

@@ -167,6 +196,14 @@ node {
output_stream: "PREV_LOOP:prev_hand_rect_from_landmarks"
}

# Merges a stream of hand rectangles generated by HandDetectionSubgraph and that
# generated by HandLandmarkSubgraph into a single output stream by selecting
# between one of the two streams. The former is selected if the incoming packet
# is not empty, i.e., hand detection is performed on the current image by
# HandDetectionSubgraph (because HandLandmarkSubgraph could not identify hand
# presence in the previous image). Otherwise, the latter is selected, which is
# never empty because HandLandmarkSubgraph processes all images (that went
# through FlowLimiterCalculator).
node {
calculator: "MergeCalculator"
input_stream: "hand_rect_from_palm_detections"

@@ -174,6 +211,8 @@ node {
output_stream: "hand_rect"
}
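For completeness, the merge described above can be read as the following sketch, assuming the second input is the cached rectangle produced by the loopback node; this is illustrative only.

```bash
# Illustrative sketch only: merging the freshly detected rectangle with the
# cached one from the previous image. The second input is an assumption.
node {
  calculator: "MergeCalculator"
  input_stream: "hand_rect_from_palm_detections"
  input_stream: "prev_hand_rect_from_landmarks"
  output_stream: "hand_rect"
}
```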
# Subgraph that renders annotations and overlays them on top of the input
# images (see renderer_gpu.pbtxt).
node {
calculator: "RendererSubgraph"
input_stream: "IMAGE:throttled_input_video"

@@ -322,8 +361,8 @@ node {
}
}

# Maps detection label IDs to the corresponding label text. The label map is
# provided in the label_map_path option.
# Maps detection label IDs to the corresponding label text ("Palm"). The label
# map is provided in the label_map_path option.
node {
calculator: "DetectionLabelIdToTextCalculator"
input_stream: "filtered_detections"

@@ -655,7 +694,7 @@ node {
landmark_connections: 20
landmark_color { r: 255 g: 0 b: 0 }
connection_color { r: 0 g: 255 b: 0 }
thickness: 5.0
thickness: 4.0
}
}
}
@@ -302,7 +302,7 @@ initialize the `_renderer` object:
_renderer = [[MPPLayerRenderer alloc] init];
_renderer.layer.frame = _liveView.layer.bounds;
[_liveView.layer addSublayer:_renderer.layer];
_renderer.frameScaleMode = MediaPipeFrameScaleFillAndCrop;
_renderer.frameScaleMode = MPPFrameScaleModeFillAndCrop;
```

To get frames from the camera, we will implement the following method:

@@ -444,7 +444,7 @@ using the following function:

// Create MediaPipe graph with mediapipe::CalculatorGraphConfig proto object.
MPPGraph* newGraph = [[MPPGraph alloc] initWithGraphConfig:config];
[newGraph addFrameOutputStream:kOutputStream outputPacketType:MediaPipePacketPixelBuffer];
[newGraph addFrameOutputStream:kOutputStream outputPacketType:MPPPacketTypePixelBuffer];
return newGraph;
}
```

@@ -508,12 +508,12 @@ this function's implementation to do the following:
}
[self.mediapipeGraph sendPixelBuffer:imageBuffer
intoStream:kInputStream
packetType:MediaPipePacketPixelBuffer];
packetType:MPPPacketTypePixelBuffer];
}
```

We send the `imageBuffer` to `self.mediapipeGraph` as a packet of type
`MediaPipePacketPixelBuffer` into the input stream `kInputStream`, i.e.
`MPPPacketTypePixelBuffer` into the input stream `kInputStream`, i.e.
"input_video".

The graph will run with this input packet and output a result in
@@ -28,14 +28,31 @@
ln -s ~/Downloads/MyProvisioningProfile.mobileprovision mediapipe/provisioning_profile.mobileprovision
```

Tip: You can use this command to see the provisioning profiles you have
previously downloaded using Xcode: `open ~/Library/MobileDevice/"Provisioning Profiles"`.
If there are none, generate and download a profile on [Apple's developer site](https://developer.apple.com/account/resources/).

## Creating an Xcode project

Note: This workflow requires a separate tool in addition to Bazel. If it fails
to work for any reason, you can always use the command-line build instructions
in the next section.

1. We will use a tool called [Tulsi](https://tulsi.bazel.build/) for generating Xcode projects from Bazel
build configurations.

IMPORTANT: At the time of this writing, Tulsi has a small [issue](https://github.com/bazelbuild/tulsi/issues/98)
that keeps it from building with Xcode 10.3. The instructions below apply a
fix from a [pull request](https://github.com/bazelbuild/tulsi/pull/99).

```bash
# cd out of the mediapipe directory, then:
git clone https://github.com/bazelbuild/tulsi.git
cd tulsi
# Apply the fix for Xcode 10.3 compatibility:
git fetch origin pull/99/head:xcodefix
git checkout xcodefix
# Now we can build Tulsi.
sh build_and_run.sh
```

@@ -51,12 +68,21 @@
4. You can now select any of the MediaPipe demos in the target menu, and build
and run them as normal.

Note: When you ask Xcode to run an app, by default it will use the Debug
configuration. Some of our demos are computationally heavy; you may want to use
the Release configuration for better performance.

Tip: To switch build configuration in Xcode, click on the target menu, choose
"Edit Scheme...", select the Run action, and switch the Build Configuration from
Debug to Release. Note that this is set independently for each target.

## Building an iOS app from the command line

1. Build one of the example apps for iOS. We will be using the
[Face Detection GPU App example](./face_detection_mobile_gpu.md)

```bash
cd mediapipe
bazel build --config=ios_arm64 mediapipe/examples/ios/facedetectiongpu:FaceDetectionGpuApp
```
@@ -16,33 +16,24 @@ CPU.

## Android

Please see [Hello World! in MediaPipe on Android](hello_world_android.md) for
general instructions to develop an Android application that uses MediaPipe.
[Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectioncpu)

The graph below is used in the
[Object Detection CPU Android example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectioncpu).
To build the app, run:
To build and install the app:

```bash
bazel build -c opt --config=android_arm64 mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectioncpu
```

To further install the app on an Android device, run:

```bash
adb install bazel-bin/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectioncpu/objectdetectioncpu.apk
```

## iOS

Please see [Hello World! in MediaPipe on iOS](hello_world_ios.md) for general
instructions to develop an iOS application that uses MediaPipe.
[Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/objectdetectioncpu).

The graph below is used in the
[Object Detection CPU iOS example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/objectdetectioncpu).
To build the app, please see the general
[MediaPipe iOS app building and setup instructions](./mediapipe_ios_setup.md).
Specific to this example, run:
See the general [instructions](./mediapipe_ios_setup.md) for building iOS
examples and generating an Xcode project. This will be the ObjectDetectionCpuApp
target.

To build on the command line:

```bash
bazel build -c opt --config=ios_arm64 mediapipe/examples/ios/objectdetectioncpu:ObjectDetectionCpuApp

@@ -59,7 +50,7 @@ below and paste it into [MediaPipe Visualizer](https://viz.mediapipe.dev/).

```bash
# MediaPipe graph that performs object detection with TensorFlow Lite on CPU.
# Used in the example in
# Used in the examples in
# mediapipe/examples/android/src/java/com/mediapipe/apps/objectdetectioncpu and
# mediapipe/examples/ios/objectdetectioncpu.
@@ -236,9 +227,7 @@ node {
}
}

# Draws annotations and overlays them on top of the CPU copy of the original
# image coming into the graph. The calculator assumes that image origin is
# always at the top-left corner and renders text accordingly.
# Draws annotations and overlays them on top of the input images.
node {
calculator: "AnnotationOverlayCalculator"
input_stream: "INPUT_FRAME:throttled_input_video_cpu"
@@ -8,33 +8,24 @@ that performs object detection with TensorFlow Lite on GPU.

## Android

Please see [Hello World! in MediaPipe on Android](hello_world_android.md) for
general instructions to develop an Android application that uses MediaPipe.
[Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectiongpu)

The graph below is used in the
[Object Detection GPU Android example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectiongpu).
To build the app, run:
To build and install the app:

```bash
bazel build -c opt --config=android_arm64 mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectiongpu
```

To further install the app on an Android device, run:

```bash
adb install bazel-bin/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectiongpu/objectdetectiongpu.apk
```

## iOS

Please see [Hello World! in MediaPipe on iOS](hello_world_ios.md) for general
instructions to develop an iOS application that uses MediaPipe.
[Source](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/objectdetectiongpu).

The graph below is used in the
[Object Detection GPU iOS example app](https://github.com/google/mediapipe/tree/master/mediapipe/examples/ios/objectdetectiongpu).
To build the app, please see the general
[MediaPipe iOS app building and setup instructions](./mediapipe_ios_setup.md).
Specific to this example, run:
See the general [instructions](./mediapipe_ios_setup.md) for building iOS
examples and generating an Xcode project. This will be the ObjectDetectionGpuApp
target.

To build on the command line:

```bash
bazel build -c opt --config=ios_arm64 mediapipe/examples/ios/objectdetectiongpu:ObjectDetectionGpuApp

@@ -51,7 +42,7 @@ below and paste it into [MediaPipe Visualizer](https://viz.mediapipe.dev/).

```bash
# MediaPipe graph that performs object detection with TensorFlow Lite on GPU.
# Used in the example in
# Used in the examples in
# mediapipe/examples/android/src/java/com/mediapipe/apps/objectdetectiongpu and
# mediapipe/examples/ios/objectdetectiongpu.
@@ -218,9 +209,7 @@ node {
}
}

# Draws annotations and overlays them on top of a GPU copy of the original
# image coming into the graph. The calculator assumes that image origin is
# always at the top-left corner and renders text accordingly.
# Draws annotations and overlays them on top of the input images.
node {
calculator: "AnnotationOverlayCalculator"
input_stream: "INPUT_FRAME_GPU:throttled_input_video"
@@ -79,7 +79,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";

// Create MediaPipe graph with mediapipe::CalculatorGraphConfig proto object.
MPPGraph* newGraph = [[MPPGraph alloc] initWithGraphConfig:config];
[newGraph addFrameOutputStream:kOutputStream outputPacketType:MediaPipePacketPixelBuffer];
[newGraph addFrameOutputStream:kOutputStream outputPacketType:MPPPacketTypePixelBuffer];
return newGraph;
}

@@ -91,7 +91,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
_renderer = [[MPPLayerRenderer alloc] init];
_renderer.layer.frame = _liveView.layer.bounds;
[_liveView.layer addSublayer:_renderer.layer];
_renderer.frameScaleMode = MediaPipeFrameScaleFillAndCrop;
_renderer.frameScaleMode = MPPFrameScaleModeFillAndCrop;

dispatch_queue_attr_t qosAttribute = dispatch_queue_attr_make_with_qos_class(
DISPATCH_QUEUE_SERIAL, QOS_CLASS_USER_INTERACTIVE, /*relative_priority=*/0);

@@ -170,7 +170,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
}
[self.mediapipeGraph sendPixelBuffer:imageBuffer
intoStream:kInputStream
packetType:MediaPipePacketPixelBuffer];
packetType:MPPPacketTypePixelBuffer];
}

@end

@@ -79,7 +79,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";

// Create MediaPipe graph with mediapipe::CalculatorGraphConfig proto object.
MPPGraph* newGraph = [[MPPGraph alloc] initWithGraphConfig:config];
[newGraph addFrameOutputStream:kOutputStream outputPacketType:MediaPipePacketPixelBuffer];
[newGraph addFrameOutputStream:kOutputStream outputPacketType:MPPPacketTypePixelBuffer];
return newGraph;
}

@@ -91,7 +91,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
_renderer = [[MPPLayerRenderer alloc] init];
_renderer.layer.frame = _liveView.layer.bounds;
[_liveView.layer addSublayer:_renderer.layer];
_renderer.frameScaleMode = MediaPipeFrameScaleFillAndCrop;
_renderer.frameScaleMode = MPPFrameScaleModeFillAndCrop;
// When using the front camera, mirror the input for a more natural look.
_renderer.mirrored = YES;

@@ -172,7 +172,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
}
[self.mediapipeGraph sendPixelBuffer:imageBuffer
intoStream:kInputStream
packetType:MediaPipePacketPixelBuffer];
packetType:MPPPacketTypePixelBuffer];
}

@end

@@ -79,7 +79,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";

// Create MediaPipe graph with mediapipe::CalculatorGraphConfig proto object.
MPPGraph* newGraph = [[MPPGraph alloc] initWithGraphConfig:config];
[newGraph addFrameOutputStream:kOutputStream outputPacketType:MediaPipePacketPixelBuffer];
[newGraph addFrameOutputStream:kOutputStream outputPacketType:MPPPacketTypePixelBuffer];
return newGraph;
}

@@ -91,7 +91,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
_renderer = [[MPPLayerRenderer alloc] init];
_renderer.layer.frame = _liveView.layer.bounds;
[_liveView.layer addSublayer:_renderer.layer];
_renderer.frameScaleMode = MediaPipeFrameScaleFillAndCrop;
_renderer.frameScaleMode = MPPFrameScaleModeFillAndCrop;
// When using the front camera, mirror the input for a more natural look.
_renderer.mirrored = YES;

@@ -172,7 +172,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
}
[self.mediapipeGraph sendPixelBuffer:imageBuffer
intoStream:kInputStream
packetType:MediaPipePacketPixelBuffer];
packetType:MPPPacketTypePixelBuffer];
}

@end

@@ -79,7 +79,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";

// Create MediaPipe graph with mediapipe::CalculatorGraphConfig proto object.
MPPGraph* newGraph = [[MPPGraph alloc] initWithGraphConfig:config];
[newGraph addFrameOutputStream:kOutputStream outputPacketType:MediaPipePacketPixelBuffer];
[newGraph addFrameOutputStream:kOutputStream outputPacketType:MPPPacketTypePixelBuffer];
return newGraph;
}

@@ -91,7 +91,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
_renderer = [[MPPLayerRenderer alloc] init];
_renderer.layer.frame = _liveView.layer.bounds;
[_liveView.layer addSublayer:_renderer.layer];
_renderer.frameScaleMode = MediaPipeFrameScaleFillAndCrop;
_renderer.frameScaleMode = MPPFrameScaleModeFillAndCrop;
// When using the front camera, mirror the input for a more natural look.
_renderer.mirrored = YES;

@@ -172,7 +172,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
}
[self.mediapipeGraph sendPixelBuffer:imageBuffer
intoStream:kInputStream
packetType:MediaPipePacketPixelBuffer];
packetType:MPPPacketTypePixelBuffer];
}

@end

@@ -79,7 +79,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";

// Create MediaPipe graph with mediapipe::CalculatorGraphConfig proto object.
MPPGraph* newGraph = [[MPPGraph alloc] initWithGraphConfig:config];
[newGraph addFrameOutputStream:kOutputStream outputPacketType:MediaPipePacketPixelBuffer];
[newGraph addFrameOutputStream:kOutputStream outputPacketType:MPPPacketTypePixelBuffer];
return newGraph;
}

@@ -91,7 +91,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
_renderer = [[MPPLayerRenderer alloc] init];
_renderer.layer.frame = _liveView.layer.bounds;
[_liveView.layer addSublayer:_renderer.layer];
_renderer.frameScaleMode = MediaPipeFrameScaleFillAndCrop;
_renderer.frameScaleMode = MPPFrameScaleModeFillAndCrop;
// When using the front camera, mirror the input for a more natural look.
_renderer.mirrored = YES;

@@ -172,7 +172,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
}
[self.mediapipeGraph sendPixelBuffer:imageBuffer
intoStream:kInputStream
packetType:MediaPipePacketPixelBuffer];
packetType:MPPPacketTypePixelBuffer];
}

@end

@@ -79,7 +79,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";

// Create MediaPipe graph with mediapipe::CalculatorGraphConfig proto object.
MPPGraph* newGraph = [[MPPGraph alloc] initWithGraphConfig:config];
[newGraph addFrameOutputStream:kOutputStream outputPacketType:MediaPipePacketPixelBuffer];
[newGraph addFrameOutputStream:kOutputStream outputPacketType:MPPPacketTypePixelBuffer];
return newGraph;
}

@@ -91,7 +91,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
_renderer = [[MPPLayerRenderer alloc] init];
_renderer.layer.frame = _liveView.layer.bounds;
[_liveView.layer addSublayer:_renderer.layer];
_renderer.frameScaleMode = MediaPipeFrameScaleFillAndCrop;
_renderer.frameScaleMode = MPPFrameScaleModeFillAndCrop;

dispatch_queue_attr_t qosAttribute = dispatch_queue_attr_make_with_qos_class(
DISPATCH_QUEUE_SERIAL, QOS_CLASS_USER_INTERACTIVE, /*relative_priority=*/0);

@@ -170,7 +170,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
}
[self.mediapipeGraph sendPixelBuffer:imageBuffer
intoStream:kInputStream
packetType:MediaPipePacketPixelBuffer];
packetType:MPPPacketTypePixelBuffer];
}

@end

@@ -79,7 +79,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";

// Create MediaPipe graph with mediapipe::CalculatorGraphConfig proto object.
MPPGraph* newGraph = [[MPPGraph alloc] initWithGraphConfig:config];
[newGraph addFrameOutputStream:kOutputStream outputPacketType:MediaPipePacketPixelBuffer];
[newGraph addFrameOutputStream:kOutputStream outputPacketType:MPPPacketTypePixelBuffer];
return newGraph;
}

@@ -91,7 +91,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
_renderer = [[MPPLayerRenderer alloc] init];
_renderer.layer.frame = _liveView.layer.bounds;
[_liveView.layer addSublayer:_renderer.layer];
_renderer.frameScaleMode = MediaPipeFrameScaleFillAndCrop;
_renderer.frameScaleMode = MPPFrameScaleModeFillAndCrop;

dispatch_queue_attr_t qosAttribute = dispatch_queue_attr_make_with_qos_class(
DISPATCH_QUEUE_SERIAL, QOS_CLASS_USER_INTERACTIVE, /*relative_priority=*/0);

@@ -170,7 +170,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue";
}
[self.mediapipeGraph sendPixelBuffer:imageBuffer
intoStream:kInputStream
packetType:MediaPipePacketPixelBuffer];
packetType:MPPPacketTypePixelBuffer];
}

@end
@@ -50,7 +50,7 @@ static const char* kOutputStream = "counter";
profilerConfig->set_trace_log_disabled(false);

MPPGraph* graph = [[MPPGraph alloc] initWithGraphConfig:graphConfig];
[graph addFrameOutputStream:kOutputStream outputPacketType:MediaPipePacketRaw];
[graph addFrameOutputStream:kOutputStream outputPacketType:MPPPacketTypeRaw];
graph.delegate = self;

NSError* error;

@@ -115,7 +115,7 @@

_graph = [[MPPGraph alloc] initWithGraphConfig:config];
[_graph addFrameOutputStream:"output_frames"
outputPacketType:MediaPipePacketPixelBuffer];
outputPacketType:MPPPacketTypePixelBuffer];
[self testGraph:_graph input:*originalPixelBuffer expectedOutput:*originalPixelBuffer];
}

@@ -143,7 +143,7 @@

_graph = [[MPPGraph alloc] initWithGraphConfig:config];
[_graph addFrameOutputStream:"output_frames"
outputPacketType:MediaPipePacketPixelBuffer];
outputPacketType:MPPPacketTypePixelBuffer];
[self testGraph:_graph input:*originalPixelBuffer expectedOutput:*originalPixelBuffer];
}

@@ -166,7 +166,7 @@

_graph = [[MPPGraph alloc] initWithGraphConfig:config];
[_graph addFrameOutputStream:"output_frames"
outputPacketType:MediaPipePacketPixelBuffer];
outputPacketType:MPPPacketTypePixelBuffer];
[self testGraph:_graph input:*originalPixelBuffer expectedOutput:*originalPixelBuffer];
}

@@ -210,7 +210,7 @@

_graph = [[MPPGraph alloc] initWithGraphConfig:config];
[_graph addFrameOutputStream:"output_frames"
outputPacketType:MediaPipePacketPixelBuffer];
outputPacketType:MPPPacketTypePixelBuffer];
[self testGraph:_graph input:convertedPixelBuffer expectedOutput:bgraPixelBuffer];
CFRelease(convertedPixelBuffer);
CFRelease(bgraPixelBuffer);

@@ -240,7 +240,7 @@

_graph = [[MPPGraph alloc] initWithGraphConfig:config];
[_graph addFrameOutputStream:"output_frames"
outputPacketType:MediaPipePacketPixelBuffer];
outputPacketType:MPPPacketTypePixelBuffer];
[_graph setSidePacket:(mediapipe::MakePacket<float[3]>(1.0, 0.0, 0.0))
named:"rgb_weights"];
@@ -15,20 +15,20 @@
#import <Foundation/Foundation.h>
#import <GLKit/GLKit.h>

/// Modes of clockwise rotation for input frames.
typedef enum {
MediaPipeFrameRotationNone,
MediaPipeFrameRotation90,
MediaPipeFrameRotation180,
MediaPipeFrameRotation270
} MediaPipeFrameRotationMode;
/// Modes of rotation (clockwise) for input frames.
typedef NS_ENUM(int, MPPFrameRotation) {
MPPFrameRotationNone,
MPPFrameRotationCw90,
MPPFrameRotationCw180,
MPPFrameRotationCw270,
};

typedef enum {
typedef NS_ENUM(int, MPPFrameScaleMode) {
// Scale the frame up to fit the drawing area, preserving aspect ratio; may letterbox.
MediaPipeFrameScaleFit,
MPPFrameScaleModeFit,
// Scale the frame up to fill the drawing area, preserving aspect ratio; may crop.
MediaPipeFrameScaleFillAndCrop,
} MediaPipeFrameScaleMode;
MPPFrameScaleModeFillAndCrop,
};

/// Renders frames in a GLKView.
@interface MPPGLViewRenderer : NSObject <GLKViewDelegate>

@@ -47,15 +47,15 @@ typedef enum {
@property(nonatomic, assign) BOOL retainsLastPixelBuffer;

/// Sets which way to rotate input frames before rendering them.
/// Default value is MediaPipeFrameRotationNone.
/// Default value is MPPFrameRotationNone.
/// Note that changing the transform property of a GLKView once rendering has
/// started causes problems inside GLKView. Instead, we perform the rotation
/// in our rendering code.
@property(nonatomic) MediaPipeFrameRotationMode frameRotationMode;
@property(nonatomic) MPPFrameRotation frameRotationMode;

/// Sets how to scale the frame within the view.
/// Default value is MediaPipeFrameScaleScaleToFit.
@property(nonatomic) MediaPipeFrameScaleMode frameScaleMode;
/// Default value is MPPFrameScaleModeFit.
@property(nonatomic) MPPFrameScaleMode frameScaleMode;

/// If YES, swap left and right. Useful for the front camera.
@property(nonatomic) BOOL mirrored;
@@ -46,8 +46,8 @@
if (self) {
_glContext = [[EAGLContext alloc] initWithAPI:kEAGLRenderingAPIOpenGLES2];
_bufferLock = OS_SPINLOCK_INIT;
_frameRotationMode = MediaPipeFrameRotationNone;
_frameScaleMode = MediaPipeFrameScaleFit;
_frameRotationMode = MPPFrameRotationNone;
_frameScaleMode = MPPFrameScaleModeFit;
}
return self;
}

@@ -90,24 +90,24 @@
@"renderer setup failed: %@", [NSError gus_errorWithStatus:status]);
}

mediapipe::FrameScaleMode InternalScaleMode(MediaPipeFrameScaleMode mode) {
mediapipe::FrameScaleMode InternalScaleMode(MPPFrameScaleMode mode) {
switch (mode) {
case MediaPipeFrameScaleFit:
case MPPFrameScaleModeFit:
return mediapipe::FrameScaleMode::kFit;
case MediaPipeFrameScaleFillAndCrop:
case MPPFrameScaleModeFillAndCrop:
return mediapipe::FrameScaleMode::kFillAndCrop;
}
}

mediapipe::FrameRotation InternalRotationMode(MediaPipeFrameRotationMode rot) {
mediapipe::FrameRotation InternalRotationMode(MPPFrameRotation rot) {
switch (rot) {
case MediaPipeFrameRotationNone:
case MPPFrameRotationNone:
return mediapipe::FrameRotation::kNone;
case MediaPipeFrameRotation90:
case MPPFrameRotationCw90:
return mediapipe::FrameRotation::k90;
case MediaPipeFrameRotation180:
case MPPFrameRotationCw180:
return mediapipe::FrameRotation::k180;
case MediaPipeFrameRotation270:
case MPPFrameRotationCw270:
return mediapipe::FrameRotation::k270;
}
}
@@ -50,7 +50,7 @@
MPPGraph* mediapipeGraph = [[MPPGraph alloc] initWithGraphConfig:config];
// We receive output by setting ourselves as the delegate.
mediapipeGraph.delegate = self;
[mediapipeGraph addFrameOutputStream:"output_video" outputPacketType:MediaPipePacketPixelBuffer];
[mediapipeGraph addFrameOutputStream:"output_video" outputPacketType:MPPPacketTypePixelBuffer];

// Start running the graph.
NSError *error;

@@ -60,7 +60,7 @@
// Send a frame.
XCTAssertTrue([mediapipeGraph sendPixelBuffer:*_inputPixelBuffer
intoStream:"input_video"
packetType:MediaPipePacketPixelBuffer
packetType:MPPPacketTypePixelBuffer
timestamp:mediapipe::Timestamp(0)]);

// Shut down the graph.
@@ -54,26 +54,26 @@ struct GpuSharedData;

/// Chooses the packet type used by MPPGraph to send and receive packets
/// from the graph.
typedef NS_ENUM(int, MediaPipePacketType) {
typedef NS_ENUM(int, MPPPacketType) {
/// Any packet type.
/// Calls mediapipeGraph:didOutputPacket:fromStream:
MediaPipePacketRaw,
MPPPacketTypeRaw,

/// CFHolder<CVPixelBufferRef>.
/// Calls mediapipeGraph:didOutputPixelBuffer:fromStream:
/// Use this packet type to pass GPU frames to calculators.
MediaPipePacketPixelBuffer,
MPPPacketTypePixelBuffer,

/// ImageFrame.
/// Calls mediapipeGraph:didOutputPixelBuffer:fromStream:
MediaPipePacketImageFrame,
MPPPacketTypeImageFrame,

/// RGBA ImageFrame, but do not swap the channels if the input pixel buffer
/// is BGRA. This is useful when the graph needs RGBA ImageFrames, but the
/// calculators do not care about the order of the channels, so BGRA data can
/// be used as-is.
/// Calls mediapipeGraph:didOutputPixelBuffer:fromStream:
MediaPipePacketImageFrameBGRANoSwap,
MPPPacketTypeImageFrameBGRANoSwap,
};

/// This class is an Objective-C wrapper around a MediaPipe graph object, and

@@ -129,7 +129,7 @@ typedef NS_ENUM(int, MediaPipePacketType) {
/// the delegate will receive frames.
/// @param packetType The type of packet provided by the output streams.
- (void)addFrameOutputStream:(const std::string&)outputStreamName
outputPacketType:(MediaPipePacketType)packetType;
outputPacketType:(MPPPacketType)packetType;

/// Starts running the graph.
/// @return YES if successful.

@@ -155,7 +155,7 @@ typedef NS_ENUM(int, MediaPipePacketType) {

/// Creates a MediaPipe packet wrapping the given pixelBuffer;
- (mediapipe::Packet)packetWithPixelBuffer:(CVPixelBufferRef)pixelBuffer
packetType:(MediaPipePacketType)packetType;
packetType:(MPPPacketType)packetType;

/// Sends a pixel buffer into a graph input stream, using the specified packet
/// type. The graph must have been started before calling this. Drops frames and

@@ -164,7 +164,7 @@ typedef NS_ENUM(int, MediaPipePacketType) {
/// possibly increased efficiency. Returns YES if the packet was successfully sent.
- (BOOL)sendPixelBuffer:(CVPixelBufferRef)imageBuffer
intoStream:(const std::string&)inputName
packetType:(MediaPipePacketType)packetType
packetType:(MPPPacketType)packetType
timestamp:(const mediapipe::Timestamp&)timestamp
allowOverwrite:(BOOL)allowOverwrite;

@@ -174,7 +174,7 @@ typedef NS_ENUM(int, MediaPipePacketType) {
/// successfully sent.
- (BOOL)sendPixelBuffer:(CVPixelBufferRef)pixelBuffer
intoStream:(const std::string&)inputName
packetType:(MediaPipePacketType)packetType
packetType:(MPPPacketType)packetType
timestamp:(const mediapipe::Timestamp&)timestamp;

/// Sends a pixel buffer into a graph input stream, using the specified packet

@@ -184,7 +184,7 @@ typedef NS_ENUM(int, MediaPipePacketType) {
/// packet was successfully sent.
- (BOOL)sendPixelBuffer:(CVPixelBufferRef)pixelBuffer
intoStream:(const std::string&)inputName
packetType:(MediaPipePacketType)packetType;
packetType:(MPPPacketType)packetType;

/// Cancels a graph run. You must still call waitUntilDoneWithError: after this.
- (void)cancel;
@ -77,7 +77,7 @@
|
|||
}
|
||||
|
||||
- (void)addFrameOutputStream:(const std::string&)outputStreamName
|
||||
outputPacketType:(MediaPipePacketType)packetType {
|
||||
outputPacketType:(MPPPacketType)packetType {
|
||||
std::string callbackInputName;
|
||||
mediapipe::tool::AddCallbackCalculator(outputStreamName, &_config, &callbackInputName,
|
||||
/*use_std_function=*/true);
|
||||
|
@ -99,14 +99,14 @@
|
|||
/// This is the function that gets called by the CallbackCalculator that
|
||||
/// receives the graph's output.
|
||||
void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,
|
||||
MediaPipePacketType packetType, const mediapipe::Packet& packet) {
|
||||
MPPPacketType packetType, const mediapipe::Packet& packet) {
|
||||
MPPGraph* wrapper = (__bridge MPPGraph*)wrapperVoid;
|
||||
@autoreleasepool {
|
||||
if (packetType == MediaPipePacketRaw) {
|
||||
if (packetType == MPPPacketTypeRaw) {
|
||||
[wrapper.delegate mediapipeGraph:wrapper
|
||||
didOutputPacket:packet
|
||||
fromStream:streamName];
|
||||
} else if (packetType == MediaPipePacketImageFrame) {
|
||||
} else if (packetType == MPPPacketTypeImageFrame) {
|
||||
const auto& frame = packet.Get<mediapipe::ImageFrame>();
|
||||
mediapipe::ImageFormat::Format format = frame.Format();
|
||||
|
||||
|
@ -162,7 +162,7 @@ void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,
|
|||
_GTMDevLog(@"unsupported ImageFormat: %d", format);
|
||||
}
|
||||
#if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
|
||||
} else if (packetType == MediaPipePacketPixelBuffer) {
|
||||
} else if (packetType == MPPPacketTypePixelBuffer) {
|
||||
CVPixelBufferRef pixelBuffer = packet.Get<mediapipe::GpuBuffer>().GetCVPixelBufferRef();
|
||||
if ([wrapper.delegate
|
||||
respondsToSelector:@selector
|
||||
|
@ -283,15 +283,15 @@ void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,
|
|||
}
|
||||
|
||||
- (mediapipe::Packet)packetWithPixelBuffer:(CVPixelBufferRef)imageBuffer
|
||||
packetType:(MediaPipePacketType)packetType {
|
||||
packetType:(MPPPacketType)packetType {
|
||||
mediapipe::Packet packet;
|
||||
if (packetType == MediaPipePacketImageFrame || packetType == MediaPipePacketImageFrameBGRANoSwap) {
|
||||
if (packetType == MPPPacketTypeImageFrame || packetType == MPPPacketTypeImageFrameBGRANoSwap) {
|
||||
auto frame = CreateImageFrameForCVPixelBuffer(
|
||||
imageBuffer, /* canOverwrite = */ false,
|
||||
/* bgrAsRgb = */ packetType == MediaPipePacketImageFrameBGRANoSwap);
|
||||
/* bgrAsRgb = */ packetType == MPPPacketTypeImageFrameBGRANoSwap);
|
||||
packet = mediapipe::Adopt(frame.release());
|
||||
#if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
|
||||
} else if (packetType == MediaPipePacketPixelBuffer) {
|
||||
} else if (packetType == MPPPacketTypePixelBuffer) {
|
||||
packet = mediapipe::MakePacket<mediapipe::GpuBuffer>(imageBuffer);
|
||||
#endif // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
|
||||
} else {
|
||||
|
@ -302,7 +302,7 @@ void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,
|
|||
|
||||
- (BOOL)sendPixelBuffer:(CVPixelBufferRef)imageBuffer
|
||||
intoStream:(const std::string&)inputName
|
||||
packetType:(MediaPipePacketType)packetType
|
||||
packetType:(MPPPacketType)packetType
|
||||
timestamp:(const mediapipe::Timestamp&)timestamp
|
||||
allowOverwrite:(BOOL)allowOverwrite {
|
||||
if (_maxFramesInFlight && _framesInFlight >= _maxFramesInFlight) return NO;
|
||||
|
@ -326,7 +326,7 @@ void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,
|
|||
|
||||
- (BOOL)sendPixelBuffer:(CVPixelBufferRef)imageBuffer
|
||||
intoStream:(const std::string&)inputName
|
||||
packetType:(MediaPipePacketType)packetType
|
||||
packetType:(MPPPacketType)packetType
|
||||
timestamp:(const mediapipe::Timestamp&)timestamp {
|
||||
return [self sendPixelBuffer:imageBuffer
|
||||
intoStream:inputName
|
||||
|
@ -337,7 +337,7 @@ void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,
|
|||
|
||||
- (BOOL)sendPixelBuffer:(CVPixelBufferRef)imageBuffer
|
||||
intoStream:(const std::string&)inputName
|
||||
packetType:(MediaPipePacketType)packetType {
|
||||
packetType:(MPPPacketType)packetType {
|
||||
_GTMDevAssert(_frameTimestamp < mediapipe::Timestamp::Done(),
|
||||
@"Trying to send frame after stream is done.");
|
||||
if (_frameTimestamp < mediapipe::Timestamp::Min()) {
|
||||
|
|
|
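To make the scope of the rename concrete, here is a minimal caller-side sketch (not part of this commit) using only selectors and constants that appear in the hunks above: addFrameOutputStream:outputPacketType:, startWithError:, sendPixelBuffer:intoStream:packetType:, closeInputStream:error:, and waitUntilDoneWithError:. The header path, the config argument's type, and the stream names "input_frames"/"output_frames" are assumptions borrowed from the test code, not guaranteed by this diff.

```objectivec
// Illustrative sketch only (assumed to live in an Objective-C++ .mm file).
// The import paths and CalculatorGraphConfig type are assumptions.
#import <CoreVideo/CoreVideo.h>
#import "mediapipe/objc/MPPGraph.h"

static void RunOneFrame(const mediapipe::CalculatorGraphConfig& config,
                        CVPixelBufferRef frame) {
  MPPGraph* graph = [[MPPGraph alloc] initWithGraphConfig:config];

  // A delegate would normally be set here to receive output packets.

  // Packet-type constants now use the MPPPacketType* names.
  [graph addFrameOutputStream:"output_frames"
             outputPacketType:MPPPacketTypePixelBuffer];

  NSError* error = nil;
  if (![graph startWithError:&error]) return;

  // Send the frame as a GPU pixel-buffer packet (formerly MediaPipePacketPixelBuffer).
  [graph sendPixelBuffer:frame
              intoStream:"input_frames"
              packetType:MPPPacketTypePixelBuffer];

  [graph closeInputStream:"input_frames" error:nil];
  [graph waitUntilDoneWithError:&error];
}
```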
@@ -43,7 +43,7 @@
 /// completes the run, and returns the output frame.
 - (CVPixelBufferRef)runGraph:(MPPGraph*)graph
              withPixelBuffer:(CVPixelBufferRef)inputBuffer
-                  packetType:(MediaPipePacketType)inputPacketType;
+                  packetType:(MPPPacketType)inputPacketType;

 /// Runs a simple graph, providing a single frame to zero or more inputs. Input images are wrapped
 /// in packets each with timestamp mediapipe::Timestamp(1). Those packets are added to the

@@ -53,7 +53,7 @@
         withInputPixelBuffers:
             (const std::unordered_map<std::string, CFHolder<CVPixelBufferRef>>&)inputBuffers
                  outputStream:(const std::string&)output
-                   packetType:(MediaPipePacketType)inputPacketType;
+                   packetType:(MPPPacketType)inputPacketType;

 /// Loads a data file from the test bundle.
 - (NSData*)testDataNamed:(NSString*)name extension:(NSString*)extension;
@@ -81,7 +81,7 @@ static void EnsureOutputDirFor(NSString *outputFile) {
                  inputPackets:(const std::map<std::string, mediapipe::Packet>&)inputPackets
                     timestamp:(mediapipe::Timestamp)timestamp
                  outputStream:(const std::string&)outputStream
-                   packetType:(MediaPipePacketType)inputPacketType {
+                   packetType:(MPPPacketType)inputPacketType {
   __block CVPixelBufferRef output;
   graph.delegate = self;

@@ -143,7 +143,7 @@ static void EnsureOutputDirFor(NSString *outputFile) {

 - (CVPixelBufferRef)runGraph:(MPPGraph*)graph
              withPixelBuffer:(CVPixelBufferRef)inputBuffer
-                  packetType:(MediaPipePacketType)inputPacketType {
+                  packetType:(MPPPacketType)inputPacketType {
   return [self runGraph:graph
       withInputPixelBuffers:{{"input_frames", MakeCFHolder(inputBuffer)}}
                inputPackets:{}

@@ -156,7 +156,7 @@ static void EnsureOutputDirFor(NSString *outputFile) {
         withInputPixelBuffers:
             (const std::unordered_map<std::string, CFHolder<CVPixelBufferRef>>&)inputBuffers
                  outputStream:(const std::string&)output
-                   packetType:(MediaPipePacketType)inputPacketType {
+                   packetType:(MPPPacketType)inputPacketType {
   return [self runGraph:graph
       withInputPixelBuffers:inputBuffers
                inputPackets:{}

@@ -362,7 +362,7 @@ static void EnsureOutputDirFor(NSString *outputFile) {
   {
     CVPixelBufferRef outputBuffer = [self runGraph:graph
                                    withPixelBuffer:inputBuffer
-                                        packetType:MediaPipePacketPixelBuffer];
+                                        packetType:MPPPacketTypePixelBuffer];
 #if DEBUG
     // Xcode can display UIImage objects right in the debugger. It is handy to
     // have these variables defined if the test fails.

@@ -405,7 +405,7 @@ static void EnsureOutputDirFor(NSString *outputFile) {
   MPPGraph* graph = [[MPPGraph alloc] initWithGraphConfig:config];
   [graph addSidePackets:sidePackets];
   [graph addFrameOutputStream:outputStream.UTF8String
-             outputPacketType:MediaPipePacketPixelBuffer];
+             outputPacketType:MPPPacketTypePixelBuffer];

   std::unordered_map<std::string, CFHolder<CVPixelBufferRef>> inputBuffers;
   for (NSString* inputStream in fileInputs) {

@@ -428,7 +428,7 @@ static void EnsureOutputDirFor(NSString *outputFile) {
                  inputPackets:packetInputs
                     timestamp:timestamp
                  outputStream:outputStream.UTF8String
-                   packetType:MediaPipePacketPixelBuffer];
+                   packetType:MPPPacketTypePixelBuffer];

   UIImage* output = UIImageWithPixelBuffer(outputBuffer);
   XCTAssertNotNil(output);
@@ -125,13 +125,13 @@ REGISTER_CALCULATOR(ErrorCalculator);
   node->add_output_stream("output_frames");

   _graph = [[MPPGraph alloc] initWithGraphConfig:config];
-  [_graph addFrameOutputStream:"output_frames" outputPacketType:MediaPipePacketPixelBuffer];
+  [_graph addFrameOutputStream:"output_frames" outputPacketType:MPPPacketTypePixelBuffer];
   CFHolder<CVPixelBufferRef> inputBuffer;
   ::mediapipe::Status status = CreateCVPixelBufferFromCGImage(_sourceImage.CGImage, &inputBuffer);
   XCTAssert(status.ok());
   CVPixelBufferRef outputBuffer = [self runGraph:_graph
                                  withPixelBuffer:*inputBuffer
-                                      packetType:MediaPipePacketPixelBuffer];
+                                      packetType:MPPPacketTypePixelBuffer];
   XCTAssert([self pixelBuffer:outputBuffer isEqualTo:*inputBuffer]);
 }

@@ -162,8 +162,8 @@ REGISTER_CALCULATOR(ErrorCalculator);
   grayNode->add_output_stream("gray_frames");

   _graph = [[MPPGraph alloc] initWithGraphConfig:config];
-  [_graph addFrameOutputStream:"pass_frames" outputPacketType:MediaPipePacketImageFrame];
-  [_graph addFrameOutputStream:"gray_frames" outputPacketType:MediaPipePacketImageFrame];
+  [_graph addFrameOutputStream:"pass_frames" outputPacketType:MPPPacketTypeImageFrame];
+  [_graph addFrameOutputStream:"gray_frames" outputPacketType:MPPPacketTypeImageFrame];

   CFHolder<CVPixelBufferRef> inputBuffer;
   ::mediapipe::Status status = CreateCVPixelBufferFromCGImage(_sourceImage.CGImage, &inputBuffer);

@@ -185,7 +185,7 @@ REGISTER_CALCULATOR(ErrorCalculator);
     }
   };

-  [self runGraph:_graph withPixelBuffer:*inputBuffer packetType:MediaPipePacketImageFrame];
+  [self runGraph:_graph withPixelBuffer:*inputBuffer packetType:MPPPacketTypeImageFrame];
 }

 - (void)testGrayscaleOutput {

@@ -202,13 +202,13 @@ REGISTER_CALCULATOR(ErrorCalculator);
   node->add_output_stream("output_frames");

   _graph = [[MPPGraph alloc] initWithGraphConfig:config];
-  [_graph addFrameOutputStream:"output_frames" outputPacketType:MediaPipePacketImageFrame];
+  [_graph addFrameOutputStream:"output_frames" outputPacketType:MPPPacketTypeImageFrame];
   CFHolder<CVPixelBufferRef> inputBuffer;
   ::mediapipe::Status status = CreateCVPixelBufferFromCGImage(grayImage.CGImage, &inputBuffer);
   XCTAssert(status.ok());
   CVPixelBufferRef outputBuffer = [self runGraph:_graph
                                  withPixelBuffer:*inputBuffer
-                                      packetType:MediaPipePacketImageFrame];
+                                      packetType:MPPPacketTypeImageFrame];
   // We accept a small difference due to gamma correction and whatnot.
   XCTAssert([self pixelBuffer:outputBuffer isCloseTo:*inputBuffer
              maxLocalDifference:5 maxAverageDifference:FLT_MAX]);

@@ -226,13 +226,13 @@ REGISTER_CALCULATOR(ErrorCalculator);
       CreateCVPixelBufferFromCGImage(_sourceImage.CGImage, &srcPixelBuffer);
   XCTAssert(status.ok());
   _graph = [[MPPGraph alloc] initWithGraphConfig:config];
-  [_graph addFrameOutputStream:"output_frames" outputPacketType:MediaPipePacketImageFrame];
+  [_graph addFrameOutputStream:"output_frames" outputPacketType:MPPPacketTypeImageFrame];
   _graph.delegate = self;

   XCTAssert([_graph startWithError:nil]);
   [_graph sendPixelBuffer:*srcPixelBuffer
               intoStream:"input_frames"
-              packetType:MediaPipePacketImageFrame];
+              packetType:MPPPacketTypeImageFrame];
   XCTAssert([_graph closeInputStream:"input_frames" error:nil]);

   __block NSError* error = nil;

@@ -259,7 +259,7 @@ REGISTER_CALCULATOR(ErrorCalculator);
   node->add_output_stream("output_frames");

   _graph = [[MPPGraph alloc] initWithGraphConfig:config];
-  [_graph addFrameOutputStream:"output_frames" outputPacketType:MediaPipePacketImageFrame];
+  [_graph addFrameOutputStream:"output_frames" outputPacketType:MPPPacketTypeImageFrame];

   // We're no longer using video headers, let's just use an int as the header.
   auto header_packet = mediapipe::MakePacket<int>(0xDEADBEEF);

@@ -288,13 +288,13 @@ REGISTER_CALCULATOR(ErrorCalculator);
   node->add_output_stream("output_frames");

   _graph = [[MPPGraph alloc] initWithGraphConfig:config];
-  [_graph addFrameOutputStream:"output_frames" outputPacketType:MediaPipePacketPixelBuffer];
+  [_graph addFrameOutputStream:"output_frames" outputPacketType:MPPPacketTypePixelBuffer];
   CFHolder<CVPixelBufferRef> inputBuffer;
   ::mediapipe::Status status = CreateCVPixelBufferFromCGImage(_sourceImage.CGImage, &inputBuffer);
   XCTAssert(status.ok());
   CVPixelBufferRef outputBuffer = [self runGraph:_graph
                                  withPixelBuffer:*inputBuffer
-                                      packetType:MediaPipePacketPixelBuffer];
+                                      packetType:MPPPacketTypePixelBuffer];
   __weak MPPGraph* weakGraph = _graph;
   _graph = nil;
   XCTAssertNil(weakGraph);

@@ -311,7 +311,7 @@ REGISTER_CALCULATOR(ErrorCalculator);
   const int kTestValue = 10;

   _graph = [[MPPGraph alloc] initWithGraphConfig:config];
-  [_graph addFrameOutputStream:"output_ints" outputPacketType:MediaPipePacketRaw];
+  [_graph addFrameOutputStream:"output_ints" outputPacketType:MPPPacketTypeRaw];
   _graph.delegate = self;

   WEAKIFY(self);
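The test base above hides most of the graph plumbing behind runGraph:withPixelBuffer:packetType:. As a rough sketch of how a test written against the renamed helpers might look, the following uses only selectors, types, and constants that appear in the hunks above; the test class name, the graph config, the header paths, and the bundled image name are assumptions, and the comparison helpers are assumed to come from the test base.

```objectivec
// Hypothetical test sketch, not taken from this commit.
#import <UIKit/UIKit.h>
#import <XCTest/XCTest.h>

#import "mediapipe/objc/MPPGraphTestBase.h"  // assumed path
#import "mediapipe/objc/util.h"              // assumed home of CreateCVPixelBufferFromCGImage

@interface MPPRenameExampleTests : MPPGraphTestBase
@end

@implementation MPPRenameExampleTests

- (void)testPassThroughUsesRenamedPacketTypes {
  // A trivial one-node graph; PassThroughCalculator is a standard MediaPipe calculator.
  mediapipe::CalculatorGraphConfig config;
  config.add_input_stream("input_frames");
  auto* node = config.add_node();
  node->set_calculator("PassThroughCalculator");
  node->add_input_stream("input_frames");
  node->add_output_stream("output_frames");

  MPPGraph* graph = [[MPPGraph alloc] initWithGraphConfig:config];
  [graph addFrameOutputStream:"output_frames" outputPacketType:MPPPacketTypeImageFrame];

  // Load a bundled test image (resource name is illustrative).
  UIImage* sourceImage =
      [UIImage imageWithData:[self testDataNamed:@"test_image" extension:@"png"]];
  CFHolder<CVPixelBufferRef> inputBuffer;
  ::mediapipe::Status status =
      CreateCVPixelBufferFromCGImage(sourceImage.CGImage, &inputBuffer);
  XCTAssert(status.ok());

  // runGraph:withPixelBuffer:packetType: now takes an MPPPacketType.
  CVPixelBufferRef outputBuffer = [self runGraph:graph
                                 withPixelBuffer:*inputBuffer
                                      packetType:MPPPacketTypeImageFrame];
  XCTAssert([self pixelBuffer:outputBuffer isEqualTo:*inputBuffer]);
}

@end
```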
@@ -26,12 +26,12 @@
 - (void)renderPixelBuffer:(CVPixelBufferRef)pixelBuffer;

 /// Sets which way to rotate input frames before rendering them.
-/// Default value is MediaPipeFrameRotationNone.
-@property(nonatomic) MediaPipeFrameRotationMode frameRotationMode;
+/// Default value is MPPFrameRotationNone.
+@property(nonatomic) MPPFrameRotation frameRotationMode;

 /// Sets how to scale the frame within the layer.
 /// Default value is MediaPipeFrameScaleScaleToFit.
-@property(nonatomic) MediaPipeFrameScaleMode frameScaleMode;
+@property(nonatomic) MPPFrameScaleMode frameScaleMode;

 /// If YES, swap left and right. Useful for the front camera.
 @property(nonatomic) BOOL mirrored;
@@ -73,19 +73,19 @@
   if (!success) NSLog(@"presentRenderbuffer failed");
 }

-- (MediaPipeFrameRotationMode)frameRotationMode {
+- (MPPFrameRotation)frameRotationMode {
   return _glRenderer.frameRotationMode;
 }

-- (void)setFrameRotationMode:(MediaPipeFrameRotationMode)frameRotationMode {
+- (void)setFrameRotationMode:(MPPFrameRotation)frameRotationMode {
   _glRenderer.frameRotationMode = frameRotationMode;
 }

-- (MediaPipeFrameScaleMode)frameScaleMode {
+- (MPPFrameScaleMode)frameScaleMode {
   return _glRenderer.frameScaleMode;
 }

-- (void)setFrameScaleMode:(MediaPipeFrameScaleMode)frameScaleMode {
+- (void)setFrameScaleMode:(MPPFrameScaleMode)frameScaleMode {
   _glRenderer.frameScaleMode = frameScaleMode;
 }
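On the rendering side, only the type names change; a caller keeps using the same properties. A minimal sketch follows, assuming the renderer class is named MPPLayerRenderer and the header path shown; renderPixelBuffer:, frameRotationMode, mirrored, and MPPFrameRotationNone are taken from the hunks above, while the scale-mode enum values are intentionally not named because they do not appear in this diff.

```objectivec
// Illustrative sketch only; class name and import path are assumptions.
#import "mediapipe/objc/MPPLayerRenderer.h"

static void DisplayFrame(MPPLayerRenderer* renderer, CVPixelBufferRef pixelBuffer) {
  // The rotation property now uses the MPPFrameRotation type;
  // MPPFrameRotationNone is the documented default.
  renderer.frameRotationMode = MPPFrameRotationNone;

  // Mirror the output, e.g. for the front camera.
  renderer.mirrored = YES;

  // Draw the frame into the renderer's layer.
  [renderer renderPixelBuffer:pixelBuffer];
}
```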
third_party/glog.BUILD (vendored)

@@ -170,8 +170,11 @@ genrule(
     name = "logging_h",
     srcs = select({
         "//conditions:default": ["src/glog/logging.h.tmp"],
-        ":android_arm": ["src/glog/logging.h.android_arm"],
-        ":android_arm64": ["src/glog/logging.h.android_arm"],
+        ":android_arm": ["src/glog/logging.h.arm"],
+        ":android_arm64": ["src/glog/logging.h.arm"],
+        ":ios_armv7": ["src/glog/logging.h.arm"],
+        ":ios_arm64": ["src/glog/logging.h.arm"],
+        ":ios_arm64e": ["src/glog/logging.h.arm"],
     }),
     outs = ["src/glog/logging.h"],
     cmd = "echo select $< to be the glog logging.h file. && cp $< $@",

@@ -371,9 +374,9 @@ genrule(
 )

 genrule(
-    name = "generate_android_arm_glog_logging_h",
+    name = "generate_arm_glog_logging_h",
     srcs = ["src/glog/logging.h.in"],
-    outs = ["src/glog/logging.h.android_arm"],
+    outs = ["src/glog/logging.h.arm"],
     cmd = ("sed -e 's/@ac_cv___attribute___noinline@/__attribute__((__noinline__))/g'" +
            " -e 's/@ac_cv___attribute___noreturn@/__attribute__((__noreturn__))/g'" +
            " -e 's/@ac_cv_have___builtin_expect@/1/g'" +