+   * This is only necessary if one wants to release the program while keeping the context around.
+   */
+  public void release() {
+    GLES20.glDeleteProgram(program);
+  }
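+
+  // A hypothetical call site (the surrounding name is illustrative, not part of this class):
+  // call release() when this renderer is discarded but the EGL context will be reused.
+  //
+  //   resultGlRenderer.release();  // deletes the shader program; the GL context stays valid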
+
+  private void drawDetection(Detection detection) {
+    if (!detection.hasLocationData()) {
+      return;
+    }
+    // Keypoint drawing is disabled in this renderer; the face-keypoint code is kept commented
+    // out below for reference.
+    // float[] points = new float[FaceKeypoint.NUM_KEY_POINTS * 2];
+    // for (int i = 0; i < FaceKeypoint.NUM_KEY_POINTS; ++i) {
+    //   points[2 * i] = detection.getLocationData().getRelativeKeypoints(i).getX();
+    //   points[2 * i + 1] = detection.getLocationData().getRelativeKeypoints(i).getY();
+    // }
+    // GLES20.glUniform4fv(colorHandle, 1, KEYPOINT_COLOR, 0);
+    // FloatBuffer vertexBuffer =
+    //     ByteBuffer.allocateDirect(points.length * 4)
+    //         .order(ByteOrder.nativeOrder())
+    //         .asFloatBuffer()
+    //         .put(points);
+    // vertexBuffer.position(0);
+    // GLES20.glEnableVertexAttribArray(positionHandle);
+    // GLES20.glVertexAttribPointer(positionHandle, 2, GLES20.GL_FLOAT, false, 0, vertexBuffer);
+    // GLES20.glDrawArrays(GLES20.GL_POINTS, 0, FaceKeypoint.NUM_KEY_POINTS);
+    if (!detection.getLocationData().hasRelativeBoundingBox()) {
+      return;
+    }
+    // Draw bounding box.
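+    // The bounding box coordinates are normalized to [0, 1] relative to the input image size.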
+    float left = detection.getLocationData().getRelativeBoundingBox().getXmin();
+    float top = detection.getLocationData().getRelativeBoundingBox().getYmin();
+    float right = left + detection.getLocationData().getRelativeBoundingBox().getWidth();
+    float bottom = top + detection.getLocationData().getRelativeBoundingBox().getHeight();
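+    // Note that drawLine takes coordinates in (y, x) order; draw the four edges of the box.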
+    drawLine(top, left, top, right);
+    drawLine(bottom, left, bottom, right);
+    drawLine(top, left, bottom, left);
+    drawLine(top, right, bottom, right);
+  }
+
+  private void drawLine(float y1, float x1, float y2, float x2) {
+    GLES20.glUniform4fv(colorHandle, 1, BBOX_COLOR, 0);
+    GLES20.glLineWidth(BBOX_THICKNESS);
+    float[] vertex = {x1, y1, x2, y2};
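+    // OpenGL ES requires vertex data in a direct buffer with native byte order; 4 bytes per float.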
+    FloatBuffer vertexBuffer =
+        ByteBuffer.allocateDirect(vertex.length * 4)
+            .order(ByteOrder.nativeOrder())
+            .asFloatBuffer()
+            .put(vertex);
+    vertexBuffer.position(0);
+    GLES20.glEnableVertexAttribArray(positionHandle);
+    GLES20.glVertexAttribPointer(positionHandle, 2, GLES20.GL_FLOAT, false, 0, vertexBuffer);
+    GLES20.glDrawArrays(GLES20.GL_LINES, 0, 2);
+  }
+}
diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/posetrackingsolutiongpu/res/drawable-v24/ic_launcher_foreground.xml b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/posetrackingsolutiongpu/res/drawable-v24/ic_launcher_foreground.xml
new file mode 100644
index 000000000..c7bd21dbd
--- /dev/null
+++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/posetrackingsolutiongpu/res/drawable-v24/ic_launcher_foreground.xml
@@ -0,0 +1,34 @@
+
diff --git a/mediapipe/java/com/google/mediapipe/solutions/posetracking/PoseTracking.java b/mediapipe/java/com/google/mediapipe/solutions/posetracking/PoseTracking.java
new file mode 100644
--- /dev/null
+++ b/mediapipe/java/com/google/mediapipe/solutions/posetracking/PoseTracking.java
+/**
+ * MediaPipe Pose Tracking processes a {@link TextureFrame} or a {@link Bitmap} and returns the
+ * {@link PoseTrackingResult} representing each detected pose. Please refer to
+ * https://solutions.mediapipe.dev/face_detection#android-solution-api for usage examples.
+ */
+public class PoseTracking extends ImageSolutionBase {
+  private static final String TAG = "PoseTracking";
+
+  private static final String SHORT_RANGE_GRAPH_NAME = "pose_tracking_gpu.binarypb";
+  private static final String FULL_RANGE_GRAPH_NAME = "face_detection_full_range_image.binarypb";
+  private static final String IMAGE_INPUT_STREAM = "input_video";
+  private static final ImmutableList
diff --git a/mediapipe/java/com/google/mediapipe/solutions/posetracking/PoseTrackingOptions.java b/mediapipe/java/com/google/mediapipe/solutions/posetracking/PoseTrackingOptions.java
new file mode 100644
--- /dev/null
+++ b/mediapipe/java/com/google/mediapipe/solutions/posetracking/PoseTrackingOptions.java
+/**
+ * staticImageMode: Whether to treat the input images as a batch of static and possibly unrelated
+ * images, or a video stream. Default to false. See details in
+ * https://solutions.mediapipe.dev/face_detection#static_image_mode.
+ *
+ * minDetectionConfidence: Minimum confidence value ([0.0, 1.0]) for face detection to be
+ * considered successful. See details in
+ * https://solutions.mediapipe.dev/face_detection#min_detection_confidence.
+ *
+ * modelSelection: 0 or 1. 0 to select a short-range model that works best for faces within 2
+ * meters from the camera, and 1 for a full-range model best for faces within 5 meters. See details
+ * in https://solutions.mediapipe.dev/face_detection#model_selection.
+ */
+@AutoValue
+public abstract class PoseTrackingOptions {
+  public abstract boolean staticImageMode();
+
+  public abstract int modelSelection();
+
+  public abstract float minDetectionConfidence();
+
+  public static Builder builder() {
+    return new AutoValue_PoseTrackingOptions.Builder().withDefaultValues();
+  }
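+
+  // A minimal usage sketch: override only the fields you need; the others keep the defaults
+  // set by withDefaultValues().
+  //
+  //   PoseTrackingOptions options =
+  //       PoseTrackingOptions.builder().setMinDetectionConfidence(0.7f).build();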
+
+  /** Builder for {@link PoseTrackingOptions}. */
+  @AutoValue.Builder
+  public abstract static class Builder {
+    public Builder withDefaultValues() {
+      return setStaticImageMode(false).setModelSelection(0).setMinDetectionConfidence(0.5f);
+    }
+
+    public abstract Builder setStaticImageMode(boolean value);
+
+    public abstract Builder setModelSelection(int value);
+
+    public abstract Builder setMinDetectionConfidence(float value);
+
+    public abstract PoseTrackingOptions build();
+  }
+}
diff --git a/mediapipe/java/com/google/mediapipe/solutions/posetracking/PoseTrackingResult.java b/mediapipe/java/com/google/mediapipe/solutions/posetracking/PoseTrackingResult.java
new file mode 100644
index 000000000..b82989f73
--- /dev/null
+++ b/mediapipe/java/com/google/mediapipe/solutions/posetracking/PoseTrackingResult.java
@@ -0,0 +1,65 @@
+// Copyright 2021 The MediaPipe Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package com.google.mediapipe.solutions.posetracking;
+
+import android.graphics.Bitmap;
+import com.google.auto.value.AutoBuilder;
+import com.google.common.collect.ImmutableList;
+import com.google.mediapipe.formats.proto.DetectionProto.Detection;
+import com.google.mediapipe.framework.Packet;
+import com.google.mediapipe.framework.TextureFrame;
+import com.google.mediapipe.solutioncore.ImageSolutionResult;
+import java.util.List;
+
+/**
+ * PoseTrackingResult contains the detected poses, and the input {@link Bitmap} or {@link
+ * TextureFrame}. If not in static image mode, the timestamp field will be set to the timestamp of
+ * the corresponding input image.
+ */
+public class PoseTrackingResult extends ImageSolutionResult {
+  private final ImmutableList