diff --git a/mediapipe/java/com/google/mediapipe/components/ExternalTextureConverter.java b/mediapipe/java/com/google/mediapipe/components/ExternalTextureConverter.java
index 42fcdb4d8..c375aa61f 100644
--- a/mediapipe/java/com/google/mediapipe/components/ExternalTextureConverter.java
+++ b/mediapipe/java/com/google/mediapipe/components/ExternalTextureConverter.java
@@ -103,6 +103,18 @@ public class ExternalTextureConverter implements TextureFrameProducer {
     }
   }
 
+  /**
+   * Re-renders the current frame. Notifies all consumers as if it were a new frame. This should not
+   * typically be used but can be useful for cases where the consumer has lost ownership of the most
+   * recent frame and needs to get it again. This does nothing if no frame has yet been received.
+   */
+  public void rerenderCurrentFrame() {
+    SurfaceTexture surfaceTexture = getSurfaceTexture();
+    if (thread != null && surfaceTexture != null && thread.getHasReceivedFirstFrame()) {
+      thread.onFrameAvailable(surfaceTexture);
+    }
+  }
+
   /**
    * Sets the new buffer pool size. This is safe to set at any time.
    *
@@ -278,6 +290,7 @@ public class ExternalTextureConverter implements TextureFrameProducer {
     private volatile SurfaceTexture internalSurfaceTexture = null;
     private int[] textures = null;
    private final List<TextureFrameConsumer> consumers;
+    private volatile boolean hasReceivedFirstFrame = false;
 
     private final Queue<PoolTextureFrame> framesAvailable = new ArrayDeque<>();
     private int framesInUse = 0;
@@ -335,6 +348,7 @@ public class ExternalTextureConverter implements TextureFrameProducer {
     }
 
     public void setSurfaceTexture(SurfaceTexture texture, int width, int height) {
+      hasReceivedFirstFrame = false;
       if (surfaceTexture != null) {
         surfaceTexture.setOnFrameAvailableListener(null);
       }
@@ -381,6 +395,10 @@ public class ExternalTextureConverter implements TextureFrameProducer {
       return surfaceTexture != null ? surfaceTexture : internalSurfaceTexture;
     }
 
+    public boolean getHasReceivedFirstFrame() {
+      return hasReceivedFirstFrame;
+    }
+
     @Override
     public void onFrameAvailable(SurfaceTexture surfaceTexture) {
       handler.post(() -> renderNext(surfaceTexture));
@@ -427,6 +445,7 @@ public class ExternalTextureConverter implements TextureFrameProducer {
         // pending on the handler. When that happens, we should simply disregard the call.
         return;
       }
+      hasReceivedFirstFrame = true;
       try {
         synchronized (consumers) {
           boolean frameUpdated = false;
diff --git a/mediapipe/tasks/cc/components/calculators/tensor/tensors_to_segmentation_calculator.cc b/mediapipe/tasks/cc/components/calculators/tensor/tensors_to_segmentation_calculator.cc
index 5e40d5d82..4ea41b163 100644
--- a/mediapipe/tasks/cc/components/calculators/tensor/tensors_to_segmentation_calculator.cc
+++ b/mediapipe/tasks/cc/components/calculators/tensor/tensors_to_segmentation_calculator.cc
@@ -159,12 +159,12 @@ absl::Status TensorsToSegmentationCalculator::Process(
     std::tie(output_width, output_height) = kOutputSizeIn(cc).Get();
   }
   Shape output_shape = {
-      .height = output_height,
-      .width = output_width,
-      .channels = options_.segmenter_options().output_type() ==
-                          SegmenterOptions::CATEGORY_MASK
-                      ? 1
-                      : input_shape.channels};
+      /* height= */ output_height,
+      /* width= */ output_width,
+      /* channels= */ options_.segmenter_options().output_type() ==
+              SegmenterOptions::CATEGORY_MASK
+          ? 1
+          : input_shape.channels};
 
   std::vector<Image> segmented_masks = GetSegmentationResult(
       input_shape, output_shape, input_tensor.GetCpuReadView().buffer());
diff --git a/mediapipe/tasks/cc/components/classification_postprocessing.cc b/mediapipe/tasks/cc/components/classification_postprocessing.cc
index ebc34b8fc..fc28391bb 100644
--- a/mediapipe/tasks/cc/components/classification_postprocessing.cc
+++ b/mediapipe/tasks/cc/components/classification_postprocessing.cc
@@ -148,8 +148,9 @@ absl::StatusOr<ClassificationHeadsProperties> GetClassificationHeadsProperties(
             num_output_tensors, output_tensors_metadata->size()),
         MediaPipeTasksStatus::kMetadataInconsistencyError);
   }
-  return ClassificationHeadsProperties{.num_heads = num_output_tensors,
-                                       .quantized = num_quantized_tensors > 0};
+  return ClassificationHeadsProperties{
+      /* num_heads= */ num_output_tensors,
+      /* quantized= */ num_quantized_tensors > 0};
 }
 
 // Builds the label map from the tensor metadata, if available.
diff --git a/mediapipe/tasks/cc/components/image_preprocessing.cc b/mediapipe/tasks/cc/components/image_preprocessing.cc
index 835196877..18958a911 100644
--- a/mediapipe/tasks/cc/components/image_preprocessing.cc
+++ b/mediapipe/tasks/cc/components/image_preprocessing.cc
@@ -226,12 +226,14 @@ class ImagePreprocessingSubgraph : public Subgraph {
 
     // Connect outputs.
     return {
-        .tensors = image_to_tensor[Output<std::vector<Tensor>>(kTensorsTag)],
-        .matrix = image_to_tensor[Output<std::array<float, 16>>(kMatrixTag)],
-        .letterbox_padding =
-            image_to_tensor[Output<std::array<float, 4>>(kLetterboxPaddingTag)],
-        .image_size = image_size[Output<std::pair<int, int>>(kSizeTag)],
-        .image = pass_through[Output<Image>("")],
+        /* tensors= */ image_to_tensor[Output<std::vector<Tensor>>(
+            kTensorsTag)],
+        /* matrix= */
+        image_to_tensor[Output<std::array<float, 16>>(kMatrixTag)],
+        /* letterbox_padding= */
+        image_to_tensor[Output<std::array<float, 4>>(kLetterboxPaddingTag)],
+        /* image_size= */ image_size[Output<std::pair<int, int>>(kSizeTag)],
+        /* image= */ pass_through[Output<Image>("")],
     };
   }
 };
diff --git a/mediapipe/tasks/cc/vision/hand_landmark/hand_landmark_detector_graph.cc b/mediapipe/tasks/cc/vision/hand_landmark/hand_landmark_detector_graph.cc
index f6bfbd1bf..c5677cd98 100644
--- a/mediapipe/tasks/cc/vision/hand_landmark/hand_landmark_detector_graph.cc
+++ b/mediapipe/tasks/cc/vision/hand_landmark/hand_landmark_detector_graph.cc
@@ -388,13 +388,13 @@ class HandLandmarkDetectorGraph : public core::ModelTaskGraph {
         hand_rect_transformation[Output<NormalizedRect>("")];
 
     return {{
-        .hand_landmarks = projected_landmarks,
-        .world_hand_landmarks = projected_world_landmarks,
-        .hand_rect_next_frame = hand_rect_next_frame,
-        .hand_presence = hand_presence,
-        .hand_presence_score = hand_presence_score,
-        .handedness = handedness,
-        .image_size = image_size,
+        /* hand_landmarks= */ projected_landmarks,
+        /* world_hand_landmarks= */ projected_world_landmarks,
+        /* hand_rect_next_frame= */ hand_rect_next_frame,
+        /* hand_presence= */ hand_presence,
+        /* hand_presence_score= */ hand_presence_score,
+        /* handedness= */ handedness,
+        /* image_size= */ image_size,
     }};
   }
 };
diff --git a/mediapipe/tasks/cc/vision/object_detector/object_detector_graph.cc b/mediapipe/tasks/cc/vision/object_detector/object_detector_graph.cc
index e5f441731..94f217378 100644
--- a/mediapipe/tasks/cc/vision/object_detector/object_detector_graph.cc
+++ b/mediapipe/tasks/cc/vision/object_detector/object_detector_graph.cc
@@ -531,9 +531,9 @@ class ObjectDetectorGraph : public core::ModelTaskGraph {
     // Outputs the labeled detections and the processed image as the subgraph
     // output streams.
     return {{
-        .detections =
-            detection_label_id_to_text[Output<std::vector<Detection>>("")],
-        .image = preprocessing[Output<Image>(kImageTag)],
+        /* detections= */
+        detection_label_id_to_text[Output<std::vector<Detection>>("")],
+        /* image= */ preprocessing[Output<Image>(kImageTag)],
     }};
   }
 };
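
For context, here is a minimal sketch of how the new `rerenderCurrentFrame()` entry point might be exercised from a consumer. The `ExternalTextureConverter` constructor, `setConsumer`, `TextureFrameConsumer.onNewFrame`, and `TextureFrame.release()` are existing MediaPipe APIs; the `FrameRecoverySketch` class and its `process`/`recoverLastFrame` methods are hypothetical names invented for illustration.

```java
import com.google.mediapipe.components.ExternalTextureConverter;
import com.google.mediapipe.components.TextureFrameConsumer;
import com.google.mediapipe.framework.TextureFrame;
import javax.microedition.khronos.egl.EGLContext;

/** Hypothetical consumer showing where rerenderCurrentFrame() is useful. */
final class FrameRecoverySketch implements TextureFrameConsumer {
  private final ExternalTextureConverter converter;

  FrameRecoverySketch(EGLContext parentContext) {
    // Two buffers: one frame in flight downstream, one being filled.
    converter = new ExternalTextureConverter(parentContext, /* numBuffers= */ 2);
    converter.setConsumer(this);
  }

  @Override
  public void onNewFrame(TextureFrame frame) {
    try {
      process(frame); // Hypothetical downstream processing.
    } finally {
      frame.release(); // Ownership of the frame returns to the converter's pool.
    }
  }

  /**
   * Called when downstream work fails after the frame was already released.
   * The converter replays the most recent frame to all consumers; per the
   * diff above, this is a no-op until a first frame has been received.
   */
  void recoverLastFrame() {
    converter.rerenderCurrentFrame();
  }

  private void process(TextureFrame frame) {
    // ... hypothetical processing that may fail after release ...
  }
}
```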
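As a design note, the C++ hunks all make the same mechanical change: designated initializers (standardized only in C++20, and otherwise a compiler extension) are replaced with positional initialization annotated by `/* field= */` comments, presumably to keep the code buildable on toolchains that reject the extension. The same argument-comment convention appears as `/* numBuffers= */` in the Java sketch above.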