Project import generated by Copybara.

GitOrigin-RevId: e207bb2a1b26cd799055d7735ed35ad2f0e56b83
This commit is contained in:
MediaPipe Team 2022-09-07 13:33:31 -07:00 committed by Sebastian Schmidt
parent d3f98334bf
commit ebec590cfe
6 changed files with 46 additions and 24 deletions

View File

@@ -103,6 +103,18 @@ public class ExternalTextureConverter implements TextureFrameProducer {
}
}
/**
 * Replays the most recently received frame, notifying every consumer exactly as if a new frame
 * had just arrived. Not needed in normal operation, but useful when a consumer has lost
 * ownership of the latest frame and must obtain it again. Does nothing until the first frame
 * has been received.
 */
public void rerenderCurrentFrame() {
  SurfaceTexture texture = getSurfaceTexture();
  if (thread == null || texture == null) {
    return;
  }
  if (thread.getHasReceivedFirstFrame()) {
    thread.onFrameAvailable(texture);
  }
}
/**
* Sets the new buffer pool size. This is safe to set at any time.
*
@@ -278,6 +290,7 @@ public class ExternalTextureConverter implements TextureFrameProducer {
private volatile SurfaceTexture internalSurfaceTexture = null;
private int[] textures = null;
private final List<TextureFrameConsumer> consumers;
private volatile boolean hasReceivedFirstFrame = false;
private final Queue<PoolTextureFrame> framesAvailable = new ArrayDeque<>();
private int framesInUse = 0;
@@ -335,6 +348,7 @@ public class ExternalTextureConverter implements TextureFrameProducer {
}
public void setSurfaceTexture(SurfaceTexture texture, int width, int height) {
hasReceivedFirstFrame = false;
if (surfaceTexture != null) {
surfaceTexture.setOnFrameAvailableListener(null);
}
@@ -381,6 +395,10 @@ public class ExternalTextureConverter implements TextureFrameProducer {
return surfaceTexture != null ? surfaceTexture : internalSurfaceTexture;
}
/** Reports whether at least one frame has arrived since the surface texture was last set. */
public boolean getHasReceivedFirstFrame() {
  return this.hasReceivedFirstFrame;
}
@Override
public void onFrameAvailable(SurfaceTexture surfaceTexture) {
handler.post(() -> renderNext(surfaceTexture));
@@ -427,6 +445,7 @@ public class ExternalTextureConverter implements TextureFrameProducer {
// pending on the handler. When that happens, we should simply disregard the call.
return;
}
hasReceivedFirstFrame = true;
try {
synchronized (consumers) {
boolean frameUpdated = false;

View File

@@ -159,12 +159,12 @@ absl::Status TensorsToSegmentationCalculator::Process(
std::tie(output_width, output_height) = kOutputSizeIn(cc).Get();
}
Shape output_shape = {
.height = output_height,
.width = output_width,
.channels = options_.segmenter_options().output_type() ==
SegmenterOptions::CATEGORY_MASK
? 1
: input_shape.channels};
/* height= */ output_height,
/* width= */ output_width,
/* channels= */ options_.segmenter_options().output_type() ==
SegmenterOptions::CATEGORY_MASK
? 1
: input_shape.channels};
std::vector<Image> segmented_masks = GetSegmentationResult(
input_shape, output_shape, input_tensor.GetCpuReadView().buffer<float>());

View File

@@ -148,8 +148,9 @@ absl::StatusOr<ClassificationHeadsProperties> GetClassificationHeadsProperties(
num_output_tensors, output_tensors_metadata->size()),
MediaPipeTasksStatus::kMetadataInconsistencyError);
}
return ClassificationHeadsProperties{.num_heads = num_output_tensors,
.quantized = num_quantized_tensors > 0};
return ClassificationHeadsProperties{
/* num_heads= */ num_output_tensors,
/* quantized= */ num_quantized_tensors > 0};
}
// Builds the label map from the tensor metadata, if available.

View File

@@ -226,12 +226,14 @@ class ImagePreprocessingSubgraph : public Subgraph {
// Connect outputs.
return {
.tensors = image_to_tensor[Output<std::vector<Tensor>>(kTensorsTag)],
.matrix = image_to_tensor[Output<std::array<float, 16>>(kMatrixTag)],
.letterbox_padding =
image_to_tensor[Output<std::array<float, 4>>(kLetterboxPaddingTag)],
.image_size = image_size[Output<std::pair<int, int>>(kSizeTag)],
.image = pass_through[Output<Image>("")],
/* tensors= */ image_to_tensor[Output<std::vector<Tensor>>(
kTensorsTag)],
/* matrix= */
image_to_tensor[Output<std::array<float, 16>>(kMatrixTag)],
/* letterbox_padding= */
image_to_tensor[Output<std::array<float, 4>>(kLetterboxPaddingTag)],
/* image_size= */ image_size[Output<std::pair<int, int>>(kSizeTag)],
/* image= */ pass_through[Output<Image>("")],
};
}
};

View File

@@ -388,13 +388,13 @@ class HandLandmarkDetectorGraph : public core::ModelTaskGraph {
hand_rect_transformation[Output<NormalizedRect>("")];
return {{
.hand_landmarks = projected_landmarks,
.world_hand_landmarks = projected_world_landmarks,
.hand_rect_next_frame = hand_rect_next_frame,
.hand_presence = hand_presence,
.hand_presence_score = hand_presence_score,
.handedness = handedness,
.image_size = image_size,
/* hand_landmarks= */ projected_landmarks,
/* world_hand_landmarks= */ projected_world_landmarks,
/* hand_rect_next_frame= */ hand_rect_next_frame,
/* hand_presence= */ hand_presence,
/* hand_presence_score= */ hand_presence_score,
/* handedness= */ handedness,
/* image_size= */ image_size,
}};
}
};

View File

@@ -531,9 +531,9 @@ class ObjectDetectorGraph : public core::ModelTaskGraph {
// Outputs the labeled detections and the processed image as the subgraph
// output streams.
return {{
.detections =
detection_label_id_to_text[Output<std::vector<Detection>>("")],
.image = preprocessing[Output<Image>(kImageTag)],
/* detections= */
detection_label_id_to_text[Output<std::vector<Detection>>("")],
/* image= */ preprocessing[Output<Image>(kImageTag)],
}};
}
};