Project import generated by Copybara.
GitOrigin-RevId: e207bb2a1b26cd799055d7735ed35ad2f0e56b83
parent d3f98334bf
commit ebec590cfe
@@ -103,6 +103,18 @@ public class ExternalTextureConverter implements TextureFrameProducer {
     }
   }
 
+  /**
+   * Re-renders the current frame. Notifies all consumers as if it were a new frame. This should not
+   * typically be used but can be useful for cases where the consumer has lost ownership of the most
+   * recent frame and needs to get it again. This does nothing if no frame has yet been received.
+   */
+  public void rerenderCurrentFrame() {
+    SurfaceTexture surfaceTexture = getSurfaceTexture();
+    if (thread != null && surfaceTexture != null && thread.getHasReceivedFirstFrame()) {
+      thread.onFrameAvailable(surfaceTexture);
+    }
+  }
+
   /**
    * Sets the new buffer pool size. This is safe to set at any time.
    *
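For context, a minimal usage sketch of the new method (hedged: eglContext, cameraTexture, width, height, and consumer are illustrative placeholders; setSurfaceTexture, addConsumer, and rerenderCurrentFrame are the ExternalTextureConverter calls shown or referenced in this commit):

ExternalTextureConverter converter = new ExternalTextureConverter(eglContext);
converter.setSurfaceTexture(cameraTexture, width, height);
converter.addConsumer(consumer);
// Later, if the consumer lost ownership of its most recent TextureFrame
// (e.g. it was detached and re-attached), request the current frame again.
// Before the first frame arrives this call is a no-op.
converter.rerenderCurrentFrame();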
@@ -278,6 +290,7 @@ public class ExternalTextureConverter implements TextureFrameProducer {
     private volatile SurfaceTexture internalSurfaceTexture = null;
     private int[] textures = null;
     private final List<TextureFrameConsumer> consumers;
+    private volatile boolean hasReceivedFirstFrame = false;
 
     private final Queue<PoolTextureFrame> framesAvailable = new ArrayDeque<>();
     private int framesInUse = 0;
@@ -335,6 +348,7 @@ public class ExternalTextureConverter implements TextureFrameProducer {
     }
 
     public void setSurfaceTexture(SurfaceTexture texture, int width, int height) {
+      hasReceivedFirstFrame = false;
       if (surfaceTexture != null) {
         surfaceTexture.setOnFrameAvailableListener(null);
       }
@@ -381,6 +395,10 @@ public class ExternalTextureConverter implements TextureFrameProducer {
       return surfaceTexture != null ? surfaceTexture : internalSurfaceTexture;
     }
 
+    public boolean getHasReceivedFirstFrame() {
+      return hasReceivedFirstFrame;
+    }
+
     @Override
     public void onFrameAvailable(SurfaceTexture surfaceTexture) {
       handler.post(() -> renderNext(surfaceTexture));
@@ -427,6 +445,7 @@ public class ExternalTextureConverter implements TextureFrameProducer {
         // pending on the handler. When that happens, we should simply disregard the call.
         return;
       }
+      hasReceivedFirstFrame = true;
       try {
         synchronized (consumers) {
           boolean frameUpdated = false;
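Taken together, the hunks above add a first-frame gate: the flag is cleared when a new SurfaceTexture is attached, set once renderNext() has processed a real frame, and checked before a replay is dispatched, presumably so a replay never touches a texture that has not yet produced a frame. A self-contained sketch of that gating pattern (hypothetical helper class, not MediaPipe API):

import java.util.concurrent.atomic.AtomicBoolean;

/** Hypothetical sketch of the first-frame gate used above. */
final class FirstFrameGate {
  // Written on the render thread, read from arbitrary caller threads.
  private final AtomicBoolean hasReceivedFirstFrame = new AtomicBoolean(false);

  /** Call when a new frame source is attached: nothing to replay yet. */
  void onSourceAttached() { hasReceivedFirstFrame.set(false); }

  /** Call once a frame has actually been rendered. */
  void onFrameRendered() { hasReceivedFirstFrame.set(true); }

  /** A replay is only meaningful after at least one real frame. */
  boolean mayReplay() { return hasReceivedFirstFrame.get(); }
}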
@@ -159,12 +159,12 @@ absl::Status TensorsToSegmentationCalculator::Process(
     std::tie(output_width, output_height) = kOutputSizeIn(cc).Get();
   }
   Shape output_shape = {
-      .height = output_height,
-      .width = output_width,
-      .channels = options_.segmenter_options().output_type() ==
+      /* height= */ output_height,
+      /* width= */ output_width,
+      /* channels= */ options_.segmenter_options().output_type() ==
               SegmenterOptions::CATEGORY_MASK
           ? 1
           : input_shape.channels};
 
   std::vector<Image> segmented_masks = GetSegmentationResult(
       input_shape, output_shape, input_tensor.GetCpuReadView().buffer<float>());
@@ -148,8 +148,9 @@ absl::StatusOr<ClassificationHeadsProperties> GetClassificationHeadsProperties(
             num_output_tensors, output_tensors_metadata->size()),
         MediaPipeTasksStatus::kMetadataInconsistencyError);
   }
-  return ClassificationHeadsProperties{.num_heads = num_output_tensors,
-                                       .quantized = num_quantized_tensors > 0};
+  return ClassificationHeadsProperties{
+      /* num_heads= */ num_output_tensors,
+      /* quantized= */ num_quantized_tensors > 0};
 }
 
 // Builds the label map from the tensor metadata, if available.
@@ -226,12 +226,14 @@ class ImagePreprocessingSubgraph : public Subgraph {
 
     // Connect outputs.
    return {
-        .tensors = image_to_tensor[Output<std::vector<Tensor>>(kTensorsTag)],
-        .matrix = image_to_tensor[Output<std::array<float, 16>>(kMatrixTag)],
-        .letterbox_padding =
-            image_to_tensor[Output<std::array<float, 4>>(kLetterboxPaddingTag)],
-        .image_size = image_size[Output<std::pair<int, int>>(kSizeTag)],
-        .image = pass_through[Output<Image>("")],
+        /* tensors= */ image_to_tensor[Output<std::vector<Tensor>>(
+            kTensorsTag)],
+        /* matrix= */
+        image_to_tensor[Output<std::array<float, 16>>(kMatrixTag)],
+        /* letterbox_padding= */
+        image_to_tensor[Output<std::array<float, 4>>(kLetterboxPaddingTag)],
+        /* image_size= */ image_size[Output<std::pair<int, int>>(kSizeTag)],
+        /* image= */ pass_through[Output<Image>("")],
     };
   }
 };
@@ -388,13 +388,13 @@ class HandLandmarkDetectorGraph : public core::ModelTaskGraph {
         hand_rect_transformation[Output<NormalizedRect>("")];
 
     return {{
-        .hand_landmarks = projected_landmarks,
-        .world_hand_landmarks = projected_world_landmarks,
-        .hand_rect_next_frame = hand_rect_next_frame,
-        .hand_presence = hand_presence,
-        .hand_presence_score = hand_presence_score,
-        .handedness = handedness,
-        .image_size = image_size,
+        /* hand_landmarks= */ projected_landmarks,
+        /* world_hand_landmarks= */ projected_world_landmarks,
+        /* hand_rect_next_frame= */ hand_rect_next_frame,
+        /* hand_presence= */ hand_presence,
+        /* hand_presence_score= */ hand_presence_score,
+        /* handedness= */ handedness,
+        /* image_size= */ image_size,
     }};
   }
 };
@@ -531,9 +531,9 @@ class ObjectDetectorGraph : public core::ModelTaskGraph {
     // Outputs the labeled detections and the processed image as the subgraph
     // output streams.
     return {{
-        .detections =
+        /* detections= */
         detection_label_id_to_text[Output<std::vector<Detection>>("")],
-        .image = preprocessing[Output<Image>(kImageTag)],
+        /* image= */ preprocessing[Output<Image>(kImageTag)],
     }};
   }
 };