Remove video and live stream modes in face stylizer.
PiperOrigin-RevId: 563233996
commit 7252f6f2e7
parent b40b3973fb
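The hunks below drop the VIDEO and LIVE_STREAM running modes from the C++, Java, Python, and Web FaceStylizer APIs, leaving only synchronous, single-image stylization. As a rough sketch of the usage path that remains (illustrative only, not part of this commit; the model and image paths are placeholders), the Python API reduces to:

    # Illustrative sketch (not part of this commit); paths are placeholders.
    import mediapipe as mp
    from mediapipe.tasks import python
    from mediapipe.tasks.python import vision

    options = vision.FaceStylizerOptions(
        base_options=python.BaseOptions(model_asset_path='face_stylizer.task'))
    with vision.FaceStylizer.create_from_options(options) as stylizer:
      image = mp.Image.create_from_file('portrait.jpg')
      stylized = stylizer.stylize(image)  # an mp.Image, or None if no face is found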
@@ -60,7 +60,7 @@ using FaceStylizerGraphOptionsProto =
 // "mediapipe.tasks.vision.face_stylizer.FaceStylizerGraph".
 CalculatorGraphConfig CreateGraphConfig(
     std::unique_ptr<FaceStylizerGraphOptionsProto> options,
-    bool enable_flow_limiting) {
+    bool enable_flow_limiting = false) {
   api2::builder::Graph graph;
   auto& task_subgraph = graph.AddNode(kSubgraphTypeName);
   task_subgraph.GetOptions<FaceStylizerGraphOptionsProto>().Swap(options.get());
@@ -87,8 +87,6 @@ ConvertFaceStylizerOptionsToProto(FaceStylizerOptions* options) {
   auto base_options_proto = std::make_unique<tasks::core::proto::BaseOptions>(
       tasks::core::ConvertBaseOptionsToProto(&(options->base_options)));
   options_proto->mutable_base_options()->Swap(base_options_proto.get());
-  options_proto->mutable_base_options()->set_use_stream_mode(
-      options->running_mode != core::RunningMode::IMAGE);
   return options_proto;
 }
 
@@ -125,10 +123,8 @@ absl::StatusOr<std::unique_ptr<FaceStylizer>> FaceStylizer::Create(
   }
   return core::VisionTaskApiFactory::Create<FaceStylizer,
                                              FaceStylizerGraphOptionsProto>(
-      CreateGraphConfig(
-          std::move(options_proto),
-          options->running_mode == core::RunningMode::LIVE_STREAM),
-      std::move(options->base_options.op_resolver), options->running_mode,
+      CreateGraphConfig(std::move(options_proto)),
+      std::move(options->base_options.op_resolver), core::RunningMode::IMAGE,
       std::move(packets_callback));
 }
 
@@ -41,15 +41,6 @@ struct FaceStylizerOptions {
   // file with metadata, accelerator options, op resolver, etc.
   tasks::core::BaseOptions base_options;
 
-  // The running mode of the task. Default to the image mode.
-  // Face stylizer has three running modes:
-  // 1) The image mode for stylizing faces on single image inputs.
-  // 2) The video mode for stylizing faces on the decoded frames of a video.
-  // 3) The live stream mode for stylizing faces on the live stream of input
-  // data, such as from camera. In this mode, the "result_callback" below must
-  // be specified to receive the stylization results asynchronously.
-  core::RunningMode running_mode = core::RunningMode::IMAGE;
-
   // The user-defined result callback for processing live stream data.
   // The result callback should only be specified when the running mode is set
   // to RunningMode::LIVE_STREAM.
@@ -108,8 +108,6 @@ absl::Status SetSubTaskBaseOptions(const ModelAssetBundleResources& resources,
   face_detector_graph_options->mutable_base_options()
       ->mutable_acceleration()
       ->CopyFrom(options->base_options().acceleration());
-  face_detector_graph_options->mutable_base_options()->set_use_stream_mode(
-      options->base_options().use_stream_mode());
   auto* face_landmarks_detector_graph_options =
       options->mutable_face_landmarker_graph_options()
           ->mutable_face_landmarks_detector_graph_options();
@@ -109,7 +109,7 @@ public final class FaceStylizer extends BaseVisionTaskApi {
         return FaceStylizerResult.create(
             Optional.empty(),
             BaseVisionTaskApi.generateResultTimestampMs(
-                stylizerOptions.runningMode(), packets.get(IMAGE_OUT_STREAM_INDEX)));
+                RunningMode.IMAGE, packets.get(IMAGE_OUT_STREAM_INDEX)));
       }
       int width = PacketGetter.getImageWidth(packet);
       int height = PacketGetter.getImageHeight(packet);
@@ -142,7 +142,7 @@ public final class FaceStylizer extends BaseVisionTaskApi {
       return FaceStylizerResult.create(
           Optional.of(imageBuilder.build()),
           BaseVisionTaskApi.generateResultTimestampMs(
-              stylizerOptions.runningMode(), packets.get(IMAGE_OUT_STREAM_INDEX)));
+              RunningMode.IMAGE, packets.get(IMAGE_OUT_STREAM_INDEX)));
     }
 
     @Override
@@ -153,9 +153,7 @@ public final class FaceStylizer extends BaseVisionTaskApi {
           }
         });
     // Empty output image packets indicates that no face stylization is applied.
-    if (stylizerOptions.runningMode() != RunningMode.LIVE_STREAM) {
-      handler.setHandleTimestampBoundChanges(true);
-    }
+    handler.setHandleTimestampBoundChanges(true);
     stylizerOptions.resultListener().ifPresent(handler::setResultListener);
     stylizerOptions.errorListener().ifPresent(handler::setErrorListener);
     TaskRunner runner =
@@ -163,16 +161,15 @@ public final class FaceStylizer extends BaseVisionTaskApi {
             context,
             TaskInfo.<FaceStylizerOptions>builder()
                 .setTaskName(FaceStylizer.class.getSimpleName())
-                .setTaskRunningModeName(stylizerOptions.runningMode().name())
+                .setTaskRunningModeName(RunningMode.IMAGE.name())
                 .setTaskGraphName(TASK_GRAPH_NAME)
                 .setInputStreams(INPUT_STREAMS)
                 .setOutputStreams(OUTPUT_STREAMS)
                 .setTaskOptions(stylizerOptions)
-                .setEnableFlowLimiting(stylizerOptions.runningMode() == RunningMode.LIVE_STREAM)
                 .build(),
             handler);
     return new FaceStylizer(
-        runner, stylizerOptions.runningMode(), stylizerOptions.resultListener().isPresent());
+        runner, RunningMode.IMAGE, stylizerOptions.resultListener().isPresent());
   }
 
   /**
@@ -306,194 +303,6 @@ public final class FaceStylizer extends BaseVisionTaskApi {
     TaskResult unused = processImageData(image, imageProcessingOptions);
   }
 
-  /**
-   * Performs face stylization on the provided video frame with default image processing options,
-   * i.e. without any rotation applied. Only use this method when the {@link FaceStylizer} is
-   * created with {@link RunningMode#VIDEO}.
-   *
-   * <p>It's required to provide the video frame's timestamp (in milliseconds). The input timestamps
-   * must be monotonically increasing.
-   *
-   * <p>{@link FaceStylizer} supports the following color space types:
-   *
-   * <ul>
-   *   <li>{@link android.graphics.Bitmap.Config#ARGB_8888}
-   * </ul>
-   *
-   * <p>The input image can be of any size. The output image is the stylized image with the most
-   * visible face. The stylized output image size is the same as the model output size. When no face
-   * is detected on the input image, returns {@code Optional.empty()}.
-   *
-   * @param image a MediaPipe {@link MPImage} object for processing.
-   * @param timestampMs the input timestamp (in milliseconds).
-   * @throws MediaPipeException if there is an internal error. Or if {@link FaceStylizer} is created
-   *     with a {@link ResultListener}.
-   */
-  public FaceStylizerResult stylizeForVideo(MPImage image, long timestampMs) {
-    return stylizeForVideo(image, ImageProcessingOptions.builder().build(), timestampMs);
-  }
-
-  /**
-   * Performs face stylization on the provided video frame. Only use this method when the {@link
-   * FaceStylizer} is created with {@link RunningMode#VIDEO}.
-   *
-   * <p>It's required to provide the video frame's timestamp (in milliseconds). The input timestamps
-   * must be monotonically increasing.
-   *
-   * <p>{@link FaceStylizer} supports the following color space types:
-   *
-   * <ul>
-   *   <li>{@link android.graphics.Bitmap.Config#ARGB_8888}
-   * </ul>
-   *
-   * <p>The input image can be of any size. The output image is the stylized image with the most
-   * visible face. The stylized output image size is the same as the model output size. When no face
-   * is detected on the input image, returns {@code Optional.empty()}. *
-   *
-   * @param image a MediaPipe {@link MPImage} object for processing.
-   * @param imageProcessingOptions the {@link ImageProcessingOptions} specifying how to process the
-   *     input image before running inference. Note that region-of-interest is <b>not</b> supported
-   *     by this task: specifying {@link ImageProcessingOptions#regionOfInterest()} will result in
-   *     this method throwing an IllegalArgumentException.
-   * @param timestampMs the input timestamp (in milliseconds).
-   * @throws IllegalArgumentException if the {@link ImageProcessingOptions} specify a
-   *     region-of-interest.
-   * @throws MediaPipeException if there is an internal error. Or if {@link FaceStylizer} is created
-   *     with a {@link ResultListener}.
-   */
-  public FaceStylizerResult stylizeForVideo(
-      MPImage image, ImageProcessingOptions imageProcessingOptions, long timestampMs) {
-    if (hasResultListener) {
-      throw new MediaPipeException(
-          MediaPipeException.StatusCode.FAILED_PRECONDITION.ordinal(),
-          "ResultListener is provided in the FaceStylizerOptions, but this method will return an"
-              + " ImageSegmentationResult.");
-    }
-    return (FaceStylizerResult) processVideoData(image, imageProcessingOptions, timestampMs);
-  }
-
-  /**
-   * Performs face stylization on the provided video frame with default image processing options,
-   * i.e. without any rotation applied, and provides zero-copied results via {@link ResultListener}
-   * in {@link FaceStylizerOptions}. Only use this method when the {@link FaceStylizer} is created
-   * with {@link RunningMode#VIDEO}.
-   *
-   * <p>It's required to provide the video frame's timestamp (in milliseconds). The input timestamps
-   * must be monotonically increasing.
-   *
-   * <p>{@link FaceStylizer} supports the following color space types:
-   *
-   * <ul>
-   *   <li>{@link android.graphics.Bitmap.Config#ARGB_8888}
-   * </ul>
-   *
-   * <p>The input image can be of any size. The output image is the stylized image with the most
-   * visible face. The stylized output image size is the same as the model output size. When no face
-   * is detected on the input image, returns {@code Optional.empty()}.
-   *
-   * @param image a MediaPipe {@link MPImage} object for processing.
-   * @param timestampMs the input timestamp (in milliseconds).
-   * @throws MediaPipeException if there is an internal error. Or if {@link FaceStylizer} is not
-   *     created with {@link ResultListener} set in {@link FaceStylizerOptions}.
-   */
-  public void stylizeForVideoWithResultListener(MPImage image, long timestampMs) {
-    stylizeForVideoWithResultListener(image, ImageProcessingOptions.builder().build(), timestampMs);
-  }
-
-  /**
-   * Performs face stylization on the provided video frame, and provides zero-copied results via
-   * {@link ResultListener} in {@link FaceStylizerOptions}. Only use this method when the {@link
-   * FaceStylizer} is created with {@link RunningMode#VIDEO}.
-   *
-   * <p>It's required to provide the video frame's timestamp (in milliseconds). The input timestamps
-   * must be monotonically increasing.
-   *
-   * <p>{@link FaceStylizer} supports the following color space types:
-   *
-   * <ul>
-   *   <li>{@link android.graphics.Bitmap.Config#ARGB_8888}
-   * </ul>
-   *
-   * <p>The input image can be of any size. The output image is the stylized image with the most
-   * visible face. The stylized output image size is the same as the model output size. When no face
-   * is detected on the input image, returns {@code Optional.empty()}.
-   *
-   * @param image a MediaPipe {@link MPImage} object for processing.
-   * @param timestampMs the input timestamp (in milliseconds).
-   * @throws MediaPipeException if there is an internal error. Or if {@link FaceStylizer} is not
-   *     created with {@link ResultListener} set in {@link FaceStylizerOptions}.
-   */
-  public void stylizeForVideoWithResultListener(
-      MPImage image, ImageProcessingOptions imageProcessingOptions, long timestampMs) {
-    if (!hasResultListener) {
-      throw new MediaPipeException(
-          MediaPipeException.StatusCode.FAILED_PRECONDITION.ordinal(),
-          "ResultListener is not set in the FaceStylizerOptions, but this method expects a"
-              + " ResultListener to process ImageSegmentationResult.");
-    }
-    TaskResult unused = processVideoData(image, imageProcessingOptions, timestampMs);
-  }
-
-  /**
-   * Sends live image data to perform face stylization with default image processing options, i.e.
-   * without any rotation applied, and the results will be available via the {@link ResultListener}
-   * provided in the {@link FaceStylizerOptions}. Only use this method when the {@link FaceStylizer
-   * } is created with {@link RunningMode#LIVE_STREAM}.
-   *
-   * <p>It's required to provide a timestamp (in milliseconds) to indicate when the input image is
-   * sent to the face stylizer. The input timestamps must be monotonically increasing.
-   *
-   * <p>{@link FaceStylizer} supports the following color space types:
-   *
-   * <p>The input image can be of any size. The output image is the stylized image with the most
-   * visible face. The stylized output image size is the same as the model output size. When no face
-   * is detected on the input image, returns {@code Optional.empty()}.
-   *
-   * <ul>
-   *   <li>{@link android.graphics.Bitmap.Config#ARGB_8888}
-   * </ul>
-   *
-   * @param image a MediaPipe {@link MPImage} object for processing.
-   * @param timestampMs the input timestamp (in milliseconds).
-   * @throws MediaPipeException if there is an internal error.
-   */
-  public void stylizeAsync(MPImage image, long timestampMs) {
-    stylizeAsync(image, ImageProcessingOptions.builder().build(), timestampMs);
-  }
-
-  /**
-   * Sends live image data to perform face stylization, and the results will be available via the
-   * {@link ResultListener} provided in the {@link FaceStylizerOptions}. Only use this method when
-   * the {@link FaceStylizer} is created with {@link RunningMode#LIVE_STREAM}.
-   *
-   * <p>It's required to provide a timestamp (in milliseconds) to indicate when the input image is
-   * sent to the face stylizer. The input timestamps must be monotonically increasing.
-   *
-   * <p>{@link FaceStylizer} supports the following color space types:
-   *
-   * <ul>
-   *   <li>{@link android.graphics.Bitmap.Config#ARGB_8888}
-   * </ul>
-   *
-   * <p>The input image can be of any size. The output image is the stylized image with the most
-   * visible face. The stylized output image size is the same as the model output size. When no face
-   * is detected on the input image, returns {@code Optional.empty()}.
-   *
-   * @param image a MediaPipe {@link MPImage} object for processing.
-   * @param imageProcessingOptions the {@link ImageProcessingOptions} specifying how to process the
-   *     input image before running inference. Note that region-of-interest is <b>not</b> supported
-   *     by this task: specifying {@link ImageProcessingOptions#regionOfInterest()} will result in
-   *     this method throwing an IllegalArgumentException.
-   * @param timestampMs the input timestamp (in milliseconds).
-   * @throws IllegalArgumentException if the {@link ImageProcessingOptions} specify a
-   *     region-of-interest.
-   * @throws MediaPipeException if there is an internal error.
-   */
-  public void stylizeAsync(
-      MPImage image, ImageProcessingOptions imageProcessingOptions, long timestampMs) {
-    sendLiveStreamData(image, imageProcessingOptions, timestampMs);
-  }
-
   /** Options for setting up an {@link FaceStylizer}. */
   @AutoValue
   public abstract static class FaceStylizerOptions extends TaskOptions {
@@ -504,20 +313,6 @@ public final class FaceStylizer extends BaseVisionTaskApi {
       /** Sets the base options for the face stylizer task. */
       public abstract Builder setBaseOptions(BaseOptions value);
 
-      /**
-       * Sets the running mode for the face stylizer task. Default to the image mode. Image stylizer
-       * has three modes:
-       *
-       * <ul>
-       *   <li>IMAGE: The mode for stylizeing image on single image inputs.
-       *   <li>VIDEO: The mode for stylizeing image on the decoded frames of a video.
-       *   <li>LIVE_STREAM: The mode for for stylizeing image on a live stream of input data, such
-       *       as from camera. In this mode, {@code setResultListener} must be called to set up a
-       *       listener to receive the recognition results asynchronously.
-       * </ul>
-       */
-      public abstract Builder setRunningMode(RunningMode value);
-
       /**
        * Sets an optional {@link ResultListener} to receive the stylization results when the graph
        * pipeline is done processing an image.
@@ -529,37 +324,20 @@ public final class FaceStylizer extends BaseVisionTaskApi {
 
       abstract FaceStylizerOptions autoBuild();
 
-      /**
-       * Validates and builds the {@link FaceStylizerOptions} instance.
-       *
-       * @throws IllegalArgumentException if the result listener and the running mode are not
-       *     properly configured. The result listener must be set when the face stylizer is in the
-       *     live stream mode.
-       */
+      /** Builds the {@link FaceStylizerOptions} instance. */
       public final FaceStylizerOptions build() {
-        FaceStylizerOptions options = autoBuild();
-        if (options.runningMode() == RunningMode.LIVE_STREAM) {
-          if (!options.resultListener().isPresent()) {
-            throw new IllegalArgumentException(
-                "The face stylizer is in the live stream mode, a user-defined result listener"
-                    + " must be provided in FaceStylizerOptions.");
-          }
-        }
-        return options;
+        return autoBuild();
       }
     }
 
     abstract BaseOptions baseOptions();
 
-    abstract RunningMode runningMode();
-
     abstract Optional<ResultListener<FaceStylizerResult, MPImage>> resultListener();
 
     abstract Optional<ErrorListener> errorListener();
 
     public static Builder builder() {
-      return new AutoValue_FaceStylizer_FaceStylizerOptions.Builder()
-          .setRunningMode(RunningMode.IMAGE);
+      return new AutoValue_FaceStylizer_FaceStylizerOptions.Builder();
     }
 
     /** Converts an {@link FaceStylizerOptions} to a {@link CalculatorOptions} protobuf message. */
@@ -569,7 +347,6 @@ public final class FaceStylizer extends BaseVisionTaskApi {
         FaceStylizerGraphOptionsProto.FaceStylizerGraphOptions.newBuilder()
             .setBaseOptions(
                 BaseOptionsProto.BaseOptions.newBuilder()
-                    .setUseStreamMode(runningMode() != RunningMode.IMAGE)
                     .mergeFrom(convertBaseOptionsToProto(baseOptions()))
                     .build())
             .build();
@@ -22,12 +22,10 @@ import android.graphics.BitmapFactory;
 import android.graphics.RectF;
 import androidx.test.core.app.ApplicationProvider;
 import androidx.test.ext.junit.runners.AndroidJUnit4;
-import com.google.mediapipe.framework.MediaPipeException;
 import com.google.mediapipe.framework.image.BitmapImageBuilder;
 import com.google.mediapipe.framework.image.MPImage;
 import com.google.mediapipe.tasks.core.BaseOptions;
 import com.google.mediapipe.tasks.vision.core.ImageProcessingOptions;
-import com.google.mediapipe.tasks.vision.core.RunningMode;
 import com.google.mediapipe.tasks.vision.facestylizer.FaceStylizer.FaceStylizerOptions;
 import java.io.InputStream;
 import org.junit.After;
@@ -95,117 +93,11 @@ public class FaceStylizerTest {
       }
     }
 
-    @Test
-    public void create_failsWithMissingResultListenerInLiveSteamMode() throws Exception {
-      IllegalArgumentException exception =
-          assertThrows(
-              IllegalArgumentException.class,
-              () ->
-                  FaceStylizerOptions.builder()
-                      .setBaseOptions(BaseOptions.builder().setModelAssetPath(modelFile).build())
-                      .setRunningMode(RunningMode.LIVE_STREAM)
-                      .build());
-      assertThat(exception)
-          .hasMessageThat()
-          .contains("a user-defined result listener must be provided");
-    }
-
-    @Test
-    public void stylizer_failsWithCallingWrongApiInImageMode() throws Exception {
-      FaceStylizerOptions options =
-          FaceStylizerOptions.builder()
-              .setBaseOptions(BaseOptions.builder().setModelAssetPath(modelFile).build())
-              .setRunningMode(RunningMode.IMAGE)
-              .build();
-      faceStylizer =
-          FaceStylizer.createFromOptions(ApplicationProvider.getApplicationContext(), options);
-      MediaPipeException exception =
-          assertThrows(
-              MediaPipeException.class,
-              () ->
-                  faceStylizer.stylizeForVideo(
-                      getImageFromAsset(largeFaceTestImage), /* timestampsMs= */ 0));
-      assertThat(exception).hasMessageThat().contains("not initialized with the video mode");
-      exception =
-          assertThrows(
-              MediaPipeException.class,
-              () ->
-                  faceStylizer.stylizeAsync(
-                      getImageFromAsset(largeFaceTestImage), /* timestampsMs= */ 0));
-      assertThat(exception).hasMessageThat().contains("not initialized with the live stream mode");
-      exception =
-          assertThrows(
-              MediaPipeException.class,
-              () -> faceStylizer.stylizeWithResultListener(getImageFromAsset(largeFaceTestImage)));
-      assertThat(exception)
-          .hasMessageThat()
-          .contains("ResultListener is not set in the FaceStylizerOptions");
-    }
-
-    @Test
-    public void stylizer_failsWithCallingWrongApiInVideoMode() throws Exception {
-      FaceStylizerOptions options =
-          FaceStylizerOptions.builder()
-              .setBaseOptions(BaseOptions.builder().setModelAssetPath(modelFile).build())
-              .setRunningMode(RunningMode.VIDEO)
-              .build();
-
-      faceStylizer =
-          FaceStylizer.createFromOptions(ApplicationProvider.getApplicationContext(), options);
-      MediaPipeException exception =
-          assertThrows(
-              MediaPipeException.class,
-              () -> faceStylizer.stylize(getImageFromAsset(largeFaceTestImage)));
-      assertThat(exception).hasMessageThat().contains("not initialized with the image mode");
-      exception =
-          assertThrows(
-              MediaPipeException.class,
-              () ->
-                  faceStylizer.stylizeAsync(
-                      getImageFromAsset(largeFaceTestImage), /* timestampsMs= */ 0));
-      assertThat(exception).hasMessageThat().contains("not initialized with the live stream mode");
-      exception =
-          assertThrows(
-              MediaPipeException.class,
-              () ->
-                  faceStylizer.stylizeForVideoWithResultListener(
-                      getImageFromAsset(largeFaceTestImage), /* timestampsMs= */ 0));
-      assertThat(exception)
-          .hasMessageThat()
-          .contains("ResultListener is not set in the FaceStylizerOptions");
-    }
-
-    @Test
-    public void stylizer_failsWithCallingWrongApiInLiveSteamMode() throws Exception {
-      FaceStylizerOptions options =
-          FaceStylizerOptions.builder()
-              .setBaseOptions(BaseOptions.builder().setModelAssetPath(modelFile).build())
-              .setRunningMode(RunningMode.LIVE_STREAM)
-              .setResultListener((result, inputImage) -> {})
-              .build();
-
-      faceStylizer =
-          FaceStylizer.createFromOptions(ApplicationProvider.getApplicationContext(), options);
-      MediaPipeException exception =
-          assertThrows(
-              MediaPipeException.class,
-              () -> faceStylizer.stylizeWithResultListener(getImageFromAsset(largeFaceTestImage)));
-      assertThat(exception).hasMessageThat().contains("not initialized with the image mode");
-      exception =
-          assertThrows(
-              MediaPipeException.class,
-              () ->
-                  faceStylizer.stylizeForVideoWithResultListener(
-                      getImageFromAsset(largeFaceTestImage), /* timestampsMs= */ 0));
-      assertThat(exception).hasMessageThat().contains("not initialized with the video mode");
-    }
-
     @Test
     public void stylizer_succeedsWithImageMode() throws Exception {
       FaceStylizerOptions options =
           FaceStylizerOptions.builder()
               .setBaseOptions(BaseOptions.builder().setModelAssetPath(modelFile).build())
-              .setRunningMode(RunningMode.IMAGE)
               .build();
 
       faceStylizer =
@@ -224,7 +116,6 @@ public class FaceStylizerTest {
       FaceStylizerOptions options =
           FaceStylizerOptions.builder()
              .setBaseOptions(BaseOptions.builder().setModelAssetPath(modelFile).build())
-              .setRunningMode(RunningMode.IMAGE)
               .build();
 
       faceStylizer =
@@ -243,7 +134,6 @@ public class FaceStylizerTest {
       FaceStylizerOptions options =
           FaceStylizerOptions.builder()
              .setBaseOptions(BaseOptions.builder().setModelAssetPath(modelFile).build())
-              .setRunningMode(RunningMode.IMAGE)
               .build();
       faceStylizer =
           FaceStylizer.createFromOptions(ApplicationProvider.getApplicationContext(), options);
@@ -268,7 +158,6 @@ public class FaceStylizerTest {
       FaceStylizerOptions options =
           FaceStylizerOptions.builder()
              .setBaseOptions(BaseOptions.builder().setModelAssetPath(modelFile).build())
-              .setRunningMode(RunningMode.IMAGE)
               .build();
       faceStylizer =
           FaceStylizer.createFromOptions(ApplicationProvider.getApplicationContext(), options);
@@ -291,7 +180,6 @@ public class FaceStylizerTest {
       FaceStylizerOptions options =
           FaceStylizerOptions.builder()
              .setBaseOptions(BaseOptions.builder().setModelAssetPath(modelFile).build())
-              .setRunningMode(RunningMode.IMAGE)
               .setResultListener(
                   (result, originalImage) -> {
                     MPImage stylizedImage = result.stylizedImage().get();
@@ -304,97 +192,6 @@ public class FaceStylizerTest {
           FaceStylizer.createFromOptions(ApplicationProvider.getApplicationContext(), options);
       faceStylizer.stylizeWithResultListener(getImageFromAsset(largeFaceTestImage));
     }
-
-    @Test
-    public void stylizer_successWithVideoMode() throws Exception {
-      MPImage inputImage = getImageFromAsset(largeFaceTestImage);
-
-      FaceStylizerOptions options =
-          FaceStylizerOptions.builder()
-              .setBaseOptions(BaseOptions.builder().setModelAssetPath(modelFile).build())
-              .setRunningMode(RunningMode.VIDEO)
-              .build();
-      faceStylizer =
-          FaceStylizer.createFromOptions(ApplicationProvider.getApplicationContext(), options);
-      for (int i = 0; i < 3; i++) {
-        FaceStylizerResult actualResult =
-            faceStylizer.stylizeForVideo(
-                getImageFromAsset(largeFaceTestImage), /* timestampsMs= */ i);
-
-        MPImage stylizedImage = actualResult.stylizedImage().get();
-        assertThat(stylizedImage).isNotNull();
-        assertThat(stylizedImage.getWidth()).isEqualTo(modelImageSize);
-        assertThat(stylizedImage.getHeight()).isEqualTo(modelImageSize);
-      }
-    }
-
-    @Test
-    public void stylizer_successWithVideoModeWithResultListener() throws Exception {
-      MPImage inputImage = getImageFromAsset(largeFaceTestImage);
-
-      FaceStylizerOptions options =
-          FaceStylizerOptions.builder()
-              .setBaseOptions(BaseOptions.builder().setModelAssetPath(modelFile).build())
-              .setRunningMode(RunningMode.VIDEO)
-              .setResultListener(
-                  (result, originalImage) -> {
-                    MPImage stylizedImage = result.stylizedImage().get();
-                    assertThat(stylizedImage).isNotNull();
-                    assertThat(stylizedImage.getWidth()).isEqualTo(modelImageSize);
-                    assertThat(stylizedImage.getHeight()).isEqualTo(modelImageSize);
-                  })
-              .build();
-      faceStylizer =
-          FaceStylizer.createFromOptions(ApplicationProvider.getApplicationContext(), options);
-      for (int i = 0; i < 3; i++) {
-        faceStylizer.stylizeForVideoWithResultListener(inputImage, /* timestampsMs= */ i);
-      }
-    }
-
-    @Test
-    public void stylizer_successWithLiveStreamMode() throws Exception {
-      MPImage inputImage = getImageFromAsset(largeFaceTestImage);
-
-      FaceStylizerOptions options =
-          FaceStylizerOptions.builder()
-              .setBaseOptions(BaseOptions.builder().setModelAssetPath(modelFile).build())
-              .setRunningMode(RunningMode.LIVE_STREAM)
-              .setResultListener(
-                  (result, originalImage) -> {
-                    MPImage stylizedImage = result.stylizedImage().get();
-                    assertThat(stylizedImage).isNotNull();
-                    assertThat(stylizedImage.getWidth()).isEqualTo(modelImageSize);
-                    assertThat(stylizedImage.getHeight()).isEqualTo(modelImageSize);
-                  })
-              .build();
-
-      faceStylizer =
-          FaceStylizer.createFromOptions(ApplicationProvider.getApplicationContext(), options);
-      for (int i = 0; i < 3; i++) {
-        faceStylizer.stylizeAsync(inputImage, /* timestampsMs= */ i);
-      }
-    }
-
-    @Test
-    public void stylizer_failsWithOutOfOrderInputTimestamps() throws Exception {
-      MPImage image = getImageFromAsset(largeFaceTestImage);
-      FaceStylizerOptions options =
-          FaceStylizerOptions.builder()
-              .setBaseOptions(BaseOptions.builder().setModelAssetPath(modelFile).build())
-              .setRunningMode(RunningMode.LIVE_STREAM)
-              .setResultListener((result, inputImage) -> {})
-              .build();
-      faceStylizer =
-          FaceStylizer.createFromOptions(ApplicationProvider.getApplicationContext(), options);
-      faceStylizer.stylizeAsync(image, /* timestampsMs= */ 1);
-      MediaPipeException exception =
-          assertThrows(
-              MediaPipeException.class,
-              () -> faceStylizer.stylizeAsync(image, /* timestampsMs= */ 0));
-      assertThat(exception)
-          .hasMessageThat()
-          .contains("having a smaller timestamp than the processed timestamp");
-    }
   }
 
   private static MPImage getImageFromAsset(String filePath) throws Exception {
@@ -14,25 +14,22 @@
 """MediaPipe face stylizer task."""
 
 import dataclasses
-from typing import Callable, Mapping, Optional
+from typing import Optional
 
 from mediapipe.python import packet_creator
 from mediapipe.python import packet_getter
 from mediapipe.python._framework_bindings import image as image_module
-from mediapipe.python._framework_bindings import packet as packet_module
 from mediapipe.tasks.cc.vision.face_stylizer.proto import face_stylizer_graph_options_pb2
 from mediapipe.tasks.python.core import base_options as base_options_module
 from mediapipe.tasks.python.core import task_info as task_info_module
 from mediapipe.tasks.python.core.optional_dependencies import doc_controls
 from mediapipe.tasks.python.vision.core import base_vision_task_api
 from mediapipe.tasks.python.vision.core import image_processing_options as image_processing_options_module
-from mediapipe.tasks.python.vision.core import vision_task_running_mode as running_mode_module
 
 _BaseOptions = base_options_module.BaseOptions
 _FaceStylizerGraphOptionsProto = (
     face_stylizer_graph_options_pb2.FaceStylizerGraphOptions
 )
-_RunningMode = running_mode_module.VisionTaskRunningMode
 _ImageProcessingOptions = image_processing_options_module.ImageProcessingOptions
 _TaskInfo = task_info_module.TaskInfo
 
@@ -53,29 +50,14 @@ class FaceStylizerOptions:
 
   Attributes:
     base_options: Base options for the face stylizer task.
-    running_mode: The running mode of the task. Default to the image mode. Face
-      stylizer task has three running modes: 1) The image mode for stylizing one
-      face on a single image input. 2) The video mode for stylizing one face per
-      frame on the decoded frames of a video. 3) The live stream mode for
-      stylizing one face on a live stream of input data, such as from camera.
-    result_callback: The user-defined result callback for processing live stream
-      data. The result callback should only be specified when the running mode
-      is set to the live stream mode.
   """
 
   base_options: _BaseOptions
-  running_mode: _RunningMode = _RunningMode.IMAGE
-  result_callback: Optional[
-      Callable[[image_module.Image, image_module.Image, int], None]
-  ] = None
 
   @doc_controls.do_not_generate_docs
   def to_pb2(self) -> _FaceStylizerGraphOptionsProto:
     """Generates an FaceStylizerOptions protobuf object."""
     base_options_proto = self.base_options.to_pb2()
-    base_options_proto.use_stream_mode = (
-        False if self.running_mode == _RunningMode.IMAGE else True
-    )
     return _FaceStylizerGraphOptionsProto(base_options=base_options_proto)
 
 
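With running_mode gone from the Python options dataclass, to_pb2() above no longer touches use_stream_mode. A minimal sketch of the resulting behaviour, assuming the proto3 default of False for that field (the model path is a placeholder):

    # Sketch: the generated graph options now leave use_stream_mode at its default.
    options = FaceStylizerOptions(
        base_options=_BaseOptions(model_asset_path='face_stylizer.task'))
    proto = options.to_pb2()
    assert not proto.base_options.use_stream_mode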
@@ -102,9 +84,7 @@ class FaceStylizer(base_vision_task_api.BaseVisionTaskApi):
       RuntimeError: If other types of error occurred.
     """
     base_options = _BaseOptions(model_asset_path=model_path)
-    options = FaceStylizerOptions(
-        base_options=base_options, running_mode=_RunningMode.IMAGE
-    )
+    options = FaceStylizerOptions(base_options=base_options)
     return cls.create_from_options(options)
 
   @classmethod
@@ -123,28 +103,6 @@ class FaceStylizer(base_vision_task_api.BaseVisionTaskApi):
       RuntimeError: If other types of error occurred.
     """
 
-    def packets_callback(output_packets: Mapping[str, packet_module.Packet]):
-      if output_packets[_IMAGE_OUT_STREAM_NAME].is_empty():
-        return
-      image = packet_getter.get_image(output_packets[_IMAGE_OUT_STREAM_NAME])
-      stylized_image_packet = output_packets[_STYLIZED_IMAGE_NAME]
-      if stylized_image_packet.is_empty():
-        options.result_callback(
-            None,
-            image,
-            stylized_image_packet.timestamp.value
-            // _MICRO_SECONDS_PER_MILLISECOND,
-        )
-
-      stylized_image = packet_getter.get_image(stylized_image_packet)
-
-      options.result_callback(
-          stylized_image,
-          image,
-          stylized_image_packet.timestamp.value
-          // _MICRO_SECONDS_PER_MILLISECOND,
-      )
-
     task_info = _TaskInfo(
         task_graph=_TASK_GRAPH_NAME,
         input_streams=[
@@ -157,14 +115,7 @@ class FaceStylizer(base_vision_task_api.BaseVisionTaskApi):
         ],
         task_options=options,
     )
-    return cls(
-        task_info.generate_graph_config(
-            enable_flow_limiting=options.running_mode
-            == _RunningMode.LIVE_STREAM
-        ),
-        options.running_mode,
-        packets_callback if options.result_callback else None,
-    )
+    return cls(task_info.generate_graph_config())
 
   def stylize(
       self,
@@ -200,89 +151,3 @@ class FaceStylizer(base_vision_task_api.BaseVisionTaskApi):
     if output_packets[_STYLIZED_IMAGE_NAME].is_empty():
       return None
     return packet_getter.get_image(output_packets[_STYLIZED_IMAGE_NAME])
-
-  def stylize_for_video(
-      self,
-      image: image_module.Image,
-      timestamp_ms: int,
-      image_processing_options: Optional[_ImageProcessingOptions] = None,
-  ) -> image_module.Image:
-    """Performs face stylization on the provided video frames.
-
-    Only use this method when the FaceStylizer is created with the video
-    running mode. It's required to provide the video frame's timestamp (in
-    milliseconds) along with the video frame. The input timestamps should be
-    monotonically increasing for adjacent calls of this method.
-
-    Args:
-      image: MediaPipe Image.
-      timestamp_ms: The timestamp of the input video frame in milliseconds.
-      image_processing_options: Options for image processing.
-
-    Returns:
-      The stylized image of the most visible face. The stylized output image
-      size is the same as the model output size. None if no face is detected
-      on the input image.
-
-    Raises:
-      ValueError: If any of the input arguments is invalid.
-      RuntimeError: If face stylization failed to run.
-    """
-    normalized_rect = self.convert_to_normalized_rect(
-        image_processing_options, image)
-    output_packets = self._process_video_data({
-        _IMAGE_IN_STREAM_NAME: packet_creator.create_image(image).at(
-            timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND
-        ),
-        _NORM_RECT_STREAM_NAME: packet_creator.create_proto(
-            normalized_rect.to_pb2()
-        ).at(timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND),
-    })
-    if output_packets[_STYLIZED_IMAGE_NAME].is_empty():
-      return None
-    return packet_getter.get_image(output_packets[_STYLIZED_IMAGE_NAME])
-
-  def stylize_async(
-      self,
-      image: image_module.Image,
-      timestamp_ms: int,
-      image_processing_options: Optional[_ImageProcessingOptions] = None,
-  ) -> None:
-    """Sends live image data (an Image with a unique timestamp) to perform face stylization.
-
-    Only use this method when the FaceStylizer is created with the live stream
-    running mode. The input timestamps should be monotonically increasing for
-    adjacent calls of this method. This method will return immediately after the
-    input image is accepted. The results will be available via the
-    `result_callback` provided in the `FaceStylizerOptions`. The
-    `stylize_async` method is designed to process live stream data such as
-    camera input. To lower the overall latency, face stylizer may drop the input
-    images if needed. In other words, it's not guaranteed to have output per
-    input image.
-
-    The `result_callback` provides:
-      - The stylized image of the most visible face. The stylized output image
-        size is the same as the model output size. None if no face is detected
-        on the input image.
-      - The input image that the face stylizer runs on.
-      - The input timestamp in milliseconds.
-
-    Args:
-      image: MediaPipe Image.
-      timestamp_ms: The timestamp of the input image in milliseconds.
-      image_processing_options: Options for image processing.
-
-    Raises:
-      ValueError: If the current input timestamp is smaller than what the face
-        stylizer has already processed.
-    """
-    normalized_rect = self.convert_to_normalized_rect(
-        image_processing_options, image)
-    self._send_live_stream_data({
-        _IMAGE_IN_STREAM_NAME: packet_creator.create_image(image).at(
-            timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND
-        ),
-        _NORM_RECT_STREAM_NAME: packet_creator.create_proto(
-            normalized_rect.to_pb2()
-        ).at(timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND),
-    })
-
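After the removals above, stylize() is the only inference entry point left on the Python class, and create_from_model_path builds options without a running mode. A short sketch of the surviving path, assuming stylize() keeps its optional image_processing_options argument (model and image paths are placeholders):

    # Sketch: image-only stylization, optionally rotating the input first.
    stylizer = FaceStylizer.create_from_model_path('face_stylizer.task')
    image = image_module.Image.create_from_file('portrait.jpg')
    rotation = _ImageProcessingOptions(rotation_degrees=90)
    output = stylizer.stylize(image, image_processing_options=rotation)
    if output is None:
      print('No face detected; nothing to stylize.')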
@@ -222,124 +222,6 @@ export class FaceStylizer extends VisionTaskRunner {
     }
   }
 
-  /**
-   * Performs face stylization on the provided video frame and invokes the
-   * callback with result. The method returns synchronously once the callback
-   * returns. Only use this method when the FaceStylizer is created with the
-   * video running mode.
-   *
-   * The input frame can be of any size. It's required to provide the video
-   * frame's timestamp (in milliseconds). The input timestamps must be
-   * monotonically increasing.
-   *
-   * @param videoFrame A video frame to process.
-   * @param timestamp The timestamp of the current frame, in ms.
-   * @param callback The callback that is invoked with the stylized image or
-   *    `null` if no face was detected. The lifetime of the returned data is only
-   *    guaranteed for the duration of the callback.
-   */
-  stylizeForVideo(
-      videoFrame: ImageSource, timestamp: number,
-      callback: FaceStylizerCallback): void;
-  /**
-   * Performs face stylization on the provided video frame and invokes the
-   * callback with result. The method returns synchronously once the callback
-   * returns. Only use this method when the FaceStylizer is created with the
-   * video running mode.
-   *
-   * The 'imageProcessingOptions' parameter can be used to specify one or all
-   * of:
-   *  - the rotation to apply to the image before performing stylization, by
-   *    setting its 'rotationDegrees' property.
-   *  - the region-of-interest on which to perform stylization, by setting its
-   *    'regionOfInterest' property. If not specified, the full image is used.
-   *  If both are specified, the crop around the region-of-interest is
-   *  extracted first, then the specified rotation is applied to the crop.
-   *
-   * The input frame can be of any size. It's required to provide the video
-   * frame's timestamp (in milliseconds). The input timestamps must be
-   * monotonically increasing.
-   *
-   * @param videoFrame A video frame to process.
-   * @param timestamp The timestamp of the current frame, in ms.
-   * @param imageProcessingOptions the `ImageProcessingOptions` specifying how
-   *    to process the input image before running inference.
-   * @param callback The callback that is invoked with the stylized image or
-   *    `null` if no face was detected. The lifetime of the returned data is only
-   *    guaranteed for the duration of the callback.
-   */
-  stylizeForVideo(
-      videoFrame: ImageSource, timestamp: number,
-      imageProcessingOptions: ImageProcessingOptions,
-      callback: FaceStylizerCallback): void;
-  /**
-   * Performs face stylization on the provided video frame. This method creates
-   * a copy of the resulting image and should not be used in high-throughput
-   * applications. Only use this method when the FaceStylizer is created with the
-   * video running mode.
-   *
-   * The input frame can be of any size. It's required to provide the video
-   * frame's timestamp (in milliseconds). The input timestamps must be
-   * monotonically increasing.
-   *
-   * @param videoFrame A video frame to process.
-   * @param timestamp The timestamp of the current frame, in ms.
-   * @return A stylized face or `null` if no face was detected. The result is
-   *     copied to avoid lifetime issues.
-   */
-  stylizeForVideo(videoFrame: ImageSource, timestamp: number): MPImage|null;
-  /**
-   * Performs face stylization on the provided video frame. This method creates
-   * a copy of the resulting image and should not be used in high-throughput
-   * applictions. Only use this method when the FaceStylizer is created with the
-   * video running mode.
-   *
-   * The 'imageProcessingOptions' parameter can be used to specify one or all
-   * of:
-   *  - the rotation to apply to the image before performing stylization, by
-   *    setting its 'rotationDegrees' property.
-   *  - the region-of-interest on which to perform stylization, by setting its
-   *    'regionOfInterest' property. If not specified, the full image is used.
-   *  If both are specified, the crop around the region-of-interest is
-   *  extracted first, then the specified rotation is applied to the crop.
-   *
-   * The input frame can be of any size. It's required to provide the video
-   * frame's timestamp (in milliseconds). The input timestamps must be
-   * monotonically increasing.
-   *
-   * @param videoFrame A video frame to process.
-   * @param timestamp The timestamp of the current frame, in ms.
-   * @param imageProcessingOptions the `ImageProcessingOptions` specifying how
-   *    to process the input image before running inference.
-   * @return A stylized face or `null` if no face was detected. The result is
-   *     copied to avoid lifetime issues.
-   */
-  stylizeForVideo(
-      videoFrame: ImageSource,
-      timestamp: number,
-      imageProcessingOptions: ImageProcessingOptions,
-  ): MPImage|null;
-  stylizeForVideo(
-      videoFrame: ImageSource, timestamp: number,
-      imageProcessingOptionsOrCallback?: ImageProcessingOptions|
-      FaceStylizerCallback,
-      callback?: FaceStylizerCallback): MPImage|null|void {
-    const imageProcessingOptions =
-        typeof imageProcessingOptionsOrCallback !== 'function' ?
-        imageProcessingOptionsOrCallback :
-        {};
-
-    this.userCallback = typeof imageProcessingOptionsOrCallback === 'function' ?
-        imageProcessingOptionsOrCallback :
-        callback;
-    this.processVideoData(videoFrame, imageProcessingOptions, timestamp);
-    this.userCallback = undefined;
-
-    if (!this.userCallback) {
-      return this.result;
-    }
-  }
-
   /** Updates the MediaPipe graph configuration. */
   protected override refreshGraph(): void {
     const graphConfig = new CalculatorGraphConfig();