diff --git a/mediapipe/tasks/cc/components/BUILD b/mediapipe/tasks/cc/components/BUILD index f563fbf64..344fafb4e 100644 --- a/mediapipe/tasks/cc/components/BUILD +++ b/mediapipe/tasks/cc/components/BUILD @@ -49,6 +49,7 @@ cc_library( "//mediapipe/gpu:gpu_origin_cc_proto", "//mediapipe/tasks/cc:common", "//mediapipe/tasks/cc/core:model_resources", + "//mediapipe/tasks/cc/core/proto:acceleration_cc_proto", "//mediapipe/tasks/cc/vision/utils:image_tensor_specs", "@com_google_absl//absl/status", "@com_google_absl//absl/status:statusor", diff --git a/mediapipe/tasks/cc/components/image_preprocessing.cc b/mediapipe/tasks/cc/components/image_preprocessing.cc index f3f3b6863..7940080e1 100644 --- a/mediapipe/tasks/cc/components/image_preprocessing.cc +++ b/mediapipe/tasks/cc/components/image_preprocessing.cc @@ -34,6 +34,7 @@ limitations under the License. #include "mediapipe/tasks/cc/common.h" #include "mediapipe/tasks/cc/components/image_preprocessing_options.pb.h" #include "mediapipe/tasks/cc/core/model_resources.h" +#include "mediapipe/tasks/cc/core/proto/acceleration.pb.h" #include "mediapipe/tasks/cc/vision/utils/image_tensor_specs.h" #include "tensorflow/lite/schema/schema_generated.h" @@ -129,7 +130,7 @@ absl::Status ConfigureImageToTensorCalculator( options->mutable_output_tensor_float_range()->set_max((255.0f - mean) / std); } - // TODO: need to.support different GPU origin on differnt + // TODO: need to support different GPU origin on different // platforms or applications. 
options->set_gpu_origin(mediapipe::GpuOrigin::TOP_LEFT); return absl::OkStatus(); @@ -137,7 +138,13 @@ absl::Status ConfigureImageToTensorCalculator( } // namespace +bool DetermineImagePreprocessingGpuBackend( + const core::proto::Acceleration& acceleration) { + return acceleration.has_gpu(); +} + absl::Status ConfigureImagePreprocessing(const ModelResources& model_resources, + bool use_gpu, ImagePreprocessingOptions* options) { ASSIGN_OR_RETURN(auto image_tensor_specs, BuildImageTensorSpecs(model_resources)); @@ -145,7 +152,9 @@ absl::Status ConfigureImagePreprocessing(const ModelResources& model_resources, image_tensor_specs, options->mutable_image_to_tensor_options())); // The GPU backend isn't able to process int data. If the input tensor is // quantized, forces the image preprocessing graph to use CPU backend. - if (image_tensor_specs.tensor_type == tflite::TensorType_UINT8) { + if (use_gpu && image_tensor_specs.tensor_type != tflite::TensorType_UINT8) { + options->set_backend(ImagePreprocessingOptions::GPU_BACKEND); + } else { options->set_backend(ImagePreprocessingOptions::CPU_BACKEND); } return absl::OkStatus(); diff --git a/mediapipe/tasks/cc/components/image_preprocessing.h b/mediapipe/tasks/cc/components/image_preprocessing.h index a5b767f3a..6963b6556 100644 --- a/mediapipe/tasks/cc/components/image_preprocessing.h +++ b/mediapipe/tasks/cc/components/image_preprocessing.h @@ -19,20 +19,26 @@ limitations under the License. #include "absl/status/status.h" #include "mediapipe/tasks/cc/components/image_preprocessing_options.pb.h" #include "mediapipe/tasks/cc/core/model_resources.h" +#include "mediapipe/tasks/cc/core/proto/acceleration.pb.h" namespace mediapipe { namespace tasks { namespace components { -// Configures an ImagePreprocessing subgraph using the provided model resources. +// Configures an ImagePreprocessing subgraph using the provided model resources. +// When use_gpu is true, use GPU as backend to convert image to tensor. 
// - Accepts CPU input images and outputs CPU tensors. // // Example usage: // // auto& preprocessing = // graph.AddNode("mediapipe.tasks.components.ImagePreprocessingSubgraph"); +// core::proto::Acceleration acceleration; +// acceleration.mutable_xnnpack(); +// bool use_gpu = DetermineImagePreprocessingGpuBackend(acceleration); // MP_RETURN_IF_ERROR(ConfigureImagePreprocessing( // model_resources, +// use_gpu, // &preprocessing.GetOptions())); // // The resulting ImagePreprocessing subgraph has the following I/O: @@ -56,9 +62,14 @@ namespace components { // The image that has the pixel data stored on the target storage (CPU vs // GPU). absl::Status ConfigureImagePreprocessing( - const core::ModelResources& model_resources, + const core::ModelResources& model_resources, bool use_gpu, ImagePreprocessingOptions* options); +// Determine if the image preprocessing subgraph should use GPU as the backend +// according to the given acceleration setting. +bool DetermineImagePreprocessingGpuBackend( + const core::proto::Acceleration& acceleration); + } // namespace components } // namespace tasks } // namespace mediapipe diff --git a/mediapipe/tasks/cc/vision/hand_detector/hand_detector_graph.cc b/mediapipe/tasks/cc/vision/hand_detector/hand_detector_graph.cc index e876d7d09..06bb2e549 100644 --- a/mediapipe/tasks/cc/vision/hand_detector/hand_detector_graph.cc +++ b/mediapipe/tasks/cc/vision/hand_detector/hand_detector_graph.cc @@ -235,8 +235,10 @@ class HandDetectorGraph : public core::ModelTaskGraph { image_to_tensor_options.set_keep_aspect_ratio(true); image_to_tensor_options.set_border_mode( mediapipe::ImageToTensorCalculatorOptions::BORDER_ZERO); + bool use_gpu = components::DetermineImagePreprocessingGpuBackend( + subgraph_options.base_options().acceleration()); MP_RETURN_IF_ERROR(ConfigureImagePreprocessing( - model_resources, + model_resources, use_gpu, &preprocessing .GetOptions())); image_in >> preprocessing.In("IMAGE"); diff --git 
a/mediapipe/tasks/cc/vision/hand_landmarker/hand_landmarks_detector_graph.cc b/mediapipe/tasks/cc/vision/hand_landmarker/hand_landmarks_detector_graph.cc index 23521790d..1f127deb8 100644 --- a/mediapipe/tasks/cc/vision/hand_landmarker/hand_landmarks_detector_graph.cc +++ b/mediapipe/tasks/cc/vision/hand_landmarker/hand_landmarks_detector_graph.cc @@ -283,8 +283,10 @@ class SingleHandLandmarksDetectorGraph : public core::ModelTaskGraph { auto& preprocessing = graph.AddNode("mediapipe.tasks.components.ImagePreprocessingSubgraph"); + bool use_gpu = components::DetermineImagePreprocessingGpuBackend( + subgraph_options.base_options().acceleration()); MP_RETURN_IF_ERROR(ConfigureImagePreprocessing( - model_resources, + model_resources, use_gpu, &preprocessing .GetOptions())); image_in >> preprocessing.In("IMAGE"); diff --git a/mediapipe/tasks/cc/vision/image_classifier/image_classifier_graph.cc b/mediapipe/tasks/cc/vision/image_classifier/image_classifier_graph.cc index 9a0078c5c..8a1b17ce9 100644 --- a/mediapipe/tasks/cc/vision/image_classifier/image_classifier_graph.cc +++ b/mediapipe/tasks/cc/vision/image_classifier/image_classifier_graph.cc @@ -138,8 +138,10 @@ class ImageClassifierGraph : public core::ModelTaskGraph { // stream. 
auto& preprocessing = graph.AddNode("mediapipe.tasks.components.ImagePreprocessingSubgraph"); + bool use_gpu = components::DetermineImagePreprocessingGpuBackend( + task_options.base_options().acceleration()); MP_RETURN_IF_ERROR(ConfigureImagePreprocessing( - model_resources, + model_resources, use_gpu, &preprocessing .GetOptions())); image_in >> preprocessing.In(kImageTag); diff --git a/mediapipe/tasks/cc/vision/image_embedder/image_embedder_graph.cc b/mediapipe/tasks/cc/vision/image_embedder/image_embedder_graph.cc index fff0f4366..f0f440986 100644 --- a/mediapipe/tasks/cc/vision/image_embedder/image_embedder_graph.cc +++ b/mediapipe/tasks/cc/vision/image_embedder/image_embedder_graph.cc @@ -134,8 +134,10 @@ class ImageEmbedderGraph : public core::ModelTaskGraph { // stream. auto& preprocessing = graph.AddNode("mediapipe.tasks.components.ImagePreprocessingSubgraph"); + bool use_gpu = components::DetermineImagePreprocessingGpuBackend( + task_options.base_options().acceleration()); MP_RETURN_IF_ERROR(ConfigureImagePreprocessing( - model_resources, + model_resources, use_gpu, &preprocessing .GetOptions())); image_in >> preprocessing.In(kImageTag); diff --git a/mediapipe/tasks/cc/vision/image_segmenter/image_segmenter_graph.cc b/mediapipe/tasks/cc/vision/image_segmenter/image_segmenter_graph.cc index 629b940aa..d3e522d92 100644 --- a/mediapipe/tasks/cc/vision/image_segmenter/image_segmenter_graph.cc +++ b/mediapipe/tasks/cc/vision/image_segmenter/image_segmenter_graph.cc @@ -243,8 +243,10 @@ class ImageSegmenterGraph : public core::ModelTaskGraph { // stream. 
auto& preprocessing = graph.AddNode("mediapipe.tasks.components.ImagePreprocessingSubgraph"); + bool use_gpu = components::DetermineImagePreprocessingGpuBackend( + task_options.base_options().acceleration()); MP_RETURN_IF_ERROR(ConfigureImagePreprocessing( - model_resources, + model_resources, use_gpu, &preprocessing .GetOptions())); image_in >> preprocessing.In(kImageTag); diff --git a/mediapipe/tasks/cc/vision/object_detector/object_detector_graph.cc b/mediapipe/tasks/cc/vision/object_detector/object_detector_graph.cc index 07e912cfc..b149cea0f 100644 --- a/mediapipe/tasks/cc/vision/object_detector/object_detector_graph.cc +++ b/mediapipe/tasks/cc/vision/object_detector/object_detector_graph.cc @@ -563,8 +563,10 @@ class ObjectDetectorGraph : public core::ModelTaskGraph { // stream. auto& preprocessing = graph.AddNode("mediapipe.tasks.components.ImagePreprocessingSubgraph"); + bool use_gpu = components::DetermineImagePreprocessingGpuBackend( + task_options.base_options().acceleration()); MP_RETURN_IF_ERROR(ConfigureImagePreprocessing( - model_resources, + model_resources, use_gpu, &preprocessing .GetOptions())); image_in >> preprocessing.In(kImageTag);