Add EDGETPU_NNAPI delegate option in MediaPipe tasks API

PiperOrigin-RevId: 522344828
This commit is contained in:
MediaPipe Team 2023-04-06 08:40:12 -07:00 committed by Copybara-Service
parent 97bd9c2157
commit 7d8d3ab196
8 changed files with 35 additions and 2 deletions

View File

@@ -117,7 +117,9 @@ absl::Status ConfigureImageToTensorCalculator(
bool DetermineImagePreprocessingGpuBackend(
const core::proto::Acceleration& acceleration) {
return acceleration.has_gpu();
return acceleration.has_gpu() ||
(acceleration.has_nnapi() &&
acceleration.nnapi().accelerator_name() == "google-edgetpu");
}
absl::Status ConfigureImagePreprocessingGraph(

View File

@@ -44,7 +44,9 @@ cc_test(
deps = [
":base_options",
":utils",
"//mediapipe/calculators/tensor:inference_calculator_cc_proto",
"//mediapipe/framework/port:gtest",
"//mediapipe/tasks/cc/core/proto:acceleration_cc_proto",
"//mediapipe/tasks/cc/core/proto:external_file_cc_proto",
"@com_google_googletest//:gtest_main",
],

View File

@@ -57,6 +57,11 @@ proto::BaseOptions ConvertBaseOptionsToProto(BaseOptions* base_options) {
case BaseOptions::Delegate::GPU:
base_options_proto.mutable_acceleration()->mutable_gpu();
break;
case BaseOptions::Delegate::EDGETPU_NNAPI:
base_options_proto.mutable_acceleration()
->mutable_nnapi()
->set_accelerator_name("google-edgetpu");
break;
}
return base_options_proto;

View File

@@ -42,6 +42,8 @@ struct BaseOptions {
enum Delegate {
CPU = 0,
GPU = 1,
// Edge TPU acceleration using NNAPI delegate.
EDGETPU_NNAPI = 2,
};
Delegate delegate = CPU;

View File

@@ -2,8 +2,10 @@
#include <string>
#include "mediapipe/calculators/tensor/inference_calculator.pb.h"
#include "mediapipe/framework/port/gmock.h"
#include "mediapipe/framework/port/gtest.h"
#include "mediapipe/tasks/cc/core/proto/acceleration.pb.h"
#include "mediapipe/tasks/cc/core/proto/external_file.pb.h"
#include "mediapipe/tasks/cc/core/utils.h"
@@ -15,7 +17,7 @@ namespace tasks {
namespace core {
namespace {
TEST(BaseOptionsTest, ConverBaseOptionsToProtoWithFile) {
TEST(BaseOptionsTest, ConvertBaseOptionsToProtoWithFile) {
BaseOptions base_options;
base_options.model_asset_buffer =
std::make_unique<std::string>(LoadBinaryContent(kTestModelBundlePath));
@@ -24,6 +26,20 @@ TEST(BaseOptionsTest, ConverBaseOptionsToProtoWithFile) {
EXPECT_TRUE(proto.model_asset().has_file_content());
}
TEST(BaseOptionsTest, ConvertBaseOptionsToProtoWithAcceleration) {
BaseOptions base_options;
proto::BaseOptions proto = ConvertBaseOptionsToProto(&base_options);
EXPECT_TRUE(proto.acceleration().has_tflite());
base_options.delegate = BaseOptions::Delegate::GPU;
proto = ConvertBaseOptionsToProto(&base_options);
EXPECT_TRUE(proto.acceleration().has_gpu());
base_options.delegate = BaseOptions::Delegate::EDGETPU_NNAPI;
proto = ConvertBaseOptionsToProto(&base_options);
EXPECT_EQ(proto.acceleration().nnapi().accelerator_name(), "google-edgetpu");
}
} // namespace
} // namespace core
} // namespace tasks

View File

@@ -134,6 +134,9 @@ class InferenceSubgraph : public Subgraph {
case Acceleration::kGpu:
delegate.mutable_gpu()->CopyFrom(acceleration.gpu());
break;
case Acceleration::kNnapi:
delegate.mutable_nnapi()->CopyFrom(acceleration.nnapi());
break;
case Acceleration::kTflite:
delegate.mutable_tflite()->CopyFrom(acceleration.tflite());
break;

View File

@@ -33,5 +33,6 @@ message Acceleration {
mediapipe.InferenceCalculatorOptions.Delegate.Xnnpack xnnpack = 1;
mediapipe.InferenceCalculatorOptions.Delegate.Gpu gpu = 2;
mediapipe.InferenceCalculatorOptions.Delegate.TfLite tflite = 4;
mediapipe.InferenceCalculatorOptions.Delegate.Nnapi nnapi = 5;
}
}

View File

@@ -96,6 +96,7 @@ describe('TaskRunner', () => {
xnnpack: undefined,
gpu: undefined,
tflite: {},
nnapi: undefined,
},
};
const mockBytesResultWithGpuDelegate = {
@@ -113,6 +114,7 @@
.SUSTAINED_SPEED,
},
tflite: undefined,
nnapi: undefined,
},
};