Add GPU support

This commit is contained in:
Sebastian Schmidt 2023-10-30 16:05:12 -06:00
parent ec032fb018
commit 95692c64a9
4 changed files with 21 additions and 0 deletions

View File

@@ -14,6 +14,15 @@
#import <Foundation/Foundation.h>
/**
 * The inference backend (delegate) used to run MediaPipe tasks. If the
 * delegate is not explicitly set on the options object, the CPU delegate is
 * used by default.
 */
typedef NS_ENUM(NSUInteger, MPPDelegate) {
/** Runs inference on the CPU (the default). */
MPPDelegateCPU,
/** Runs inference on the GPU. */
MPPDelegateGPU,
} NS_SWIFT_NAME(Delegate);
NS_ASSUME_NONNULL_BEGIN
/**
@@ -26,6 +35,9 @@ NS_SWIFT_NAME(BaseOptions)
/** The path to the model asset to open and mmap in memory. */
@property(nonatomic, copy) NSString *modelAssetPath;
/** Overrides the default backend to use for the provided model. */
@property(nonatomic) MPPDelegate delegate;
@end
NS_ASSUME_NONNULL_END

View File

@@ -20,6 +20,7 @@
self = [super init];
if (self) {
self.modelAssetPath = [[NSString alloc] init];
self.delegate = MPPDelegateCPU;
}
return self;
}
@@ -28,6 +29,7 @@
MPPBaseOptions *baseOptions = [[MPPBaseOptions alloc] init];
baseOptions.modelAssetPath = self.modelAssetPath;
baseOptions.delegate = self.delegate;
return baseOptions;
}

View File

@@ -24,6 +24,7 @@ objc_library(
"//mediapipe/tasks/cc/core/proto:acceleration_cc_proto",
"//mediapipe/tasks/cc/core/proto:base_options_cc_proto",
"//mediapipe/tasks/cc/core/proto:external_file_cc_proto",
"//mediapipe/calculators/tensor:inference_calculator_cc_proto",
"//mediapipe/tasks/ios/core:MPPBaseOptions",
],
)

View File

@@ -15,9 +15,11 @@
#include "mediapipe/tasks/cc/core/proto/acceleration.pb.h"
#include "mediapipe/tasks/cc/core/proto/external_file.pb.h"
#import "mediapipe/tasks/ios/core/utils/sources/MPPBaseOptions+Helpers.h"
#include "mediapipe/calculators/tensor/inference_calculator.pb.h"
// File-local shorthand aliases for the C++ proto types referenced below.
namespace {
using BaseOptionsProto = ::mediapipe::tasks::core::proto::BaseOptions;
using InferenceCalculatorOptionsProto = ::mediapipe::InferenceCalculatorOptions;
}
@implementation MPPBaseOptions (Helpers)
@@ -33,6 +35,10 @@ using BaseOptionsProto = ::mediapipe::tasks::core::proto::BaseOptions;
if (self.modelAssetPath) {
baseOptionsProto->mutable_model_asset()->set_file_name(self.modelAssetPath.UTF8String);
}
if (self.delegate == MPPDelegateGPU) {
baseOptionsProto->mutable_acceleration()->mutable_gpu()->MergeFrom(InferenceCalculatorOptionsProto::Delegate::Gpu());
}
}
@end