Remove objc_library from Python build path for Mac GPU build

Addresses https://github.com/bazelbuild/bazel/issues/19912

PiperOrigin-RevId: 575896231
This commit is contained in:
Sebastian Schmidt 2023-10-23 12:30:37 -07:00 committed by Copybara-Service
parent a39df33664
commit aedafd63f9
10 changed files with 338 additions and 119 deletions

View File

@ -513,6 +513,9 @@ http_archive(
"@//third_party:org_tensorflow_system_python.diff", "@//third_party:org_tensorflow_system_python.diff",
# Diff is generated with a script, don't update it manually. # Diff is generated with a script, don't update it manually.
"@//third_party:org_tensorflow_custom_ops.diff", "@//third_party:org_tensorflow_custom_ops.diff",
# Works around Bazel issue with objc_library.
# See https://github.com/bazelbuild/bazel/issues/19912
"@//third_party:org_tensorflow_objc_build_fixes.diff",
], ],
patch_args = [ patch_args = [
"-p1", "-p1",

View File

@ -526,12 +526,14 @@ mediapipe_proto_library(
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
) )
objc_library( cc_library(
name = "pixel_buffer_pool_util", name = "pixel_buffer_pool_util",
srcs = ["pixel_buffer_pool_util.mm"], srcs = ["pixel_buffer_pool_util.cc"],
hdrs = ["pixel_buffer_pool_util.h"], hdrs = ["pixel_buffer_pool_util.h"],
copts = [ copts = [
"-x objective-c++",
"-Wno-shorten-64-to-32", "-Wno-shorten-64-to-32",
"-fobjc-arc", # enable reference-counting
], ],
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
deps = [ deps = [
@ -542,13 +544,14 @@ objc_library(
], ],
) )
objc_library( cc_library(
name = "metal_shared_resources", name = "metal_shared_resources",
srcs = ["metal_shared_resources.mm"], srcs = ["metal_shared_resources.cc"],
hdrs = ["metal_shared_resources.h"], hdrs = ["metal_shared_resources.h"],
copts = [ copts = [
"-x objective-c++", "-x objective-c++",
"-Wno-shorten-64-to-32", "-Wno-shorten-64-to-32",
"-fobjc-arc", # enable reference-counting
], ],
features = ["-layering_check"], features = ["-layering_check"],
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
@ -557,15 +560,17 @@ objc_library(
"@google_toolbox_for_mac//:GTM_Defines", "@google_toolbox_for_mac//:GTM_Defines",
] + [ ] + [
], ],
alwayslink = 1,
) )
objc_library( cc_library(
name = "MPPMetalUtil", name = "MPPMetalUtil",
srcs = ["MPPMetalUtil.mm"], srcs = ["MPPMetalUtil.cc"],
hdrs = ["MPPMetalUtil.h"], hdrs = ["MPPMetalUtil.h"],
copts = [ copts = [
"-x objective-c++", "-x objective-c++",
"-Wno-shorten-64-to-32", "-Wno-shorten-64-to-32",
"-fobjc-arc", # enable reference-counting
], ],
visibility = ["//visibility:public"], visibility = ["//visibility:public"],
deps = [ deps = [
@ -575,6 +580,7 @@ objc_library(
"@com_google_absl//absl/time", "@com_google_absl//absl/time",
"@google_toolbox_for_mac//:GTM_Defines", "@google_toolbox_for_mac//:GTM_Defines",
], ],
alwayslink = 1,
) )
mediapipe_proto_library( mediapipe_proto_library(

View File

@ -69,10 +69,10 @@
while (!bufferCompleted) { while (!bufferCompleted) {
auto duration = absl::Now() - start_time; auto duration = absl::Now() - start_time;
// If the spin-lock takes more than 5 ms then go to blocking wait: // If the spin-lock takes more than 5 ms then go to blocking wait:
// - it frees the CPU core for other threads: increase the performance/decrease power // - it frees the CPU core for other threads: increase the
// consumption. // performance/decrease power consumption.
// - if a driver thread that notifies that the GPU buffer is completed has lower priority, then // - if a driver thread that notifies that the GPU buffer is completed has
// the CPU core is allocated for the thread. // lower priority, then the CPU core is allocated for the thread.
if (duration >= absl::Milliseconds(5)) { if (duration >= absl::Milliseconds(5)) {
[commandBuffer waitUntilCompleted]; [commandBuffer waitUntilCompleted];
break; break;

View File

@ -50,9 +50,10 @@
- (CVMetalTextureCacheRef)mtlTextureCache { - (CVMetalTextureCacheRef)mtlTextureCache {
@synchronized(self) { @synchronized(self) {
if (!_mtlTextureCache) { if (!_mtlTextureCache) {
CVReturn __unused err = CVReturn __unused err = CVMetalTextureCacheCreate(
CVMetalTextureCacheCreate(NULL, NULL, self.mtlDevice, NULL, &_mtlTextureCache); NULL, NULL, self.mtlDevice, NULL, &_mtlTextureCache);
NSAssert(err == kCVReturnSuccess, @"Error at CVMetalTextureCacheCreate %d ; device %@", err, NSAssert(err == kCVReturnSuccess,
@"Error at CVMetalTextureCacheCreate %d ; device %@", err,
self.mtlDevice); self.mtlDevice);
// TODO: register and flush metal caches too. // TODO: register and flush metal caches too.
} }

View File

@ -24,23 +24,27 @@
namespace mediapipe { namespace mediapipe {
CVPixelBufferPoolRef CreateCVPixelBufferPool( CVPixelBufferPoolRef CreateCVPixelBufferPool(int width, int height,
int width, int height, OSType pixelFormat, int keepCount, OSType pixelFormat, int keepCount,
CFTimeInterval maxAge) { CFTimeInterval maxAge) {
CVPixelBufferPoolRef pool = NULL; CVPixelBufferPoolRef pool = NULL;
NSMutableDictionary *sourcePixelBufferOptions = NSMutableDictionary *sourcePixelBufferOptions =
[(__bridge NSDictionary*)GetCVPixelBufferAttributesForGlCompatibility() mutableCopy]; [(__bridge NSDictionary *)GetCVPixelBufferAttributesForGlCompatibility()
mutableCopy];
[sourcePixelBufferOptions addEntriesFromDictionary:@{ [sourcePixelBufferOptions addEntriesFromDictionary:@{
(id)kCVPixelBufferPixelFormatTypeKey : @(pixelFormat), (id)kCVPixelBufferPixelFormatTypeKey : @(pixelFormat),
(id)kCVPixelBufferWidthKey : @(width), (id)kCVPixelBufferWidthKey : @(width),
(id)kCVPixelBufferHeightKey : @(height), (id)kCVPixelBufferHeightKey : @(height),
}]; }];
NSMutableDictionary *pixelBufferPoolOptions = [[NSMutableDictionary alloc] init]; NSMutableDictionary *pixelBufferPoolOptions =
pixelBufferPoolOptions[(id)kCVPixelBufferPoolMinimumBufferCountKey] = @(keepCount); [[NSMutableDictionary alloc] init];
pixelBufferPoolOptions[(id)kCVPixelBufferPoolMinimumBufferCountKey] =
@(keepCount);
if (maxAge > 0) { if (maxAge > 0) {
pixelBufferPoolOptions[(id)kCVPixelBufferPoolMaximumBufferAgeKey] = @(maxAge); pixelBufferPoolOptions[(id)kCVPixelBufferPoolMaximumBufferAgeKey] =
@(maxAge);
} }
CVPixelBufferPoolCreate( CVPixelBufferPoolCreate(
@ -50,8 +54,9 @@ CVPixelBufferPoolRef CreateCVPixelBufferPool(
return pool; return pool;
} }
OSStatus PreallocateCVPixelBufferPoolBuffers( OSStatus PreallocateCVPixelBufferPoolBuffers(CVPixelBufferPoolRef pool,
CVPixelBufferPoolRef pool, int count, CFDictionaryRef auxAttributes) { int count,
CFDictionaryRef auxAttributes) {
CVReturn err = kCVReturnSuccess; CVReturn err = kCVReturnSuccess;
NSMutableArray *pixelBuffers = [[NSMutableArray alloc] init]; NSMutableArray *pixelBuffers = [[NSMutableArray alloc] init];
for (int i = 0; i < count && err == kCVReturnSuccess; i++) { for (int i = 0; i < count && err == kCVReturnSuccess; i++) {
@ -68,30 +73,37 @@ OSStatus PreallocateCVPixelBufferPoolBuffers(
return err; return err;
} }
CFDictionaryRef CreateCVPixelBufferPoolAuxiliaryAttributesForThreshold(int allocationThreshold) { CFDictionaryRef CreateCVPixelBufferPoolAuxiliaryAttributesForThreshold(
int allocationThreshold) {
if (allocationThreshold > 0) { if (allocationThreshold > 0) {
return (CFDictionaryRef)CFBridgingRetain( return (CFDictionaryRef)CFBridgingRetain(@{
@{(id)kCVPixelBufferPoolAllocationThresholdKey: @(allocationThreshold)}); (id)kCVPixelBufferPoolAllocationThresholdKey : @(allocationThreshold)
});
} else { } else {
return nil; return nil;
} }
} }
CVReturn CreateCVPixelBufferWithPool( CVReturn CreateCVPixelBufferWithPool(CVPixelBufferPoolRef pool,
CVPixelBufferPoolRef pool, CFDictionaryRef auxAttributes, CFDictionaryRef auxAttributes,
CVTextureCacheType textureCache, CVPixelBufferRef* outBuffer) { CVTextureCacheType textureCache,
return CreateCVPixelBufferWithPool(pool, auxAttributes, [textureCache](){ CVPixelBufferRef *outBuffer) {
return CreateCVPixelBufferWithPool(
pool, auxAttributes,
[textureCache]() {
#if TARGET_OS_OSX #if TARGET_OS_OSX
CVOpenGLTextureCacheFlush(textureCache, 0); CVOpenGLTextureCacheFlush(textureCache, 0);
#else #else
CVOpenGLESTextureCacheFlush(textureCache, 0); CVOpenGLESTextureCacheFlush(textureCache, 0);
#endif // TARGET_OS_OSX #endif // TARGET_OS_OSX
}, outBuffer); },
outBuffer);
} }
CVReturn CreateCVPixelBufferWithPool( CVReturn CreateCVPixelBufferWithPool(CVPixelBufferPoolRef pool,
CVPixelBufferPoolRef pool, CFDictionaryRef auxAttributes, CFDictionaryRef auxAttributes,
std::function<void(void)> flush, CVPixelBufferRef* outBuffer) { std::function<void(void)> flush,
CVPixelBufferRef *outBuffer) {
CVReturn err = CVPixelBufferPoolCreatePixelBufferWithAuxAttributes( CVReturn err = CVPixelBufferPoolCreatePixelBufferWithAuxAttributes(
kCFAllocatorDefault, pool, auxAttributes, outBuffer); kCFAllocatorDefault, pool, auxAttributes, outBuffer);
if (err == kCVReturnWouldExceedAllocationThreshold) { if (err == kCVReturnWouldExceedAllocationThreshold) {
@ -103,11 +115,13 @@ CVReturn CreateCVPixelBufferWithPool(
kCFAllocatorDefault, pool, auxAttributes, outBuffer); kCFAllocatorDefault, pool, auxAttributes, outBuffer);
} }
if (err == kCVReturnWouldExceedAllocationThreshold) { if (err == kCVReturnWouldExceedAllocationThreshold) {
// TODO: allow the application to set the threshold. For now, disable it by // TODO: allow the application to set the threshold. For now, disable it
// default, since the threshold we are using is arbitrary and some graphs routinely cross it. // by default, since the threshold we are using is arbitrary and some
// graphs routinely cross it.
#ifdef ENABLE_MEDIAPIPE_GPU_BUFFER_THRESHOLD_CHECK #ifdef ENABLE_MEDIAPIPE_GPU_BUFFER_THRESHOLD_CHECK
NSLog(@"Using more buffers than expected! This is a debug-only warning, " NSLog(
"you can ignore it if your app works fine otherwise."); @"Using more buffers than expected! This is a debug-only warning, "
"you can ignore it if your app works fine otherwise.");
#ifdef DEBUG #ifdef DEBUG
NSLog(@"Pool status: %@", ((__bridge NSObject *)pool).description); NSLog(@"Pool status: %@", ((__bridge NSObject *)pool).description);
#endif // DEBUG #endif // DEBUG

View File

@ -52,9 +52,9 @@ objc_library(
) )
MEDIAPIPE_IOS_SRCS = [ MEDIAPIPE_IOS_SRCS = [
"MPPGraph.mm", "MPPGraph.cc",
"MPPTimestampConverter.mm", "MPPTimestampConverter.cc",
"NSError+util_status.mm", "NSError+util_status.cc",
] ]
MEDIAPIPE_IOS_HDRS = [ MEDIAPIPE_IOS_HDRS = [
@ -63,11 +63,13 @@ MEDIAPIPE_IOS_HDRS = [
"NSError+util_status.h", "NSError+util_status.h",
] ]
objc_library( cc_library(
name = "mediapipe_framework_ios", name = "mediapipe_framework_ios",
srcs = MEDIAPIPE_IOS_SRCS, srcs = MEDIAPIPE_IOS_SRCS,
hdrs = MEDIAPIPE_IOS_HDRS, hdrs = MEDIAPIPE_IOS_HDRS,
copts = [ copts = [
"-x objective-c++",
"-fobjc-arc", # enable reference-counting
"-Wno-shorten-64-to-32", "-Wno-shorten-64-to-32",
], ],
# This build rule is public to allow external customers to build their own iOS apps. # This build rule is public to allow external customers to build their own iOS apps.
@ -99,6 +101,7 @@ objc_library(
"@com_google_absl//absl/synchronization", "@com_google_absl//absl/synchronization",
"@google_toolbox_for_mac//:GTM_Defines", "@google_toolbox_for_mac//:GTM_Defines",
], ],
alwayslink = 1,
) )
objc_library( objc_library(

View File

@ -12,13 +12,12 @@
// See the License for the specific language governing permissions and // See the License for the specific language governing permissions and
// limitations under the License. // limitations under the License.
#import "mediapipe/objc/MPPGraph.h"
#import <AVFoundation/AVFoundation.h> #import <AVFoundation/AVFoundation.h>
#import <Accelerate/Accelerate.h> #import <Accelerate/Accelerate.h>
#include <atomic> #include <atomic>
#import "GTMDefines.h"
#include "absl/memory/memory.h" #include "absl/memory/memory.h"
#include "mediapipe/framework/calculator_framework.h" #include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/formats/image.h" #include "mediapipe/framework/formats/image.h"
@ -26,22 +25,23 @@
#include "mediapipe/framework/graph_service.h" #include "mediapipe/framework/graph_service.h"
#include "mediapipe/gpu/gl_base.h" #include "mediapipe/gpu/gl_base.h"
#include "mediapipe/gpu/gpu_shared_data_internal.h" #include "mediapipe/gpu/gpu_shared_data_internal.h"
#import "mediapipe/objc/MPPGraph.h"
#import "mediapipe/objc/NSError+util_status.h"
#include "mediapipe/objc/util.h" #include "mediapipe/objc/util.h"
#import "mediapipe/objc/NSError+util_status.h"
#import "GTMDefines.h"
@implementation MPPGraph { @implementation MPPGraph {
// Graph is wrapped in a unique_ptr because it was generating 39+KB of unnecessary ObjC runtime // Graph is wrapped in a unique_ptr because it was generating 39+KB of
// information. See https://medium.com/@dmaclach/objective-c-encoding-and-you-866624cc02de // unnecessary ObjC runtime information. See
// for details. // https://medium.com/@dmaclach/objective-c-encoding-and-you-866624cc02de for
// details.
std::unique_ptr<mediapipe::CalculatorGraph> _graph; std::unique_ptr<mediapipe::CalculatorGraph> _graph;
/// Input side packets that will be added to the graph when it is started. /// Input side packets that will be added to the graph when it is started.
std::map<std::string, mediapipe::Packet> _inputSidePackets; std::map<std::string, mediapipe::Packet> _inputSidePackets;
/// Packet headers that will be added to the graph when it is started. /// Packet headers that will be added to the graph when it is started.
std::map<std::string, mediapipe::Packet> _streamHeaders; std::map<std::string, mediapipe::Packet> _streamHeaders;
/// Service packets to be added to the graph when it is started. /// Service packets to be added to the graph when it is started.
std::map<const mediapipe::GraphServiceBase*, mediapipe::Packet> _servicePackets; std::map<const mediapipe::GraphServiceBase*, mediapipe::Packet>
_servicePackets;
/// Number of frames currently being processed by the graph. /// Number of frames currently being processed by the graph.
std::atomic<int32_t> _framesInFlight; std::atomic<int32_t> _framesInFlight;
@ -56,7 +56,8 @@
BOOL _started; BOOL _started;
} }
- (instancetype)initWithGraphConfig:(const mediapipe::CalculatorGraphConfig&)config { - (instancetype)initWithGraphConfig:
(const mediapipe::CalculatorGraphConfig&)config {
self = [super init]; self = [super init];
if (self) { if (self) {
// Turn on Cocoa multithreading, since MediaPipe uses threads. // Turn on Cocoa multithreading, since MediaPipe uses threads.
@ -76,40 +77,47 @@
return _graph->GetGraphInputStreamAddMode(); return _graph->GetGraphInputStreamAddMode();
} }
- (void)setPacketAddMode:(mediapipe::CalculatorGraph::GraphInputStreamAddMode)mode { - (void)setPacketAddMode:
(mediapipe::CalculatorGraph::GraphInputStreamAddMode)mode {
_graph->SetGraphInputStreamAddMode(mode); _graph->SetGraphInputStreamAddMode(mode);
} }
- (void)addFrameOutputStream:(const std::string&)outputStreamName - (void)addFrameOutputStream:(const std::string&)outputStreamName
outputPacketType:(MPPPacketType)packetType { outputPacketType:(MPPPacketType)packetType {
std::string callbackInputName; std::string callbackInputName;
mediapipe::tool::AddCallbackCalculator(outputStreamName, &_config, &callbackInputName, mediapipe::tool::AddCallbackCalculator(outputStreamName, &_config,
/*use_std_function=*/true); &callbackInputName,
// No matter what ownership qualifiers are put on the pointer, NewPermanentCallback will /*use_std_function=*/true);
// still end up with a strong pointer to MPPGraph*. That is why we use void* instead. // No matter what ownership qualifiers are put on the pointer,
// NewPermanentCallback will still end up with a strong pointer to MPPGraph*.
// That is why we use void* instead.
void* wrapperVoid = (__bridge void*)self; void* wrapperVoid = (__bridge void*)self;
_inputSidePackets[callbackInputName] = _inputSidePackets[callbackInputName] =
mediapipe::MakePacket<std::function<void(const mediapipe::Packet&)>>( mediapipe::MakePacket<std::function<void(const mediapipe::Packet&)>>(
[wrapperVoid, outputStreamName, packetType](const mediapipe::Packet& packet) { [wrapperVoid, outputStreamName,
CallFrameDelegate(wrapperVoid, outputStreamName, packetType, packet); packetType](const mediapipe::Packet& packet) {
CallFrameDelegate(wrapperVoid, outputStreamName, packetType,
packet);
}); });
} }
- (NSString *)description { - (NSString*)description {
return [NSString stringWithFormat:@"<%@: %p; framesInFlight = %d>", [self class], self, return [NSString
_framesInFlight.load(std::memory_order_relaxed)]; stringWithFormat:@"<%@: %p; framesInFlight = %d>", [self class], self,
_framesInFlight.load(std::memory_order_relaxed)];
} }
/// This is the function that gets called by the CallbackCalculator that /// This is the function that gets called by the CallbackCalculator that
/// receives the graph's output. /// receives the graph's output.
void CallFrameDelegate(void* wrapperVoid, const std::string& streamName, void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,
MPPPacketType packetType, const mediapipe::Packet& packet) { MPPPacketType packetType,
const mediapipe::Packet& packet) {
MPPGraph* wrapper = (__bridge MPPGraph*)wrapperVoid; MPPGraph* wrapper = (__bridge MPPGraph*)wrapperVoid;
@autoreleasepool { @autoreleasepool {
if (packetType == MPPPacketTypeRaw) { if (packetType == MPPPacketTypeRaw) {
[wrapper.delegate mediapipeGraph:wrapper [wrapper.delegate mediapipeGraph:wrapper
didOutputPacket:packet didOutputPacket:packet
fromStream:streamName]; fromStream:streamName];
} else if (packetType == MPPPacketTypeImageFrame) { } else if (packetType == MPPPacketTypeImageFrame) {
wrapper->_framesInFlight--; wrapper->_framesInFlight--;
const auto& frame = packet.Get<mediapipe::ImageFrame>(); const auto& frame = packet.Get<mediapipe::ImageFrame>();
@ -118,13 +126,16 @@ void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,
if (format == mediapipe::ImageFormat::SRGBA || if (format == mediapipe::ImageFormat::SRGBA ||
format == mediapipe::ImageFormat::GRAY8) { format == mediapipe::ImageFormat::GRAY8) {
CVPixelBufferRef pixelBuffer; CVPixelBufferRef pixelBuffer;
// If kCVPixelFormatType_32RGBA does not work, it returns kCVReturnInvalidPixelFormat. // If kCVPixelFormatType_32RGBA does not work, it returns
// kCVReturnInvalidPixelFormat.
CVReturn error = CVPixelBufferCreate( CVReturn error = CVPixelBufferCreate(
NULL, frame.Width(), frame.Height(), kCVPixelFormatType_32BGRA, NULL, frame.Width(), frame.Height(), kCVPixelFormatType_32BGRA,
GetCVPixelBufferAttributesForGlCompatibility(), &pixelBuffer); GetCVPixelBufferAttributesForGlCompatibility(), &pixelBuffer);
_GTMDevAssert(error == kCVReturnSuccess, @"CVPixelBufferCreate failed: %d", error); _GTMDevAssert(error == kCVReturnSuccess,
@"CVPixelBufferCreate failed: %d", error);
error = CVPixelBufferLockBaseAddress(pixelBuffer, 0); error = CVPixelBufferLockBaseAddress(pixelBuffer, 0);
_GTMDevAssert(error == kCVReturnSuccess, @"CVPixelBufferLockBaseAddress failed: %d", error); _GTMDevAssert(error == kCVReturnSuccess,
@"CVPixelBufferLockBaseAddress failed: %d", error);
vImage_Buffer vDestination = vImageForCVPixelBuffer(pixelBuffer); vImage_Buffer vDestination = vImageForCVPixelBuffer(pixelBuffer);
// Note: we have to throw away const here, but we should not overwrite // Note: we have to throw away const here, but we should not overwrite
@ -133,30 +144,35 @@ void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,
if (format == mediapipe::ImageFormat::SRGBA) { if (format == mediapipe::ImageFormat::SRGBA) {
// Swap R and B channels. // Swap R and B channels.
const uint8_t permuteMap[4] = {2, 1, 0, 3}; const uint8_t permuteMap[4] = {2, 1, 0, 3};
vImage_Error __unused vError = vImage_Error __unused vError = vImagePermuteChannels_ARGB8888(
vImagePermuteChannels_ARGB8888(&vSource, &vDestination, permuteMap, kvImageNoFlags); &vSource, &vDestination, permuteMap, kvImageNoFlags);
_GTMDevAssert(vError == kvImageNoError, @"vImagePermuteChannels failed: %zd", vError); _GTMDevAssert(vError == kvImageNoError,
@"vImagePermuteChannels failed: %zd", vError);
} else { } else {
// Convert grayscale back to BGRA // Convert grayscale back to BGRA
vImage_Error __unused vError = vImageGrayToBGRA(&vSource, &vDestination); vImage_Error __unused vError =
_GTMDevAssert(vError == kvImageNoError, @"vImageGrayToBGRA failed: %zd", vError); vImageGrayToBGRA(&vSource, &vDestination);
_GTMDevAssert(vError == kvImageNoError,
@"vImageGrayToBGRA failed: %zd", vError);
} }
error = CVPixelBufferUnlockBaseAddress(pixelBuffer, 0); error = CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
_GTMDevAssert(error == kCVReturnSuccess, _GTMDevAssert(error == kCVReturnSuccess,
@"CVPixelBufferUnlockBaseAddress failed: %d", error); @"CVPixelBufferUnlockBaseAddress failed: %d", error);
if ([wrapper.delegate respondsToSelector:@selector if ([wrapper.delegate
(mediapipeGraph:didOutputPixelBuffer:fromStream:timestamp:)]) { respondsToSelector:@selector
(mediapipeGraph:didOutputPixelBuffer:fromStream:timestamp:)]) {
[wrapper.delegate mediapipeGraph:wrapper [wrapper.delegate mediapipeGraph:wrapper
didOutputPixelBuffer:pixelBuffer didOutputPixelBuffer:pixelBuffer
fromStream:streamName fromStream:streamName
timestamp:packet.Timestamp()]; timestamp:packet.Timestamp()];
} else if ([wrapper.delegate respondsToSelector:@selector } else if ([wrapper.delegate
(mediapipeGraph:didOutputPixelBuffer:fromStream:)]) { respondsToSelector:@selector
(mediapipeGraph:didOutputPixelBuffer:fromStream:)]) {
[wrapper.delegate mediapipeGraph:wrapper [wrapper.delegate mediapipeGraph:wrapper
didOutputPixelBuffer:pixelBuffer didOutputPixelBuffer:pixelBuffer
fromStream:streamName]; fromStream:streamName];
} }
CVPixelBufferRelease(pixelBuffer); CVPixelBufferRelease(pixelBuffer);
} else { } else {
@ -168,22 +184,23 @@ void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,
wrapper->_framesInFlight--; wrapper->_framesInFlight--;
CVPixelBufferRef pixelBuffer; CVPixelBufferRef pixelBuffer;
if (packetType == MPPPacketTypePixelBuffer) if (packetType == MPPPacketTypePixelBuffer)
pixelBuffer = mediapipe::GetCVPixelBufferRef(packet.Get<mediapipe::GpuBuffer>()); pixelBuffer =
mediapipe::GetCVPixelBufferRef(packet.Get<mediapipe::GpuBuffer>());
else else
pixelBuffer = packet.Get<mediapipe::Image>().GetCVPixelBufferRef(); pixelBuffer = packet.Get<mediapipe::Image>().GetCVPixelBufferRef();
if ([wrapper.delegate if ([wrapper.delegate
respondsToSelector:@selector respondsToSelector:@selector
(mediapipeGraph:didOutputPixelBuffer:fromStream:timestamp:)]) { (mediapipeGraph:didOutputPixelBuffer:fromStream:timestamp:)]) {
[wrapper.delegate mediapipeGraph:wrapper [wrapper.delegate mediapipeGraph:wrapper
didOutputPixelBuffer:pixelBuffer didOutputPixelBuffer:pixelBuffer
fromStream:streamName fromStream:streamName
timestamp:packet.Timestamp()]; timestamp:packet.Timestamp()];
} else if ([wrapper.delegate } else if ([wrapper.delegate
respondsToSelector:@selector respondsToSelector:@selector
(mediapipeGraph:didOutputPixelBuffer:fromStream:)]) { (mediapipeGraph:didOutputPixelBuffer:fromStream:)]) {
[wrapper.delegate mediapipeGraph:wrapper [wrapper.delegate mediapipeGraph:wrapper
didOutputPixelBuffer:pixelBuffer didOutputPixelBuffer:pixelBuffer
fromStream:streamName]; fromStream:streamName];
} }
#endif // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER #endif // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
} else { } else {
@ -192,13 +209,15 @@ if ([wrapper.delegate
} }
} }
- (void)setHeaderPacket:(const mediapipe::Packet&)packet forStream:(const std::string&)streamName { - (void)setHeaderPacket:(const mediapipe::Packet&)packet
forStream:(const std::string&)streamName {
_GTMDevAssert(!_started, @"%@ must be called before the graph is started", _GTMDevAssert(!_started, @"%@ must be called before the graph is started",
NSStringFromSelector(_cmd)); NSStringFromSelector(_cmd));
_streamHeaders[streamName] = packet; _streamHeaders[streamName] = packet;
} }
- (void)setSidePacket:(const mediapipe::Packet&)packet named:(const std::string&)name { - (void)setSidePacket:(const mediapipe::Packet&)packet
named:(const std::string&)name {
_GTMDevAssert(!_started, @"%@ must be called before the graph is started", _GTMDevAssert(!_started, @"%@ must be called before the graph is started",
NSStringFromSelector(_cmd)); NSStringFromSelector(_cmd));
_inputSidePackets[name] = packet; _inputSidePackets[name] = packet;
@ -211,7 +230,8 @@ if ([wrapper.delegate
_servicePackets[&service] = std::move(packet); _servicePackets[&service] = std::move(packet);
} }
- (void)addSidePackets:(const std::map<std::string, mediapipe::Packet>&)extraSidePackets { - (void)addSidePackets:
(const std::map<std::string, mediapipe::Packet>&)extraSidePackets {
_GTMDevAssert(!_started, @"%@ must be called before the graph is started", _GTMDevAssert(!_started, @"%@ must be called before the graph is started",
NSStringFromSelector(_cmd)); NSStringFromSelector(_cmd));
_inputSidePackets.insert(extraSidePackets.begin(), extraSidePackets.end()); _inputSidePackets.insert(extraSidePackets.begin(), extraSidePackets.end());
@ -232,7 +252,8 @@ if ([wrapper.delegate
- (absl::Status)performStart { - (absl::Status)performStart {
absl::Status status; absl::Status status;
for (const auto& service_packet : _servicePackets) { for (const auto& service_packet : _servicePackets) {
status = _graph->SetServicePacket(*service_packet.first, service_packet.second); status =
_graph->SetServicePacket(*service_packet.first, service_packet.second);
if (!status.ok()) { if (!status.ok()) {
return status; return status;
} }
@ -269,11 +290,12 @@ if ([wrapper.delegate
} }
- (BOOL)waitUntilDoneWithError:(NSError**)error { - (BOOL)waitUntilDoneWithError:(NSError**)error {
// Since this method blocks with no timeout, it should not be called in the main thread in // Since this method blocks with no timeout, it should not be called in the
// an app. However, it's fine to allow that in a test. // main thread in an app. However, it's fine to allow that in a test.
// TODO: is this too heavy-handed? Maybe a warning would be fine. // TODO: is this too heavy-handed? Maybe a warning would be fine.
_GTMDevAssert(![NSThread isMainThread] || (NSClassFromString(@"XCTest")), _GTMDevAssert(
@"waitUntilDoneWithError: should not be called on the main thread"); ![NSThread isMainThread] || (NSClassFromString(@"XCTest")),
@"waitUntilDoneWithError: should not be called on the main thread");
absl::Status status = _graph->WaitUntilDone(); absl::Status status = _graph->WaitUntilDone();
_started = NO; _started = NO;
if (!status.ok() && error) *error = [NSError gus_errorWithStatus:status]; if (!status.ok() && error) *error = [NSError gus_errorWithStatus:status];
@ -289,7 +311,8 @@ if ([wrapper.delegate
- (BOOL)movePacket:(mediapipe::Packet&&)packet - (BOOL)movePacket:(mediapipe::Packet&&)packet
intoStream:(const std::string&)streamName intoStream:(const std::string&)streamName
error:(NSError**)error { error:(NSError**)error {
absl::Status status = _graph->AddPacketToInputStream(streamName, std::move(packet)); absl::Status status =
_graph->AddPacketToInputStream(streamName, std::move(packet));
if (!status.ok() && error) *error = [NSError gus_errorWithStatus:status]; if (!status.ok() && error) *error = [NSError gus_errorWithStatus:status];
return status.ok(); return status.ok();
} }
@ -305,15 +328,17 @@ if ([wrapper.delegate
- (BOOL)setMaxQueueSize:(int)maxQueueSize - (BOOL)setMaxQueueSize:(int)maxQueueSize
forStream:(const std::string&)streamName forStream:(const std::string&)streamName
error:(NSError**)error { error:(NSError**)error {
absl::Status status = _graph->SetInputStreamMaxQueueSize(streamName, maxQueueSize); absl::Status status =
_graph->SetInputStreamMaxQueueSize(streamName, maxQueueSize);
if (!status.ok() && error) *error = [NSError gus_errorWithStatus:status]; if (!status.ok() && error) *error = [NSError gus_errorWithStatus:status];
return status.ok(); return status.ok();
} }
- (mediapipe::Packet)packetWithPixelBuffer:(CVPixelBufferRef)imageBuffer - (mediapipe::Packet)packetWithPixelBuffer:(CVPixelBufferRef)imageBuffer
packetType:(MPPPacketType)packetType { packetType:(MPPPacketType)packetType {
mediapipe::Packet packet; mediapipe::Packet packet;
if (packetType == MPPPacketTypeImageFrame || packetType == MPPPacketTypeImageFrameBGRANoSwap) { if (packetType == MPPPacketTypeImageFrame ||
packetType == MPPPacketTypeImageFrameBGRANoSwap) {
auto frame = CreateImageFrameForCVPixelBuffer( auto frame = CreateImageFrameForCVPixelBuffer(
imageBuffer, /* canOverwrite = */ false, imageBuffer, /* canOverwrite = */ false,
/* bgrAsRgb = */ packetType == MPPPacketTypeImageFrameBGRANoSwap); /* bgrAsRgb = */ packetType == MPPPacketTypeImageFrameBGRANoSwap);
@ -328,7 +353,8 @@ if ([wrapper.delegate
packet = mediapipe::MakePacket<mediapipe::Image>(imageBuffer); packet = mediapipe::MakePacket<mediapipe::Image>(imageBuffer);
#else #else
// CPU // CPU
auto frame = CreateImageFrameForCVPixelBuffer(imageBuffer, /* canOverwrite = */ false, auto frame = CreateImageFrameForCVPixelBuffer(imageBuffer,
/* canOverwrite = */ false,
/* bgrAsRgb = */ false); /* bgrAsRgb = */ false);
packet = mediapipe::MakePacket<mediapipe::Image>(std::move(frame)); packet = mediapipe::MakePacket<mediapipe::Image>(std::move(frame));
#endif // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER #endif // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
@ -339,7 +365,8 @@ if ([wrapper.delegate
} }
- (mediapipe::Packet)imagePacketWithPixelBuffer:(CVPixelBufferRef)pixelBuffer { - (mediapipe::Packet)imagePacketWithPixelBuffer:(CVPixelBufferRef)pixelBuffer {
return [self packetWithPixelBuffer:(pixelBuffer) packetType:(MPPPacketTypeImage)]; return [self packetWithPixelBuffer:(pixelBuffer)
packetType:(MPPPacketTypeImage)];
} }
- (BOOL)sendPixelBuffer:(CVPixelBufferRef)imageBuffer - (BOOL)sendPixelBuffer:(CVPixelBufferRef)imageBuffer
@ -367,13 +394,16 @@ if ([wrapper.delegate
allowOverwrite:(BOOL)allowOverwrite allowOverwrite:(BOOL)allowOverwrite
error:(NSError**)error { error:(NSError**)error {
if (_maxFramesInFlight && _framesInFlight >= _maxFramesInFlight) return NO; if (_maxFramesInFlight && _framesInFlight >= _maxFramesInFlight) return NO;
mediapipe::Packet packet = [self packetWithPixelBuffer:imageBuffer packetType:packetType]; mediapipe::Packet packet =
[self packetWithPixelBuffer:imageBuffer packetType:packetType];
BOOL success; BOOL success;
if (allowOverwrite) { if (allowOverwrite) {
packet = std::move(packet).At(timestamp); packet = std::move(packet).At(timestamp);
success = [self movePacket:std::move(packet) intoStream:inputName error:error]; success =
[self movePacket:std::move(packet) intoStream:inputName error:error];
} else { } else {
success = [self sendPacket:packet.At(timestamp) intoStream:inputName error:error]; success =
[self sendPacket:packet.At(timestamp) intoStream:inputName error:error];
} }
if (success) _framesInFlight++; if (success) _framesInFlight++;
return success; return success;
@ -407,22 +437,24 @@ if ([wrapper.delegate
} }
- (void)debugPrintGlInfo { - (void)debugPrintGlInfo {
std::shared_ptr<mediapipe::GpuResources> gpu_resources = _graph->GetGpuResources(); std::shared_ptr<mediapipe::GpuResources> gpu_resources =
_graph->GetGpuResources();
if (!gpu_resources) { if (!gpu_resources) {
NSLog(@"GPU not set up."); NSLog(@"GPU not set up.");
return; return;
} }
NSString* extensionString; NSString* extensionString;
(void)gpu_resources->gl_context()->Run([&extensionString]{ (void)gpu_resources->gl_context()->Run([&extensionString] {
extensionString = [NSString stringWithUTF8String:(char*)glGetString(GL_EXTENSIONS)]; extensionString =
[NSString stringWithUTF8String:(char*)glGetString(GL_EXTENSIONS)];
return absl::OkStatus(); return absl::OkStatus();
}); });
NSArray* extensions = [extensionString componentsSeparatedByCharactersInSet: NSArray* extensions = [extensionString
[NSCharacterSet whitespaceCharacterSet]]; componentsSeparatedByCharactersInSet:[NSCharacterSet
for (NSString* oneExtension in extensions) whitespaceCharacterSet]];
NSLog(@"%@", oneExtension); for (NSString* oneExtension in extensions) NSLog(@"%@", oneExtension);
} }
@end @end

View File

@ -20,8 +20,7 @@
mediapipe::TimestampDiff _timestampOffset; mediapipe::TimestampDiff _timestampOffset;
} }
- (instancetype)init - (instancetype)init {
{
self = [super init]; self = [super init];
if (self) { if (self) {
[self reset]; [self reset];
@ -36,11 +35,14 @@
} }
- (mediapipe::Timestamp)timestampForMediaTime:(CMTime)mediaTime { - (mediapipe::Timestamp)timestampForMediaTime:(CMTime)mediaTime {
Float64 sampleSeconds = CMTIME_IS_VALID(mediaTime) ? CMTimeGetSeconds(mediaTime) : 0; Float64 sampleSeconds =
const int64 sampleUsec = sampleSeconds * mediapipe::Timestamp::kTimestampUnitsPerSecond; CMTIME_IS_VALID(mediaTime) ? CMTimeGetSeconds(mediaTime) : 0;
const int64 sampleUsec =
sampleSeconds * mediapipe::Timestamp::kTimestampUnitsPerSecond;
_mediapipeTimestamp = mediapipe::Timestamp(sampleUsec) + _timestampOffset; _mediapipeTimestamp = mediapipe::Timestamp(sampleUsec) + _timestampOffset;
if (_mediapipeTimestamp <= _lastTimestamp) { if (_mediapipeTimestamp <= _lastTimestamp) {
_timestampOffset = _timestampOffset + _lastTimestamp + 1 - _mediapipeTimestamp; _timestampOffset =
_timestampOffset + _lastTimestamp + 1 - _mediapipeTimestamp;
_mediapipeTimestamp = _lastTimestamp + 1; _mediapipeTimestamp = _lastTimestamp + 1;
} }
_lastTimestamp = _mediapipeTimestamp; _lastTimestamp = _mediapipeTimestamp;

View File

@ -0,0 +1,72 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#import "mediapipe/objc/NSError+util_status.h"
@implementation GUSUtilStatusWrapper
+ (instancetype)wrapStatus:(const absl::Status &)status {
return [[self alloc] initWithStatus:status];
}
- (instancetype)initWithStatus:(const absl::Status &)status {
self = [super init];
if (self) {
_status = status;
}
return self;
}
- (NSString *)description {
return [NSString stringWithFormat:@"<%@: %p; status = %s>", [self class],
self, _status.message().data()];
}
@end
@implementation NSError (GUSGoogleUtilStatus)
NSString *const kGUSGoogleUtilStatusErrorDomain =
@"GoogleUtilStatusErrorDomain";
NSString *const kGUSGoogleUtilStatusErrorKey = @"GUSGoogleUtilStatusErrorKey";
+ (NSError *)gus_errorWithStatus:(const absl::Status &)status {
NSDictionary *userInfo = @{
NSLocalizedDescriptionKey : @(status.message().data()),
kGUSGoogleUtilStatusErrorKey : [GUSUtilStatusWrapper wrapStatus:status],
};
NSError *error =
[NSError errorWithDomain:kGUSGoogleUtilStatusErrorDomain
code:static_cast<NSInteger>(status.code())
userInfo:userInfo];
return error;
}
- (absl::Status)gus_status {
NSString *domain = self.domain;
if ([domain isEqual:kGUSGoogleUtilStatusErrorDomain]) {
GUSUtilStatusWrapper *wrapper = self.userInfo[kGUSGoogleUtilStatusErrorKey];
if (wrapper) return wrapper.status;
#if 0
// Unfortunately, util/task/posixerrorspace.h is not in portable status yet.
// TODO: fix that.
} else if ([domain isEqual:NSPOSIXErrorDomain]) {
return ::util::PosixErrorToStatus(self.code, self.localizedDescription.UTF8String);
#endif
}
return absl::Status(absl::StatusCode::kUnknown,
self.localizedDescription.UTF8String);
}
@end

View File

@ -0,0 +1,86 @@
diff --git a/tensorflow/lite/delegates/gpu/BUILD b/tensorflow/lite/delegates/gpu/BUILD
index 875c2a4f3da..e513db47388 100644
--- a/tensorflow/lite/delegates/gpu/BUILD
+++ b/tensorflow/lite/delegates/gpu/BUILD
@@ -70,14 +70,17 @@ cc_library(
}) + tflite_extra_gles_deps(),
)
-objc_library(
+cc_library(
name = "metal_delegate",
- srcs = ["metal_delegate.mm"],
+ srcs = ["metal_delegate.cc"],
hdrs = ["metal_delegate.h"],
- copts = ["-std=c++17"],
+ copts = [
+ "-ObjC++",
+ "-std=c++17",
+ "-fobjc-arc",
+ ],
+ linkopts = ["-framework Metal"],
features = ["-layering_check"],
- module_name = "TensorFlowLiteCMetal",
- sdk_frameworks = ["Metal"],
deps = [
"//tensorflow/lite:kernel_api",
"//tensorflow/lite:minimal_logging",
@@ -98,14 +101,20 @@ objc_library(
"//tensorflow/lite/delegates/gpu/metal:metal_spatial_tensor",
"@com_google_absl//absl/types:span",
],
+ alwayslink = 1,
)
-objc_library(
+cc_library(
name = "metal_delegate_internal",
hdrs = ["metal_delegate_internal.h"],
- copts = ["-std=c++17"],
- sdk_frameworks = ["Metal"],
+ copts = [
+ "-ObjC++",
+ "-std=c++17",
+ "-fobjc-arc",
+ ],
+ linkopts = ["-framework Metal"],
deps = ["//tensorflow/lite/delegates/gpu:metal_delegate"],
+ alwayslink = 1,
)
# build -c opt --config android_arm64 --copt -Os --copt -DTFLITE_GPU_BINARY_RELEASE --linkopt -s --strip always :libtensorflowlite_gpu_gl.so
diff --git a/tensorflow/lite/delegates/gpu/metal/BUILD b/tensorflow/lite/delegates/gpu/metal/BUILD
index 8571ff7f041..82e6bb91d2d 100644
--- a/tensorflow/lite/delegates/gpu/metal/BUILD
+++ b/tensorflow/lite/delegates/gpu/metal/BUILD
@@ -137,15 +137,16 @@ objc_library(
],
)
-objc_library(
+cc_library(
name = "inference_context",
srcs = ["inference_context.cc"],
hdrs = ["inference_context.h"],
copts = DEFAULT_COPTS + [
"-ObjC++",
+ "-fobjc-arc",
],
features = ["-layering_check"],
- sdk_frameworks = ["Metal"],
+ linkopts = ["-framework Metal"],
deps = [
":compute_task",
":inference_context_cc_fbs",
@@ -171,6 +172,7 @@ objc_library(
"@com_google_absl//absl/strings",
"@com_google_absl//absl/time",
],
+ alwayslink = 1,
)
flatbuffer_cc_library(
diff --git a/tensorflow/lite/delegates/gpu/metal_delegate.mm b/tensorflow/lite/delegates/gpu/metal_delegate.cc
similarity index 100%
rename from tensorflow/lite/delegates/gpu/metal_delegate.mm
rename to tensorflow/lite/delegates/gpu/metal_delegate.cc