Remove objc_library from Python build path for Mac GPU build
Addresses https://github.com/bazelbuild/bazel/issues/19912

PiperOrigin-RevId: 575896231
parent a39df33664, commit aedafd63f9
@@ -513,6 +513,9 @@ http_archive(
         "@//third_party:org_tensorflow_system_python.diff",
         # Diff is generated with a script, don't update it manually.
         "@//third_party:org_tensorflow_custom_ops.diff",
+        # Works around Bazel issue with objc_library.
+        # See https://github.com/bazelbuild/bazel/issues/19912
+        "@//third_party:org_tensorflow_objc_build_fixes.diff",
     ],
     patch_args = [
         "-p1",
@@ -526,12 +526,14 @@ mediapipe_proto_library(
     visibility = ["//visibility:public"],
 )
 
-objc_library(
+cc_library(
     name = "pixel_buffer_pool_util",
-    srcs = ["pixel_buffer_pool_util.mm"],
+    srcs = ["pixel_buffer_pool_util.cc"],
     hdrs = ["pixel_buffer_pool_util.h"],
     copts = [
+        "-x objective-c++",
         "-Wno-shorten-64-to-32",
+        "-fobjc-arc",  # enable reference-counting
     ],
     visibility = ["//visibility:public"],
     deps = [
@@ -542,13 +544,14 @@ objc_library(
     ],
 )
 
-objc_library(
+cc_library(
     name = "metal_shared_resources",
-    srcs = ["metal_shared_resources.mm"],
+    srcs = ["metal_shared_resources.cc"],
     hdrs = ["metal_shared_resources.h"],
     copts = [
+        "-x objective-c++",
         "-Wno-shorten-64-to-32",
         "-fobjc-arc",  # enable reference-counting
     ],
     features = ["-layering_check"],
     visibility = ["//visibility:public"],
@@ -557,15 +560,17 @@ objc_library(
         "@google_toolbox_for_mac//:GTM_Defines",
     ] + [
     ],
+    alwayslink = 1,
 )
 
-objc_library(
+cc_library(
     name = "MPPMetalUtil",
-    srcs = ["MPPMetalUtil.mm"],
+    srcs = ["MPPMetalUtil.cc"],
     hdrs = ["MPPMetalUtil.h"],
     copts = [
+        "-x objective-c++",
         "-Wno-shorten-64-to-32",
         "-fobjc-arc",  # enable reference-counting
     ],
     visibility = ["//visibility:public"],
     deps = [
@@ -575,6 +580,7 @@ objc_library(
         "@com_google_absl//absl/time",
         "@google_toolbox_for_mac//:GTM_Defines",
     ],
+    alwayslink = 1,
 )
 
 mediapipe_proto_library(
@@ -69,10 +69,10 @@
   while (!bufferCompleted) {
     auto duration = absl::Now() - start_time;
     // If the spin-lock takes more than 5 ms then go to blocking wait:
-    // - it frees the CPU core for another threads: increase the performance/decrease power
-    //   consumption.
-    // - if a driver thread that notifies that the GPU buffer is completed has lower priority then
-    //   the CPU core is allocated for the thread.
+    // - it frees the CPU core for another threads: increase the
+    //   performance/decrease power consumption.
+    // - if a driver thread that notifies that the GPU buffer is completed has
+    //   lower priority then the CPU core is allocated for the thread.
     if (duration >= absl::Milliseconds(5)) {
       [commandBuffer waitUntilCompleted];
       break;
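The comment reflowed above documents the wait strategy in MPPMetalUtil: spin on the completion flag for a short budget, then fall back to a blocking wait so the CPU core is released when the driver thread that signals completion is slow to run. Below is a minimal C++ sketch of that spin-then-block pattern, assuming a hypothetical `GpuWork` handle and an illustrative 5 ms budget (neither is a MediaPipe or Metal API):

```cpp
#include <atomic>
#include <chrono>
#include <condition_variable>
#include <mutex>

// Hypothetical completion handle: some worker thread sets `done` and signals `cv`.
struct GpuWork {
  std::atomic<bool> done{false};
  std::mutex mu;
  std::condition_variable cv;
};

// Spin briefly for low latency, then block so the core is freed if the
// notifying thread (e.g. a lower-priority driver thread) has not run yet.
void WaitForCompletion(GpuWork& work) {
  const auto start = std::chrono::steady_clock::now();
  const auto kSpinBudget = std::chrono::milliseconds(5);  // illustrative value
  while (!work.done.load(std::memory_order_acquire)) {
    if (std::chrono::steady_clock::now() - start >= kSpinBudget) {
      std::unique_lock<std::mutex> lock(work.mu);
      work.cv.wait(lock,
                   [&] { return work.done.load(std::memory_order_acquire); });
      break;
    }
  }
}
```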
@@ -50,9 +50,10 @@
 - (CVMetalTextureCacheRef)mtlTextureCache {
   @synchronized(self) {
     if (!_mtlTextureCache) {
-      CVReturn __unused err =
-          CVMetalTextureCacheCreate(NULL, NULL, self.mtlDevice, NULL, &_mtlTextureCache);
-      NSAssert(err == kCVReturnSuccess, @"Error at CVMetalTextureCacheCreate %d ; device %@", err,
+      CVReturn __unused err = CVMetalTextureCacheCreate(
+          NULL, NULL, self.mtlDevice, NULL, &_mtlTextureCache);
+      NSAssert(err == kCVReturnSuccess,
+               @"Error at CVMetalTextureCacheCreate %d ; device %@", err,
                self.mtlDevice);
       // TODO: register and flush metal caches too.
     }
@@ -24,23 +24,27 @@
 
 namespace mediapipe {
 
-CVPixelBufferPoolRef CreateCVPixelBufferPool(
-    int width, int height, OSType pixelFormat, int keepCount,
-    CFTimeInterval maxAge) {
+CVPixelBufferPoolRef CreateCVPixelBufferPool(int width, int height,
+                                             OSType pixelFormat, int keepCount,
+                                             CFTimeInterval maxAge) {
   CVPixelBufferPoolRef pool = NULL;
 
   NSMutableDictionary *sourcePixelBufferOptions =
-      [(__bridge NSDictionary*)GetCVPixelBufferAttributesForGlCompatibility() mutableCopy];
+      [(__bridge NSDictionary *)GetCVPixelBufferAttributesForGlCompatibility()
+          mutableCopy];
   [sourcePixelBufferOptions addEntriesFromDictionary:@{
     (id)kCVPixelBufferPixelFormatTypeKey : @(pixelFormat),
     (id)kCVPixelBufferWidthKey : @(width),
     (id)kCVPixelBufferHeightKey : @(height),
   }];
 
-  NSMutableDictionary *pixelBufferPoolOptions = [[NSMutableDictionary alloc] init];
-  pixelBufferPoolOptions[(id)kCVPixelBufferPoolMinimumBufferCountKey] = @(keepCount);
+  NSMutableDictionary *pixelBufferPoolOptions =
+      [[NSMutableDictionary alloc] init];
+  pixelBufferPoolOptions[(id)kCVPixelBufferPoolMinimumBufferCountKey] =
+      @(keepCount);
   if (maxAge > 0) {
-    pixelBufferPoolOptions[(id)kCVPixelBufferPoolMaximumBufferAgeKey] = @(maxAge);
+    pixelBufferPoolOptions[(id)kCVPixelBufferPoolMaximumBufferAgeKey] =
+        @(maxAge);
   }
 
   CVPixelBufferPoolCreate(
@@ -50,8 +54,9 @@ CVPixelBufferPoolRef CreateCVPixelBufferPool(
   return pool;
 }
 
-OSStatus PreallocateCVPixelBufferPoolBuffers(
-    CVPixelBufferPoolRef pool, int count, CFDictionaryRef auxAttributes) {
+OSStatus PreallocateCVPixelBufferPoolBuffers(CVPixelBufferPoolRef pool,
+                                             int count,
+                                             CFDictionaryRef auxAttributes) {
   CVReturn err = kCVReturnSuccess;
   NSMutableArray *pixelBuffers = [[NSMutableArray alloc] init];
   for (int i = 0; i < count && err == kCVReturnSuccess; i++) {
@@ -68,30 +73,37 @@ OSStatus PreallocateCVPixelBufferPoolBuffers(
   return err;
 }
 
-CFDictionaryRef CreateCVPixelBufferPoolAuxiliaryAttributesForThreshold(int allocationThreshold) {
+CFDictionaryRef CreateCVPixelBufferPoolAuxiliaryAttributesForThreshold(
+    int allocationThreshold) {
   if (allocationThreshold > 0) {
-    return (CFDictionaryRef)CFBridgingRetain(
-        @{(id)kCVPixelBufferPoolAllocationThresholdKey: @(allocationThreshold)});
+    return (CFDictionaryRef)CFBridgingRetain(@{
+      (id)kCVPixelBufferPoolAllocationThresholdKey : @(allocationThreshold)
+    });
   } else {
     return nil;
   }
 }
 
-CVReturn CreateCVPixelBufferWithPool(
-    CVPixelBufferPoolRef pool, CFDictionaryRef auxAttributes,
-    CVTextureCacheType textureCache, CVPixelBufferRef* outBuffer) {
-  return CreateCVPixelBufferWithPool(pool, auxAttributes, [textureCache](){
+CVReturn CreateCVPixelBufferWithPool(CVPixelBufferPoolRef pool,
+                                     CFDictionaryRef auxAttributes,
+                                     CVTextureCacheType textureCache,
+                                     CVPixelBufferRef *outBuffer) {
+  return CreateCVPixelBufferWithPool(
+      pool, auxAttributes,
+      [textureCache]() {
 #if TARGET_OS_OSX
-    CVOpenGLTextureCacheFlush(textureCache, 0);
+        CVOpenGLTextureCacheFlush(textureCache, 0);
 #else
-    CVOpenGLESTextureCacheFlush(textureCache, 0);
+        CVOpenGLESTextureCacheFlush(textureCache, 0);
 #endif  // TARGET_OS_OSX
-  }, outBuffer);
+      },
+      outBuffer);
 }
 
-CVReturn CreateCVPixelBufferWithPool(
-    CVPixelBufferPoolRef pool, CFDictionaryRef auxAttributes,
-    std::function<void(void)> flush, CVPixelBufferRef* outBuffer) {
+CVReturn CreateCVPixelBufferWithPool(CVPixelBufferPoolRef pool,
+                                     CFDictionaryRef auxAttributes,
+                                     std::function<void(void)> flush,
+                                     CVPixelBufferRef *outBuffer) {
   CVReturn err = CVPixelBufferPoolCreatePixelBufferWithAuxAttributes(
       kCFAllocatorDefault, pool, auxAttributes, outBuffer);
   if (err == kCVReturnWouldExceedAllocationThreshold) {
@@ -103,11 +115,13 @@ CVReturn CreateCVPixelBufferWithPool(
         kCFAllocatorDefault, pool, auxAttributes, outBuffer);
   }
   if (err == kCVReturnWouldExceedAllocationThreshold) {
-    // TODO: allow the application to set the threshold. For now, disable it by
-    // default, since the threshold we are using is arbitrary and some graphs routinely cross it.
+    // TODO: allow the application to set the threshold. For now, disable it
+    // by default, since the threshold we are using is arbitrary and some
+    // graphs routinely cross it.
 #ifdef ENABLE_MEDIAPIPE_GPU_BUFFER_THRESHOLD_CHECK
-    NSLog(@"Using more buffers than expected! This is a debug-only warning, "
-          "you can ignore it if your app works fine otherwise.");
+    NSLog(
+        @"Using more buffers than expected! This is a debug-only warning, "
+         "you can ignore it if your app works fine otherwise.");
 #ifdef DEBUG
     NSLog(@"Pool status: %@", ((__bridge NSObject *)pool).description);
 #endif  // DEBUG
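The two hunks above reformat CreateCVPixelBufferWithPool, whose logic is: try to allocate from the pool; if the pool's allocation threshold would be exceeded, run the caller-supplied flush callback (which flushes the GL or Metal texture caches) and retry; and only warn in debug builds if the retry still hits the threshold. A rough C++ sketch of that allocate-flush-retry shape, with an invented BufferPool standing in for the CoreVideo pool (none of these names are CoreVideo or MediaPipe APIs):

```cpp
#include <cstdio>
#include <functional>

// Stand-ins for the CoreVideo return codes used in the real file.
enum Status { kOk, kWouldExceedThreshold };

struct BufferPool {
  int in_use = 0;
  int threshold = 4;
  Status Allocate(int* out_buffer) {
    if (in_use >= threshold) return kWouldExceedThreshold;
    *out_buffer = ++in_use;
    return kOk;
  }
  void Release() { if (in_use > 0) --in_use; }
};

// Try to allocate; if the pool is over its threshold, let the caller flush
// caches that may be pinning buffers, then retry once before giving up.
Status AllocateWithRetry(BufferPool& pool, std::function<void()> flush,
                         int* out_buffer) {
  Status err = pool.Allocate(out_buffer);
  if (err == kWouldExceedThreshold) {
    flush();  // e.g. flush texture caches so pooled buffers can be reused
    err = pool.Allocate(out_buffer);
  }
  if (err == kWouldExceedThreshold) {
    std::fprintf(stderr, "Using more buffers than expected.\n");
  }
  return err;
}

int main() {
  BufferPool pool;
  int buffer = 0;
  pool.threshold = 1;
  (void)AllocateWithRetry(pool, [] {}, &buffer);
  // Second allocation only succeeds because the flush callback releases one.
  Status err = AllocateWithRetry(pool, [&pool] { pool.Release(); }, &buffer);
  return err == kOk ? 0 : 1;
}
```

In the real code the flush callback releases CVPixelBuffers held by the texture caches, which is what makes the retry likely to succeed.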
@@ -52,9 +52,9 @@ objc_library(
 )
 
 MEDIAPIPE_IOS_SRCS = [
-    "MPPGraph.mm",
-    "MPPTimestampConverter.mm",
-    "NSError+util_status.mm",
+    "MPPGraph.cc",
+    "MPPTimestampConverter.cc",
+    "NSError+util_status.cc",
 ]
 
 MEDIAPIPE_IOS_HDRS = [
@@ -63,11 +63,13 @@ MEDIAPIPE_IOS_HDRS = [
     "NSError+util_status.h",
 ]
 
-objc_library(
+cc_library(
     name = "mediapipe_framework_ios",
     srcs = MEDIAPIPE_IOS_SRCS,
    hdrs = MEDIAPIPE_IOS_HDRS,
     copts = [
+        "-x objective-c++",
+        "-fobjc-arc",  # enable reference-counting
         "-Wno-shorten-64-to-32",
     ],
     # This build rule is public to allow external customers to build their own iOS apps.
@@ -99,6 +101,7 @@ objc_library(
         "@com_google_absl//absl/synchronization",
         "@google_toolbox_for_mac//:GTM_Defines",
     ],
+    alwayslink = 1,
 )
 
 objc_library(
@@ -12,13 +12,12 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-#import "mediapipe/objc/MPPGraph.h"
-
 #import <AVFoundation/AVFoundation.h>
 #import <Accelerate/Accelerate.h>
 
 #include <atomic>
 
+#import "GTMDefines.h"
 #include "absl/memory/memory.h"
 #include "mediapipe/framework/calculator_framework.h"
 #include "mediapipe/framework/formats/image.h"
@@ -26,22 +25,23 @@
 #include "mediapipe/framework/graph_service.h"
 #include "mediapipe/gpu/gl_base.h"
 #include "mediapipe/gpu/gpu_shared_data_internal.h"
+#import "mediapipe/objc/MPPGraph.h"
+#import "mediapipe/objc/NSError+util_status.h"
 #include "mediapipe/objc/util.h"
 
-#import "mediapipe/objc/NSError+util_status.h"
-#import "GTMDefines.h"
-
 @implementation MPPGraph {
-  // Graph is wrapped in a unique_ptr because it was generating 39+KB of unnecessary ObjC runtime
-  // information. See https://medium.com/@dmaclach/objective-c-encoding-and-you-866624cc02de
-  // for details.
+  // Graph is wrapped in a unique_ptr because it was generating 39+KB of
+  // unnecessary ObjC runtime information. See
+  // https://medium.com/@dmaclach/objective-c-encoding-and-you-866624cc02de for
+  // details.
   std::unique_ptr<mediapipe::CalculatorGraph> _graph;
   /// Input side packets that will be added to the graph when it is started.
   std::map<std::string, mediapipe::Packet> _inputSidePackets;
   /// Packet headers that will be added to the graph when it is started.
   std::map<std::string, mediapipe::Packet> _streamHeaders;
   /// Service packets to be added to the graph when it is started.
-  std::map<const mediapipe::GraphServiceBase*, mediapipe::Packet> _servicePackets;
+  std::map<const mediapipe::GraphServiceBase*, mediapipe::Packet>
+      _servicePackets;
 
   /// Number of frames currently being processed by the graph.
   std::atomic<int32_t> _framesInFlight;
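The reformatted comment above explains why _graph is held through a std::unique_ptr: embedding the full C++ object directly as an Objective-C instance variable generated tens of kilobytes of unnecessary ObjC type-encoding metadata. A loose C++-only analogy is the pimpl idiom sketched below, where the wrapper's layout contains only a pointer regardless of how large the implementation type grows; this is an illustration of the idea, not MediaPipe code:

```cpp
#include <memory>
#include <string>
#include <vector>

// Stand-in for a large implementation type (e.g. a calculator graph).
class CalculatorGraphImpl {
 public:
  std::vector<std::string> nodes;
};

// The wrapper stores only a pointer, so its own layout (and, in Objective-C,
// the ivar type encoding emitted for it) stays small.
class GraphWrapper {
 public:
  GraphWrapper() : impl_(std::make_unique<CalculatorGraphImpl>()) {}

 private:
  std::unique_ptr<CalculatorGraphImpl> impl_;
};

int main() {
  GraphWrapper wrapper;  // impl_ is created here and destroyed automatically
  (void)wrapper;
  return 0;
}
```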
@@ -56,7 +56,8 @@
   BOOL _started;
 }
 
-- (instancetype)initWithGraphConfig:(const mediapipe::CalculatorGraphConfig&)config {
+- (instancetype)initWithGraphConfig:
+    (const mediapipe::CalculatorGraphConfig&)config {
   self = [super init];
   if (self) {
     // Turn on Cocoa multithreading, since MediaPipe uses threads.
@@ -76,40 +77,47 @@
   return _graph->GetGraphInputStreamAddMode();
 }
 
-- (void)setPacketAddMode:(mediapipe::CalculatorGraph::GraphInputStreamAddMode)mode {
+- (void)setPacketAddMode:
+    (mediapipe::CalculatorGraph::GraphInputStreamAddMode)mode {
   _graph->SetGraphInputStreamAddMode(mode);
 }
 
 - (void)addFrameOutputStream:(const std::string&)outputStreamName
             outputPacketType:(MPPPacketType)packetType {
   std::string callbackInputName;
-  mediapipe::tool::AddCallbackCalculator(outputStreamName, &_config, &callbackInputName,
-                                         /*use_std_function=*/true);
-  // No matter what ownership qualifiers are put on the pointer, NewPermanentCallback will
-  // still end up with a strong pointer to MPPGraph*. That is why we use void* instead.
+  mediapipe::tool::AddCallbackCalculator(outputStreamName, &_config,
+                                         &callbackInputName,
+                                         /*use_std_function=*/true);
+  // No matter what ownership qualifiers are put on the pointer,
+  // NewPermanentCallback will still end up with a strong pointer to MPPGraph*.
+  // That is why we use void* instead.
   void* wrapperVoid = (__bridge void*)self;
   _inputSidePackets[callbackInputName] =
       mediapipe::MakePacket<std::function<void(const mediapipe::Packet&)>>(
-          [wrapperVoid, outputStreamName, packetType](const mediapipe::Packet& packet) {
-            CallFrameDelegate(wrapperVoid, outputStreamName, packetType, packet);
+          [wrapperVoid, outputStreamName,
+           packetType](const mediapipe::Packet& packet) {
+            CallFrameDelegate(wrapperVoid, outputStreamName, packetType,
+                              packet);
           });
 }
 
-- (NSString *)description {
-  return [NSString stringWithFormat:@"<%@: %p; framesInFlight = %d>", [self class], self,
-                                    _framesInFlight.load(std::memory_order_relaxed)];
+- (NSString*)description {
+  return [NSString
+      stringWithFormat:@"<%@: %p; framesInFlight = %d>", [self class], self,
+                       _framesInFlight.load(std::memory_order_relaxed)];
 }
 
 /// This is the function that gets called by the CallbackCalculator that
 /// receives the graph's output.
 void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,
-                       MPPPacketType packetType, const mediapipe::Packet& packet) {
+                       MPPPacketType packetType,
+                       const mediapipe::Packet& packet) {
   MPPGraph* wrapper = (__bridge MPPGraph*)wrapperVoid;
   @autoreleasepool {
     if (packetType == MPPPacketTypeRaw) {
       [wrapper.delegate mediapipeGraph:wrapper
-                      didOutputPacket:packet
-                           fromStream:streamName];
+                       didOutputPacket:packet
+                            fromStream:streamName];
     } else if (packetType == MPPPacketTypeImageFrame) {
       wrapper->_framesInFlight--;
       const auto& frame = packet.Get<mediapipe::ImageFrame>();
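The comment rewrapped in addFrameOutputStream: carries the key reasoning: the callback side packet captures self only as a void*, because any Objective-C ownership qualifier would still leave the std::function holding a strong reference to the MPPGraph wrapper. A hedged C++ sketch of the same pattern, registering a callback that captures only an opaque pointer and casts back inside (Observer and MakeCallback are illustrative names, not MediaPipe APIs):

```cpp
#include <functional>
#include <iostream>
#include <string>

// Illustrative observer type; in MPPGraph this role is played by the ObjC
// wrapper object that must not be retained by the callback.
struct Observer {
  std::string name;
  void OnPacket(int value) {
    std::cout << name << " got packet " << value << "\n";
  }
};

// The callback captures an opaque pointer, so it does not participate in the
// observer's lifetime; the caller must keep the observer alive while the
// callback can still fire.
std::function<void(int)> MakeCallback(void* observer_void) {
  return [observer_void](int value) {
    static_cast<Observer*>(observer_void)->OnPacket(value);
  };
}

int main() {
  Observer observer{"graph"};
  auto callback = MakeCallback(static_cast<void*>(&observer));
  callback(42);  // prints "graph got packet 42"
  return 0;
}
```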
@@ -118,13 +126,16 @@ void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,
       if (format == mediapipe::ImageFormat::SRGBA ||
           format == mediapipe::ImageFormat::GRAY8) {
         CVPixelBufferRef pixelBuffer;
-        // If kCVPixelFormatType_32RGBA does not work, it returns kCVReturnInvalidPixelFormat.
+        // If kCVPixelFormatType_32RGBA does not work, it returns
+        // kCVReturnInvalidPixelFormat.
         CVReturn error = CVPixelBufferCreate(
             NULL, frame.Width(), frame.Height(), kCVPixelFormatType_32BGRA,
             GetCVPixelBufferAttributesForGlCompatibility(), &pixelBuffer);
-        _GTMDevAssert(error == kCVReturnSuccess, @"CVPixelBufferCreate failed: %d", error);
+        _GTMDevAssert(error == kCVReturnSuccess,
+                      @"CVPixelBufferCreate failed: %d", error);
         error = CVPixelBufferLockBaseAddress(pixelBuffer, 0);
-        _GTMDevAssert(error == kCVReturnSuccess, @"CVPixelBufferLockBaseAddress failed: %d", error);
+        _GTMDevAssert(error == kCVReturnSuccess,
+                      @"CVPixelBufferLockBaseAddress failed: %d", error);
 
         vImage_Buffer vDestination = vImageForCVPixelBuffer(pixelBuffer);
         // Note: we have to throw away const here, but we should not overwrite
@@ -133,30 +144,35 @@ void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,
         if (format == mediapipe::ImageFormat::SRGBA) {
           // Swap R and B channels.
           const uint8_t permuteMap[4] = {2, 1, 0, 3};
-          vImage_Error __unused vError =
-              vImagePermuteChannels_ARGB8888(&vSource, &vDestination, permuteMap, kvImageNoFlags);
-          _GTMDevAssert(vError == kvImageNoError, @"vImagePermuteChannels failed: %zd", vError);
+          vImage_Error __unused vError = vImagePermuteChannels_ARGB8888(
+              &vSource, &vDestination, permuteMap, kvImageNoFlags);
+          _GTMDevAssert(vError == kvImageNoError,
+                        @"vImagePermuteChannels failed: %zd", vError);
         } else {
           // Convert grayscale back to BGRA
-          vImage_Error __unused vError = vImageGrayToBGRA(&vSource, &vDestination);
-          _GTMDevAssert(vError == kvImageNoError, @"vImageGrayToBGRA failed: %zd", vError);
+          vImage_Error __unused vError =
+              vImageGrayToBGRA(&vSource, &vDestination);
+          _GTMDevAssert(vError == kvImageNoError,
+                        @"vImageGrayToBGRA failed: %zd", vError);
         }
 
         error = CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);
         _GTMDevAssert(error == kCVReturnSuccess,
                       @"CVPixelBufferUnlockBaseAddress failed: %d", error);
 
-        if ([wrapper.delegate respondsToSelector:@selector
-             (mediapipeGraph:didOutputPixelBuffer:fromStream:timestamp:)]) {
+        if ([wrapper.delegate
+                respondsToSelector:@selector
+                (mediapipeGraph:didOutputPixelBuffer:fromStream:timestamp:)]) {
           [wrapper.delegate mediapipeGraph:wrapper
-                     didOutputPixelBuffer:pixelBuffer
-                               fromStream:streamName
-                                timestamp:packet.Timestamp()];
-        } else if ([wrapper.delegate respondsToSelector:@selector
-                    (mediapipeGraph:didOutputPixelBuffer:fromStream:)]) {
+                      didOutputPixelBuffer:pixelBuffer
+                                fromStream:streamName
+                                 timestamp:packet.Timestamp()];
+        } else if ([wrapper.delegate
+                       respondsToSelector:@selector
+                       (mediapipeGraph:didOutputPixelBuffer:fromStream:)]) {
           [wrapper.delegate mediapipeGraph:wrapper
-                     didOutputPixelBuffer:pixelBuffer
-                               fromStream:streamName];
+                      didOutputPixelBuffer:pixelBuffer
+                                fromStream:streamName];
         }
         CVPixelBufferRelease(pixelBuffer);
       } else {
@@ -168,22 +184,23 @@ void CallFrameDelegate(void* wrapperVoid, const std::string& streamName,
       wrapper->_framesInFlight--;
       CVPixelBufferRef pixelBuffer;
       if (packetType == MPPPacketTypePixelBuffer)
-        pixelBuffer = mediapipe::GetCVPixelBufferRef(packet.Get<mediapipe::GpuBuffer>());
+        pixelBuffer =
+            mediapipe::GetCVPixelBufferRef(packet.Get<mediapipe::GpuBuffer>());
       else
         pixelBuffer = packet.Get<mediapipe::Image>().GetCVPixelBufferRef();
-      if ([wrapper.delegate
+      if ([wrapper.delegate
               respondsToSelector:@selector
               (mediapipeGraph:didOutputPixelBuffer:fromStream:timestamp:)]) {
         [wrapper.delegate mediapipeGraph:wrapper
-                   didOutputPixelBuffer:pixelBuffer
-                             fromStream:streamName
-                              timestamp:packet.Timestamp()];
+                    didOutputPixelBuffer:pixelBuffer
+                              fromStream:streamName
+                               timestamp:packet.Timestamp()];
       } else if ([wrapper.delegate
                      respondsToSelector:@selector
                      (mediapipeGraph:didOutputPixelBuffer:fromStream:)]) {
         [wrapper.delegate mediapipeGraph:wrapper
-                   didOutputPixelBuffer:pixelBuffer
-                             fromStream:streamName];
+                    didOutputPixelBuffer:pixelBuffer
+                              fromStream:streamName];
       }
 #endif  // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
     } else {
@@ -192,13 +209,15 @@ if ([wrapper.delegate
   }
 }
 
-- (void)setHeaderPacket:(const mediapipe::Packet&)packet forStream:(const std::string&)streamName {
+- (void)setHeaderPacket:(const mediapipe::Packet&)packet
+              forStream:(const std::string&)streamName {
   _GTMDevAssert(!_started, @"%@ must be called before the graph is started",
                 NSStringFromSelector(_cmd));
   _streamHeaders[streamName] = packet;
 }
 
-- (void)setSidePacket:(const mediapipe::Packet&)packet named:(const std::string&)name {
+- (void)setSidePacket:(const mediapipe::Packet&)packet
+                named:(const std::string&)name {
   _GTMDevAssert(!_started, @"%@ must be called before the graph is started",
                 NSStringFromSelector(_cmd));
   _inputSidePackets[name] = packet;
@@ -211,7 +230,8 @@ if ([wrapper.delegate
   _servicePackets[&service] = std::move(packet);
 }
 
-- (void)addSidePackets:(const std::map<std::string, mediapipe::Packet>&)extraSidePackets {
+- (void)addSidePackets:
+    (const std::map<std::string, mediapipe::Packet>&)extraSidePackets {
   _GTMDevAssert(!_started, @"%@ must be called before the graph is started",
                 NSStringFromSelector(_cmd));
   _inputSidePackets.insert(extraSidePackets.begin(), extraSidePackets.end());
@@ -232,7 +252,8 @@ if ([wrapper.delegate
 - (absl::Status)performStart {
   absl::Status status;
   for (const auto& service_packet : _servicePackets) {
-    status = _graph->SetServicePacket(*service_packet.first, service_packet.second);
+    status =
+        _graph->SetServicePacket(*service_packet.first, service_packet.second);
     if (!status.ok()) {
       return status;
     }
@@ -269,11 +290,12 @@ if ([wrapper.delegate
 }
 
 - (BOOL)waitUntilDoneWithError:(NSError**)error {
-  // Since this method blocks with no timeout, it should not be called in the main thread in
-  // an app. However, it's fine to allow that in a test.
+  // Since this method blocks with no timeout, it should not be called in the
+  // main thread in an app. However, it's fine to allow that in a test.
   // TODO: is this too heavy-handed? Maybe a warning would be fine.
-  _GTMDevAssert(![NSThread isMainThread] || (NSClassFromString(@"XCTest")),
-                @"waitUntilDoneWithError: should not be called on the main thread");
+  _GTMDevAssert(
+      ![NSThread isMainThread] || (NSClassFromString(@"XCTest")),
+      @"waitUntilDoneWithError: should not be called on the main thread");
   absl::Status status = _graph->WaitUntilDone();
   _started = NO;
   if (!status.ok() && error) *error = [NSError gus_errorWithStatus:status];
@@ -289,7 +311,8 @@ if ([wrapper.delegate
 - (BOOL)movePacket:(mediapipe::Packet&&)packet
         intoStream:(const std::string&)streamName
              error:(NSError**)error {
-  absl::Status status = _graph->AddPacketToInputStream(streamName, std::move(packet));
+  absl::Status status =
+      _graph->AddPacketToInputStream(streamName, std::move(packet));
   if (!status.ok() && error) *error = [NSError gus_errorWithStatus:status];
   return status.ok();
 }
@@ -305,15 +328,17 @@ if ([wrapper.delegate
 - (BOOL)setMaxQueueSize:(int)maxQueueSize
               forStream:(const std::string&)streamName
                   error:(NSError**)error {
-  absl::Status status = _graph->SetInputStreamMaxQueueSize(streamName, maxQueueSize);
+  absl::Status status =
+      _graph->SetInputStreamMaxQueueSize(streamName, maxQueueSize);
   if (!status.ok() && error) *error = [NSError gus_errorWithStatus:status];
   return status.ok();
 }
 
 - (mediapipe::Packet)packetWithPixelBuffer:(CVPixelBufferRef)imageBuffer
-                               packetType:(MPPPacketType)packetType {
+                                packetType:(MPPPacketType)packetType {
   mediapipe::Packet packet;
-  if (packetType == MPPPacketTypeImageFrame || packetType == MPPPacketTypeImageFrameBGRANoSwap) {
+  if (packetType == MPPPacketTypeImageFrame ||
+      packetType == MPPPacketTypeImageFrameBGRANoSwap) {
     auto frame = CreateImageFrameForCVPixelBuffer(
         imageBuffer, /* canOverwrite = */ false,
         /* bgrAsRgb = */ packetType == MPPPacketTypeImageFrameBGRANoSwap);
@@ -328,7 +353,8 @@ if ([wrapper.delegate
     packet = mediapipe::MakePacket<mediapipe::Image>(imageBuffer);
 #else
     // CPU
-    auto frame = CreateImageFrameForCVPixelBuffer(imageBuffer, /* canOverwrite = */ false,
+    auto frame = CreateImageFrameForCVPixelBuffer(imageBuffer,
+                                                  /* canOverwrite = */ false,
                                                   /* bgrAsRgb = */ false);
     packet = mediapipe::MakePacket<mediapipe::Image>(std::move(frame));
 #endif  // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
@@ -339,7 +365,8 @@ if ([wrapper.delegate
 }
 
 - (mediapipe::Packet)imagePacketWithPixelBuffer:(CVPixelBufferRef)pixelBuffer {
-  return [self packetWithPixelBuffer:(pixelBuffer) packetType:(MPPPacketTypeImage)];
+  return [self packetWithPixelBuffer:(pixelBuffer)
+                          packetType:(MPPPacketTypeImage)];
 }
 
 - (BOOL)sendPixelBuffer:(CVPixelBufferRef)imageBuffer
@@ -367,13 +394,16 @@ if ([wrapper.delegate
            allowOverwrite:(BOOL)allowOverwrite
                     error:(NSError**)error {
   if (_maxFramesInFlight && _framesInFlight >= _maxFramesInFlight) return NO;
-  mediapipe::Packet packet = [self packetWithPixelBuffer:imageBuffer packetType:packetType];
+  mediapipe::Packet packet =
+      [self packetWithPixelBuffer:imageBuffer packetType:packetType];
   BOOL success;
   if (allowOverwrite) {
     packet = std::move(packet).At(timestamp);
-    success = [self movePacket:std::move(packet) intoStream:inputName error:error];
+    success =
+        [self movePacket:std::move(packet) intoStream:inputName error:error];
   } else {
-    success = [self sendPacket:packet.At(timestamp) intoStream:inputName error:error];
+    success =
+        [self sendPacket:packet.At(timestamp) intoStream:inputName error:error];
   }
   if (success) _framesInFlight++;
   return success;
@@ -407,22 +437,24 @@ if ([wrapper.delegate
 }
 
 - (void)debugPrintGlInfo {
-  std::shared_ptr<mediapipe::GpuResources> gpu_resources = _graph->GetGpuResources();
+  std::shared_ptr<mediapipe::GpuResources> gpu_resources =
+      _graph->GetGpuResources();
   if (!gpu_resources) {
     NSLog(@"GPU not set up.");
     return;
   }
 
   NSString* extensionString;
-  (void)gpu_resources->gl_context()->Run([&extensionString]{
-    extensionString = [NSString stringWithUTF8String:(char*)glGetString(GL_EXTENSIONS)];
+  (void)gpu_resources->gl_context()->Run([&extensionString] {
+    extensionString =
+        [NSString stringWithUTF8String:(char*)glGetString(GL_EXTENSIONS)];
     return absl::OkStatus();
   });
 
-  NSArray* extensions = [extensionString componentsSeparatedByCharactersInSet:
-                            [NSCharacterSet whitespaceCharacterSet]];
-  for (NSString* oneExtension in extensions)
-    NSLog(@"%@", oneExtension);
+  NSArray* extensions = [extensionString
+      componentsSeparatedByCharactersInSet:[NSCharacterSet
+                                               whitespaceCharacterSet]];
+  for (NSString* oneExtension in extensions) NSLog(@"%@", oneExtension);
 }
 
 @end
@@ -20,8 +20,7 @@
   mediapipe::TimestampDiff _timestampOffset;
 }
 
-- (instancetype)init
-{
+- (instancetype)init {
   self = [super init];
   if (self) {
     [self reset];
@@ -36,11 +35,14 @@
 }
 
 - (mediapipe::Timestamp)timestampForMediaTime:(CMTime)mediaTime {
-  Float64 sampleSeconds = CMTIME_IS_VALID(mediaTime) ? CMTimeGetSeconds(mediaTime) : 0;
-  const int64 sampleUsec = sampleSeconds * mediapipe::Timestamp::kTimestampUnitsPerSecond;
+  Float64 sampleSeconds =
+      CMTIME_IS_VALID(mediaTime) ? CMTimeGetSeconds(mediaTime) : 0;
+  const int64 sampleUsec =
+      sampleSeconds * mediapipe::Timestamp::kTimestampUnitsPerSecond;
   _mediapipeTimestamp = mediapipe::Timestamp(sampleUsec) + _timestampOffset;
   if (_mediapipeTimestamp <= _lastTimestamp) {
-    _timestampOffset = _timestampOffset + _lastTimestamp + 1 - _mediapipeTimestamp;
+    _timestampOffset =
+        _timestampOffset + _lastTimestamp + 1 - _mediapipeTimestamp;
     _mediapipeTimestamp = _lastTimestamp + 1;
   }
   _lastTimestamp = _mediapipeTimestamp;
mediapipe/objc/NSError+util_status.cc (new file, 72 lines)
@@ -0,0 +1,72 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#import "mediapipe/objc/NSError+util_status.h"

@implementation GUSUtilStatusWrapper

+ (instancetype)wrapStatus:(const absl::Status &)status {
  return [[self alloc] initWithStatus:status];
}

- (instancetype)initWithStatus:(const absl::Status &)status {
  self = [super init];
  if (self) {
    _status = status;
  }
  return self;
}

- (NSString *)description {
  return [NSString stringWithFormat:@"<%@: %p; status = %s>", [self class],
                                    self, _status.message().data()];
}

@end

@implementation NSError (GUSGoogleUtilStatus)

NSString *const kGUSGoogleUtilStatusErrorDomain =
    @"GoogleUtilStatusErrorDomain";
NSString *const kGUSGoogleUtilStatusErrorKey = @"GUSGoogleUtilStatusErrorKey";

+ (NSError *)gus_errorWithStatus:(const absl::Status &)status {
  NSDictionary *userInfo = @{
    NSLocalizedDescriptionKey : @(status.message().data()),
    kGUSGoogleUtilStatusErrorKey : [GUSUtilStatusWrapper wrapStatus:status],
  };
  NSError *error =
      [NSError errorWithDomain:kGUSGoogleUtilStatusErrorDomain
                          code:static_cast<NSInteger>(status.code())
                      userInfo:userInfo];
  return error;
}

- (absl::Status)gus_status {
  NSString *domain = self.domain;
  if ([domain isEqual:kGUSGoogleUtilStatusErrorDomain]) {
    GUSUtilStatusWrapper *wrapper = self.userInfo[kGUSGoogleUtilStatusErrorKey];
    if (wrapper) return wrapper.status;
#if 0
    // Unfortunately, util/task/posixerrorspace.h is not in portable status yet.
    // TODO: fix that.
  } else if ([domain isEqual:NSPOSIXErrorDomain]) {
    return ::util::PosixErrorToStatus(self.code, self.localizedDescription.UTF8String);
#endif
  }
  return absl::Status(absl::StatusCode::kUnknown,
                      self.localizedDescription.UTF8String);
}

@end
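The new file above converts an absl::Status into an NSError, keeping the original status object in userInfo so gus_status can recover it exactly, and falling back to StatusCode::kUnknown for errors from other domains. A hedged C++ sketch of that round trip, with a plain struct standing in for NSError and assuming Abseil's status library, which MediaPipe already depends on (ErrorRecord and the function names are invented):

```cpp
#include <optional>
#include <string>

#include "absl/status/status.h"

// Stand-in for NSError: a domain, a numeric code, and (for errors created
// from a status) the original absl::Status preserved alongside them.
struct ErrorRecord {
  std::string domain;
  int code = 0;
  std::string message;
  std::optional<absl::Status> wrapped_status;
};

constexpr char kStatusErrorDomain[] = "GoogleUtilStatusErrorDomain";

ErrorRecord ErrorFromStatus(const absl::Status& status) {
  return ErrorRecord{kStatusErrorDomain, static_cast<int>(status.code()),
                     std::string(status.message()), status};
}

// Recover the original status when this library created the error; otherwise
// fall back to kUnknown with whatever description the foreign error carries.
absl::Status StatusFromError(const ErrorRecord& error) {
  if (error.domain == kStatusErrorDomain && error.wrapped_status.has_value()) {
    return *error.wrapped_status;
  }
  return absl::Status(absl::StatusCode::kUnknown, error.message);
}

int main() {
  ErrorRecord error = ErrorFromStatus(absl::InvalidArgumentError("bad input"));
  absl::Status round_tripped = StatusFromError(error);
  return round_tripped.code() == absl::StatusCode::kInvalidArgument ? 0 : 1;
}
```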
third_party/org_tensorflow_objc_build_fixes.diff (new vendored file, 86 lines)
@@ -0,0 +1,86 @@
diff --git a/tensorflow/lite/delegates/gpu/BUILD b/tensorflow/lite/delegates/gpu/BUILD
index 875c2a4f3da..e513db47388 100644
--- a/tensorflow/lite/delegates/gpu/BUILD
+++ b/tensorflow/lite/delegates/gpu/BUILD
@@ -70,14 +70,17 @@ cc_library(
     }) + tflite_extra_gles_deps(),
 )
 
-objc_library(
+cc_library(
     name = "metal_delegate",
-    srcs = ["metal_delegate.mm"],
+    srcs = ["metal_delegate.cc"],
     hdrs = ["metal_delegate.h"],
-    copts = ["-std=c++17"],
+    copts = [
+        "-ObjC++",
+        "-std=c++17",
+        "-fobjc-arc",
+    ],
+    linkopts = ["-framework Metal"],
     features = ["-layering_check"],
-    module_name = "TensorFlowLiteCMetal",
-    sdk_frameworks = ["Metal"],
     deps = [
         "//tensorflow/lite:kernel_api",
         "//tensorflow/lite:minimal_logging",
@@ -98,14 +101,20 @@ objc_library(
         "//tensorflow/lite/delegates/gpu/metal:metal_spatial_tensor",
         "@com_google_absl//absl/types:span",
     ],
+    alwayslink = 1,
 )
 
-objc_library(
+cc_library(
     name = "metal_delegate_internal",
     hdrs = ["metal_delegate_internal.h"],
-    copts = ["-std=c++17"],
-    sdk_frameworks = ["Metal"],
+    copts = [
+        "-ObjC++",
+        "-std=c++17",
+        "-fobjc-arc",
+    ],
+    linkopts = ["-framework Metal"],
     deps = ["//tensorflow/lite/delegates/gpu:metal_delegate"],
+    alwayslink = 1,
 )
 
 # build -c opt --config android_arm64 --copt -Os --copt -DTFLITE_GPU_BINARY_RELEASE --linkopt -s --strip always :libtensorflowlite_gpu_gl.so
diff --git a/tensorflow/lite/delegates/gpu/metal/BUILD b/tensorflow/lite/delegates/gpu/metal/BUILD
index 8571ff7f041..82e6bb91d2d 100644
--- a/tensorflow/lite/delegates/gpu/metal/BUILD
+++ b/tensorflow/lite/delegates/gpu/metal/BUILD
@@ -137,15 +137,16 @@ objc_library(
     ],
 )
 
-objc_library(
+cc_library(
     name = "inference_context",
     srcs = ["inference_context.cc"],
     hdrs = ["inference_context.h"],
     copts = DEFAULT_COPTS + [
         "-ObjC++",
+        "-fobjc-arc",
     ],
     features = ["-layering_check"],
-    sdk_frameworks = ["Metal"],
+    linkopts = ["-framework Metal"],
     deps = [
         ":compute_task",
         ":inference_context_cc_fbs",
@@ -171,6 +172,7 @@ objc_library(
         "@com_google_absl//absl/strings",
         "@com_google_absl//absl/time",
     ],
+    alwayslink = 1,
 )
 
 flatbuffer_cc_library(
diff --git a/tensorflow/lite/delegates/gpu/metal_delegate.mm b/tensorflow/lite/delegates/gpu/metal_delegate.cc
similarity index 100%
rename from tensorflow/lite/delegates/gpu/metal_delegate.mm
rename to tensorflow/lite/delegates/gpu/metal_delegate.cc