No public description

PiperOrigin-RevId: 590629265
This commit is contained in:
MediaPipe Team 2023-12-13 09:47:13 -08:00 committed by Copybara-Service
parent 15f2b32006
commit 4892209da9
8 changed files with 591 additions and 124 deletions

View File

@ -155,6 +155,27 @@ cc_library(
], ],
) )
cc_library(
name = "hardware_buffer",
srcs = ["hardware_buffer_android.cc"],
hdrs = ["hardware_buffer.h"],
linkopts = select({
"//conditions:default": [],
# Option for vendor binaries to avoid linking libandroid.so.
"//mediapipe/framework:android_no_jni": [],
"//mediapipe:android": ["-landroid"],
":android_link_native_window": [
"-lnativewindow", # Provides <android/hardware_buffer.h> to vendor binaries on Android API >= 26.
],
}),
visibility = ["//visibility:private"],
deps = [
"//mediapipe/framework/port:ret_check",
"//mediapipe/framework/port:statusor",
"@com_google_absl//absl/log:absl_check",
],
)
cc_library( cc_library(
name = "image_frame", name = "image_frame",
srcs = ["image_frame.cc"], srcs = ["image_frame.cc"],
@ -493,28 +514,31 @@ cc_library(
"//conditions:default": [], "//conditions:default": [],
# Option for vendor binaries to avoid linking libandroid.so. # Option for vendor binaries to avoid linking libandroid.so.
"//mediapipe/framework:android_no_jni": [], "//mediapipe/framework:android_no_jni": [],
"//mediapipe:android": ["-landroid"],
":android_link_native_window": [
"-lnativewindow", # Provides <android/hardware_buffer.h> to vendor binaries on Android API >= 26.
],
}), }),
deps = [ deps = [
"//mediapipe/framework:port", "//mediapipe/framework:port",
"@com_google_absl//absl/container:flat_hash_map", "@com_google_absl//absl/container:flat_hash_map",
"@com_google_absl//absl/log:absl_check", "@com_google_absl//absl/log:absl_check",
"@com_google_absl//absl/log:absl_log", "@com_google_absl//absl/log:absl_log",
"@com_google_absl//absl/memory", "@com_google_absl//absl/memory",
"@com_google_absl//absl/synchronization", "@com_google_absl//absl/synchronization",
] + select({ ] + select({
"//mediapipe/gpu:disable_gpu": [], "//mediapipe/gpu:disable_gpu": [],
"//conditions:default": [ "//conditions:default": [
"//mediapipe/gpu:gl_base", "//mediapipe/gpu:gl_base",
"//mediapipe/gpu:gl_context", "//mediapipe/gpu:gl_context",
], ],
}) + "//mediapipe:android": [
select({ ":hardware_buffer",
"//conditions:default": [], "//mediapipe/gpu:gl_base",
}), "//mediapipe/gpu:gl_context",
],
":android_link_native_window": [
":hardware_buffer",
"//mediapipe/gpu:gl_base",
"//mediapipe/gpu:gl_context",
],
}),
) )
cc_test( cc_test(

View File

@ -0,0 +1,167 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MEDIAPIPE_FRAMEWORK_FORMATS_HARDWARE_BUFFER_H_
#define MEDIAPIPE_FRAMEWORK_FORMATS_HARDWARE_BUFFER_H_
#include <cstdint>
#include <memory>
#include <optional>
#include <utility>
#include "absl/base/attributes.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
typedef struct AHardwareBuffer AHardwareBuffer;
namespace mediapipe {
// Describes the width/height/layers/format/usage of an AHardwareBuffer and
// doubles as a hashable key for buffer pools. The enum constants mirror the
// NDK's hardware_buffer.h so this header can be used without the NDK headers.
struct HardwareBufferSpec {
  // Buffer pixel formats. See NDK's hardware_buffer.h for descriptions.
  enum {
    // This must be kept in sync with NDK's hardware_buffer.h
    AHARDWAREBUFFER_FORMAT_R8G8B8A8_UNORM = 0x01,
    AHARDWAREBUFFER_FORMAT_R8G8B8_UNORM = 0x03,
    AHARDWAREBUFFER_FORMAT_R16G16B16A16_FLOAT = 0x16,
    AHARDWAREBUFFER_FORMAT_BLOB = 0x21,
    AHARDWAREBUFFER_FORMAT_R8_UNORM = 0x38,
  };

  // Buffer usage descriptions. See NDK's hardware_buffer.h for descriptions.
  enum {
    // This must be kept in sync with NDK's hardware_buffer.h
    AHARDWAREBUFFER_USAGE_CPU_READ_NEVER = 0x0UL,
    AHARDWAREBUFFER_USAGE_CPU_READ_RARELY = 0x2UL,
    AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN = 0x3UL,
    AHARDWAREBUFFER_USAGE_CPU_WRITE_NEVER = UINT64_C(0) << 4,
    AHARDWAREBUFFER_USAGE_CPU_WRITE_RARELY = UINT64_C(2) << 4,
    AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN = UINT64_C(3) << 4,
    AHARDWAREBUFFER_USAGE_GPU_SAMPLED_IMAGE = UINT64_C(1) << 8,
    AHARDWAREBUFFER_USAGE_GPU_FRAMEBUFFER = UINT64_C(1) << 9,
    AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER = UINT64_C(1) << 24,
  };

  // Enables HardwareBufferSpec as a key in absl hash containers (e.g. buffer
  // pools); folds all five fields into the hash state. See absl::Hash.
  template <typename HashState>
  friend HashState AbslHashValue(HashState state,
                                 const HardwareBufferSpec& spec) {
    return HashState::combine(std::move(state), spec.width, spec.height,
                              spec.layers, spec.format, spec.usage);
  }

  uint32_t width = 0;
  uint32_t height = 0;
  uint32_t layers = 0;
  uint32_t format = 0;
  uint64_t usage = 0;
};

// Two specs compare equal iff every field matches.
inline bool operator==(const HardwareBufferSpec& lhs,
                       const HardwareBufferSpec& rhs) {
  if (lhs.width != rhs.width) return false;
  if (lhs.height != rhs.height) return false;
  if (lhs.layers != rhs.layers) return false;
  if (lhs.format != rhs.format) return false;
  return lhs.usage == rhs.usage;
}

inline bool operator!=(const HardwareBufferSpec& lhs,
                       const HardwareBufferSpec& rhs) {
  return !(lhs == rhs);
}
// For internal use only. Thinly wraps the Android NDK AHardwareBuffer.
// Move-only owner of one AHardwareBuffer handle; the handle is released on
// destruction (see Reset()).
class HardwareBuffer {
 public:
  // Constructs a HardwareBuffer instance from a newly allocated Android NDK
  // AHardwareBuffer. Fails if AHWBs are unsupported or allocation fails.
  static absl::StatusOr<HardwareBuffer> Create(const HardwareBufferSpec& spec);

  // Destructs the HardwareBuffer, releasing the AHardwareBuffer.
  ~HardwareBuffer();

  // Support HardwareBuffer moves. The moved-from instance becomes invalid
  // (unallocated, unlocked).
  // NOTE(review): move *assignment* is not declared and is suppressed by the
  // user-declared destructor — confirm assignment is intentionally
  // unsupported.
  HardwareBuffer(HardwareBuffer&& other);

  // Delete assignment and copy constructors.
  HardwareBuffer(HardwareBuffer& other) = delete;
  HardwareBuffer(const HardwareBuffer& other) = delete;
  HardwareBuffer& operator=(const HardwareBuffer&) = delete;

  // Returns true if AHWB is supported (runtime Android API level >= 26).
  static bool IsSupported();

  // Locks the hardware buffer for the given usage flags and returns the
  // mapped memory address. fence_file_descriptor, if set, specifies a fence
  // file descriptor on which to wait before locking the buffer. Returns an
  // error status if the buffer is unallocated, already locked, or the NDK
  // lock call fails.
  ABSL_MUST_USE_RESULT absl::StatusOr<void*> Lock(
      uint64_t usage, std::optional<int> fence_file_descriptor = std::nullopt);

  // Unlocks the hardware buffer synchronously. This method blocks until
  // unlocking is complete.
  absl::Status Unlock();

  // Unlocks the hardware buffer asynchronously. It returns a file_descriptor
  // which can be used as a fence that is signaled once unlocking is complete.
  absl::StatusOr<int> UnlockAsync();

  // Returns the underlying raw AHardwareBuffer pointer to be used directly
  // with AHardwareBuffer APIs. May be nullptr after a move or Reset().
  AHardwareBuffer* GetAHardwareBuffer() const { return ahw_buffer_; }

  // Returns whether this HardwareBuffer contains a valid AHardwareBuffer.
  bool IsValid() const { return ahw_buffer_ != nullptr; }

  // Returns whether this HardwareBuffer is locked.
  bool IsLocked() const { return is_locked_; }

  // Releases the AHardwareBuffer (unlocking first if needed) and clears the
  // spec.
  void Reset();

  // Ahwb's are aligned to an implementation specific cacheline size; returns
  // the aligned row width (stride) reported by the NDK.
  uint32_t GetAlignedWidth() const;

  // Returns buffer spec.
  const HardwareBufferSpec& spec() const { return spec_; }

 private:
  // Allocates an AHardwareBuffer instance;
  static absl::StatusOr<AHardwareBuffer*> AllocateAHardwareBuffer(
      const HardwareBufferSpec& spec);

  // Constructs a HardwareBuffer instance from an already acquired
  // AHardwareBuffer instance and its spec; takes ownership of the handle.
  HardwareBuffer(const HardwareBufferSpec& spec, AHardwareBuffer* ahwb);

  // Unlocks the hardware buffer. If fence_file_descriptor_ptr is not nullptr,
  // the function won't block and instead fence_file_descriptor_ptr will be
  // set to a file descriptor to become signaled once unlocking is complete.
  absl::Status UnlockInternal(int* fence_file_descriptor_ptr);

  // Releases ahw_buffer_ AHardwareBuffer instance;
  absl::Status ReleaseAHardwareBuffer();

  // Buffer spec.
  HardwareBufferSpec spec_ = {};

  // Android NDK AHardwareBuffer.
  AHardwareBuffer* ahw_buffer_ = nullptr;

  // Indicates if AHardwareBuffer is locked for reading or writing.
  bool is_locked_ = false;
};
} // namespace mediapipe
#endif  // MEDIAPIPE_FRAMEWORK_FORMATS_HARDWARE_BUFFER_H_

View File

@ -0,0 +1,152 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#if !defined(MEDIAPIPE_NO_JNI) && \
(__ANDROID_API__ >= 26 || \
defined(__ANDROID_UNAVAILABLE_SYMBOLS_ARE_WEAK__))
#include <android/hardware_buffer.h>

#include <memory>
#include <utility>

#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "mediapipe/framework/formats/hardware_buffer.h"
#include "mediapipe/framework/port/ret_check.h"
#include "mediapipe/framework/port/status_macros.h"
namespace mediapipe {
// Transfers ownership of `other`'s AHardwareBuffer handle to this instance,
// leaving `other` valid but empty (no buffer, unlocked, default spec).
HardwareBuffer::HardwareBuffer(HardwareBuffer &&other) {
  spec_ = other.spec_;
  ahw_buffer_ = other.ahw_buffer_;
  is_locked_ = other.is_locked_;
  other.spec_ = {};
  other.ahw_buffer_ = nullptr;
  other.is_locked_ = false;
}
// Wraps an already-acquired AHardwareBuffer and its spec; takes ownership of
// `ahwb`, which is released by the destructor via Reset().
HardwareBuffer::HardwareBuffer(const HardwareBufferSpec &spec,
                               AHardwareBuffer *ahwb)
    : spec_(spec), ahw_buffer_(ahwb), is_locked_(false) {}
HardwareBuffer::~HardwareBuffer() { Reset(); }
// Allocates a new AHardwareBuffer matching `spec` and wraps it. Returns an
// error if AHWBs are unsupported on this device or allocation fails.
absl::StatusOr<HardwareBuffer> HardwareBuffer::Create(
    const HardwareBufferSpec &spec) {
  MP_ASSIGN_OR_RETURN(AHardwareBuffer * ahwb, AllocateAHardwareBuffer(spec));
  return HardwareBuffer(spec, ahwb);
}
// AHardwareBuffer requires Android API >= 26; __builtin_available performs
// the runtime API-level check (needed when symbols are weakly linked).
bool HardwareBuffer::IsSupported() {
  if (__builtin_available(android 26, *)) {
    return true;
  }
  return false;
}
// Allocates an AHardwareBuffer described by `spec` via the NDK. Fails if
// AHWBs are unsupported at runtime or AHardwareBuffer_allocate reports an
// error.
absl::StatusOr<AHardwareBuffer *> HardwareBuffer::AllocateAHardwareBuffer(
    const HardwareBufferSpec &spec) {
  RET_CHECK(IsSupported()) << "AndroidHWBuffers not supported";

  AHardwareBuffer *output = nullptr;
  int error = 0;
  // The availability guard is required for weakly-linked NDK symbols; the
  // IsSupported() check above guarantees this branch is taken.
  if (__builtin_available(android 26, *)) {
    AHardwareBuffer_Desc desc = {
        .width = spec.width,
        .height = spec.height,
        .layers = spec.layers,
        .format = spec.format,
        .usage = spec.usage,
    };
    error = AHardwareBuffer_allocate(&desc, &output);
  }
  RET_CHECK(!error && output != nullptr) << "AHardwareBuffer_allocate failed";
  return output;
}
// Unlocks (if locked) and releases the owned AHardwareBuffer, then clears the
// spec. No-op when no buffer is owned.
absl::Status HardwareBuffer::ReleaseAHardwareBuffer() {
  if (ahw_buffer_ == nullptr) {
    return absl::OkStatus();
  }
  // A locked buffer must be unlocked before release.
  if (is_locked_) {
    MP_RETURN_IF_ERROR(Unlock());
  }
  if (__builtin_available(android 26, *)) {
    AHardwareBuffer_release(ahw_buffer_);
  }
  spec_ = {};
  ahw_buffer_ = nullptr;
  return absl::OkStatus();
}
// Maps the buffer into CPU-accessible memory for the given usage flags and
// marks it locked. When `fence_file_descriptor` is set, the NDK waits on that
// fence before locking (-1 means "no fence"). Fails if the buffer is not
// allocated or is already locked.
absl::StatusOr<void *> HardwareBuffer::Lock(
    uint64_t usage, std::optional<int> fence_file_descriptor) {
  RET_CHECK(ahw_buffer_ != nullptr) << "Hardware Buffer not allocated";
  RET_CHECK(!is_locked_) << "Hardware Buffer already locked";
  void *mem = nullptr;
  if (__builtin_available(android 26, *)) {
    const int fence_fd = fence_file_descriptor.value_or(-1);
    const int error =
        AHardwareBuffer_lock(ahw_buffer_, usage, fence_fd, nullptr, &mem);
    RET_CHECK(error == 0) << "Hardware Buffer lock failed. Error: " << error;
  }
  is_locked_ = true;
  return mem;
}
// Blocking unlock: returns only after the unlock has completed.
absl::Status HardwareBuffer::Unlock() {
  return UnlockInternal(/*fence_file_descriptor=*/nullptr);
}
// Non-blocking unlock: returns a fence file descriptor that becomes signaled
// once unlocking completes (per NDK AHardwareBuffer_unlock semantics).
absl::StatusOr<int> HardwareBuffer::UnlockAsync() {
  int fence_file_descriptor = -1;
  MP_RETURN_IF_ERROR(UnlockInternal(&fence_file_descriptor));
  return fence_file_descriptor;
}
// Shared unlock implementation. With a non-null `fence_file_descriptor` the
// NDK unlock is asynchronous and stores a fence fd there; with nullptr the
// call blocks until unlock completes. Unlocking an unlocked buffer is a
// no-op.
absl::Status HardwareBuffer::UnlockInternal(int *fence_file_descriptor) {
  RET_CHECK(ahw_buffer_ != nullptr) << "Hardware Buffer not allocated";
  if (!is_locked_) {
    return absl::OkStatus();
  }
  if (__builtin_available(android 26, *)) {
    const int error =
        AHardwareBuffer_unlock(ahw_buffer_, fence_file_descriptor);
    RET_CHECK(error == 0) << "Hardware Buffer unlock failed. error: " << error;
  }
  is_locked_ = false;
  return absl::OkStatus();
}
// Returns the row stride reported by AHardwareBuffer_describe, reflecting the
// implementation-specific alignment of the allocation. Returns 0 when the
// runtime API level is below 26 (AHWB unavailable).
uint32_t HardwareBuffer::GetAlignedWidth() const {
  if (__builtin_available(android 26, *)) {
    ABSL_CHECK(ahw_buffer_ != nullptr) << "Hardware Buffer not allocated";
    AHardwareBuffer_Desc desc = {};
    AHardwareBuffer_describe(ahw_buffer_, &desc);
    ABSL_CHECK_GT(desc.stride, 0);
    return desc.stride;
  }
  return 0;
}
void HardwareBuffer::Reset() {
const auto success = ReleaseAHardwareBuffer();
if (!success.ok()) {
ABSL_LOG(DFATAL) << "Failed to release AHardwareBuffer: " << success;
}
}
} // namespace mediapipe
#endif  // !defined(MEDIAPIPE_NO_JNI) && (__ANDROID_API__ >= 26 ||
        // defined(__ANDROID_UNAVAILABLE_SYMBOLS_ARE_WEAK__))

View File

@ -0,0 +1,131 @@
#include "mediapipe/framework/formats/hardware_buffer.h"
#include <android/hardware_buffer.h>
#include <memory>
#include "base/logging.h"
#include "mediapipe/framework/port/status_macros.h"
#include "testing/base/public/gmock.h"
#include "testing/base/public/gunit.h"
namespace mediapipe {
namespace {
// Returns a spec for a one-dimensional (BLOB-format) buffer of `size_bytes`
// bytes that is CPU readable/writable and usable as a GPU data buffer.
HardwareBufferSpec GetTestHardwareBufferSpec(uint32_t size_bytes) {
  return {.width = size_bytes,
          .height = 1,
          .layers = 1,
          .format = HardwareBufferSpec::AHARDWAREBUFFER_FORMAT_BLOB,
          // AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN (0x30) subsumes
          // ..._CPU_WRITE_RARELY (0x20), so only OFTEN is set; the combined
          // usage value is unchanged from OR-ing both.
          .usage = HardwareBufferSpec::AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN |
                   HardwareBufferSpec::AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN |
                   HardwareBufferSpec::AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER};
}
// Verifies Create() yields a valid wrapper holding a non-null underlying
// AHardwareBuffer handle.
TEST(HardwareBufferTest, ShouldConstructValidAHardwareBuffer) {
  MP_ASSERT_OK_AND_ASSIGN(
      HardwareBuffer hardware_buffer,
      HardwareBuffer::Create(GetTestHardwareBufferSpec(/*size_bytes=*/123)));

  EXPECT_NE(hardware_buffer.GetAHardwareBuffer(), nullptr);
  EXPECT_TRUE(hardware_buffer.IsValid());
}
// Verifies Reset() releases the buffer and clears both the valid and locked
// states.
TEST(HardwareBufferTest, ShouldResetValidAHardwareBuffer) {
  MP_ASSERT_OK_AND_ASSIGN(
      HardwareBuffer hardware_buffer,
      HardwareBuffer::Create(GetTestHardwareBufferSpec(/*size_bytes=*/123)));
  EXPECT_TRUE(hardware_buffer.IsValid());

  // Lock the buffer first so Reset() must also clear the locked state.
  // (Previously Lock()'s StatusOr was dereferenced without checking ok(),
  // which would CHECK-crash the test binary on failure instead of failing
  // the test.)
  MP_ASSERT_OK_AND_ASSIGN(
      void* const locked_ptr,
      hardware_buffer.Lock(
          HardwareBufferSpec::AHARDWAREBUFFER_USAGE_CPU_WRITE_RARELY));
  EXPECT_NE(locked_ptr, nullptr);
  EXPECT_TRUE(hardware_buffer.IsLocked());

  hardware_buffer.Reset();

  EXPECT_FALSE(hardware_buffer.IsValid());
  EXPECT_FALSE(hardware_buffer.IsLocked());
}
// Verifies that both the NDK-reported description and the stored spec() match
// the requested spec.
TEST(HardwareBufferTest, ShouldAllocateRequestedBufferSize) {
  constexpr int kBufferSize = 123;
  const HardwareBufferSpec spec = GetTestHardwareBufferSpec(kBufferSize);
  MP_ASSERT_OK_AND_ASSIGN(HardwareBuffer hardware_buffer,
                          HardwareBuffer::Create(spec));

  EXPECT_TRUE(hardware_buffer.IsValid());
  // Cross-check against the NDK's own description (only callable when the
  // runtime API level is >= 26).
  if (__builtin_available(android 26, *)) {
    AHardwareBuffer_Desc desc;
    AHardwareBuffer_describe(hardware_buffer.GetAHardwareBuffer(), &desc);
    EXPECT_EQ(desc.width, spec.width);
    EXPECT_EQ(desc.height, spec.height);
    EXPECT_EQ(desc.layers, spec.layers);
    EXPECT_EQ(desc.format, spec.format);
    EXPECT_EQ(desc.usage, spec.usage);
  }
  EXPECT_EQ(hardware_buffer.spec().width, spec.width);
  EXPECT_EQ(hardware_buffer.spec().height, spec.height);
  EXPECT_EQ(hardware_buffer.spec().layers, spec.layers);
  EXPECT_EQ(hardware_buffer.spec().format, spec.format);
  EXPECT_EQ(hardware_buffer.spec().usage, spec.usage);
}
// Verifies the move constructor transfers the AHardwareBuffer handle, the
// locked state, and the spec, leaving the moved-from instance empty.
TEST(HardwareBufferTest, ShouldSupportMoveConstructor) {
  constexpr int kBufferSize = 123;
  const auto spec = GetTestHardwareBufferSpec(kBufferSize);
  MP_ASSERT_OK_AND_ASSIGN(HardwareBuffer hardware_buffer_a,
                          HardwareBuffer::Create(spec));
  EXPECT_TRUE(hardware_buffer_a.IsValid());
  void* const ahardware_buffer_ptr_a = hardware_buffer_a.GetAHardwareBuffer();
  EXPECT_NE(ahardware_buffer_ptr_a, nullptr);
  EXPECT_FALSE(hardware_buffer_a.IsLocked());
  // Lock before the move so the test can confirm the locked state transfers.
  MP_ASSERT_OK_AND_ASSIGN(
      void* const hardware_buffer_a_locked_ptr,
      hardware_buffer_a.Lock(
          HardwareBufferSpec::AHARDWAREBUFFER_USAGE_CPU_WRITE_RARELY));
  EXPECT_NE(hardware_buffer_a_locked_ptr, nullptr);
  EXPECT_TRUE(hardware_buffer_a.IsLocked());

  HardwareBuffer hardware_buffer_b(std::move(hardware_buffer_a));

  // Moved-from instance is empty and unlocked...
  EXPECT_FALSE(hardware_buffer_a.IsValid());
  EXPECT_FALSE(hardware_buffer_a.IsLocked());
  // ...while the destination owns the same underlying handle and lock.
  void* const ahardware_buffer_ptr_b = hardware_buffer_b.GetAHardwareBuffer();
  EXPECT_EQ(ahardware_buffer_ptr_a, ahardware_buffer_ptr_b);
  EXPECT_TRUE(hardware_buffer_b.IsValid());
  EXPECT_TRUE(hardware_buffer_b.IsLocked());

  EXPECT_EQ(hardware_buffer_a.spec(), HardwareBufferSpec());
  EXPECT_EQ(hardware_buffer_b.spec(), spec);

  MP_ASSERT_OK(hardware_buffer_b.Unlock());
}
// Round-trips a string through the buffer: write under a write lock, then
// read it back under a read lock.
// NOTE(review): this file uses std::string_view, memcpy and memcmp but does
// not include <string_view> or <cstring> directly — it appears to rely on
// transitive includes from the test headers; confirm and include directly.
TEST(HardwareBufferTest, ShouldSupportReadWrite) {
  constexpr std::string_view kTestString = "TestString";
  constexpr int kBufferSize = kTestString.size();
  MP_ASSERT_OK_AND_ASSIGN(
      HardwareBuffer hardware_buffer,
      HardwareBuffer::Create(GetTestHardwareBufferSpec(kBufferSize)));

  // Write test string.
  MP_ASSERT_OK_AND_ASSIGN(
      void* const write_ptr,
      hardware_buffer.Lock(
          HardwareBufferSpec::AHARDWAREBUFFER_USAGE_CPU_WRITE_RARELY));
  memcpy(write_ptr, kTestString.data(), kBufferSize);
  MP_ASSERT_OK(hardware_buffer.Unlock());

  // Read test string.
  MP_ASSERT_OK_AND_ASSIGN(
      void* const read_ptr,
      hardware_buffer.Lock(
          HardwareBufferSpec::AHARDWAREBUFFER_USAGE_CPU_READ_RARELY));
  EXPECT_EQ(memcmp(read_ptr, kTestString.data(), kBufferSize), 0);
  MP_ASSERT_OK(hardware_buffer.Unlock());
}
} // namespace
} // namespace mediapipe

View File

@ -24,6 +24,9 @@
#if MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_30 #if MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_30
#include "mediapipe/gpu/gl_base.h" #include "mediapipe/gpu/gl_base.h"
#endif // MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_30 #endif // MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_30
#ifdef MEDIAPIPE_TENSOR_USE_AHWB
#include "mediapipe/framework/formats/hardware_buffer.h"
#endif // MEDIAPIPE_TENSOR_USE_AHWB
#if MEDIAPIPE_METAL_ENABLED #if MEDIAPIPE_METAL_ENABLED
#import <Metal/Metal.h> #import <Metal/Metal.h>
@ -536,9 +539,8 @@ Tensor::CpuReadView Tensor::GetCpuReadView() const {
void* ptr = MapAhwbToCpuRead(); void* ptr = MapAhwbToCpuRead();
if (ptr) { if (ptr) {
valid_ |= kValidCpu; valid_ |= kValidCpu;
return {ptr, std::move(lock), [ahwb = ahwb_] { return {ptr, std::move(lock), [ahwb = ahwb_.get()] {
auto error = AHardwareBuffer_unlock(ahwb, nullptr); ABSL_CHECK_OK(ahwb->Unlock()) << "Unlock failed.";
ABSL_CHECK(error == 0) << "AHardwareBuffer_unlock " << error;
}}; }};
} }
} }
@ -620,9 +622,11 @@ Tensor::CpuWriteView Tensor::GetCpuWriteView(
if (__builtin_available(android 26, *)) { if (__builtin_available(android 26, *)) {
void* ptr = MapAhwbToCpuWrite(); void* ptr = MapAhwbToCpuWrite();
if (ptr) { if (ptr) {
return {ptr, std::move(lock), [ahwb = ahwb_, fence_fd = &fence_fd_] { return {ptr, std::move(lock),
auto error = AHardwareBuffer_unlock(ahwb, fence_fd); [ahwb = ahwb_.get(), fence_fd = &fence_fd_] {
ABSL_CHECK(error == 0) << "AHardwareBuffer_unlock " << error; auto fence_fd_status = ahwb->UnlockAsync();
ABSL_CHECK_OK(fence_fd_status) << "Unlock failed.";
*fence_fd = fence_fd_status.value();
}}; }};
} }
} }

View File

@ -44,7 +44,8 @@
#ifdef MEDIAPIPE_TENSOR_USE_AHWB #ifdef MEDIAPIPE_TENSOR_USE_AHWB
#include <EGL/egl.h> #include <EGL/egl.h>
#include <EGL/eglext.h> #include <EGL/eglext.h>
#include <android/hardware_buffer.h>
#include "mediapipe/framework/formats/hardware_buffer.h"
#endif // MEDIAPIPE_TENSOR_USE_AHWB #endif // MEDIAPIPE_TENSOR_USE_AHWB
#if MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_30 #if MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_30
#include "mediapipe/gpu/gl_base.h" #include "mediapipe/gpu/gl_base.h"
@ -195,9 +196,11 @@ class Tensor {
using FinishingFunc = std::function<bool(bool)>; using FinishingFunc = std::function<bool(bool)>;
class AHardwareBufferView : public View { class AHardwareBufferView : public View {
public: public:
AHardwareBuffer* handle() const { return handle_; } AHardwareBuffer* handle() const {
return hardware_buffer_->GetAHardwareBuffer();
}
AHardwareBufferView(AHardwareBufferView&& src) : View(std::move(src)) { AHardwareBufferView(AHardwareBufferView&& src) : View(std::move(src)) {
handle_ = std::exchange(src.handle_, nullptr); hardware_buffer_ = std::move(src.hardware_buffer_);
file_descriptor_ = src.file_descriptor_; file_descriptor_ = src.file_descriptor_;
fence_fd_ = std::exchange(src.fence_fd_, nullptr); fence_fd_ = std::exchange(src.fence_fd_, nullptr);
ahwb_written_ = std::exchange(src.ahwb_written_, nullptr); ahwb_written_ = std::exchange(src.ahwb_written_, nullptr);
@ -222,17 +225,17 @@ class Tensor {
protected: protected:
friend class Tensor; friend class Tensor;
AHardwareBufferView(AHardwareBuffer* handle, int file_descriptor, AHardwareBufferView(HardwareBuffer* hardware_buffer, int file_descriptor,
int* fence_fd, FinishingFunc* ahwb_written, int* fence_fd, FinishingFunc* ahwb_written,
std::function<void()>* release_callback, std::function<void()>* release_callback,
std::unique_ptr<absl::MutexLock>&& lock) std::unique_ptr<absl::MutexLock>&& lock)
: View(std::move(lock)), : View(std::move(lock)),
handle_(handle), hardware_buffer_(hardware_buffer),
file_descriptor_(file_descriptor), file_descriptor_(file_descriptor),
fence_fd_(fence_fd), fence_fd_(fence_fd),
ahwb_written_(ahwb_written), ahwb_written_(ahwb_written),
release_callback_(release_callback) {} release_callback_(release_callback) {}
AHardwareBuffer* handle_; HardwareBuffer* hardware_buffer_;
int file_descriptor_; int file_descriptor_;
// The view sets some Tensor's fields. The view is released prior to tensor. // The view sets some Tensor's fields. The view is released prior to tensor.
int* fence_fd_; int* fence_fd_;
@ -384,7 +387,7 @@ class Tensor {
mutable std::unique_ptr<MtlResources> mtl_resources_; mutable std::unique_ptr<MtlResources> mtl_resources_;
#ifdef MEDIAPIPE_TENSOR_USE_AHWB #ifdef MEDIAPIPE_TENSOR_USE_AHWB
mutable AHardwareBuffer* ahwb_ = nullptr; mutable std::unique_ptr<HardwareBuffer> ahwb_;
// Signals when GPU finished writing into SSBO so AHWB can be used then. Or // Signals when GPU finished writing into SSBO so AHWB can be used then. Or
// signals when writing into AHWB has been finished so GPU can read from SSBO. // signals when writing into AHWB has been finished so GPU can read from SSBO.
// Sync and FD are bound together. // Sync and FD are bound together.

View File

@ -10,7 +10,7 @@
#include "absl/log/absl_check.h" #include "absl/log/absl_check.h"
#include "absl/log/absl_log.h" #include "absl/log/absl_log.h"
#include "absl/synchronization/mutex.h" #include "absl/synchronization/mutex.h"
#include "mediapipe/framework/port.h" #include "mediapipe/framework/formats/hardware_buffer.h"
#include "mediapipe/gpu/gl_base.h" #include "mediapipe/gpu/gl_base.h"
#endif // MEDIAPIPE_TENSOR_USE_AHWB #endif // MEDIAPIPE_TENSOR_USE_AHWB
@ -97,7 +97,7 @@ class DelayedReleaser {
DelayedReleaser(DelayedReleaser&&) = delete; DelayedReleaser(DelayedReleaser&&) = delete;
DelayedReleaser& operator=(DelayedReleaser&&) = delete; DelayedReleaser& operator=(DelayedReleaser&&) = delete;
static void Add(AHardwareBuffer* ahwb, GLuint opengl_buffer, static void Add(std::unique_ptr<HardwareBuffer> ahwb, GLuint opengl_buffer,
EGLSyncKHR ssbo_sync, GLsync ssbo_read, EGLSyncKHR ssbo_sync, GLsync ssbo_read,
Tensor::FinishingFunc&& ahwb_written, Tensor::FinishingFunc&& ahwb_written,
std::shared_ptr<mediapipe::GlContext> gl_context, std::shared_ptr<mediapipe::GlContext> gl_context,
@ -115,8 +115,8 @@ class DelayedReleaser {
// Using `new` to access a non-public constructor. // Using `new` to access a non-public constructor.
to_release_local.emplace_back(absl::WrapUnique(new DelayedReleaser( to_release_local.emplace_back(absl::WrapUnique(new DelayedReleaser(
ahwb, opengl_buffer, ssbo_sync, ssbo_read, std::move(ahwb_written), std::move(ahwb), opengl_buffer, ssbo_sync, ssbo_read,
gl_context, std::move(callback)))); std::move(ahwb_written), gl_context, std::move(callback))));
for (auto it = to_release_local.begin(); it != to_release_local.end();) { for (auto it = to_release_local.begin(); it != to_release_local.end();) {
if ((*it)->IsSignaled()) { if ((*it)->IsSignaled()) {
it = to_release_local.erase(it); it = to_release_local.erase(it);
@ -136,9 +136,6 @@ class DelayedReleaser {
~DelayedReleaser() { ~DelayedReleaser() {
if (release_callback_) release_callback_(); if (release_callback_) release_callback_();
if (__builtin_available(android 26, *)) {
AHardwareBuffer_release(ahwb_);
}
} }
bool IsSignaled() { bool IsSignaled() {
@ -181,7 +178,7 @@ class DelayedReleaser {
} }
protected: protected:
AHardwareBuffer* ahwb_; std::unique_ptr<HardwareBuffer> ahwb_;
GLuint opengl_buffer_; GLuint opengl_buffer_;
// TODO: use wrapper instead. // TODO: use wrapper instead.
EGLSyncKHR fence_sync_; EGLSyncKHR fence_sync_;
@ -192,12 +189,12 @@ class DelayedReleaser {
std::function<void()> release_callback_; std::function<void()> release_callback_;
static inline std::deque<std::unique_ptr<DelayedReleaser>> to_release_; static inline std::deque<std::unique_ptr<DelayedReleaser>> to_release_;
DelayedReleaser(AHardwareBuffer* ahwb, GLuint opengl_buffer, DelayedReleaser(std::unique_ptr<HardwareBuffer> ahwb, GLuint opengl_buffer,
EGLSyncKHR fence_sync, GLsync ssbo_read, EGLSyncKHR fence_sync, GLsync ssbo_read,
Tensor::FinishingFunc&& ahwb_written, Tensor::FinishingFunc&& ahwb_written,
std::shared_ptr<mediapipe::GlContext> gl_context, std::shared_ptr<mediapipe::GlContext> gl_context,
std::function<void()>&& callback) std::function<void()>&& callback)
: ahwb_(ahwb), : ahwb_(std::move(ahwb)),
opengl_buffer_(opengl_buffer), opengl_buffer_(opengl_buffer),
fence_sync_(fence_sync), fence_sync_(fence_sync),
ssbo_read_(ssbo_read), ssbo_read_(ssbo_read),
@ -214,7 +211,7 @@ Tensor::AHardwareBufferView Tensor::GetAHardwareBufferReadView() const {
ABSL_CHECK(!(valid_ & kValidOpenGlTexture2d)) ABSL_CHECK(!(valid_ & kValidOpenGlTexture2d))
<< "Tensor conversion between OpenGL texture and AHardwareBuffer is not " << "Tensor conversion between OpenGL texture and AHardwareBuffer is not "
"supported."; "supported.";
bool transfer = !ahwb_; bool transfer = ahwb_ == nullptr;
ABSL_CHECK(AllocateAHardwareBuffer()) ABSL_CHECK(AllocateAHardwareBuffer())
<< "AHardwareBuffer is not supported on the target system."; << "AHardwareBuffer is not supported on the target system.";
valid_ |= kValidAHardwareBuffer; valid_ |= kValidAHardwareBuffer;
@ -223,12 +220,10 @@ Tensor::AHardwareBufferView Tensor::GetAHardwareBufferReadView() const {
} else { } else {
if (valid_ & kValidOpenGlBuffer) CreateEglSyncAndFd(); if (valid_ & kValidOpenGlBuffer) CreateEglSyncAndFd();
} }
return {ahwb_, return {ahwb_.get(), ssbo_written_,
ssbo_written_,
&fence_fd_, // The FD is created for SSBO -> AHWB synchronization. &fence_fd_, // The FD is created for SSBO -> AHWB synchronization.
&ahwb_written_, // Filled by SetReadingFinishedFunc. &ahwb_written_, // Filled by SetReadingFinishedFunc.
&release_callback_, &release_callback_, std::move(lock)};
std::move(lock)};
} }
void Tensor::CreateEglSyncAndFd() const { void Tensor::CreateEglSyncAndFd() const {
@ -258,12 +253,11 @@ Tensor::AHardwareBufferView Tensor::GetAHardwareBufferWriteView(
ABSL_CHECK(AllocateAHardwareBuffer(size_alignment)) ABSL_CHECK(AllocateAHardwareBuffer(size_alignment))
<< "AHardwareBuffer is not supported on the target system."; << "AHardwareBuffer is not supported on the target system.";
valid_ = kValidAHardwareBuffer; valid_ = kValidAHardwareBuffer;
return {ahwb_, return {ahwb_.get(),
/*ssbo_written=*/-1, /*ssbo_written=*/-1,
&fence_fd_, // For SetWritingFinishedFD. &fence_fd_, // For SetWritingFinishedFD.
&ahwb_written_, &ahwb_written_, // Filled by SetReadingFinishedFunc.
&release_callback_, &release_callback_, std::move(lock)};
std::move(lock)};
} }
bool Tensor::AllocateAHardwareBuffer(int size_alignment) const { bool Tensor::AllocateAHardwareBuffer(int size_alignment) const {
@ -276,40 +270,43 @@ bool Tensor::AllocateAHardwareBuffer(int size_alignment) const {
} }
use_ahwb_ = true; use_ahwb_ = true;
if (__builtin_available(android 26, *)) { if (ahwb_ == nullptr) {
if (ahwb_ == nullptr) { HardwareBufferSpec spec = {};
AHardwareBuffer_Desc desc = {}; if (size_alignment == 0) {
if (size_alignment == 0) { spec.width = bytes();
desc.width = bytes(); } else {
} else { // We expect allocations to be page-aligned, implicitly satisfying any
// We expect allocations to be page-aligned, implicitly satisfying any // requirements from Edge TPU. No need to add a check for this,
// requirements from Edge TPU. No need to add a check for this, // since Edge TPU will check for us.
// since Edge TPU will check for us. spec.width = AlignedToPowerOf2(bytes(), size_alignment);
desc.width = AlignedToPowerOf2(bytes(), size_alignment);
}
desc.height = 1;
desc.layers = 1;
desc.format = AHARDWAREBUFFER_FORMAT_BLOB;
desc.usage = AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN |
AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN |
AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER;
return AHardwareBuffer_allocate(&desc, &ahwb_) == 0;
} }
return true; spec.height = 1;
spec.layers = 1;
spec.format = HardwareBufferSpec::AHARDWAREBUFFER_FORMAT_BLOB;
spec.usage = HardwareBufferSpec::AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN |
HardwareBufferSpec::AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN |
HardwareBufferSpec::AHARDWAREBUFFER_USAGE_GPU_DATA_BUFFER;
auto new_ahwb = HardwareBuffer::Create(spec);
if (!new_ahwb.ok()) {
ABSL_LOG(ERROR) << "Allocation of NDK Hardware Buffer failed: "
<< new_ahwb.status();
return false;
}
ahwb_ = std::make_unique<HardwareBuffer>(std::move(*new_ahwb));
} }
return false; return true;
} }
bool Tensor::AllocateAhwbMapToSsbo() const { bool Tensor::AllocateAhwbMapToSsbo() const {
if (__builtin_available(android 26, *)) { if (__builtin_available(android 26, *)) {
if (AllocateAHardwareBuffer()) { if (AllocateAHardwareBuffer()) {
if (MapAHardwareBufferToGlBuffer(ahwb_, bytes()).ok()) { if (MapAHardwareBufferToGlBuffer(ahwb_->GetAHardwareBuffer(), bytes())
.ok()) {
glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0); glBindBuffer(GL_SHADER_STORAGE_BUFFER, 0);
return true; return true;
} }
// Unable to make OpenGL <-> AHWB binding. Use regular SSBO instead. // Unable to make OpenGL <-> AHWB binding. Use regular SSBO instead.
AHardwareBuffer_release(ahwb_); ahwb_.reset();
ahwb_ = nullptr;
} }
} }
return false; return false;
@ -317,14 +314,11 @@ bool Tensor::AllocateAhwbMapToSsbo() const {
// Moves Cpu/Ssbo resource under the Ahwb backed memory. // Moves Cpu/Ssbo resource under the Ahwb backed memory.
void Tensor::MoveCpuOrSsboToAhwb() const { void Tensor::MoveCpuOrSsboToAhwb() const {
void* dest = nullptr; auto dest =
if (__builtin_available(android 26, *)) { ahwb_->Lock(HardwareBufferSpec::AHARDWAREBUFFER_USAGE_CPU_WRITE_RARELY);
auto error = AHardwareBuffer_lock( ABSL_CHECK_OK(dest) << "Lock of AHWB failed";
ahwb_, AHARDWAREBUFFER_USAGE_CPU_WRITE_RARELY, -1, nullptr, &dest);
ABSL_CHECK(error == 0) << "AHardwareBuffer_lock " << error;
}
if (valid_ & kValidCpu) { if (valid_ & kValidCpu) {
std::memcpy(dest, cpu_buffer_, bytes()); std::memcpy(*dest, cpu_buffer_, bytes());
// Free CPU memory because next time AHWB is mapped instead. // Free CPU memory because next time AHWB is mapped instead.
free(cpu_buffer_); free(cpu_buffer_);
cpu_buffer_ = nullptr; cpu_buffer_ = nullptr;
@ -334,7 +328,7 @@ void Tensor::MoveCpuOrSsboToAhwb() const {
glBindBuffer(GL_SHADER_STORAGE_BUFFER, opengl_buffer_); glBindBuffer(GL_SHADER_STORAGE_BUFFER, opengl_buffer_);
const void* src = glMapBufferRange(GL_SHADER_STORAGE_BUFFER, 0, bytes(), const void* src = glMapBufferRange(GL_SHADER_STORAGE_BUFFER, 0, bytes(),
GL_MAP_READ_BIT); GL_MAP_READ_BIT);
std::memcpy(dest, src, bytes()); std::memcpy(*dest, src, bytes());
glUnmapBuffer(GL_SHADER_STORAGE_BUFFER); glUnmapBuffer(GL_SHADER_STORAGE_BUFFER);
glDeleteBuffers(1, &opengl_buffer_); glDeleteBuffers(1, &opengl_buffer_);
}); });
@ -347,10 +341,7 @@ void Tensor::MoveCpuOrSsboToAhwb() const {
ABSL_LOG(FATAL) << "Can't convert tensor with mask " << valid_ ABSL_LOG(FATAL) << "Can't convert tensor with mask " << valid_
<< " into AHWB."; << " into AHWB.";
} }
if (__builtin_available(android 26, *)) { ABSL_CHECK_OK(ahwb_->Unlock()) << "Unlock of AHWB failed";
auto error = AHardwareBuffer_unlock(ahwb_, nullptr);
ABSL_CHECK(error == 0) << "AHardwareBuffer_unlock " << error;
}
} }
// SSBO is created on top of AHWB. A fence is inserted into the GPU queue before // SSBO is created on top of AHWB. A fence is inserted into the GPU queue before
@ -403,59 +394,52 @@ void Tensor::ReleaseAhwbStuff() {
if (ahwb_) { if (ahwb_) {
if (ssbo_read_ != 0 || fence_sync_ != EGL_NO_SYNC_KHR || ahwb_written_) { if (ssbo_read_ != 0 || fence_sync_ != EGL_NO_SYNC_KHR || ahwb_written_) {
if (ssbo_written_ != -1) close(ssbo_written_); if (ssbo_written_ != -1) close(ssbo_written_);
DelayedReleaser::Add(ahwb_, opengl_buffer_, fence_sync_, ssbo_read_, DelayedReleaser::Add(std::move(ahwb_), opengl_buffer_, fence_sync_,
std::move(ahwb_written_), gl_context_, ssbo_read_, std::move(ahwb_written_), gl_context_,
std::move(release_callback_)); std::move(release_callback_));
opengl_buffer_ = GL_INVALID_INDEX; opengl_buffer_ = GL_INVALID_INDEX;
} else { } else {
if (release_callback_) release_callback_(); if (release_callback_) release_callback_();
AHardwareBuffer_release(ahwb_); ahwb_.reset();
} }
} }
} }
} }
void* Tensor::MapAhwbToCpuRead() const { void* Tensor::MapAhwbToCpuRead() const {
if (__builtin_available(android 26, *)) { if (ahwb_ != nullptr) {
if (ahwb_) { if (!(valid_ & kValidCpu)) {
if (!(valid_ & kValidCpu)) { if ((valid_ & kValidOpenGlBuffer) && ssbo_written_ == -1) {
if ((valid_ & kValidOpenGlBuffer) && ssbo_written_ == -1) { // EGLSync is failed. Use another synchronization method.
// EGLSync is failed. Use another synchronization method. // TODO: Use tflite::gpu::GlBufferSync and GlActiveSync.
// TODO: Use tflite::gpu::GlBufferSync and GlActiveSync. gl_context_->Run([]() { glFinish(); });
gl_context_->Run([]() { glFinish(); }); } else if (valid_ & kValidAHardwareBuffer) {
} else if (valid_ & kValidAHardwareBuffer) { ABSL_CHECK(ahwb_written_) << "Ahwb-to-Cpu synchronization requires the "
ABSL_CHECK(ahwb_written_) "completion function to be set";
<< "Ahwb-to-Cpu synchronization requires the " ABSL_CHECK(ahwb_written_(true))
"completion function to be set"; << "An error oqcured while waiting for the buffer to be written";
ABSL_CHECK(ahwb_written_(true))
<< "An error oqcured while waiting for the buffer to be written";
}
} }
void* ptr;
auto error =
AHardwareBuffer_lock(ahwb_, AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN,
ssbo_written_, nullptr, &ptr);
ABSL_CHECK(error == 0) << "AHardwareBuffer_lock " << error;
close(ssbo_written_);
ssbo_written_ = -1;
return ptr;
} }
auto ptr =
ahwb_->Lock(HardwareBufferSpec::AHARDWAREBUFFER_USAGE_CPU_READ_OFTEN,
ssbo_written_);
ABSL_CHECK_OK(ptr) << "Lock of AHWB failed";
close(ssbo_written_);
ssbo_written_ = -1;
return *ptr;
} }
return nullptr; return nullptr;
} }
void* Tensor::MapAhwbToCpuWrite() const { void* Tensor::MapAhwbToCpuWrite() const {
if (__builtin_available(android 26, *)) { if (ahwb_ != nullptr) {
if (ahwb_) { // TODO: If previously acquired view is GPU write view then need
// TODO: If previously acquired view is GPU write view then need // to be sure that writing is finished. That's a warning: two consequent
// to be sure that writing is finished. That's a warning: two consequent // write views should be interleaved with read view.
// write views should be interleaved with read view. auto locked_ptr =
void* ptr; ahwb_->Lock(HardwareBufferSpec::AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN);
auto error = AHardwareBuffer_lock( ABSL_CHECK_OK(locked_ptr) << "Lock of AHWB failed";
ahwb_, AHARDWAREBUFFER_USAGE_CPU_WRITE_OFTEN, -1, nullptr, &ptr); return *locked_ptr;
ABSL_CHECK(error == 0) << "AHardwareBuffer_lock " << error;
return ptr;
}
} }
return nullptr; return nullptr;
} }

View File

@ -1,3 +1,5 @@
#include <android/hardware_buffer.h>
#include "mediapipe/framework/formats/tensor.h" #include "mediapipe/framework/formats/tensor.h"
#include "testing/base/public/gmock.h" #include "testing/base/public/gmock.h"
#include "testing/base/public/gunit.h" #include "testing/base/public/gunit.h"