Tensor: GPU/CPU -> AHWB interoperability: transform the underlying storage into an AHardwareBuffer, releasing the previously allocated CPU/GPU resources.

PiperOrigin-RevId: 495748104
Nikolay Chirkov 2022-12-15 18:32:10 -08:00 committed by Copybara-Service
parent 0a1f050f1f
commit d5562241cc
3 changed files with 41 additions and 10 deletions
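
For context, the usage pattern this change enables looks roughly like the following. This is a minimal sketch distilled from the tests below; it assumes an Android build with MEDIAPIPE_TENSOR_USE_AHWB enabled, and the fill values mirror the test's i/10 pattern.

// Sketch: write a Tensor through the CPU view, then hand it off as an
// AHardwareBuffer. The first AHWB view request migrates the storage and
// releases the original CPU (or OpenGL) resources.
Tensor tensor{Tensor::ElementType::kFloat32, Tensor::Shape({20})};
{
  auto ptr = tensor.GetCpuWriteView().buffer<float>();
  for (int i = 0; i < 20; ++i) ptr[i] = static_cast<float>(i) / 10.0f;
}
{
  // Storage is transformed into an AHWB here; the CPU buffer is freed.
  auto view = tensor.GetAHardwareBufferReadView();
  view.SetReadingFinishedFunc([](bool) { return true; });
}
// Subsequent CPU/OpenGL views are mapped on top of the AHWB.
auto ptr = tensor.GetCpuReadView().buffer<float>();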

@@ -408,8 +408,8 @@ class Tensor {
   mutable std::function<void()> release_callback_;
   bool AllocateAHardwareBuffer(int size_alignment = 0) const;
   void CreateEglSyncAndFd() const;
-  // Use Ahwb for other views: OpenGL / CPU buffer.
 #endif  // MEDIAPIPE_TENSOR_USE_AHWB
+  // Use Ahwb for other views: OpenGL / CPU buffer.
   static inline bool use_ahwb_ = false;
   // Expects the target SSBO to be already bound.
   bool AllocateAhwbMapToSsbo() const;
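
AllocateAhwbMapToSsbo() is the path that lets the AHWB back an OpenGL buffer. On Android this kind of mapping is typically done via the EGL_ANDROID_get_native_client_buffer and GL_EXT_external_buffer extensions; the sketch below illustrates the idea, not MediaPipe's exact implementation (the helper name MapAhwbToSsbo is hypothetical, error handling elided).

#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GLES2/gl2ext.h>
#include <GLES3/gl31.h>
#include <android/hardware_buffer.h>

// Sketch: back the currently bound SSBO with an AHardwareBuffer's memory.
bool MapAhwbToSsbo(AHardwareBuffer* ahwb, size_t num_bytes) {
  EGLClientBuffer client_buffer = eglGetNativeClientBufferANDROID(ahwb);
  if (!client_buffer) return false;
  // The extension entry point must be resolved at runtime on Android.
  auto buffer_storage_external =
      reinterpret_cast<PFNGLBUFFERSTORAGEEXTERNALEXTPROC>(
          eglGetProcAddress("glBufferStorageExternalEXT"));
  if (!buffer_storage_external) return false;
  buffer_storage_external(GL_SHADER_STORAGE_BUFFER, 0, num_bytes,
                          client_buffer,
                          GL_MAP_READ_BIT | GL_MAP_WRITE_BIT);
  return glGetError() == GL_NO_ERROR;
}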

@@ -212,9 +212,6 @@ Tensor::AHardwareBufferView Tensor::GetAHardwareBufferReadView() const {
   CHECK(!(valid_ & kValidOpenGlTexture2d))
       << "Tensor conversion between OpenGL texture and AHardwareBuffer is not "
          "supported.";
-  CHECK(ahwb_ || !(valid_ & kValidOpenGlBuffer))
-      << "Interoperability bettween OpenGL buffer and AHardwareBuffer is not "
-         "supported on target system.";
   bool transfer = !ahwb_;
   CHECK(AllocateAHardwareBuffer())
       << "AHardwareBuffer is not supported on the target system.";
@@ -315,7 +312,13 @@ void Tensor::MoveCpuOrSsboToAhwb() const {
         ahwb_, AHARDWAREBUFFER_USAGE_CPU_WRITE_RARELY, -1, nullptr, &dest);
     CHECK(error == 0) << "AHardwareBuffer_lock " << error;
   }
-  if (valid_ & kValidOpenGlBuffer) {
+  if (valid_ & kValidCpu) {
+    std::memcpy(dest, cpu_buffer_, bytes());
+    // Free CPU memory because next time AHWB is mapped instead.
+    free(cpu_buffer_);
+    cpu_buffer_ = nullptr;
+    valid_ &= ~kValidCpu;
+  } else if (valid_ & kValidOpenGlBuffer) {
     gl_context_->Run([this, dest]() {
       glBindBuffer(GL_SHADER_STORAGE_BUFFER, opengl_buffer_);
       const void* src = glMapBufferRange(GL_SHADER_STORAGE_BUFFER, 0, bytes(),
@@ -326,11 +329,9 @@ void Tensor::MoveCpuOrSsboToAhwb() const {
     });
     opengl_buffer_ = GL_INVALID_INDEX;
     gl_context_ = nullptr;
-  } else if (valid_ & kValidCpu) {
-    std::memcpy(dest, cpu_buffer_, bytes());
-    // Free CPU memory because next time AHWB is mapped instead.
-    free(cpu_buffer_);
-    cpu_buffer_ = nullptr;
+    // Reset OpenGL Buffer validness. The OpenGL buffer will be allocated on top
+    // of the Ahwb at the next request to the OpenGlBufferView.
+    valid_ &= ~kValidOpenGlBuffer;
   } else {
     LOG(FATAL) << "Can't convert tensor with mask " << valid_ << " into AHWB.";
   }
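
The lock shown at the top of this hunk follows the standard NDK pattern: lock the AHWB for CPU writes, copy, then unlock (the unlock falls outside the hunk). A condensed sketch of that pattern, assuming a BLOB-format buffer of at least `size` bytes; the helper name CopyIntoAhwb is hypothetical.

#include <android/hardware_buffer.h>
#include <cstring>

void CopyIntoAhwb(AHardwareBuffer* ahwb, const void* src, size_t size) {
  void* dest = nullptr;
  // -1: no acquire fence to wait on; nullptr rect: lock the whole buffer.
  int error = AHardwareBuffer_lock(
      ahwb, AHARDWAREBUFFER_USAGE_CPU_WRITE_RARELY, -1, nullptr, &dest);
  if (error != 0) return;
  std::memcpy(dest, src, size);
  // nullptr: no release fence is produced.
  AHardwareBuffer_unlock(ahwb, nullptr);
}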

@@ -152,6 +152,36 @@ TEST_F(TensorAhwbGpuTest, TestReplacingCpuByAhwb) {
   {
     auto view = tensor.GetAHardwareBufferReadView();
     EXPECT_NE(view.handle(), nullptr);
+    view.SetReadingFinishedFunc([](bool) { return true; });
   }
   auto ptr = tensor.GetCpuReadView().buffer<float>();
   EXPECT_NE(ptr, nullptr);
+  std::vector<float> reference;
+  reference.resize(num_elements);
+  for (int i = 0; i < num_elements; i++) {
+    reference[i] = static_cast<float>(i) / 10.0f;
+  }
+  EXPECT_THAT(absl::Span<const float>(ptr, num_elements),
+              testing::Pointwise(testing::FloatEq(), reference));
+}
+
+TEST_F(TensorAhwbGpuTest, TestReplacingGpuByAhwb) {
+  // Request the GPU view to get the ssbo allocated internally.
+  // Request Ahwb view then to transform the storage into Ahwb.
+  Tensor::SetPreferredStorageType(Tensor::StorageType::kDefault);
+  constexpr size_t num_elements = 20;
+  Tensor tensor{Tensor::ElementType::kFloat32, Tensor::Shape({num_elements})};
+  RunInGlContext([&tensor] {
+    auto ssbo_view = tensor.GetOpenGlBufferWriteView();
+    auto ssbo_name = ssbo_view.name();
+    EXPECT_GT(ssbo_name, 0);
+    FillGpuBuffer(ssbo_name, tensor.shape().num_elements(),
+                  tensor.element_type());
+  });
+  {
+    auto view = tensor.GetAHardwareBufferReadView();
+    EXPECT_NE(view.handle(), nullptr);
+    view.SetReadingFinishedFunc([](bool) { return true; });
+  }
+  auto ptr = tensor.GetCpuReadView().buffer<float>();
+  EXPECT_NE(ptr, nullptr);