Return an error status when any TFLite input or output tensor lacks the valid dimensionality information needed to allocate a GL/Metal buffer before calling ModifyGraphWithDelegate.

PiperOrigin-RevId: 489539740
This commit is contained in:
Jiuqiang Tang 2022-11-18 12:42:58 -08:00 committed by Copybara-Service
parent 71ae496a20
commit 1b594a0310
3 changed files with 17 additions and 0 deletions

View File

@ -464,6 +464,7 @@ cc_library(
"//mediapipe/gpu:gl_calculator_helper", "//mediapipe/gpu:gl_calculator_helper",
"@com_google_absl//absl/memory", "@com_google_absl//absl/memory",
"@com_google_absl//absl/status", "@com_google_absl//absl/status",
"@com_google_absl//absl/strings:str_format",
"@org_tensorflow//tensorflow/lite/delegates/gpu:gl_delegate", "@org_tensorflow//tensorflow/lite/delegates/gpu:gl_delegate",
], ],
alwayslink = 1, alwayslink = 1,
@ -513,6 +514,7 @@ cc_library(
"//mediapipe/objc:mediapipe_framework_ios", "//mediapipe/objc:mediapipe_framework_ios",
"//mediapipe/util/tflite:config", "//mediapipe/util/tflite:config",
"@com_google_absl//absl/memory", "@com_google_absl//absl/memory",
"@com_google_absl//absl/strings:str_format",
"@org_tensorflow//tensorflow/lite/delegates/gpu:metal_delegate", "@org_tensorflow//tensorflow/lite/delegates/gpu:metal_delegate",
"@org_tensorflow//tensorflow/lite/delegates/gpu:metal_delegate_internal", "@org_tensorflow//tensorflow/lite/delegates/gpu:metal_delegate_internal",
"@org_tensorflow//tensorflow/lite/delegates/gpu/common:shape", "@org_tensorflow//tensorflow/lite/delegates/gpu/common:shape",

View File

@ -20,6 +20,7 @@
#include "absl/memory/memory.h" #include "absl/memory/memory.h"
#include "absl/status/status.h" #include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "mediapipe/calculators/tensor/inference_calculator.h" #include "mediapipe/calculators/tensor/inference_calculator.h"
#include "mediapipe/calculators/tensor/inference_calculator.pb.h" #include "mediapipe/calculators/tensor/inference_calculator.pb.h"
#include "mediapipe/framework/calculator_context.h" #include "mediapipe/framework/calculator_context.h"
@ -154,6 +155,10 @@ absl::Status InferenceCalculatorGlImpl::GpuInferenceRunner::LoadDelegate(
const auto& input_indices = interpreter_->inputs(); const auto& input_indices = interpreter_->inputs();
for (int i = 0; i < input_indices.size(); ++i) { for (int i = 0; i < input_indices.size(); ++i) {
const TfLiteTensor* tensor = interpreter_->tensor(input_indices[i]); const TfLiteTensor* tensor = interpreter_->tensor(input_indices[i]);
RET_CHECK(tensor->dims->size > 0) << absl::StrFormat(
"Input tensor at index [%d] doesn't specify dimensions.",
input_indices[i]);
gpu_buffers_in_.emplace_back(absl::make_unique<Tensor>( gpu_buffers_in_.emplace_back(absl::make_unique<Tensor>(
Tensor::ElementType::kFloat32, Tensor::ElementType::kFloat32,
Tensor::Shape{std::vector<int>{ Tensor::Shape{std::vector<int>{
@ -171,6 +176,9 @@ absl::Status InferenceCalculatorGlImpl::GpuInferenceRunner::LoadDelegate(
// Create and bind output buffers. // Create and bind output buffers.
for (int i = 0; i < output_size_; ++i) { for (int i = 0; i < output_size_; ++i) {
const TfLiteTensor* tensor = interpreter_->tensor(output_indices[i]); const TfLiteTensor* tensor = interpreter_->tensor(output_indices[i]);
RET_CHECK(tensor->dims->size > 0) << absl::StrFormat(
"Output tensor at index [%d] doesn't specify dimensions.",
output_indices[i]);
gpu_buffers_out_.emplace_back(absl::make_unique<Tensor>( gpu_buffers_out_.emplace_back(absl::make_unique<Tensor>(
Tensor::ElementType::kFloat32, Tensor::ElementType::kFloat32,
Tensor::Shape{std::vector<int>{ Tensor::Shape{std::vector<int>{

View File

@ -22,6 +22,7 @@
#include <vector> #include <vector>
#include "absl/memory/memory.h" #include "absl/memory/memory.h"
#include "absl/strings/str_format.h"
#include "mediapipe/calculators/tensor/inference_calculator.h" #include "mediapipe/calculators/tensor/inference_calculator.h"
#import "mediapipe/gpu/MPPMetalHelper.h" #import "mediapipe/gpu/MPPMetalHelper.h"
#include "mediapipe/gpu/MPPMetalUtil.h" #include "mediapipe/gpu/MPPMetalUtil.h"
@ -245,6 +246,9 @@ absl::Status InferenceCalculatorMetalImpl::CreateConverters(
const auto& input_indices = interpreter_->inputs(); const auto& input_indices = interpreter_->inputs();
for (int i = 0; i < input_indices.size(); ++i) { for (int i = 0; i < input_indices.size(); ++i) {
const TfLiteTensor* tensor = interpreter_->tensor(input_indices[i]); const TfLiteTensor* tensor = interpreter_->tensor(input_indices[i]);
RET_CHECK(tensor->dims->size > 0) << absl::StrFormat(
"Input tensor at index [%d] doesn't specify dimensions.",
input_indices[i]);
// Create and bind input buffer. // Create and bind input buffer.
std::vector<int> dims{tensor->dims->data, std::vector<int> dims{tensor->dims->data,
tensor->dims->data + tensor->dims->size}; tensor->dims->data + tensor->dims->size};
@ -266,6 +270,9 @@ absl::Status InferenceCalculatorMetalImpl::CreateConverters(
output_shapes_.resize(output_indices.size()); output_shapes_.resize(output_indices.size());
for (int i = 0; i < output_shapes_.size(); ++i) { for (int i = 0; i < output_shapes_.size(); ++i) {
const TfLiteTensor* tensor = interpreter_->tensor(output_indices[i]); const TfLiteTensor* tensor = interpreter_->tensor(output_indices[i]);
RET_CHECK(tensor->dims->size > 0) << absl::StrFormat(
"Output tensor at index [%d] doesn't specify dimensions.",
output_indices[i]);
RET_CHECK(tensor->dims->size <= 4); RET_CHECK(tensor->dims->size <= 4);
// Create and bind output buffers. // Create and bind output buffers.
// Channels are always padded to multiple of 4. // Channels are always padded to multiple of 4.