From b97d11fa76c88cb15b2e336b4c827821a09783c2 Mon Sep 17 00:00:00 2001
From: MediaPipe Team
Date: Tue, 13 Jun 2023 15:00:40 -0700
Subject: [PATCH] Internal MediaPipe Tasks change

PiperOrigin-RevId: 540083633
---
 mediapipe/calculators/tensor/BUILD                      |  2 --
 .../tensor/bert_preprocessor_calculator.cc              |  4 ++--
 .../tensor/inference_interpreter_delegate_runner.cc     | 13 +++++++++++++
 .../tensor/regex_preprocessor_calculator.cc             |  3 +--
 mediapipe/framework/formats/tensor.h                    |  7 +++++++
 mediapipe/framework/formats/tensor_test.cc              | 12 ++++++++++++
 6 files changed, 35 insertions(+), 6 deletions(-)

diff --git a/mediapipe/calculators/tensor/BUILD b/mediapipe/calculators/tensor/BUILD
index 2ad98f28d..a3e61c063 100644
--- a/mediapipe/calculators/tensor/BUILD
+++ b/mediapipe/calculators/tensor/BUILD
@@ -228,7 +228,6 @@ cc_library(
         "//mediapipe/tasks/metadata:metadata_schema_cc",
         "@com_google_absl//absl/container:flat_hash_set",
         "@com_google_absl//absl/status",
-        "@com_google_absl//absl/status:statusor",
         "@com_google_absl//absl/strings",
     ],
     alwayslink = 1,
@@ -280,7 +279,6 @@ cc_library(
         "//mediapipe/tasks/cc/text/tokenizers:tokenizer_utils",
         "//mediapipe/tasks/metadata:metadata_schema_cc",
         "@com_google_absl//absl/status",
-        "@com_google_absl//absl/status:statusor",
     ],
     alwayslink = 1,
 )
diff --git a/mediapipe/calculators/tensor/bert_preprocessor_calculator.cc b/mediapipe/calculators/tensor/bert_preprocessor_calculator.cc
index b56122805..12db1493c 100644
--- a/mediapipe/calculators/tensor/bert_preprocessor_calculator.cc
+++ b/mediapipe/calculators/tensor/bert_preprocessor_calculator.cc
@@ -22,7 +22,6 @@
 
 #include "absl/container/flat_hash_set.h"
 #include "absl/status/status.h"
-#include "absl/status/statusor.h"
 #include "absl/strings/ascii.h"
 #include "absl/strings/string_view.h"
 #include "absl/strings/substitute.h"
@@ -244,7 +243,8 @@ std::vector<Tensor> BertPreprocessorCalculator::GenerateInputTensors(
   input_tensors.reserve(kNumInputTensorsForBert);
   for (int i = 0; i < kNumInputTensorsForBert; ++i) {
     input_tensors.push_back(
-        {Tensor::ElementType::kInt32, Tensor::Shape({tensor_size})});
+        {Tensor::ElementType::kInt32,
+         Tensor::Shape({1, tensor_size}, has_dynamic_input_tensors_)});
   }
   std::memcpy(input_tensors[input_ids_tensor_index_]
                   .GetCpuWriteView()
diff --git a/mediapipe/calculators/tensor/inference_interpreter_delegate_runner.cc b/mediapipe/calculators/tensor/inference_interpreter_delegate_runner.cc
index a2b8a9285..b727f179d 100644
--- a/mediapipe/calculators/tensor/inference_interpreter_delegate_runner.cc
+++ b/mediapipe/calculators/tensor/inference_interpreter_delegate_runner.cc
@@ -96,6 +96,19 @@ absl::StatusOr<std::vector<Tensor>> InferenceInterpreterDelegateRunner::Run(
     CalculatorContext* cc, const std::vector<Tensor>& input_tensors) {
   // Read CPU input into tensors.
   RET_CHECK_EQ(interpreter_->inputs().size(), input_tensors.size());
+
+  // If the input tensors have dynamic shape, then the tensors need to be
+  // resized and reallocated before we can copy the tensor values.
+  bool resized_tensor_shapes = false;
+  for (int i = 0; i < input_tensors.size(); ++i) {
+    if (input_tensors[i].shape().is_dynamic) {
+      interpreter_->ResizeInputTensorStrict(i, input_tensors[i].shape().dims);
+      resized_tensor_shapes = true;
+    }
+  }
+  // Reallocation is needed for memory sanity.
+  if (resized_tensor_shapes) interpreter_->AllocateTensors();
+
   for (int i = 0; i < input_tensors.size(); ++i) {
     const TfLiteType input_tensor_type =
         interpreter_->tensor(interpreter_->inputs()[i])->type;
diff --git a/mediapipe/calculators/tensor/regex_preprocessor_calculator.cc b/mediapipe/calculators/tensor/regex_preprocessor_calculator.cc
index 92a5f0266..8276462ff 100644
--- a/mediapipe/calculators/tensor/regex_preprocessor_calculator.cc
+++ b/mediapipe/calculators/tensor/regex_preprocessor_calculator.cc
@@ -20,7 +20,6 @@
 #include <vector>
 
 #include "absl/status/status.h"
-#include "absl/status/statusor.h"
 #include "mediapipe/calculators/tensor/regex_preprocessor_calculator.pb.h"
 #include "mediapipe/framework/api2/node.h"
 #include "mediapipe/framework/api2/port.h"
@@ -161,7 +160,7 @@ absl::Status RegexPreprocessorCalculator::Process(CalculatorContext* cc) {
   // not found in the tokenizer vocab.
   std::vector<Tensor> result;
   result.push_back(
-      {Tensor::ElementType::kInt32, Tensor::Shape({max_seq_len_})});
+      {Tensor::ElementType::kInt32, Tensor::Shape({1, max_seq_len_})});
   std::memcpy(result[0].GetCpuWriteView().buffer<int32_t>(),
               input_tokens.data(), input_tokens.size() * sizeof(int32_t));
   kTensorsOut(cc).Send(std::move(result));
diff --git a/mediapipe/framework/formats/tensor.h b/mediapipe/framework/formats/tensor.h
index 1d670d805..4f95eb27b 100644
--- a/mediapipe/framework/formats/tensor.h
+++ b/mediapipe/framework/formats/tensor.h
@@ -117,11 +117,18 @@ class Tensor {
     Shape() = default;
     Shape(std::initializer_list<int> dimensions) : dims(dimensions) {}
     Shape(const std::vector<int>& dimensions) : dims(dimensions) {}
+    Shape(std::initializer_list<int> dimensions, bool is_dynamic)
+        : dims(dimensions), is_dynamic(is_dynamic) {}
+    Shape(const std::vector<int>& dimensions, bool is_dynamic)
+        : dims(dimensions), is_dynamic(is_dynamic) {}
     int num_elements() const {
       return std::accumulate(dims.begin(), dims.end(), 1,
                              std::multiplies<int>());
     }
     std::vector<int> dims;
+    // The Tensor has dynamic rather than static shape so the TFLite interpreter
+    // needs to be reallocated. Only relevant for CPU.
+    bool is_dynamic = false;
   };
   // Quantization parameters corresponding to the zero_point and scale value
   // made available by TfLite quantized (uint8/int8) tensors.
diff --git a/mediapipe/framework/formats/tensor_test.cc b/mediapipe/framework/formats/tensor_test.cc
index 4ad4e18eb..468af4ab9 100644
--- a/mediapipe/framework/formats/tensor_test.cc
+++ b/mediapipe/framework/formats/tensor_test.cc
@@ -2,6 +2,7 @@
 
 #include <cstdint>
 #include <utility>
+#include <vector>
 
 #include "mediapipe/framework/port/gmock.h"
 #include "mediapipe/framework/port/gtest.h"
@@ -34,6 +35,17 @@ TEST(General, TestDataTypes) {
   EXPECT_EQ(t_bool.bytes(), t_bool.shape().num_elements() * sizeof(bool));
 }
 
+TEST(General, TestDynamic) {
+  Tensor t1(Tensor::ElementType::kFloat32, Tensor::Shape({1, 2, 3, 4}, true));
+  EXPECT_EQ(t1.shape().num_elements(), 1 * 2 * 3 * 4);
+  EXPECT_TRUE(t1.shape().is_dynamic);
+
+  std::vector<int> t2_dims = {4, 3, 2, 3};
+  Tensor t2(Tensor::ElementType::kFloat16, Tensor::Shape(t2_dims, true));
+  EXPECT_EQ(t2.shape().num_elements(), 4 * 3 * 2 * 3);
+  EXPECT_TRUE(t2.shape().is_dynamic);
+}
+
 TEST(Cpu, TestMemoryAllocation) {
   Tensor t1(Tensor::ElementType::kFloat32, Tensor::Shape{4, 3, 2, 3});
   auto v1 = t1.GetCpuWriteView();
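-- 
Usage note (illustrative, not part of the patch): with this change a calculator
can flag a CPU input tensor's shape as dynamic, and
InferenceInterpreterDelegateRunner will then call ResizeInputTensorStrict() and
AllocateTensors() on the TFLite interpreter before copying the data in. The
sketch below shows how a caller might construct such a tensor; the helper name
MakeDynamicTokenTensor is hypothetical, while the {1, seq_len} shape mirrors
what the BERT and regex preprocessors in this patch emit.

  #include "mediapipe/framework/formats/tensor.h"

  using ::mediapipe::Tensor;

  // Hypothetical helper: builds a [1, seq_len] int32 token-id tensor whose
  // shape is marked dynamic, so the interpreter runner resizes and
  // reallocates the matching TFLite input tensor before memcpy-ing values.
  Tensor MakeDynamicTokenTensor(int seq_len) {
    return Tensor(Tensor::ElementType::kInt32,
                  Tensor::Shape({1, seq_len}, /*is_dynamic=*/true));
  }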