Internal MediaPipe Tasks change

PiperOrigin-RevId: 540083633
This commit is contained in:
MediaPipe Team 2023-06-13 15:00:40 -07:00 committed by Copybara-Service
parent 6cf7148f3b
commit b97d11fa76
6 changed files with 35 additions and 6 deletions

View File

@@ -228,7 +228,6 @@ cc_library(
         "//mediapipe/tasks/metadata:metadata_schema_cc",
         "@com_google_absl//absl/container:flat_hash_set",
         "@com_google_absl//absl/status",
-        "@com_google_absl//absl/status:statusor",
         "@com_google_absl//absl/strings",
     ],
     alwayslink = 1,
@@ -280,7 +279,6 @@ cc_library(
         "//mediapipe/tasks/cc/text/tokenizers:tokenizer_utils",
         "//mediapipe/tasks/metadata:metadata_schema_cc",
         "@com_google_absl//absl/status",
-        "@com_google_absl//absl/status:statusor",
     ],
     alwayslink = 1,
)

View File

@@ -22,7 +22,6 @@
 #include "absl/container/flat_hash_set.h"
 #include "absl/status/status.h"
-#include "absl/status/statusor.h"
 #include "absl/strings/ascii.h"
 #include "absl/strings/string_view.h"
 #include "absl/strings/substitute.h"
@@ -244,7 +243,8 @@ std::vector<Tensor> BertPreprocessorCalculator::GenerateInputTensors(
   input_tensors.reserve(kNumInputTensorsForBert);
   for (int i = 0; i < kNumInputTensorsForBert; ++i) {
     input_tensors.push_back(
-        {Tensor::ElementType::kInt32, Tensor::Shape({tensor_size})});
+        {Tensor::ElementType::kInt32,
+         Tensor::Shape({1, tensor_size}, has_dynamic_input_tensors_)});
   }
   std::memcpy(input_tensors[input_ids_tensor_index_]
                   .GetCpuWriteView()

View File

@@ -96,6 +96,19 @@ absl::StatusOr<std::vector<Tensor>> InferenceInterpreterDelegateRunner::Run(
     CalculatorContext* cc, const std::vector<Tensor>& input_tensors) {
   // Read CPU input into tensors.
   RET_CHECK_EQ(interpreter_->inputs().size(), input_tensors.size());
+
+  // If the input tensors have dynamic shape, then the tensors need to be
+  // resized and reallocated before we can copy the tensor values.
+  bool resized_tensor_shapes = false;
+  for (int i = 0; i < input_tensors.size(); ++i) {
+    if (input_tensors[i].shape().is_dynamic) {
+      interpreter_->ResizeInputTensorStrict(i, input_tensors[i].shape().dims);
+      resized_tensor_shapes = true;
+    }
+  }
+  // Reallocation is needed for memory sanity.
+  if (resized_tensor_shapes) interpreter_->AllocateTensors();
+
   for (int i = 0; i < input_tensors.size(); ++i) {
     const TfLiteType input_tensor_type =
         interpreter_->tensor(interpreter_->inputs()[i])->type;

View File

@@ -20,7 +20,6 @@
 #include <vector>
 
 #include "absl/status/status.h"
-#include "absl/status/statusor.h"
 #include "mediapipe/calculators/tensor/regex_preprocessor_calculator.pb.h"
 #include "mediapipe/framework/api2/node.h"
 #include "mediapipe/framework/api2/port.h"
@@ -161,7 +160,7 @@ absl::Status RegexPreprocessorCalculator::Process(CalculatorContext* cc) {
   // not found in the tokenizer vocab.
   std::vector<Tensor> result;
   result.push_back(
-      {Tensor::ElementType::kInt32, Tensor::Shape({max_seq_len_})});
+      {Tensor::ElementType::kInt32, Tensor::Shape({1, max_seq_len_})});
   std::memcpy(result[0].GetCpuWriteView().buffer<int32_t>(),
               input_tokens.data(), input_tokens.size() * sizeof(int32_t));
   kTensorsOut(cc).Send(std::move(result));

View File

@@ -117,11 +117,18 @@ class Tensor {
     Shape() = default;
     Shape(std::initializer_list<int> dimensions) : dims(dimensions) {}
     Shape(const std::vector<int>& dimensions) : dims(dimensions) {}
+    Shape(std::initializer_list<int> dimensions, bool is_dynamic)
+        : dims(dimensions), is_dynamic(is_dynamic) {}
+    Shape(const std::vector<int>& dimensions, bool is_dynamic)
+        : dims(dimensions), is_dynamic(is_dynamic) {}
     int num_elements() const {
       return std::accumulate(dims.begin(), dims.end(), 1,
                              std::multiplies<int>());
     }
     std::vector<int> dims;
+    // The Tensor has dynamic rather than static shape so the TFLite
+    // interpreter needs to be reallocated. Only relevant for CPU.
+    bool is_dynamic = false;
   };
   // Quantization parameters corresponding to the zero_point and scale value
   // made available by TfLite quantized (uint8/int8) tensors.

View File

@@ -2,6 +2,7 @@
 #include <cstring>
 #include <string>
+#include <vector>
 
 #include "mediapipe/framework/port/gmock.h"
 #include "mediapipe/framework/port/gtest.h"
@@ -34,6 +35,17 @@ TEST(General, TestDataTypes) {
   EXPECT_EQ(t_bool.bytes(), t_bool.shape().num_elements() * sizeof(bool));
 }
 
+TEST(General, TestDynamic) {
+  Tensor t1(Tensor::ElementType::kFloat32, Tensor::Shape({1, 2, 3, 4}, true));
+  EXPECT_EQ(t1.shape().num_elements(), 1 * 2 * 3 * 4);
+  EXPECT_TRUE(t1.shape().is_dynamic);
+
+  std::vector<int> t2_dims = {4, 3, 2, 3};
+  Tensor t2(Tensor::ElementType::kFloat16, Tensor::Shape(t2_dims, true));
+  EXPECT_EQ(t2.shape().num_elements(), 4 * 3 * 2 * 3);
+  EXPECT_TRUE(t2.shape().is_dynamic);
+}
+
 TEST(Cpu, TestMemoryAllocation) {
   Tensor t1(Tensor::ElementType::kFloat32, Tensor::Shape{4, 3, 2, 3});
   auto v1 = t1.GetCpuWriteView();