Merge Master

Prianka Liz Kariat 2023-01-05 18:16:02 +05:30
commit a223b2f6cc
8 changed files with 100 additions and 20 deletions

View File

@ -37,8 +37,10 @@ class TensorToVectorIntCalculator : public CalculatorBase {
private:
void TokenizeVector(std::vector<int64>* vector) const;
void RemoveOverlapVector(std::vector<int64>* vector);
TensorToVectorIntCalculatorOptions options_;
// Number of leading values to drop from each incoming vector; zero for the
// first vector, options_.overlap() afterwards.
int32_t overlapping_values_;
};
REGISTER_CALCULATOR(TensorToVectorIntCalculator);
@ -66,6 +68,7 @@ absl::Status TensorToVectorIntCalculator::GetContract(CalculatorContract* cc) {
absl::Status TensorToVectorIntCalculator::Open(CalculatorContext* cc) {
options_ = cc->Options<TensorToVectorIntCalculatorOptions>();
overlapping_values_ = 0;
// Inform mediapipe that this calculator produces an output at time t for
// each input received at time t (i.e. this calculator does not buffer
@ -106,6 +109,7 @@ absl::Status TensorToVectorIntCalculator::Process(CalculatorContext* cc) {
}
}
TokenizeVector(&instance_output);
RemoveOverlapVector(&instance_output);
}
cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp());
} else {
@ -128,12 +132,28 @@ absl::Status TensorToVectorIntCalculator::Process(CalculatorContext* cc) {
}
}
TokenizeVector(output.get());
RemoveOverlapVector(output.get());
cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp());
}
return absl::OkStatus();
}
void TensorToVectorIntCalculator::RemoveOverlapVector(
std::vector<int64>* vector) {
if (options_.overlap() <= 0) {
return;
}
if (overlapping_values_ > 0) {
if (vector->size() < static_cast<size_t>(overlapping_values_)) {
vector->clear();
} else {
vector->erase(vector->begin(), vector->begin() + overlapping_values_);
}
}
overlapping_values_ = options_.overlap();
}
void TensorToVectorIntCalculator::TokenizeVector(
std::vector<int64>* vector) const {
if (!options_.tensor_is_token()) {
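For reference, the trimming behavior introduced by RemoveOverlapVector above can be reproduced in isolation. The sketch below is plain C++ with hypothetical names (OverlapTrimmer, Trim) and is not part of the calculator; it mirrors the same state machine: the first vector passes through untouched, and every later vector loses its first `overlap` values (or is cleared entirely if it is shorter than the overlap).

#include <cstdint>
#include <vector>

// Hypothetical standalone illustration of the overlap-removal logic.
class OverlapTrimmer {
 public:
  explicit OverlapTrimmer(int32_t overlap) : overlap_(overlap) {}

  // Drops the leading values that were already emitted with the previous
  // vector. The first call leaves the vector untouched.
  void Trim(std::vector<int64_t>* vector) {
    if (overlap_ <= 0) return;
    if (pending_ > 0) {
      if (vector->size() < static_cast<size_t>(pending_)) {
        vector->clear();
      } else {
        vector->erase(vector->begin(), vector->begin() + pending_);
      }
    }
    pending_ = overlap_;
  }

 private:
  const int32_t overlap_;
  int32_t pending_ = 0;  // 0 until the first vector has been seen.
};

// Example: with overlap = 2, {1, 2, 4, 8, 16} at t = 0 is kept in full,
// while {2, 3, 5, 9, 17} at t = 1 becomes {5, 9, 17}.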

View File

@ -36,4 +36,8 @@ message TensorToVectorIntCalculatorOptions {
optional bool tensor_is_token = 3 [default = false];
// Threshold for the token generation.
optional float token_threshold = 4 [default = 0.5];
// Number of values that overlap between temporally consecutive vectors.
// These leading values are removed from every vector after the first to
// reduce redundancy in the output.
optional int32 overlap = 5 [default = 0];
}
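As a usage sketch (not taken from this change), the new field is set the same way as the existing options. The snippet below assumes the options proto extends CalculatorOptions with the conventional `ext` extension field and that the generated header follows the calculator's file name; the output stream name is illustrative.

#include "mediapipe/framework/calculator.pb.h"
// Assumed path, following MediaPipe naming conventions:
#include "mediapipe/calculators/tensorflow/tensor_to_vector_int_calculator_options.pb.h"

// Hedged sketch: configure the calculator to drop the first two values of
// every vector after the first one.
mediapipe::CalculatorGraphConfig::Node config;
config.set_calculator("TensorToVectorIntCalculator");
config.add_input_stream("input_tensor");
config.add_output_stream("output_vector");  // illustrative stream name
auto* options = config.mutable_options()->MutableExtension(
    mediapipe::TensorToVectorIntCalculatorOptions::ext);  // assumes standard `ext`
options->set_overlap(2);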

View File

@ -28,7 +28,8 @@ namespace tf = ::tensorflow;
class TensorToVectorIntCalculatorTest : public ::testing::Test {
protected:
void SetUpRunner(const bool tensor_is_2d, const bool flatten_nd,
const bool tensor_is_token = false) {
const bool tensor_is_token = false,
const int32_t overlap = 0) {
CalculatorGraphConfig::Node config;
config.set_calculator("TensorToVectorIntCalculator");
config.add_input_stream("input_tensor");
@ -38,6 +39,7 @@ class TensorToVectorIntCalculatorTest : public ::testing::Test {
options->set_tensor_is_2d(tensor_is_2d);
options->set_flatten_nd(flatten_nd);
options->set_tensor_is_token(tensor_is_token);
options->set_overlap(overlap);
runner_ = absl::make_unique<CalculatorRunner>(config);
}
@ -188,5 +190,54 @@ TEST_F(TensorToVectorIntCalculatorTest, FlattenShouldTakeAllDimensions) {
}
}
TEST_F(TensorToVectorIntCalculatorTest, Overlap) {
SetUpRunner(false, false, false, 2);
for (int time = 0; time < 3; ++time) {
const tf::TensorShape tensor_shape(std::vector<tf::int64>{5});
auto tensor = absl::make_unique<tf::Tensor>(tf::DT_INT64, tensor_shape);
auto tensor_vec = tensor->vec<int64>();
for (int i = 0; i < 5; ++i) {
// Use time + 2^i so every element is distinct and easy to verify.
tensor_vec(i) = static_cast<int64>(time + (1 << i));
}
runner_->MutableInputs()->Index(0).packets.push_back(
Adopt(tensor.release()).At(Timestamp(time)));
}
ASSERT_TRUE(runner_->Run().ok());
const std::vector<Packet>& output_packets =
runner_->Outputs().Index(0).packets;
EXPECT_EQ(3, output_packets.size());
{
// First vector in full.
int time = 0;
EXPECT_EQ(time, output_packets[time].Timestamp().Value());
const std::vector<int64>& output_vector =
output_packets[time].Get<std::vector<int64>>();
EXPECT_EQ(5, output_vector.size());
for (int i = 0; i < 5; ++i) {
const int64 expected = static_cast<int64>(time + (1 << i));
EXPECT_EQ(expected, output_vector[i]);
}
}
// All following vectors have the first `overlap` (= 2) values removed,
// leaving {time + 4, time + 8, time + 16}.
for (int time = 1; time < 3; ++time) {
EXPECT_EQ(time, output_packets[time].Timestamp().Value());
const std::vector<int64>& output_vector =
output_packets[time].Get<std::vector<int64>>();
EXPECT_EQ(3, output_vector.size());
for (int i = 0; i < 3; ++i) {
const int64 expected = static_cast<int64>(time + (1 << (i + 2)));
EXPECT_EQ(expected, output_vector[i]);
}
}
}
} // namespace
} // namespace mediapipe

View File

@ -18,6 +18,8 @@ licenses(["notice"])
package(default_visibility = [
"//mediapipe/examples:__subpackages__",
"//photos/editing/mobile/mediapipe/calculators:__subpackages__",
"//photos/editing/mobile/mediapipe/proto:__subpackages__",
])
proto_library(
@ -45,6 +47,8 @@ mediapipe_cc_proto_library(
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = [
"//mediapipe/examples:__subpackages__",
"//photos/editing/mobile/mediapipe/calculators:__pkg__",
"//photos/editing/mobile/mediapipe/calculators:__subpackages__",
],
deps = [":autoflip_messages_proto"],
)

View File

@ -68,8 +68,9 @@ void FillGpuBuffer(GLuint name, std::size_t size,
MP_ASSERT_OK(TFLITE_GPU_CALL_GL(glGetShaderiv, shader, GL_INFO_LOG_LENGTH,
&max_length));
std::vector<GLchar> error_log(max_length);
glGetShaderInfoLog(shader, max_length, &max_length, error_log.data());
glDeleteShader(shader);
MP_ASSERT_OK(TFLITE_GPU_CALL_GL(glGetShaderInfoLog, shader, max_length,
&max_length, error_log.data()));
MP_EXPECT_OK(TFLITE_GPU_CALL_GL(glDeleteShader, shader));
FAIL() << error_log.data();
return;
}
@ -83,8 +84,8 @@ void FillGpuBuffer(GLuint name, std::size_t size,
TFLITE_GPU_CALL_GL(glBindBufferBase, GL_SHADER_STORAGE_BUFFER, 0, name));
MP_ASSERT_OK(TFLITE_GPU_CALL_GL(glUseProgram, to_buffer_program));
MP_ASSERT_OK(TFLITE_GPU_CALL_GL(glDispatchCompute, size / 2, 1, 1));
MP_ASSERT_OK(TFLITE_GPU_CALL_GL(glBindBuffer, GL_SHADER_STORAGE_BUFFER, 0));
MP_ASSERT_OK(TFLITE_GPU_CALL_GL(glDeleteProgram, to_buffer_program));
MP_EXPECT_OK(TFLITE_GPU_CALL_GL(glBindBuffer, GL_SHADER_STORAGE_BUFFER, 0));
MP_EXPECT_OK(TFLITE_GPU_CALL_GL(glDeleteProgram, to_buffer_program));
}
class TensorAhwbGpuTest : public mediapipe::GpuTestBase {
@ -97,18 +98,18 @@ TEST_F(TensorAhwbGpuTest, TestGpuToCpuFloat32) {
{
// Request Ahwb first to get Ahwb storage allocated internally.
auto view = tensor.GetAHardwareBufferWriteView();
EXPECT_NE(view.handle(), nullptr);
ASSERT_NE(view.handle(), nullptr);
view.SetWritingFinishedFD(-1, [](bool) { return true; });
}
RunInGlContext([&tensor] {
auto ssbo_view = tensor.GetOpenGlBufferWriteView();
auto ssbo_name = ssbo_view.name();
EXPECT_GT(ssbo_name, 0);
ASSERT_GT(ssbo_name, 0);
FillGpuBuffer(ssbo_name, tensor.shape().num_elements(),
tensor.element_type());
});
auto ptr = tensor.GetCpuReadView().buffer<float>();
EXPECT_NE(ptr, nullptr);
ASSERT_NE(ptr, nullptr);
std::vector<float> reference;
reference.resize(num_elements);
for (int i = 0; i < num_elements; i++) {
@ -124,18 +125,18 @@ TEST_F(TensorAhwbGpuTest, TestGpuToCpuFloat16) {
{
// Request Ahwb first to get Ahwb storage allocated internally.
auto view = tensor.GetAHardwareBufferWriteView();
EXPECT_NE(view.handle(), nullptr);
ASSERT_NE(view.handle(), nullptr);
view.SetReadingFinishedFunc([](bool) { return true; });
}
RunInGlContext([&tensor] {
auto ssbo_view = tensor.GetOpenGlBufferWriteView();
auto ssbo_name = ssbo_view.name();
EXPECT_GT(ssbo_name, 0);
ASSERT_GT(ssbo_name, 0);
FillGpuBuffer(ssbo_name, tensor.shape().num_elements(),
tensor.element_type());
});
auto ptr = tensor.GetCpuReadView().buffer<Float16>();
EXPECT_NE(ptr, nullptr);
ASSERT_NE(ptr, nullptr);
std::vector<Float16> reference;
reference.resize(num_elements);
for (int i = 0; i < num_elements; i++) {
@ -153,18 +154,18 @@ TEST_F(TensorAhwbGpuTest, TestReplacingCpuByAhwb) {
Tensor tensor{Tensor::ElementType::kFloat32, Tensor::Shape({num_elements})};
{
auto ptr = tensor.GetCpuWriteView().buffer<float>();
EXPECT_NE(ptr, nullptr);
ASSERT_NE(ptr, nullptr);
for (int i = 0; i < num_elements; i++) {
ptr[i] = static_cast<float>(i) / 10.0f;
}
}
{
auto view = tensor.GetAHardwareBufferReadView();
EXPECT_NE(view.handle(), nullptr);
ASSERT_NE(view.handle(), nullptr);
view.SetReadingFinishedFunc([](bool) { return true; });
}
auto ptr = tensor.GetCpuReadView().buffer<float>();
EXPECT_NE(ptr, nullptr);
ASSERT_NE(ptr, nullptr);
std::vector<float> reference;
reference.resize(num_elements);
for (int i = 0; i < num_elements; i++) {
@ -182,17 +183,17 @@ TEST_F(TensorAhwbGpuTest, TestReplacingGpuByAhwb) {
RunInGlContext([&tensor] {
auto ssbo_view = tensor.GetOpenGlBufferWriteView();
auto ssbo_name = ssbo_view.name();
EXPECT_GT(ssbo_name, 0);
ASSERT_GT(ssbo_name, 0);
FillGpuBuffer(ssbo_name, tensor.shape().num_elements(),
tensor.element_type());
});
{
auto view = tensor.GetAHardwareBufferReadView();
EXPECT_NE(view.handle(), nullptr);
ASSERT_NE(view.handle(), nullptr);
view.SetReadingFinishedFunc([](bool) { return true; });
}
auto ptr = tensor.GetCpuReadView().buffer<float>();
EXPECT_NE(ptr, nullptr);
ASSERT_NE(ptr, nullptr);
std::vector<float> reference;
reference.resize(num_elements);
for (int i = 0; i < num_elements; i++) {

View File

@ -34,7 +34,7 @@ TEST(TensorAhwbTest, TestAhwbAlignment) {
Tensor tensor(Tensor::ElementType::kFloat32, Tensor::Shape{5});
{
auto view = tensor.GetAHardwareBufferWriteView(16);
EXPECT_NE(view.handle(), nullptr);
ASSERT_NE(view.handle(), nullptr);
if (__builtin_available(android 26, *)) {
AHardwareBuffer_Desc desc;
AHardwareBuffer_describe(view.handle(), &desc);
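A note on the test changes in the two files above: switching EXPECT_* to ASSERT_* follows the usual GoogleTest distinction. EXPECT_* records a failure and lets the test keep running, while ASSERT_* aborts the current test function, which is the right guard before dereferencing a pointer or using a handle that the check just validated; cleanup-style calls (glDeleteShader, glDeleteProgram, unbinding the buffer) keep the non-fatal MP_EXPECT_OK so teardown still runs. A minimal, self-contained sketch of the pattern (hypothetical helper, not MediaPipe code):

#include <memory>
#include "gtest/gtest.h"

// Hypothetical helper used only for illustration.
std::unique_ptr<int> MaybeAllocate() { return std::make_unique<int>(42); }

TEST(AssertVsExpectSketch, GuardBeforeUse) {
  std::unique_ptr<int> value = MaybeAllocate();
  // ASSERT_NE aborts this test function on failure, so the dereference
  // below can never run against a null pointer.
  ASSERT_NE(value, nullptr);
  // EXPECT_EQ only records a mismatch; execution continues either way.
  EXPECT_EQ(*value, 42);
}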

View File

@ -132,9 +132,9 @@ setuptools.setup(
'Operating System :: MacOS :: MacOS X',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: 3.9',
'Programming Language :: Python :: 3.10',
'Programming Language :: Python :: 3 :: Only',
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Artificial Intelligence',

View File

@ -91,8 +91,8 @@ objc_library(
"//mediapipe/framework:calculator_cc_proto",
"//mediapipe/tasks/cc/core:task_runner",
"//mediapipe/tasks/cc/core:mediapipe_builtin_op_resolver",
"@org_tensorflow//tensorflow/lite/core/api:op_resolver",
"//mediapipe/tasks/ios/common/utils:MPPCommonUtils",
"@org_tensorflow//tensorflow/lite/core/api:op_resolver",
],
)