From e894ae9cf477c5adb27d2aa6121b4aedaf9a8936 Mon Sep 17 00:00:00 2001
From: MediaPipe Team
Date: Thu, 6 Apr 2023 03:13:21 -0700
Subject: [PATCH] Internal change

PiperOrigin-RevId: 522291640
---
 .../graph_tensors_packet_generator.cc         |   2 +-
 .../image_frame_to_tensor_calculator.cc       |   6 +-
 .../image_frame_to_tensor_calculator_test.cc  |  70 +++++------
 .../lapped_tensor_buffer_calculator_test.cc   |   4 +-
 .../matrix_to_tensor_calculator_test.cc       |   4 +-
 ...ection_tensors_to_detections_calculator.cc |   4 +-
 .../pack_media_sequence_calculator.cc         |  10 +-
 .../pack_media_sequence_calculator_test.cc    |   8 +-
 .../tensor_squeeze_dimensions_calculator.cc   |   4 +-
 ...nsor_squeeze_dimensions_calculator_test.cc |   6 +-
 .../tensor_to_image_frame_calculator.cc       |  12 +-
 .../tensor_to_image_frame_calculator_test.cc  |  12 +-
 .../tensorflow/tensor_to_matrix_calculator.cc |   5 +-
 .../tensor_to_matrix_calculator_test.cc       |  10 +-
 .../tensor_to_vector_float_calculator_test.cc |   6 +-
 .../tensor_to_vector_int_calculator.cc        |  29 ++---
 ...tensor_to_vector_string_calculator_test.cc |   6 +-
 .../tensorflow_inference_calculator_test.cc   | 118 +++++++++---------
 ...ow_session_from_frozen_graph_calculator.cc |   4 +-
 ...ssion_from_frozen_graph_calculator_test.cc |   2 +-
 ...low_session_from_frozen_graph_generator.cc |   4 +-
 ...ession_from_frozen_graph_generator_test.cc |   2 +-
 ...ession_from_saved_model_calculator_test.cc |   2 +-
 ...session_from_saved_model_generator_test.cc |   2 +-
 .../unpack_media_sequence_calculator_test.cc  |  10 +-
 .../vector_float_to_tensor_calculator.cc      |   6 +-
 .../vector_int_to_tensor_calculator.cc        |  12 +-
 .../vector_int_to_tensor_calculator_test.cc   |  16 +--
 .../vector_string_to_tensor_calculator.cc     |   6 +-
 ...vector_string_to_tensor_calculator_test.cc |   4 +-
 30 files changed, 195 insertions(+), 191 deletions(-)

diff --git a/mediapipe/calculators/tensorflow/graph_tensors_packet_generator.cc b/mediapipe/calculators/tensorflow/graph_tensors_packet_generator.cc
index 310d412bf..b782c590a 100644
--- a/mediapipe/calculators/tensorflow/graph_tensors_packet_generator.cc
+++ b/mediapipe/calculators/tensorflow/graph_tensors_packet_generator.cc
@@ -59,7 +59,7 @@ class GraphTensorsPacketGenerator : public PacketGenerator {
   for (int i = 0; i < options.tensor_tag_size(); ++i) {
     const std::string& tensor_tag = options.tensor_tag(i);
-    const int32 tensor_num_nodes = options.tensor_num_nodes(i);
+    const int32_t tensor_num_nodes = options.tensor_num_nodes(i);
     (*tensor_map)[tensor_tag] =
         tf::Tensor(tf::DT_FLOAT, tf::TensorShape{1, tensor_num_nodes});
     (*tensor_map)[tensor_tag].flat<float>().setZero();
diff --git a/mediapipe/calculators/tensorflow/image_frame_to_tensor_calculator.cc b/mediapipe/calculators/tensorflow/image_frame_to_tensor_calculator.cc
index cbc9d2aa2..155277d50 100644
--- a/mediapipe/calculators/tensorflow/image_frame_to_tensor_calculator.cc
+++ b/mediapipe/calculators/tensorflow/image_frame_to_tensor_calculator.cc
@@ -40,7 +40,7 @@ std::unique_ptr<tf::Tensor> ImageFrameToNormalizedTensor(
   const int cols = image_frame.Width();
   const int rows = image_frame.Height();
   const int channels = image_frame.NumberOfChannels();
-  const uint8* pixel = image_frame.PixelData();
+  const uint8_t* pixel = image_frame.PixelData();
   const int width_padding = image_frame.WidthStep() - cols * channels;
   auto tensor = ::absl::make_unique<tf::Tensor>(
       tf::DT_FLOAT, tf::TensorShape({rows, cols, channels}));
@@ -188,10 +188,10 @@ absl::Status ImageFrameToTensorCalculator::Process(CalculatorContext* cc) {
   // Copy pixel data from the ImageFrame to the tensor.
if (data_type == tf::DT_UINT8) { - uint8* dst = tensor->flat().data(); + uint8_t* dst = tensor->flat().data(); video_frame.CopyToBuffer(dst, num_components); } else if (data_type == tf::DT_UINT16) { - uint16* dst = tensor->flat().data(); + uint16_t* dst = tensor->flat().data(); video_frame.CopyToBuffer(dst, num_components); } else { float* dst = tensor->flat().data(); diff --git a/mediapipe/calculators/tensorflow/image_frame_to_tensor_calculator_test.cc b/mediapipe/calculators/tensorflow/image_frame_to_tensor_calculator_test.cc index 5acfadd47..4998f4e1f 100644 --- a/mediapipe/calculators/tensorflow/image_frame_to_tensor_calculator_test.cc +++ b/mediapipe/calculators/tensorflow/image_frame_to_tensor_calculator_test.cc @@ -30,8 +30,8 @@ namespace mediapipe { namespace tf = tensorflow; using RandomEngine = std::mt19937_64; -const uint8 kGray8 = 42; -const uint16 kGray16 = 4242; +const uint8_t kGray8 = 42; +const uint16_t kGray16 = 4242; const float kFloat = 42.0; const uint kRed = 255; const uint kGreen = 36; @@ -40,7 +40,7 @@ const uint kAlpha = 42; const int kFixedNoiseWidth = 3; const int kFixedNoiseHeight = 2; -const uint8 kFixedNoiseData[kFixedNoiseWidth * kFixedNoiseHeight * 3] = { +const uint8_t kFixedNoiseData[kFixedNoiseWidth * kFixedNoiseHeight * 3] = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 123, 213, 156, 9, 10, 11, 255, 0, 128}; class ImageFrameToTensorCalculatorTest : public ::testing::Test { @@ -69,8 +69,8 @@ class ImageFrameToTensorCalculatorTest : public ::testing::Test { void AddRGBFrame(int width, int height) { auto image_frame = ::absl::make_unique(ImageFormat::SRGB, width, height); - const uint8 color[] = {kRed, kGreen, kBlue}; - SetToColor(color, image_frame.get()); + const uint8_t color[] = {kRed, kGreen, kBlue}; + SetToColor(color, image_frame.get()); runner_->MutableInputs()->Index(0).packets.push_back( Adopt(image_frame.release()).At(Timestamp(0))); } @@ -79,8 +79,8 @@ class ImageFrameToTensorCalculatorTest : public ::testing::Test { void AddRGBAFrame(int width, int height) { auto image_frame = ::absl::make_unique(ImageFormat::SRGBA, width, height); - const uint8 color[] = {kRed, kGreen, kBlue, kAlpha}; - SetToColor(color, image_frame.get()); + const uint8_t color[] = {kRed, kGreen, kBlue, kAlpha}; + SetToColor(color, image_frame.get()); runner_->MutableInputs()->Index(0).packets.push_back( Adopt(image_frame.release()).At(Timestamp(0))); } @@ -89,8 +89,8 @@ class ImageFrameToTensorCalculatorTest : public ::testing::Test { void AddGray8Frame(int width, int height) { auto image_frame = ::absl::make_unique(ImageFormat::GRAY8, width, height); - const uint8 gray[] = {kGray8}; - SetToColor(gray, image_frame.get()); + const uint8_t gray[] = {kGray8}; + SetToColor(gray, image_frame.get()); runner_->MutableInputs()->Index(0).packets.push_back( Adopt(image_frame.release()).At(Timestamp(0))); } @@ -99,8 +99,8 @@ class ImageFrameToTensorCalculatorTest : public ::testing::Test { void AddGray16Frame(int width, int height) { auto image_frame = ::absl::make_unique(ImageFormat::GRAY16, width, height, 1); - const uint16 gray[] = {kGray16}; - SetToColor(gray, image_frame.get()); + const uint16_t gray[] = {kGray16}; + SetToColor(gray, image_frame.get()); runner_->MutableInputs()->Index(0).packets.push_back( Adopt(image_frame.release()).At(Timestamp(0))); } @@ -121,10 +121,10 @@ class ImageFrameToTensorCalculatorTest : public ::testing::Test { ImageFormat::SRGB, kFixedNoiseWidth, kFixedNoiseHeight); // Copy fixed noise data into the ImageFrame. 
- const uint8* src = kFixedNoiseData; - uint8* pixels = image_frame->MutablePixelData(); + const uint8_t* src = kFixedNoiseData; + uint8_t* pixels = image_frame->MutablePixelData(); for (int y = 0; y < kFixedNoiseHeight; ++y) { - uint8* row = pixels + y * image_frame->WidthStep(); + uint8_t* row = pixels + y * image_frame->WidthStep(); std::memcpy(row, src, kFixedNoiseWidth * 3); src += kFixedNoiseWidth * 3; } @@ -134,7 +134,7 @@ class ImageFrameToTensorCalculatorTest : public ::testing::Test { } // Adds a packet with an 8-bit RGB ImageFrame containing random noise. - void AddRandomRGBFrame(int width, int height, uint32 seed) { + void AddRandomRGBFrame(int width, int height, uint32_t seed) { RandomEngine random(seed); std::uniform_int_distribution uniform_dist{ 0, std::numeric_limits::max()}; @@ -143,9 +143,9 @@ class ImageFrameToTensorCalculatorTest : public ::testing::Test { // Copy "noisy data" into the ImageFrame. const int num_components_per_row = width * image_frame->NumberOfChannels(); - uint8* pixels = image_frame->MutablePixelData(); + uint8_t* pixels = image_frame->MutablePixelData(); for (int y = 0; y < kFixedNoiseHeight; ++y) { - uint8* p = pixels + y * image_frame->WidthStep(); + uint8_t* p = pixels + y * image_frame->WidthStep(); for (int i = 0; i < num_components_per_row; ++i) { p[i] = uniform_dist(random); } @@ -188,8 +188,8 @@ TEST_F(ImageFrameToTensorCalculatorTest, SolidRedRGBFrame) { ASSERT_EQ(3, shape.dim_size(2)); // Verify that the data in the tensor is correct. - const uint8* pixels = - reinterpret_cast(tensor.tensor_data().data()); + const uint8_t* pixels = + reinterpret_cast(tensor.tensor_data().data()); for (int i = 0; i < num_pixels; ++i) { ASSERT_EQ(kRed, pixels[0]); ASSERT_EQ(kGreen, pixels[1]); @@ -229,8 +229,8 @@ TEST_F(ImageFrameToTensorCalculatorTest, SolidRedRGBAFrame) { ASSERT_EQ(4, shape.dim_size(2)); // Verify that the data in the tensor is correct. - const uint8* pixels = - reinterpret_cast(tensor.tensor_data().data()); + const uint8_t* pixels = + reinterpret_cast(tensor.tensor_data().data()); for (int i = 0; i < num_pixels; ++i) { ASSERT_EQ(kRed, pixels[0]); ASSERT_EQ(kGreen, pixels[1]); @@ -271,8 +271,8 @@ TEST_F(ImageFrameToTensorCalculatorTest, SolidGray8Frame) { ASSERT_EQ(1, shape.dim_size(2)); // Verify that the data in the tensor is correct. - const uint8* pixels = - reinterpret_cast(tensor.tensor_data().data()); + const uint8_t* pixels = + reinterpret_cast(tensor.tensor_data().data()); for (int i = 0; i < num_pixels; ++i) { ASSERT_EQ(kGray8, pixels[0]); ++pixels; @@ -310,8 +310,8 @@ TEST_F(ImageFrameToTensorCalculatorTest, SolidGray16Frame) { ASSERT_EQ(1, shape.dim_size(2)); // Verify that the data in the tensor is correct. - const uint16* pixels = - reinterpret_cast(tensor.tensor_data().data()); + const uint16_t* pixels = + reinterpret_cast(tensor.tensor_data().data()); for (int i = 0; i < num_pixels; ++i) { ASSERT_EQ(kGray16, pixels[0]); ++pixels; @@ -381,8 +381,8 @@ TEST_F(ImageFrameToTensorCalculatorTest, FixedNoiseRGBFrame) { // Verify that the data in the tensor is correct. 
const int num_pixels = kFixedNoiseWidth * kFixedNoiseHeight; - const uint8* pixels = - reinterpret_cast(tensor.tensor_data().data()); + const uint8_t* pixels = + reinterpret_cast(tensor.tensor_data().data()); for (int i = 0; i < num_pixels; ++i) { ASSERT_EQ(kFixedNoiseData[i], pixels[i]); } @@ -390,7 +390,7 @@ TEST_F(ImageFrameToTensorCalculatorTest, FixedNoiseRGBFrame) { TEST_F(ImageFrameToTensorCalculatorTest, RandomRGBFrame) { // Run the calculator and verify that one output is generated. - const uint32 seed = 1234; + const uint32_t seed = 1234; const int height = 2; for (int width = 1; width <= 33; ++width) { runner_.reset( @@ -417,10 +417,10 @@ TEST_F(ImageFrameToTensorCalculatorTest, RandomRGBFrame) { std::uniform_int_distribution uniform_dist{ 0, std::numeric_limits::max()}; const int num_pixels = width * height; - const uint8* pixels = - reinterpret_cast(tensor.tensor_data().data()); + const uint8_t* pixels = + reinterpret_cast(tensor.tensor_data().data()); for (int i = 0; i < num_pixels; ++i) { - const uint8 expected = uniform_dist(random); + const uint8_t expected = uniform_dist(random); ASSERT_EQ(expected, pixels[i]); } } @@ -435,8 +435,8 @@ TEST_F(ImageFrameToTensorCalculatorTest, FixedRGBFrameWithMeanAndStddev) { // Create a single pixel image of fixed color #0080ff. auto image_frame = ::absl::make_unique(ImageFormat::SRGB, 1, 1); - const uint8 color[] = {0, 128, 255}; - SetToColor(color, image_frame.get()); + const uint8_t color[] = {0, 128, 255}; + SetToColor(color, image_frame.get()); runner_->MutableInputs()->Index(0).packets.push_back( Adopt(image_frame.release()).At(Timestamp(0))); @@ -464,8 +464,8 @@ TEST_F(ImageFrameToTensorCalculatorTest, FixedRGBFrameWithRepeatMeanAndStddev) { // Create a single pixel image of fixed color #0080ff. 
auto image_frame = ::absl::make_unique(ImageFormat::SRGB, 1, 1); - const uint8 color[] = {0, 128, 255}; - SetToColor(color, image_frame.get()); + const uint8_t color[] = {0, 128, 255}; + SetToColor(color, image_frame.get()); runner_->MutableInputs()->Index(0).packets.push_back( Adopt(image_frame.release()).At(Timestamp(0))); diff --git a/mediapipe/calculators/tensorflow/lapped_tensor_buffer_calculator_test.cc b/mediapipe/calculators/tensorflow/lapped_tensor_buffer_calculator_test.cc index e0e3000d2..6fca40c04 100644 --- a/mediapipe/calculators/tensorflow/lapped_tensor_buffer_calculator_test.cc +++ b/mediapipe/calculators/tensorflow/lapped_tensor_buffer_calculator_test.cc @@ -264,7 +264,7 @@ TEST_F(LappedTensorBufferCalculatorTest, OneToThreeBatchTimestampOffset) { ASSERT_EQ(num_timesteps - buffer_size + 1, output_packets.size()); for (int i = 0; i < num_timesteps - buffer_size + 1; ++i) { for (int j = 0; j < buffer_size; ++j) { - int64 value = output_packets[i].Timestamp().Value(); + int64_t value = output_packets[i].Timestamp().Value(); ASSERT_EQ(i + timestamp_offset, value); } } @@ -294,7 +294,7 @@ TEST_F(LappedTensorBufferCalculatorTest, runner_->Outputs().Index(0).packets; ASSERT_EQ(output_size, output_packets.size()); for (int i = 0; i < output_size; ++i) { - int64 value = output_packets[i].Timestamp().Value(); + int64_t value = output_packets[i].Timestamp().Value(); ASSERT_EQ(i * overlap + timestamp_offset, value); } const std::vector& output_timestamps = diff --git a/mediapipe/calculators/tensorflow/matrix_to_tensor_calculator_test.cc b/mediapipe/calculators/tensorflow/matrix_to_tensor_calculator_test.cc index 3379f86b9..55c327bba 100644 --- a/mediapipe/calculators/tensorflow/matrix_to_tensor_calculator_test.cc +++ b/mediapipe/calculators/tensorflow/matrix_to_tensor_calculator_test.cc @@ -39,7 +39,7 @@ constexpr char kAddDimensionOptionsString[] = namespace tf = tensorflow; using RandomEngine = std::mt19937_64; -const uint32 kSeed = 1234; +const uint32_t kSeed = 1234; const int kNumSizes = 8; const int sizes[kNumSizes][2] = {{1, 1}, {12, 1}, {1, 9}, {2, 2}, {5, 3}, {7, 13}, {16, 32}, {101, 2}}; @@ -47,7 +47,7 @@ const int sizes[kNumSizes][2] = {{1, 1}, {12, 1}, {1, 9}, {2, 2}, class MatrixToTensorCalculatorTest : public ::testing::Test { protected: // Adds a packet with a matrix filled with random values in [0,1]. 
- void AddRandomMatrix(int num_rows, int num_columns, uint32 seed) { + void AddRandomMatrix(int num_rows, int num_columns, uint32_t seed) { RandomEngine random(kSeed); std::uniform_real_distribution<> uniform_dist(0, 1.0); auto matrix = ::absl::make_unique(); diff --git a/mediapipe/calculators/tensorflow/object_detection_tensors_to_detections_calculator.cc b/mediapipe/calculators/tensorflow/object_detection_tensors_to_detections_calculator.cc index 85ebde97a..f85c7bbb0 100644 --- a/mediapipe/calculators/tensorflow/object_detection_tensors_to_detections_calculator.cc +++ b/mediapipe/calculators/tensorflow/object_detection_tensors_to_detections_calculator.cc @@ -137,7 +137,7 @@ class ObjectDetectionTensorsToDetectionsCalculator : public CalculatorBase { const auto& tensor_dim_to_squeeze_field = cc->Options() .tensor_dim_to_squeeze(); - tensor_dims_to_squeeze_ = std::vector( + tensor_dims_to_squeeze_ = std::vector( tensor_dim_to_squeeze_field.begin(), tensor_dim_to_squeeze_field.end()); std::sort(tensor_dims_to_squeeze_.rbegin(), tensor_dims_to_squeeze_.rend()); cc->SetOffset(0); @@ -210,7 +210,7 @@ class ObjectDetectionTensorsToDetectionsCalculator : public CalculatorBase { private: std::map* label_map_; - std::vector tensor_dims_to_squeeze_; + std::vector tensor_dims_to_squeeze_; absl::StatusOr MaybeSqueezeDims(const std::string& tensor_tag, const tf::Tensor& input_tensor) { diff --git a/mediapipe/calculators/tensorflow/pack_media_sequence_calculator.cc b/mediapipe/calculators/tensorflow/pack_media_sequence_calculator.cc index 3f7525c99..34136440d 100644 --- a/mediapipe/calculators/tensorflow/pack_media_sequence_calculator.cc +++ b/mediapipe/calculators/tensorflow/pack_media_sequence_calculator.cc @@ -89,9 +89,9 @@ namespace mpms = mediapipe::mediasequence; // } // } namespace { -uint8 ConvertFloatToByte(const float float_value) { +uint8_t ConvertFloatToByte(const float float_value) { float clamped_value = std::clamp(0.0f, 1.0f, float_value); - return static_cast(clamped_value * 255.0 + .5f); + return static_cast(clamped_value * 255.0 + .5f); } } // namespace @@ -157,7 +157,7 @@ class PackMediaSequenceCalculator : public CalculatorBase { cc->Inputs().Tag(tag).Set>(); } if (absl::StartsWith(tag, kIntFeaturePrefixTag)) { - cc->Inputs().Tag(tag).Set>(); + cc->Inputs().Tag(tag).Set>(); } if (absl::StartsWith(tag, kBytesFeaturePrefixTag)) { cc->Inputs().Tag(tag).Set>(); @@ -285,7 +285,7 @@ class PackMediaSequenceCalculator : public CalculatorBase { } absl::Status VerifySize() { - const int64 MAX_PROTO_BYTES = 1073741823; + const int64_t MAX_PROTO_BYTES = 1073741823; std::string id = mpms::HasExampleId(*sequence_) ? 
mpms::GetExampleId(*sequence_) : "example"; @@ -434,7 +434,7 @@ class PackMediaSequenceCalculator : public CalculatorBase { mpms::AddFeatureTimestamp(key, cc->InputTimestamp().Value(), sequence_.get()); mpms::AddFeatureInts(key, - cc->Inputs().Tag(tag).Get>(), + cc->Inputs().Tag(tag).Get>(), sequence_.get()); } if (absl::StartsWith(tag, kBytesFeaturePrefixTag) && diff --git a/mediapipe/calculators/tensorflow/pack_media_sequence_calculator_test.cc b/mediapipe/calculators/tensorflow/pack_media_sequence_calculator_test.cc index 4e74b06df..752db621e 100644 --- a/mediapipe/calculators/tensorflow/pack_media_sequence_calculator_test.cc +++ b/mediapipe/calculators/tensorflow/pack_media_sequence_calculator_test.cc @@ -227,11 +227,11 @@ TEST_F(PackMediaSequenceCalculatorTest, PacksTwoIntLists) { int num_timesteps = 2; for (int i = 0; i < num_timesteps; ++i) { - auto vi_ptr = ::absl::make_unique>(2, 2 << i); + auto vi_ptr = ::absl::make_unique>(2, 2 << i); runner_->MutableInputs() ->Tag(kIntFeatureTestTag) .packets.push_back(Adopt(vi_ptr.release()).At(Timestamp(i))); - vi_ptr = ::absl::make_unique>(2, 2 << i); + vi_ptr = ::absl::make_unique>(2, 2 << i); runner_->MutableInputs() ->Tag(kIntFeatureOtherTag) .packets.push_back(Adopt(vi_ptr.release()).At(Timestamp(i))); @@ -257,10 +257,10 @@ TEST_F(PackMediaSequenceCalculatorTest, PacksTwoIntLists) { for (int i = 0; i < num_timesteps; ++i) { ASSERT_EQ(i, mpms::GetFeatureTimestampAt("TEST", output_sequence, i)); ASSERT_THAT(mpms::GetFeatureIntsAt("TEST", output_sequence, i), - ::testing::ElementsAreArray(std::vector(2, 2 << i))); + ::testing::ElementsAreArray(std::vector(2, 2 << i))); ASSERT_EQ(i, mpms::GetFeatureTimestampAt("OTHER", output_sequence, i)); ASSERT_THAT(mpms::GetFeatureIntsAt("OTHER", output_sequence, i), - ::testing::ElementsAreArray(std::vector(2, 2 << i))); + ::testing::ElementsAreArray(std::vector(2, 2 << i))); } } diff --git a/mediapipe/calculators/tensorflow/tensor_squeeze_dimensions_calculator.cc b/mediapipe/calculators/tensorflow/tensor_squeeze_dimensions_calculator.cc index cbf494245..ad87297a9 100644 --- a/mediapipe/calculators/tensorflow/tensor_squeeze_dimensions_calculator.cc +++ b/mediapipe/calculators/tensorflow/tensor_squeeze_dimensions_calculator.cc @@ -47,7 +47,7 @@ class TensorSqueezeDimensionsCalculator : public CalculatorBase { << options_.DebugString(); if (options_.dim_size() > 0) { remove_dims_ = - std::vector(options_.dim().begin(), options_.dim().end()); + std::vector(options_.dim().begin(), options_.dim().end()); std::sort(remove_dims_.rbegin(), remove_dims_.rend()); remove_dims_initialized_ = true; } @@ -87,7 +87,7 @@ class TensorSqueezeDimensionsCalculator : public CalculatorBase { private: TensorSqueezeDimensionsCalculatorOptions options_; - std::vector remove_dims_; + std::vector remove_dims_; bool remove_dims_initialized_; void InitializeToRemoveAllSingletonDimensions( diff --git a/mediapipe/calculators/tensorflow/tensor_squeeze_dimensions_calculator_test.cc b/mediapipe/calculators/tensorflow/tensor_squeeze_dimensions_calculator_test.cc index e3b9f7233..83da407ba 100644 --- a/mediapipe/calculators/tensorflow/tensor_squeeze_dimensions_calculator_test.cc +++ b/mediapipe/calculators/tensorflow/tensor_squeeze_dimensions_calculator_test.cc @@ -32,7 +32,7 @@ class TensorSqueezeDimensionsCalculatorTest : public ::testing::Test { // Initialize tensor_ with deterministic values. 
tensor_shape_ = tf::TensorShape(std::vector({1, 3, 1, 3, 1})); tensor_ = tf::Tensor(tf::DT_INT32, tensor_shape_); - auto tensor_values = tensor_.tensor(); + auto tensor_values = tensor_.tensor(); for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { tensor_values(0, i, 0, j, 0) = i * (j + 1); @@ -71,7 +71,7 @@ TEST_F(TensorSqueezeDimensionsCalculatorTest, CanSqueezeAllSingleDimensions) { const tf::Tensor& output_tensor = output_packets[0].Get(); const tf::TensorShape expected_shape(std::vector({3, 3})); EXPECT_EQ(expected_shape.DebugString(), output_tensor.shape().DebugString()); - const auto tensor_values = output_tensor.tensor(); + const auto tensor_values = output_tensor.tensor(); for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { const int expected_value = i * (j + 1); @@ -107,7 +107,7 @@ TEST_F(TensorSqueezeDimensionsCalculatorTest, CanSqueezeSpecifiedDimensions) { const tf::Tensor& output_tensor = output_packets[0].Get(); const tf::TensorShape expected_shape(std::vector({3, 1, 3})); EXPECT_EQ(expected_shape.DebugString(), output_tensor.shape().DebugString()); - const auto tensor_values = output_tensor.tensor(); + const auto tensor_values = output_tensor.tensor(); for (int i = 0; i < 3; ++i) { for (int j = 0; j < 3; ++j) { const int expected_value = i * (j + 1); diff --git a/mediapipe/calculators/tensorflow/tensor_to_image_frame_calculator.cc b/mediapipe/calculators/tensorflow/tensor_to_image_frame_calculator.cc index 200a273f6..34e397b32 100644 --- a/mediapipe/calculators/tensorflow/tensor_to_image_frame_calculator.cc +++ b/mediapipe/calculators/tensorflow/tensor_to_image_frame_calculator.cc @@ -94,7 +94,7 @@ absl::Status TensorToImageFrameCalculator::Open(CalculatorContext* cc) { absl::Status TensorToImageFrameCalculator::Process(CalculatorContext* cc) { const tf::Tensor& input_tensor = cc->Inputs().Tag(kTensor).Get(); - int32 depth = 1; + int32_t depth = 1; if (input_tensor.dims() != 2) { // Depth is 1 for 2D tensors. CHECK(3 == input_tensor.dims()) << "Only 2 or 3-D Tensors can be converted to frames. Instead got: " @@ -104,10 +104,10 @@ absl::Status TensorToImageFrameCalculator::Process(CalculatorContext* cc) { RET_CHECK_EQ(depth, 3) << "Output tensor depth must be 3 or 1."; } } - int32 height = input_tensor.dim_size(0); - int32 width = input_tensor.dim_size(1); + int32_t height = input_tensor.dim_size(0); + int32_t width = input_tensor.dim_size(1); auto format = (depth == 3 ? 
ImageFormat::SRGB : ImageFormat::GRAY8); - const int32 total_size = height * width * depth; + const int32_t total_size = height * width * depth; ::std::unique_ptr output; if (input_tensor.dtype() == tensorflow::DT_FLOAT) { @@ -123,7 +123,7 @@ absl::Status TensorToImageFrameCalculator::Process(CalculatorContext* cc) { } output = ::absl::make_unique( format, width, height, width * depth, buffer.release(), - [total_size](uint8* ptr) { + [total_size](uint8_t* ptr) { ::operator delete[](ptr, total_size, std::align_val_t(EIGEN_MAX_ALIGN_BYTES)); }); @@ -139,7 +139,7 @@ absl::Status TensorToImageFrameCalculator::Process(CalculatorContext* cc) { auto copy = new tf::Tensor(input_tensor); output = ::absl::make_unique( format, width, height, width * depth, copy->flat().data(), - [copy](uint8*) { delete copy; }); + [copy](uint8_t*) { delete copy; }); } else { return absl::InvalidArgumentError( absl::StrCat("Expected float or uint8 tensor, received ", diff --git a/mediapipe/calculators/tensorflow/tensor_to_image_frame_calculator_test.cc b/mediapipe/calculators/tensorflow/tensor_to_image_frame_calculator_test.cc index 88e268907..aee9fee9b 100644 --- a/mediapipe/calculators/tensorflow/tensor_to_image_frame_calculator_test.cc +++ b/mediapipe/calculators/tensorflow/tensor_to_image_frame_calculator_test.cc @@ -64,7 +64,7 @@ TYPED_TEST(TensorToImageFrameCalculatorTest, Converts3DTensorToImageFrame) { tensor_vec[i] = i % 255; } - const int64 time = 1234; + const int64_t time = 1234; runner->MutableInputs()->Tag(kTensor).packets.push_back( Adopt(tensor.release()).At(Timestamp(time))); @@ -79,7 +79,7 @@ TYPED_TEST(TensorToImageFrameCalculatorTest, Converts3DTensorToImageFrame) { EXPECT_EQ(kHeight, output_image.Height()); for (int i = 0; i < kWidth * kHeight * 3; ++i) { - const uint8 pixel_value = output_image.PixelData()[i]; + const uint8_t pixel_value = output_image.PixelData()[i]; EXPECT_EQ(i % 255, pixel_value); } } @@ -100,7 +100,7 @@ TYPED_TEST(TensorToImageFrameCalculatorTest, Converts3DTensorToImageFrameGray) { tensor_vec[i] = i % 255; } - const int64 time = 1234; + const int64_t time = 1234; runner->MutableInputs()->Tag(kTensor).packets.push_back( Adopt(tensor.release()).At(Timestamp(time))); @@ -115,7 +115,7 @@ TYPED_TEST(TensorToImageFrameCalculatorTest, Converts3DTensorToImageFrameGray) { EXPECT_EQ(kHeight, output_image.Height()); for (int i = 0; i < kWidth * kHeight; ++i) { - const uint8 pixel_value = output_image.PixelData()[i]; + const uint8_t pixel_value = output_image.PixelData()[i]; EXPECT_EQ(i % 255, pixel_value); } } @@ -137,7 +137,7 @@ TYPED_TEST(TensorToImageFrameCalculatorTest, tensor_vec[i] = i % 255; } - const int64 time = 1234; + const int64_t time = 1234; runner->MutableInputs()->Tag(kTensor).packets.push_back( Adopt(tensor.release()).At(Timestamp(time))); @@ -152,7 +152,7 @@ TYPED_TEST(TensorToImageFrameCalculatorTest, EXPECT_EQ(kHeight, output_image.Height()); for (int i = 0; i < kWidth * kHeight; ++i) { - const uint8 pixel_value = output_image.PixelData()[i]; + const uint8_t pixel_value = output_image.PixelData()[i]; EXPECT_EQ(i % 255, pixel_value); } } diff --git a/mediapipe/calculators/tensorflow/tensor_to_matrix_calculator.cc b/mediapipe/calculators/tensorflow/tensor_to_matrix_calculator.cc index 85955c43b..081e0c83a 100644 --- a/mediapipe/calculators/tensorflow/tensor_to_matrix_calculator.cc +++ b/mediapipe/calculators/tensorflow/tensor_to_matrix_calculator.cc @@ -193,8 +193,9 @@ absl::Status TensorToMatrixCalculator::Process(CalculatorContext* cc) { const tf::Tensor& 
input_tensor = cc->Inputs().Tag(kTensor).Get(); CHECK(1 == input_tensor.dims() || 2 == input_tensor.dims()) << "Only 1-D or 2-D Tensors can be converted to matrices."; - const int32 length = input_tensor.dim_size(input_tensor.dims() - 1); - const int32 width = (1 == input_tensor.dims()) ? 1 : input_tensor.dim_size(0); + const int32_t length = input_tensor.dim_size(input_tensor.dims() - 1); + const int32_t width = + (1 == input_tensor.dims()) ? 1 : input_tensor.dim_size(0); if (header_.has_num_channels()) { RET_CHECK_EQ(length, header_.num_channels()) << "The number of channels at runtime does not match the header."; diff --git a/mediapipe/calculators/tensorflow/tensor_to_matrix_calculator_test.cc b/mediapipe/calculators/tensorflow/tensor_to_matrix_calculator_test.cc index 67ba5e90a..dbff0c6a8 100644 --- a/mediapipe/calculators/tensorflow/tensor_to_matrix_calculator_test.cc +++ b/mediapipe/calculators/tensorflow/tensor_to_matrix_calculator_test.cc @@ -88,7 +88,7 @@ TEST_F(TensorToMatrixCalculatorTest, Converts1DTensorToMatrix) { tensor_vec(i) = static_cast(1 << i); } - const int64 time = 1234; + const int64_t time = 1234; runner_->MutableInputs()->Tag(kTensor).packets.push_back( Adopt(tensor.release()).At(Timestamp(time))); @@ -114,7 +114,7 @@ TEST_F(TensorToMatrixCalculatorTest, Converts2DTensorofWidthOneToMatrix) { for (int i = 0; i < 4; ++i) { slice(i) = static_cast(1 << i); } - const int64 time = 1234; + const int64_t time = 1234; runner_->MutableInputs()->Tag(kTensor).packets.push_back( Adopt(tensor.release()).At(Timestamp(time))); @@ -143,7 +143,7 @@ TEST_F(TensorToMatrixCalculatorTest, Converts2DTensorToMatrix) { slice(i * 4 + j) = static_cast(i * j); } } - const int64 time = 1234; + const int64_t time = 1234; runner_->MutableInputs()->Tag(kTensor).packets.push_back( Adopt(tensor.release()).At(Timestamp(time))); @@ -174,7 +174,7 @@ TEST_F(TensorToMatrixCalculatorTest, ConvertsWithReferenceTimeSeriesHeader) { tensor_vec(i) = static_cast(1 << i); } - const int64 time = 1234; + const int64_t time = 1234; runner_->MutableInputs()->Tag(kTensor).packets.push_back( Adopt(tensor.release()).At(Timestamp(time))); @@ -206,7 +206,7 @@ TEST_F(TensorToMatrixCalculatorTest, TimeSeriesOverridesWork) { tensor_vec(i) = static_cast(1 << i); } - const int64 time = 1234; + const int64_t time = 1234; runner_->MutableInputs()->Tag(kTensor).packets.push_back( Adopt(tensor.release()).At(Timestamp(time))); diff --git a/mediapipe/calculators/tensorflow/tensor_to_vector_float_calculator_test.cc b/mediapipe/calculators/tensorflow/tensor_to_vector_float_calculator_test.cc index 98ba4f020..c4bc819e5 100644 --- a/mediapipe/calculators/tensorflow/tensor_to_vector_float_calculator_test.cc +++ b/mediapipe/calculators/tensorflow/tensor_to_vector_float_calculator_test.cc @@ -53,7 +53,7 @@ TEST_F(TensorToVectorFloatCalculatorTest, ConvertsToVectorFloat) { tensor_vec(i) = static_cast(1 << i); } - const int64 time = 1234; + const int64_t time = 1234; runner_->MutableInputs()->Index(0).packets.push_back( Adopt(tensor.release()).At(Timestamp(time))); @@ -82,7 +82,7 @@ TEST_F(TensorToVectorFloatCalculatorTest, ConvertsBatchedToVectorVectorFloat) { slice(i) = static_cast(1 << i); } - const int64 time = 1234; + const int64_t time = 1234; runner_->MutableInputs()->Index(0).packets.push_back( Adopt(tensor.release()).At(Timestamp(time))); @@ -112,7 +112,7 @@ TEST_F(TensorToVectorFloatCalculatorTest, FlattenShouldTakeAllDimensions) { slice(i) = static_cast(1 << i); } - const int64 time = 1234; + const int64_t time = 1234; 
runner_->MutableInputs()->Index(0).packets.push_back( Adopt(tensor.release()).At(Timestamp(time))); diff --git a/mediapipe/calculators/tensorflow/tensor_to_vector_int_calculator.cc b/mediapipe/calculators/tensorflow/tensor_to_vector_int_calculator.cc index f92ddf08d..7adb26daa 100644 --- a/mediapipe/calculators/tensorflow/tensor_to_vector_int_calculator.cc +++ b/mediapipe/calculators/tensorflow/tensor_to_vector_int_calculator.cc @@ -36,8 +36,8 @@ class TensorToVectorIntCalculator : public CalculatorBase { absl::Status Process(CalculatorContext* cc) override; private: - void TokenizeVector(std::vector* vector) const; - void RemoveOverlapVector(std::vector* vector); + void TokenizeVector(std::vector* vector) const; + void RemoveOverlapVector(std::vector* vector); TensorToVectorIntCalculatorOptions options_; int32_t overlapping_values_; @@ -56,10 +56,10 @@ absl::Status TensorToVectorIntCalculator::GetContract(CalculatorContract* cc) { const auto& options = cc->Options(); if (options.tensor_is_2d()) { RET_CHECK(!options.flatten_nd()); - cc->Outputs().Index(0).Set>>( + cc->Outputs().Index(0).Set>>( /* "Output vector>." */); } else { - cc->Outputs().Index(0).Set>( + cc->Outputs().Index(0).Set>( // Output vector. ); } @@ -91,19 +91,20 @@ absl::Status TensorToVectorIntCalculator::Process(CalculatorContext* cc) { RET_CHECK(2 == input_tensor.dims()) << "Expected 2-dimensional Tensor, but the tensor shape is: " << input_tensor.shape().DebugString(); - auto output = absl::make_unique>>( - input_tensor.dim_size(0), std::vector(input_tensor.dim_size(1))); + auto output = absl::make_unique>>( + input_tensor.dim_size(0), + std::vector(input_tensor.dim_size(1))); for (int i = 0; i < input_tensor.dim_size(0); ++i) { auto& instance_output = output->at(i); if (tf::DT_INT32 == input_tensor.dtype()) { const auto& slice = - input_tensor.Slice(i, i + 1).unaligned_flat(); + input_tensor.Slice(i, i + 1).unaligned_flat(); for (int j = 0; j < input_tensor.dim_size(1); ++j) { instance_output.at(j) = slice(j); } } else { const auto& slice = - input_tensor.Slice(i, i + 1).unaligned_flat(); + input_tensor.Slice(i, i + 1).unaligned_flat(); for (int j = 0; j < input_tensor.dim_size(1); ++j) { instance_output.at(j) = slice(j); } @@ -119,14 +120,14 @@ absl::Status TensorToVectorIntCalculator::Process(CalculatorContext* cc) { << "tensor shape is: " << input_tensor.shape().DebugString(); } auto output = - absl::make_unique>(input_tensor.NumElements()); + absl::make_unique>(input_tensor.NumElements()); if (tf::DT_INT32 == input_tensor.dtype()) { - const auto& tensor_values = input_tensor.flat(); + const auto& tensor_values = input_tensor.flat(); for (int i = 0; i < input_tensor.NumElements(); ++i) { output->at(i) = tensor_values(i); } } else { - const auto& tensor_values = input_tensor.flat(); + const auto& tensor_values = input_tensor.flat(); for (int i = 0; i < input_tensor.NumElements(); ++i) { output->at(i) = tensor_values(i); } @@ -140,7 +141,7 @@ absl::Status TensorToVectorIntCalculator::Process(CalculatorContext* cc) { } void TensorToVectorIntCalculator::RemoveOverlapVector( - std::vector* vector) { + std::vector* vector) { if (options_.overlap() <= 0) { return; } @@ -155,11 +156,11 @@ void TensorToVectorIntCalculator::RemoveOverlapVector( } void TensorToVectorIntCalculator::TokenizeVector( - std::vector* vector) const { + std::vector* vector) const { if (!options_.tensor_is_token()) { return; } - std::vector tokens; + std::vector tokens; for (int i = 0; i < vector->size(); ++i) { if (vector->at(i) > 
options_.token_threshold()) { tokens.push_back(i + 1); diff --git a/mediapipe/calculators/tensorflow/tensor_to_vector_string_calculator_test.cc b/mediapipe/calculators/tensorflow/tensor_to_vector_string_calculator_test.cc index 94dd9374d..dabaf8fb4 100644 --- a/mediapipe/calculators/tensorflow/tensor_to_vector_string_calculator_test.cc +++ b/mediapipe/calculators/tensorflow/tensor_to_vector_string_calculator_test.cc @@ -51,7 +51,7 @@ TEST_F(TensorToVectorStringCalculatorTest, ConvertsToVectorFloat) { tensor_vec(i) = absl::StrCat("foo", i); } - const int64 time = 1234; + const int64_t time = 1234; runner_->MutableInputs()->Index(0).packets.push_back( Adopt(tensor.release()).At(Timestamp(time))); @@ -79,7 +79,7 @@ TEST_F(TensorToVectorStringCalculatorTest, ConvertsBatchedToVectorVectorFloat) { slice(i) = absl::StrCat("foo", i); } - const int64 time = 1234; + const int64_t time = 1234; runner_->MutableInputs()->Index(0).packets.push_back( Adopt(tensor.release()).At(Timestamp(time))); @@ -108,7 +108,7 @@ TEST_F(TensorToVectorStringCalculatorTest, FlattenShouldTakeAllDimensions) { slice(i) = absl::StrCat("foo", i); } - const int64 time = 1234; + const int64_t time = 1234; runner_->MutableInputs()->Index(0).packets.push_back( Adopt(tensor.release()).At(Timestamp(time))); diff --git a/mediapipe/calculators/tensorflow/tensorflow_inference_calculator_test.cc b/mediapipe/calculators/tensorflow/tensorflow_inference_calculator_test.cc index 3598f09cc..c93008373 100644 --- a/mediapipe/calculators/tensorflow/tensorflow_inference_calculator_test.cc +++ b/mediapipe/calculators/tensorflow/tensorflow_inference_calculator_test.cc @@ -98,19 +98,19 @@ class TensorflowInferenceCalculatorTest : public ::testing::Test { output_side_packets.Tag(kSessionTag); } - Packet CreateTensorPacket(const std::vector& input, int64 time) { + Packet CreateTensorPacket(const std::vector& input, int64_t time) { tf::TensorShape tensor_shape; tensor_shape.AddDim(input.size()); auto tensor = absl::make_unique(tf::DT_INT32, tensor_shape); for (int i = 0; i < input.size(); ++i) { - tensor->vec()(i) = input[i]; + tensor->vec()(i) = input[i]; } return Adopt(tensor.release()).At(Timestamp(time)); } // Create tensor from Vector and add as a Packet to the provided tag as input. 
- void AddVectorToInputsAsTensor(const std::vector& input, - const std::string& tag, int64 time) { + void AddVectorToInputsAsTensor(const std::vector& input, + const std::string& tag, int64_t time) { runner_->MutableInputs()->Tag(tag).packets.push_back( CreateTensorPacket(input, time)); } @@ -152,15 +152,15 @@ TEST_F(TensorflowInferenceCalculatorTest, GetConstants) { ASSERT_EQ(output_packets_b.size(), 1); const tf::Tensor& tensor_b = output_packets_b[0].Get(); tf::TensorShape expected_shape({1, 3}); - auto expected_tensor = tf::test::AsTensor({3, 2, 1}, expected_shape); - tf::test::ExpectTensorEqual(expected_tensor, tensor_b); + auto expected_tensor = tf::test::AsTensor({3, 2, 1}, expected_shape); + tf::test::ExpectTensorEqual(expected_tensor, tensor_b); const std::vector& output_packets_mult = runner_->Outputs().Tag(kMultipliedTag).packets; ASSERT_EQ(1, output_packets_mult.size()); const tf::Tensor& tensor_mult = output_packets_mult[0].Get(); - expected_tensor = tf::test::AsTensor({0, 0, 0}, expected_shape); - tf::test::ExpectTensorEqual(expected_tensor, tensor_mult); + expected_tensor = tf::test::AsTensor({0, 0, 0}, expected_shape); + tf::test::ExpectTensorEqual(expected_tensor, tensor_mult); EXPECT_EQ(1, runner_ ->GetCounter( @@ -193,8 +193,9 @@ TEST_F(TensorflowInferenceCalculatorTest, GetComputed) { ASSERT_EQ(1, output_packets_mult.size()); const tf::Tensor& tensor_mult = output_packets_mult[0].Get(); tf::TensorShape expected_shape({3}); - auto expected_tensor = tf::test::AsTensor({6, 8, 10}, expected_shape); - tf::test::ExpectTensorEqual(expected_tensor, tensor_mult); + auto expected_tensor = + tf::test::AsTensor({6, 8, 10}, expected_shape); + tf::test::ExpectTensorEqual(expected_tensor, tensor_mult); // Add only one of the two expected tensors at the next timestamp, expect // useful failure message. @@ -232,8 +233,9 @@ TEST_F(TensorflowInferenceCalculatorTest, GetComputed_MaxInFlight) { ASSERT_EQ(1, output_packets_mult.size()); const tf::Tensor& tensor_mult = output_packets_mult[0].Get(); tf::TensorShape expected_shape({3}); - auto expected_tensor = tf::test::AsTensor({6, 8, 10}, expected_shape); - tf::test::ExpectTensorEqual(expected_tensor, tensor_mult); + auto expected_tensor = + tf::test::AsTensor({6, 8, 10}, expected_shape); + tf::test::ExpectTensorEqual(expected_tensor, tensor_mult); // Add only one of the two expected tensors at the next timestamp, expect // useful failure message. 
@@ -290,11 +292,11 @@ TEST_F(TensorflowInferenceCalculatorTest, GetMultiBatchComputed) { runner_->Outputs().Tag(kMultipliedTag).packets; ASSERT_EQ(2, output_packets_mult.size()); const tf::Tensor& tensor_mult = output_packets_mult[0].Get(); - auto expected_tensor = tf::test::AsTensor({6, 8, 10}); - tf::test::ExpectTensorEqual(tensor_mult, expected_tensor); + auto expected_tensor = tf::test::AsTensor({6, 8, 10}); + tf::test::ExpectTensorEqual(tensor_mult, expected_tensor); const tf::Tensor& tensor_mult1 = output_packets_mult[1].Get(); - auto expected_tensor1 = tf::test::AsTensor({9, 12, 15}); - tf::test::ExpectTensorEqual(tensor_mult1, expected_tensor1); + auto expected_tensor1 = tf::test::AsTensor({9, 12, 15}); + tf::test::ExpectTensorEqual(tensor_mult1, expected_tensor1); EXPECT_EQ(2, runner_ ->GetCounter( @@ -327,11 +329,11 @@ TEST_F(TensorflowInferenceCalculatorTest, GetMultiBatchComputed_MaxInFlight) { runner_->Outputs().Tag(kMultipliedTag).packets; ASSERT_EQ(2, output_packets_mult.size()); const tf::Tensor& tensor_mult = output_packets_mult[0].Get(); - auto expected_tensor = tf::test::AsTensor({6, 8, 10}); - tf::test::ExpectTensorEqual(tensor_mult, expected_tensor); + auto expected_tensor = tf::test::AsTensor({6, 8, 10}); + tf::test::ExpectTensorEqual(tensor_mult, expected_tensor); const tf::Tensor& tensor_mult1 = output_packets_mult[1].Get(); - auto expected_tensor1 = tf::test::AsTensor({9, 12, 15}); - tf::test::ExpectTensorEqual(tensor_mult1, expected_tensor1); + auto expected_tensor1 = tf::test::AsTensor({9, 12, 15}); + tf::test::ExpectTensorEqual(tensor_mult1, expected_tensor1); EXPECT_EQ(2, runner_ ->GetCounter( @@ -367,14 +369,14 @@ TEST_F(TensorflowInferenceCalculatorTest, runner_->Outputs().Tag(kMultipliedTag).packets; ASSERT_EQ(3, output_packets_mult.size()); const tf::Tensor& tensor_mult = output_packets_mult[0].Get(); - auto expected_tensor = tf::test::AsTensor({6, 8, 10}); - tf::test::ExpectTensorEqual(tensor_mult, expected_tensor); + auto expected_tensor = tf::test::AsTensor({6, 8, 10}); + tf::test::ExpectTensorEqual(tensor_mult, expected_tensor); const tf::Tensor& tensor_mult1 = output_packets_mult[1].Get(); - auto expected_tensor1 = tf::test::AsTensor({9, 12, 15}); - tf::test::ExpectTensorEqual(tensor_mult1, expected_tensor1); + auto expected_tensor1 = tf::test::AsTensor({9, 12, 15}); + tf::test::ExpectTensorEqual(tensor_mult1, expected_tensor1); const tf::Tensor& tensor_mult2 = output_packets_mult[2].Get(); - auto expected_tensor2 = tf::test::AsTensor({12, 16, 20}); - tf::test::ExpectTensorEqual(tensor_mult2, expected_tensor2); + auto expected_tensor2 = tf::test::AsTensor({12, 16, 20}); + tf::test::ExpectTensorEqual(tensor_mult2, expected_tensor2); EXPECT_EQ(3, runner_ ->GetCounter( @@ -408,11 +410,11 @@ TEST_F(TensorflowInferenceCalculatorTest, GetSingleBatchComputed) { runner_->Outputs().Tag(kMultipliedTag).packets; ASSERT_EQ(2, output_packets_mult.size()); const tf::Tensor& tensor_mult = output_packets_mult[0].Get(); - auto expected_tensor = tf::test::AsTensor({6, 8, 10}); - tf::test::ExpectTensorEqual(tensor_mult, expected_tensor); + auto expected_tensor = tf::test::AsTensor({6, 8, 10}); + tf::test::ExpectTensorEqual(tensor_mult, expected_tensor); const tf::Tensor& tensor_mult1 = output_packets_mult[1].Get(); - auto expected_tensor1 = tf::test::AsTensor({9, 12, 15}); - tf::test::ExpectTensorEqual(tensor_mult1, expected_tensor1); + auto expected_tensor1 = tf::test::AsTensor({9, 12, 15}); + tf::test::ExpectTensorEqual(tensor_mult1, expected_tensor1); EXPECT_EQ(2, 
runner_ ->GetCounter( @@ -446,11 +448,11 @@ TEST_F(TensorflowInferenceCalculatorTest, GetCloseBatchComputed) { runner_->Outputs().Tag(kMultipliedTag).packets; ASSERT_EQ(2, output_packets_mult.size()); const tf::Tensor& tensor_mult = output_packets_mult[0].Get(); - auto expected_tensor = tf::test::AsTensor({6, 8, 10}); - tf::test::ExpectTensorEqual(tensor_mult, expected_tensor); + auto expected_tensor = tf::test::AsTensor({6, 8, 10}); + tf::test::ExpectTensorEqual(tensor_mult, expected_tensor); const tf::Tensor& tensor_mult1 = output_packets_mult[1].Get(); - auto expected_tensor1 = tf::test::AsTensor({9, 12, 15}); - tf::test::ExpectTensorEqual(tensor_mult1, expected_tensor1); + auto expected_tensor1 = tf::test::AsTensor({9, 12, 15}); + tf::test::ExpectTensorEqual(tensor_mult1, expected_tensor1); EXPECT_EQ(2, runner_ ->GetCounter( @@ -486,11 +488,11 @@ TEST_F(TensorflowInferenceCalculatorTest, GetCloseBatchComputedNoPadding) { runner_->Outputs().Tag(kMultipliedTag).packets; ASSERT_EQ(2, output_packets_mult.size()); const tf::Tensor& tensor_mult = output_packets_mult[0].Get(); - auto expected_tensor = tf::test::AsTensor({6, 8, 10}); - tf::test::ExpectTensorEqual(tensor_mult, expected_tensor); + auto expected_tensor = tf::test::AsTensor({6, 8, 10}); + tf::test::ExpectTensorEqual(tensor_mult, expected_tensor); const tf::Tensor& tensor_mult1 = output_packets_mult[1].Get(); - auto expected_tensor1 = tf::test::AsTensor({9, 12, 15}); - tf::test::ExpectTensorEqual(tensor_mult1, expected_tensor1); + auto expected_tensor1 = tf::test::AsTensor({9, 12, 15}); + tf::test::ExpectTensorEqual(tensor_mult1, expected_tensor1); EXPECT_EQ(2, runner_ ->GetCounter( @@ -537,20 +539,20 @@ TEST_F(TensorflowInferenceCalculatorTest, GetBatchComputed_MaxInFlight) { runner_->Outputs().Tag(kMultipliedTag).packets; ASSERT_EQ(5, output_packets_mult.size()); const tf::Tensor& tensor_mult = output_packets_mult[0].Get(); - auto expected_tensor = tf::test::AsTensor({6, 8, 10}); - tf::test::ExpectTensorEqual(tensor_mult, expected_tensor); + auto expected_tensor = tf::test::AsTensor({6, 8, 10}); + tf::test::ExpectTensorEqual(tensor_mult, expected_tensor); const tf::Tensor& tensor_mult1 = output_packets_mult[1].Get(); - auto expected_tensor1 = tf::test::AsTensor({9, 12, 15}); - tf::test::ExpectTensorEqual(tensor_mult1, expected_tensor1); + auto expected_tensor1 = tf::test::AsTensor({9, 12, 15}); + tf::test::ExpectTensorEqual(tensor_mult1, expected_tensor1); const tf::Tensor& tensor_mult2 = output_packets_mult[2].Get(); - auto expected_tensor2 = tf::test::AsTensor({12, 16, 20}); - tf::test::ExpectTensorEqual(tensor_mult2, expected_tensor2); + auto expected_tensor2 = tf::test::AsTensor({12, 16, 20}); + tf::test::ExpectTensorEqual(tensor_mult2, expected_tensor2); const tf::Tensor& tensor_mult3 = output_packets_mult[3].Get(); - auto expected_tensor3 = tf::test::AsTensor({15, 20, 25}); - tf::test::ExpectTensorEqual(tensor_mult3, expected_tensor3); + auto expected_tensor3 = tf::test::AsTensor({15, 20, 25}); + tf::test::ExpectTensorEqual(tensor_mult3, expected_tensor3); const tf::Tensor& tensor_mult4 = output_packets_mult[4].Get(); - auto expected_tensor4 = tf::test::AsTensor({18, 24, 30}); - tf::test::ExpectTensorEqual(tensor_mult4, expected_tensor4); + auto expected_tensor4 = tf::test::AsTensor({18, 24, 30}); + tf::test::ExpectTensorEqual(tensor_mult4, expected_tensor4); EXPECT_EQ(5, runner_ ->GetCounter( @@ -585,12 +587,12 @@ TEST_F(TensorflowInferenceCalculatorTest, TestRecurrentStates) { ASSERT_EQ(2, output_packets_mult.size()); 
const tf::Tensor& tensor_mult = output_packets_mult[0].Get(); LOG(INFO) << "timestamp: " << 0; - auto expected_tensor = tf::test::AsTensor({3, 8, 15}); - tf::test::ExpectTensorEqual(tensor_mult, expected_tensor); + auto expected_tensor = tf::test::AsTensor({3, 8, 15}); + tf::test::ExpectTensorEqual(tensor_mult, expected_tensor); const tf::Tensor& tensor_mult1 = output_packets_mult[1].Get(); - auto expected_tensor1 = tf::test::AsTensor({9, 32, 75}); + auto expected_tensor1 = tf::test::AsTensor({9, 32, 75}); LOG(INFO) << "timestamp: " << 1; - tf::test::ExpectTensorEqual(tensor_mult1, expected_tensor1); + tf::test::ExpectTensorEqual(tensor_mult1, expected_tensor1); EXPECT_EQ(2, runner_ ->GetCounter( @@ -626,12 +628,12 @@ TEST_F(TensorflowInferenceCalculatorTest, TestRecurrentStateOverride) { ASSERT_EQ(2, output_packets_mult.size()); const tf::Tensor& tensor_mult = output_packets_mult[0].Get(); LOG(INFO) << "timestamp: " << 0; - auto expected_tensor = tf::test::AsTensor({3, 4, 5}); - tf::test::ExpectTensorEqual(tensor_mult, expected_tensor); + auto expected_tensor = tf::test::AsTensor({3, 4, 5}); + tf::test::ExpectTensorEqual(tensor_mult, expected_tensor); const tf::Tensor& tensor_mult1 = output_packets_mult[1].Get(); - auto expected_tensor1 = tf::test::AsTensor({3, 4, 5}); + auto expected_tensor1 = tf::test::AsTensor({3, 4, 5}); LOG(INFO) << "timestamp: " << 1; - tf::test::ExpectTensorEqual(tensor_mult1, expected_tensor1); + tf::test::ExpectTensorEqual(tensor_mult1, expected_tensor1); EXPECT_EQ(2, runner_ ->GetCounter( @@ -747,8 +749,8 @@ TEST_F(TensorflowInferenceCalculatorTest, runner_->Outputs().Tag(kMultipliedTag).packets; ASSERT_EQ(1, output_packets_mult.size()); const tf::Tensor& tensor_mult = output_packets_mult[0].Get(); - auto expected_tensor = tf::test::AsTensor({9, 12, 15}); - tf::test::ExpectTensorEqual(tensor_mult, expected_tensor); + auto expected_tensor = tf::test::AsTensor({9, 12, 15}); + tf::test::ExpectTensorEqual(tensor_mult, expected_tensor); EXPECT_EQ(1, runner_ ->GetCounter( diff --git a/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_calculator.cc b/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_calculator.cc index 1d6b9417b..1bb2c41fc 100644 --- a/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_calculator.cc +++ b/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_calculator.cc @@ -102,7 +102,7 @@ class TensorFlowSessionFromFrozenGraphCalculator : public CalculatorBase { absl::Status Open(CalculatorContext* cc) override { auto clock = std::unique_ptr( mediapipe::MonotonicClock::CreateSynchronizedMonotonicClock()); - const uint64 start_time = absl::ToUnixMicros(clock->TimeNow()); + const uint64_t start_time = absl::ToUnixMicros(clock->TimeNow()); const auto& options = cc->Options(); // Output bundle packet. 
@@ -155,7 +155,7 @@ class TensorFlowSessionFromFrozenGraphCalculator : public CalculatorBase { } cc->OutputSidePackets().Tag(kSessionTag).Set(Adopt(session.release())); - const uint64 end_time = absl::ToUnixMicros(clock->TimeNow()); + const uint64_t end_time = absl::ToUnixMicros(clock->TimeNow()); LOG(INFO) << "Loaded frozen model in: " << end_time - start_time << " microseconds."; return absl::OkStatus(); diff --git a/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_calculator_test.cc b/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_calculator_test.cc index f0f8928db..a0dbef491 100644 --- a/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_calculator_test.cc +++ b/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_calculator_test.cc @@ -51,7 +51,7 @@ std::string GetGraphDefPath() { tf::Tensor TensorMatrix1x3(const int v1, const int v2, const int v3) { tf::Tensor tensor(tf::DT_INT32, tf::TensorShape(std::vector({1, 3}))); - auto matrix = tensor.matrix(); + auto matrix = tensor.matrix(); matrix(0, 0) = v1; matrix(0, 1) = v2; matrix(0, 2) = v3; diff --git a/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_generator.cc b/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_generator.cc index d194564a6..dc39458da 100644 --- a/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_generator.cc +++ b/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_generator.cc @@ -101,7 +101,7 @@ class TensorFlowSessionFromFrozenGraphGenerator : public PacketGenerator { const PacketSet& input_side_packets, PacketSet* output_side_packets) { auto clock = std::unique_ptr( mediapipe::MonotonicClock::CreateSynchronizedMonotonicClock()); - const uint64 start_time = absl::ToUnixMicros(clock->TimeNow()); + const uint64_t start_time = absl::ToUnixMicros(clock->TimeNow()); const TensorFlowSessionFromFrozenGraphGeneratorOptions& options = packet_generator_options.GetExtension( TensorFlowSessionFromFrozenGraphGeneratorOptions::ext); @@ -154,7 +154,7 @@ class TensorFlowSessionFromFrozenGraphGenerator : public PacketGenerator { } output_side_packets->Tag(kSessionTag) = Adopt(session.release()); - const uint64 end_time = absl::ToUnixMicros(clock->TimeNow()); + const uint64_t end_time = absl::ToUnixMicros(clock->TimeNow()); LOG(INFO) << "Loaded frozen model in: " << end_time - start_time << " microseconds."; return absl::OkStatus(); diff --git a/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_generator_test.cc b/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_generator_test.cc index 83f947a0c..ede321372 100644 --- a/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_generator_test.cc +++ b/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_generator_test.cc @@ -51,7 +51,7 @@ std::string GetGraphDefPath() { tf::Tensor TensorMatrix1x3(const int v1, const int v2, const int v3) { tf::Tensor tensor(tf::DT_INT32, tf::TensorShape(std::vector({1, 3}))); - auto matrix = tensor.matrix(); + auto matrix = tensor.matrix(); matrix(0, 0) = v1; matrix(0, 1) = v2; matrix(0, 2) = v3; diff --git a/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_calculator_test.cc b/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_calculator_test.cc index e1809a017..5d5766251 100644 --- a/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_calculator_test.cc +++ 
b/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_calculator_test.cc @@ -49,7 +49,7 @@ std::string GetSavedModelDir() { tf::Tensor TensorMatrix1x3(const int v1, const int v2, const int v3) { tf::Tensor tensor(tf::DT_INT32, tf::TensorShape(std::vector({1, 3}))); - auto matrix = tensor.matrix(); + auto matrix = tensor.matrix(); matrix(0, 0) = v1; matrix(0, 1) = v2; matrix(0, 2) = v3; diff --git a/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_generator_test.cc b/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_generator_test.cc index c002b1bde..e24d454a8 100644 --- a/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_generator_test.cc +++ b/mediapipe/calculators/tensorflow/tensorflow_session_from_saved_model_generator_test.cc @@ -50,7 +50,7 @@ std::string GetSavedModelDir() { tf::Tensor TensorMatrix1x3(const int v1, const int v2, const int v3) { tf::Tensor tensor(tf::DT_INT32, tf::TensorShape(std::vector({1, 3}))); - auto matrix = tensor.matrix(); + auto matrix = tensor.matrix(); matrix(0, 0) = v1; matrix(0, 1) = v2; matrix(0, 2) = v3; diff --git a/mediapipe/calculators/tensorflow/unpack_media_sequence_calculator_test.cc b/mediapipe/calculators/tensorflow/unpack_media_sequence_calculator_test.cc index fbf775403..b7a7d4496 100644 --- a/mediapipe/calculators/tensorflow/unpack_media_sequence_calculator_test.cc +++ b/mediapipe/calculators/tensorflow/unpack_media_sequence_calculator_test.cc @@ -101,10 +101,10 @@ class UnpackMediaSequenceCalculatorTest : public ::testing::Test { std::unique_ptr runner_; const std::string video_id_ = "test_video_id"; const std::string data_path_ = "test_directory"; - const int64 start_time_ = 3000000; - const int64 end_time_ = 5000000; + const int64_t start_time_ = 3000000; + const int64_t end_time_ = 5000000; const std::string encoded_video_data_ = "encoded_video_data"; - const int64 encoded_video_start_timestamp_ = 1000000; + const int64_t encoded_video_start_timestamp_ = 1000000; const double image_frame_rate_ = 1.0; }; @@ -220,7 +220,7 @@ TEST_F(UnpackMediaSequenceCalculatorTest, UnpacksOneForwardFlowImage) { for (int i = 0; i < num_forward_flow_images; ++i) { const std::string& output_image = output_packets[i].Get(); ASSERT_EQ(output_image, test_image_string); - ASSERT_EQ(output_packets[i].Timestamp().Value(), static_cast(i)); + ASSERT_EQ(output_packets[i].Timestamp().Value(), static_cast(i)); } } @@ -249,7 +249,7 @@ TEST_F(UnpackMediaSequenceCalculatorTest, UnpacksTwoForwardFlowImages) { for (int i = 0; i < num_forward_flow_images; ++i) { const std::string& output_image = output_packets[i].Get(); ASSERT_EQ(output_image, test_image_strings[i]); - ASSERT_EQ(output_packets[i].Timestamp().Value(), static_cast(i)); + ASSERT_EQ(output_packets[i].Timestamp().Value(), static_cast(i)); } } diff --git a/mediapipe/calculators/tensorflow/vector_float_to_tensor_calculator.cc b/mediapipe/calculators/tensorflow/vector_float_to_tensor_calculator.cc index 96208b3e5..28184a8ca 100644 --- a/mediapipe/calculators/tensorflow/vector_float_to_tensor_calculator.cc +++ b/mediapipe/calculators/tensorflow/vector_float_to_tensor_calculator.cc @@ -90,9 +90,9 @@ absl::Status VectorFloatToTensorCalculator::Process(CalculatorContext* cc) { const std::vector>& input = cc->Inputs().Index(0).Value().Get>>(); - const int32 rows = input.size(); + const int32_t rows = input.size(); RET_CHECK_GE(rows, 1); - const int32 cols = input[0].size(); + const int32_t cols = input[0].size(); RET_CHECK_GE(cols, 1); for (int i = 
1; i < rows; ++i) { RET_CHECK_EQ(input[i].size(), cols); @@ -117,7 +117,7 @@ absl::Status VectorFloatToTensorCalculator::Process(CalculatorContext* cc) { const std::vector& input = cc->Inputs().Index(0).Value().Get>(); RET_CHECK_GE(input.size(), 1); - const int32 length = input.size(); + const int32_t length = input.size(); tensor_shape = tf::TensorShape({length}); auto output = ::absl::make_unique(tf::DT_FLOAT, tensor_shape); for (int i = 0; i < length; ++i) { diff --git a/mediapipe/calculators/tensorflow/vector_int_to_tensor_calculator.cc b/mediapipe/calculators/tensorflow/vector_int_to_tensor_calculator.cc index f5bf7661e..cb90276ae 100644 --- a/mediapipe/calculators/tensorflow/vector_int_to_tensor_calculator.cc +++ b/mediapipe/calculators/tensorflow/vector_int_to_tensor_calculator.cc @@ -112,9 +112,9 @@ absl::Status VectorIntToTensorCalculator::Process(CalculatorContext* cc) { .Value() .Get>>(); - const int32 rows = input.size(); + const int32_t rows = input.size(); CHECK_GE(rows, 1); - const int32 cols = input[0].size(); + const int32_t cols = input[0].size(); CHECK_GE(cols, 1); for (int i = 1; i < rows; ++i) { CHECK_EQ(input[i].size(), cols); @@ -134,7 +134,7 @@ absl::Status VectorIntToTensorCalculator::Process(CalculatorContext* cc) { AssignMatrixValue(c, r, input[r][c], output.get()); break; case tf::DT_UINT8: - AssignMatrixValue(c, r, input[r][c], output.get()); + AssignMatrixValue(c, r, input[r][c], output.get()); break; case tf::DT_INT32: AssignMatrixValue(c, r, input[r][c], output.get()); @@ -152,7 +152,7 @@ absl::Status VectorIntToTensorCalculator::Process(CalculatorContext* cc) { AssignMatrixValue(r, c, input[r][c], output.get()); break; case tf::DT_UINT8: - AssignMatrixValue(r, c, input[r][c], output.get()); + AssignMatrixValue(r, c, input[r][c], output.get()); break; case tf::DT_INT32: AssignMatrixValue(r, c, input[r][c], output.get()); @@ -172,7 +172,7 @@ absl::Status VectorIntToTensorCalculator::Process(CalculatorContext* cc) { input = cc->Inputs().Tag(kVectorInt).Value().Get>(); } CHECK_GE(input.size(), 1); - const int32 length = input.size(); + const int32_t length = input.size(); tensor_shape = tf::TensorShape({length}); auto output = ::absl::make_unique(options_.tensor_data_type(), tensor_shape); @@ -182,7 +182,7 @@ absl::Status VectorIntToTensorCalculator::Process(CalculatorContext* cc) { output->tensor()(i) = input.at(i); break; case tf::DT_UINT8: - output->tensor()(i) = input.at(i); + output->tensor()(i) = input.at(i); break; case tf::DT_INT32: output->tensor()(i) = input.at(i); diff --git a/mediapipe/calculators/tensorflow/vector_int_to_tensor_calculator_test.cc b/mediapipe/calculators/tensorflow/vector_int_to_tensor_calculator_test.cc index a7f1a9e7f..6a24bb188 100644 --- a/mediapipe/calculators/tensorflow/vector_int_to_tensor_calculator_test.cc +++ b/mediapipe/calculators/tensorflow/vector_int_to_tensor_calculator_test.cc @@ -63,7 +63,7 @@ class VectorIntToTensorCalculatorTest : public ::testing::Test { } } - const int64 time = 1234; + const int64_t time = 1234; runner_->MutableInputs() ->Tag(kVectorIntTag) .packets.push_back(Adopt(input.release()).At(Timestamp(time))); @@ -97,7 +97,7 @@ class VectorIntToTensorCalculatorTest : public ::testing::Test { TEST_F(VectorIntToTensorCalculatorTest, TestSingleValue) { SetUpRunner(VectorIntToTensorCalculatorOptions::INPUT_1D, tensorflow::DT_INT32, false, true); - const int64 time = 1234; + const int64_t time = 1234; runner_->MutableInputs() ->Tag(kSingleIntTag) .packets.push_back(MakePacket(1).At(Timestamp(time))); @@ 
-112,7 +112,7 @@ TEST_F(VectorIntToTensorCalculatorTest, TestSingleValue) { EXPECT_EQ(1, output_tensor.dims()); EXPECT_EQ(tf::DT_INT32, output_tensor.dtype()); - const auto vec = output_tensor.vec(); + const auto vec = output_tensor.vec(); EXPECT_EQ(1, vec(0)); } @@ -123,7 +123,7 @@ TEST_F(VectorIntToTensorCalculatorTest, TesOneDim) { for (int i = 0; i < 5; ++i) { input->at(i) = i; } - const int64 time = 1234; + const int64_t time = 1234; runner_->MutableInputs() ->Tag(kVectorIntTag) .packets.push_back(Adopt(input.release()).At(Timestamp(time))); @@ -138,7 +138,7 @@ TEST_F(VectorIntToTensorCalculatorTest, TesOneDim) { EXPECT_EQ(1, output_tensor.dims()); EXPECT_EQ(tf::DT_INT32, output_tensor.dtype()); - const auto vec = output_tensor.vec(); + const auto vec = output_tensor.vec(); for (int i = 0; i < 5; ++i) { EXPECT_EQ(i, vec(i)); @@ -154,7 +154,7 @@ TEST_F(VectorIntToTensorCalculatorTest, TestTwoDims) { TEST_F(VectorIntToTensorCalculatorTest, TestInt64) { SetUpRunner(VectorIntToTensorCalculatorOptions::INPUT_1D, tensorflow::DT_INT64, false, true); - const int64 time = 1234; + const int64_t time = 1234; runner_->MutableInputs() ->Tag(kSingleIntTag) .packets.push_back(MakePacket(1LL << 31).At(Timestamp(time))); @@ -181,7 +181,7 @@ TEST_F(VectorIntToTensorCalculatorTest, TestUint8) { for (int i = 0; i < 5; ++i) { input->at(i) = i; } - const int64 time = 1234; + const int64_t time = 1234; runner_->MutableInputs() ->Tag(kVectorIntTag) .packets.push_back(Adopt(input.release()).At(Timestamp(time))); @@ -196,7 +196,7 @@ TEST_F(VectorIntToTensorCalculatorTest, TestUint8) { EXPECT_EQ(1, output_tensor.dims()); EXPECT_EQ(tf::DT_UINT8, output_tensor.dtype()); - const auto vec = output_tensor.vec(); + const auto vec = output_tensor.vec(); for (int i = 0; i < 5; ++i) { EXPECT_EQ(i, vec(i)); diff --git a/mediapipe/calculators/tensorflow/vector_string_to_tensor_calculator.cc b/mediapipe/calculators/tensorflow/vector_string_to_tensor_calculator.cc index 0e579009b..139511271 100644 --- a/mediapipe/calculators/tensorflow/vector_string_to_tensor_calculator.cc +++ b/mediapipe/calculators/tensorflow/vector_string_to_tensor_calculator.cc @@ -94,9 +94,9 @@ absl::Status VectorStringToTensorCalculator::Process(CalculatorContext* cc) { .Value() .Get>>(); - const int32 rows = input.size(); + const int32_t rows = input.size(); RET_CHECK_GE(rows, 1); - const int32 cols = input[0].size(); + const int32_t cols = input[0].size(); RET_CHECK_GE(cols, 1); for (int i = 1; i < rows; ++i) { RET_CHECK_EQ(input[i].size(), cols); @@ -121,7 +121,7 @@ absl::Status VectorStringToTensorCalculator::Process(CalculatorContext* cc) { const std::vector& input = cc->Inputs().Index(0).Value().Get>(); RET_CHECK_GE(input.size(), 1); - const int32 length = input.size(); + const int32_t length = input.size(); tensor_shape = tf::TensorShape({length}); auto output = ::absl::make_unique(tf::DT_STRING, tensor_shape); for (int i = 0; i < length; ++i) { diff --git a/mediapipe/calculators/tensorflow/vector_string_to_tensor_calculator_test.cc b/mediapipe/calculators/tensorflow/vector_string_to_tensor_calculator_test.cc index 5921bd1b0..babbe888a 100644 --- a/mediapipe/calculators/tensorflow/vector_string_to_tensor_calculator_test.cc +++ b/mediapipe/calculators/tensorflow/vector_string_to_tensor_calculator_test.cc @@ -53,7 +53,7 @@ class VectorStringToTensorCalculatorTest : public ::testing::Test { } } - const int64 time = 1234; + const int64_t time = 1234; runner_->MutableInputs()->Index(0).packets.push_back( Adopt(input.release()).At(Timestamp(time))); 
@@ -89,7 +89,7 @@ TEST_F(VectorStringToTensorCalculatorTest, ConvertsFromVectorString) {
   for (int i = 0; i < 5; ++i) {
     input->at(i) = absl::StrCat(i);
   }
-  const int64 time = 1234;
+  const int64_t time = 1234;
   runner_->MutableInputs()->Index(0).packets.push_back(
       Adopt(input.release()).At(Timestamp(time)));
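For reference, the change running through this whole patch is a mechanical migration from the legacy width typedefs (uint8, uint16, int32, int64, ...) to the standard fixed-width types from <cstdint> (uint8_t, uint16_t, int32_t, int64_t, ...). The standalone sketch below (not part of the patch; the clamp-and-scale helper is a simplified stand-in, not code taken from the calculators) shows the target style:

// Minimal sketch of the fixed-width type usage the patch migrates to.
#include <algorithm>
#include <cstdint>
#include <iostream>

// Simplified stand-in: map a float in [0, 1] to a byte, rounding to nearest.
// Note std::clamp takes (value, lo, hi).
uint8_t FloatToByte(float value) {
  const float clamped = std::clamp(value, 0.0f, 1.0f);
  return static_cast<uint8_t>(clamped * 255.0f + 0.5f);
}

int main() {
  const int32_t width = 3;            // exactly 32-bit signed
  const int64_t timestamp_us = 1234;  // exactly 64-bit signed
  std::cout << static_cast<int>(FloatToByte(0.5f)) << " " << width << " "
            << timestamp_us << "\n";
  return 0;
}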