diff --git a/mediapipe/calculators/tensor/audio_to_tensor_calculator.cc b/mediapipe/calculators/tensor/audio_to_tensor_calculator.cc
index 8bd63d005..47617b375 100644
--- a/mediapipe/calculators/tensor/audio_to_tensor_calculator.cc
+++ b/mediapipe/calculators/tensor/audio_to_tensor_calculator.cc
@@ -192,7 +192,7 @@ class AudioToTensorCalculator : public Node {
   DftTensorFormat dft_tensor_format_;
 
   Timestamp initial_timestamp_ = Timestamp::Unstarted();
-  int64 cumulative_input_samples_ = 0;
+  int64_t cumulative_input_samples_ = 0;
   Timestamp next_output_timestamp_ = Timestamp::Unstarted();
 
   double source_sample_rate_ = -1;
diff --git a/mediapipe/calculators/tensor/audio_to_tensor_calculator_test.cc b/mediapipe/calculators/tensor/audio_to_tensor_calculator_test.cc
index 60fcfcd82..47ec7d09b 100644
--- a/mediapipe/calculators/tensor/audio_to_tensor_calculator_test.cc
+++ b/mediapipe/calculators/tensor/audio_to_tensor_calculator_test.cc
@@ -163,20 +163,21 @@ class AudioToTensorCalculatorNonStreamingModeTest : public ::testing::Test {
   }
 
   void CheckTimestampsOutputPackets(
-      std::vector<int64> expected_timestamp_values) {
+      std::vector<int64_t> expected_timestamp_values) {
     ASSERT_EQ(num_iterations_, timestamps_packets_.size());
     for (int i = 0; i < timestamps_packets_.size(); ++i) {
       const auto& p = timestamps_packets_[i];
       MP_ASSERT_OK(p.ValidateAsType<std::vector<Timestamp>>());
       auto output_timestamps = p.Get<std::vector<Timestamp>>();
-      int64 base_timestamp = i * Timestamp::kTimestampUnitsPerSecond;
+      int64_t base_timestamp = i * Timestamp::kTimestampUnitsPerSecond;
       std::vector<Timestamp> expected_timestamps;
       expected_timestamps.resize(expected_timestamp_values.size());
-      std::transform(
-          expected_timestamp_values.begin(), expected_timestamp_values.end(),
-          expected_timestamps.begin(), [base_timestamp](int64 v) -> Timestamp {
-            return Timestamp(v + base_timestamp);
-          });
+      std::transform(expected_timestamp_values.begin(),
+                     expected_timestamp_values.end(),
+                     expected_timestamps.begin(),
+                     [base_timestamp](int64_t v) -> Timestamp {
+                       return Timestamp(v + base_timestamp);
+                     });
       EXPECT_EQ(expected_timestamps, output_timestamps);
       EXPECT_EQ(p.Timestamp(), expected_timestamps.back());
     }
@@ -379,7 +380,7 @@ class AudioToTensorCalculatorStreamingModeTest : public ::testing::Test {
   }
 
   void CheckTensorsOutputPackets(int sample_offset, int num_packets,
-                                 int64 timestamp_interval,
+                                 int64_t timestamp_interval,
                                  bool output_last_at_close) {
     ASSERT_EQ(num_packets, tensors_packets_.size());
     for (int i = 0; i < num_packets; ++i) {
@@ -550,7 +551,7 @@ class AudioToTensorCalculatorFftTest : public ::testing::Test {
  protected:
   // Creates an audio matrix containing a single sample of 1.0 at a specified
   // offset.
-  std::unique_ptr<Matrix> CreateImpulseSignalData(int64 num_samples,
+  std::unique_ptr<Matrix> CreateImpulseSignalData(int64_t num_samples,
                                                   int impulse_offset_idx) {
     Matrix impulse = Matrix::Zero(1, num_samples);
     impulse(0, impulse_offset_idx) = 1.0;
diff --git a/mediapipe/calculators/tensor/image_to_tensor_calculator_test.cc b/mediapipe/calculators/tensor/image_to_tensor_calculator_test.cc
index 3795b1fa0..409b8623c 100644
--- a/mediapipe/calculators/tensor/image_to_tensor_calculator_test.cc
+++ b/mediapipe/calculators/tensor/image_to_tensor_calculator_test.cc
@@ -163,12 +163,12 @@ void RunTestWithInputImagePacket(
       EXPECT_EQ(tensor.element_type(), Tensor::ElementType::kInt8);
       tensor_mat = cv::Mat(expected_result.rows, expected_result.cols,
                            channels == 1 ? CV_8SC1 : CV_8SC3,
-                           const_cast<int8*>(view.buffer<int8>()));
+                           const_cast<int8_t*>(view.buffer<int8_t>()));
     } else {
       EXPECT_EQ(tensor.element_type(), Tensor::ElementType::kUInt8);
       tensor_mat = cv::Mat(expected_result.rows, expected_result.cols,
                            channels == 1 ? CV_8UC1 : CV_8UC3,
-                           const_cast<uint8*>(view.buffer<uint8>()));
+                           const_cast<uint8_t*>(view.buffer<uint8_t>()));
     }
   } else {
     EXPECT_EQ(tensor.element_type(), Tensor::ElementType::kFloat32);
@@ -210,14 +210,14 @@ mediapipe::ImageFormat::Format GetImageFormat(int image_channels) {
 
 Packet MakeImageFramePacket(cv::Mat input) {
   ImageFrame input_image(GetImageFormat(input.channels()), input.cols,
-                         input.rows, input.step, input.data, [](uint8*) {});
+                         input.rows, input.step, input.data, [](uint8_t*) {});
   return MakePacket<ImageFrame>(std::move(input_image)).At(Timestamp(0));
 }
 
 Packet MakeImagePacket(cv::Mat input) {
   mediapipe::Image input_image(std::make_shared<ImageFrame>(
       GetImageFormat(input.channels()), input.cols, input.rows, input.step,
-      input.data, [](uint8*) {}));
+      input.data, [](uint8_t*) {}));
   return MakePacket<mediapipe::Image>(std::move(input_image)).At(Timestamp(0));
 }
 
diff --git a/mediapipe/calculators/tensor/inference_interpreter_delegate_runner.cc b/mediapipe/calculators/tensor/inference_interpreter_delegate_runner.cc
index 28781e97a..26bf3d8f8 100644
--- a/mediapipe/calculators/tensor/inference_interpreter_delegate_runner.cc
+++ b/mediapipe/calculators/tensor/inference_interpreter_delegate_runner.cc
@@ -160,16 +160,16 @@ absl::StatusOr<std::vector<Tensor>> InferenceInterpreterDelegateRunner::Run(
             Tensor::ElementType::kUInt8, shape,
             Tensor::QuantizationParameters{tensor->params.scale,
                                            tensor->params.zero_point});
-        CopyTensorBufferFromInterpreter<uint8>(interpreter_.get(), i,
-                                               &output_tensors.back());
+        CopyTensorBufferFromInterpreter<uint8_t>(interpreter_.get(), i,
+                                                 &output_tensors.back());
         break;
       case TfLiteType::kTfLiteInt8:
         output_tensors.emplace_back(
             Tensor::ElementType::kInt8, shape,
             Tensor::QuantizationParameters{tensor->params.scale,
                                            tensor->params.zero_point});
-        CopyTensorBufferFromInterpreter<int8>(interpreter_.get(), i,
-                                              &output_tensors.back());
+        CopyTensorBufferFromInterpreter<int8_t>(interpreter_.get(), i,
+                                                &output_tensors.back());
         break;
       case TfLiteType::kTfLiteInt32:
         output_tensors.emplace_back(Tensor::ElementType::kInt32, shape);
diff --git a/mediapipe/calculators/tensor/tensor_converter_calculator.cc b/mediapipe/calculators/tensor/tensor_converter_calculator.cc
index 4b05488fd..c1bd92968 100644
--- a/mediapipe/calculators/tensor/tensor_converter_calculator.cc
+++ b/mediapipe/calculators/tensor/tensor_converter_calculator.cc
@@ -251,8 +251,8 @@ absl::Status TensorConverterCalculator::ProcessCPU(CalculatorContext* cc) {
 
     // Copy image data into tensor.
     if (image_frame.ByteDepth() == 1) {
-      MP_RETURN_IF_ERROR(NormalizeImage<uint8>(image_frame, flip_vertically_,
-                                               cpu_view.buffer<float>()));
+      MP_RETURN_IF_ERROR(NormalizeImage<uint8_t>(image_frame, flip_vertically_,
+                                                 cpu_view.buffer<float>()));
     } else if (image_frame.ByteDepth() == 4) {
       MP_RETURN_IF_ERROR(NormalizeImage<float>(image_frame, flip_vertically_,
                                                cpu_view.buffer<float>()));
diff --git a/mediapipe/calculators/tensor/tensors_dequantization_calculator.cc b/mediapipe/calculators/tensor/tensors_dequantization_calculator.cc
index 3d364a53c..758c446f7 100644
--- a/mediapipe/calculators/tensor/tensors_dequantization_calculator.cc
+++ b/mediapipe/calculators/tensor/tensors_dequantization_calculator.cc
@@ -82,10 +82,10 @@ absl::Status TensorsDequantizationCalculator::Process(CalculatorContext* cc) {
                                  input_tensor.shape());
     switch (input_tensor.element_type()) {
       case Tensor::ElementType::kUInt8:
-        Dequantize<uint8>(input_tensor, &output_tensors->back());
+        Dequantize<uint8_t>(input_tensor, &output_tensors->back());
         break;
       case Tensor::ElementType::kInt8:
-        Dequantize<int8>(input_tensor, &output_tensors->back());
+        Dequantize<int8_t>(input_tensor, &output_tensors->back());
         break;
       case Tensor::ElementType::kBool:
         Dequantize<bool>(input_tensor, &output_tensors->back());
diff --git a/mediapipe/calculators/tensor/tensors_to_audio_calculator_test.cc b/mediapipe/calculators/tensor/tensors_to_audio_calculator_test.cc
index b522ea8b0..1ef3a0772 100644
--- a/mediapipe/calculators/tensor/tensors_to_audio_calculator_test.cc
+++ b/mediapipe/calculators/tensor/tensors_to_audio_calculator_test.cc
@@ -36,7 +36,7 @@ class TensorsToAudioCalculatorFftTest : public ::testing::Test {
  protected:
   // Creates an audio matrix containing a single sample of 1.0 at a specified
   // offset.
-  Matrix CreateImpulseSignalData(int64 num_samples, int impulse_offset_idx) {
+  Matrix CreateImpulseSignalData(int64_t num_samples, int impulse_offset_idx) {
     Matrix impulse = Matrix::Zero(1, num_samples);
     impulse(0, impulse_offset_idx) = 1.0;
     return impulse;
diff --git a/mediapipe/calculators/tensor/tensors_to_classification_calculator_test.cc b/mediapipe/calculators/tensor/tensors_to_classification_calculator_test.cc
index 9634635f0..f8b3aec2b 100644
--- a/mediapipe/calculators/tensor/tensors_to_classification_calculator_test.cc
+++ b/mediapipe/calculators/tensor/tensors_to_classification_calculator_test.cc
@@ -47,7 +47,7 @@ class TensorsToClassificationCalculatorTest : public ::testing::Test {
       tensor_buffer[i] = scores[i];
     }
 
-    int64 stream_timestamp = 0;
+    int64_t stream_timestamp = 0;
     auto& input_stream_packets =
         runner->MutableInputs()->Tag("TENSORS").packets;
 
diff --git a/mediapipe/calculators/tensor/tensors_to_floats_calculator_test.cc b/mediapipe/calculators/tensor/tensors_to_floats_calculator_test.cc
index 0e92baf51..b988be435 100644
--- a/mediapipe/calculators/tensor/tensors_to_floats_calculator_test.cc
+++ b/mediapipe/calculators/tensor/tensors_to_floats_calculator_test.cc
@@ -46,7 +46,7 @@ class TensorsToFloatsCalculatorTest : public ::testing::Test {
      tensor_buffer[i] = values[i];
    }
 
-    int64 stream_timestamp = 0;
+    int64_t stream_timestamp = 0;
    auto& input_stream_packets =
        runner->MutableInputs()->Tag("TENSORS").packets;