Internal change
PiperOrigin-RevId: 522287183
parent 3c083f5d2b
commit a151e6485a
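Every hunk below applies the same mechanical substitution: MediaPipe's legacy integer aliases (int64, uint8, int8) are replaced by the standard fixed-width types from <cstdint> (int64_t, uint8_t, int8_t), with clang-format re-wrapping the few call sites whose lines grew. The sketch that follows only illustrates that pattern under this reading of the diff; it is not code from the commit, and the names in it are hypothetical.

// Minimal sketch of the migration (illustrative only; CountSamples is a
// hypothetical helper, not part of MediaPipe).
#include <cstdint>
#include <vector>

// Before a change like this, the code spells the types as `int64` / `uint8`,
// aliases supplied by a project portability header rather than the C++
// standard library.
int64_t CountSamples(const std::vector<uint8_t>& audio_bytes) {
  // int64_t and uint8_t have guaranteed widths and come from <cstdint>, so
  // the code no longer depends on the project-local typedefs.
  return static_cast<int64_t>(audio_bytes.size());
}
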
@@ -192,7 +192,7 @@ class AudioToTensorCalculator : public Node {
   DftTensorFormat dft_tensor_format_;
 
   Timestamp initial_timestamp_ = Timestamp::Unstarted();
-  int64 cumulative_input_samples_ = 0;
+  int64_t cumulative_input_samples_ = 0;
   Timestamp next_output_timestamp_ = Timestamp::Unstarted();
 
   double source_sample_rate_ = -1;

@@ -163,18 +163,19 @@ class AudioToTensorCalculatorNonStreamingModeTest : public ::testing::Test {
   }
 
   void CheckTimestampsOutputPackets(
-      std::vector<int64> expected_timestamp_values) {
+      std::vector<int64_t> expected_timestamp_values) {
     ASSERT_EQ(num_iterations_, timestamps_packets_.size());
     for (int i = 0; i < timestamps_packets_.size(); ++i) {
       const auto& p = timestamps_packets_[i];
       MP_ASSERT_OK(p.ValidateAsType<std::vector<Timestamp>>());
       auto output_timestamps = p.Get<std::vector<Timestamp>>();
-      int64 base_timestamp = i * Timestamp::kTimestampUnitsPerSecond;
+      int64_t base_timestamp = i * Timestamp::kTimestampUnitsPerSecond;
       std::vector<Timestamp> expected_timestamps;
       expected_timestamps.resize(expected_timestamp_values.size());
-      std::transform(
-          expected_timestamp_values.begin(), expected_timestamp_values.end(),
-          expected_timestamps.begin(), [base_timestamp](int64 v) -> Timestamp {
+      std::transform(expected_timestamp_values.begin(),
+                     expected_timestamp_values.end(),
+                     expected_timestamps.begin(),
+                     [base_timestamp](int64_t v) -> Timestamp {
             return Timestamp(v + base_timestamp);
           });
       EXPECT_EQ(expected_timestamps, output_timestamps);
@@ -379,7 +380,7 @@ class AudioToTensorCalculatorStreamingModeTest : public ::testing::Test {
   }
 
   void CheckTensorsOutputPackets(int sample_offset, int num_packets,
-                                 int64 timestamp_interval,
+                                 int64_t timestamp_interval,
                                  bool output_last_at_close) {
     ASSERT_EQ(num_packets, tensors_packets_.size());
     for (int i = 0; i < num_packets; ++i) {
@@ -550,7 +551,7 @@ class AudioToTensorCalculatorFftTest : public ::testing::Test {
  protected:
   // Creates an audio matrix containing a single sample of 1.0 at a specified
   // offset.
-  std::unique_ptr<Matrix> CreateImpulseSignalData(int64 num_samples,
+  std::unique_ptr<Matrix> CreateImpulseSignalData(int64_t num_samples,
                                                   int impulse_offset_idx) {
     Matrix impulse = Matrix::Zero(1, num_samples);
     impulse(0, impulse_offset_idx) = 1.0;

@@ -163,12 +163,12 @@ void RunTestWithInputImagePacket(
       EXPECT_EQ(tensor.element_type(), Tensor::ElementType::kInt8);
       tensor_mat = cv::Mat(expected_result.rows, expected_result.cols,
                            channels == 1 ? CV_8SC1 : CV_8SC3,
-                           const_cast<int8*>(view.buffer<int8>()));
+                           const_cast<int8_t*>(view.buffer<int8_t>()));
     } else {
       EXPECT_EQ(tensor.element_type(), Tensor::ElementType::kUInt8);
       tensor_mat = cv::Mat(expected_result.rows, expected_result.cols,
                            channels == 1 ? CV_8UC1 : CV_8UC3,
-                           const_cast<uint8*>(view.buffer<uint8>()));
+                           const_cast<uint8_t*>(view.buffer<uint8_t>()));
     }
   } else {
     EXPECT_EQ(tensor.element_type(), Tensor::ElementType::kFloat32);
@@ -210,14 +210,14 @@ mediapipe::ImageFormat::Format GetImageFormat(int image_channels) {
 
 Packet MakeImageFramePacket(cv::Mat input) {
   ImageFrame input_image(GetImageFormat(input.channels()), input.cols,
-                         input.rows, input.step, input.data, [](uint8*) {});
+                         input.rows, input.step, input.data, [](uint8_t*) {});
   return MakePacket<ImageFrame>(std::move(input_image)).At(Timestamp(0));
 }
 
 Packet MakeImagePacket(cv::Mat input) {
   mediapipe::Image input_image(std::make_shared<mediapipe::ImageFrame>(
       GetImageFormat(input.channels()), input.cols, input.rows, input.step,
-      input.data, [](uint8*) {}));
+      input.data, [](uint8_t*) {}));
   return MakePacket<mediapipe::Image>(std::move(input_image)).At(Timestamp(0));
 }
 

@@ -160,7 +160,7 @@ absl::StatusOr<std::vector<Tensor>> InferenceInterpreterDelegateRunner::Run(
             Tensor::ElementType::kUInt8, shape,
             Tensor::QuantizationParameters{tensor->params.scale,
                                            tensor->params.zero_point});
-        CopyTensorBufferFromInterpreter<uint8>(interpreter_.get(), i,
+        CopyTensorBufferFromInterpreter<uint8_t>(interpreter_.get(), i,
                                                &output_tensors.back());
         break;
       case TfLiteType::kTfLiteInt8:
@@ -168,7 +168,7 @@ absl::StatusOr<std::vector<Tensor>> InferenceInterpreterDelegateRunner::Run(
             Tensor::ElementType::kInt8, shape,
             Tensor::QuantizationParameters{tensor->params.scale,
                                            tensor->params.zero_point});
-        CopyTensorBufferFromInterpreter<int8>(interpreter_.get(), i,
+        CopyTensorBufferFromInterpreter<int8_t>(interpreter_.get(), i,
                                               &output_tensors.back());
         break;
       case TfLiteType::kTfLiteInt32:

@@ -251,7 +251,7 @@ absl::Status TensorConverterCalculator::ProcessCPU(CalculatorContext* cc) {
 
     // Copy image data into tensor.
     if (image_frame.ByteDepth() == 1) {
-      MP_RETURN_IF_ERROR(NormalizeImage<uint8>(image_frame, flip_vertically_,
+      MP_RETURN_IF_ERROR(NormalizeImage<uint8_t>(image_frame, flip_vertically_,
                                                cpu_view.buffer<float>()));
     } else if (image_frame.ByteDepth() == 4) {
       MP_RETURN_IF_ERROR(NormalizeImage<float>(image_frame, flip_vertically_,

@@ -82,10 +82,10 @@ absl::Status TensorsDequantizationCalculator::Process(CalculatorContext* cc) {
                                  input_tensor.shape());
     switch (input_tensor.element_type()) {
       case Tensor::ElementType::kUInt8:
-        Dequantize<uint8>(input_tensor, &output_tensors->back());
+        Dequantize<uint8_t>(input_tensor, &output_tensors->back());
         break;
       case Tensor::ElementType::kInt8:
-        Dequantize<int8>(input_tensor, &output_tensors->back());
+        Dequantize<int8_t>(input_tensor, &output_tensors->back());
         break;
       case Tensor::ElementType::kBool:
         Dequantize<bool>(input_tensor, &output_tensors->back());

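The Dequantize<T> calls above, together with the Tensor::QuantizationParameters{scale, zero_point} construction in the inference-runner hunks, point at the usual TFLite affine quantization scheme, real = scale * (quantized - zero_point). Below is a generic sketch of that mapping under that assumption; it is not the calculator's actual implementation, and DequantizeBuffer is a made-up name.

// Generic affine dequantization sketch (assumes the standard TFLite scheme;
// not taken from TensorsDequantizationCalculator).
#include <cstddef>
#include <cstdint>
#include <vector>

template <typename T>
std::vector<float> DequantizeBuffer(const std::vector<T>& quantized,
                                    float scale, int zero_point) {
  std::vector<float> real(quantized.size());
  for (std::size_t i = 0; i < quantized.size(); ++i) {
    // real_value = scale * (quantized_value - zero_point)
    real[i] = scale * (static_cast<int>(quantized[i]) - zero_point);
  }
  return real;
}

// Example: DequantizeBuffer<uint8_t>({0, 128, 255}, /*scale=*/1.0f / 127,
//                                    /*zero_point=*/128) maps 128 to 0.0f.
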
@@ -36,7 +36,7 @@ class TensorsToAudioCalculatorFftTest : public ::testing::Test {
  protected:
   // Creates an audio matrix containing a single sample of 1.0 at a specified
   // offset.
-  Matrix CreateImpulseSignalData(int64 num_samples, int impulse_offset_idx) {
+  Matrix CreateImpulseSignalData(int64_t num_samples, int impulse_offset_idx) {
     Matrix impulse = Matrix::Zero(1, num_samples);
     impulse(0, impulse_offset_idx) = 1.0;
     return impulse;

@@ -47,7 +47,7 @@ class TensorsToClassificationCalculatorTest : public ::testing::Test {
       tensor_buffer[i] = scores[i];
     }
 
-    int64 stream_timestamp = 0;
+    int64_t stream_timestamp = 0;
     auto& input_stream_packets =
         runner->MutableInputs()->Tag("TENSORS").packets;
 

@@ -46,7 +46,7 @@ class TensorsToFloatsCalculatorTest : public ::testing::Test {
       tensor_buffer[i] = values[i];
     }
 
-    int64 stream_timestamp = 0;
+    int64_t stream_timestamp = 0;
     auto& input_stream_packets =
         runner->MutableInputs()->Tag("TENSORS").packets;
 