Internal change

PiperOrigin-RevId: 523306493
Authored by MediaPipe Team on 2023-04-10 22:48:41 -07:00; committed by Copybara-Service
parent 3bb411e99d
commit a4172cb03f
5 changed files with 14 additions and 13 deletions

View File

@@ -26,7 +26,7 @@ namespace mediapipe {
 namespace {
 struct MultiScaleAnchorInfo {
-  int32 level;
+  int32_t level;
   std::vector<float> aspect_ratios;
   std::vector<float> scales;
   std::pair<float, float> base_anchor_size;
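The pattern applied throughout this commit is the replacement of MediaPipe's legacy integer aliases (int32, uint8, uint32, int64) with the standard fixed-width types from <cstdint>. A minimal sketch of the struct above after the change; the note that the legacy aliases come from mediapipe/framework/port/integral_types.h is my assumption, not stated in the diff:

#include <cstdint>  // provides int32_t, replacing the legacy int32 alias
#include <utility>
#include <vector>

// Anchor metadata for one feature-map level, mirroring the hunk above.
struct MultiScaleAnchorInfo {
  int32_t level;  // was: int32
  std::vector<float> aspect_ratios;
  std::vector<float> scales;
  std::pair<float, float> base_anchor_size;
};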

View File

@@ -337,9 +337,9 @@ absl::Status TfLiteConverterCalculator::ProcessCPU(CalculatorContext* cc) {
   if (use_quantized_tensors_) {
     const int width_padding =
         image_frame.WidthStep() / image_frame.ByteDepth() - width * channels;
-    const uint8* image_buffer =
-        reinterpret_cast<const uint8*>(image_frame.PixelData());
-    uint8* tensor_buffer = tensor->data.uint8;
+    const uint8_t* image_buffer =
+        reinterpret_cast<const uint8_t*>(image_frame.PixelData());
+    uint8_t* tensor_buffer = tensor->data.uint8;
     RET_CHECK(tensor_buffer);
     for (int row = 0; row < height; ++row) {
       for (int col = 0; col < width; ++col) {
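For context, a self-contained sketch of what the quantized branch above does: copy a row-padded uint8_t image into a tightly packed tensor buffer. The helper name and the assumption of a single-byte depth (so the stride is already in bytes) are mine; the real calculator reads through the ImageFrame accessors and applies its own options.

#include <cstdint>

// Hypothetical helper mirroring the width_padding logic: width_step_bytes is
// the stride of one image row, which may exceed width * channels.
void CopyPaddedRowsToTensor(const uint8_t* image_buffer, int width, int height,
                            int channels, int width_step_bytes,
                            uint8_t* tensor_buffer) {
  const int width_padding = width_step_bytes - width * channels;
  for (int row = 0; row < height; ++row) {
    for (int i = 0; i < width * channels; ++i) {
      *tensor_buffer++ = *image_buffer++;  // Copy the pixel bytes of this row.
    }
    image_buffer += width_padding;  // Skip the padding at the end of the row.
  }
}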
@@ -354,8 +354,8 @@ absl::Status TfLiteConverterCalculator::ProcessCPU(CalculatorContext* cc) {
     float* tensor_buffer = tensor->data.f;
     RET_CHECK(tensor_buffer);
     if (image_frame.ByteDepth() == 1) {
-      MP_RETURN_IF_ERROR(NormalizeImage<uint8>(image_frame, flip_vertically_,
-                                               tensor_buffer));
+      MP_RETURN_IF_ERROR(NormalizeImage<uint8_t>(
+          image_frame, flip_vertically_, tensor_buffer));
     } else if (image_frame.ByteDepth() == 4) {
       MP_RETURN_IF_ERROR(NormalizeImage<float>(image_frame, flip_vertically_,
                                                tensor_buffer));
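The float branch above dispatches on ByteDepth() so that NormalizeImage<T> is instantiated for the actual pixel type, now uint8_t rather than the legacy uint8. A simplified, purely illustrative sketch of such a normalizer follows; it is not MediaPipe's NormalizeImage, which also honors flip_vertically_ and the calculator's normalization options.

#include <cstdint>

// Illustrative only: scale source pixels into [0, 1] floats. uint8_t inputs
// are divided by 255; float inputs are assumed in range and copied through.
template <class T>
void NormalizeToUnitRange(const T* src, int num_values, float* dst) {
  const float scale = (sizeof(T) == 1) ? (1.0f / 255.0f) : 1.0f;
  for (int i = 0; i < num_values; ++i) {
    dst[i] = static_cast<float>(src[i]) * scale;
  }
}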

View File

@@ -42,7 +42,7 @@ constexpr char kTransposeOptionsString[] =
 using RandomEngine = std::mt19937_64;
 using testing::Eq;
-const uint32 kSeed = 1234;
+const uint32_t kSeed = 1234;
 const int kNumSizes = 8;
 const int sizes[kNumSizes][2] = {{1, 1}, {12, 1}, {1, 9}, {2, 2},
                                  {5, 3}, {7, 13}, {16, 32}, {101, 2}};
@@ -50,7 +50,7 @@ const int sizes[kNumSizes][2] = {{1, 1}, {12, 1}, {1, 9}, {2, 2},
 class TfLiteConverterCalculatorTest : public ::testing::Test {
  protected:
   // Adds a packet with a matrix filled with random values in [0,1].
-  void AddRandomMatrix(int num_rows, int num_columns, uint32 seed,
+  void AddRandomMatrix(int num_rows, int num_columns, uint32_t seed,
                        bool row_major_matrix = false) {
     RandomEngine random(kSeed);
     std::uniform_real_distribution<> uniform_dist(0, 1.0);
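A sketch of the helper's core, using the same engine alias and distribution as the fixture; packaging it as a free function that returns the values is my own framing, and unlike the sketch, the fixture above seeds from the kSeed constant rather than its seed parameter.

#include <cstdint>
#include <random>
#include <vector>

using RandomEngine = std::mt19937_64;

// Fill a row-major num_rows x num_columns matrix with uniform values in [0, 1).
std::vector<float> MakeRandomMatrix(int num_rows, int num_columns,
                                    uint32_t seed) {
  RandomEngine random(seed);
  std::uniform_real_distribution<> uniform_dist(0, 1.0);
  std::vector<float> values(num_rows * num_columns);
  for (float& value : values) {
    value = static_cast<float>(uniform_dist(random));
  }
  return values;
}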
@@ -228,7 +228,7 @@ TEST_F(TfLiteConverterCalculatorTest, CustomDivAndSub) {
   MP_ASSERT_OK(graph.StartRun({}));
   auto input_image = absl::make_unique<ImageFrame>(ImageFormat::GRAY8, 1, 1);
   cv::Mat mat = mediapipe::formats::MatView(input_image.get());
-  mat.at<uint8>(0, 0) = 200;
+  mat.at<uint8_t>(0, 0) = 200;
   MP_ASSERT_OK(graph.AddPacketToInputStream(
       "input_image", Adopt(input_image.release()).At(Timestamp(0))));
@@ -284,7 +284,7 @@ TEST_F(TfLiteConverterCalculatorTest, SetOutputRange) {
   MP_ASSERT_OK(graph.StartRun({}));
   auto input_image = absl::make_unique<ImageFrame>(ImageFormat::GRAY8, 1, 1);
   cv::Mat mat = mediapipe::formats::MatView(input_image.get());
-  mat.at<uint8>(0, 0) = 200;
+  mat.at<uint8_t>(0, 0) = 200;
   MP_ASSERT_OK(graph.AddPacketToInputStream(
       "input_image", Adopt(input_image.release()).At(Timestamp(0))));

View File

@@ -535,8 +535,9 @@ absl::Status TfLiteInferenceCalculator::ProcessInputsCpu(
     const TfLiteTensor* input_tensor = &input_tensors[i];
     RET_CHECK(input_tensor->data.raw);
     if (use_quantized_tensors_) {
-      const uint8* input_tensor_buffer = input_tensor->data.uint8;
-      uint8* local_tensor_buffer = interpreter_->typed_input_tensor<uint8>(i);
+      const uint8_t* input_tensor_buffer = input_tensor->data.uint8;
+      uint8_t* local_tensor_buffer =
+          interpreter_->typed_input_tensor<uint8_t>(i);
       std::memcpy(local_tensor_buffer, input_tensor_buffer,
                   input_tensor->bytes);
     } else {
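A trimmed sketch of the quantized input path above: the calculator's input TfLiteTensor bytes are memcpy'd into the interpreter's own uint8_t input buffer. The free-function packaging and parameter names are mine; it assumes an interpreter whose tensors are already allocated.

#include <cstdint>
#include <cstring>

#include "tensorflow/lite/interpreter.h"

// Copy one quantized input tensor into the interpreter's matching input buffer.
void CopyQuantizedInput(const TfLiteTensor& input_tensor, int input_index,
                        tflite::Interpreter* interpreter) {
  const uint8_t* input_tensor_buffer = input_tensor.data.uint8;
  uint8_t* local_tensor_buffer =
      interpreter->typed_input_tensor<uint8_t>(input_index);
  std::memcpy(local_tensor_buffer, input_tensor_buffer, input_tensor.bytes);
}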

View File

@@ -60,7 +60,7 @@ class TfLiteTensorsToClassificationCalculatorTest : public ::testing::Test {
     auto tensors = absl::make_unique<std::vector<TfLiteTensor>>();
     tensors->emplace_back(*tensor);
-    int64 stream_timestamp = 0;
+    int64_t stream_timestamp = 0;
     auto& input_stream_packets =
         runner->MutableInputs()->Tag("TENSORS").packets;
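The int64_t counter above is the value used to timestamp each packet pushed into the runner's TENSORS stream; a sketch of that step, using the Adopt/At pattern that also appears in the converter tests. The helper is hypothetical.

#include <cstdint>
#include <memory>

#include "mediapipe/framework/packet.h"
#include "mediapipe/framework/timestamp.h"

// Wrap an owned payload in a packet stamped with an int64_t timestamp value.
template <class T>
mediapipe::Packet AdoptAt(std::unique_ptr<T> payload, int64_t timestamp) {
  return mediapipe::Adopt(payload.release()).At(mediapipe::Timestamp(timestamp));
}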