Remove unconditional texture params reset so that float textures are handled correctly.
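Background: when interpolation_mode_ is NEAREST, RenderGpu() sets GL_NEAREST min/mag filters on the bound source texture, but it previously also reset the filters to GL_LINEAR unconditionally after drawing. For 32-bit float textures that reset can break downstream sampling, since float formats are typically not texture-filterable on GLES without OES_texture_float_linear, and a GL_LINEAR filter then leaves the texture incomplete. This change drops the unconditional reset (leaving a TODO to restore the original parameters properly) and adds CPU and GPU tests that resize VEC32F1 inputs with NEAREST interpolation; a simplified sketch of the resulting RenderGpu() flow follows the first file's diff below.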
PiperOrigin-RevId: 564869245
parent 5b08a09446
commit 1d8dda3337
@@ -660,6 +660,7 @@ absl::Status ImageTransformationCalculator::RenderGpu(CalculatorContext* cc) {
   glBindTexture(src1.target(), src1.name());
 
   if (interpolation_mode_ == ImageTransformationCalculatorOptions::NEAREST) {
+    // TODO: revert texture params.
     glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
     glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
   }
@@ -669,10 +670,6 @@ absl::Status ImageTransformationCalculator::RenderGpu(CalculatorContext* cc) {
       rotation, flip_horizontally_, flip_vertically_,
       /*flip_texture=*/false));
 
-  // Reset interpolation modes to MediaPipe defaults.
-  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
-  glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
-
   glActiveTexture(GL_TEXTURE1);
   glBindTexture(src1.target(), 0);
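For context, a simplified sketch of the texture handling in RenderGpu() after this change (illustrative only; identifiers are taken from the hunks above, and the actual draw call and surrounding setup are elided):

    glActiveTexture(GL_TEXTURE1);
    glBindTexture(src1.target(), src1.name());
    if (interpolation_mode_ == ImageTransformationCalculatorOptions::NEAREST) {
      // TODO: revert texture params.
      glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
      glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    }
    // ... draw call elided ...
    // Note: no unconditional reset to GL_LINEAR here any more; forcing
    // GL_LINEAR on a 32-bit float texture can make it incomplete on GLES.
    glActiveTexture(GL_TEXTURE1);
    glBindTexture(src1.target(), 0);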
@@ -18,6 +18,7 @@
 #include "mediapipe/framework/port/parse_text_proto.h"
 #include "testing/base/public/gmock.h"
 #include "testing/base/public/googletest.h"
+#include "third_party/OpenCV/core.hpp"  // IWYU pragma: keep
 #include "third_party/OpenCV/core/mat.hpp"
 
 namespace mediapipe {
@@ -93,6 +94,67 @@ TEST(ImageTransformationCalculatorTest, NearestNeighborResizing) {
   }
 }
 
+TEST(ImageTransformationCalculatorTest,
+     NearestNeighborResizingWorksForFloatInput) {
+  cv::Mat input_mat;
+  cv::cvtColor(cv::imread(file::JoinPath("./",
+                                         "/mediapipe/calculators/"
+                                         "image/testdata/binary_mask.png")),
+               input_mat, cv::COLOR_BGR2GRAY);
+  Packet input_image_packet = MakePacket<ImageFrame>(
+      ImageFormat::VEC32F1, input_mat.size().width, input_mat.size().height);
+  cv::Mat packet_mat_view =
+      formats::MatView(&(input_image_packet.Get<ImageFrame>()));
+  input_mat.convertTo(packet_mat_view, CV_32FC1, 1 / 255.f);
+
+  std::vector<std::pair<int, int>> output_dims{
+      {256, 333}, {512, 512}, {1024, 1024}};
+
+  for (auto& output_dim : output_dims) {
+    Packet input_output_dim_packet =
+        MakePacket<std::pair<int, int>>(output_dim);
+    std::vector<std::string> scale_modes{"FIT", "STRETCH"};
+    for (const auto& scale_mode : scale_modes) {
+      CalculatorGraphConfig::Node node_config =
+          ParseTextProtoOrDie<CalculatorGraphConfig::Node>(
+              absl::Substitute(R"(
+        calculator: "ImageTransformationCalculator"
+        input_stream: "IMAGE:input_image"
+        input_stream: "OUTPUT_DIMENSIONS:image_size"
+        output_stream: "IMAGE:output_image"
+        options: {
+          [mediapipe.ImageTransformationCalculatorOptions.ext]: {
+            scale_mode: $0
+            interpolation_mode: NEAREST
+          }
+        })",
+                               scale_mode));
+
+      CalculatorRunner runner(node_config);
+      runner.MutableInputs()->Tag("IMAGE").packets.push_back(
+          input_image_packet.At(Timestamp(0)));
+      runner.MutableInputs()
+          ->Tag("OUTPUT_DIMENSIONS")
+          .packets.push_back(input_output_dim_packet.At(Timestamp(0)));
+
+      MP_ASSERT_OK(runner.Run());
+      const auto& outputs = runner.Outputs();
+      ASSERT_EQ(outputs.NumEntries(), 1);
+      const std::vector<Packet>& packets = outputs.Tag("IMAGE").packets;
+      ASSERT_EQ(packets.size(), 1);
+      const auto& result = packets[0].Get<ImageFrame>();
+      ASSERT_EQ(output_dim.first, result.Width());
+      ASSERT_EQ(output_dim.second, result.Height());
+
+      auto unique_input_values = computeUniqueValues(packet_mat_view);
+      auto unique_output_values =
+          computeUniqueValues(formats::MatView(&result));
+      EXPECT_THAT(unique_input_values,
+                  ::testing::ContainerEq(unique_output_values));
+    }
+  }
+}
+
 TEST(ImageTransformationCalculatorTest, NearestNeighborResizingGpu) {
   cv::Mat input_mat;
   cv::cvtColor(cv::imread(file::JoinPath("./",
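Both new tests compare the set of distinct pixel values before and after NEAREST resizing. They call a computeUniqueValues() helper that already exists elsewhere in the test file and is not part of this diff; a minimal hypothetical sketch of such a helper for a single-channel float cv::Mat might look like the following (assumed implementation, shown only to make the assertions readable):

    // Hypothetical helper (not in this diff): collects the distinct float
    // values of a single-channel CV_32FC1 matrix. Requires <set> in addition
    // to the OpenCV headers already included above.
    std::set<float> computeUniqueValues(const cv::Mat& mat) {
      std::set<float> values;
      for (int r = 0; r < mat.rows; ++r) {
        for (int c = 0; c < mat.cols; ++c) {
          values.insert(mat.at<float>(r, c));
        }
      }
      return values;
    }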
@@ -170,5 +232,84 @@ TEST(ImageTransformationCalculatorTest, NearestNeighborResizingGpu) {
   }
 }
 
+TEST(ImageTransformationCalculatorTest,
+     NearestNeighborResizingWorksForFloatTexture) {
+  cv::Mat input_mat;
+  cv::cvtColor(cv::imread(file::JoinPath("./",
+                                         "/mediapipe/calculators/"
+                                         "image/testdata/binary_mask.png")),
+               input_mat, cv::COLOR_BGR2GRAY);
+  Packet input_image_packet = MakePacket<ImageFrame>(
+      ImageFormat::VEC32F1, input_mat.size().width, input_mat.size().height);
+  cv::Mat packet_mat_view =
+      formats::MatView(&(input_image_packet.Get<ImageFrame>()));
+  input_mat.convertTo(packet_mat_view, CV_32FC1, 1 / 255.f);
+
+  std::vector<std::pair<int, int>> output_dims{
+      {256, 333}, {512, 512}, {1024, 1024}};
+
+  for (auto& output_dim : output_dims) {
+    std::vector<std::string> scale_modes{"FIT"};  //, "STRETCH"};
+    for (const auto& scale_mode : scale_modes) {
+      CalculatorGraphConfig graph_config =
+          ParseTextProtoOrDie<CalculatorGraphConfig>(
+              absl::Substitute(R"(
+        input_stream: "input_image"
+        input_stream: "image_size"
+        output_stream: "output_image"
+
+        node {
+          calculator: "ImageFrameToGpuBufferCalculator"
+          input_stream: "input_image"
+          output_stream: "input_image_gpu"
+        }
+
+        node {
+          calculator: "ImageTransformationCalculator"
+          input_stream: "IMAGE_GPU:input_image_gpu"
+          input_stream: "OUTPUT_DIMENSIONS:image_size"
+          output_stream: "IMAGE_GPU:output_image_gpu"
+          options: {
+            [mediapipe.ImageTransformationCalculatorOptions.ext]: {
+              scale_mode: $0
+              interpolation_mode: NEAREST
+            }
+          }
+        }
+        node {
+          calculator: "GpuBufferToImageFrameCalculator"
+          input_stream: "output_image_gpu"
+          output_stream: "output_image"
+        })",
+                               scale_mode));
+
+      std::vector<Packet> output_image_packets;
+      tool::AddVectorSink("output_image", &graph_config, &output_image_packets);
+
+      CalculatorGraph graph(graph_config);
+      MP_ASSERT_OK(graph.StartRun({}));
+
+      MP_ASSERT_OK(graph.AddPacketToInputStream(
+          "input_image", input_image_packet.At(Timestamp(0))));
+      MP_ASSERT_OK(graph.AddPacketToInputStream(
+          "image_size",
+          MakePacket<std::pair<int, int>>(output_dim).At(Timestamp(0))));
+
+      MP_ASSERT_OK(graph.WaitUntilIdle());
+      ASSERT_THAT(output_image_packets, testing::SizeIs(1));
+
+      const auto& output_image = output_image_packets[0].Get<ImageFrame>();
+      ASSERT_EQ(output_dim.first, output_image.Width());
+      ASSERT_EQ(output_dim.second, output_image.Height());
+
+      auto unique_input_values = computeUniqueValues(packet_mat_view);
+      auto unique_output_values =
+          computeUniqueValues(formats::MatView(&output_image));
+      EXPECT_THAT(unique_input_values,
+                  ::testing::ContainerEq(unique_output_values));
+    }
+  }
+}
+
 }  // namespace
 }  // namespace mediapipe