Project import generated by Copybara.

GitOrigin-RevId: 1138530ad1578c5d6615b3e3d041775c75d310c4
Commit 61bc4556af (parent 423c21b454)
Author: MediaPipe Team, 2019-09-11 13:58:46 -07:00 (committed by jqtang)
173 changed files with 1806 additions and 1824 deletions


@ -10,7 +10,7 @@ http_archive(
sha256 = "2ef429f5d7ce7111263289644d233707dba35e39696377ebab8b0bc701f7818e",
)
load("@bazel_skylib//lib:versions.bzl", "versions")
versions.check(minimum_bazel_version = "0.23.0")
versions.check(minimum_bazel_version = "0.24.1")
# ABSL cpp library.
http_archive(


@ -74,7 +74,7 @@ class AudioDecoderCalculator : public CalculatorBase {
cc->InputSidePackets().Tag("INPUT_FILE_PATH").Get<std::string>();
const auto& decoder_options = cc->Options<mediapipe::AudioDecoderOptions>();
decoder_ = absl::make_unique<AudioDecoder>();
RETURN_IF_ERROR(decoder_->Initialize(input_file_path, decoder_options));
MP_RETURN_IF_ERROR(decoder_->Initialize(input_file_path, decoder_options));
std::unique_ptr<mediapipe::TimeSeriesHeader> header =
absl::make_unique<mediapipe::TimeSeriesHeader>();
if (decoder_->FillAudioHeader(decoder_options.audio_stream(0), header.get())


@ -39,11 +39,10 @@ TEST(AudioDecoderCalculatorTest, TestWAV) {
file::JoinPath("./",
"/mediapipe/calculators/audio/"
"testdata/sine_wave_1k_44100_mono_2_sec_wav.audio"));
MEDIAPIPE_ASSERT_OK(runner.Run());
MEDIAPIPE_EXPECT_OK(
runner.Outputs()
.Tag("AUDIO_HEADER")
.header.ValidateAsType<mediapipe::TimeSeriesHeader>());
MP_ASSERT_OK(runner.Run());
MP_EXPECT_OK(runner.Outputs()
.Tag("AUDIO_HEADER")
.header.ValidateAsType<mediapipe::TimeSeriesHeader>());
const mediapipe::TimeSeriesHeader& header =
runner.Outputs()
.Tag("AUDIO_HEADER")
@ -71,11 +70,10 @@ TEST(AudioDecoderCalculatorTest, Test48KWAV) {
file::JoinPath("./",
"/mediapipe/calculators/audio/"
"testdata/sine_wave_1k_48000_stereo_2_sec_wav.audio"));
MEDIAPIPE_ASSERT_OK(runner.Run());
MEDIAPIPE_EXPECT_OK(
runner.Outputs()
.Tag("AUDIO_HEADER")
.header.ValidateAsType<mediapipe::TimeSeriesHeader>());
MP_ASSERT_OK(runner.Run());
MP_EXPECT_OK(runner.Outputs()
.Tag("AUDIO_HEADER")
.header.ValidateAsType<mediapipe::TimeSeriesHeader>());
const mediapipe::TimeSeriesHeader& header =
runner.Outputs()
.Tag("AUDIO_HEADER")
@ -103,11 +101,10 @@ TEST(AudioDecoderCalculatorTest, TestMP3) {
file::JoinPath("./",
"/mediapipe/calculators/audio/"
"testdata/sine_wave_1k_44100_stereo_2_sec_mp3.audio"));
MEDIAPIPE_ASSERT_OK(runner.Run());
MEDIAPIPE_EXPECT_OK(
runner.Outputs()
.Tag("AUDIO_HEADER")
.header.ValidateAsType<mediapipe::TimeSeriesHeader>());
MP_ASSERT_OK(runner.Run());
MP_EXPECT_OK(runner.Outputs()
.Tag("AUDIO_HEADER")
.header.ValidateAsType<mediapipe::TimeSeriesHeader>());
const mediapipe::TimeSeriesHeader& header =
runner.Outputs()
.Tag("AUDIO_HEADER")
@ -135,11 +132,10 @@ TEST(AudioDecoderCalculatorTest, TestAAC) {
file::JoinPath("./",
"/mediapipe/calculators/audio/"
"testdata/sine_wave_1k_44100_stereo_2_sec_aac.audio"));
MEDIAPIPE_ASSERT_OK(runner.Run());
MEDIAPIPE_EXPECT_OK(
runner.Outputs()
.Tag("AUDIO_HEADER")
.header.ValidateAsType<mediapipe::TimeSeriesHeader>());
MP_ASSERT_OK(runner.Run());
MP_EXPECT_OK(runner.Outputs()
.Tag("AUDIO_HEADER")
.header.ValidateAsType<mediapipe::TimeSeriesHeader>());
const mediapipe::TimeSeriesHeader& header =
runner.Outputs()
.Tag("AUDIO_HEADER")


@ -51,11 +51,11 @@ static bool SafeMultiply(int x, int y, int* result) {
::mediapipe::Status BasicTimeSeriesCalculatorBase::Open(CalculatorContext* cc) {
TimeSeriesHeader input_header;
RETURN_IF_ERROR(time_series_util::FillTimeSeriesHeaderIfValid(
MP_RETURN_IF_ERROR(time_series_util::FillTimeSeriesHeaderIfValid(
cc->Inputs().Index(0).Header(), &input_header));
auto output_header = new TimeSeriesHeader(input_header);
RETURN_IF_ERROR(MutateHeader(output_header));
MP_RETURN_IF_ERROR(MutateHeader(output_header));
cc->Outputs().Index(0).SetHeader(Adopt(output_header));
return ::mediapipe::OkStatus();
}
@ -63,11 +63,11 @@ static bool SafeMultiply(int x, int y, int* result) {
::mediapipe::Status BasicTimeSeriesCalculatorBase::Process(
CalculatorContext* cc) {
const Matrix& input = cc->Inputs().Index(0).Get<Matrix>();
RETURN_IF_ERROR(time_series_util::IsMatrixShapeConsistentWithHeader(
MP_RETURN_IF_ERROR(time_series_util::IsMatrixShapeConsistentWithHeader(
input, cc->Inputs().Index(0).Header().Get<TimeSeriesHeader>()));
std::unique_ptr<Matrix> output(new Matrix(ProcessMatrix(input)));
RETURN_IF_ERROR(time_series_util::IsMatrixShapeConsistentWithHeader(
MP_RETURN_IF_ERROR(time_series_util::IsMatrixShapeConsistentWithHeader(
*output, cc->Outputs().Index(0).Header().Get<TimeSeriesHeader>()));
cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp());


@ -105,7 +105,7 @@ class FramewiseTransformCalculatorBase : public CalculatorBase {
::mediapipe::Status FramewiseTransformCalculatorBase::Open(
CalculatorContext* cc) {
TimeSeriesHeader input_header;
RETURN_IF_ERROR(time_series_util::FillTimeSeriesHeaderIfValid(
MP_RETURN_IF_ERROR(time_series_util::FillTimeSeriesHeaderIfValid(
cc->Inputs().Index(0).Header(), &input_header));
::mediapipe::Status status = ConfigureTransform(input_header, cc);


@ -112,7 +112,7 @@ TEST_F(MfccCalculatorTest, AudioSampleRateFromInputHeader) {
SetupGraphAndHeader();
SetupRandomInputPackets();
MEDIAPIPE_EXPECT_OK(Run());
MP_EXPECT_OK(Run());
CheckResults(options_.mfcc_count());
}
@ -134,7 +134,7 @@ TEST_F(MelSpectrumCalculatorTest, AudioSampleRateFromInputHeader) {
SetupGraphAndHeader();
SetupRandomInputPackets();
MEDIAPIPE_EXPECT_OK(Run());
MP_EXPECT_OK(Run());
CheckResults(options_.channel_count());
}


@ -74,7 +74,7 @@ void CopyVectorToChannel(const std::vector<float>& vec, Matrix* matrix,
target_sample_rate_ = resample_options.target_sample_rate();
TimeSeriesHeader input_header;
RETURN_IF_ERROR(time_series_util::FillTimeSeriesHeaderIfValid(
MP_RETURN_IF_ERROR(time_series_util::FillTimeSeriesHeaderIfValid(
cc->Inputs().Index(0).Header(), &input_header));
source_sample_rate_ = input_header.sample_rate();


@ -209,25 +209,25 @@ class RationalFactorResampleCalculatorTest
TEST_F(RationalFactorResampleCalculatorTest, Upsample) {
const double kUpsampleRate = input_sample_rate_ * 1.9;
MEDIAPIPE_ASSERT_OK(Run(kUpsampleRate));
MP_ASSERT_OK(Run(kUpsampleRate));
CheckOutput(kUpsampleRate);
}
TEST_F(RationalFactorResampleCalculatorTest, Downsample) {
const double kDownsampleRate = input_sample_rate_ / 1.9;
MEDIAPIPE_ASSERT_OK(Run(kDownsampleRate));
MP_ASSERT_OK(Run(kDownsampleRate));
CheckOutput(kDownsampleRate);
}
TEST_F(RationalFactorResampleCalculatorTest, UsesRationalFactorResampler) {
const double kUpsampleRate = input_sample_rate_ * 2;
MEDIAPIPE_ASSERT_OK(Run(kUpsampleRate));
MP_ASSERT_OK(Run(kUpsampleRate));
CheckOutput(kUpsampleRate);
}
TEST_F(RationalFactorResampleCalculatorTest, PassthroughIfSampleRateUnchanged) {
const double kUpsampleRate = input_sample_rate_;
MEDIAPIPE_ASSERT_OK(Run(kUpsampleRate));
MP_ASSERT_OK(Run(kUpsampleRate));
CheckOutputUnchanged();
}
@ -239,7 +239,7 @@ TEST_F(RationalFactorResampleCalculatorTest, DoesNotDieOnEmptyInput) {
options_.set_target_sample_rate(input_sample_rate_);
InitializeGraph();
FillInputHeader();
MEDIAPIPE_ASSERT_OK(RunGraph());
MP_ASSERT_OK(RunGraph());
EXPECT_TRUE(output().packets.empty());
}


@ -194,7 +194,7 @@ const float SpectrogramCalculator::kLnPowerToDb = 4.342944819032518;
}
TimeSeriesHeader input_header;
RETURN_IF_ERROR(time_series_util::FillTimeSeriesHeaderIfValid(
MP_RETURN_IF_ERROR(time_series_util::FillTimeSeriesHeaderIfValid(
cc->Inputs().Index(0).Header(), &input_header));
input_sample_rate_ = input_header.sample_rate();


@ -303,7 +303,7 @@ TEST_F(SpectrogramCalculatorTest, IntegerFrameDurationNoOverlap) {
FillInputHeader();
SetupConstantInputPackets(input_packet_sizes);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
EXPECT_EQ(OutputFramesPerPacket(), expected_output_packet_sizes);
@ -324,7 +324,7 @@ TEST_F(SpectrogramCalculatorTest, IntegerFrameDurationSomeOverlap) {
FillInputHeader();
SetupConstantInputPackets(input_packet_sizes);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
EXPECT_EQ(OutputFramesPerPacket(), expected_output_packet_sizes);
@ -344,7 +344,7 @@ TEST_F(SpectrogramCalculatorTest, NonintegerFrameDurationAndOverlap) {
FillInputHeader();
SetupConstantInputPackets(input_packet_sizes);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
EXPECT_EQ(OutputFramesPerPacket(), expected_output_packet_sizes);
@ -365,7 +365,7 @@ TEST_F(SpectrogramCalculatorTest, ShortInitialPacketNoOverlap) {
FillInputHeader();
SetupConstantInputPackets(input_packet_sizes);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
EXPECT_EQ(OutputFramesPerPacket(), expected_output_packet_sizes);
@ -382,7 +382,7 @@ TEST_F(SpectrogramCalculatorTest, TrailingSamplesNoPad) {
FillInputHeader();
SetupConstantInputPackets(input_packet_sizes);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
EXPECT_EQ(OutputFramesPerPacket(), expected_output_packet_sizes);
@ -399,7 +399,7 @@ TEST_F(SpectrogramCalculatorTest, NoTrailingSamplesWithPad) {
FillInputHeader();
SetupConstantInputPackets(input_packet_sizes);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
EXPECT_EQ(OutputFramesPerPacket(), expected_output_packet_sizes);
@ -418,7 +418,7 @@ TEST_F(SpectrogramCalculatorTest, TrailingSamplesWithPad) {
FillInputHeader();
SetupConstantInputPackets(input_packet_sizes);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
EXPECT_EQ(OutputFramesPerPacket(), expected_output_packet_sizes);
@ -435,7 +435,7 @@ TEST_F(SpectrogramCalculatorTest, VeryShortInputWillPad) {
FillInputHeader();
SetupConstantInputPackets(input_packet_sizes);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
EXPECT_EQ(OutputFramesPerPacket(), expected_output_packet_sizes);
@ -452,7 +452,7 @@ TEST_F(SpectrogramCalculatorTest, VeryShortInputZeroOutputFramesIfNoPad) {
FillInputHeader();
SetupConstantInputPackets(input_packet_sizes);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
EXPECT_EQ(OutputFramesPerPacket(), expected_output_packet_sizes);
@ -468,7 +468,7 @@ TEST_F(SpectrogramCalculatorTest, DCSignalIsPeakBin) {
// Setup packets with DC input (non-zero constant value).
SetupConstantInputPackets(input_packet_sizes);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
const float dc_frequency_hz = 0.0;
@ -486,7 +486,7 @@ TEST_F(SpectrogramCalculatorTest, A440ToneIsPeakBin) {
const float tone_frequency_hz = 440.0;
SetupCosineInputPackets(input_packet_sizes, tone_frequency_hz);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
int num_output_frames = output().packets[0].Get<Matrix>().cols();
@ -507,7 +507,7 @@ TEST_F(SpectrogramCalculatorTest, SquaredMagnitudeOutputLooksRight) {
// Setup packets with DC input (non-zero constant value).
SetupConstantInputPackets(input_packet_sizes);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
EXPECT_FLOAT_EQ(output().packets[0].Get<Matrix>()(0, 0),
@ -525,7 +525,7 @@ TEST_F(SpectrogramCalculatorTest, DefaultOutputIsSquaredMagnitude) {
// Setup packets with DC input (non-zero constant value).
SetupConstantInputPackets(input_packet_sizes);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
EXPECT_FLOAT_EQ(output().packets[0].Get<Matrix>()(0, 0),
@ -543,7 +543,7 @@ TEST_F(SpectrogramCalculatorTest, LinearMagnitudeOutputLooksRight) {
// Setup packets with DC input (non-zero constant value).
SetupConstantInputPackets(input_packet_sizes);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
EXPECT_FLOAT_EQ(output().packets[0].Get<Matrix>()(0, 0),
@ -561,7 +561,7 @@ TEST_F(SpectrogramCalculatorTest, DbMagnitudeOutputLooksRight) {
// Setup packets with DC input (non-zero constant value).
SetupConstantInputPackets(input_packet_sizes);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
EXPECT_FLOAT_EQ(output().packets[0].Get<Matrix>()(0, 0),
@ -581,7 +581,7 @@ TEST_F(SpectrogramCalculatorTest, OutputScalingLooksRight) {
// Setup packets with DC input (non-zero constant value).
SetupConstantInputPackets(input_packet_sizes);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
EXPECT_FLOAT_EQ(
@ -600,7 +600,7 @@ TEST_F(SpectrogramCalculatorTest, ComplexOutputLooksRight) {
// Setup packets with DC input (non-zero constant value).
SetupConstantInputPackets(input_packet_sizes);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
EXPECT_FLOAT_EQ(std::norm(output().packets[0].Get<Eigen::MatrixXcf>()(0, 0)),
@ -623,7 +623,7 @@ TEST_F(SpectrogramCalculatorTest, ComplexOutputLooksRightForImpulses) {
// Make two impulse packets offset one sample from each other
SetupImpulseInputPackets(input_packet_sizes, input_packet_impulse_offsets);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
const int num_buckets =
@ -671,7 +671,7 @@ TEST_F(SpectrogramCalculatorTest, SquaredMagnitudeOutputLooksRightForNonDC) {
const float tone_frequency_hz = target_bin * (input_sample_rate_ / fft_size);
SetupCosineInputPackets(input_packet_sizes, tone_frequency_hz);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
// For a non-DC bin, the magnitude will be split between positive and
@ -696,7 +696,7 @@ TEST_F(SpectrogramCalculatorTest, ZeroOutputsForZeroInputsWithPaddingEnabled) {
FillInputHeader();
SetupConstantInputPackets(input_packet_sizes);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
EXPECT_EQ(OutputFramesPerPacket(), expected_output_packet_sizes);
@ -713,7 +713,7 @@ TEST_F(SpectrogramCalculatorTest, NumChannelsIsRight) {
FillInputHeader();
const float tone_frequency_hz = 440.0;
SetupCosineInputPackets(input_packet_sizes, tone_frequency_hz);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
EXPECT_EQ(output().packets[0].Get<std::vector<Matrix>>().size(),
@ -732,7 +732,7 @@ TEST_F(SpectrogramCalculatorTest, NumSamplesAndPacketRateAreCleared) {
FillInputHeader();
SetupConstantInputPackets(input_packet_sizes);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
const TimeSeriesHeader& output_header =
output().header.Get<TimeSeriesHeader>();
@ -751,7 +751,7 @@ TEST_F(SpectrogramCalculatorTest, MultichannelSpectrogramSizesAreRight) {
FillInputHeader();
const float tone_frequency_hz = 440.0;
SetupCosineInputPackets(input_packet_sizes, tone_frequency_hz);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
auto spectrograms = output().packets[0].Get<std::vector<Matrix>>();
@ -776,7 +776,7 @@ TEST_F(SpectrogramCalculatorTest, MultichannelSpectrogramValuesAreRight) {
const float tone_frequency_hz = 440.0;
SetupMultichannelInputPackets(input_packet_sizes, tone_frequency_hz);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
auto spectrograms = output().packets[0].Get<std::vector<Matrix>>();
@ -805,7 +805,7 @@ TEST_F(SpectrogramCalculatorTest, MultichannelHandlesShortInitialPacket) {
FillInputHeader();
const float tone_frequency_hz = 440.0;
SetupCosineInputPackets(input_packet_sizes, tone_frequency_hz);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
auto spectrograms = output().packets[0].Get<std::vector<Matrix>>();
@ -833,7 +833,7 @@ TEST_F(SpectrogramCalculatorTest,
FillInputHeader();
const float tone_frequency_hz = 440.0;
SetupCosineInputPackets(input_packet_sizes, tone_frequency_hz);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutputHeadersAndTimestamps();
auto spectrograms = output().packets[0].Get<std::vector<Eigen::MatrixXcf>>();


@ -65,7 +65,7 @@ class StabilizedLogCalculator : public CalculatorBase {
// If the input packets have a header, propagate the header to the output.
if (!cc->Inputs().Index(0).Header().IsEmpty()) {
TimeSeriesHeader input_header;
RETURN_IF_ERROR(time_series_util::FillTimeSeriesHeaderIfValid(
MP_RETURN_IF_ERROR(time_series_util::FillTimeSeriesHeaderIfValid(
cc->Inputs().Index(0).Header(), &input_header));
cc->Outputs().Index(0).SetHeader(
Adopt(new TimeSeriesHeader(input_header)));


@ -42,7 +42,7 @@ class StabilizedLogCalculatorTest
num_input_samples_ = kNumSamples;
}
void RunGraphNoReturn() { MEDIAPIPE_ASSERT_OK(RunGraph()); }
void RunGraphNoReturn() { MP_ASSERT_OK(RunGraph()); }
};
TEST_F(StabilizedLogCalculatorTest, BasicOperation) {
@ -60,7 +60,7 @@ TEST_F(StabilizedLogCalculatorTest, BasicOperation) {
AppendInputPacket(new Matrix(input_data_matrix), timestamp);
}
MEDIAPIPE_ASSERT_OK(RunGraph());
MP_ASSERT_OK(RunGraph());
ExpectOutputHeaderEqualsInputHeader();
for (int output_packet = 0; output_packet < kNumPackets; ++output_packet) {
ExpectApproximatelyEqual(
@ -86,7 +86,7 @@ TEST_F(StabilizedLogCalculatorTest, OutputScaleWorks) {
AppendInputPacket(new Matrix(input_data_matrix), timestamp);
}
MEDIAPIPE_ASSERT_OK(RunGraph());
MP_ASSERT_OK(RunGraph());
ExpectOutputHeaderEqualsInputHeader();
for (int output_packet = 0; output_packet < kNumPackets; ++output_packet) {
ExpectApproximatelyEqual(
@ -101,7 +101,7 @@ TEST_F(StabilizedLogCalculatorTest, ZerosAreStabilized) {
FillInputHeader();
AppendInputPacket(new Matrix(Matrix::Zero(kNumChannels, kNumSamples)),
0 /* timestamp */);
MEDIAPIPE_ASSERT_OK(RunGraph());
MP_ASSERT_OK(RunGraph());
ExpectOutputHeaderEqualsInputHeader();
ExpectApproximatelyEqual(
Matrix::Constant(kNumChannels, kNumSamples, kStabilizer).array().log(),
@ -124,7 +124,7 @@ TEST_F(StabilizedLogCalculatorTest, NegativeValuesDoNotCheckFailIfCheckIsOff) {
AppendInputPacket(
new Matrix(Matrix::Constant(kNumChannels, kNumSamples, -1.0)),
0 /* timestamp */);
MEDIAPIPE_ASSERT_OK(RunGraph());
MP_ASSERT_OK(RunGraph());
// Results are undefined.
}


@ -219,7 +219,7 @@ void TimeSeriesFramerCalculator::FrameOutput(CalculatorContext* cc) {
<< framer_options.frame_overlap_seconds();
TimeSeriesHeader input_header;
RETURN_IF_ERROR(time_series_util::FillTimeSeriesHeaderIfValid(
MP_RETURN_IF_ERROR(time_series_util::FillTimeSeriesHeaderIfValid(
cc->Inputs().Index(0).Header(), &input_header));
sample_rate_ = input_header.sample_rate();


@ -226,7 +226,7 @@ class TimeSeriesFramerCalculatorTest
TEST_F(TimeSeriesFramerCalculatorTest, IntegerSampleDurationNoOverlap) {
options_.set_frame_duration_seconds(100.0 / input_sample_rate_);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutput();
}
@ -234,7 +234,7 @@ TEST_F(TimeSeriesFramerCalculatorTest,
IntegerSampleDurationNoOverlapHammingWindow) {
options_.set_frame_duration_seconds(100.0 / input_sample_rate_);
options_.set_window_function(TimeSeriesFramerCalculatorOptions::HAMMING);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutput();
}
@ -242,14 +242,14 @@ TEST_F(TimeSeriesFramerCalculatorTest,
IntegerSampleDurationNoOverlapHannWindow) {
options_.set_frame_duration_seconds(100.0 / input_sample_rate_);
options_.set_window_function(TimeSeriesFramerCalculatorOptions::HANN);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutput();
}
TEST_F(TimeSeriesFramerCalculatorTest, IntegerSampleDurationAndOverlap) {
options_.set_frame_duration_seconds(100.0 / input_sample_rate_);
options_.set_frame_overlap_seconds(40.0 / input_sample_rate_);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutput();
}
@ -257,7 +257,7 @@ TEST_F(TimeSeriesFramerCalculatorTest, NonintegerSampleDurationAndOverlap) {
options_.set_frame_duration_seconds(98.5 / input_sample_rate_);
options_.set_frame_overlap_seconds(38.4 / input_sample_rate_);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutput();
}
@ -267,7 +267,7 @@ TEST_F(TimeSeriesFramerCalculatorTest, NegativeOverlapExactFrames) {
// the 1100 input samples.
options_.set_frame_duration_seconds(100.0 / input_sample_rate_);
options_.set_frame_overlap_seconds(-10.0 / input_sample_rate_);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
EXPECT_EQ(output().packets.size(), 10);
CheckOutput();
}
@ -277,7 +277,7 @@ TEST_F(TimeSeriesFramerCalculatorTest, NegativeOverlapExactFramesLessSkip) {
// the 1100 input samples.
options_.set_frame_duration_seconds(100.0 / input_sample_rate_);
options_.set_frame_overlap_seconds(-100.0 / input_sample_rate_);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
EXPECT_EQ(output().packets.size(), 6);
CheckOutput();
}
@ -287,7 +287,7 @@ TEST_F(TimeSeriesFramerCalculatorTest, NegativeOverlapWithPadding) {
// on the sixth and last frame given 1100 sample input.
options_.set_frame_duration_seconds(100.0 / input_sample_rate_);
options_.set_frame_overlap_seconds(-100.0 / input_sample_rate_);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
EXPECT_EQ(output().packets.size(), 6);
CheckOutput();
}
@ -297,7 +297,7 @@ TEST_F(TimeSeriesFramerCalculatorTest, FixedFrameOverlap) {
// results in ceil((1100 - 30) / 11) + 1 = 99 packets.
options_.set_frame_duration_seconds(30 / input_sample_rate_);
options_.set_frame_overlap_seconds((30.0 - 11.4) / input_sample_rate_);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
EXPECT_EQ(output().packets.size(), 99);
CheckOutput();
}
@ -308,7 +308,7 @@ TEST_F(TimeSeriesFramerCalculatorTest, VariableFrameOverlap) {
options_.set_frame_duration_seconds(30 / input_sample_rate_);
options_.set_frame_overlap_seconds((30 - 11.4) / input_sample_rate_);
options_.set_emulate_fractional_frame_overlap(true);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
EXPECT_EQ(output().packets.size(), 95);
CheckOutput();
}
@ -319,7 +319,7 @@ TEST_F(TimeSeriesFramerCalculatorTest, VariableFrameSkip) {
options_.set_frame_duration_seconds(30 / input_sample_rate_);
options_.set_frame_overlap_seconds((30 - 41.4) / input_sample_rate_);
options_.set_emulate_fractional_frame_overlap(true);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
EXPECT_EQ(output().packets.size(), 27);
CheckOutput();
}
@ -328,7 +328,7 @@ TEST_F(TimeSeriesFramerCalculatorTest, NoFinalPacketPadding) {
options_.set_frame_duration_seconds(98.5 / input_sample_rate_);
options_.set_pad_final_packet(false);
MEDIAPIPE_ASSERT_OK(Run());
MP_ASSERT_OK(Run());
CheckOutput();
}
@ -369,7 +369,7 @@ class TimeSeriesFramerCalculatorWindowingSanityTest
FillInputHeader();
AppendInputPacket(new Matrix(Matrix::Ones(1, FrameDurationSamples())),
kInitialTimestampOffsetMicroseconds);
MEDIAPIPE_ASSERT_OK(RunGraph());
MP_ASSERT_OK(RunGraph());
ASSERT_EQ(1, output().packets.size());
ASSERT_NEAR(expected_average * FrameDurationSamples(),
output().packets[0].Get<Matrix>().sum(), 1e-5);


@ -76,7 +76,7 @@ mediapipe_cc_proto_library(
name = "packet_cloner_calculator_cc_proto",
srcs = ["packet_cloner_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":packet_cloner_calculator_proto"],
)
@ -84,7 +84,7 @@ mediapipe_cc_proto_library(
name = "packet_resampler_calculator_cc_proto",
srcs = ["packet_resampler_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":packet_resampler_calculator_proto"],
)
@ -92,7 +92,7 @@ mediapipe_cc_proto_library(
name = "split_vector_calculator_cc_proto",
srcs = ["split_vector_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":split_vector_calculator_proto"],
)
@ -108,7 +108,7 @@ mediapipe_cc_proto_library(
name = "quantize_float_vector_calculator_cc_proto",
srcs = ["quantize_float_vector_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":quantize_float_vector_calculator_proto"],
)
@ -116,7 +116,7 @@ mediapipe_cc_proto_library(
name = "sequence_shift_calculator_cc_proto",
srcs = ["sequence_shift_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":sequence_shift_calculator_proto"],
)
@ -124,7 +124,7 @@ mediapipe_cc_proto_library(
name = "gate_calculator_cc_proto",
srcs = ["gate_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":gate_calculator_proto"],
)


@ -42,7 +42,7 @@ TEST_F(AddHeaderCalculatorTest, Works) {
}
// Run calculator.
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
ASSERT_EQ(1, runner.Outputs().NumEntries());
@ -69,7 +69,7 @@ TEST_F(AddHeaderCalculatorTest, HandlesEmptyHeaderStream) {
// No header and no packets.
// Run calculator.
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
EXPECT_TRUE(runner.Outputs().Index(0).header.IsEmpty());
}


@ -45,7 +45,7 @@ TEST(TestConcatenateIntVectorCalculatorTest, EmptyVectorInputs) {
std::vector<std::vector<int>> inputs = {{}, {}, {}};
AddInputVectors(inputs, /*timestamp=*/1, &runner);
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const std::vector<Packet>& outputs = runner.Outputs().Index(0).packets;
EXPECT_EQ(1, outputs.size());
@ -60,7 +60,7 @@ TEST(TestConcatenateIntVectorCalculatorTest, OneTimestamp) {
std::vector<std::vector<int>> inputs = {{1, 2, 3}, {4}, {5, 6}};
AddInputVectors(inputs, /*timestamp=*/1, &runner);
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const std::vector<Packet>& outputs = runner.Outputs().Index(0).packets;
EXPECT_EQ(1, outputs.size());
@ -81,7 +81,7 @@ TEST(TestConcatenateIntVectorCalculatorTest, TwoInputsAtTwoTimestamps) {
std::vector<std::vector<int>> inputs = {{0, 2}, {1}, {3, 5}};
AddInputVectors(inputs, /*timestamp=*/2, &runner);
}
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const std::vector<Packet>& outputs = runner.Outputs().Index(0).packets;
EXPECT_EQ(2, outputs.size());
@ -106,7 +106,7 @@ TEST(TestConcatenateIntVectorCalculatorTest, OneEmptyStreamStillOutput) {
std::vector<std::vector<int>> inputs = {{1, 2, 3}};
AddInputVectors(inputs, /*timestamp=*/1, &runner);
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const std::vector<Packet>& outputs = runner.Outputs().Index(0).packets;
EXPECT_EQ(1, outputs.size());
@ -125,7 +125,7 @@ TEST(TestConcatenateIntVectorCalculatorTest, OneEmptyStreamNoOutput) {
std::vector<std::vector<int>> inputs = {{1, 2, 3}};
AddInputVectors(inputs, /*timestamp=*/1, &runner);
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const std::vector<Packet>& outputs = runner.Outputs().Index(0).packets;
EXPECT_EQ(0, outputs.size());
@ -146,7 +146,7 @@ TEST(ConcatenateFloatVectorCalculatorTest, EmptyVectorInputs) {
std::vector<std::vector<float>> inputs = {{}, {}, {}};
AddInputVectors(inputs, /*timestamp=*/1, &runner);
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const std::vector<Packet>& outputs = runner.Outputs().Index(0).packets;
EXPECT_EQ(1, outputs.size());
@ -162,7 +162,7 @@ TEST(ConcatenateFloatVectorCalculatorTest, OneTimestamp) {
std::vector<std::vector<float>> inputs = {
{1.0f, 2.0f, 3.0f}, {4.0f}, {5.0f, 6.0f}};
AddInputVectors(inputs, /*timestamp=*/1, &runner);
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const std::vector<Packet>& outputs = runner.Outputs().Index(0).packets;
EXPECT_EQ(1, outputs.size());
@ -185,7 +185,7 @@ TEST(ConcatenateFloatVectorCalculatorTest, TwoInputsAtTwoTimestamps) {
{0.0f, 2.0f}, {1.0f}, {3.0f, 5.0f}};
AddInputVectors(inputs, /*timestamp=*/2, &runner);
}
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const std::vector<Packet>& outputs = runner.Outputs().Index(0).packets;
EXPECT_EQ(2, outputs.size());
@ -210,7 +210,7 @@ TEST(ConcatenateFloatVectorCalculatorTest, OneEmptyStreamStillOutput) {
std::vector<std::vector<float>> inputs = {{1.0f, 2.0f, 3.0f}};
AddInputVectors(inputs, /*timestamp=*/1, &runner);
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const std::vector<Packet>& outputs = runner.Outputs().Index(0).packets;
EXPECT_EQ(1, outputs.size());
@ -229,7 +229,7 @@ TEST(ConcatenateFloatVectorCalculatorTest, OneEmptyStreamNoOutput) {
std::vector<std::vector<float>> inputs = {{1.0f, 2.0f, 3.0f}};
AddInputVectors(inputs, /*timestamp=*/1, &runner);
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const std::vector<Packet>& outputs = runner.Outputs().Index(0).packets;
EXPECT_EQ(0, outputs.size());


@ -91,7 +91,7 @@ TEST(FlowLimiterCalculator, OneOutputTest) {
}
// Run the calculator.
MEDIAPIPE_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& frame_output_packets =
runner.Outputs().Index(0).packets;
@ -117,7 +117,7 @@ TEST(FlowLimiterCalculator, BasicTest) {
}
// Run the calculator.
MEDIAPIPE_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& frame_output_packets =
runner.Outputs().Index(0).packets;
@ -198,7 +198,7 @@ class FlowLimiterCalculatorTest : public testing::Test {
close_count_++;
return ::mediapipe::OkStatus();
};
MEDIAPIPE_ASSERT_OK(graph_.Initialize(
MP_ASSERT_OK(graph_.Initialize(
graph_config_, {
{"max_in_flight", MakePacket<int>(max_in_flight)},
{"callback_0", Adopt(new auto(semaphore_0_func))},
@ -209,7 +209,7 @@ class FlowLimiterCalculatorTest : public testing::Test {
// Adds a packet to a graph input stream.
void AddPacket(const std::string& input_name, int value) {
MEDIAPIPE_EXPECT_OK(graph_.AddPacketToInputStream(
MP_EXPECT_OK(graph_.AddPacketToInputStream(
input_name, MakePacket<int>(value).At(Timestamp(value))));
}
@ -277,10 +277,10 @@ class FlowLimiterCalculatorTest : public testing::Test {
//
TEST_F(FlowLimiterCalculatorTest, BackEdgeCloses) {
InitializeGraph(1);
MEDIAPIPE_ASSERT_OK(graph_.StartRun({}));
MP_ASSERT_OK(graph_.StartRun({}));
auto send_packet = [this](const std::string& input_name, int64 n) {
MEDIAPIPE_EXPECT_OK(graph_.AddPacketToInputStream(
MP_EXPECT_OK(graph_.AddPacketToInputStream(
input_name, MakePacket<int64>(n).At(Timestamp(n))));
};
@ -288,14 +288,14 @@ TEST_F(FlowLimiterCalculatorTest, BackEdgeCloses) {
send_packet("in_1", i * 10);
// This next input should be dropped.
send_packet("in_1", i * 10 + 5);
MEDIAPIPE_EXPECT_OK(graph_.WaitUntilIdle());
MP_EXPECT_OK(graph_.WaitUntilIdle());
send_packet("in_2", i * 10);
exit_semaphore_.Release(1);
MEDIAPIPE_EXPECT_OK(graph_.WaitUntilIdle());
MP_EXPECT_OK(graph_.WaitUntilIdle());
}
MEDIAPIPE_EXPECT_OK(graph_.CloseInputStream("in_1"));
MEDIAPIPE_EXPECT_OK(graph_.CloseInputStream("in_2"));
MEDIAPIPE_EXPECT_OK(graph_.WaitUntilIdle());
MP_EXPECT_OK(graph_.CloseInputStream("in_1"));
MP_EXPECT_OK(graph_.CloseInputStream("in_2"));
MP_EXPECT_OK(graph_.WaitUntilIdle());
// All output streams are closed and all output packets are delivered,
// with stream "in_1" and stream "in_2" closed.
@ -321,17 +321,17 @@ TEST_F(FlowLimiterCalculatorTest, BackEdgeCloses) {
// input streams are closed after the last input packet has been processed.
TEST_F(FlowLimiterCalculatorTest, AllStreamsClose) {
InitializeGraph(1);
MEDIAPIPE_ASSERT_OK(graph_.StartRun({}));
MP_ASSERT_OK(graph_.StartRun({}));
exit_semaphore_.Release(10);
for (int i = 0; i < 10; i++) {
AddPacket("in_1", i);
MEDIAPIPE_EXPECT_OK(graph_.WaitUntilIdle());
MP_EXPECT_OK(graph_.WaitUntilIdle());
AddPacket("in_2", i);
MEDIAPIPE_EXPECT_OK(graph_.WaitUntilIdle());
MP_EXPECT_OK(graph_.WaitUntilIdle());
}
MEDIAPIPE_EXPECT_OK(graph_.CloseAllInputStreams());
MEDIAPIPE_EXPECT_OK(graph_.WaitUntilIdle());
MP_EXPECT_OK(graph_.CloseAllInputStreams());
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(out_1_packets_), TimestampValues(out_2_packets_));
EXPECT_EQ(TimestampValues(out_1_packets_),
@ -371,7 +371,7 @@ TEST(FlowLimiterCalculator, TwoStreams) {
};
CalculatorGraph graph_;
MEDIAPIPE_EXPECT_OK(graph_.Initialize(
MP_EXPECT_OK(graph_.Initialize(
graph_config_,
{
{"max_in_flight", MakePacket<int>(1)},
@ -379,63 +379,63 @@ TEST(FlowLimiterCalculator, TwoStreams) {
MakePacket<std::function<void(const Packet&)>>(allow_cb)},
}));
MEDIAPIPE_EXPECT_OK(graph_.StartRun({}));
MP_EXPECT_OK(graph_.StartRun({}));
auto send_packet = [&graph_](const std::string& input_name, int n) {
MEDIAPIPE_EXPECT_OK(graph_.AddPacketToInputStream(
MP_EXPECT_OK(graph_.AddPacketToInputStream(
input_name, MakePacket<int>(n).At(Timestamp(n))));
};
send_packet("in_a", 1);
MEDIAPIPE_EXPECT_OK(graph_.WaitUntilIdle());
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(allow, false);
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{}));
send_packet("in_a", 2);
send_packet("in_b", 1);
MEDIAPIPE_EXPECT_OK(graph_.WaitUntilIdle());
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1}));
EXPECT_EQ(allow, false);
send_packet("finished", 1);
MEDIAPIPE_EXPECT_OK(graph_.WaitUntilIdle());
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1}));
EXPECT_EQ(allow, true);
send_packet("in_b", 2);
MEDIAPIPE_EXPECT_OK(graph_.WaitUntilIdle());
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1}));
EXPECT_EQ(allow, true);
send_packet("in_b", 3);
MEDIAPIPE_EXPECT_OK(graph_.WaitUntilIdle());
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1, 3}));
EXPECT_EQ(allow, false);
send_packet("in_b", 4);
MEDIAPIPE_EXPECT_OK(graph_.WaitUntilIdle());
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1, 3}));
EXPECT_EQ(allow, false);
send_packet("in_a", 3);
MEDIAPIPE_EXPECT_OK(graph_.WaitUntilIdle());
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1, 3}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1, 3}));
EXPECT_EQ(allow, false);
send_packet("finished", 3);
MEDIAPIPE_EXPECT_OK(graph_.WaitUntilIdle());
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(a_passed), (std::vector<int64>{1, 3}));
EXPECT_EQ(TimestampValues(b_passed), (std::vector<int64>{1, 3}));
EXPECT_EQ(allow, true);
MEDIAPIPE_EXPECT_OK(graph_.CloseAllInputStreams());
MEDIAPIPE_EXPECT_OK(graph_.WaitUntilDone());
MP_EXPECT_OK(graph_.CloseAllInputStreams());
MP_EXPECT_OK(graph_.WaitUntilDone());
}
TEST(FlowLimiterCalculator, CanConsume) {
@ -465,7 +465,7 @@ TEST(FlowLimiterCalculator, CanConsume) {
};
CalculatorGraph graph_;
MEDIAPIPE_EXPECT_OK(graph_.Initialize(
MP_EXPECT_OK(graph_.Initialize(
graph_config_,
{
{"max_in_flight", MakePacket<int>(1)},
@ -473,21 +473,21 @@ TEST(FlowLimiterCalculator, CanConsume) {
MakePacket<std::function<void(const Packet&)>>(allow_cb)},
}));
MEDIAPIPE_EXPECT_OK(graph_.StartRun({}));
MP_EXPECT_OK(graph_.StartRun({}));
auto send_packet = [&graph_](const std::string& input_name, int n) {
MEDIAPIPE_EXPECT_OK(graph_.AddPacketToInputStream(
MP_EXPECT_OK(graph_.AddPacketToInputStream(
input_name, MakePacket<int>(n).At(Timestamp(n))));
};
send_packet("in", 1);
MEDIAPIPE_EXPECT_OK(graph_.WaitUntilIdle());
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(allow, false);
EXPECT_EQ(TimestampValues(in_sampled_packets_), (std::vector<int64>{1}));
MEDIAPIPE_EXPECT_OK(in_sampled_packets_[0].Consume<int>());
MP_EXPECT_OK(in_sampled_packets_[0].Consume<int>());
MEDIAPIPE_EXPECT_OK(graph_.CloseAllInputStreams());
MEDIAPIPE_EXPECT_OK(graph_.WaitUntilDone());
MP_EXPECT_OK(graph_.CloseAllInputStreams());
MP_EXPECT_OK(graph_.WaitUntilDone());
}
} // anonymous namespace


@ -32,7 +32,7 @@ class GateCalculatorTest : public ::testing::Test {
->Tag(control_tag)
.packets.push_back(MakePacket<bool>(control).At(Timestamp(timestamp)));
MEDIAPIPE_ASSERT_OK(runner_->Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner_->Run()) << "Calculator execution failed.";
}
void SetRunner(const std::string& proto) {


@ -217,23 +217,23 @@ class ImmediateMuxCalculatorTest : public ::testing::Test {
// Start running the graph.
CalculatorGraph graph;
MEDIAPIPE_ASSERT_OK(graph.Initialize(graph_config_));
MEDIAPIPE_ASSERT_OK(graph.StartRun({}));
MP_ASSERT_OK(graph.Initialize(graph_config_));
MP_ASSERT_OK(graph.StartRun({}));
// Send each packet to the graph in the specified order.
for (int t = 0; t < input_sets.size(); t++) {
const std::vector<Packet>& input_set = input_sets[t];
MEDIAPIPE_EXPECT_OK(graph.WaitUntilIdle());
MP_EXPECT_OK(graph.WaitUntilIdle());
for (int i = 0; i < input_set.size(); i++) {
const Packet& packet = input_set[i];
if (!IsNone(packet)) {
MEDIAPIPE_EXPECT_OK(graph.AddPacketToInputStream(
MP_EXPECT_OK(graph.AddPacketToInputStream(
absl::StrCat("input_packets_", i), packet));
}
}
}
MEDIAPIPE_ASSERT_OK(graph.CloseAllInputStreams());
MEDIAPIPE_ASSERT_OK(graph.WaitUntilDone());
MP_ASSERT_OK(graph.CloseAllInputStreams());
MP_ASSERT_OK(graph.WaitUntilDone());
}
CalculatorGraphConfig graph_config_;
@ -335,22 +335,22 @@ TEST_F(ImmediateMuxCalculatorTest, Demux) {
// Start the graph and add five input packets.
CalculatorGraph graph;
MEDIAPIPE_ASSERT_OK(graph.Initialize(
graph_config_, {
{"callback_0", Adopt(new auto(wait_0))},
{"callback_1", Adopt(new auto(wait_1))},
}));
MEDIAPIPE_ASSERT_OK(graph.ObserveOutputStream("output_packets_0", out_cb));
MEDIAPIPE_ASSERT_OK(graph.StartRun({}));
MEDIAPIPE_EXPECT_OK(
MP_ASSERT_OK(graph.Initialize(graph_config_,
{
{"callback_0", Adopt(new auto(wait_0))},
{"callback_1", Adopt(new auto(wait_1))},
}));
MP_ASSERT_OK(graph.ObserveOutputStream("output_packets_0", out_cb));
MP_ASSERT_OK(graph.StartRun({}));
MP_EXPECT_OK(
graph.AddPacketToInputStream("input_packets_0", PacketAt(10000)));
MEDIAPIPE_EXPECT_OK(
MP_EXPECT_OK(
graph.AddPacketToInputStream("input_packets_0", PacketAt(20000)));
MEDIAPIPE_EXPECT_OK(
MP_EXPECT_OK(
graph.AddPacketToInputStream("input_packets_0", PacketAt(30000)));
MEDIAPIPE_EXPECT_OK(
MP_EXPECT_OK(
graph.AddPacketToInputStream("input_packets_0", PacketAt(40000)));
MEDIAPIPE_EXPECT_OK(
MP_EXPECT_OK(
graph.AddPacketToInputStream("input_packets_0", PacketAt(50000)));
// Release the outputs in order 20000, 10000, 30000, 50000, 40000.
@ -362,8 +362,8 @@ TEST_F(ImmediateMuxCalculatorTest, Demux) {
semaphore_0.Release(1); // 50000
wait_for([&] { return out_packets.size() >= 3; });
semaphore_1.Release(1); // 40000
MEDIAPIPE_ASSERT_OK(graph.CloseAllInputStreams());
MEDIAPIPE_ASSERT_OK(graph.WaitUntilDone());
MP_ASSERT_OK(graph.CloseAllInputStreams());
MP_ASSERT_OK(graph.WaitUntilDone());
// Output packets 10000 and 40000 are superseded and dropped.
EXPECT_THAT(TimestampValues(out_packets), ElementsAre(20000, 30000, 50000));


@ -219,7 +219,7 @@ TEST(MatrixMultiplyCalculatorTest, Multiply) {
Adopt(sample).At(Timestamp(i)));
}
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
EXPECT_EQ(runner.MutableInputs()->Index(0).packets.size(),
runner.Outputs().Index(0).packets.size());


@ -112,7 +112,7 @@ TEST(MatrixSubtractCalculatorTest, SubtractFromInput) {
runner.MutableInputs()->Tag("MINUEND").packets.push_back(
Adopt(input_matrix).At(Timestamp(0)));
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
EXPECT_EQ(1, runner.Outputs().Index(0).packets.size());
EXPECT_EQ(Timestamp(0), runner.Outputs().Index(0).packets[0].Timestamp());
@ -142,7 +142,7 @@ TEST(MatrixSubtractCalculatorTest, SubtractFromSideMatrix) {
->Tag("SUBTRAHEND")
.packets.push_back(Adopt(input_matrix).At(Timestamp(0)));
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
EXPECT_EQ(1, runner.Outputs().Index(0).packets.size());
EXPECT_EQ(Timestamp(0), runner.Outputs().Index(0).packets[0].Timestamp());


@ -67,7 +67,7 @@ TEST_F(MatrixToVectorCalculatorTest, SingleRow) {
SetInputHeader(1, 4); // 1 channel x 4 samples
const std::vector<float>& data_vector = {1.0, 2.0, 3.0, 4.0};
AppendInput(data_vector, 0);
MEDIAPIPE_ASSERT_OK(RunGraph());
MP_ASSERT_OK(RunGraph());
CheckOutputPacket(0, data_vector);
}
@ -79,7 +79,7 @@ TEST_F(MatrixToVectorCalculatorTest, RegularMatrix) {
5.0, 6.0, 7.0, 8.0};
AppendInput(data_vector, 0);
MEDIAPIPE_ASSERT_OK(RunGraph());
MP_ASSERT_OK(RunGraph());
CheckOutputPacket(0, data_vector);
}


@ -78,7 +78,7 @@ TEST(MediaPipeDetectionToSoapboxDetectionCalculatorTest,
runner.MutableInputs()->Index(1).packets.push_back(
Adopt(new float(35.5)).At(Timestamp(35)));
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
// Expected combined_output: 5.5, 10, 20, 30, 35.5 at times 5, 10, 20, 30, 35.
const std::vector<Packet>& actual_output = runner.Outputs().Index(0).packets;
@ -120,7 +120,7 @@ TEST(MediaPipeDetectionToSoapboxDetectionCalculatorTest,
runner.MutableInputs()->Index(2).packets.push_back(
Adopt(new char('c')).At(Timestamp(10)));
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
// Expected combined_output: 'c', 20.5, 30 at times 10, 20, 30.
const std::vector<Packet>& actual_output = runner.Outputs().Index(0).packets;


@ -37,7 +37,7 @@ TEST(PacketInnerJoinCalculatorTest, AllMatching) {
for (int packet_load : packets_on_stream2) {
runner.MutableInputs()->Index(1).packets.push_back(PacketFrom(packet_load));
}
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
// Check.
const std::vector<int> expected = {0, 1, 2, 3};
ASSERT_EQ(expected.size(), runner.Outputs().Index(0).packets.size());
@ -64,7 +64,7 @@ TEST(PacketInnerJoinCalculatorTest, NoneMatching) {
for (int packet_load : packets_on_stream2) {
runner.MutableInputs()->Index(1).packets.push_back(PacketFrom(packet_load));
}
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
// Check.
EXPECT_TRUE(runner.Outputs().Index(0).packets.empty());
EXPECT_TRUE(runner.Outputs().Index(1).packets.empty());
@ -82,7 +82,7 @@ TEST(PacketInnerJoinCalculatorTest, SomeMatching) {
for (int packet_load : packets_on_stream2) {
runner.MutableInputs()->Index(1).packets.push_back(PacketFrom(packet_load));
}
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
// Check.
const std::vector<int> expected = {0, 2, 4, 6};
ASSERT_EQ(expected.size(), runner.Outputs().Index(0).packets.size());


@ -287,9 +287,9 @@ TimestampDiff TimestampDiffFromSeconds(double seconds) {
}
}
if (jitter_ != 0.0 && random_ != nullptr) {
RETURN_IF_ERROR(ProcessWithJitter(cc));
MP_RETURN_IF_ERROR(ProcessWithJitter(cc));
} else {
RETURN_IF_ERROR(ProcessWithoutJitter(cc));
MP_RETURN_IF_ERROR(ProcessWithoutJitter(cc));
}
last_packet_ = cc->Inputs().Get(input_data_id_).Value();
return ::mediapipe::OkStatus();


@ -103,7 +103,7 @@ TEST(PacketResamplerCalculatorTest, NoPacketsInStream) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:30}");
runner.SetInput({});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
}
}
@ -114,7 +114,7 @@ TEST(PacketResamplerCalculatorTest, SinglePacketInStream) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:30}");
runner.SetInput({0});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({0}, {0});
}
@ -124,7 +124,7 @@ TEST(PacketResamplerCalculatorTest, SinglePacketInStream) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:30}");
runner.SetInput({1000});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({1000}, {1000});
}
@ -134,7 +134,7 @@ TEST(PacketResamplerCalculatorTest, SinglePacketInStream) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:30}");
runner.SetInput({16668});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({16668}, {16668});
}
}
@ -146,7 +146,7 @@ TEST(PacketResamplerCalculatorTest, TwoPacketsInStream) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:30}");
runner.SetInput({0, 16666});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({0}, {0});
}
@ -156,7 +156,7 @@ TEST(PacketResamplerCalculatorTest, TwoPacketsInStream) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:30}");
runner.SetInput({0, 16667});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({0, 16667}, {0, 33333});
}
@ -166,7 +166,7 @@ TEST(PacketResamplerCalculatorTest, TwoPacketsInStream) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:30}");
runner.SetInput({0, 49999});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({0, 49999}, {0, 33333});
}
@ -176,7 +176,7 @@ TEST(PacketResamplerCalculatorTest, TwoPacketsInStream) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:30}");
runner.SetInput({0, 50000});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({0, 0, 50000}, {0, 33333, 66667});
}
@ -186,7 +186,7 @@ TEST(PacketResamplerCalculatorTest, TwoPacketsInStream) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:30}");
runner.SetInput({2000, 118666});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({2000, 2000, 2000, 118666},
{2000, 35333, 68667, 102000});
}
@ -197,7 +197,7 @@ TEST(PacketResamplerCalculatorTest, InputAtExactFrequencyMiddlepoints) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:30}");
runner.SetInput({0, 33333, 66667, 100000, 133333, 166667, 200000});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps(
{0, 33333, 66667, 100000, 133333, 166667, 200000},
{0, 33333, 66667, 100000, 133333, 166667, 200000});
@ -210,7 +210,7 @@ TEST(PacketResamplerCalculatorTest, MultiplePacketsForPeriods) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:30}");
runner.SetInput({0, 16666, 16667, 20000, 33300, 49999, 50000, 66600});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({0, 33300, 66600}, {0, 33333, 66667});
}
@ -222,7 +222,7 @@ TEST(PacketResamplerCalculatorTest, FillPeriodsWithLatestPacket) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:30}");
runner.SetInput({0, 5000, 16666, 83334});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({0, 16666, 16666, 83334},
{0, 33333, 66667, 100000});
}
@ -232,7 +232,7 @@ TEST(PacketResamplerCalculatorTest, FillPeriodsWithLatestPacket) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:30}");
runner.SetInput({0, 16666, 16667, 25000, 33000, 35000, 135000});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({0, 33000, 35000, 35000, 135000},
{0, 33333, 66667, 100000, 133333});
}
@ -242,7 +242,7 @@ TEST(PacketResamplerCalculatorTest, FillPeriodsWithLatestPacket) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:30}");
runner.SetInput({0, 15000, 32000, 49999, 150000});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({0, 32000, 49999, 49999, 49999, 150000},
{0, 33333, 66667, 100000, 133333, 166667});
}
@ -255,7 +255,7 @@ TEST(PacketResamplerCalculatorTest, SuperHighFrameRate) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:500000}");
runner.SetInput({0, 10, 13});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({0, 0, 0, 0, 0, 10, 10, 13},
{0, 2, 4, 6, 8, 10, 12, 14});
}
@ -266,7 +266,7 @@ TEST(PacketResamplerCalculatorTest, SuperHighFrameRate) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:1000000}");
runner.SetInput({0, 10, 13});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps(
{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 10, 10, 10, 13},
{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13});
@ -280,7 +280,7 @@ TEST(PacketResamplerCalculatorTest, NegativeTimestampTest) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:30}");
runner.SetInput({-200, -20, 16466});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({-200}, {-200});
}
@ -290,7 +290,7 @@ TEST(PacketResamplerCalculatorTest, NegativeTimestampTest) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:30}");
runner.SetInput({-200, -20, 16467});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({-200, 16467}, {-200, 33133});
}
@ -300,7 +300,7 @@ TEST(PacketResamplerCalculatorTest, NegativeTimestampTest) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:30}");
runner.SetInput({-500, 66667});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({-500, -500, 66667}, {-500, 32833, 66167});
}
@ -310,7 +310,7 @@ TEST(PacketResamplerCalculatorTest, NegativeTimestampTest) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:30}");
runner.SetInput({-50000, -33334, 33334});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({-50000, -33334, -33334, 33334},
{-50000, -16667, 16667, 50000});
}
@ -323,7 +323,7 @@ TEST(PacketResamplerCalculatorTest, ExactFramesPerSecond) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:50}");
runner.SetInput({0, 9999, 29999});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({0, 29999}, {0, 20000});
}
@ -333,7 +333,7 @@ TEST(PacketResamplerCalculatorTest, ExactFramesPerSecond) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:50}");
runner.SetInput({0, 10000, 50000});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({0, 10000, 10000, 50000},
{0, 20000, 40000, 60000});
}
@ -347,7 +347,7 @@ TEST(PacketResamplerCalculatorTest, FrameRateTest) {
"{frame_rate:50, output_header:UPDATE_VIDEO_HEADER}");
runner.SetInput({0, 10000, 30000, 50000, 60000});
runner.SetVideoHeader(50.0);
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({0, 10000, 30000, 60000},
{0, 20000, 40000, 60000});
runner.CheckVideoHeader(50.0);
@ -360,7 +360,7 @@ TEST(PacketResamplerCalculatorTest, FrameRateTest) {
"{frame_rate:50, output_header:UPDATE_VIDEO_HEADER}");
runner.SetInput({0, 5000, 10010, 15001, 19990});
runner.SetVideoHeader(200.0);
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({0, 19990}, {0, 20000});
runner.CheckVideoHeader(50.0);
}
@ -372,7 +372,7 @@ TEST(PacketResamplerCalculatorTest, FrameRateTest) {
"{frame_rate:50, output_header:PASS_HEADER}");
runner.SetInput({0, 5000, 10010, 15001, 19990});
runner.SetVideoHeader(200.0);
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({0, 19990}, {0, 20000});
runner.CheckVideoHeader(200.0);
}
@ -404,7 +404,7 @@ TEST(PacketResamplerCalculatorTest, SetVideoHeader) {
->Tag("VIDEO_HEADER")
.packets.push_back(
Adopt(new VideoHeader(video_header_in)).At(Timestamp::PreStream()));
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
ASSERT_EQ(1, runner.Outputs().Tag("VIDEO_HEADER").packets.size());
EXPECT_EQ(Timestamp::PreStream(),
@ -424,7 +424,7 @@ TEST(PacketResamplerCalculatorTest, FlushLastPacketWithoutRound) {
frame_rate: 1
})");
runner.SetInput({0, 333333, 666667, 1000000, 1333333});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
// 1333333 is not emitted as 2000000, because it does not round to 2000000.
runner.CheckOutputTimestamps({0, 1000000}, {0, 1000000});
}
@ -435,7 +435,7 @@ TEST(PacketResamplerCalculatorTest, FlushLastPacketWithRound) {
frame_rate: 1
})");
runner.SetInput({0, 333333, 666667, 1000000, 1333333, 1666667});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
// 1666667 is emitted as 2000000, because it rounds to 2000000.
runner.CheckOutputTimestamps({0, 1000000, 1666667}, {0, 1000000, 2000000});
}
@ -447,7 +447,7 @@ TEST(PacketResamplerCalculatorTest, DoNotFlushLastPacketWithoutRound) {
flush_last_packet: false
})");
runner.SetInput({0, 333333, 666667, 1000000, 1333333});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
// 1333333 is not emitted no matter what; see FlushLastPacketWithoutRound.
runner.CheckOutputTimestamps({0, 1000000}, {0, 1000000});
}
@ -459,7 +459,7 @@ TEST(PacketResamplerCalculatorTest, DoNotFlushLastPacketWithRound) {
flush_last_packet: false
})");
runner.SetInput({0, 333333, 666667, 1000000, 1333333, 1666667});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
// 1666667 is not emitted due to flush_last_packet: false.
runner.CheckOutputTimestamps({0, 1000000}, {0, 1000000});
}
@ -473,7 +473,7 @@ TEST(PacketResamplerCalculatorTest, InputAtExactFrequencyMiddlepointsAligned) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:30}");
runner.SetInput({33111, 66667, 100000, 133333, 166667, 200000});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({33111, 66667, 100000, 133333, 166667, 200000},
{33111, 66444, 99778, 133111, 166444, 199778});
}
@ -484,7 +484,7 @@ TEST(PacketResamplerCalculatorTest, InputAtExactFrequencyMiddlepointsAligned) {
"{frame_rate:30 "
"base_timestamp:0}");
runner.SetInput({33111, 66667, 100000, 133333, 166667, 200000});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps(
{33111, 66667, 100000, 133333, 166667, 200000},
{33333, 66666, 100000, 133333, 166666, 200000});
@ -499,7 +499,7 @@ TEST(PacketResamplerCalculatorTest, MultiplePacketsForPeriodsAligned) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:30}");
runner.SetInput({-222, 16666, 16667, 20000, 33300, 49999, 50000, 66600});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({-222, 33300, 66600}, {-222, 33111, 66445});
}
{
@ -509,7 +509,7 @@ TEST(PacketResamplerCalculatorTest, MultiplePacketsForPeriodsAligned) {
"{frame_rate:30 "
"base_timestamp:900011}");
runner.SetInput({-222, 16666, 16667, 20000, 33300, 49999, 50000, 66600});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({-222, 33300, 66600}, {11, 33344, 66678});
}
{
@ -521,7 +521,7 @@ TEST(PacketResamplerCalculatorTest, MultiplePacketsForPeriodsAligned) {
"base_timestamp:11}");
runner.SetInput(
{899888, 916666, 916667, 920000, 933300, 949999, 950000, 966600});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({899888, 933300, 966600},
{900011, 933344, 966678});
}
@ -536,7 +536,7 @@ TEST(PacketResamplerCalculatorTest, FillPeriodsWithLatestPacketAligned) {
"[mediapipe.PacketResamplerCalculatorOptions.ext]: "
"{frame_rate:30}");
runner.SetInput({-222, 15000, 32000, 49999, 150000});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({-222, 32000, 49999, 49999, 49999, 150000},
{-222, 33111, 66445, 99778, 133111, 166445});
}
@ -547,7 +547,7 @@ TEST(PacketResamplerCalculatorTest, FillPeriodsWithLatestPacketAligned) {
"{frame_rate:30 "
"base_timestamp:0}");
runner.SetInput({-222, 15000, 32000, 49999, 150000});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({-222, 32000, 49999, 49999, 49999, 150000},
{0, 33333, 66667, 100000, 133333, 166667});
}
@ -565,7 +565,7 @@ TEST(PacketResamplerCalculatorTest, FirstInputAfterMiddlepointAligned) {
"{frame_rate:30 "
"base_timestamp:0}");
runner.SetInput({66667, 100020, 133333, 166667});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({66667, 100020, 133333, 166667},
{66667, 100000, 133334, 166667});
}
@ -582,7 +582,7 @@ TEST(PacketResamplerCalculatorTest, FirstInputAfterMiddlepointAligned) {
"{frame_rate:30 "
"base_timestamp:0}");
runner.SetInput({100020, 133333, 166667});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({100020, 133333, 166667},
{100000, 133333, 166667});
}
@ -596,7 +596,7 @@ TEST(PacketResamplerCalculatorTest, OutputTimestampRangeAligned) {
"{frame_rate:30 "
"base_timestamp:0}");
runner.SetInput({-222, 15000, 32000, 49999, 150000});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({-222, 32000, 49999, 49999, 49999, 150000},
{0, 33333, 66667, 100000, 133333, 166667});
}
@ -609,7 +609,7 @@ TEST(PacketResamplerCalculatorTest, OutputTimestampRangeAligned) {
"start_time:40000 "
"end_time:160000}");
runner.SetInput({-222, 15000, 32000, 49999, 150000});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({49999, 49999, 49999},
{66667, 100000, 133333});
}
@ -624,7 +624,7 @@ TEST(PacketResamplerCalculatorTest, OutputTimestampRangeAligned) {
"end_time:160000 "
"round_limits:true}");
runner.SetInput({-222, 15000, 32000, 49999, 150000});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
runner.CheckOutputTimestamps({32000, 49999, 49999, 49999, 150000},
{33333, 66667, 100000, 133333, 166667});
}
@ -654,7 +654,7 @@ TEST(PacketResamplerCalculatorTest, OptionsSidePacket) {
})"));
runner.MutableSidePackets()->Tag("OPTIONS") = Adopt(options);
runner.SetInput({-222, 15000, 32000, 49999, 150000});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
EXPECT_EQ(6, runner.Outputs().Index(0).packets.size());
}
{
@ -670,7 +670,7 @@ TEST(PacketResamplerCalculatorTest, OptionsSidePacket) {
runner.MutableSidePackets()->Tag("OPTIONS") = Adopt(options);
runner.SetInput({-222, 15000, 32000, 49999, 150000});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
EXPECT_EQ(6, runner.Outputs().Index(0).packets.size());
}
}
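Note on the aligned expectations above: with frame_rate:30 the output period is 1,000,000 / 30, about 33,333.3 microseconds, and with base_timestamp:0 the emitted timestamps land on the nearest whole-microsecond multiples of that period. A small stand-alone C++ check of that arithmetic (a sketch of what the expectations imply, not the calculator's implementation):

#include <cmath>
#include <cstdio>

int main() {
  const double period_us = 1e6 / 30.0;  // ~33333.33 microseconds per frame at 30 fps
  for (int i = 0; i < 6; ++i) {
    // Rounding each multiple of the period to the nearest microsecond yields
    // 0, 33333, 66667, 100000, 133333, 166667, the timestamps the
    // base_timestamp:0 tests above expect.
    std::printf("%lld\n", std::llround(i * period_us));
  }
  return 0;
}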

View File

@ -74,11 +74,11 @@ TEST(PreviousLoopbackCalculator, CorrectTimestamps) {
tool::AddVectorSink("pair", &graph_config_, &in_prev);
CalculatorGraph graph_;
MEDIAPIPE_ASSERT_OK(graph_.Initialize(graph_config_, {}));
MEDIAPIPE_ASSERT_OK(graph_.StartRun({}));
MP_ASSERT_OK(graph_.Initialize(graph_config_, {}));
MP_ASSERT_OK(graph_.StartRun({}));
auto send_packet = [&graph_](const std::string& input_name, int n) {
MEDIAPIPE_EXPECT_OK(graph_.AddPacketToInputStream(
MP_EXPECT_OK(graph_.AddPacketToInputStream(
input_name, MakePacket<int>(n).At(Timestamp(n))));
};
auto pair_values = [](const Packet& packet) {
@ -89,22 +89,22 @@ TEST(PreviousLoopbackCalculator, CorrectTimestamps) {
};
send_packet("in", 1);
MEDIAPIPE_EXPECT_OK(graph_.WaitUntilIdle());
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(in_prev), (std::vector<int64>{1}));
EXPECT_EQ(pair_values(in_prev.back()), std::make_pair(1, -1));
send_packet("in", 5);
MEDIAPIPE_EXPECT_OK(graph_.WaitUntilIdle());
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(in_prev), (std::vector<int64>{1, 5}));
EXPECT_EQ(pair_values(in_prev.back()), std::make_pair(5, 1));
send_packet("in", 15);
MEDIAPIPE_EXPECT_OK(graph_.WaitUntilIdle());
MP_EXPECT_OK(graph_.WaitUntilIdle());
EXPECT_EQ(TimestampValues(in_prev), (std::vector<int64>{1, 5, 15}));
EXPECT_EQ(pair_values(in_prev.back()), std::make_pair(15, 5));
MEDIAPIPE_EXPECT_OK(graph_.CloseAllInputStreams());
MEDIAPIPE_EXPECT_OK(graph_.WaitUntilDone());
MP_EXPECT_OK(graph_.CloseAllInputStreams());
MP_EXPECT_OK(graph_.WaitUntilDone());
}
} // anonymous namespace

View File

@ -124,7 +124,7 @@ TEST(QuantizeFloatVectorCalculatorTest, TestEmptyVector) {
->Tag("FLOAT_VECTOR")
.packets.push_back(
MakePacket<std::vector<float>>(empty_vector).At(Timestamp(0)));
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const std::vector<Packet>& outputs = runner.Outputs().Tag("ENCODED").packets;
EXPECT_EQ(1, outputs.size());
EXPECT_TRUE(outputs[0].Get<std::string>().empty());
@ -150,7 +150,7 @@ TEST(QuantizeFloatVectorCalculatorTest, TestNonEmptyVector) {
->Tag("FLOAT_VECTOR")
.packets.push_back(
MakePacket<std::vector<float>>(vector).At(Timestamp(0)));
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const std::vector<Packet>& outputs = runner.Outputs().Tag("ENCODED").packets;
EXPECT_EQ(1, outputs.size());
const std::string& result = outputs[0].Get<std::string>();
@ -188,7 +188,7 @@ TEST(QuantizeFloatVectorCalculatorTest, TestSaturation) {
->Tag("FLOAT_VECTOR")
.packets.push_back(
MakePacket<std::vector<float>>(vector).At(Timestamp(0)));
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const std::vector<Packet>& outputs = runner.Outputs().Tag("ENCODED").packets;
EXPECT_EQ(1, outputs.size());
const std::string& result = outputs[0].Get<std::string>();
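These tests feed a float vector in and read an encoded byte string back out of the ENCODED stream. As a rough, hypothetical sketch of one common scheme such a calculator could use, linear min/max scaling to uint8 with saturation (the helper name, parameters, and rounding below are assumptions for illustration, not the calculator's actual options):

#include <algorithm>
#include <cstdint>
#include <string>
#include <vector>

// Hypothetical helper (not the calculator's actual code): linearly map
// [min_value, max_value] onto [0, 255] and saturate out-of-range inputs.
std::string QuantizeToBytes(const std::vector<float>& values,
                            float min_value, float max_value) {
  std::string encoded;
  encoded.reserve(values.size());
  const float range = max_value - min_value;
  for (float v : values) {
    const float clamped = std::min(std::max(v, min_value), max_value);
    const float scaled = (clamped - min_value) / range * 255.0f;
    encoded.push_back(static_cast<char>(static_cast<uint8_t>(scaled + 0.5f)));
  }
  return encoded;
}

int main() {
  // 128.0f lies above max_value, so its byte saturates at 0xFF.
  const std::string encoded =
      QuantizeToBytes({0.0f, 0.5f, 64.0f, 128.0f}, 0.0f, 64.0f);
  return static_cast<int>(encoded.size());  // bytes 0x00, 0x02, 0xFF, 0xFF
}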

View File

@ -38,7 +38,7 @@ TEST(SequenceShiftCalculatorTest, ZeroShift) {
"[mediapipe.SequenceShiftCalculatorOptions.ext]: { packet_offset: 0 }", 1,
1, 0);
AddPackets(&runner);
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const std::vector<Packet>& input_packets =
runner.MutableInputs()->Index(0).packets;
const std::vector<Packet>& output_packets = runner.Outputs().Index(0).packets;
@ -59,7 +59,7 @@ TEST(SequenceShiftCalculatorTest, PositiveShift) {
"[mediapipe.SequenceShiftCalculatorOptions.ext]: { packet_offset: 3 }", 1,
1, 0);
AddPackets(&runner);
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const std::vector<Packet>& input_packets =
runner.MutableInputs()->Index(0).packets;
const std::vector<Packet>& output_packets = runner.Outputs().Index(0).packets;
@ -83,7 +83,7 @@ TEST(SequenceShiftCalculatorTest, NegativeShift) {
"[mediapipe.SequenceShiftCalculatorOptions.ext]: { packet_offset: -2 }",
1, 1, 0);
AddPackets(&runner);
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const std::vector<Packet>& input_packets =
runner.MutableInputs()->Index(0).packets;
const std::vector<Packet>& output_packets = runner.Outputs().Index(0).packets;

View File

@ -161,12 +161,12 @@ TEST_F(SplitTfLiteTensorVectorCalculatorTest, SmokeTest) {
// Run the graph.
CalculatorGraph graph;
MEDIAPIPE_ASSERT_OK(graph.Initialize(graph_config));
MEDIAPIPE_ASSERT_OK(graph.StartRun({}));
MEDIAPIPE_ASSERT_OK(graph.AddPacketToInputStream(
MP_ASSERT_OK(graph.Initialize(graph_config));
MP_ASSERT_OK(graph.StartRun({}));
MP_ASSERT_OK(graph.AddPacketToInputStream(
"tensor_in", Adopt(input_vec_.release()).At(Timestamp(0))));
// Wait until the calculator finishes processing.
MEDIAPIPE_ASSERT_OK(graph.WaitUntilIdle());
MP_ASSERT_OK(graph.WaitUntilIdle());
ValidateVectorOutput(range_0_packets, /*expected_elements=*/1,
/*input_begin_index=*/0);
@ -176,8 +176,8 @@ TEST_F(SplitTfLiteTensorVectorCalculatorTest, SmokeTest) {
/*input_begin_index=*/4);
// Fully close the graph at the end.
MEDIAPIPE_ASSERT_OK(graph.CloseInputStream("tensor_in"));
MEDIAPIPE_ASSERT_OK(graph.WaitUntilDone());
MP_ASSERT_OK(graph.CloseInputStream("tensor_in"));
MP_ASSERT_OK(graph.WaitUntilDone());
}
TEST_F(SplitTfLiteTensorVectorCalculatorTest, InvalidRangeTest) {
@ -270,12 +270,12 @@ TEST_F(SplitTfLiteTensorVectorCalculatorTest, SmokeTestElementOnly) {
// Run the graph.
CalculatorGraph graph;
MEDIAPIPE_ASSERT_OK(graph.Initialize(graph_config));
MEDIAPIPE_ASSERT_OK(graph.StartRun({}));
MEDIAPIPE_ASSERT_OK(graph.AddPacketToInputStream(
MP_ASSERT_OK(graph.Initialize(graph_config));
MP_ASSERT_OK(graph.StartRun({}));
MP_ASSERT_OK(graph.AddPacketToInputStream(
"tensor_in", Adopt(input_vec_.release()).At(Timestamp(0))));
// Wait until the calculator finishes processing.
MEDIAPIPE_ASSERT_OK(graph.WaitUntilIdle());
MP_ASSERT_OK(graph.WaitUntilIdle());
ValidateElementOutput(range_0_packets,
/*input_begin_index=*/0);
@ -285,8 +285,8 @@ TEST_F(SplitTfLiteTensorVectorCalculatorTest, SmokeTestElementOnly) {
/*input_begin_index=*/4);
// Fully close the graph at the end.
MEDIAPIPE_ASSERT_OK(graph.CloseInputStream("tensor_in"));
MEDIAPIPE_ASSERT_OK(graph.WaitUntilDone());
MP_ASSERT_OK(graph.CloseInputStream("tensor_in"));
MP_ASSERT_OK(graph.WaitUntilDone());
}
TEST_F(SplitTfLiteTensorVectorCalculatorTest,

View File

@ -81,7 +81,7 @@ mediapipe_cc_proto_library(
name = "opencv_image_encoder_calculator_cc_proto",
srcs = ["opencv_image_encoder_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":opencv_image_encoder_calculator_proto"],
)
@ -89,7 +89,7 @@ mediapipe_cc_proto_library(
name = "mask_overlay_calculator_cc_proto",
srcs = ["mask_overlay_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":mask_overlay_calculator_proto"],
)
@ -100,7 +100,7 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:calculator_cc_proto",
"//mediapipe/framework/formats:image_format_cc_proto",
],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":scale_image_calculator_proto"],
)
@ -110,7 +110,7 @@ mediapipe_cc_proto_library(
cc_deps = [
"//mediapipe/framework:calculator_cc_proto",
],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":set_alpha_calculator_proto"],
)
@ -120,17 +120,17 @@ mediapipe_cc_proto_library(
cc_deps = [
"//mediapipe/framework:calculator_cc_proto",
],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":image_cropping_calculator_proto"],
)
mediapipe_cc_proto_library(
name = "bilateral_filter_calculator_cc_proto",
srcs = ["bilateral_filter_calculator.proto"],
cc_deps = [
"//mediapipe/framework:calculator_cc_proto",
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = [
"//visibility:public",
],
visibility = ["//mediapipe:__subpackages__"],
deps = [":bilateral_filter_calculator_proto"],
)
@ -141,7 +141,7 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:calculator_cc_proto",
"//mediapipe/util:color_cc_proto",
],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":recolor_calculator_proto"],
)
@ -291,7 +291,7 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:calculator_cc_proto",
"//mediapipe/gpu:scale_mode_cc_proto",
],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":image_transformation_calculator_proto"],
)

View File

@ -153,7 +153,7 @@ REGISTER_CALCULATOR(BilateralFilterCalculator);
}
#if defined(__ANDROID__) || defined(__EMSCRIPTEN__)
RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
MP_RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
#endif // __ANDROID__ || __EMSCRIPTEN__
return ::mediapipe::OkStatus();
@ -181,7 +181,7 @@ REGISTER_CALCULATOR(BilateralFilterCalculator);
if (use_gpu_) {
#if defined(__ANDROID__) || defined(__EMSCRIPTEN__)
RETURN_IF_ERROR(gpu_helper_.Open(cc));
MP_RETURN_IF_ERROR(gpu_helper_.Open(cc));
#endif
}
@ -191,18 +191,18 @@ REGISTER_CALCULATOR(BilateralFilterCalculator);
::mediapipe::Status BilateralFilterCalculator::Process(CalculatorContext* cc) {
if (use_gpu_) {
#if defined(__ANDROID__) || defined(__EMSCRIPTEN__)
RETURN_IF_ERROR(
MP_RETURN_IF_ERROR(
gpu_helper_.RunInGlContext([this, cc]() -> ::mediapipe::Status {
if (!gpu_initialized_) {
RETURN_IF_ERROR(GlSetup(cc));
MP_RETURN_IF_ERROR(GlSetup(cc));
gpu_initialized_ = true;
}
RETURN_IF_ERROR(RenderGpu(cc));
MP_RETURN_IF_ERROR(RenderGpu(cc));
return ::mediapipe::OkStatus();
}));
#endif // __ANDROID__ || __EMSCRIPTEN__
} else {
RETURN_IF_ERROR(RenderCpu(cc));
MP_RETURN_IF_ERROR(RenderCpu(cc));
}
return ::mediapipe::OkStatus();
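The GPU branches above funnel every fallible call through a status-propagation macro that returns early from the enclosing function when the result is not ok. A minimal, self-contained sketch of that pattern (the Status struct and macro name are illustrative stand-ins, not MediaPipe's actual definitions):

#include <iostream>
#include <string>

struct Status {  // stand-in for ::mediapipe::Status, for illustration only
  bool ok() const { return message.empty(); }
  std::string message;
};

// Evaluate an expression that yields a Status and return it from the caller
// if it is not ok, so each call site stays a single line.
#define RETURN_IF_NOT_OK(expr)          \
  do {                                  \
    const Status _status = (expr);      \
    if (!_status.ok()) return _status;  \
  } while (0)

Status Step(bool fail) { return Status{fail ? "step failed" : ""}; }

Status Pipeline() {
  RETURN_IF_NOT_OK(Step(false));  // ok, execution continues
  RETURN_IF_NOT_OK(Step(true));   // not ok, Pipeline() returns this Status here
  return Status{};
}

int main() { std::cout << Pipeline().message << std::endl; }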

View File

@ -131,7 +131,7 @@ REGISTER_CALCULATOR(ImageCroppingCalculator);
}
#if defined(__ANDROID__) || (defined(__APPLE__) && !TARGET_OS_OSX)
RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
MP_RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
#endif // __ANDROID__ or iOS
return ::mediapipe::OkStatus();
@ -148,7 +148,7 @@ REGISTER_CALCULATOR(ImageCroppingCalculator);
if (use_gpu_) {
#if defined(__ANDROID__) || (defined(__APPLE__) && !TARGET_OS_OSX)
RETURN_IF_ERROR(gpu_helper_.Open(cc));
MP_RETURN_IF_ERROR(gpu_helper_.Open(cc));
#else
RET_CHECK_FAIL() << "GPU processing is for Android and iOS only.";
#endif // __ANDROID__ or iOS
@ -160,18 +160,18 @@ REGISTER_CALCULATOR(ImageCroppingCalculator);
::mediapipe::Status ImageCroppingCalculator::Process(CalculatorContext* cc) {
if (use_gpu_) {
#if defined(__ANDROID__) || (defined(__APPLE__) && !TARGET_OS_OSX)
RETURN_IF_ERROR(
MP_RETURN_IF_ERROR(
gpu_helper_.RunInGlContext([this, cc]() -> ::mediapipe::Status {
if (!gpu_initialized_) {
RETURN_IF_ERROR(InitGpu(cc));
MP_RETURN_IF_ERROR(InitGpu(cc));
gpu_initialized_ = true;
}
RETURN_IF_ERROR(RenderGpu(cc));
MP_RETURN_IF_ERROR(RenderGpu(cc));
return ::mediapipe::OkStatus();
}));
#endif // __ANDROID__ or iOS
} else {
RETURN_IF_ERROR(RenderCpu(cc));
MP_RETURN_IF_ERROR(RenderCpu(cc));
}
return ::mediapipe::OkStatus();
}

View File

@ -213,7 +213,7 @@ REGISTER_CALCULATOR(ImageTransformationCalculator);
}
#if defined(__ANDROID__) || defined(__APPLE__) && !TARGET_OS_OSX
RETURN_IF_ERROR(GlCalculatorHelper::UpdateContract(cc));
MP_RETURN_IF_ERROR(GlCalculatorHelper::UpdateContract(cc));
#endif // __ANDROID__ || iOS
return ::mediapipe::OkStatus();
@ -252,7 +252,7 @@ REGISTER_CALCULATOR(ImageTransformationCalculator);
if (use_gpu_) {
#if defined(__ANDROID__) || defined(__APPLE__) && !TARGET_OS_OSX
// Let the helper access the GL context information.
RETURN_IF_ERROR(helper_.Open(cc));
MP_RETURN_IF_ERROR(helper_.Open(cc));
#else
RET_CHECK_FAIL() << "GPU processing is for Android and iOS only.";
#endif // __ANDROID__ || iOS
@ -398,7 +398,7 @@ REGISTER_CALCULATOR(ImageTransformationCalculator);
input.format() == GpuBufferFormat::kBiPlanar420YpCbCr8FullRange) {
if (!yuv_renderer_) {
yuv_renderer_ = absl::make_unique<QuadRenderer>();
RETURN_IF_ERROR(
MP_RETURN_IF_ERROR(
yuv_renderer_->GlSetup(::mediapipe::kYUV2TexToRGBFragmentShader,
{"video_frame_y", "video_frame_uv"}));
}
@ -412,7 +412,7 @@ REGISTER_CALCULATOR(ImageTransformationCalculator);
if (src1.target() == GL_TEXTURE_EXTERNAL_OES) {
if (!ext_rgb_renderer_) {
ext_rgb_renderer_ = absl::make_unique<QuadRenderer>();
RETURN_IF_ERROR(ext_rgb_renderer_->GlSetup(
MP_RETURN_IF_ERROR(ext_rgb_renderer_->GlSetup(
::mediapipe::kBasicTexturedFragmentShaderOES, {"video_frame"}));
}
renderer = ext_rgb_renderer_.get();
@ -421,7 +421,7 @@ REGISTER_CALCULATOR(ImageTransformationCalculator);
{
if (!rgb_renderer_) {
rgb_renderer_ = absl::make_unique<QuadRenderer>();
RETURN_IF_ERROR(rgb_renderer_->GlSetup());
MP_RETURN_IF_ERROR(rgb_renderer_->GlSetup());
}
renderer = rgb_renderer_.get();
}
@ -446,7 +446,7 @@ REGISTER_CALCULATOR(ImageTransformationCalculator);
glActiveTexture(GL_TEXTURE1);
glBindTexture(src1.target(), src1.name());
RETURN_IF_ERROR(renderer->GlRender(
MP_RETURN_IF_ERROR(renderer->GlRender(
src1.width(), src1.height(), dst.width(), dst.height(), scale_mode,
rotation, options_.flip_horizontally(), options_.flip_vertically(),
/*flip_texture=*/false));

View File

@ -74,7 +74,7 @@ REGISTER_CALCULATOR(MaskOverlayCalculator);
// static
::mediapipe::Status MaskOverlayCalculator::GetContract(CalculatorContract* cc) {
RETURN_IF_ERROR(GlCalculatorHelper::UpdateContract(cc));
MP_RETURN_IF_ERROR(GlCalculatorHelper::UpdateContract(cc));
cc->Inputs().Get("VIDEO", 0).Set<GpuBuffer>();
cc->Inputs().Get("VIDEO", 1).Set<GpuBuffer>();
if (cc->Inputs().HasTag("MASK"))
@ -103,7 +103,7 @@ REGISTER_CALCULATOR(MaskOverlayCalculator);
const auto& options = cc->Options<MaskOverlayCalculatorOptions>();
const auto mask_channel = options.mask_channel();
RETURN_IF_ERROR(GlSetup(mask_channel));
MP_RETURN_IF_ERROR(GlSetup(mask_channel));
initialized_ = true;
}
@ -147,7 +147,7 @@ REGISTER_CALCULATOR(MaskOverlayCalculator);
glActiveTexture(GL_TEXTURE3);
glBindTexture(mask_tex.target(), mask_tex.name());
RETURN_IF_ERROR(GlRender(mask_const));
MP_RETURN_IF_ERROR(GlRender(mask_const));
glActiveTexture(GL_TEXTURE3);
glBindTexture(mask_tex.target(), 0);
@ -155,7 +155,7 @@ REGISTER_CALCULATOR(MaskOverlayCalculator);
} else {
const float mask_const = mask_packet.Get<float>();
RETURN_IF_ERROR(GlRender(mask_const));
MP_RETURN_IF_ERROR(GlRender(mask_const));
}
glActiveTexture(GL_TEXTURE2);

View File

@ -30,7 +30,7 @@ namespace {
TEST(OpenCvEncodedImageToImageFrameCalculatorTest, TestRgbJpeg) {
std::string contents;
MEDIAPIPE_ASSERT_OK(file::GetContents(
MP_ASSERT_OK(file::GetContents(
file::JoinPath("./", "/mediapipe/calculators/image/testdata/dino.jpg"),
&contents));
Packet input_packet = MakePacket<std::string>(contents);
@ -44,7 +44,7 @@ TEST(OpenCvEncodedImageToImageFrameCalculatorTest, TestRgbJpeg) {
CalculatorRunner runner(node_config);
runner.MutableInputs()->Index(0).packets.push_back(
input_packet.At(Timestamp(0)));
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const auto& outputs = runner.Outputs();
ASSERT_EQ(1, outputs.NumEntries());
const std::vector<Packet>& packets = outputs.Index(0).packets;
@ -87,7 +87,7 @@ TEST(OpenCvEncodedImageToImageFrameCalculatorTest, TestGrayscaleJpeg) {
CalculatorRunner runner(node_config);
runner.MutableInputs()->Index(0).packets.push_back(
input_packet.At(Timestamp(0)));
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const auto& outputs = runner.Outputs();
ASSERT_EQ(1, outputs.NumEntries());
const std::vector<Packet>& packets = outputs.Index(0).packets;

View File

@ -55,7 +55,7 @@ TEST(OpenCvImageEncoderCalculatorTest, TestJpegWithQualities) {
CalculatorRunner runner(node_config);
runner.MutableInputs()->Index(0).packets.push_back(
input_packet.At(Timestamp(0)));
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const auto& outputs = runner.Outputs();
ASSERT_EQ(1, outputs.NumEntries());
const std::vector<Packet>& packets = outputs.Index(0).packets;

View File

@ -135,7 +135,7 @@ REGISTER_CALCULATOR(RecolorCalculator);
}
#if defined(__ANDROID__) || (defined(__APPLE__) && !TARGET_OS_OSX)
RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
MP_RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
#endif // __ANDROID__ or iOS
return ::mediapipe::OkStatus();
@ -147,11 +147,11 @@ REGISTER_CALCULATOR(RecolorCalculator);
if (cc->Inputs().HasTag("IMAGE_GPU")) {
use_gpu_ = true;
#if defined(__ANDROID__) || (defined(__APPLE__) && !TARGET_OS_OSX)
RETURN_IF_ERROR(gpu_helper_.Open(cc));
MP_RETURN_IF_ERROR(gpu_helper_.Open(cc));
#endif // __ANDROID__ or iOS
}
RETURN_IF_ERROR(LoadOptions(cc));
MP_RETURN_IF_ERROR(LoadOptions(cc));
return ::mediapipe::OkStatus();
}
@ -159,18 +159,18 @@ REGISTER_CALCULATOR(RecolorCalculator);
::mediapipe::Status RecolorCalculator::Process(CalculatorContext* cc) {
if (use_gpu_) {
#if defined(__ANDROID__) || (defined(__APPLE__) && !TARGET_OS_OSX)
RETURN_IF_ERROR(
MP_RETURN_IF_ERROR(
gpu_helper_.RunInGlContext([this, &cc]() -> ::mediapipe::Status {
if (!initialized_) {
RETURN_IF_ERROR(InitGpu(cc));
MP_RETURN_IF_ERROR(InitGpu(cc));
initialized_ = true;
}
RETURN_IF_ERROR(RenderGpu(cc));
MP_RETURN_IF_ERROR(RenderGpu(cc));
return ::mediapipe::OkStatus();
}));
#endif // __ANDROID__ or iOS
} else {
RETURN_IF_ERROR(RenderCpu(cc));
MP_RETURN_IF_ERROR(RenderCpu(cc));
}
return ::mediapipe::OkStatus();
}

View File

@ -253,21 +253,21 @@ ScaleImageCalculator::~ScaleImageCalculator() {}
::mediapipe::Status ScaleImageCalculator::InitializeFrameInfo(
CalculatorContext* cc) {
RETURN_IF_ERROR(
MP_RETURN_IF_ERROR(
scale_image::FindCropDimensions(input_width_, input_height_, //
options_.min_aspect_ratio(), //
options_.max_aspect_ratio(), //
&crop_width_, &crop_height_, //
&col_start_, &row_start_));
RETURN_IF_ERROR(
MP_RETURN_IF_ERROR(
scale_image::FindOutputDimensions(crop_width_, crop_height_, //
options_.target_width(), //
options_.target_height(), //
options_.preserve_aspect_ratio(), //
options_.scale_to_multiple_of_two(), //
&output_width_, &output_height_));
RETURN_IF_ERROR(FindInterpolationAlgorithm(options_.algorithm(),
&interpolation_algorithm_));
MP_RETURN_IF_ERROR(FindInterpolationAlgorithm(options_.algorithm(),
&interpolation_algorithm_));
if (interpolation_algorithm_ == -1 &&
(output_width_ > crop_width_ || output_height_ > crop_height_)) {
output_width_ = crop_width_;
@ -327,7 +327,7 @@ ScaleImageCalculator::~ScaleImageCalculator() {}
bool has_override_options = cc->Inputs().HasTag("OVERRIDE_OPTIONS");
if (!has_override_options) {
RETURN_IF_ERROR(InitializeFromOptions());
MP_RETURN_IF_ERROR(InitializeFromOptions());
}
if (!cc->Inputs().Get(input_data_id_).Header().IsEmpty()) {
@ -377,8 +377,8 @@ ScaleImageCalculator::~ScaleImageCalculator() {}
if (input_width_ > 0 && input_height_ > 0 &&
input_format_ != ImageFormat::UNKNOWN &&
output_format_ != ImageFormat::UNKNOWN) {
RETURN_IF_ERROR(ValidateImageFormats());
RETURN_IF_ERROR(InitializeFrameInfo(cc));
MP_RETURN_IF_ERROR(ValidateImageFormats());
MP_RETURN_IF_ERROR(InitializeFrameInfo(cc));
std::unique_ptr<VideoHeader> output_header(new VideoHeader());
*output_header = input_video_header_;
output_header->format = output_format_;
@ -461,9 +461,9 @@ ScaleImageCalculator::~ScaleImageCalculator() {}
} else {
output_format_ = input_format_;
}
RETURN_IF_ERROR(InitializeFrameInfo(cc));
MP_RETURN_IF_ERROR(InitializeFrameInfo(cc));
}
RETURN_IF_ERROR(ValidateImageFormats());
MP_RETURN_IF_ERROR(ValidateImageFormats());
} else {
if (input_width_ != image_frame.Width() ||
input_height_ != image_frame.Height()) {
@ -503,9 +503,9 @@ ScaleImageCalculator::~ScaleImageCalculator() {}
} else {
output_format_ = input_format_;
}
RETURN_IF_ERROR(InitializeFrameInfo(cc));
MP_RETURN_IF_ERROR(InitializeFrameInfo(cc));
}
RETURN_IF_ERROR(ValidateImageFormats());
MP_RETURN_IF_ERROR(ValidateImageFormats());
} else {
if (input_width_ != yuv_image.width() ||
input_height_ != yuv_image.height()) {
@ -531,7 +531,7 @@ ScaleImageCalculator::~ScaleImageCalculator() {}
options_.MergeFrom(cc->Inputs()
.Tag("OVERRIDE_OPTIONS")
.Get<ScaleImageCalculatorOptions>());
RETURN_IF_ERROR(InitializeFromOptions());
MP_RETURN_IF_ERROR(InitializeFromOptions());
}
if (cc->Inputs().UsesTags() && cc->Inputs().HasTag("VIDEO_HEADER") &&
!cc->Inputs().Tag("VIDEO_HEADER").IsEmpty()) {
@ -548,7 +548,7 @@ ScaleImageCalculator::~ScaleImageCalculator() {}
if (input_format_ == ImageFormat::YCBCR420P) {
const YUVImage* yuv_image =
&cc->Inputs().Get(input_data_id_).Get<YUVImage>();
RETURN_IF_ERROR(ValidateYUVImage(cc, *yuv_image));
MP_RETURN_IF_ERROR(ValidateYUVImage(cc, *yuv_image));
if (output_format_ == ImageFormat::SRGB) {
// TODO: For ease of implementation, YUVImage is converted to
@ -596,7 +596,7 @@ ScaleImageCalculator::~ScaleImageCalculator() {}
}
} else {
image_frame = &cc->Inputs().Get(input_data_id_).Get<ImageFrame>();
RETURN_IF_ERROR(ValidateImageFrame(cc, *image_frame));
MP_RETURN_IF_ERROR(ValidateImageFrame(cc, *image_frame));
}
std::unique_ptr<ImageFrame> cropped_image;

View File

@ -28,8 +28,8 @@ TEST(ScaleImageUtilsTest, FindCropDimensions) {
int col_start;
int row_start;
// No cropping because aspect ratios should be ignored.
MEDIAPIPE_ASSERT_OK(FindCropDimensions(50, 100, "0/1", "1/0", &crop_width,
&crop_height, &col_start, &row_start));
MP_ASSERT_OK(FindCropDimensions(50, 100, "0/1", "1/0", &crop_width,
&crop_height, &col_start, &row_start));
EXPECT_EQ(50, crop_width);
EXPECT_EQ(100, crop_height);
EXPECT_EQ(0, row_start);
@ -37,39 +37,38 @@ TEST(ScaleImageUtilsTest, FindCropDimensions) {
// Tests proto examples.
// 16:9 aspect ratio, should be unchanged.
MEDIAPIPE_ASSERT_OK(FindCropDimensions(1920, 1080, "9/16", "16/9",
&crop_width, &crop_height, &col_start,
&row_start));
MP_ASSERT_OK(FindCropDimensions(1920, 1080, "9/16", "16/9", &crop_width,
&crop_height, &col_start, &row_start));
EXPECT_EQ(0, col_start);
EXPECT_EQ(1920, crop_width);
EXPECT_EQ(0, row_start);
EXPECT_EQ(1080, crop_height);
// 10:16 aspect ratio, should be unchanged.
MEDIAPIPE_ASSERT_OK(FindCropDimensions(640, 1024, "9/16", "16/9", &crop_width,
&crop_height, &col_start, &row_start));
MP_ASSERT_OK(FindCropDimensions(640, 1024, "9/16", "16/9", &crop_width,
&crop_height, &col_start, &row_start));
EXPECT_EQ(0, col_start);
EXPECT_EQ(640, crop_width);
EXPECT_EQ(0, row_start);
EXPECT_EQ(1024, crop_height);
// 2:1 aspect ratio, width is cropped.
MEDIAPIPE_ASSERT_OK(FindCropDimensions(640, 320, "9/16", "16/9", &crop_width,
&crop_height, &col_start, &row_start));
MP_ASSERT_OK(FindCropDimensions(640, 320, "9/16", "16/9", &crop_width,
&crop_height, &col_start, &row_start));
EXPECT_EQ(36, col_start);
EXPECT_EQ(568, crop_width);
EXPECT_EQ(0, row_start);
EXPECT_EQ(320, crop_height);
// 1:5 aspect ratio, height is cropped.
MEDIAPIPE_ASSERT_OK(FindCropDimensions(96, 480, "9/16", "16/9", &crop_width,
&crop_height, &col_start, &row_start));
MP_ASSERT_OK(FindCropDimensions(96, 480, "9/16", "16/9", &crop_width,
&crop_height, &col_start, &row_start));
EXPECT_EQ(0, col_start);
EXPECT_EQ(96, crop_width);
EXPECT_EQ(155, row_start);
EXPECT_EQ(170, crop_height);
// Tests min = max, crops width.
MEDIAPIPE_ASSERT_OK(FindCropDimensions(200, 100, "1/1", "1/1", &crop_width,
&crop_height, &col_start, &row_start));
MP_ASSERT_OK(FindCropDimensions(200, 100, "1/1", "1/1", &crop_width,
&crop_height, &col_start, &row_start));
EXPECT_EQ(50, col_start);
EXPECT_EQ(100, crop_width);
EXPECT_EQ(0, row_start);
@ -80,49 +79,49 @@ TEST(ScaleImageUtilsTest, FindOutputDimensionsPreserveRatio) {
int output_width;
int output_height;
// No scaling.
MEDIAPIPE_ASSERT_OK(FindOutputDimensions(200, 100, -1, -1, true, true,
&output_width, &output_height));
MP_ASSERT_OK(FindOutputDimensions(200, 100, -1, -1, true, true, &output_width,
&output_height));
EXPECT_EQ(200, output_width);
EXPECT_EQ(100, output_height);
// No scaling, with an odd input size.
MEDIAPIPE_ASSERT_OK(FindOutputDimensions(201, 101, -1, -1, false, false,
&output_width, &output_height));
MP_ASSERT_OK(FindOutputDimensions(201, 101, -1, -1, false, false,
&output_width, &output_height));
EXPECT_EQ(201, output_width);
EXPECT_EQ(101, output_height);
// Scale down by 1/2.
MEDIAPIPE_ASSERT_OK(FindOutputDimensions(200, 100, 100, -1, true, true,
&output_width, &output_height));
MP_ASSERT_OK(FindOutputDimensions(200, 100, 100, -1, true, true,
&output_width, &output_height));
EXPECT_EQ(100, output_width);
EXPECT_EQ(50, output_height);
// Scale up, doubling dimensions.
MEDIAPIPE_ASSERT_OK(FindOutputDimensions(200, 100, -1, 200, true, true,
&output_width, &output_height));
MP_ASSERT_OK(FindOutputDimensions(200, 100, -1, 200, true, true,
&output_width, &output_height));
EXPECT_EQ(400, output_width);
EXPECT_EQ(200, output_height);
// Fits a 2:1 image into a 150 x 150 box. Output dimensions are always
// divisible by 2.
MEDIAPIPE_ASSERT_OK(FindOutputDimensions(200, 100, 150, 150, true, true,
&output_width, &output_height));
MP_ASSERT_OK(FindOutputDimensions(200, 100, 150, 150, true, true,
&output_width, &output_height));
EXPECT_EQ(150, output_width);
EXPECT_EQ(74, output_height);
// Fits a 2:1 image into a 400 x 50 box.
MEDIAPIPE_ASSERT_OK(FindOutputDimensions(200, 100, 400, 50, true, true,
&output_width, &output_height));
MP_ASSERT_OK(FindOutputDimensions(200, 100, 400, 50, true, true,
&output_width, &output_height));
EXPECT_EQ(100, output_width);
EXPECT_EQ(50, output_height);
// Scale to a multiple of two with an odd target size.
MEDIAPIPE_ASSERT_OK(FindOutputDimensions(200, 100, 101, -1, true, true,
&output_width, &output_height));
MP_ASSERT_OK(FindOutputDimensions(200, 100, 101, -1, true, true,
&output_width, &output_height));
EXPECT_EQ(100, output_width);
EXPECT_EQ(50, output_height);
// Scale with an odd target size, without rounding to a multiple of two.
MEDIAPIPE_ASSERT_OK(FindOutputDimensions(200, 100, 101, -1, true, false,
&output_width, &output_height));
MP_ASSERT_OK(FindOutputDimensions(200, 100, 101, -1, true, false,
&output_width, &output_height));
EXPECT_EQ(100, output_width);
EXPECT_EQ(50, output_height);
// Scale to odd size.
MEDIAPIPE_ASSERT_OK(FindOutputDimensions(200, 100, 151, 101, false, false,
&output_width, &output_height));
MP_ASSERT_OK(FindOutputDimensions(200, 100, 151, 101, false, false,
&output_width, &output_height));
EXPECT_EQ(151, output_width);
EXPECT_EQ(101, output_height);
}
@ -132,18 +131,18 @@ TEST(ScaleImageUtilsTest, FindOutputDimensionsNoAspectRatio) {
int output_width;
int output_height;
// Scale width only.
MEDIAPIPE_ASSERT_OK(FindOutputDimensions(200, 100, 100, -1, false, true,
&output_width, &output_height));
MP_ASSERT_OK(FindOutputDimensions(200, 100, 100, -1, false, true,
&output_width, &output_height));
EXPECT_EQ(100, output_width);
EXPECT_EQ(100, output_height);
// Scale height only.
MEDIAPIPE_ASSERT_OK(FindOutputDimensions(200, 100, -1, 200, false, true,
&output_width, &output_height));
MP_ASSERT_OK(FindOutputDimensions(200, 100, -1, 200, false, true,
&output_width, &output_height));
EXPECT_EQ(200, output_width);
EXPECT_EQ(200, output_height);
// Scale both dimensions.
MEDIAPIPE_ASSERT_OK(FindOutputDimensions(200, 100, 150, 200, false, true,
&output_width, &output_height));
MP_ASSERT_OK(FindOutputDimensions(200, 100, 150, 200, false, true,
&output_width, &output_height));
EXPECT_EQ(150, output_width);
EXPECT_EQ(200, output_height);
}
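For the 2:1-image-into-a-150 x 150-box expectation above (output 150 x 74), one plausible reading of the arithmetic is: scale by min(150/200, 150/100) = 0.75 to get 150 x 75, then round each dimension down to an even number. A stand-alone check of that reading (inferred from the expectations, not the FindOutputDimensions implementation):

#include <algorithm>
#include <cstdio>

int main() {
  const int in_w = 200, in_h = 100, box = 150;
  const double scale = std::min(static_cast<double>(box) / in_w,
                                static_cast<double>(box) / in_h);  // 0.75
  int out_w = static_cast<int>(in_w * scale);  // 150
  int out_h = static_cast<int>(in_h * scale);  // 75
  out_w -= out_w % 2;  // stays 150
  out_h -= out_h % 2;  // 75 becomes 74, matching EXPECT_EQ(74, output_height)
  std::printf("%d x %d\n", out_w, out_h);
  return 0;
}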

View File

@ -157,7 +157,7 @@ REGISTER_CALCULATOR(SetAlphaCalculator);
}
#if defined(__ANDROID__) || (defined(__APPLE__) && !TARGET_OS_OSX)
RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
MP_RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
#endif // __ANDROID__ or iOS
return ::mediapipe::OkStatus();
@ -188,7 +188,7 @@ REGISTER_CALCULATOR(SetAlphaCalculator);
if (use_gpu_) {
#if defined(__ANDROID__) || (defined(__APPLE__) && !TARGET_OS_OSX)
RETURN_IF_ERROR(gpu_helper_.Open(cc));
MP_RETURN_IF_ERROR(gpu_helper_.Open(cc));
#endif
}
@ -198,18 +198,18 @@ REGISTER_CALCULATOR(SetAlphaCalculator);
::mediapipe::Status SetAlphaCalculator::Process(CalculatorContext* cc) {
if (use_gpu_) {
#if defined(__ANDROID__) || (defined(__APPLE__) && !TARGET_OS_OSX)
RETURN_IF_ERROR(
MP_RETURN_IF_ERROR(
gpu_helper_.RunInGlContext([this, cc]() -> ::mediapipe::Status {
if (!gpu_initialized_) {
RETURN_IF_ERROR(GlSetup(cc));
MP_RETURN_IF_ERROR(GlSetup(cc));
gpu_initialized_ = true;
}
RETURN_IF_ERROR(RenderGpu(cc));
MP_RETURN_IF_ERROR(RenderGpu(cc));
return ::mediapipe::OkStatus();
}));
#endif // __ANDROID__ or iOS
} else {
RETURN_IF_ERROR(RenderCpu(cc));
MP_RETURN_IF_ERROR(RenderCpu(cc));
}
return ::mediapipe::OkStatus();

View File

@ -29,7 +29,7 @@ mediapipe_cc_proto_library(
name = "callback_packet_calculator_cc_proto",
srcs = ["callback_packet_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe/framework:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":callback_packet_calculator_proto"],
)

View File

@ -22,7 +22,7 @@ load("//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library"
proto_library(
name = "graph_tensors_packet_generator_proto",
srcs = ["graph_tensors_packet_generator.proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_proto",
"//mediapipe/framework:packet_generator_proto",
@ -118,7 +118,7 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:calculator_cc_proto",
"//mediapipe/framework:packet_generator_cc_proto",
],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":graph_tensors_packet_generator_proto"],
)
@ -129,7 +129,7 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:calculator_cc_proto",
"@org_tensorflow//tensorflow/core:protos_all_cc",
],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":image_frame_to_tensor_calculator_proto"],
)
@ -137,7 +137,7 @@ mediapipe_cc_proto_library(
name = "matrix_to_tensor_calculator_options_cc_proto",
srcs = ["matrix_to_tensor_calculator_options.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":matrix_to_tensor_calculator_options_proto"],
)
@ -145,7 +145,7 @@ mediapipe_cc_proto_library(
name = "lapped_tensor_buffer_calculator_cc_proto",
srcs = ["lapped_tensor_buffer_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":lapped_tensor_buffer_calculator_proto"],
)
@ -153,7 +153,7 @@ mediapipe_cc_proto_library(
name = "object_detection_tensors_to_detections_calculator_cc_proto",
srcs = ["object_detection_tensors_to_detections_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":object_detection_tensors_to_detections_calculator_proto"],
)
@ -164,7 +164,7 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:calculator_cc_proto",
"@org_tensorflow//tensorflow/core:protos_all_cc",
],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":pack_media_sequence_calculator_proto"],
)
@ -172,7 +172,7 @@ mediapipe_cc_proto_library(
name = "tensorflow_inference_calculator_cc_proto",
srcs = ["tensorflow_inference_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":tensorflow_inference_calculator_proto"],
)
@ -183,7 +183,7 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:packet_generator_cc_proto",
"@org_tensorflow//tensorflow/core:protos_all_cc",
],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":tensorflow_session_from_frozen_graph_generator_proto"],
)
@ -194,7 +194,7 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:calculator_cc_proto",
"@org_tensorflow//tensorflow/core:protos_all_cc",
],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":tensorflow_session_from_frozen_graph_calculator_proto"],
)
@ -202,7 +202,7 @@ mediapipe_cc_proto_library(
name = "tensorflow_session_from_saved_model_generator_cc_proto",
srcs = ["tensorflow_session_from_saved_model_generator.proto"],
cc_deps = ["//mediapipe/framework:packet_generator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":tensorflow_session_from_saved_model_generator_proto"],
)
@ -210,7 +210,7 @@ mediapipe_cc_proto_library(
name = "tensorflow_session_from_saved_model_calculator_cc_proto",
srcs = ["tensorflow_session_from_saved_model_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":tensorflow_session_from_saved_model_calculator_proto"],
)
@ -218,7 +218,7 @@ mediapipe_cc_proto_library(
name = "tensor_squeeze_dimensions_calculator_cc_proto",
srcs = ["tensor_squeeze_dimensions_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":tensor_squeeze_dimensions_calculator_proto"],
)
@ -226,7 +226,7 @@ mediapipe_cc_proto_library(
name = "tensor_to_image_frame_calculator_cc_proto",
srcs = ["tensor_to_image_frame_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":tensor_to_image_frame_calculator_proto"],
)
@ -237,7 +237,7 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:calculator_cc_proto",
"//mediapipe/framework/formats:time_series_header_cc_proto",
],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":tensor_to_matrix_calculator_proto"],
)
@ -245,7 +245,7 @@ mediapipe_cc_proto_library(
name = "tensor_to_vector_float_calculator_options_cc_proto",
srcs = ["tensor_to_vector_float_calculator_options.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":tensor_to_vector_float_calculator_options_proto"],
)
@ -256,7 +256,7 @@ mediapipe_cc_proto_library(
"//mediapipe/calculators/core:packet_resampler_calculator_cc_proto",
"//mediapipe/framework:calculator_cc_proto",
],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":unpack_media_sequence_calculator_proto"],
)
@ -264,7 +264,7 @@ mediapipe_cc_proto_library(
name = "vector_float_to_tensor_calculator_options_cc_proto",
srcs = ["vector_float_to_tensor_calculator_options.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":vector_float_to_tensor_calculator_options_proto"],
)

View File

@ -74,7 +74,7 @@ TEST_F(GraphTensorsPacketGeneratorTest, VerifyTensorSizeShapeAndValue) {
::mediapipe::Status run_status = tool::RunGenerateAndValidateTypes(
"GraphTensorsPacketGenerator", extendable_options_, inputs, &outputs);
MEDIAPIPE_EXPECT_OK(run_status) << run_status.message();
MP_EXPECT_OK(run_status) << run_status.message();
VerifyTensorMap(&outputs);
}

View File

@ -171,7 +171,7 @@ TEST_F(ImageFrameToTensorCalculatorTest, SolidRedRGBFrame) {
runner_ = ::absl::make_unique<CalculatorRunner>(
"ImageFrameToTensorCalculator", "", 1, 1, 0);
AddRGBFrame(width, height);
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Index(0).packets;
ASSERT_EQ(1, output_packets.size());
@ -212,7 +212,7 @@ TEST_F(ImageFrameToTensorCalculatorTest, SolidRedRGBAFrame) {
runner_.reset(
new CalculatorRunner("ImageFrameToTensorCalculator", "", 1, 1, 0));
AddRGBAFrame(width, height);
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Index(0).packets;
ASSERT_EQ(1, output_packets.size());
@ -254,7 +254,7 @@ TEST_F(ImageFrameToTensorCalculatorTest, SolidGray8Frame) {
runner_.reset(
new CalculatorRunner("ImageFrameToTensorCalculator", "", 1, 1, 0));
AddGray8Frame(width, height);
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Index(0).packets;
ASSERT_EQ(1, output_packets.size());
@ -293,7 +293,7 @@ TEST_F(ImageFrameToTensorCalculatorTest, SolidGray16Frame) {
runner_.reset(
new CalculatorRunner("ImageFrameToTensorCalculator", "", 1, 1, 0));
AddGray16Frame(width, height);
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Index(0).packets;
ASSERT_EQ(1, output_packets.size());
@ -332,7 +332,7 @@ TEST_F(ImageFrameToTensorCalculatorTest, SolidFloatFrame) {
runner_.reset(
new CalculatorRunner("ImageFrameToTensorCalculator", "", 1, 1, 0));
AddFloatFrame(width, height);
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Index(0).packets;
ASSERT_EQ(1, output_packets.size());
@ -363,7 +363,7 @@ TEST_F(ImageFrameToTensorCalculatorTest, FixedNoiseRGBFrame) {
runner_.reset(
new CalculatorRunner("ImageFrameToTensorCalculator", "", 1, 1, 0));
AddFixedNoiseRGBFrame();
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Index(0).packets;
ASSERT_EQ(1, output_packets.size());
@ -396,7 +396,7 @@ TEST_F(ImageFrameToTensorCalculatorTest, RandomRGBFrame) {
runner_.reset(
new CalculatorRunner("ImageFrameToTensorCalculator", "", 1, 1, 0));
AddRandomRGBFrame(width, height, seed);
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Index(0).packets;
ASSERT_EQ(1, output_packets.size());
@ -440,7 +440,7 @@ TEST_F(ImageFrameToTensorCalculatorTest, FixedRGBFrameWithMeanAndStddev) {
runner_->MutableInputs()->Index(0).packets.push_back(
Adopt(image_frame.release()).At(Timestamp(0)));
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const auto& tensor = runner_->Outputs().Index(0).packets[0].Get<tf::Tensor>();
EXPECT_EQ(tensor.dtype(), tf::DT_FLOAT);

View File

@ -74,7 +74,7 @@ TEST_F(MatrixToTensorCalculatorTest, RandomMatrix) {
runner_ = ::absl::make_unique<CalculatorRunner>("MatrixToTensorCalculator",
"", 1, 1, 0);
AddRandomMatrix(num_rows, num_columns, kSeed);
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Index(0).packets;
ASSERT_EQ(1, output_packets.size());
@ -106,7 +106,7 @@ TEST_F(MatrixToTensorCalculatorTest, RandomMatrixTranspose) {
runner_ = ::absl::make_unique<CalculatorRunner>(
"MatrixToTensorCalculator", kTransposeOptionsString, 1, 1, 0);
AddRandomMatrix(num_rows, num_columns, kSeed);
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Index(0).packets;
ASSERT_EQ(1, output_packets.size());
@ -138,7 +138,7 @@ TEST_F(MatrixToTensorCalculatorTest, RandomMatrixAddDimension) {
runner_ = ::absl::make_unique<CalculatorRunner>(
"MatrixToTensorCalculator", kAddDimensionOptionsString, 1, 1, 0);
AddRandomMatrix(num_rows, num_columns, kSeed);
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Index(0).packets;
ASSERT_EQ(1, output_packets.size());

View File

@ -134,7 +134,7 @@ class ObjectDetectionTensorsToDetectionsCalculatorTest
runner_->MutableInputs()->Tag(kClasses).packets.push_back(
PointToForeign(&input_classes_).At(Timestamp::PostStream()));
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
ASSERT_EQ(1, runner_->Outputs().Tag(kDetections).packets.size());
}
@ -146,7 +146,7 @@ class ObjectDetectionTensorsToDetectionsCalculatorTest
PointToForeign(&input_scores_for_all_classes_)
.At(Timestamp::PostStream()));
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
ASSERT_EQ(1, runner_->Outputs().Tag(kDetections).packets.size());
}
@ -167,7 +167,7 @@ class ObjectDetectionTensorsToDetectionsCalculatorTest
.packets.push_back(
PointToForeign(&input_keypoints_).At(Timestamp::PostStream()));
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
ASSERT_EQ(1, runner_->Outputs().Tag(kDetections).packets.size());
}
@ -201,7 +201,7 @@ class ObjectDetectionTensorsToDetectionsCalculatorTest
runner_->MutableInputs()->Tag(kClasses).packets.push_back(
PointToForeign(&input_classes_).At(Timestamp::PostStream()));
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
ASSERT_EQ(1, runner_->Outputs().Tag(kDetections).packets.size());
}

View File

@ -87,7 +87,7 @@ TEST_F(PackMediaSequenceCalculatorTest, PacksTwoImages) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(input_sequence.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Tag("SEQUENCE_EXAMPLE").packets;
@ -131,7 +131,7 @@ TEST_F(PackMediaSequenceCalculatorTest, PacksTwoPrefixedImages) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(input_sequence.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Tag("SEQUENCE_EXAMPLE").packets;
@ -169,7 +169,7 @@ TEST_F(PackMediaSequenceCalculatorTest, PacksTwoFloatLists) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(input_sequence.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Tag("SEQUENCE_EXAMPLE").packets;
@ -214,7 +214,7 @@ TEST_F(PackMediaSequenceCalculatorTest, PacksAdditionalContext) {
runner_->MutableInputs()->Tag("IMAGE").packets.push_back(
Adopt(image_ptr.release()).At(Timestamp(0)));
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Tag("SEQUENCE_EXAMPLE").packets;
@ -257,7 +257,7 @@ TEST_F(PackMediaSequenceCalculatorTest, PacksTwoForwardFlowEncodeds) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(input_sequence.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Tag("SEQUENCE_EXAMPLE").packets;
@ -321,7 +321,7 @@ TEST_F(PackMediaSequenceCalculatorTest, PacksTwoBBoxDetections) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(input_sequence.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Tag("SEQUENCE_EXAMPLE").packets;
@ -374,7 +374,7 @@ TEST_F(PackMediaSequenceCalculatorTest, PacksTwoKeypoints) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(input_sequence.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Tag("SEQUENCE_EXAMPLE").packets;
@ -424,7 +424,7 @@ TEST_F(PackMediaSequenceCalculatorTest, PacksTwoMaskDetections) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(input_sequence.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Tag("SEQUENCE_EXAMPLE").packets;
@ -473,7 +473,7 @@ TEST_F(PackMediaSequenceCalculatorTest, MissingStreamOK) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(input_sequence.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Tag("SEQUENCE_EXAMPLE").packets;
@ -536,7 +536,7 @@ TEST_F(PackMediaSequenceCalculatorTest, TestReplacingImages) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(input_sequence.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Tag("SEQUENCE_EXAMPLE").packets;
@ -562,7 +562,7 @@ TEST_F(PackMediaSequenceCalculatorTest, TestReplacingFlowImages) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(input_sequence.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Tag("SEQUENCE_EXAMPLE").packets;
@ -599,7 +599,7 @@ TEST_F(PackMediaSequenceCalculatorTest, TestReplacingFloatVectors) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(input_sequence.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Tag("SEQUENCE_EXAMPLE").packets;
@ -643,7 +643,7 @@ TEST_F(PackMediaSequenceCalculatorTest, TestReconcilingAnnotations) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(input_sequence.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Tag("SEQUENCE_EXAMPLE").packets;
ASSERT_EQ(1, output_packets.size());

View File

@ -365,14 +365,14 @@ class TensorFlowInferenceCalculator : public CalculatorBase {
}
if (batch_timestamps_.size() == options_.batch_size()) {
RETURN_IF_ERROR(OutputBatch(cc));
MP_RETURN_IF_ERROR(OutputBatch(cc));
}
return ::mediapipe::OkStatus();
}
::mediapipe::Status Close(CalculatorContext* cc) override {
if (!batch_timestamps_.empty()) {
RETURN_IF_ERROR(OutputBatch(cc));
MP_RETURN_IF_ERROR(OutputBatch(cc));
}
return ::mediapipe::OkStatus();
}
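The Process/Close pair above accumulates inputs and emits them in batches: a full batch is flushed once batch_timestamps_ reaches options_.batch_size(), and any remainder is flushed when the stream closes. A minimal stand-alone sketch of that accumulate-and-flush pattern (generic names and types, not the calculator's):

#include <cstdio>
#include <vector>

// Illustrative stand-in, not the calculator: buffer items and flush a batch
// as soon as it is full; flush any remainder when the stream closes.
class Batcher {
 public:
  explicit Batcher(int batch_size) : batch_size_(batch_size) {}

  void Add(int item) {  // analogous to Process() above
    buffer_.push_back(item);
    if (static_cast<int>(buffer_.size()) == batch_size_) Flush();
  }

  void Close() {  // analogous to Close() above
    if (!buffer_.empty()) Flush();
  }

 private:
  void Flush() {
    std::printf("flushing %zu items\n", buffer_.size());
    buffer_.clear();
  }

  int batch_size_;
  std::vector<int> buffer_;
};

int main() {
  Batcher batcher(/*batch_size=*/3);
  for (int i = 0; i < 7; ++i) batcher.Add(i);  // flushes twice (3 + 3 items)
  batcher.Close();                             // flushes the remaining 1 item
}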

View File

@ -122,7 +122,7 @@ TEST_F(TensorflowInferenceCalculatorTest, GetConstants) {
runner_ = absl::make_unique<CalculatorRunner>(config);
AddSessionInputSidePacket();
AddVectorToInputsAsTensor({0, 0, 0}, "A", 0);
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets_b =
runner_->Outputs().Tag("B").packets;
@ -163,7 +163,7 @@ TEST_F(TensorflowInferenceCalculatorTest, GetComputed) {
AddSessionInputSidePacket();
AddVectorToInputsAsTensor({2, 2, 2}, "A", 0);
AddVectorToInputsAsTensor({3, 4, 5}, "B", 0);
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets_mult =
runner_->Outputs().Tag("MULTIPLIED").packets;
@ -217,7 +217,7 @@ TEST_F(TensorflowInferenceCalculatorTest, GetMultiBatchComputed) {
AddVectorToInputsAsTensor({3, 4, 5}, "B", 0);
AddVectorToInputsAsTensor({3, 3, 3}, "A", 1);
AddVectorToInputsAsTensor({3, 4, 5}, "B", 1);
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets_mult =
runner_->Outputs().Tag("MULTIPLIED").packets;
@ -255,7 +255,7 @@ TEST_F(TensorflowInferenceCalculatorTest, GetSingleBatchComputed) {
AddVectorToInputsAsTensor({3, 4, 5}, "B", 0);
AddVectorToInputsAsTensor({3, 3, 3}, "A", 1);
AddVectorToInputsAsTensor({3, 4, 5}, "B", 1);
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets_mult =
runner_->Outputs().Tag("MULTIPLIED").packets;
@ -293,7 +293,7 @@ TEST_F(TensorflowInferenceCalculatorTest, GetCloseBatchComputed) {
AddVectorToInputsAsTensor({3, 4, 5}, "B", 0);
AddVectorToInputsAsTensor({3, 3, 3}, "A", 1);
AddVectorToInputsAsTensor({3, 4, 5}, "B", 1);
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets_mult =
runner_->Outputs().Tag("MULTIPLIED").packets;
@ -331,7 +331,7 @@ TEST_F(TensorflowInferenceCalculatorTest, TestRecurrentStates) {
AddSessionInputSidePacket();
AddVectorToInputsAsTensor({3, 4, 5}, "B", 0);
AddVectorToInputsAsTensor({3, 4, 5}, "B", 1);
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets_mult =
runner_->Outputs().Tag("MULTIPLIED").packets;
@ -372,7 +372,7 @@ TEST_F(TensorflowInferenceCalculatorTest, TestRecurrentStateOverride) {
AddVectorToInputsAsTensor({3, 4, 5}, "B", 0);
AddVectorToInputsAsTensor({1, 1, 1}, "A", 1);
AddVectorToInputsAsTensor({3, 4, 5}, "B", 1);
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets_mult =
runner_->Outputs().Tag("MULTIPLIED").packets;
@ -409,7 +409,7 @@ TEST_F(TensorflowInferenceCalculatorTest, DISABLED_CheckTiming) {
runner_ = absl::make_unique<CalculatorRunner>(config);
AddSessionInputSidePacket();
AddVectorToInputsAsTensor({0, 0, 0}, "A", 0);
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
EXPECT_EQ(1, runner_
->GetCounter(
@ -465,7 +465,7 @@ TEST_F(TensorflowInferenceCalculatorTest, MissingInputFeature_Skip) {
runner_ = absl::make_unique<CalculatorRunner>(config);
AddSessionInputSidePacket();
AddVectorToInputsAsTensor({2, 2, 2}, "A", 0);
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets_mult =
runner_->Outputs().Tag("MULTIPLIED").packets;
@ -494,7 +494,7 @@ TEST_F(TensorflowInferenceCalculatorTest,
AddVectorToInputsAsTensor({2, 2, 2}, "A", 0);
AddVectorToInputsAsTensor({3, 3, 3}, "A", 1);
AddVectorToInputsAsTensor({3, 4, 5}, "B", 1);
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets_mult =
runner_->Outputs().Tag("MULTIPLIED").packets;

View File

@ -108,7 +108,7 @@ TEST_F(TensorFlowSessionFromFrozenGraphCalculatorTest,
})",
calculator_options_->DebugString()));
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const TensorFlowSession& session =
runner.OutputSidePackets().Tag("SESSION").Get<TensorFlowSession>();
VerifySignatureMap(session);
@ -148,17 +148,17 @@ TEST_F(TensorFlowSessionFromFrozenGraphCalculatorTest,
calculator_options_->DebugString()));
CalculatorGraph graph;
MEDIAPIPE_ASSERT_OK(graph.Initialize(config));
MP_ASSERT_OK(graph.Initialize(config));
StatusOrPoller status_or_poller =
graph.AddOutputStreamPoller("multiplied_tensor");
ASSERT_TRUE(status_or_poller.ok());
OutputStreamPoller poller = std::move(status_or_poller.ValueOrDie());
MEDIAPIPE_ASSERT_OK(graph.StartRun({}));
MEDIAPIPE_ASSERT_OK(graph.AddPacketToInputStream(
MP_ASSERT_OK(graph.StartRun({}));
MP_ASSERT_OK(graph.AddPacketToInputStream(
"a_tensor",
Adopt(new auto(TensorMatrix1x3(1, -1, 10))).At(Timestamp(0))));
MEDIAPIPE_ASSERT_OK(graph.CloseInputStream("a_tensor"));
MP_ASSERT_OK(graph.CloseInputStream("a_tensor"));
Packet packet;
ASSERT_TRUE(poller.Next(&packet));
@ -168,7 +168,7 @@ TEST_F(TensorFlowSessionFromFrozenGraphCalculatorTest,
packet.Get<tf::Tensor>().DebugString());
ASSERT_FALSE(poller.Next(&packet));
MEDIAPIPE_ASSERT_OK(graph.WaitUntilDone());
MP_ASSERT_OK(graph.WaitUntilDone());
}
TEST_F(TensorFlowSessionFromFrozenGraphCalculatorTest,
@ -186,11 +186,11 @@ TEST_F(TensorFlowSessionFromFrozenGraphCalculatorTest,
calculator_options_->DebugString()));
std::string serialized_graph_contents;
MEDIAPIPE_EXPECT_OK(mediapipe::file::GetContents(GetGraphDefPath(),
&serialized_graph_contents));
MP_EXPECT_OK(mediapipe::file::GetContents(GetGraphDefPath(),
&serialized_graph_contents));
runner.MutableSidePackets()->Tag("STRING_MODEL") =
Adopt(new std::string(serialized_graph_contents));
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const TensorFlowSession& session =
runner.OutputSidePackets().Tag("SESSION").Get<TensorFlowSession>();
@ -213,7 +213,7 @@ TEST_F(
calculator_options_->DebugString()));
runner.MutableSidePackets()->Tag("STRING_MODEL_FILE_PATH") =
Adopt(new std::string(GetGraphDefPath()));
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const TensorFlowSession& session =
runner.OutputSidePackets().Tag("SESSION").Get<TensorFlowSession>();
@ -256,8 +256,8 @@ TEST_F(TensorFlowSessionFromFrozenGraphCalculatorTest,
runner.MutableSidePackets()->Tag("STRING_MODEL_FILE_PATH") =
Adopt(new std::string(GetGraphDefPath()));
std::string serialized_graph_contents;
MEDIAPIPE_EXPECT_OK(mediapipe::file::GetContents(GetGraphDefPath(),
&serialized_graph_contents));
MP_EXPECT_OK(mediapipe::file::GetContents(GetGraphDefPath(),
&serialized_graph_contents));
runner.MutableSidePackets()->Tag("STRING_MODEL") =
Adopt(new std::string(serialized_graph_contents));
auto run_status = runner.Run();
@ -283,8 +283,8 @@ TEST_F(TensorFlowSessionFromFrozenGraphCalculatorTest,
runner.MutableSidePackets()->Tag("STRING_MODEL_FILE_PATH") =
Adopt(new std::string(GetGraphDefPath()));
std::string serialized_graph_contents;
MEDIAPIPE_EXPECT_OK(mediapipe::file::GetContents(GetGraphDefPath(),
&serialized_graph_contents));
MP_EXPECT_OK(mediapipe::file::GetContents(GetGraphDefPath(),
&serialized_graph_contents));
runner.MutableSidePackets()->Tag("STRING_MODEL") =
Adopt(new std::string(serialized_graph_contents));
auto run_status = runner.Run();
@ -305,7 +305,7 @@ TEST_F(TensorFlowSessionFromFrozenGraphCalculatorTest,
}
})",
calculator_options_->DebugString()));
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const TensorFlowSession& session =
runner.OutputSidePackets().Tag("SESSION").Get<TensorFlowSession>();

View File

@ -106,7 +106,7 @@ TEST_F(TensorFlowSessionFromFrozenGraphGeneratorTest,
::mediapipe::Status run_status = tool::RunGenerateAndValidateTypes(
"TensorFlowSessionFromFrozenGraphGenerator", extendable_options_,
input_side_packets, &output_side_packets);
MEDIAPIPE_EXPECT_OK(run_status) << run_status.message();
MP_EXPECT_OK(run_status) << run_status.message();
VerifySignatureMap(&output_side_packets);
}
@ -144,17 +144,17 @@ TEST_F(TensorFlowSessionFromFrozenGraphGeneratorTest,
generator_options_->DebugString()));
CalculatorGraph graph;
MEDIAPIPE_ASSERT_OK(graph.Initialize(config));
MP_ASSERT_OK(graph.Initialize(config));
StatusOrPoller status_or_poller =
graph.AddOutputStreamPoller("multiplied_tensor");
ASSERT_TRUE(status_or_poller.ok());
OutputStreamPoller poller = std::move(status_or_poller.ValueOrDie());
MEDIAPIPE_ASSERT_OK(graph.StartRun({}));
MEDIAPIPE_ASSERT_OK(graph.AddPacketToInputStream(
MP_ASSERT_OK(graph.StartRun({}));
MP_ASSERT_OK(graph.AddPacketToInputStream(
"a_tensor",
Adopt(new auto(TensorMatrix1x3(1, -1, 10))).At(Timestamp(0))));
MEDIAPIPE_ASSERT_OK(graph.CloseInputStream("a_tensor"));
MP_ASSERT_OK(graph.CloseInputStream("a_tensor"));
Packet packet;
ASSERT_TRUE(poller.Next(&packet));
@ -164,7 +164,7 @@ TEST_F(TensorFlowSessionFromFrozenGraphGeneratorTest,
packet.Get<tf::Tensor>().DebugString());
ASSERT_FALSE(poller.Next(&packet));
MEDIAPIPE_ASSERT_OK(graph.WaitUntilDone());
MP_ASSERT_OK(graph.WaitUntilDone());
}
TEST_F(TensorFlowSessionFromFrozenGraphGeneratorTest,
@ -174,15 +174,15 @@ TEST_F(TensorFlowSessionFromFrozenGraphGeneratorTest,
PacketSet output_side_packets(
tool::CreateTagMap({"SESSION:session"}).ValueOrDie());
std::string serialized_graph_contents;
MEDIAPIPE_EXPECT_OK(mediapipe::file::GetContents(GetGraphDefPath(),
&serialized_graph_contents));
MP_EXPECT_OK(mediapipe::file::GetContents(GetGraphDefPath(),
&serialized_graph_contents));
generator_options_->clear_graph_proto_path();
input_side_packets.Tag("STRING_MODEL") =
Adopt(new std::string(serialized_graph_contents));
::mediapipe::Status run_status = tool::RunGenerateAndValidateTypes(
"TensorFlowSessionFromFrozenGraphGenerator", extendable_options_,
input_side_packets, &output_side_packets);
MEDIAPIPE_EXPECT_OK(run_status) << run_status.message();
MP_EXPECT_OK(run_status) << run_status.message();
VerifySignatureMap(&output_side_packets);
}
@ -199,7 +199,7 @@ TEST_F(
::mediapipe::Status run_status = tool::RunGenerateAndValidateTypes(
"TensorFlowSessionFromFrozenGraphGenerator", extendable_options_,
input_side_packets, &output_side_packets);
MEDIAPIPE_EXPECT_OK(run_status) << run_status.message();
MP_EXPECT_OK(run_status) << run_status.message();
VerifySignatureMap(&output_side_packets);
}
@ -229,8 +229,8 @@ TEST_F(TensorFlowSessionFromFrozenGraphGeneratorTest,
PacketSet output_side_packets(
tool::CreateTagMap({"SESSION:session"}).ValueOrDie());
std::string serialized_graph_contents;
MEDIAPIPE_EXPECT_OK(mediapipe::file::GetContents(GetGraphDefPath(),
&serialized_graph_contents));
MP_EXPECT_OK(mediapipe::file::GetContents(GetGraphDefPath(),
&serialized_graph_contents));
input_side_packets.Tag("STRING_MODEL") =
Adopt(new std::string(serialized_graph_contents));
input_side_packets.Tag("STRING_MODEL_FILE_PATH") =
@ -254,8 +254,8 @@ TEST_F(TensorFlowSessionFromFrozenGraphGeneratorTest,
PacketSet output_side_packets(
tool::CreateTagMap({"SESSION:session"}).ValueOrDie());
std::string serialized_graph_contents;
EXPECT_OK(mediapipe::file::GetContents(GetGraphDefPath(),
&serialized_graph_contents));
MP_EXPECT_OK(mediapipe::file::GetContents(GetGraphDefPath(),
&serialized_graph_contents));
input_side_packets.Tag("STRING_MODEL") =
Adopt(new std::string(serialized_graph_contents));
input_side_packets.Tag("STRING_MODEL_FILE_PATH") =
@ -280,7 +280,7 @@ TEST_F(TensorFlowSessionFromFrozenGraphGeneratorTest,
::mediapipe::Status run_status = tool::RunGenerateAndValidateTypes(
"TensorFlowSessionFromFrozenGraphGenerator", extendable_options_,
input_side_packets, &output_side_packets);
MEDIAPIPE_EXPECT_OK(run_status);
MP_EXPECT_OK(run_status);
VerifySignatureMap(&output_side_packets);
}


@ -14,10 +14,6 @@
#include <algorithm>
#if defined(MEDIAPIPE_TPU_SUPPORT)
#include "learning/brain/google/xla/global_tpu_init.h"
#include "tensorflow/core/protobuf/tpu/topology.pb.h"
#endif
#if !defined(__ANDROID__)
#include "mediapipe/framework/port/file_helpers.h"
#endif


@ -75,7 +75,7 @@ TEST_F(TensorFlowSessionFromSavedModelCalculatorTest,
}
})",
options_->DebugString()));
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const TensorFlowSession& session =
runner.OutputSidePackets().Tag("SESSION").Get<TensorFlowSession>();
// Session must be set.
@ -119,7 +119,7 @@ TEST_F(TensorFlowSessionFromSavedModelCalculatorTest,
options_->DebugString()));
runner.MutableSidePackets()->Tag("STRING_SAVED_MODEL_PATH") =
MakePacket<std::string>(GetSavedModelDir());
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const TensorFlowSession& session =
runner.OutputSidePackets().Tag("SESSION").Get<TensorFlowSession>();
// Session must be set.
@ -159,17 +159,17 @@ TEST_F(TensorFlowSessionFromSavedModelCalculatorTest,
options_->DebugString()));
CalculatorGraph graph;
MEDIAPIPE_ASSERT_OK(graph.Initialize(graph_config));
MP_ASSERT_OK(graph.Initialize(graph_config));
StatusOrPoller status_or_poller =
graph.AddOutputStreamPoller("multiplied_tensor");
ASSERT_TRUE(status_or_poller.ok());
OutputStreamPoller poller = std::move(status_or_poller.ValueOrDie());
MEDIAPIPE_ASSERT_OK(graph.StartRun({}));
MEDIAPIPE_ASSERT_OK(graph.AddPacketToInputStream(
MP_ASSERT_OK(graph.StartRun({}));
MP_ASSERT_OK(graph.AddPacketToInputStream(
"a_tensor",
Adopt(new auto(TensorMatrix1x3(1, -1, 10))).At(Timestamp(0))));
MEDIAPIPE_ASSERT_OK(graph.CloseInputStream("a_tensor"));
MP_ASSERT_OK(graph.CloseInputStream("a_tensor"));
Packet packet;
ASSERT_TRUE(poller.Next(&packet));
@ -179,7 +179,7 @@ TEST_F(TensorFlowSessionFromSavedModelCalculatorTest,
packet.Get<tf::Tensor>().DebugString());
ASSERT_FALSE(poller.Next(&packet));
MEDIAPIPE_ASSERT_OK(graph.WaitUntilDone());
MP_ASSERT_OK(graph.WaitUntilDone());
}
TEST_F(TensorFlowSessionFromSavedModelCalculatorTest,
@ -197,7 +197,7 @@ TEST_F(TensorFlowSessionFromSavedModelCalculatorTest,
}
})",
options_->DebugString()));
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const TensorFlowSession& session =
runner.OutputSidePackets().Tag("SESSION").Get<TensorFlowSession>();
// Session must be set.

View File

@ -14,10 +14,6 @@
#include <algorithm>
#if defined(MEDIAPIPE_TPU_SUPPORT)
#include "learning/brain/google/xla/global_tpu_init.h"
#include "tensorflow/core/protobuf/tpu/topology.pb.h"
#endif
#if !defined(__ANDROID__)
#include "mediapipe/framework/port/file_helpers.h"
#endif


@ -71,7 +71,7 @@ TEST_F(TensorFlowSessionFromSavedModelGeneratorTest,
::mediapipe::Status run_status = tool::RunGenerateAndValidateTypes(
"TensorFlowSessionFromSavedModelGenerator", extendable_options_,
input_side_packets, &output_side_packets);
MEDIAPIPE_EXPECT_OK(run_status) << run_status.message();
MP_EXPECT_OK(run_status) << run_status.message();
const TensorFlowSession& session =
output_side_packets.Tag("SESSION").Get<TensorFlowSession>();
// Session must be set.
@ -113,7 +113,7 @@ TEST_F(TensorFlowSessionFromSavedModelGeneratorTest,
::mediapipe::Status run_status = tool::RunGenerateAndValidateTypes(
"TensorFlowSessionFromSavedModelGenerator", extendable_options_,
input_side_packets, &output_side_packets);
MEDIAPIPE_EXPECT_OK(run_status) << run_status.message();
MP_EXPECT_OK(run_status) << run_status.message();
const TensorFlowSession& session =
output_side_packets.Tag("SESSION").Get<TensorFlowSession>();
// Session must be set.
@ -154,17 +154,17 @@ TEST_F(TensorFlowSessionFromSavedModelGeneratorTest,
generator_options_->DebugString()));
CalculatorGraph graph;
MEDIAPIPE_ASSERT_OK(graph.Initialize(graph_config));
MP_ASSERT_OK(graph.Initialize(graph_config));
StatusOrPoller status_or_poller =
graph.AddOutputStreamPoller("multiplied_tensor");
ASSERT_TRUE(status_or_poller.ok());
OutputStreamPoller poller = std::move(status_or_poller.ValueOrDie());
MEDIAPIPE_ASSERT_OK(graph.StartRun({}));
MEDIAPIPE_ASSERT_OK(graph.AddPacketToInputStream(
MP_ASSERT_OK(graph.StartRun({}));
MP_ASSERT_OK(graph.AddPacketToInputStream(
"a_tensor",
Adopt(new auto(TensorMatrix1x3(1, -1, 10))).At(Timestamp(0))));
MEDIAPIPE_ASSERT_OK(graph.CloseInputStream("a_tensor"));
MP_ASSERT_OK(graph.CloseInputStream("a_tensor"));
Packet packet;
ASSERT_TRUE(poller.Next(&packet));
@ -174,7 +174,7 @@ TEST_F(TensorFlowSessionFromSavedModelGeneratorTest,
packet.Get<tf::Tensor>().DebugString());
ASSERT_FALSE(poller.Next(&packet));
MEDIAPIPE_ASSERT_OK(graph.WaitUntilDone());
MP_ASSERT_OK(graph.WaitUntilDone());
}
TEST_F(TensorFlowSessionFromSavedModelGeneratorTest,
@ -189,7 +189,7 @@ TEST_F(TensorFlowSessionFromSavedModelGeneratorTest,
::mediapipe::Status run_status = tool::RunGenerateAndValidateTypes(
"TensorFlowSessionFromSavedModelGenerator", extendable_options_,
input_side_packets, &output_side_packets);
MEDIAPIPE_EXPECT_OK(run_status) << run_status.message();
MP_EXPECT_OK(run_status) << run_status.message();
const TensorFlowSession& session =
output_side_packets.Tag("SESSION").Get<TensorFlowSession>();
// Session must be set.


@ -97,7 +97,7 @@ TEST_F(UnpackMediaSequenceCalculatorTest, UnpacksOneImage) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(input_sequence.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Tag("IMAGE").packets;
@ -126,7 +126,7 @@ TEST_F(UnpackMediaSequenceCalculatorTest, UnpacksTwoImages) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(input_sequence.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Tag("IMAGE").packets;
@ -156,7 +156,7 @@ TEST_F(UnpackMediaSequenceCalculatorTest, UnpacksTwoPrefixedImages) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(input_sequence.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Tag("IMAGE_PREFIX").packets;
@ -183,7 +183,7 @@ TEST_F(UnpackMediaSequenceCalculatorTest, UnpacksOneForwardFlowImage) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(input_sequence.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Tag("FORWARD_FLOW_ENCODED").packets;
@ -212,7 +212,7 @@ TEST_F(UnpackMediaSequenceCalculatorTest, UnpacksTwoForwardFlowImages) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(input_sequence.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Tag("FORWARD_FLOW_ENCODED").packets;
@ -242,7 +242,7 @@ TEST_F(UnpackMediaSequenceCalculatorTest, UnpacksBBoxes) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(input_sequence.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Tag("BBOX").packets;
@ -276,7 +276,7 @@ TEST_F(UnpackMediaSequenceCalculatorTest, UnpacksPrefixedBBoxes) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(input_sequence.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Tag("BBOX_PREFIX").packets;
@ -308,7 +308,7 @@ TEST_F(UnpackMediaSequenceCalculatorTest, UnpacksTwoFloatLists) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(input_sequence.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Tag("FLOAT_FEATURE_TEST").packets;
@ -353,7 +353,7 @@ TEST_F(UnpackMediaSequenceCalculatorTest, UnpacksNonOverlappingTimestamps) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(input_sequence.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& output_packets =
runner_->Outputs().Tag("IMAGE").packets;
@ -390,7 +390,7 @@ TEST_F(UnpackMediaSequenceCalculatorTest, UnpacksTwoPostStreamFloatLists) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(input_sequence.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
const std::vector<Packet>& fdense_avg_packets =
runner_->Outputs().Tag("FLOAT_FEATURE_FDENSE_AVG").packets;
@ -419,11 +419,11 @@ TEST_F(UnpackMediaSequenceCalculatorTest, GetDatasetFromPacket) {
std::string root = "test_root";
runner_->MutableSidePackets()->Tag("DATASET_ROOT") = PointToForeign(&root);
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
MEDIAPIPE_ASSERT_OK(runner_->OutputSidePackets()
.Tag("DATA_PATH")
.ValidateAsType<std::string>());
MP_ASSERT_OK(runner_->OutputSidePackets()
.Tag("DATA_PATH")
.ValidateAsType<std::string>());
ASSERT_EQ(runner_->OutputSidePackets().Tag("DATA_PATH").Get<std::string>(),
root + "/" + data_path_);
}
@ -437,11 +437,11 @@ TEST_F(UnpackMediaSequenceCalculatorTest, GetDatasetFromOptions) {
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(sequence_.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
MEDIAPIPE_ASSERT_OK(runner_->OutputSidePackets()
.Tag("DATA_PATH")
.ValidateAsType<std::string>());
MP_ASSERT_OK(runner_->OutputSidePackets()
.Tag("DATA_PATH")
.ValidateAsType<std::string>());
ASSERT_EQ(runner_->OutputSidePackets().Tag("DATA_PATH").Get<std::string>(),
root + "/" + data_path_);
}
@ -450,11 +450,11 @@ TEST_F(UnpackMediaSequenceCalculatorTest, GetDatasetFromExample) {
SetUpCalculator({}, {"DATA_PATH:data_path"});
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(sequence_.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
MEDIAPIPE_ASSERT_OK(runner_->OutputSidePackets()
.Tag("DATA_PATH")
.ValidateAsType<std::string>());
MP_ASSERT_OK(runner_->OutputSidePackets()
.Tag("DATA_PATH")
.ValidateAsType<std::string>());
ASSERT_EQ(runner_->OutputSidePackets().Tag("DATA_PATH").Get<std::string>(),
data_path_);
}
@ -473,11 +473,11 @@ TEST_F(UnpackMediaSequenceCalculatorTest, GetPacketResamplingOptions) {
SetUpCalculator({}, {"RESAMPLER_OPTIONS:resampler_options"}, {}, &options);
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(sequence_.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MP_ASSERT_OK(runner_->Run());
MEDIAPIPE_EXPECT_OK(runner_->OutputSidePackets()
.Tag("RESAMPLER_OPTIONS")
.ValidateAsType<CalculatorOptions>());
MP_EXPECT_OK(runner_->OutputSidePackets()
.Tag("RESAMPLER_OPTIONS")
.ValidateAsType<CalculatorOptions>());
EXPECT_NEAR(runner_->OutputSidePackets()
.Tag("RESAMPLER_OPTIONS")
.Get<CalculatorOptions>()
@ -502,10 +502,10 @@ TEST_F(UnpackMediaSequenceCalculatorTest, GetFrameRateFromExample) {
SetUpCalculator({}, {"IMAGE_FRAME_RATE:frame_rate"});
runner_->MutableSidePackets()->Tag("SEQUENCE_EXAMPLE") =
Adopt(sequence_.release());
MEDIAPIPE_ASSERT_OK(runner_->Run());
MEDIAPIPE_EXPECT_OK(runner_->OutputSidePackets()
.Tag("IMAGE_FRAME_RATE")
.ValidateAsType<double>());
MP_ASSERT_OK(runner_->Run());
MP_EXPECT_OK(runner_->OutputSidePackets()
.Tag("IMAGE_FRAME_RATE")
.ValidateAsType<double>());
EXPECT_EQ(runner_->OutputSidePackets().Tag("IMAGE_FRAME_RATE").Get<double>(),
image_frame_rate_);
}
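The UnpackMediaSequenceCalculator hunks keep pairing MP_ASSERT_OK on runner_->Run() with MP_EXPECT_OK on a ValidateAsType<T>() check before the side packet value is read with Get<T>(). A self-contained sketch of that validate-then-read idiom on a bare Packet follows; the packet contents here are made up for illustration.

#include "mediapipe/framework/packet.h"                // assumed header locations
#include "mediapipe/framework/port/gtest.h"
#include "mediapipe/framework/port/status_matchers.h"

namespace mediapipe {
namespace {

TEST(PacketValidateSketchTest, ValidateThenRead) {
  // Hypothetical payload; in the tests above it is produced by the calculator.
  Packet frame_rate = MakePacket<double>(25.0);

  // Check the stored type first; a mismatch fails here with a Status
  // instead of aborting inside Get<>().
  MP_EXPECT_OK(frame_rate.ValidateAsType<double>());
  EXPECT_EQ(frame_rate.Get<double>(), 25.0);
}

}  // namespace
}  // namespace mediapipe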


@ -79,7 +79,7 @@ mediapipe_cc_proto_library(
name = "ssd_anchors_calculator_cc_proto",
srcs = ["ssd_anchors_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":ssd_anchors_calculator_proto"],
)
@ -87,7 +87,7 @@ mediapipe_cc_proto_library(
name = "tflite_custom_op_resolver_calculator_cc_proto",
srcs = ["tflite_custom_op_resolver_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":tflite_custom_op_resolver_calculator_proto"],
)
@ -95,7 +95,7 @@ mediapipe_cc_proto_library(
name = "tflite_converter_calculator_cc_proto",
srcs = ["tflite_converter_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":tflite_converter_calculator_proto"],
)
@ -103,7 +103,7 @@ mediapipe_cc_proto_library(
name = "tflite_tensors_to_segmentation_calculator_cc_proto",
srcs = ["tflite_tensors_to_segmentation_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":tflite_tensors_to_segmentation_calculator_proto"],
)
@ -111,7 +111,7 @@ mediapipe_cc_proto_library(
name = "tflite_inference_calculator_cc_proto",
srcs = ["tflite_inference_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":tflite_inference_calculator_proto"],
)
@ -119,7 +119,7 @@ mediapipe_cc_proto_library(
name = "tflite_tensors_to_detections_calculator_cc_proto",
srcs = ["tflite_tensors_to_detections_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":tflite_tensors_to_detections_calculator_proto"],
)
@ -127,7 +127,7 @@ mediapipe_cc_proto_library(
name = "tflite_tensors_to_classification_calculator_cc_proto",
srcs = ["tflite_tensors_to_classification_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":tflite_tensors_to_classification_calculator_proto"],
)
@ -135,7 +135,7 @@ mediapipe_cc_proto_library(
name = "tflite_tensors_to_landmarks_calculator_cc_proto",
srcs = ["tflite_tensors_to_landmarks_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":tflite_tensors_to_landmarks_calculator_proto"],
)
@ -200,7 +200,6 @@ cc_library(
srcs = ["tflite_inference_calculator.cc"],
copts = select({
"//mediapipe:ios": [
"-std=c++11",
"-x objective-c++",
"-fobjc-arc", # enable reference-counting
],
@ -246,7 +245,6 @@ cc_library(
srcs = ["tflite_converter_calculator.cc"],
copts = select({
"//mediapipe:ios": [
"-std=c++11",
"-x objective-c++",
"-fobjc-arc", # enable reference-counting
],


@ -79,7 +79,7 @@ class SsdAnchorsCalculator : public CalculatorBase {
cc->Options<SsdAnchorsCalculatorOptions>();
auto anchors = absl::make_unique<std::vector<Anchor>>();
RETURN_IF_ERROR(GenerateAnchors(anchors.get(), options));
MP_RETURN_IF_ERROR(GenerateAnchors(anchors.get(), options));
cc->OutputSidePackets().Index(0).Set(Adopt(anchors.release()));
return ::mediapipe::OkStatus();
}
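Here RETURN_IF_ERROR becomes MP_RETURN_IF_ERROR inside GetContract. The macro evaluates a Status expression and, when it is not OK, returns that status from the enclosing function, which is what lets these calculators chain fallible setup steps without explicit if blocks. A minimal sketch, with hypothetical helpers standing in for GenerateAnchors() and an assumed location for the macro header:

#include "mediapipe/framework/port/status.h"
#include "mediapipe/framework/port/status_macros.h"    // assumed home of MP_RETURN_IF_ERROR

namespace mediapipe {

// Hypothetical fallible steps; each returns ::mediapipe::OkStatus() on success.
::mediapipe::Status LoadConfig() { return ::mediapipe::OkStatus(); }
::mediapipe::Status BuildAnchors() { return ::mediapipe::OkStatus(); }

::mediapipe::Status SetUpAnchors() {
  MP_RETURN_IF_ERROR(LoadConfig());    // early-returns the error status, if any
  MP_RETURN_IF_ERROR(BuildAnchors());
  return ::mediapipe::OkStatus();
}

}  // namespace mediapipe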


@ -90,12 +90,12 @@ TEST(SsdAnchorCalculatorTest, FaceDetectionConfig) {
}
)"));
MEDIAPIPE_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const auto& anchors =
runner.OutputSidePackets().Index(0).Get<std::vector<Anchor>>();
std::string anchors_string;
MEDIAPIPE_EXPECT_OK(mediapipe::file::GetContents(
MP_EXPECT_OK(mediapipe::file::GetContents(
GetGoldenFilePath("anchor_golden_file_0.txt"), &anchors_string));
std::vector<Anchor> anchors_golden;
@ -133,12 +133,12 @@ TEST(SsdAnchorCalculatorTest, MobileSSDConfig) {
}
)"));
MEDIAPIPE_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const auto& anchors =
runner.OutputSidePackets().Index(0).Get<std::vector<Anchor>>();
std::string anchors_string;
MEDIAPIPE_EXPECT_OK(mediapipe::file::GetContents(
MP_EXPECT_OK(mediapipe::file::GetContents(
GetGoldenFilePath("anchor_golden_file_1.txt"), &anchors_string));
std::vector<Anchor> anchors_golden;


@ -190,9 +190,9 @@ REGISTER_CALCULATOR(TfLiteConverterCalculator);
#endif
#if defined(__ANDROID__)
RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
MP_RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
#elif defined(__APPLE__) && !TARGET_OS_OSX // iOS
RETURN_IF_ERROR([MPPMetalHelper updateContract:cc]);
MP_RETURN_IF_ERROR([MPPMetalHelper updateContract:cc]);
#endif
// Assign this calculator's default InputStreamHandler.
@ -204,7 +204,7 @@ REGISTER_CALCULATOR(TfLiteConverterCalculator);
::mediapipe::Status TfLiteConverterCalculator::Open(CalculatorContext* cc) {
cc->SetOffset(TimestampDiff(0));
RETURN_IF_ERROR(LoadOptions(cc));
MP_RETURN_IF_ERROR(LoadOptions(cc));
if (cc->Inputs().HasTag("IMAGE_GPU") ||
cc->Outputs().HasTag("IMAGE_OUT_GPU")) {
@ -222,7 +222,7 @@ REGISTER_CALCULATOR(TfLiteConverterCalculator);
// Cannot use quantization.
use_quantized_tensors_ = false;
#if defined(__ANDROID__)
RETURN_IF_ERROR(gpu_helper_.Open(cc));
MP_RETURN_IF_ERROR(gpu_helper_.Open(cc));
#elif defined(__APPLE__) && !TARGET_OS_OSX // iOS
gpu_helper_ = [[MPPMetalHelper alloc] initWithCalculatorContext:cc];
RET_CHECK(gpu_helper_);
@ -239,14 +239,14 @@ REGISTER_CALCULATOR(TfLiteConverterCalculator);
::mediapipe::Status TfLiteConverterCalculator::Process(CalculatorContext* cc) {
if (use_gpu_) {
if (!initialized_) {
RETURN_IF_ERROR(InitGpu(cc));
MP_RETURN_IF_ERROR(InitGpu(cc));
initialized_ = true;
}
// Convert to GPU tensors type.
RETURN_IF_ERROR(ProcessGPU(cc));
MP_RETURN_IF_ERROR(ProcessGPU(cc));
} else {
// Convert to CPU tensors or Matrix type.
RETURN_IF_ERROR(ProcessCPU(cc));
MP_RETURN_IF_ERROR(ProcessCPU(cc));
}
return ::mediapipe::OkStatus();
@ -321,11 +321,11 @@ REGISTER_CALCULATOR(TfLiteConverterCalculator);
float* tensor_buffer = tensor->data.f;
RET_CHECK(tensor_buffer);
if (image_frame.ByteDepth() == 1) {
RETURN_IF_ERROR(NormalizeImage<uint8>(image_frame, zero_center_,
flip_vertically_, tensor_buffer));
MP_RETURN_IF_ERROR(NormalizeImage<uint8>(
image_frame, zero_center_, flip_vertically_, tensor_buffer));
} else if (image_frame.ByteDepth() == 4) {
RETURN_IF_ERROR(NormalizeImage<float>(image_frame, zero_center_,
flip_vertically_, tensor_buffer));
MP_RETURN_IF_ERROR(NormalizeImage<float>(
image_frame, zero_center_, flip_vertically_, tensor_buffer));
} else {
return ::mediapipe::InternalError(
"Only byte-based (8 bit) and float (32 bit) images supported.");
@ -359,7 +359,7 @@ REGISTER_CALCULATOR(TfLiteConverterCalculator);
float* tensor_buffer = tensor->data.f;
RET_CHECK(tensor_buffer);
RETURN_IF_ERROR(CopyMatrixToTensor(matrix, tensor_buffer));
MP_RETURN_IF_ERROR(CopyMatrixToTensor(matrix, tensor_buffer));
auto output_tensors = absl::make_unique<std::vector<TfLiteTensor>>();
output_tensors->emplace_back(*tensor);
@ -375,7 +375,7 @@ REGISTER_CALCULATOR(TfLiteConverterCalculator);
#if defined(__ANDROID__)
// GpuBuffer to tflite::gpu::GlBuffer conversion.
const auto& input = cc->Inputs().Tag("IMAGE_GPU").Get<mediapipe::GpuBuffer>();
RETURN_IF_ERROR(
MP_RETURN_IF_ERROR(
gpu_helper_.RunInGlContext([this, &input]() -> ::mediapipe::Status {
// Convert GL texture into TfLite GlBuffer (SSBO).
auto src = gpu_helper_.CreateSourceTexture(input);


@ -67,7 +67,7 @@ class TfLiteConverterCalculatorTest : public ::testing::Test {
}
}
}
MEDIAPIPE_ASSERT_OK(graph_->AddPacketToInputStream(
MP_ASSERT_OK(graph_->AddPacketToInputStream(
"matrix", Adopt(matrix.release()).At(Timestamp(0))));
}
@ -99,14 +99,14 @@ TEST_F(TfLiteConverterCalculatorTest, RandomMatrixColMajor) {
// Run the graph.
graph_ = absl::make_unique<CalculatorGraph>();
MEDIAPIPE_ASSERT_OK(graph_->Initialize(graph_config));
MEDIAPIPE_ASSERT_OK(graph_->StartRun({}));
MP_ASSERT_OK(graph_->Initialize(graph_config));
MP_ASSERT_OK(graph_->StartRun({}));
// Push the tensor into the graph.
AddRandomMatrix(num_rows, num_columns, kSeed, /*row_major_matrix=*/false);
// Wait until the calculator done processing.
MEDIAPIPE_ASSERT_OK(graph_->WaitUntilIdle());
MP_ASSERT_OK(graph_->WaitUntilIdle());
EXPECT_EQ(1, output_packets.size());
// Get and process results.
@ -128,8 +128,8 @@ TEST_F(TfLiteConverterCalculatorTest, RandomMatrixColMajor) {
// Fully close graph at end, otherwise calculator+tensors are destroyed
// after calling WaitUntilDone().
MEDIAPIPE_ASSERT_OK(graph_->CloseInputStream("matrix"));
MEDIAPIPE_ASSERT_OK(graph_->WaitUntilDone());
MP_ASSERT_OK(graph_->CloseInputStream("matrix"));
MP_ASSERT_OK(graph_->WaitUntilDone());
graph_.reset();
}
@ -160,14 +160,14 @@ TEST_F(TfLiteConverterCalculatorTest, RandomMatrixRowMajor) {
// Run the graph.
graph_ = absl::make_unique<CalculatorGraph>();
MEDIAPIPE_ASSERT_OK(graph_->Initialize(graph_config));
MEDIAPIPE_ASSERT_OK(graph_->StartRun({}));
MP_ASSERT_OK(graph_->Initialize(graph_config));
MP_ASSERT_OK(graph_->StartRun({}));
// Push the tensor into the graph.
AddRandomMatrix(num_rows, num_columns, kSeed, /*row_major_matrix=*/true);
// Wait until the calculator done processing.
MEDIAPIPE_ASSERT_OK(graph_->WaitUntilIdle());
MP_ASSERT_OK(graph_->WaitUntilIdle());
EXPECT_EQ(1, output_packets.size());
// Get and process results.
@ -189,8 +189,8 @@ TEST_F(TfLiteConverterCalculatorTest, RandomMatrixRowMajor) {
// Fully close graph at end, otherwise calculator+tensors are destroyed
// after calling WaitUntilDone().
MEDIAPIPE_ASSERT_OK(graph_->CloseInputStream("matrix"));
MEDIAPIPE_ASSERT_OK(graph_->WaitUntilDone());
MP_ASSERT_OK(graph_->CloseInputStream("matrix"));
MP_ASSERT_OK(graph_->WaitUntilDone());
graph_.reset();
}
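These converter tests repeat the full CalculatorGraph lifecycle, now spelled with MP_ASSERT_OK at every step: Initialize, StartRun, AddPacketToInputStream, CloseInputStream, and finally WaitUntilDone so the graph is fully closed before the tensors are inspected. A condensed sketch of that lifecycle is below; the PassThroughCalculator node and the include paths are assumptions used only to keep the example small.

#include <vector>

#include "mediapipe/framework/calculator_framework.h"   // assumed header locations
#include "mediapipe/framework/port/gtest.h"
#include "mediapipe/framework/port/parse_text_proto.h"
#include "mediapipe/framework/port/status_matchers.h"
#include "mediapipe/framework/tool/sink.h"

namespace mediapipe {
namespace {

TEST(GraphLifecycleSketchTest, RunsEndToEnd) {
  CalculatorGraphConfig config = ParseTextProtoOrDie<CalculatorGraphConfig>(R"(
    input_stream: "in"
    output_stream: "out"
    node {
      calculator: "PassThroughCalculator"
      input_stream: "in"
      output_stream: "out"
    }
  )");
  std::vector<Packet> output_packets;
  tool::AddVectorSink("out", &config, &output_packets);

  CalculatorGraph graph;
  MP_ASSERT_OK(graph.Initialize(config));
  MP_ASSERT_OK(graph.StartRun({}));
  MP_ASSERT_OK(graph.AddPacketToInputStream(
      "in", MakePacket<int>(42).At(Timestamp(0))));
  // Close the input and drain the graph, mirroring the sequence in the tests above.
  MP_ASSERT_OK(graph.CloseInputStream("in"));
  MP_ASSERT_OK(graph.WaitUntilDone());
  EXPECT_EQ(output_packets.size(), 1);
}

}  // namespace
}  // namespace mediapipe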


@ -182,9 +182,9 @@ REGISTER_CALCULATOR(TfLiteInferenceCalculator);
}
#if defined(__ANDROID__)
RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
MP_RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
#elif defined(__APPLE__) && !TARGET_OS_OSX // iOS
RETURN_IF_ERROR([MPPMetalHelper updateContract:cc]);
MP_RETURN_IF_ERROR([MPPMetalHelper updateContract:cc]);
#endif
// Assign this calculator's default InputStreamHandler.
@ -196,7 +196,7 @@ REGISTER_CALCULATOR(TfLiteInferenceCalculator);
::mediapipe::Status TfLiteInferenceCalculator::Open(CalculatorContext* cc) {
cc->SetOffset(TimestampDiff(0));
RETURN_IF_ERROR(LoadOptions(cc));
MP_RETURN_IF_ERROR(LoadOptions(cc));
if (cc->Inputs().HasTag("TENSORS_GPU")) {
#if defined(__ANDROID__) || (defined(__APPLE__) && !TARGET_OS_OSX)
@ -217,17 +217,17 @@ REGISTER_CALCULATOR(TfLiteInferenceCalculator);
#endif
}
RETURN_IF_ERROR(LoadModel(cc));
MP_RETURN_IF_ERROR(LoadModel(cc));
if (gpu_inference_) {
#if defined(__ANDROID__)
RETURN_IF_ERROR(gpu_helper_.Open(cc));
MP_RETURN_IF_ERROR(gpu_helper_.Open(cc));
#elif defined(__APPLE__) && !TARGET_OS_OSX // iOS
gpu_helper_ = [[MPPMetalHelper alloc] initWithCalculatorContext:cc];
RET_CHECK(gpu_helper_);
#endif
RETURN_IF_ERROR(LoadDelegate(cc));
MP_RETURN_IF_ERROR(LoadDelegate(cc));
}
return ::mediapipe::OkStatus();
@ -241,7 +241,7 @@ REGISTER_CALCULATOR(TfLiteInferenceCalculator);
const auto& input_tensors =
cc->Inputs().Tag("TENSORS_GPU").Get<std::vector<GpuTensor>>();
RET_CHECK_EQ(input_tensors.size(), 1);
RETURN_IF_ERROR(gpu_helper_.RunInGlContext(
MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext(
[this, &input_tensors]() -> ::mediapipe::Status {
// Explicit copy input.
tflite::gpu::gl::CopyBuffer(input_tensors[0], gpu_data_in_->buffer);
@ -290,10 +290,11 @@ REGISTER_CALCULATOR(TfLiteInferenceCalculator);
// 2. Run inference.
if (gpu_inference_) {
#if defined(__ANDROID__)
RETURN_IF_ERROR(gpu_helper_.RunInGlContext([this]() -> ::mediapipe::Status {
RET_CHECK_EQ(interpreter_->Invoke(), kTfLiteOk);
return ::mediapipe::OkStatus();
}));
MP_RETURN_IF_ERROR(
gpu_helper_.RunInGlContext([this]() -> ::mediapipe::Status {
RET_CHECK_EQ(interpreter_->Invoke(), kTfLiteOk);
return ::mediapipe::OkStatus();
}));
#elif defined(__APPLE__) && !TARGET_OS_OSX // iOS
RET_CHECK_EQ(interpreter_->Invoke(), kTfLiteOk);
#endif
@ -367,7 +368,7 @@ REGISTER_CALCULATOR(TfLiteInferenceCalculator);
::mediapipe::Status TfLiteInferenceCalculator::Close(CalculatorContext* cc) {
if (delegate_) {
#if defined(__ANDROID__)
RETURN_IF_ERROR(gpu_helper_.RunInGlContext([this]() -> Status {
MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext([this]() -> Status {
TfLiteGpuDelegateDelete(delegate_);
gpu_data_in_.reset();
for (int i = 0; i < gpu_data_out_.size(); ++i) {


@ -93,13 +93,13 @@ TEST_F(TfLiteInferenceCalculatorTest, SmokeTest) {
std::vector<Packet> output_packets;
tool::AddVectorSink("tensor_out", &graph_config, &output_packets);
CalculatorGraph graph(graph_config);
MEDIAPIPE_ASSERT_OK(graph.StartRun({}));
MP_ASSERT_OK(graph.StartRun({}));
// Push the tensor into the graph.
MEDIAPIPE_ASSERT_OK(graph.AddPacketToInputStream(
MP_ASSERT_OK(graph.AddPacketToInputStream(
"tensor_in", Adopt(input_vec.release()).At(Timestamp(0))));
// Wait until the calculator done processing.
MEDIAPIPE_ASSERT_OK(graph.WaitUntilIdle());
MP_ASSERT_OK(graph.WaitUntilIdle());
ASSERT_EQ(1, output_packets.size());
// Get and process results.
@ -116,8 +116,8 @@ TEST_F(TfLiteInferenceCalculatorTest, SmokeTest) {
// Fully close graph at end, otherwise calculator+tensors are destroyed
// after calling WaitUntilDone().
MEDIAPIPE_ASSERT_OK(graph.CloseInputStream("tensor_in"));
MEDIAPIPE_ASSERT_OK(graph.WaitUntilDone());
MP_ASSERT_OK(graph.CloseInputStream("tensor_in"));
MP_ASSERT_OK(graph.WaitUntilDone());
}
} // namespace mediapipe


@ -103,7 +103,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToClassificationCalculator);
ASSIGN_OR_RETURN(string_path,
PathToResourceAsFile(options.label_map_path()));
std::string label_map_string;
RETURN_IF_ERROR(file::GetContents(string_path, &label_map_string));
MP_RETURN_IF_ERROR(file::GetContents(string_path, &label_map_string));
std::istringstream stream(label_map_string);
std::string line;


@ -83,7 +83,7 @@ TEST_F(TfLiteTensorsToClassificationCalculatorTest, CorrectOutput) {
)"));
BuildGraph(&runner, {0, 0.5, 1});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const auto& output_packets_ = runner.Outputs().Tag("CLASSIFICATIONS").packets;
@ -115,7 +115,7 @@ TEST_F(TfLiteTensorsToClassificationCalculatorTest,
)"));
BuildGraph(&runner, {0, 0.5, 1});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const auto& output_packets_ = runner.Outputs().Tag("CLASSIFICATIONS").packets;
@ -147,7 +147,7 @@ TEST_F(TfLiteTensorsToClassificationCalculatorTest,
)"));
BuildGraph(&runner, {0, 0.5, 1});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const auto& output_packets_ = runner.Outputs().Tag("CLASSIFICATIONS").packets;
@ -174,7 +174,7 @@ TEST_F(TfLiteTensorsToClassificationCalculatorTest, CorrectOutputWithTopK) {
)"));
BuildGraph(&runner, {0, 0.5, 1});
MEDIAPIPE_ASSERT_OK(runner.Run());
MP_ASSERT_OK(runner.Run());
const auto& output_packets_ = runner.Outputs().Tag("CLASSIFICATIONS").packets;


@ -188,7 +188,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator);
}
#if defined(__ANDROID__)
RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
MP_RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
#endif
return ::mediapipe::OkStatus();
@ -201,15 +201,15 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator);
if (cc->Inputs().HasTag("TENSORS_GPU")) {
gpu_input_ = true;
#if defined(__ANDROID__)
RETURN_IF_ERROR(gpu_helper_.Open(cc));
MP_RETURN_IF_ERROR(gpu_helper_.Open(cc));
#endif
}
RETURN_IF_ERROR(LoadOptions(cc));
MP_RETURN_IF_ERROR(LoadOptions(cc));
side_packet_anchors_ = cc->InputSidePackets().HasTag("ANCHORS");
if (gpu_input_) {
RETURN_IF_ERROR(GlSetup(cc));
MP_RETURN_IF_ERROR(GlSetup(cc));
}
return ::mediapipe::OkStatus();
@ -225,9 +225,9 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator);
auto output_detections = absl::make_unique<std::vector<Detection>>();
if (gpu_input_) {
RETURN_IF_ERROR(ProcessGPU(cc, output_detections.get()));
MP_RETURN_IF_ERROR(ProcessGPU(cc, output_detections.get()));
} else {
RETURN_IF_ERROR(ProcessCPU(cc, output_detections.get()));
MP_RETURN_IF_ERROR(ProcessCPU(cc, output_detections.get()));
} // if gpu_input_
// Output
@ -282,7 +282,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator);
anchors_init_ = true;
}
std::vector<float> boxes(num_boxes_ * num_coords_);
RETURN_IF_ERROR(DecodeBoxes(raw_boxes, anchors_, &boxes));
MP_RETURN_IF_ERROR(DecodeBoxes(raw_boxes, anchors_, &boxes));
std::vector<float> detection_scores(num_boxes_);
std::vector<int> detection_classes(num_boxes_);
@ -316,9 +316,9 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator);
detection_classes[i] = class_id;
}
RETURN_IF_ERROR(ConvertToDetections(boxes.data(), detection_scores.data(),
detection_classes.data(),
output_detections));
MP_RETURN_IF_ERROR(
ConvertToDetections(boxes.data(), detection_scores.data(),
detection_classes.data(), output_detections));
} else {
// Postprocessing on CPU with postprocessing op (e.g. anchor decoding and
// non-maximum suppression) within the model.
@ -350,9 +350,9 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator);
detection_classes[i] =
static_cast<int>(detection_classes_tensor->data.f[i]);
}
RETURN_IF_ERROR(ConvertToDetections(detection_boxes, detection_scores,
detection_classes.data(),
output_detections));
MP_RETURN_IF_ERROR(ConvertToDetections(detection_boxes, detection_scores,
detection_classes.data(),
output_detections));
}
return ::mediapipe::OkStatus();
}
@ -381,7 +381,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator);
}
// Run shaders.
RETURN_IF_ERROR(gpu_helper_.RunInGlContext(
MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext(
[this, &input_tensors]() -> ::mediapipe::Status {
// Decode boxes.
decoded_boxes_buffer_->BindToIndex(0);
@ -419,9 +419,9 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator);
detection_scores[i] = score_class_id_pairs[i * 2];
detection_classes[i] = static_cast<int>(score_class_id_pairs[i * 2 + 1]);
}
RETURN_IF_ERROR(ConvertToDetections(boxes.data(), detection_scores.data(),
detection_classes.data(),
output_detections));
MP_RETURN_IF_ERROR(ConvertToDetections(boxes.data(), detection_scores.data(),
detection_classes.data(),
output_detections));
#else
LOG(ERROR) << "GPU input on non-Android not supported yet.";
#endif // defined(__ANDROID__)


@ -89,7 +89,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToLandmarksCalculator);
CalculatorContext* cc) {
cc->SetOffset(TimestampDiff(0));
RETURN_IF_ERROR(LoadOptions(cc));
MP_RETURN_IF_ERROR(LoadOptions(cc));
if (cc->Outputs().HasTag("NORM_LANDMARKS")) {
RET_CHECK(options_.has_input_image_height() &&


@ -177,7 +177,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToSegmentationCalculator);
#endif // __ANDROID__
#if defined(__ANDROID__)
RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
MP_RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
#endif // __ANDROID__
return ::mediapipe::OkStatus();
@ -190,17 +190,17 @@ REGISTER_CALCULATOR(TfLiteTensorsToSegmentationCalculator);
if (cc->Inputs().HasTag("TENSORS_GPU")) {
use_gpu_ = true;
#if defined(__ANDROID__)
RETURN_IF_ERROR(gpu_helper_.Open(cc));
MP_RETURN_IF_ERROR(gpu_helper_.Open(cc));
#endif // __ANDROID__
}
RETURN_IF_ERROR(LoadOptions(cc));
MP_RETURN_IF_ERROR(LoadOptions(cc));
if (use_gpu_) {
#if defined(__ANDROID__)
RETURN_IF_ERROR(
MP_RETURN_IF_ERROR(
gpu_helper_.RunInGlContext([this, cc]() -> ::mediapipe::Status {
RETURN_IF_ERROR(InitGpu(cc));
MP_RETURN_IF_ERROR(InitGpu(cc));
return ::mediapipe::OkStatus();
}));
#else
@ -216,14 +216,14 @@ REGISTER_CALCULATOR(TfLiteTensorsToSegmentationCalculator);
CalculatorContext* cc) {
if (use_gpu_) {
#if defined(__ANDROID__)
RETURN_IF_ERROR(
MP_RETURN_IF_ERROR(
gpu_helper_.RunInGlContext([this, cc]() -> ::mediapipe::Status {
RETURN_IF_ERROR(ProcessGpu(cc));
MP_RETURN_IF_ERROR(ProcessGpu(cc));
return ::mediapipe::OkStatus();
}));
#endif // __ANDROID__
} else {
RETURN_IF_ERROR(ProcessCpu(cc));
MP_RETURN_IF_ERROR(ProcessCpu(cc));
}
return ::mediapipe::OkStatus();
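The GPU branches in this calculator wrap their work in gpu_helper_.RunInGlContext with a lambda whose return type is ::mediapipe::Status, and MP_RETURN_IF_ERROR is used at both levels: inside the lambda it returns from the lambda, and around RunInGlContext it returns from Open or Process. A small self-contained sketch of that nesting, with a stand-in for the GL helper (RunInFakeContext and DoGpuWork are made up for illustration):

#include <functional>

#include "mediapipe/framework/port/status.h"
#include "mediapipe/framework/port/status_macros.h"    // assumed home of MP_RETURN_IF_ERROR

namespace mediapipe {

// Stand-in for GlCalculatorHelper::RunInGlContext: run the callback and
// forward its Status to the caller.
::mediapipe::Status RunInFakeContext(std::function<::mediapipe::Status()> fn) {
  return fn();
}

::mediapipe::Status DoGpuWork() { return ::mediapipe::OkStatus(); }

::mediapipe::Status ProcessSketch() {
  // Inner macro returns from the lambda; outer macro returns from ProcessSketch()
  // when the forwarded status is not OK.
  MP_RETURN_IF_ERROR(RunInFakeContext([]() -> ::mediapipe::Status {
    MP_RETURN_IF_ERROR(DoGpuWork());
    return ::mediapipe::OkStatus();
  }));
  return ::mediapipe::OkStatus();
}

}  // namespace mediapipe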


@ -79,7 +79,7 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:calculator_cc_proto",
"//mediapipe/util:color_cc_proto",
],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":annotation_overlay_calculator_proto"],
)
@ -89,7 +89,7 @@ mediapipe_cc_proto_library(
cc_deps = [
"//mediapipe/framework:calculator_cc_proto",
],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [
":detection_label_id_to_text_calculator_proto",
],
@ -106,7 +106,7 @@ mediapipe_cc_proto_library(
name = "non_max_suppression_calculator_cc_proto",
srcs = ["non_max_suppression_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":non_max_suppression_calculator_proto"],
)
@ -303,7 +303,7 @@ mediapipe_cc_proto_library(
cc_deps = [
"//mediapipe/framework:calculator_cc_proto",
],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":thresholding_calculator_proto"],
)
@ -326,7 +326,7 @@ mediapipe_cc_proto_library(
cc_deps = [
"//mediapipe/framework:calculator_cc_proto",
],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":landmarks_to_detection_calculator_proto"],
)
@ -352,7 +352,7 @@ mediapipe_cc_proto_library(
cc_deps = [
"//mediapipe/framework:calculator_cc_proto",
],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":detections_to_rects_calculator_proto"],
)
@ -362,7 +362,7 @@ mediapipe_cc_proto_library(
cc_deps = [
"//mediapipe/framework:calculator_cc_proto",
],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":landmark_projection_calculator_proto"],
)
@ -372,7 +372,7 @@ mediapipe_cc_proto_library(
cc_deps = [
"//mediapipe/framework:calculator_cc_proto",
],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":rect_transformation_calculator_proto"],
)
@ -517,7 +517,7 @@ mediapipe_cc_proto_library(
"//mediapipe/util:color_cc_proto",
"//mediapipe/util:render_data_cc_proto",
],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":rect_to_render_data_calculator_proto"],
)
@ -529,7 +529,7 @@ mediapipe_cc_proto_library(
"//mediapipe/util:color_cc_proto",
"//mediapipe/util:render_data_cc_proto",
],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":detections_to_render_data_calculator_proto"],
)
@ -560,7 +560,7 @@ mediapipe_cc_proto_library(
"//mediapipe/util:color_cc_proto",
"//mediapipe/util:render_data_cc_proto",
],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":landmarks_to_render_data_calculator_proto"],
)


@ -200,7 +200,7 @@ REGISTER_CALCULATOR(AnnotationOverlayCalculator);
}
#if defined(__ANDROID__) || (defined(__APPLE__) && !TARGET_OS_OSX)
RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
MP_RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
#endif // __ANDROID__ or iOS
return ::mediapipe::OkStatus();
@ -247,7 +247,7 @@ REGISTER_CALCULATOR(AnnotationOverlayCalculator);
if (use_gpu_) {
#if defined(__ANDROID__) || (defined(__APPLE__) && !TARGET_OS_OSX)
RETURN_IF_ERROR(gpu_helper_.Open(cc));
MP_RETURN_IF_ERROR(gpu_helper_.Open(cc));
#endif // __ANDROID__ or iOS
}
@ -262,17 +262,17 @@ REGISTER_CALCULATOR(AnnotationOverlayCalculator);
if (use_gpu_) {
#if defined(__ANDROID__) || (defined(__APPLE__) && !TARGET_OS_OSX)
if (!gpu_initialized_) {
RETURN_IF_ERROR(
MP_RETURN_IF_ERROR(
gpu_helper_.RunInGlContext([this, cc]() -> ::mediapipe::Status {
RETURN_IF_ERROR(GlSetup(cc));
MP_RETURN_IF_ERROR(GlSetup(cc));
return ::mediapipe::OkStatus();
}));
gpu_initialized_ = true;
}
#endif // __ANDROID__ or iOS
RETURN_IF_ERROR(CreateRenderTargetGpu(cc, image_mat));
MP_RETURN_IF_ERROR(CreateRenderTargetGpu(cc, image_mat));
} else {
RETURN_IF_ERROR(CreateRenderTargetCpu(cc, image_mat, &target_format));
MP_RETURN_IF_ERROR(CreateRenderTargetCpu(cc, image_mat, &target_format));
}
// Reset the renderer with the image_mat. No copy here.
@ -291,16 +291,16 @@ REGISTER_CALCULATOR(AnnotationOverlayCalculator);
#if defined(__ANDROID__) || (defined(__APPLE__) && !TARGET_OS_OSX)
// Overlay rendered image in OpenGL, onto a copy of input.
uchar* image_mat_ptr = image_mat->data;
RETURN_IF_ERROR(gpu_helper_.RunInGlContext(
MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext(
[this, cc, image_mat_ptr]() -> ::mediapipe::Status {
RETURN_IF_ERROR(RenderToGpu(cc, image_mat_ptr));
MP_RETURN_IF_ERROR(RenderToGpu(cc, image_mat_ptr));
return ::mediapipe::OkStatus();
}));
#endif // __ANDROID__ or iOS
} else {
// Copy the rendered image to output.
uchar* image_mat_ptr = image_mat->data;
RETURN_IF_ERROR(RenderToCpu(cc, target_format, image_mat_ptr));
MP_RETURN_IF_ERROR(RenderToCpu(cc, target_format, image_mat_ptr));
}
return ::mediapipe::OkStatus();
@ -372,7 +372,7 @@ REGISTER_CALCULATOR(AnnotationOverlayCalculator);
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, image_mat_tex_);
RETURN_IF_ERROR(GlRender(cc));
MP_RETURN_IF_ERROR(GlRender(cc));
glActiveTexture(GL_TEXTURE2);
glBindTexture(GL_TEXTURE_2D, 0);


@ -75,7 +75,7 @@ REGISTER_CALCULATOR(DetectionLabelIdToTextCalculator);
std::string string_path;
ASSIGN_OR_RETURN(string_path, PathToResourceAsFile(options.label_map_path()));
std::string label_map_string;
RETURN_IF_ERROR(file::GetContents(string_path, &label_map_string));
MP_RETURN_IF_ERROR(file::GetContents(string_path, &label_map_string));
std::istringstream stream(label_map_string);
std::string line;


@ -86,7 +86,7 @@ TEST(DetectionLetterboxRemovalCalculatorTest, PaddingLeftRight) {
->Tag("LETTERBOX_PADDING")
.packets.push_back(Adopt(padding.release()).At(Timestamp::PostStream()));
MEDIAPIPE_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& output =
runner.Outputs().Tag("DETECTIONS").packets;
ASSERT_EQ(1, output.size());
@ -134,7 +134,7 @@ TEST(DetectionLetterboxRemovalCalculatorTest, PaddingTopBottom) {
->Tag("LETTERBOX_PADDING")
.packets.push_back(Adopt(padding.release()).At(Timestamp::PostStream()));
MEDIAPIPE_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& output =
runner.Outputs().Tag("DETECTIONS").packets;
ASSERT_EQ(1, output.size());


@ -245,7 +245,7 @@ REGISTER_CALCULATOR(DetectionsToRectsCalculator);
if (cc->Outputs().HasTag(kRectTag)) {
auto output_rect = absl::make_unique<Rect>();
RETURN_IF_ERROR(DetectionToRect(detections[0], output_rect.get()));
MP_RETURN_IF_ERROR(DetectionToRect(detections[0], output_rect.get()));
if (rotate_) {
output_rect->set_rotation(ComputeRotation(detections[0], image_size));
}
@ -254,7 +254,7 @@ REGISTER_CALCULATOR(DetectionsToRectsCalculator);
}
if (cc->Outputs().HasTag(kNormRectTag)) {
auto output_rect = absl::make_unique<NormalizedRect>();
RETURN_IF_ERROR(
MP_RETURN_IF_ERROR(
DetectionToNormalizedRect(detections[0], output_rect.get()));
if (rotate_) {
output_rect->set_rotation(ComputeRotation(detections[0], image_size));
@ -266,7 +266,8 @@ REGISTER_CALCULATOR(DetectionsToRectsCalculator);
if (cc->Outputs().HasTag(kRectsTag)) {
auto output_rects = absl::make_unique<std::vector<Rect>>(detections.size());
for (int i = 0; i < detections.size(); ++i) {
RETURN_IF_ERROR(DetectionToRect(detections[i], &(output_rects->at(i))));
MP_RETURN_IF_ERROR(
DetectionToRect(detections[i], &(output_rects->at(i))));
if (rotate_) {
output_rects->at(i).set_rotation(
ComputeRotation(detections[i], image_size));
@ -279,7 +280,7 @@ REGISTER_CALCULATOR(DetectionsToRectsCalculator);
auto output_rects =
absl::make_unique<std::vector<NormalizedRect>>(detections.size());
for (int i = 0; i < detections.size(); ++i) {
RETURN_IF_ERROR(
MP_RETURN_IF_ERROR(
DetectionToNormalizedRect(detections[i], &(output_rects->at(i))));
if (rotate_) {
output_rects->at(i).set_rotation(


@ -66,7 +66,7 @@ TEST(DetectionsToRectsCalculatorTest, DetectionToRect) {
.packets.push_back(
Adopt(detection.release()).At(Timestamp::PostStream()));
MEDIAPIPE_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& output = runner.Outputs().Tag("RECT").packets;
ASSERT_EQ(1, output.size());
const auto& rect = output[0].Get<Rect>();
@ -91,7 +91,7 @@ TEST(DetectionsToRectsCalculatorTest, DetectionToNormalizedRect) {
.packets.push_back(
Adopt(detection.release()).At(Timestamp::PostStream()));
MEDIAPIPE_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& output = runner.Outputs().Tag("NORM_RECT").packets;
ASSERT_EQ(1, output.size());
const auto& rect = output[0].Get<NormalizedRect>();
@ -117,7 +117,7 @@ TEST(DetectionsToRectsCalculatorTest, DetectionsToRect) {
.packets.push_back(
Adopt(detections.release()).At(Timestamp::PostStream()));
MEDIAPIPE_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& output = runner.Outputs().Tag("RECT").packets;
ASSERT_EQ(1, output.size());
const auto& rect = output[0].Get<Rect>();
@ -143,7 +143,7 @@ TEST(DetectionsToRectsCalculatorTest, DetectionsToNormalizedRect) {
.packets.push_back(
Adopt(detections.release()).At(Timestamp::PostStream()));
MEDIAPIPE_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& output = runner.Outputs().Tag("NORM_RECT").packets;
ASSERT_EQ(1, output.size());
const auto& rect = output[0].Get<NormalizedRect>();
@ -169,7 +169,7 @@ TEST(DetectionsToRectsCalculatorTest, DetectionsToRects) {
.packets.push_back(
Adopt(detections.release()).At(Timestamp::PostStream()));
MEDIAPIPE_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& output = runner.Outputs().Tag("RECTS").packets;
ASSERT_EQ(1, output.size());
const auto& rects = output[0].Get<std::vector<Rect>>();
@ -200,7 +200,7 @@ TEST(DetectionsToRectsCalculatorTest, DetectionsToNormalizedRects) {
.packets.push_back(
Adopt(detections.release()).At(Timestamp::PostStream()));
MEDIAPIPE_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& output =
runner.Outputs().Tag("NORM_RECTS").packets;
ASSERT_EQ(1, output.size());
@ -231,7 +231,7 @@ TEST(DetectionsToRectsCalculatorTest, DetectionToRects) {
.packets.push_back(
Adopt(detection.release()).At(Timestamp::PostStream()));
MEDIAPIPE_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& output = runner.Outputs().Tag("RECTS").packets;
ASSERT_EQ(1, output.size());
const auto& rects = output[0].Get<std::vector<Rect>>();
@ -257,7 +257,7 @@ TEST(DetectionsToRectsCalculatorTest, DetectionToNormalizedRects) {
.packets.push_back(
Adopt(detection.release()).At(Timestamp::PostStream()));
MEDIAPIPE_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& output =
runner.Outputs().Tag("NORM_RECTS").packets;
ASSERT_EQ(1, output.size());
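These tests drive a single node through CalculatorRunner: packets are pushed into tagged inputs at Timestamp::PostStream(), the run is asserted with MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.", and results are read back from runner.Outputs(). A stripped-down sketch of the same pattern using an indexed stream and the framework's PassThroughCalculator (assumed available) rather than the detection calculator:

#include "mediapipe/framework/calculator_runner.h"      // assumed header locations
#include "mediapipe/framework/port/gtest.h"
#include "mediapipe/framework/port/parse_text_proto.h"
#include "mediapipe/framework/port/status_matchers.h"

namespace mediapipe {
namespace {

TEST(CalculatorRunnerSketchTest, SingleNodePostStreamPacket) {
  // Hypothetical single pass-through node; the real tests configure a detections-to-rects node.
  CalculatorRunner runner(ParseTextProtoOrDie<CalculatorGraphConfig::Node>(R"(
    calculator: "PassThroughCalculator"
    input_stream: "in"
    output_stream: "out"
  )"));
  runner.MutableInputs()->Index(0).packets.push_back(
      MakePacket<int>(3).At(Timestamp::PostStream()));

  MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
  EXPECT_EQ(runner.Outputs().Index(0).packets.size(), 1);
}

}  // namespace
}  // namespace mediapipe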


@ -101,7 +101,7 @@ TEST(DetectionsToRenderDataCalculatorTest, OnlyDetecctionList) {
.packets.push_back(
Adopt(detections.release()).At(Timestamp::PostStream()));
MEDIAPIPE_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& output =
runner.Outputs().Tag("RENDER_DATA").packets;
ASSERT_EQ(1, output.size());
@ -135,7 +135,7 @@ TEST(DetectionsToRenderDataCalculatorTest, OnlyDetecctionVector) {
.packets.push_back(
Adopt(detections.release()).At(Timestamp::PostStream()));
MEDIAPIPE_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& output =
runner.Outputs().Tag("RENDER_DATA").packets;
ASSERT_EQ(1, output.size());
@ -178,7 +178,7 @@ TEST(DetectionsToRenderDataCalculatorTest, BothDetecctionListAndVector) {
.packets.push_back(
Adopt(detections.release()).At(Timestamp::PostStream()));
MEDIAPIPE_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& actual =
runner.Outputs().Tag("RENDER_DATA").packets;
ASSERT_EQ(1, actual.size());
@ -218,7 +218,7 @@ TEST(DetectionsToRenderDataCalculatorTest, ProduceEmptyPacket) {
.packets.push_back(
Adopt(detections1.release()).At(Timestamp::PostStream()));
MEDIAPIPE_ASSERT_OK(runner1.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner1.Run()) << "Calculator execution failed.";
const std::vector<Packet>& exact1 =
runner1.Outputs().Tag("RENDER_DATA").packets;
ASSERT_EQ(0, exact1.size());
@ -248,7 +248,7 @@ TEST(DetectionsToRenderDataCalculatorTest, ProduceEmptyPacket) {
.packets.push_back(
Adopt(detections2.release()).At(Timestamp::PostStream()));
MEDIAPIPE_ASSERT_OK(runner2.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner2.Run()) << "Calculator execution failed.";
const std::vector<Packet>& exact2 =
runner2.Outputs().Tag("RENDER_DATA").packets;
ASSERT_EQ(1, exact2.size());


@ -58,7 +58,7 @@ TEST(LandmarkLetterboxRemovalCalculatorTest, PaddingLeftRight) {
->Tag("LETTERBOX_PADDING")
.packets.push_back(Adopt(padding.release()).At(Timestamp::PostStream()));
MEDIAPIPE_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& output = runner.Outputs().Tag("LANDMARKS").packets;
ASSERT_EQ(1, output.size());
const auto& output_landmarks =
@ -92,7 +92,7 @@ TEST(LandmarkLetterboxRemovalCalculatorTest, PaddingTopBottom) {
->Tag("LETTERBOX_PADDING")
.packets.push_back(Adopt(padding.release()).At(Timestamp::PostStream()));
MEDIAPIPE_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& output = runner.Outputs().Tag("LANDMARKS").packets;
ASSERT_EQ(1, output.size());
const auto& output_landmarks =


@ -87,7 +87,7 @@ TEST(PacketFrequencyCalculatorTest, MultiPacketTest) {
Adopt(new int).At(Timestamp(9000000)));
// Run the calculator.
MEDIAPIPE_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& output_packets = runner.Outputs().Index(0).packets;
// Very first packet. So frequency is zero.
@ -153,7 +153,7 @@ TEST(PacketFrequencyCalculatorTest, MultiStreamTest) {
Adopt(new std::string).At(Timestamp(3000000)));
// Run the calculator.
MEDIAPIPE_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
const std::vector<Packet>& output_packets_stream_1 =
runner.Outputs().Index(0).packets;
const std::vector<Packet>& output_packets_stream_2 =


@ -34,7 +34,7 @@ class PacketLatencyCalculatorTest : public ::testing::Test {
void SetupSimulationClock() {
auto executor = std::make_shared<SimulationClockExecutor>(4);
simulation_clock_ = executor->GetClock();
MEDIAPIPE_ASSERT_OK(graph_.SetExecutor("", executor));
MP_ASSERT_OK(graph_.SetExecutor("", executor));
}
void InitializeSingleStreamGraph() {
@ -72,10 +72,10 @@ class PacketLatencyCalculatorTest : public ::testing::Test {
simulation_clock_);
// Start graph run.
MEDIAPIPE_ASSERT_OK(graph_.Initialize(graph_config_, {}));
MEDIAPIPE_ASSERT_OK(graph_.StartRun(side_packet));
MP_ASSERT_OK(graph_.Initialize(graph_config_, {}));
MP_ASSERT_OK(graph_.StartRun(side_packet));
// Let Calculator::Open() calls finish before continuing.
MEDIAPIPE_ASSERT_OK(graph_.WaitUntilIdle());
MP_ASSERT_OK(graph_.WaitUntilIdle());
}
void InitializeMultipleStreamGraph() {
@ -115,7 +115,7 @@ class PacketLatencyCalculatorTest : public ::testing::Test {
&out_1_packets_);
mediapipe::tool::AddVectorSink("packet_latency_2", &graph_config_,
&out_2_packets_);
MEDIAPIPE_ASSERT_OK(graph_.Initialize(graph_config_, {}));
MP_ASSERT_OK(graph_.Initialize(graph_config_, {}));
// Create the simulation clock side packet.
simulation_clock_.reset(new SimulationClock());
@ -125,9 +125,9 @@ class PacketLatencyCalculatorTest : public ::testing::Test {
simulation_clock_);
// Start graph run.
MEDIAPIPE_ASSERT_OK(graph_.StartRun(side_packet));
MP_ASSERT_OK(graph_.StartRun(side_packet));
// Let Calculator::Open() calls finish before continuing.
MEDIAPIPE_ASSERT_OK(graph_.WaitUntilIdle());
MP_ASSERT_OK(graph_.WaitUntilIdle());
}
void InitializeSingleStreamGraphWithoutClock() {
@ -163,10 +163,10 @@ class PacketLatencyCalculatorTest : public ::testing::Test {
simulation_clock_);
// Start graph run.
MEDIAPIPE_ASSERT_OK(graph_.Initialize(graph_config_, {}));
MEDIAPIPE_ASSERT_OK(graph_.StartRun(side_packet));
MP_ASSERT_OK(graph_.Initialize(graph_config_, {}));
MP_ASSERT_OK(graph_.StartRun(side_packet));
// Let Calculator::Open() calls finish before continuing.
MEDIAPIPE_ASSERT_OK(graph_.WaitUntilIdle());
MP_ASSERT_OK(graph_.WaitUntilIdle());
}
PacketLatency CreatePacketLatency(const double latency_usec,
@ -205,16 +205,16 @@ TEST_F(PacketLatencyCalculatorTest, DoesNotOutputUntilInputPacketReceived) {
dynamic_cast<SimulationClock*>(&*simulation_clock_)->ThreadStart();
// Send reference packets with timestamps 0, 6 and 10 usec.
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"camera_frames", Adopt(new double()).At(Timestamp(0))));
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"camera_frames", Adopt(new double()).At(Timestamp(6))));
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"camera_frames", Adopt(new double()).At(Timestamp(10))));
dynamic_cast<SimulationClock*>(&*simulation_clock_)->ThreadFinish();
MEDIAPIPE_ASSERT_OK(graph_.CloseAllInputStreams());
MEDIAPIPE_ASSERT_OK(graph_.WaitUntilDone());
MP_ASSERT_OK(graph_.CloseAllInputStreams());
MP_ASSERT_OK(graph_.WaitUntilDone());
// Expect zero output packets.
ASSERT_EQ(out_0_packets_.size(), 0);
@ -228,20 +228,20 @@ TEST_F(PacketLatencyCalculatorTest, OutputsCorrectLatencyForSingleStream) {
// Send a reference packet with timestamp 10 usec at time 12 usec.
simulation_clock_->Sleep(absl::Microseconds(12));
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"camera_frames", Adopt(new double()).At(Timestamp(10))));
// Add two delayed packets with timestamp 1 and 8 resp.
simulation_clock_->Sleep(absl::Microseconds(1));
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"delayed_packet_0", Adopt(new double()).At(Timestamp(1))));
simulation_clock_->Sleep(absl::Microseconds(1));
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"delayed_packet_0", Adopt(new double()).At(Timestamp(8))));
dynamic_cast<SimulationClock*>(&*simulation_clock_)->ThreadFinish();
MEDIAPIPE_ASSERT_OK(graph_.CloseAllInputStreams());
MEDIAPIPE_ASSERT_OK(graph_.WaitUntilDone());
MP_ASSERT_OK(graph_.CloseAllInputStreams());
MP_ASSERT_OK(graph_.WaitUntilDone());
// Expect two latency packets with timestamp 1 and 8 resp.
ASSERT_EQ(out_0_packets_.size(), 2);
@ -270,26 +270,26 @@ TEST_F(PacketLatencyCalculatorTest, DoesNotOutputUntilReferencePacketReceived) {
dynamic_cast<SimulationClock*>(&*simulation_clock_)->ThreadStart();
// Add two packets with timestamp 1 and 2.
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"delayed_packet_0", Adopt(new double()).At(Timestamp(1))));
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"delayed_packet_0", Adopt(new double()).At(Timestamp(2))));
// Send a reference packet with timestamp 10 usec.
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"camera_frames", Adopt(new double()).At(Timestamp(10))));
simulation_clock_->Sleep(absl::Microseconds(1));
// Add two delayed packets with timestamp 7 and 9 resp.
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"delayed_packet_0", Adopt(new double()).At(Timestamp(7))));
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"delayed_packet_0", Adopt(new double()).At(Timestamp(9))));
simulation_clock_->Sleep(absl::Microseconds(1));
dynamic_cast<SimulationClock*>(&*simulation_clock_)->ThreadFinish();
MEDIAPIPE_ASSERT_OK(graph_.CloseAllInputStreams());
MEDIAPIPE_ASSERT_OK(graph_.WaitUntilDone());
MP_ASSERT_OK(graph_.CloseAllInputStreams());
MP_ASSERT_OK(graph_.WaitUntilDone());
// Expect two latency packets with timestamp 7 and 9 resp. The packets with
// timestamps 1 and 2 should not have any latency associated with them since
@ -320,18 +320,18 @@ TEST_F(PacketLatencyCalculatorTest, OutputsCorrectLatencyWhenNoClock) {
dynamic_cast<SimulationClock*>(&*simulation_clock_)->ThreadStart();
// Send a reference packet with timestamp 10 usec.
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"camera_frames", Adopt(new double()).At(Timestamp(10))));
// Add two delayed packets with timestamp 5 and 10 resp.
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"delayed_packet_0", Adopt(new double()).At(Timestamp(5))));
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"delayed_packet_0", Adopt(new double()).At(Timestamp(10))));
dynamic_cast<SimulationClock*>(&*simulation_clock_)->ThreadFinish();
MEDIAPIPE_ASSERT_OK(graph_.CloseAllInputStreams());
MEDIAPIPE_ASSERT_OK(graph_.WaitUntilDone());
MP_ASSERT_OK(graph_.CloseAllInputStreams());
MP_ASSERT_OK(graph_.WaitUntilDone());
// Expect two latency packets with timestamp 5 and 10 resp.
ASSERT_EQ(out_0_packets_.size(), 2);
@ -347,18 +347,18 @@ TEST_F(PacketLatencyCalculatorTest,
dynamic_cast<SimulationClock*>(&*simulation_clock_)->ThreadStart();
// Send a reference packet with timestamp 20 usec.
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"camera_frames", Adopt(new double()).At(Timestamp(20))));
// Add two delayed packets with timestamp 0 and 20 resp.
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"delayed_packet_0", Adopt(new double()).At(Timestamp(0))));
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"delayed_packet_0", Adopt(new double()).At(Timestamp(20))));
dynamic_cast<SimulationClock*>(&*simulation_clock_)->ThreadFinish();
MEDIAPIPE_ASSERT_OK(graph_.CloseAllInputStreams());
MEDIAPIPE_ASSERT_OK(graph_.WaitUntilDone());
MP_ASSERT_OK(graph_.CloseAllInputStreams());
MP_ASSERT_OK(graph_.WaitUntilDone());
// Expect two latency packets with timestamp 0 and 20 resp.
ASSERT_EQ(out_0_packets_.size(), 2);
@ -387,24 +387,24 @@ TEST_F(PacketLatencyCalculatorTest, ResetsHistogramAndAverageCorrectly) {
dynamic_cast<SimulationClock*>(&*simulation_clock_)->ThreadStart();
// Send a reference packet with timestamp 0 usec.
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"camera_frames", Adopt(new double()).At(Timestamp(0))));
// Add a delayed packet with timestamp 0 usec at time 20 usec.
simulation_clock_->Sleep(absl::Microseconds(20));
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"delayed_packet_0", Adopt(new double()).At(Timestamp(0))));
// Do a long sleep so that histogram and average are reset.
simulation_clock_->Sleep(absl::Microseconds(100));
// Add a delayed packet with timestamp 115 usec at time 120 usec.
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"delayed_packet_0", Adopt(new double()).At(Timestamp(115))));
dynamic_cast<SimulationClock*>(&*simulation_clock_)->ThreadFinish();
MEDIAPIPE_ASSERT_OK(graph_.CloseAllInputStreams());
MEDIAPIPE_ASSERT_OK(graph_.WaitUntilDone());
MP_ASSERT_OK(graph_.CloseAllInputStreams());
MP_ASSERT_OK(graph_.WaitUntilDone());
// Expect two latency packets with timestamp 0 and 115 resp.
ASSERT_EQ(out_0_packets_.size(), 2);
@ -435,26 +435,26 @@ TEST_F(PacketLatencyCalculatorTest, OutputsCorrectLatencyForMultipleStreams) {
dynamic_cast<SimulationClock*>(&*simulation_clock_)->ThreadStart();
// Send a reference packet with timestamp 10 usec.
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"camera_frames", Adopt(new double()).At(Timestamp(10))));
// Add delayed packets on each input stream.
// Fastest stream.
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"delayed_packet_0", Adopt(new double()).At(Timestamp(10))));
// Slow stream.
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"delayed_packet_1", Adopt(new double()).At(Timestamp(5))));
// Slowest stream.
MEDIAPIPE_ASSERT_OK(graph_.AddPacketToInputStream(
MP_ASSERT_OK(graph_.AddPacketToInputStream(
"delayed_packet_2", Adopt(new double()).At(Timestamp(0))));
dynamic_cast<SimulationClock*>(&*simulation_clock_)->ThreadFinish();
MEDIAPIPE_ASSERT_OK(graph_.CloseAllInputStreams());
MEDIAPIPE_ASSERT_OK(graph_.WaitUntilDone());
MP_ASSERT_OK(graph_.CloseAllInputStreams());
MP_ASSERT_OK(graph_.WaitUntilDone());
// Expect one latency packet on each output stream.
ASSERT_EQ(out_0_packets_.size(), 1);

View File

@ -37,7 +37,7 @@ mediapipe_cc_proto_library(
name = "flow_to_image_calculator_cc_proto",
srcs = ["flow_to_image_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":flow_to_image_calculator_proto"],
)
@ -45,7 +45,7 @@ mediapipe_cc_proto_library(
name = "opencv_video_encoder_calculator_cc_proto",
srcs = ["opencv_video_encoder_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":opencv_video_encoder_calculator_proto"],
)

View File

@ -41,13 +41,13 @@ TEST(OpenCvVideoDecoderCalculatorTest, TestMp4Avc720pVideo) {
file::JoinPath("./",
"/mediapipe/calculators/video/"
"testdata/format_MP4_AVC720P_AAC.video"));
MEDIAPIPE_EXPECT_OK(runner.Run());
MP_EXPECT_OK(runner.Run());
EXPECT_EQ(runner.Outputs().Tag("VIDEO_PRESTREAM").packets.size(), 1);
MEDIAPIPE_EXPECT_OK(runner.Outputs()
.Tag("VIDEO_PRESTREAM")
.packets[0]
.ValidateAsType<VideoHeader>());
MP_EXPECT_OK(runner.Outputs()
.Tag("VIDEO_PRESTREAM")
.packets[0]
.ValidateAsType<VideoHeader>());
const mediapipe::VideoHeader& header =
runner.Outputs().Tag("VIDEO_PRESTREAM").packets[0].Get<VideoHeader>();
EXPECT_EQ(ImageFormat::SRGB, header.format);
@ -83,13 +83,13 @@ TEST(OpenCvVideoDecoderCalculatorTest, TestFlvH264Video) {
file::JoinPath("./",
"/mediapipe/calculators/video/"
"testdata/format_FLV_H264_AAC.video"));
MEDIAPIPE_EXPECT_OK(runner.Run());
MP_EXPECT_OK(runner.Run());
EXPECT_EQ(runner.Outputs().Tag("VIDEO_PRESTREAM").packets.size(), 1);
MEDIAPIPE_EXPECT_OK(runner.Outputs()
.Tag("VIDEO_PRESTREAM")
.packets[0]
.ValidateAsType<VideoHeader>());
MP_EXPECT_OK(runner.Outputs()
.Tag("VIDEO_PRESTREAM")
.packets[0]
.ValidateAsType<VideoHeader>());
const mediapipe::VideoHeader& header =
runner.Outputs().Tag("VIDEO_PRESTREAM").packets[0].Get<VideoHeader>();
EXPECT_EQ(ImageFormat::SRGB, header.format);
@ -127,13 +127,13 @@ TEST(OpenCvVideoDecoderCalculatorTest, TestMkvVp8Video) {
file::JoinPath("./",
"/mediapipe/calculators/video/"
"testdata/format_MKV_VP8_VORBIS.video"));
MEDIAPIPE_EXPECT_OK(runner.Run());
MP_EXPECT_OK(runner.Run());
EXPECT_EQ(runner.Outputs().Tag("VIDEO_PRESTREAM").packets.size(), 1);
MEDIAPIPE_EXPECT_OK(runner.Outputs()
.Tag("VIDEO_PRESTREAM")
.packets[0]
.ValidateAsType<VideoHeader>());
MP_EXPECT_OK(runner.Outputs()
.Tag("VIDEO_PRESTREAM")
.packets[0]
.ValidateAsType<VideoHeader>());
const mediapipe::VideoHeader& header =
runner.Outputs().Tag("VIDEO_PRESTREAM").packets[0].Get<VideoHeader>();
EXPECT_EQ(ImageFormat::SRGB, header.format);

View File

@ -66,17 +66,17 @@ TEST(OpenCvVideoEncoderCalculatorTest, DISABLED_TestMp4Avc720pVideo) {
input_side_packets["output_file_path"] =
MakePacket<std::string>(output_file_path);
CalculatorGraph graph;
MEDIAPIPE_ASSERT_OK(graph.Initialize(config, input_side_packets));
MP_ASSERT_OK(graph.Initialize(config, input_side_packets));
StatusOrPoller status_or_poller =
graph.AddOutputStreamPoller("video_prestream");
ASSERT_TRUE(status_or_poller.ok());
OutputStreamPoller poller = std::move(status_or_poller.ValueOrDie());
MEDIAPIPE_ASSERT_OK(graph.StartRun({}));
MP_ASSERT_OK(graph.StartRun({}));
Packet packet;
while (poller.Next(&packet)) {
}
MEDIAPIPE_ASSERT_OK(graph.WaitUntilDone());
MP_ASSERT_OK(graph.WaitUntilDone());
const VideoHeader& video_header = packet.Get<VideoHeader>();
// Checks the generated video file has the same width, height, fps, and
@ -125,17 +125,17 @@ TEST(OpenCvVideoEncoderCalculatorTest, TestFlvH264Video) {
input_side_packets["output_file_path"] =
MakePacket<std::string>(output_file_path);
CalculatorGraph graph;
MEDIAPIPE_ASSERT_OK(graph.Initialize(config, input_side_packets));
MP_ASSERT_OK(graph.Initialize(config, input_side_packets));
StatusOrPoller status_or_poller =
graph.AddOutputStreamPoller("video_prestream");
ASSERT_TRUE(status_or_poller.ok());
OutputStreamPoller poller = std::move(status_or_poller.ValueOrDie());
MEDIAPIPE_ASSERT_OK(graph.StartRun({}));
MP_ASSERT_OK(graph.StartRun({}));
Packet packet;
while (poller.Next(&packet)) {
}
MEDIAPIPE_ASSERT_OK(graph.WaitUntilDone());
MP_ASSERT_OK(graph.WaitUntilDone());
const VideoHeader& video_header = packet.Get<VideoHeader>();
// Checks the generated video file has the same width, height, fps, and
@ -186,17 +186,17 @@ TEST(OpenCvVideoEncoderCalculatorTest, TestMkvVp8Video) {
input_side_packets["output_file_path"] =
MakePacket<std::string>(output_file_path);
CalculatorGraph graph;
MEDIAPIPE_ASSERT_OK(graph.Initialize(config, input_side_packets));
MP_ASSERT_OK(graph.Initialize(config, input_side_packets));
StatusOrPoller status_or_poller =
graph.AddOutputStreamPoller("video_prestream");
ASSERT_TRUE(status_or_poller.ok());
OutputStreamPoller poller = std::move(status_or_poller.ValueOrDie());
MEDIAPIPE_ASSERT_OK(graph.StartRun({}));
MP_ASSERT_OK(graph.StartRun({}));
Packet packet;
while (poller.Next(&packet)) {
}
MEDIAPIPE_ASSERT_OK(graph.WaitUntilDone());
MP_ASSERT_OK(graph.WaitUntilDone());
const VideoHeader& video_header = packet.Get<VideoHeader>();
// Checks the generated video file has the same width, height, fps, and

View File

@ -24,6 +24,7 @@ load("//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library"
proto_library(
name = "flow_quantizer_model_proto",
srcs = ["flow_quantizer_model.proto"],
visibility = ["//mediapipe:__subpackages__"],
)
mediapipe_cc_proto_library(

View File

@ -133,16 +133,16 @@ class Tvl1OpticalFlowCalculator : public CalculatorBase {
cc->Inputs().Tag("SECOND_FRAME").Value().Get<ImageFrame>();
if (forward_requested_) {
auto forward_optical_flow_field = absl::make_unique<OpticalFlowField>();
RETURN_IF_ERROR(CalculateOpticalFlow(first_frame, second_frame,
forward_optical_flow_field.get()));
MP_RETURN_IF_ERROR(CalculateOpticalFlow(first_frame, second_frame,
forward_optical_flow_field.get()));
cc->Outputs()
.Tag("FORWARD_FLOW")
.Add(forward_optical_flow_field.release(), cc->InputTimestamp());
}
if (backward_requested_) {
auto backward_optical_flow_field = absl::make_unique<OpticalFlowField>();
RETURN_IF_ERROR(CalculateOpticalFlow(second_frame, first_frame,
backward_optical_flow_field.get()));
MP_RETURN_IF_ERROR(CalculateOpticalFlow(second_frame, first_frame,
backward_optical_flow_field.get()));
cc->Outputs()
.Tag("BACKWARD_FLOW")
.Add(backward_optical_flow_field.release(), cc->InputTimestamp());

View File

@ -49,12 +49,12 @@ void AddInputPackets(int num_packets, CalculatorGraph* graph) {
}
for (int i = 0; i < num_packets; ++i) {
MEDIAPIPE_ASSERT_OK(graph->AddPacketToInputStream(
"first_frames", packet1.At(Timestamp(i))));
MEDIAPIPE_ASSERT_OK(graph->AddPacketToInputStream(
"second_frames", packet2.At(Timestamp(i))));
MP_ASSERT_OK(graph->AddPacketToInputStream("first_frames",
packet1.At(Timestamp(i))));
MP_ASSERT_OK(graph->AddPacketToInputStream("second_frames",
packet2.At(Timestamp(i))));
}
MEDIAPIPE_ASSERT_OK(graph->CloseAllInputStreams());
MP_ASSERT_OK(graph->CloseAllInputStreams());
}
void RunTest(int num_input_packets, int max_in_flight) {
@ -74,7 +74,7 @@ void RunTest(int num_input_packets, int max_in_flight) {
)",
max_in_flight));
CalculatorGraph graph;
MEDIAPIPE_ASSERT_OK(graph.Initialize(config));
MP_ASSERT_OK(graph.Initialize(config));
StatusOrPoller status_or_poller1 =
graph.AddOutputStreamPoller("forward_flow");
ASSERT_TRUE(status_or_poller1.ok());
@ -84,7 +84,7 @@ void RunTest(int num_input_packets, int max_in_flight) {
ASSERT_TRUE(status_or_poller2.ok());
OutputStreamPoller poller2 = std::move(status_or_poller2.ValueOrDie());
MEDIAPIPE_ASSERT_OK(graph.StartRun({}));
MP_ASSERT_OK(graph.StartRun({}));
AddInputPackets(num_input_packets, &graph);
Packet packet;
std::vector<Packet> forward_optical_flow_packets;
@ -95,7 +95,7 @@ void RunTest(int num_input_packets, int max_in_flight) {
while (poller2.Next(&packet)) {
backward_optical_flow_packets.emplace_back(packet);
}
MEDIAPIPE_ASSERT_OK(graph.WaitUntilDone());
MP_ASSERT_OK(graph.WaitUntilDone());
EXPECT_EQ(num_input_packets, forward_optical_flow_packets.size());
int count = 0;

View File

@ -7,11 +7,11 @@ future.
Note: If you plan to use TensorFlow calculators and example apps, there is a
known issue with gcc and g++ version 6.3 and 7.3. Please use other versions.
Note: While Mediapipe configuring TensorFlow with Python 2, if you see the
Note: While Mediapipe configures TensorFlow, if you see the
following error:
'"/private/var/.../org_tensorflow/third_party/git/git_configure.bzl", line 14,
in _fail fail(("%sGit Configuration Error:%s %...)))', please install the python
future library by `$ pip install --user future`
`"...git_configure.bzl", line 14, in _fail fail(("%sGit Configuration
Error:%s %...)))`,
please install the python future library using: `$ pip install --user future`.
Choose your operating system:
@ -41,7 +41,7 @@ To build and run iOS apps:
$ cd mediapipe
```
2. Install Bazel (0.23 and above required).
2. Install Bazel (0.24.1 and above required).
Option 1. Use package manager tool to install the latest version of Bazel.
@ -139,7 +139,7 @@ To build and run iOS apps:
$ cd mediapipe
```
2. Install Bazel (0.23 and above required).
2. Install Bazel (0.24.1 and above required).
Follow Bazel's
[documentation](https://docs.bazel.build/versions/master/install-redhat.html)
@ -227,7 +227,7 @@ To build and run iOS apps:
$ cd mediapipe
```
3. Install Bazel (0.23 and above required).
3. Install Bazel (0.24.1 and above required).
Option 1. Use package manager tool to install the latest version of Bazel.
@ -360,7 +360,7 @@ To build and run iOS apps:
username@DESKTOP-TMVLBJ1:~$ sudo apt-get update && sudo apt-get install -y --no-install-recommends build-essential git python zip adb openjdk-8-jdk
```
5. Install Bazel (0.23 and above required).
5. Install Bazel (0.24.1 and above required).
```bash
username@DESKTOP-TMVLBJ1:~$ curl -sLO --retry 5 --retry-max-time 10 \
@ -571,9 +571,10 @@ Please verify all the necessary packages are installed.
### Setting up Android Studio with MediaPipe
The steps below use Android Studio to build and install a MediaPipe example app.
The steps below use Android Studio 3.5 to build and install a MediaPipe example
app.
1. Install and launch Android Studio.
1. Install and launch Android Studio 3.5.
2. Select `Configure` | `SDK Manager` | `SDK Platforms`.
@ -588,24 +589,31 @@ The steps below use Android Studio to build and install a MediaPipe example app.
* Verify that Android SDK Tools 26.1.1 is installed.
* Verify that Android NDK 17c or above is installed.
* Take note of the Android NDK Location, e.g.,
`/usr/local/home/Android/Sdk/ndk-bundle`.
`/usr/local/home/Android/Sdk/ndk-bundle` or
`/usr/local/home/Android/Sdk/ndk/20.0.5594570`.
4. Set environment variables `$ANDROID_HOME` and `$ANDROID_NDK_HOME` to point
to the installed SDK and NDK.
```bash
export ANDROID_HOME=/usr/local/home/Android/Sdk
# If the NDK libraries are installed by a previous version of Android Studio, do
export ANDROID_NDK_HOME=/usr/local/home/Android/Sdk/ndk-bundle
# If the NDK libraries are installed by Android Studio 3.5, do
export ANDROID_NDK_HOME=/usr/local/home/Android/Sdk/ndk/<version number>
```
5. Select `Configure` | `Plugins` and install `Bazel`.
6. Select `Android Studio` | `Preferences` | `Bazel settings` and modify `Bazel binary location` to be the same as the output of `$ which bazel`.
6. On Linux, select `File` | `Settings` | `Bazel settings`. On macOS, select
`Android Studio` | `Preferences` | `Bazel settings`. Then modify `Bazel
binary location` to be the same as the output of `$ which bazel`.
7. Select `Import Bazel Project`.
* Select `Workspace`: `/path/to/mediapipe`.
* Select `Generate from BUILD file`: `/path/to/mediapipe/BUILD`.
* Select `Workspace`: `/path/to/mediapipe` and select `Next`.
* Select `Generate from BUILD file`: `/path/to/mediapipe/BUILD` and select `Next`.
* Modify `Project View` to be the following and select `Finish`.
```
@ -616,19 +624,43 @@ The steps below use Android Studio to build and install a MediaPipe example app.
-mediapipe/examples/ios
targets:
//mediapipe/...:all
//mediapipe/examples/android/...:all
//mediapipe/java/...:all
android_sdk_platform: android-29
```
8. Connect an Android device to the workstation.
8. Select `Bazel` | `Sync` | `Sync project with Build files`.
9. Select `Run...` | `Edit Configurations...`.
Note: Even after doing step 4, if you still see the error:
`"no such package '@androidsdk//': Either the path
attribute of android_sdk_repository or the ANDROID_HOME environment variable
must be set."`, please modify the **WORKSPACE** file to point
to your SDK and NDK library locations, as below:
```
android_sdk_repository(
name = "androidsdk",
path = "/path/to/android/sdk"
)
android_ndk_repository(
name = "androidndk",
path = "/path/to/android/ndk"
)
```
9. Connect an Android device to the workstation.
10. Select `Run...` | `Edit Configurations...`.
* Select `Templates` | `Bazel Command`.
* Enter Target Expression:
`//mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectioncpu`
* Enter Bazel command: `mobile-install`
* Enter Bazel flags: `-c opt --config=android_arm64` select `Run`
* Enter Bazel command: `mobile-install`.
* Enter Bazel flags: `-c opt --config=android_arm64`.
* Press the `[+]` button to add the new configuration.
* Select `Run` to run the example app on the connected Android device.
[`WORKSPACE`]: https://github.com/google/mediapipe/tree/master/WORKSPACE
[`opencv_linux.BUILD`]: https://github.com/google/mediapipe/tree/master/third_party/opencv_linux.BUILD

View File

@ -39,17 +39,17 @@ namespace mediapipe {
)");
CalculatorGraph graph;
RETURN_IF_ERROR(graph.Initialize(config));
MP_RETURN_IF_ERROR(graph.Initialize(config));
ASSIGN_OR_RETURN(OutputStreamPoller poller,
graph.AddOutputStreamPoller("out"));
RETURN_IF_ERROR(graph.StartRun({}));
MP_RETURN_IF_ERROR(graph.StartRun({}));
// Give 10 input packets that contain the same std::string "Hello World!".
for (int i = 0; i < 10; ++i) {
RETURN_IF_ERROR(graph.AddPacketToInputStream(
MP_RETURN_IF_ERROR(graph.AddPacketToInputStream(
"in", MakePacket<std::string>("Hello World!").At(Timestamp(i))));
}
// Close the input stream "in".
RETURN_IF_ERROR(graph.CloseInputStream("in"));
MP_RETURN_IF_ERROR(graph.CloseInputStream("in"));
mediapipe::Packet packet;
// Get the output packets std::string.
while (poller.Next(&packet)) {

View File

@ -39,7 +39,7 @@ DEFINE_string(output_side_packets, "",
::mediapipe::Status RunMPPGraph() {
std::string calculator_graph_config_contents;
RETURN_IF_ERROR(mediapipe::file::GetContents(
MP_RETURN_IF_ERROR(mediapipe::file::GetContents(
FLAGS_calculator_graph_config_file, &calculator_graph_config_contents));
LOG(INFO) << "Get calculator graph config contents: "
<< calculator_graph_config_contents;
@ -54,16 +54,16 @@ DEFINE_string(output_side_packets, "",
RET_CHECK(name_and_value.size() == 2);
RET_CHECK(!::mediapipe::ContainsKey(input_side_packets, name_and_value[0]));
std::string input_side_packet_contents;
RETURN_IF_ERROR(mediapipe::file::GetContents(name_and_value[1],
&input_side_packet_contents));
MP_RETURN_IF_ERROR(mediapipe::file::GetContents(
name_and_value[1], &input_side_packet_contents));
input_side_packets[name_and_value[0]] =
::mediapipe::MakePacket<std::string>(input_side_packet_contents);
}
LOG(INFO) << "Initialize the calculator graph.";
mediapipe::CalculatorGraph graph;
RETURN_IF_ERROR(graph.Initialize(config, input_side_packets));
MP_RETURN_IF_ERROR(graph.Initialize(config, input_side_packets));
LOG(INFO) << "Start running the calculator graph.";
RETURN_IF_ERROR(graph.Run());
MP_RETURN_IF_ERROR(graph.Run());
LOG(INFO) << "Gathering output side packets.";
kv_pairs = absl::StrSplit(FLAGS_output_side_packets, ',');
for (const std::string& kv_pair : kv_pairs) {
@ -75,7 +75,7 @@ DEFINE_string(output_side_packets, "",
<< "Packet " << name_and_value[0] << " was not available.";
const std::string& serialized_string =
output_packet.ValueOrDie().Get<std::string>();
RETURN_IF_ERROR(
MP_RETURN_IF_ERROR(
mediapipe::file::SetContents(name_and_value[1], serialized_string));
}
return ::mediapipe::OkStatus();

View File

@ -33,7 +33,7 @@ DEFINE_string(input_side_packets, "",
::mediapipe::Status RunMPPGraph() {
std::string calculator_graph_config_contents;
RETURN_IF_ERROR(mediapipe::file::GetContents(
MP_RETURN_IF_ERROR(mediapipe::file::GetContents(
FLAGS_calculator_graph_config_file, &calculator_graph_config_contents));
LOG(INFO) << "Get calculator graph config contents: "
<< calculator_graph_config_contents;
@ -52,7 +52,7 @@ DEFINE_string(input_side_packets, "",
}
LOG(INFO) << "Initialize the calculator graph.";
mediapipe::CalculatorGraph graph;
RETURN_IF_ERROR(graph.Initialize(config, input_side_packets));
MP_RETURN_IF_ERROR(graph.Initialize(config, input_side_packets));
LOG(INFO) << "Start running the calculator graph.";
return graph.Run();
}

View File

@ -47,7 +47,7 @@
--define MEDIAPIPE_DISABLE_GPU=1 --define no_aws_support=true \
mediapipe/examples/desktop/youtube8m:extract_yt8m_features
./bazel-bin/mediapipe/examples/desktop/youtube8m/extract_yt8m_features
./bazel-bin/mediapipe/examples/desktop/youtube8m/extract_yt8m_features \
--calculator_graph_config_file=mediapipe/graphs/youtube8m/feature_extraction.pbtxt \
--input_side_packets=input_sequence_example=/tmp/mediapipe/metadata.tfrecord \
--output_side_packets=output_sequence_example=/tmp/mediapipe/output.tfrecord

View File

@ -40,7 +40,7 @@ DEFINE_string(output_side_packets, "",
::mediapipe::Status RunMPPGraph() {
std::string calculator_graph_config_contents;
RETURN_IF_ERROR(mediapipe::file::GetContents(
MP_RETURN_IF_ERROR(mediapipe::file::GetContents(
FLAGS_calculator_graph_config_file, &calculator_graph_config_contents));
LOG(INFO) << "Get calculator graph config contents: "
<< calculator_graph_config_contents;
@ -55,8 +55,8 @@ DEFINE_string(output_side_packets, "",
RET_CHECK(name_and_value.size() == 2);
RET_CHECK(!::mediapipe::ContainsKey(input_side_packets, name_and_value[0]));
std::string input_side_packet_contents;
RETURN_IF_ERROR(mediapipe::file::GetContents(name_and_value[1],
&input_side_packet_contents));
MP_RETURN_IF_ERROR(mediapipe::file::GetContents(
name_and_value[1], &input_side_packet_contents));
input_side_packets[name_and_value[0]] =
::mediapipe::MakePacket<std::string>(input_side_packet_contents);
}
@ -68,7 +68,7 @@ DEFINE_string(output_side_packets, "",
vggish_pca_mean_matrix, vggish_pca_projection_matrix;
std::string content;
RETURN_IF_ERROR(mediapipe::file::GetContents(
MP_RETURN_IF_ERROR(mediapipe::file::GetContents(
"/tmp/mediapipe/inception3_mean_matrix_data.pb", &content));
inc3_pca_mean_matrix_data.ParseFromString(content);
mediapipe::MatrixFromMatrixDataProto(inc3_pca_mean_matrix_data,
@ -76,7 +76,7 @@ DEFINE_string(output_side_packets, "",
input_side_packets["inception3_pca_mean_matrix"] =
::mediapipe::MakePacket<mediapipe::Matrix>(inc3_pca_mean_matrix);
RETURN_IF_ERROR(mediapipe::file::GetContents(
MP_RETURN_IF_ERROR(mediapipe::file::GetContents(
"/tmp/mediapipe/inception3_projection_matrix_data.pb", &content));
inc3_pca_projection_matrix_data.ParseFromString(content);
mediapipe::MatrixFromMatrixDataProto(inc3_pca_projection_matrix_data,
@ -84,7 +84,7 @@ DEFINE_string(output_side_packets, "",
input_side_packets["inception3_pca_projection_matrix"] =
::mediapipe::MakePacket<mediapipe::Matrix>(inc3_pca_projection_matrix);
RETURN_IF_ERROR(mediapipe::file::GetContents(
MP_RETURN_IF_ERROR(mediapipe::file::GetContents(
"/tmp/mediapipe/vggish_mean_matrix_data.pb", &content));
vggish_pca_mean_matrix_data.ParseFromString(content);
mediapipe::MatrixFromMatrixDataProto(vggish_pca_mean_matrix_data,
@ -92,7 +92,7 @@ DEFINE_string(output_side_packets, "",
input_side_packets["vggish_pca_mean_matrix"] =
::mediapipe::MakePacket<mediapipe::Matrix>(vggish_pca_mean_matrix);
RETURN_IF_ERROR(mediapipe::file::GetContents(
MP_RETURN_IF_ERROR(mediapipe::file::GetContents(
"/tmp/mediapipe/vggish_projection_matrix_data.pb", &content));
vggish_pca_projection_matrix_data.ParseFromString(content);
mediapipe::MatrixFromMatrixDataProto(vggish_pca_projection_matrix_data,
@ -102,9 +102,9 @@ DEFINE_string(output_side_packets, "",
LOG(INFO) << "Initialize the calculator graph.";
mediapipe::CalculatorGraph graph;
RETURN_IF_ERROR(graph.Initialize(config, input_side_packets));
MP_RETURN_IF_ERROR(graph.Initialize(config, input_side_packets));
LOG(INFO) << "Start running the calculator graph.";
RETURN_IF_ERROR(graph.Run());
MP_RETURN_IF_ERROR(graph.Run());
LOG(INFO) << "Gathering output side packets.";
kv_pairs = absl::StrSplit(FLAGS_output_side_packets, ',');
for (const std::string& kv_pair : kv_pairs) {
@ -116,7 +116,7 @@ DEFINE_string(output_side_packets, "",
<< "Packet " << name_and_value[0] << " was not available.";
const std::string& serialized_string =
output_packet.ValueOrDie().Get<std::string>();
RETURN_IF_ERROR(
MP_RETURN_IF_ERROR(
mediapipe::file::SetContents(name_and_value[1], serialized_string));
}
return ::mediapipe::OkStatus();

View File

@ -52,7 +52,7 @@ proto_library(
proto_library(
name = "calculator_options_proto",
srcs = ["calculator_options.proto"],
visibility = ["//mediapipe/framework:__subpackages__"],
visibility = ["//visibility:public"],
)
proto_library(
@ -87,7 +87,6 @@ proto_library(
srcs = ["packet_generator.proto"],
visibility = [
"//mediapipe:__subpackages__",
"//mediapipe/packet_generator:__pkg__",
],
)
@ -187,7 +186,9 @@ mediapipe_cc_proto_library(
mediapipe_cc_proto_library(
name = "packet_generator_cc_proto",
srcs = ["packet_generator.proto"],
visibility = ["//mediapipe:__subpackages__"],
visibility = [
"//mediapipe:__subpackages__",
],
deps = [":packet_generator_proto"],
)
@ -220,7 +221,7 @@ mediapipe_cc_proto_library(
testonly = 1,
srcs = ["test_calculators.proto"],
cc_deps = [":calculator_cc_proto"],
visibility = ["//mediapipe/framework:__subpackages__"],
visibility = ["//visibility:public"],
deps = [":test_calculators_proto"],
)
@ -1086,7 +1087,6 @@ cc_library(
copts = select({
"//conditions:default": [],
"//mediapipe:apple": [
"-std=c++11",
"-ObjC++",
],
}),

View File

@ -111,9 +111,9 @@ TEST(CalculatorTest, SourceProcessOrder) {
output0_type.SetAny();
output1_type.SetAny();
MEDIAPIPE_ASSERT_OK(
MP_ASSERT_OK(
output_stream_managers.Index(0).Initialize("output0", &output0_type));
MEDIAPIPE_ASSERT_OK(
MP_ASSERT_OK(
output_stream_managers.Index(1).Initialize("output1", &output1_type));
PacketSet input_side_packets(tool::CreateTagMap({}).ValueOrDie());
@ -158,22 +158,22 @@ TEST(CalculatorTest, SourceProcessOrder) {
// Tests registration of a calculator within a namespace.
// DeadEndCalculator is registered in namespace "mediapipe::test_ns".
TEST(CalculatorTest, CreateByName) {
MEDIAPIPE_EXPECT_OK(CalculatorBaseRegistry::CreateByName( //
MP_EXPECT_OK(CalculatorBaseRegistry::CreateByName( //
"mediapipe.test_ns.DeadEndCalculator"));
MEDIAPIPE_EXPECT_OK(CalculatorBaseRegistry::CreateByName( //
MP_EXPECT_OK(CalculatorBaseRegistry::CreateByName( //
".mediapipe.test_ns.DeadEndCalculator"));
MEDIAPIPE_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
MP_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
"alpha", ".mediapipe.test_ns.DeadEndCalculator"));
MEDIAPIPE_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
MP_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
"alpha", "mediapipe.test_ns.DeadEndCalculator"));
MEDIAPIPE_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
MP_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
"mediapipe", "mediapipe.test_ns.DeadEndCalculator"));
MEDIAPIPE_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
MP_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
"mediapipe.test_ns.sub_ns", "DeadEndCalculator"));
EXPECT_EQ(CalculatorBaseRegistry::CreateByNameInNamespace( //
@ -204,23 +204,23 @@ TEST(CalculatorTest, CreateByNameWhitelisted) {
absl::make_unique< ::mediapipe::test_ns::whitelisted_ns::DeadCalculator>);
// A whitelisted calculator can be found in its own namespace.
MEDIAPIPE_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
MP_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
"", "mediapipe.test_ns.whitelisted_ns.DeadCalculator"));
MEDIAPIPE_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
MP_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
"mediapipe.sub_ns", "test_ns.whitelisted_ns.DeadCalculator"));
MEDIAPIPE_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
MP_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
"mediapipe.sub_ns", "mediapipe.EndCalculator"));
// A whitelisted calculator can be found in the top-level namespace.
MEDIAPIPE_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
MP_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
"", "DeadCalculator"));
MEDIAPIPE_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
MP_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
"mediapipe", "DeadCalculator"));
MEDIAPIPE_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
MP_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
"mediapipe.test_ns.sub_ns", "DeadCalculator"));
MEDIAPIPE_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
MP_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
"", "EndCalculator"));
MEDIAPIPE_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
MP_EXPECT_OK(CalculatorBaseRegistry::CreateByNameInNamespace( //
"mediapipe.test_ns.sub_ns", "EndCalculator"));
}
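For context, the namespace lookups exercised above assume a calculator registered under its defining namespace. A minimal sketch of that pattern follows; the calculator class and its contract are hypothetical and not taken from this diff.

```c++
#include "mediapipe/framework/calculator_framework.h"

namespace mediapipe {
namespace test_ns {

// Hypothetical calculator, shown only to illustrate how a calculator that
// lives in a nested namespace is typically declared.
class ExampleNamespacedCalculator : public CalculatorBase {
 public:
  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
    cc->Inputs().Index(0).SetAny();
    return ::mediapipe::OkStatus();
  }
  ::mediapipe::Status Process(CalculatorContext* cc) override {
    return ::mediapipe::OkStatus();
  }
};

}  // namespace test_ns

// Registering with the fully qualified name is what lets
// CalculatorBaseRegistry::CreateByNameInNamespace() resolve the shorter,
// namespace-relative spellings exercised in the tests above.
REGISTER_CALCULATOR(::mediapipe::test_ns::ExampleNamespacedCalculator);

}  // namespace mediapipe
```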

View File

@ -40,7 +40,7 @@ TEST(CalculatorContractTest, Calculator) {
output_stream: "egraph_topical_detection"
)");
CalculatorContract contract;
MEDIAPIPE_EXPECT_OK(contract.Initialize(node));
MP_EXPECT_OK(contract.Initialize(node));
EXPECT_EQ(contract.Inputs().NumEntries(), 4);
EXPECT_EQ(contract.Outputs().NumEntries(), 1);
EXPECT_EQ(contract.InputSidePackets().NumEntries(), 1);
@ -59,7 +59,7 @@ TEST(CalculatorContractTest, CalculatorOptions) {
[mediapipe.CalculatorContractTestOptions.ext] { test_field: 1.0 }
})");
CalculatorContract contract;
MEDIAPIPE_EXPECT_OK(contract.Initialize(node));
MP_EXPECT_OK(contract.Initialize(node));
const auto& test_options =
contract.Options().GetExtension(CalculatorContractTestOptions::ext);
EXPECT_EQ(test_options.test_field(), 1.0);
@ -80,7 +80,7 @@ TEST(CalculatorContractTest, PacketGenerator) {
output_side_packet: "content_fingerprint"
)");
CalculatorContract contract;
MEDIAPIPE_EXPECT_OK(contract.Initialize(node));
MP_EXPECT_OK(contract.Initialize(node));
EXPECT_EQ(contract.InputSidePackets().NumEntries(), 1);
EXPECT_EQ(contract.OutputSidePackets().NumEntries(), 4);
}
@ -93,7 +93,7 @@ TEST(CalculatorContractTest, StatusHandler) {
input_side_packet: "SPEC:task_specification"
)");
CalculatorContract contract;
MEDIAPIPE_EXPECT_OK(contract.Initialize(node));
MP_EXPECT_OK(contract.Initialize(node));
EXPECT_EQ(contract.InputSidePackets().NumEntries(), 2);
}

View File

@ -139,7 +139,7 @@ CalculatorGraph::~CalculatorGraph() {}
++index) {
const EdgeInfo& edge_info =
validated_graph_->OutputSidePacketInfos()[index];
RETURN_IF_ERROR(output_side_packets_[index].Initialize(
MP_RETURN_IF_ERROR(output_side_packets_[index].Initialize(
edge_info.name, edge_info.packet_type));
}
@ -166,7 +166,7 @@ CalculatorGraph::~CalculatorGraph() {}
for (int index = 0; index < validated_graph_->InputStreamInfos().size();
++index) {
const EdgeInfo& edge_info = validated_graph_->InputStreamInfos()[index];
RETURN_IF_ERROR(input_stream_managers_[index].Initialize(
MP_RETURN_IF_ERROR(input_stream_managers_[index].Initialize(
edge_info.name, edge_info.packet_type, edge_info.back_edge));
}
@ -176,7 +176,7 @@ CalculatorGraph::~CalculatorGraph() {}
for (int index = 0; index < validated_graph_->OutputStreamInfos().size();
++index) {
const EdgeInfo& edge_info = validated_graph_->OutputStreamInfos()[index];
RETURN_IF_ERROR(output_stream_managers_[index].Initialize(
MP_RETURN_IF_ERROR(output_stream_managers_[index].Initialize(
edge_info.name, edge_info.packet_type));
}
@ -313,8 +313,8 @@ CalculatorGraph::~CalculatorGraph() {}
}
if (!::mediapipe::ContainsKey(executors_, "")) {
RETURN_IF_ERROR(InitializeDefaultExecutor(*default_executor_options,
use_application_thread));
MP_RETURN_IF_ERROR(InitializeDefaultExecutor(*default_executor_options,
use_application_thread));
}
return ::mediapipe::OkStatus();
@ -345,7 +345,7 @@ CalculatorGraph::~CalculatorGraph() {}
std::max({validated_graph_->Config().node().size(),
validated_graph_->Config().packet_generator().size(), 1}));
}
RETURN_IF_ERROR(
MP_RETURN_IF_ERROR(
CreateDefaultThreadPool(default_executor_options, num_threads));
return ::mediapipe::OkStatus();
}
@ -359,12 +359,12 @@ CalculatorGraph::~CalculatorGraph() {}
<< "validated_graph is not initialized.";
validated_graph_ = std::move(validated_graph);
RETURN_IF_ERROR(InitializeExecutors());
RETURN_IF_ERROR(InitializePacketGeneratorGraph(side_packets));
RETURN_IF_ERROR(InitializeStreams());
RETURN_IF_ERROR(InitializeCalculatorNodes());
MP_RETURN_IF_ERROR(InitializeExecutors());
MP_RETURN_IF_ERROR(InitializePacketGeneratorGraph(side_packets));
MP_RETURN_IF_ERROR(InitializeStreams());
MP_RETURN_IF_ERROR(InitializeCalculatorNodes());
#ifdef MEDIAPIPE_PROFILER_AVAILABLE
RETURN_IF_ERROR(InitializeProfiler());
MP_RETURN_IF_ERROR(InitializeProfiler());
#endif
initialized_ = true;
@ -380,7 +380,7 @@ CalculatorGraph::~CalculatorGraph() {}
const CalculatorGraphConfig& input_config,
const std::map<std::string, Packet>& side_packets) {
auto validated_graph = absl::make_unique<ValidatedGraphConfig>();
RETURN_IF_ERROR(validated_graph->Initialize(input_config));
MP_RETURN_IF_ERROR(validated_graph->Initialize(input_config));
return Initialize(std::move(validated_graph), side_packets);
}
@ -390,8 +390,8 @@ CalculatorGraph::~CalculatorGraph() {}
const std::map<std::string, Packet>& side_packets,
const std::string& graph_type, const Subgraph::SubgraphOptions* options) {
auto validated_graph = absl::make_unique<ValidatedGraphConfig>();
RETURN_IF_ERROR(validated_graph->Initialize(input_configs, input_templates,
graph_type, options));
MP_RETURN_IF_ERROR(validated_graph->Initialize(input_configs, input_templates,
graph_type, options));
return Initialize(std::move(validated_graph), side_packets);
}
@ -409,7 +409,7 @@ CalculatorGraph::~CalculatorGraph() {}
<< "\" because it doesn't exist.";
}
auto observer = absl::make_unique<internal::OutputStreamObserver>();
RETURN_IF_ERROR(observer->Initialize(
MP_RETURN_IF_ERROR(observer->Initialize(
stream_name, &any_packet_type_, std::move(packet_callback),
&output_stream_managers_[output_stream_index]));
graph_output_streams_.push_back(std::move(observer));
@ -427,7 +427,7 @@ CalculatorGraph::AddOutputStreamPoller(const std::string& stream_name) {
<< "\" because it doesn't exist.";
}
auto internal_poller = std::make_shared<internal::OutputStreamPollerImpl>();
RETURN_IF_ERROR(internal_poller->Initialize(
MP_RETURN_IF_ERROR(internal_poller->Initialize(
stream_name, &any_packet_type_,
std::bind(&CalculatorGraph::UpdateThrottledNodes, this,
std::placeholders::_1, std::placeholders::_2),
@ -479,7 +479,7 @@ CalculatorGraph::AddOutputStreamPoller(const std::string& stream_name) {
RET_CHECK(graph_input_streams_.empty()).SetNoLogging()
<< "When using graph input streams, call StartRun() instead of Run() so "
"that AddPacketToInputStream() and CloseInputStream() can be called.";
RETURN_IF_ERROR(StartRun(extra_side_packets, {}));
MP_RETURN_IF_ERROR(StartRun(extra_side_packets, {}));
return WaitUntilDone();
}
@ -488,8 +488,8 @@ CalculatorGraph::AddOutputStreamPoller(const std::string& stream_name) {
const std::map<std::string, Packet>& stream_headers) {
RET_CHECK(initialized_).SetNoLogging()
<< "CalculatorGraph is not initialized.";
RETURN_IF_ERROR(PrepareForRun(extra_side_packets, stream_headers));
RETURN_IF_ERROR(profiler_->Start(executors_[""].get()));
MP_RETURN_IF_ERROR(PrepareForRun(extra_side_packets, stream_headers));
MP_RETURN_IF_ERROR(profiler_->Start(executors_[""].get()));
scheduler_.Start();
return ::mediapipe::OkStatus();
}
@ -570,7 +570,7 @@ CalculatorGraph::PrepareGpu(const std::map<std::string, Packet>& side_packets) {
}
}
for (const auto& name_executor : gpu_resources->GetGpuExecutors()) {
RETURN_IF_ERROR(
MP_RETURN_IF_ERROR(
SetExecutorInternal(name_executor.first, name_executor.second));
}
}
@ -755,7 +755,7 @@ CalculatorGraph::PrepareGpu(const std::map<std::string, Packet>& side_packets) {
return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC)
<< "WaitUntilIdle called on a graph with source nodes.";
}
RETURN_IF_ERROR(scheduler_.WaitUntilIdle());
MP_RETURN_IF_ERROR(scheduler_.WaitUntilIdle());
VLOG(2) << "Scheduler idle.";
::mediapipe::Status status = ::mediapipe::OkStatus();
if (GetCombinedErrors(&status)) {
@ -766,7 +766,7 @@ CalculatorGraph::PrepareGpu(const std::map<std::string, Packet>& side_packets) {
::mediapipe::Status CalculatorGraph::WaitUntilDone() {
VLOG(2) << "Waiting for scheduler to terminate...";
RETURN_IF_ERROR(scheduler_.WaitUntilDone());
MP_RETURN_IF_ERROR(scheduler_.WaitUntilDone());
VLOG(2) << "Scheduler terminated.";
return FinishRun();
@ -1186,7 +1186,7 @@ Packet CalculatorGraph::GetServicePacket(const GraphServiceBase& service) {
if (name.empty()) {
scheduler_.SetExecutor(executor.get());
} else {
RETURN_IF_ERROR(scheduler_.SetNonDefaultExecutor(name, executor.get()));
MP_RETURN_IF_ERROR(scheduler_.SetNonDefaultExecutor(name, executor.get()));
}
return ::mediapipe::OkStatus();
}
@ -1225,7 +1225,7 @@ bool CalculatorGraph::IsReservedExecutorName(const std::string& name) {
::mediapipe::Status CalculatorGraph::FinishRun() {
// Check for any errors that may have occurred.
::mediapipe::Status status = ::mediapipe::OkStatus();
RETURN_IF_ERROR(profiler_->Stop());
MP_RETURN_IF_ERROR(profiler_->Stop());
GetCombinedErrors(&status);
CleanupAfterRun(&status);
return status;

View File

@ -77,17 +77,17 @@ typedef ::mediapipe::StatusOr<OutputStreamPoller> StatusOrPoller;
// #include "mediapipe/framework/calculator_framework.h"
//
// mediapipe::CalculatorGraphConfig config;
// RETURN_IF_ERROR(mediapipe::tool::ParseGraphFromString(kGraphStr, &config));
// mediapipe::CalculatorGraph graph;
// RETURN_IF_ERROR(graph.Initialize(config));
// MP_RETURN_IF_ERROR(mediapipe::tool::ParseGraphFromString(kGraphStr,
// &config)); mediapipe::CalculatorGraph graph;
// MP_RETURN_IF_ERROR(graph.Initialize(config));
//
// std::map<std::string, mediapipe::Packet> extra_side_packets;
// extra_side_packets["video_id"] = mediapipe::MakePacket<std::string>(
// "3edb9503834e9b42");
// RETURN_IF_ERROR(graph.Run(extra_side_packets));
// MP_RETURN_IF_ERROR(graph.Run(extra_side_packets));
//
// // Run again (demonstrating the more concise initializer list syntax).
// RETURN_IF_ERROR(graph.Run(
// MP_RETURN_IF_ERROR(graph.Run(
// {{"video_id", mediapipe::MakePacket<std::string>("Ex-uGhDzue4")}}));
// // See mediapipe/framework/graph_runner.h for an interface
// // to insert and extract packets from a graph as it runs.
@ -186,15 +186,15 @@ class CalculatorGraph {
// subsequent call to StartRun can be attempted.
//
// Example:
// RETURN_IF_ERROR(graph.StartRun(...));
// MP_RETURN_IF_ERROR(graph.StartRun(...));
// while (true) {
// if (graph.HasError() || want_to_stop) break;
// RETURN_IF_ERROR(graph.AddPacketToInputStream(...));
// MP_RETURN_IF_ERROR(graph.AddPacketToInputStream(...));
// }
// for (const std::string& stream : streams) {
// RETURN_IF_ERROR(graph.CloseInputStream(stream));
// MP_RETURN_IF_ERROR(graph.CloseInputStream(stream));
// }
// RETURN_IF_ERROR(graph.WaitUntilDone());
// MP_RETURN_IF_ERROR(graph.WaitUntilDone());
::mediapipe::Status StartRun(
const std::map<std::string, Packet>& extra_side_packets) {
return StartRun(extra_side_packets, {});
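Pulling the comment snippets above together, a minimal end-to-end run loop using the renamed MP_ macros could look like the sketch below. It assumes the standard PassThroughCalculator and framework headers are linked into the binary; the function name is illustrative and not part of this diff.

```c++
#include <string>

#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/port/logging.h"
#include "mediapipe/framework/port/parse_text_proto.h"
#include "mediapipe/framework/port/status.h"

::mediapipe::Status RunPassThroughGraph() {
  // One pass-through node: graph input "in" feeds graph output "out".
  mediapipe::CalculatorGraphConfig config =
      mediapipe::ParseTextProtoOrDie<mediapipe::CalculatorGraphConfig>(R"(
        input_stream: "in"
        output_stream: "out"
        node {
          calculator: "PassThroughCalculator"
          input_stream: "in"
          output_stream: "out"
        }
      )");

  mediapipe::CalculatorGraph graph;
  MP_RETURN_IF_ERROR(graph.Initialize(config));
  ASSIGN_OR_RETURN(mediapipe::OutputStreamPoller poller,
                   graph.AddOutputStreamPoller("out"));
  MP_RETURN_IF_ERROR(graph.StartRun({}));

  // Feed ten string packets with increasing timestamps, then close "in".
  for (int i = 0; i < 10; ++i) {
    MP_RETURN_IF_ERROR(graph.AddPacketToInputStream(
        "in", mediapipe::MakePacket<std::string>("Hello World!")
                  .At(mediapipe::Timestamp(i))));
  }
  MP_RETURN_IF_ERROR(graph.CloseInputStream("in"));

  // Drain the poller, then wait for the graph to finish.
  mediapipe::Packet packet;
  while (poller.Next(&packet)) {
    LOG(INFO) << packet.Get<std::string>();
  }
  return graph.WaitUntilDone();
}
```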

View File

@ -77,27 +77,27 @@ TEST(CalculatorGraphBounds, ImmediateHandlerBounds) {
)");
CalculatorGraph graph;
std::vector<Packet> output_packets;
MEDIAPIPE_ASSERT_OK(graph.Initialize(config));
MEDIAPIPE_ASSERT_OK(graph.ObserveOutputStream("output", [&](const Packet& p) {
MP_ASSERT_OK(graph.Initialize(config));
MP_ASSERT_OK(graph.ObserveOutputStream("output", [&](const Packet& p) {
output_packets.push_back(p);
return ::mediapipe::OkStatus();
}));
MEDIAPIPE_ASSERT_OK(graph.StartRun({}));
MEDIAPIPE_ASSERT_OK(graph.WaitUntilIdle());
MP_ASSERT_OK(graph.StartRun({}));
MP_ASSERT_OK(graph.WaitUntilIdle());
// Add four packets into the graph.
for (int i = 0; i < 4; ++i) {
Packet p = MakePacket<int>(33).At(Timestamp(i));
MEDIAPIPE_ASSERT_OK(graph.AddPacketToInputStream("input", p));
MP_ASSERT_OK(graph.AddPacketToInputStream("input", p));
}
// Four packets arrive at the output only if timestamp bounds are propagated.
MEDIAPIPE_ASSERT_OK(graph.WaitUntilIdle());
MP_ASSERT_OK(graph.WaitUntilIdle());
EXPECT_EQ(output_packets.size(), 4);
// Eventually four packets arrive.
MEDIAPIPE_ASSERT_OK(graph.CloseAllPacketSources());
MEDIAPIPE_ASSERT_OK(graph.WaitUntilDone());
MP_ASSERT_OK(graph.CloseAllPacketSources());
MP_ASSERT_OK(graph.WaitUntilDone());
EXPECT_EQ(output_packets.size(), 4);
}

View File

@ -136,14 +136,14 @@ TEST_F(CalculatorGraphEventLoopTest, WellProvisionedEventLoop) {
// Start MediaPipe graph.
CalculatorGraph graph(graph_config);
MEDIAPIPE_ASSERT_OK(graph.StartRun(
MP_ASSERT_OK(graph.StartRun(
{{"callback", MakePacket<std::function<void(const Packet&)>>(std::bind(
&CalculatorGraphEventLoopTest::AddThreadSafeVectorSink,
this, std::placeholders::_1))}}));
// Insert 100 packets at the rate the calculator can keep up with.
for (int i = 0; i < 100; ++i) {
MEDIAPIPE_ASSERT_OK(graph.AddPacketToInputStream(
MP_ASSERT_OK(graph.AddPacketToInputStream(
"input_numbers", Adopt(new int(i)).At(Timestamp(i))));
// Wait for all packets to be received by the sink.
while (true) {
@ -167,13 +167,13 @@ TEST_F(CalculatorGraphEventLoopTest, WellProvisionedEventLoop) {
// Insert 100 more packets at a rate the graph can't keep up with.
for (int i = 100; i < 200; ++i) {
MEDIAPIPE_ASSERT_OK(graph.AddPacketToInputStream(
MP_ASSERT_OK(graph.AddPacketToInputStream(
"input_numbers", Adopt(new int(i)).At(Timestamp(i))));
}
// Don't wait but just close the input stream.
MEDIAPIPE_ASSERT_OK(graph.CloseInputStream("input_numbers"));
MP_ASSERT_OK(graph.CloseInputStream("input_numbers"));
// Wait properly via the API until the graph is done.
MEDIAPIPE_ASSERT_OK(graph.WaitUntilDone());
MP_ASSERT_OK(graph.WaitUntilDone());
// Check final results.
{
absl::ReaderMutexLock lock(&output_packets_mutex_);
@ -225,7 +225,7 @@ TEST_F(CalculatorGraphEventLoopTest, FailingEventLoop) {
// Start MediaPipe graph.
CalculatorGraph graph(graph_config);
MEDIAPIPE_ASSERT_OK(graph.StartRun(
MP_ASSERT_OK(graph.StartRun(
{{"callback", MakePacket<std::function<void(const Packet&)>>(std::bind(
&CalculatorGraphEventLoopTest::AddThreadSafeVectorSink,
this, std::placeholders::_1))}}));
@ -243,7 +243,7 @@ TEST_F(CalculatorGraphEventLoopTest, FailingEventLoop) {
break;
}
}
MEDIAPIPE_ASSERT_OK(graph.CloseInputStream("input_numbers"));
MP_ASSERT_OK(graph.CloseInputStream("input_numbers"));
status = graph.WaitUntilDone();
ASSERT_THAT(status.message(),
testing::HasSubstr("Meant to fail (magicstringincludedhere)."));
@ -270,7 +270,7 @@ TEST_F(CalculatorGraphEventLoopTest, StepByStepSchedulerLoop) {
// Start MediaPipe graph.
CalculatorGraph graph(graph_config);
MEDIAPIPE_ASSERT_OK(graph.StartRun(
MP_ASSERT_OK(graph.StartRun(
{{"callback", MakePacket<std::function<void(const Packet&)>>(std::bind(
&CalculatorGraphEventLoopTest::AddThreadSafeVectorSink,
this, std::placeholders::_1))}}));
@ -278,16 +278,16 @@ TEST_F(CalculatorGraphEventLoopTest, StepByStepSchedulerLoop) {
// Add packets one at a time; we should be able to synchronize the output for
// each addition in the step-by-step mode.
for (int i = 0; i < 100; ++i) {
MEDIAPIPE_ASSERT_OK(graph.AddPacketToInputStream(
MP_ASSERT_OK(graph.AddPacketToInputStream(
"input_numbers", Adopt(new int(i)).At(Timestamp(i))));
MEDIAPIPE_ASSERT_OK(graph.WaitUntilIdle());
MP_ASSERT_OK(graph.WaitUntilIdle());
absl::ReaderMutexLock lock(&output_packets_mutex_);
ASSERT_EQ(i + 1, output_packets_.size());
}
// Don't wait but just close the input stream.
MEDIAPIPE_ASSERT_OK(graph.CloseInputStream("input_numbers"));
MP_ASSERT_OK(graph.CloseInputStream("input_numbers"));
// Wait properly via the API until the graph is done.
MEDIAPIPE_ASSERT_OK(graph.WaitUntilDone());
MP_ASSERT_OK(graph.WaitUntilDone());
}
// Test setting the stream header.
@ -310,7 +310,7 @@ TEST_F(CalculatorGraphEventLoopTest, SetStreamHeader) {
&graph_config));
CalculatorGraph graph(graph_config);
MEDIAPIPE_ASSERT_OK(graph.StartRun(
MP_ASSERT_OK(graph.StartRun(
{{"callback", MakePacket<std::function<void(const Packet&)>>(std::bind(
&CalculatorGraphEventLoopTest::AddThreadSafeVectorSink,
this, std::placeholders::_1))}}));
@ -327,15 +327,15 @@ TEST_F(CalculatorGraphEventLoopTest, SetStreamHeader) {
header->width = 320;
header->height = 240;
// With stream header set, the StartRun should succeed.
MEDIAPIPE_ASSERT_OK(graph2.StartRun(
MP_ASSERT_OK(graph2.StartRun(
{{"callback", MakePacket<std::function<void(const Packet&)>>(std::bind(
&CalculatorGraphEventLoopTest::AddThreadSafeVectorSink,
this, std::placeholders::_1))}},
{{"input_numbers", Adopt(header.release())}}));
// Don't wait but just close the input stream.
MEDIAPIPE_ASSERT_OK(graph2.CloseInputStream("input_numbers"));
MP_ASSERT_OK(graph2.CloseInputStream("input_numbers"));
// Wait properly via the API until the graph is done.
MEDIAPIPE_ASSERT_OK(graph2.WaitUntilDone());
MP_ASSERT_OK(graph2.WaitUntilDone());
}
// Test ADD_IF_NOT_FULL mode for graph input streams (by creating more packets
@ -369,7 +369,7 @@ TEST_F(CalculatorGraphEventLoopTest, TryToAddPacketToInputStream) {
CalculatorGraph::GraphInputStreamAddMode::ADD_IF_NOT_FULL);
// Start MediaPipe graph.
MEDIAPIPE_ASSERT_OK(graph.StartRun(
MP_ASSERT_OK(graph.StartRun(
{{"callback", MakePacket<std::function<void(const Packet&)>>(std::bind(
&CalculatorGraphEventLoopTest::AddThreadSafeVectorSink,
this, std::placeholders::_1))},
@ -397,9 +397,9 @@ TEST_F(CalculatorGraphEventLoopTest, TryToAddPacketToInputStream) {
EXPECT_GE(fail_count, kNumInputPackets - kMaxQueueSize - 1);
// Don't wait but just close the input stream.
MEDIAPIPE_ASSERT_OK(graph.CloseInputStream("input_numbers"));
MP_ASSERT_OK(graph.CloseInputStream("input_numbers"));
// Wait properly via the API until the graph is done.
MEDIAPIPE_ASSERT_OK(graph.WaitUntilDone());
MP_ASSERT_OK(graph.WaitUntilDone());
}
// Verify that "max_queue_size: -1" disables throttling of graph-input-streams.
@ -426,18 +426,18 @@ TEST_F(CalculatorGraphEventLoopTest, ThrottlingDisabled) {
CalculatorGraph::GraphInputStreamAddMode::ADD_IF_NOT_FULL);
// Start MediaPipe graph.
MEDIAPIPE_ASSERT_OK(graph.StartRun({{"blocking_mutex", mutex_side_packet}}));
MP_ASSERT_OK(graph.StartRun({{"blocking_mutex", mutex_side_packet}}));
// Lock the mutex so that the BlockingPassThroughCalculator cannot read any
// of these packets.
mutex->Lock();
for (int i = 0; i < 10; ++i) {
MEDIAPIPE_EXPECT_OK(graph.AddPacketToInputStream(
MP_EXPECT_OK(graph.AddPacketToInputStream(
"input_numbers", Adopt(new int(i)).At(Timestamp(i))));
}
mutex->Unlock();
MEDIAPIPE_EXPECT_OK(graph.CloseInputStream("input_numbers"));
MEDIAPIPE_EXPECT_OK(graph.WaitUntilDone());
MP_EXPECT_OK(graph.CloseInputStream("input_numbers"));
MP_EXPECT_OK(graph.WaitUntilDone());
}
// Verify that the graph input stream throttling code still works if we run the
@ -467,8 +467,7 @@ TEST_F(CalculatorGraphEventLoopTest, ThrottleGraphInputStreamTwice) {
// Run the graph twice.
for (int i = 0; i < 2; ++i) {
// Start MediaPipe graph.
MEDIAPIPE_ASSERT_OK(
graph.StartRun({{"blocking_mutex", mutex_side_packet}}));
MP_ASSERT_OK(graph.StartRun({{"blocking_mutex", mutex_side_packet}}));
// Lock the mutex so that the BlockingPassThroughCalculator cannot read any
// of these packets.
@ -485,8 +484,8 @@ TEST_F(CalculatorGraphEventLoopTest, ThrottleGraphInputStreamTwice) {
ASSERT_FALSE(status.ok());
EXPECT_EQ(status.code(), ::mediapipe::StatusCode::kUnavailable);
EXPECT_THAT(status.message(), testing::HasSubstr("Graph is throttled."));
MEDIAPIPE_ASSERT_OK(graph.CloseInputStream("input_numbers"));
MEDIAPIPE_ASSERT_OK(graph.WaitUntilDone());
MP_ASSERT_OK(graph.CloseInputStream("input_numbers"));
MP_ASSERT_OK(graph.WaitUntilDone());
}
}
@ -515,7 +514,7 @@ TEST_F(CalculatorGraphEventLoopTest, WaitToAddPacketToInputStream) {
// Start MediaPipe graph.
CalculatorGraph graph(graph_config);
MEDIAPIPE_ASSERT_OK(graph.StartRun(
MP_ASSERT_OK(graph.StartRun(
{{"callback", MakePacket<std::function<void(const Packet&)>>(std::bind(
&CalculatorGraphEventLoopTest::AddThreadSafeVectorSink,
this, std::placeholders::_1))}}));
@ -534,9 +533,9 @@ TEST_F(CalculatorGraphEventLoopTest, WaitToAddPacketToInputStream) {
EXPECT_EQ(0, fail_count);
// Don't wait but just close the input stream.
MEDIAPIPE_ASSERT_OK(graph.CloseInputStream("input_numbers"));
MP_ASSERT_OK(graph.CloseInputStream("input_numbers"));
// Wait properly via the API until the graph is done.
MEDIAPIPE_ASSERT_OK(graph.WaitUntilDone());
MP_ASSERT_OK(graph.WaitUntilDone());
absl::ReaderMutexLock lock(&output_packets_mutex_);
ASSERT_EQ(kNumInputPackets, output_packets_.size());

View File

@ -188,7 +188,7 @@ TEST(CalculatorGraphStoppingTest, CloseAllPacketSources) {
)",
&graph_config));
CalculatorGraph graph;
MEDIAPIPE_ASSERT_OK(graph.Initialize(graph_config, {}));
MP_ASSERT_OK(graph.Initialize(graph_config, {}));
// Observe output packets, and call CloseAllPacketSources after kNumPackets.
std::vector<Packet> out_packets;
@ -196,37 +196,37 @@ TEST(CalculatorGraphStoppingTest, CloseAllPacketSources) {
std::vector<int> event_packets;
std::vector<int> event_out_packets;
int kNumPackets = 8;
MEDIAPIPE_ASSERT_OK(graph.ObserveOutputStream( //
MP_ASSERT_OK(graph.ObserveOutputStream( //
"input_out", [&](const Packet& packet) {
out_packets.push_back(packet);
if (out_packets.size() >= kNumPackets) {
MEDIAPIPE_EXPECT_OK(graph.CloseAllPacketSources());
MP_EXPECT_OK(graph.CloseAllPacketSources());
}
return ::mediapipe::OkStatus();
}));
MEDIAPIPE_ASSERT_OK(graph.ObserveOutputStream( //
MP_ASSERT_OK(graph.ObserveOutputStream( //
"count_out", [&](const Packet& packet) {
count_packets.push_back(packet);
return ::mediapipe::OkStatus();
}));
MEDIAPIPE_ASSERT_OK(graph.ObserveOutputStream( //
MP_ASSERT_OK(graph.ObserveOutputStream( //
"event", [&](const Packet& packet) {
event_packets.push_back(packet.Get<int>());
return ::mediapipe::OkStatus();
}));
MEDIAPIPE_ASSERT_OK(graph.ObserveOutputStream( //
MP_ASSERT_OK(graph.ObserveOutputStream( //
"event_out", [&](const Packet& packet) {
event_out_packets.push_back(packet.Get<int>());
return ::mediapipe::OkStatus();
}));
MEDIAPIPE_ASSERT_OK(graph.StartRun({}));
MP_ASSERT_OK(graph.StartRun({}));
for (int i = 0; i < kNumPackets; ++i) {
MEDIAPIPE_EXPECT_OK(graph.AddPacketToInputStream(
MP_EXPECT_OK(graph.AddPacketToInputStream(
"input", MakePacket<int>(i).At(Timestamp(i))));
}
// The graph run should complete with no error status.
MEDIAPIPE_EXPECT_OK(graph.WaitUntilDone());
MP_EXPECT_OK(graph.WaitUntilDone());
EXPECT_EQ(kNumPackets, out_packets.size());
EXPECT_LE(kNumPackets, count_packets.size());
std::vector<int> expected_events = {1, 2};
@ -254,11 +254,11 @@ TEST(CalculatorGraphStoppingTest, DeadlockReporting) {
)",
&config));
CalculatorGraph graph;
MEDIAPIPE_ASSERT_OK(graph.Initialize(config));
MP_ASSERT_OK(graph.Initialize(config));
graph.SetGraphInputStreamAddMode(
CalculatorGraph::GraphInputStreamAddMode::WAIT_TILL_NOT_FULL);
std::vector<Packet> out_packets;
MEDIAPIPE_ASSERT_OK(
MP_ASSERT_OK(
graph.ObserveOutputStream("out_1", [&out_packets](const Packet& packet) {
out_packets.push_back(packet);
return ::mediapipe::OkStatus();
@ -278,15 +278,15 @@ TEST(CalculatorGraphStoppingTest, DeadlockReporting) {
};
// Start the graph.
MEDIAPIPE_ASSERT_OK(graph.StartRun({
MP_ASSERT_OK(graph.StartRun({
{"callback_1", AdoptAsUniquePtr(new auto(callback_1))},
}));
// Add 3 packets to "in_1" with no packets on "in_2".
// This causes throttling and deadlock with max_queue_size 2.
semaphore.Release(3);
MEDIAPIPE_EXPECT_OK(add_packet("in_1", 1));
MEDIAPIPE_EXPECT_OK(add_packet("in_1", 2));
MP_EXPECT_OK(add_packet("in_1", 1));
MP_EXPECT_OK(add_packet("in_1", 2));
EXPECT_FALSE(add_packet("in_1", 3).ok());
::mediapipe::Status status = graph.WaitUntilIdle();
@ -295,7 +295,7 @@ TEST(CalculatorGraphStoppingTest, DeadlockReporting) {
status.message(),
testing::HasSubstr("Detected a deadlock due to input throttling"));
MEDIAPIPE_ASSERT_OK(graph.CloseAllInputStreams());
MP_ASSERT_OK(graph.CloseAllInputStreams());
EXPECT_FALSE(graph.WaitUntilDone().ok());
ASSERT_EQ(0, out_packets.size());
}
@ -319,11 +319,11 @@ TEST(CalculatorGraphStoppingTest, DeadlockResolution) {
)",
&config));
CalculatorGraph graph;
MEDIAPIPE_ASSERT_OK(graph.Initialize(config));
MP_ASSERT_OK(graph.Initialize(config));
graph.SetGraphInputStreamAddMode(
CalculatorGraph::GraphInputStreamAddMode::WAIT_TILL_NOT_FULL);
std::vector<Packet> out_packets;
MEDIAPIPE_ASSERT_OK(
MP_ASSERT_OK(
graph.ObserveOutputStream("out_1", [&out_packets](const Packet& packet) {
out_packets.push_back(packet);
return ::mediapipe::OkStatus();
@ -343,7 +343,7 @@ TEST(CalculatorGraphStoppingTest, DeadlockResolution) {
};
// Start the graph.
MEDIAPIPE_ASSERT_OK(graph.StartRun({
MP_ASSERT_OK(graph.StartRun({
{"callback_1", AdoptAsUniquePtr(new auto(callback_1))},
}));
@ -351,19 +351,19 @@ TEST(CalculatorGraphStoppingTest, DeadlockResolution) {
// This grows the input stream "in_1" to max-queue-size 10.
semaphore.Release(9);
for (int i = 1; i <= 9; ++i) {
MEDIAPIPE_EXPECT_OK(add_packet("in_1", i));
MEDIAPIPE_ASSERT_OK(graph.WaitUntilIdle());
MP_EXPECT_OK(add_packet("in_1", i));
MP_ASSERT_OK(graph.WaitUntilIdle());
}
// Advance the timestamp-bound and flush "in_1".
semaphore.Release(1);
MEDIAPIPE_EXPECT_OK(add_packet("in_2", 30));
MEDIAPIPE_ASSERT_OK(graph.WaitUntilIdle());
MP_EXPECT_OK(add_packet("in_2", 30));
MP_ASSERT_OK(graph.WaitUntilIdle());
// Fill up input stream "in_1", with the semaphore blocked and deadlock
// resolution disabled.
for (int i = 11; i < 23; ++i) {
MEDIAPIPE_EXPECT_OK(add_packet("in_1", i));
MP_EXPECT_OK(add_packet("in_1", i));
}
// Adding any more packets fails with error "Graph is throttled".
@ -374,9 +374,9 @@ TEST(CalculatorGraphStoppingTest, DeadlockResolution) {
// Allow the 12 blocked calls to "callback_1" to complete.
semaphore.Release(12);
MEDIAPIPE_ASSERT_OK(graph.WaitUntilIdle());
MEDIAPIPE_ASSERT_OK(graph.CloseAllInputStreams());
MEDIAPIPE_ASSERT_OK(graph.WaitUntilDone());
MP_ASSERT_OK(graph.WaitUntilIdle());
MP_ASSERT_OK(graph.CloseAllInputStreams());
MP_ASSERT_OK(graph.WaitUntilDone());
ASSERT_EQ(21, out_packets.size());
}

Some files were not shown because too many files have changed in this diff.