Merge branch 'master' into text-embedder-python
commit acd9c280c0
@@ -177,7 +177,7 @@ http_archive(
         "//third_party:com_google_sentencepiece_no_gflag_no_gtest.diff",
     ],
     patch_args = ["-p1"],
-    repo_mapping = {"@com_google_glog" : "@com_github_glog_glog"},
+    repo_mapping = {"@com_google_glog" : "@com_github_glog_glog_no_gflags"},
 )

 http_archive(
@@ -45,6 +45,12 @@ def main(_) -> None:
   while (mp_root := mp_root.parent).name != 'mediapipe':
     # Find the nearest `mediapipe` dir.
     pass

+  # Externally, parts of the repo are nested inside a mediapipe/ directory
+  # that does not exist internally. Support both.
+  if (mp_root / 'mediapipe').exists():
+    mp_root = mp_root / 'mediapipe'
+
   java_root = mp_root / 'tasks/java'

   gen_java.gen_java_docs(
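Aside (not part of the diff): the hunk above relies on a pathlib parent-walk to locate the nearest `mediapipe` ancestor directory, then optionally descends into a nested `mediapipe/` level that only exists in the external repo layout. A minimal sketch of that idiom; the helper name and the filesystem-root guard are illustrative assumptions, not code from this commit:

from pathlib import Path

def find_mediapipe_root(start: Path) -> Path:
  # Walk upward until a directory literally named 'mediapipe' is found.
  root = start
  while (root := root.parent).name != 'mediapipe':
    if root == root.parent:  # Hit the filesystem root without a match.
      raise FileNotFoundError("no 'mediapipe' ancestor directory")
  # Externally the repo nests another mediapipe/ level; prefer it if present.
  if (root / 'mediapipe').exists():
    root = root / 'mediapipe'
  return root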
@@ -79,21 +79,25 @@ std::vector<T> PacketValues(const std::vector<Packet>& packets) {
   return result;
 }

 template <typename T>
 std::vector<Packet> MakePackets(std::vector<std::pair<Timestamp, T>> contents) {
   std::vector<Packet> result;
   for (auto& entry : contents) {
     result.push_back(MakePacket<T>(entry.second).At(entry.first));
   }
   return result;
 }

 std::string SourceString(Timestamp t) {
   return (t.IsSpecialValue())
              ? t.DebugString()
              : absl::StrCat("Timestamp(", t.DebugString(), ")");
 }

 template <typename T>
 std::string SourceString(Packet packet) {
   std::ostringstream oss;
   if (packet.IsEmpty()) {
     oss << "Packet()";
   } else {
     oss << "MakePacket<" << MediaPipeTypeStringOrDemangled<T>() << ">("
         << packet.Get<T>() << ")";
   }
   oss << ".At(" << SourceString(packet.Timestamp()) << ")";
   return oss.str();
 }

 template <typename PacketContainer, typename PacketContent>
 class PacketsEqMatcher
     : public ::testing::MatcherInterface<const PacketContainer&> {

@@ -123,8 +127,9 @@ class PacketsEqMatcher
     }
     for (auto i1 = c1.begin(), i2 = c2.begin(); i1 != c1.end(); ++i1, ++i2) {
       Packet p1 = *i1, p2 = *i2;
-      if (p1.Timestamp() != p2.Timestamp() ||
-          p1.Get<PacketContent>() != p2.Get<PacketContent>()) {
+      if (p1.Timestamp() != p2.Timestamp() || p1.IsEmpty() != p2.IsEmpty() ||
+          (!p1.IsEmpty() &&
+           p1.Get<PacketContent>() != p2.Get<PacketContent>())) {
         return false;
       }
     }

@@ -133,10 +138,9 @@ class PacketsEqMatcher
   void Print(const PacketContainer& packets, ::std::ostream* os) const {
     for (auto it = packets.begin(); it != packets.end(); ++it) {
       const Packet& packet = *it;
-      *os << (it == packets.begin() ? "{" : "") << "{"
-          << SourceString(packet.Timestamp()) << ", "
-          << packet.Get<PacketContent>() << "}"
-          << (std::next(it) == packets.end() ? "}" : ", ");
+      *os << (it == packets.begin() ? "{" : "");
+      *os << SourceString<PacketContent>(packet);
+      *os << (std::next(it) == packets.end() ? "}" : ", ");
     }
   }

@@ -144,7 +148,7 @@ class PacketsEqMatcher
 };

 template <typename PacketContainer, typename PacketContent>
-::testing::Matcher<const PacketContainer&> PackestEq(
+::testing::Matcher<const PacketContainer&> PacketsEq(
     const PacketContainer& packets) {
   return MakeMatcher(
       new PacketsEqMatcher<PacketContainer, PacketContent>(packets));

@@ -739,8 +743,8 @@ TEST_F(FlowLimiterCalculatorTest, TwoInputStreams) {
 // The processing time "sleep_time" is reduced from 22ms to 12ms to create
 // the same frame rate as FlowLimiterCalculatorTest::TwoInputStreams.
 TEST_F(FlowLimiterCalculatorTest, ZeroQueue) {
-  auto BoolPackestEq = PackestEq<std::vector<Packet>, bool>;
-  auto IntPackestEq = PackestEq<std::vector<Packet>, int>;
+  auto BoolPacketsEq = PacketsEq<std::vector<Packet>, bool>;
+  auto IntPacketsEq = PacketsEq<std::vector<Packet>, int>;

   // Configure the test.
   SetUpInputData();

@@ -835,52 +839,86 @@ TEST_F(FlowLimiterCalculatorTest, ZeroQueue) {
       input_packets_[0], input_packets_[2], input_packets_[15],
       input_packets_[17], input_packets_[19],
   };
-  EXPECT_THAT(out_1_packets_, IntPackestEq(expected_output));
+  EXPECT_THAT(out_1_packets_, IntPacketsEq(expected_output));
   // Exactly the timestamps released by FlowLimiterCalculator for in_1_sampled.
   std::vector<Packet> expected_output_2 = {
       input_packets_[0], input_packets_[2], input_packets_[4],
       input_packets_[15], input_packets_[17], input_packets_[19],
   };
-  EXPECT_THAT(out_2_packets, IntPackestEq(expected_output_2));
+  EXPECT_THAT(out_2_packets, IntPacketsEq(expected_output_2));

   // Validate the ALLOW stream output.
-  std::vector<Packet> expected_allow = MakePackets<bool>(  //
-      {{Timestamp(0), true}, {Timestamp(10000), false},
-       {Timestamp(20000), true}, {Timestamp(30000), false},
-       {Timestamp(40000), true}, {Timestamp(50000), false},
-       {Timestamp(60000), false}, {Timestamp(70000), false},
-       {Timestamp(80000), false}, {Timestamp(90000), false},
-       {Timestamp(100000), false}, {Timestamp(110000), false},
-       {Timestamp(120000), false}, {Timestamp(130000), false},
-       {Timestamp(140000), false}, {Timestamp(150000), true},
-       {Timestamp(160000), false}, {Timestamp(170000), true},
-       {Timestamp(180000), false}, {Timestamp(190000), true},
-       {Timestamp(200000), false}});
-  EXPECT_THAT(allow_packets_, BoolPackestEq(expected_allow));
+  std::vector<Packet> expected_allow = {
+      MakePacket<bool>(true).At(Timestamp(0)),
+      MakePacket<bool>(false).At(Timestamp(10000)),
+      MakePacket<bool>(true).At(Timestamp(20000)),
+      MakePacket<bool>(false).At(Timestamp(30000)),
+      MakePacket<bool>(true).At(Timestamp(40000)),
+      MakePacket<bool>(false).At(Timestamp(50000)),
+      MakePacket<bool>(false).At(Timestamp(60000)),
+      MakePacket<bool>(false).At(Timestamp(70000)),
+      MakePacket<bool>(false).At(Timestamp(80000)),
+      MakePacket<bool>(false).At(Timestamp(90000)),
+      MakePacket<bool>(false).At(Timestamp(100000)),
+      MakePacket<bool>(false).At(Timestamp(110000)),
+      MakePacket<bool>(false).At(Timestamp(120000)),
+      MakePacket<bool>(false).At(Timestamp(130000)),
+      MakePacket<bool>(false).At(Timestamp(140000)),
+      MakePacket<bool>(true).At(Timestamp(150000)),
+      MakePacket<bool>(false).At(Timestamp(160000)),
+      MakePacket<bool>(true).At(Timestamp(170000)),
+      MakePacket<bool>(false).At(Timestamp(180000)),
+      MakePacket<bool>(true).At(Timestamp(190000)),
+      MakePacket<bool>(false).At(Timestamp(200000)),
+  };
+  EXPECT_THAT(allow_packets_, BoolPacketsEq(expected_allow));
 }

+std::vector<Packet> StripBoundsUpdates(const std::vector<Packet>& packets,
+                                       Timestamp begin = Timestamp::Min(),
+                                       Timestamp end = Timestamp::Max()) {
+  std::vector<Packet> result;
+  for (const auto& packet : packets) {
+    Timestamp ts = packet.Timestamp();
+    if (packet.IsEmpty() && ts >= begin && ts < end) {
+      continue;
+    }
+    result.push_back(packet);
+  }
+  return result;
+}
+
 // Shows how FlowLimiterCalculator releases auxiliary input packets.
 // In this test, auxiliary input packets arrive at twice the primary rate.
 TEST_F(FlowLimiterCalculatorTest, AuxiliaryInputs) {
-  auto BoolPackestEq = PackestEq<std::vector<Packet>, bool>;
-  auto IntPackestEq = PackestEq<std::vector<Packet>, int>;
+  auto BoolPacketsEq = PacketsEq<std::vector<Packet>, bool>;
+  auto IntPacketsEq = PacketsEq<std::vector<Packet>, int>;

   // Configure the test.
   SetUpInputData();
   SetUpSimulationClock();
   CalculatorGraphConfig graph_config =
       ParseTextProtoOrDie<CalculatorGraphConfig>(R"pb(
-        input_stream: 'in_1'
-        input_stream: 'in_2'
+        input_stream: 'input_1'
+        input_stream: 'auxiliary_input_2'
+        input_stream: 'auxiliary_input_3'
         node {
           calculator: 'FlowLimiterCalculator'
           input_side_packet: 'OPTIONS:limiter_options'
-          input_stream: 'in_1'
-          input_stream: 'in_2'
           options {
             [mediapipe.FlowLimiterCalculatorOptions.ext] {
               max_in_flight: 1
               max_in_queue: 0
               in_flight_timeout: 1000000  # 1s
             }
           }
+          input_stream: 'input_1'
+          input_stream: 'auxiliary_input_2'
+          input_stream: 'auxiliary_input_3'
           input_stream: 'FINISHED:out_1'
           input_stream_info: { tag_index: 'FINISHED' back_edge: true }
-          output_stream: 'in_1_sampled'
-          output_stream: 'in_2_sampled'
+          output_stream: 'input_1_sampled'
+          output_stream: 'auxiliary_input_2_sampled'
+          output_stream: 'auxiliary_input_3_sampled'
           output_stream: 'ALLOW:allow'
         }
         node {

@@ -888,49 +926,75 @@ TEST_F(FlowLimiterCalculatorTest, AuxiliaryInputs) {
           input_side_packet: 'WARMUP_TIME:warmup_time'
           input_side_packet: 'SLEEP_TIME:sleep_time'
           input_side_packet: 'CLOCK:clock'
-          input_stream: 'PACKET:in_1_sampled'
+          input_stream: 'PACKET:input_1_sampled'
           output_stream: 'PACKET:out_1'
         }
       )pb");

   auto limiter_options = ParseTextProtoOrDie<FlowLimiterCalculatorOptions>(
       R"pb(
         max_in_flight: 1 max_in_queue: 0 in_flight_timeout: 1000000  # 1s
       )pb");
   std::map<std::string, Packet> side_packets = {
       {"limiter_options",
        MakePacket<FlowLimiterCalculatorOptions>(limiter_options)},
       // Fake processing lazy initialization time in microseconds.
       {"warmup_time", MakePacket<int64>(22000)},
       // Fake processing duration in microseconds.
       {"sleep_time", MakePacket<int64>(22000)},
       // The SimulationClock to count virtual elapsed time.
       {"clock", MakePacket<mediapipe::Clock*>(clock_)},
   };

   // Start the graph.
   MP_ASSERT_OK(graph_.Initialize(graph_config));
-  MP_EXPECT_OK(graph_.ObserveOutputStream("out_1", [this](Packet p) {
-    out_1_packets_.push_back(p);
-    return absl::OkStatus();
-  }));
-  std::vector<Packet> out_2_packets;
-  MP_EXPECT_OK(graph_.ObserveOutputStream("in_2_sampled", [&](Packet p) {
-    out_2_packets.push_back(p);
-    return absl::OkStatus();
-  }));
-  MP_EXPECT_OK(graph_.ObserveOutputStream("allow", [this](Packet p) {
-    allow_packets_.push_back(p);
-    return absl::OkStatus();
-  }));
+  MP_EXPECT_OK(graph_.ObserveOutputStream(
+      "out_1",
+      [this](Packet p) {
+        out_1_packets_.push_back(p);
+        return absl::OkStatus();
+      },
+      true));
+  std::vector<Packet> out_2_packets, out_3_packets;
+  MP_EXPECT_OK(graph_.ObserveOutputStream(
+      "auxiliary_input_2_sampled",
+      [&](Packet p) {
+        out_2_packets.push_back(p);
+        return absl::OkStatus();
+      },
+      true));
+  MP_EXPECT_OK(graph_.ObserveOutputStream(
+      "auxiliary_input_3_sampled",
+      [&](Packet p) {
+        out_3_packets.push_back(p);
+        return absl::OkStatus();
+      },
+      true));
+  MP_EXPECT_OK(graph_.ObserveOutputStream(
+      "allow",
+      [this](Packet p) {
+        allow_packets_.push_back(p);
+        return absl::OkStatus();
+      },
+      true));
   simulation_clock_->ThreadStart();
   MP_ASSERT_OK(graph_.StartRun(side_packets));

-  // Add packets 2,4,6,8 to stream in_1 and 1..9 to stream in_2.
   clock_->Sleep(absl::Microseconds(10000));
+  // Add packets 1..9 to auxiliary_input_3, early.
+  for (int i = 1; i < 10; ++i) {
+    MP_EXPECT_OK(graph_.AddPacketToInputStream(
+        "auxiliary_input_3", MakePacket<int>(i).At(Timestamp(i * 10000))));
+  }
+
+  // The total count of out_2_packets after each input packet.
+  // std::vector<int> sizes_2 = {0, 0, 2, 2, 3, 3, 4, 4, 5, 5};
+  std::vector<int> sizes_2 = {0, 1, 3, 4, 6, 7, 9, 10, 12, 13};
+
+  // Add packets 2,4,6,8 to stream input_1.
+  // Add packets 1..9 to auxiliary_input_2.
   for (int i = 1; i < 10; ++i) {
     if (i % 2 == 0) {
-      MP_EXPECT_OK(graph_.AddPacketToInputStream("in_1", input_packets_[i]));
+      MP_EXPECT_OK(graph_.AddPacketToInputStream(
+          "input_1", MakePacket<int>(i).At(Timestamp(i * 10000))));
     }
-    MP_EXPECT_OK(graph_.AddPacketToInputStream("in_2", input_packets_[i]));
+    MP_EXPECT_OK(graph_.AddPacketToInputStream(
+        "auxiliary_input_2", MakePacket<int>(i).At(Timestamp(i * 10000))));
     clock_->Sleep(absl::Microseconds(10000));
+    EXPECT_EQ(out_2_packets.size(), sizes_2[i]);
   }

   // Finish the graph run.

@@ -942,24 +1006,46 @@ TEST_F(FlowLimiterCalculatorTest, AuxiliaryInputs) {
   // Validate the output.
   // Input packets 4 and 8 are dropped due to max_in_flight.
   std::vector<Packet> expected_output = {
-      input_packets_[2],
-      input_packets_[6],
+      MakePacket<int>(2).At(Timestamp(20000)),
+      Packet().At(Timestamp(40000)),
+      MakePacket<int>(6).At(Timestamp(60000)),
+      Packet().At(Timestamp(80000)),
   };
-  EXPECT_THAT(out_1_packets_, IntPackestEq(expected_output));
+  EXPECT_THAT(out_1_packets_, IntPacketsEq(expected_output));

   // Packets following input packets 2 and 6, and not input packets 4 and 8.
-  std::vector<Packet> expected_output_2 = {
-      input_packets_[1], input_packets_[2], input_packets_[3],
-      input_packets_[6], input_packets_[7],
+  std::vector<Packet> expected_auxiliary_output = {
+      Packet().At(Timestamp(9999)),
+      MakePacket<int>(1).At(Timestamp(10000)),
+      MakePacket<int>(2).At(Timestamp(20000)),
+      Packet().At(Timestamp(29999)),
+      MakePacket<int>(3).At(Timestamp(30000)),
+      Packet().At(Timestamp(40000)),
+      Packet().At(Timestamp(49999)),
+      Packet().At(Timestamp(50000)),
+      MakePacket<int>(6).At(Timestamp(60000)),
+      Packet().At(Timestamp(69999)),
+      MakePacket<int>(7).At(Timestamp(70000)),
+      Packet().At(Timestamp(80000)),
+      Packet().At(Timestamp(89999)),
   };
-  EXPECT_THAT(out_2_packets, IntPackestEq(expected_output_2));
+  std::vector<Packet> actual_2 =
+      StripBoundsUpdates(out_2_packets, Timestamp(90000));
+  EXPECT_THAT(actual_2, IntPacketsEq(expected_auxiliary_output));
+  std::vector<Packet> expected_3 =
+      StripBoundsUpdates(expected_auxiliary_output, Timestamp(39999));
+  std::vector<Packet> actual_3 =
+      StripBoundsUpdates(out_3_packets, Timestamp(39999));
+  EXPECT_THAT(actual_3, IntPacketsEq(expected_3));

   // Validate the ALLOW stream output.
-  std::vector<Packet> expected_allow =
-      MakePackets<bool>({{Timestamp(20000), 1},
-                         {Timestamp(40000), 0},
-                         {Timestamp(60000), 1},
-                         {Timestamp(80000), 0}});
-  EXPECT_THAT(allow_packets_, BoolPackestEq(expected_allow));
+  std::vector<Packet> expected_allow = {
+      MakePacket<bool>(true).At(Timestamp(20000)),
+      MakePacket<bool>(false).At(Timestamp(40000)),
+      MakePacket<bool>(true).At(Timestamp(60000)),
+      MakePacket<bool>(false).At(Timestamp(80000)),
+  };
+  EXPECT_THAT(allow_packets_, BoolPacketsEq(expected_allow));
 }

 }  // anonymous namespace
@@ -46,6 +46,40 @@ constexpr char kOutputFrameTagGpu[] = "IMAGE_GPU";
 constexpr int kNumChannelsRGBA = 4;

 enum { ATTRIB_VERTEX, ATTRIB_TEXTURE_POSITION, NUM_ATTRIBUTES };

+// Combines an RGB cv::Mat and a single-channel alpha cv::Mat of the same
+// dimensions into an RGBA cv::Mat. Alpha may be read as uint8 or as another
+// numeric type; in the latter case, it is upscaled to values between 0 and 255
+// from an assumed input range of [0, 1). RGB and RGBA Mat's must be uchar.
+template <typename AlphaType>
+absl::Status MergeRGBA8Image(const cv::Mat input_mat, const cv::Mat& alpha_mat,
+                             cv::Mat& output_mat) {
+  RET_CHECK_EQ(input_mat.rows, alpha_mat.rows);
+  RET_CHECK_EQ(input_mat.cols, alpha_mat.cols);
+  RET_CHECK_EQ(input_mat.rows, output_mat.rows);
+  RET_CHECK_EQ(input_mat.cols, output_mat.cols);
+
+  for (int i = 0; i < output_mat.rows; ++i) {
+    const uchar* in_ptr = input_mat.ptr<uchar>(i);
+    const AlphaType* alpha_ptr = alpha_mat.ptr<AlphaType>(i);
+    uchar* out_ptr = output_mat.ptr<uchar>(i);
+    for (int j = 0; j < output_mat.cols; ++j) {
+      const int out_idx = j * kNumChannelsRGBA;
+      const int in_idx = j * input_mat.channels();
+      const int alpha_idx = j * alpha_mat.channels();
+      out_ptr[out_idx + 0] = in_ptr[in_idx + 0];
+      out_ptr[out_idx + 1] = in_ptr[in_idx + 1];
+      out_ptr[out_idx + 2] = in_ptr[in_idx + 2];
+      if constexpr (std::is_same<AlphaType, uchar>::value) {
+        out_ptr[out_idx + 3] = alpha_ptr[alpha_idx + 0];
+      } else {
+        const AlphaType alpha = alpha_ptr[alpha_idx + 0];
+        out_ptr[out_idx + 3] = static_cast<uchar>(round(alpha * 255.0f));
+      }
+    }
+  }
+  return absl::OkStatus();
+}
 }  // namespace

 // A calculator for setting the alpha channel of an RGBA image.

@@ -250,28 +284,22 @@ absl::Status SetAlphaCalculator::RenderCpu(CalculatorContext* cc) {

   const bool has_alpha_mask = cc->Inputs().HasTag(kInputAlphaTag) &&
                               !cc->Inputs().Tag(kInputAlphaTag).IsEmpty();
-  const bool use_alpa_mask = alpha_value_ < 0 && has_alpha_mask;
+  const bool use_alpha_mask = alpha_value_ < 0 && has_alpha_mask;

   // Setup alpha image and Update image in CPU.
-  if (use_alpa_mask) {
+  if (use_alpha_mask) {
     const auto& alpha_mask = cc->Inputs().Tag(kInputAlphaTag).Get<ImageFrame>();
     cv::Mat alpha_mat = mediapipe::formats::MatView(&alpha_mask);
     RET_CHECK_EQ(input_mat.rows, alpha_mat.rows);
     RET_CHECK_EQ(input_mat.cols, alpha_mat.cols);

-    for (int i = 0; i < output_mat.rows; ++i) {
-      const uchar* in_ptr = input_mat.ptr<uchar>(i);
-      uchar* alpha_ptr = alpha_mat.ptr<uchar>(i);
-      uchar* out_ptr = output_mat.ptr<uchar>(i);
-      for (int j = 0; j < output_mat.cols; ++j) {
-        const int out_idx = j * kNumChannelsRGBA;
-        const int in_idx = j * input_mat.channels();
-        const int alpha_idx = j * alpha_mat.channels();
-        out_ptr[out_idx + 0] = in_ptr[in_idx + 0];
-        out_ptr[out_idx + 1] = in_ptr[in_idx + 1];
-        out_ptr[out_idx + 2] = in_ptr[in_idx + 2];
-        out_ptr[out_idx + 3] = alpha_ptr[alpha_idx + 0];  // channel 0 of mask
-      }
-    }
+    const bool alpha_is_float = alpha_mat.type() == CV_32FC1;
+    RET_CHECK(alpha_is_float || alpha_mat.type() == CV_8UC1);
+
+    if (alpha_is_float) {
+      MP_RETURN_IF_ERROR(
+          MergeRGBA8Image<float>(input_mat, alpha_mat, output_mat));
+    } else {
+      MP_RETURN_IF_ERROR(
+          MergeRGBA8Image<uchar>(input_mat, alpha_mat, output_mat));
+    }
   } else {
     const uchar alpha_value = std::min(std::max(0.0f, alpha_value_), 255.0f);
@@ -266,6 +266,7 @@ cc_test(
         "@com_google_absl//absl/status",
         "@com_google_absl//absl/status:statusor",
         "@com_google_absl//absl/strings",
+        "@com_google_sentencepiece//src:sentencepiece_processor",
     ],
 )

@@ -321,6 +322,7 @@ cc_test(
         "@com_google_absl//absl/status",
         "@com_google_absl//absl/status:statusor",
         "@com_google_absl//absl/strings",
+        "@com_google_sentencepiece//src:sentencepiece_processor",
     ],
 )
@@ -613,6 +613,7 @@ cc_library(
     deps = [
         ":tensorflow_session",
         ":tensorflow_session_from_saved_model_generator_cc_proto",
+        "@com_google_absl//absl/status",
         "//mediapipe/framework:packet_generator",
         "//mediapipe/framework:packet_type",
         "//mediapipe/framework/tool:status_util",
@@ -14,6 +14,8 @@

 #include <algorithm>

+#include "absl/status/status.h"
+
 #if !defined(__ANDROID__)
 #include "mediapipe/framework/port/file_helpers.h"
 #endif

@@ -38,6 +40,8 @@ constexpr char kSessionTag[] = "SESSION";

 static constexpr char kStringSavedModelPath[] = "STRING_SAVED_MODEL_PATH";

+static constexpr char kStringSignatureName[] = "STRING_SIGNATURE_NAME";
+
 // Given the path to a directory containing multiple tensorflow saved models
 // in subdirectories, replaces path with the alphabetically last subdirectory.
 absl::Status GetLatestDirectory(std::string* path) {

@@ -104,6 +108,10 @@ class TensorFlowSessionFromSavedModelGenerator : public PacketGenerator {
     if (input_side_packets->HasTag(kStringSavedModelPath)) {
       input_side_packets->Tag(kStringSavedModelPath).Set<std::string>();
     }
+    // Set Signature_def.
+    if (input_side_packets->HasTag(kStringSignatureName)) {
+      input_side_packets->Tag(kStringSignatureName).Set<std::string>();
+    }
     // A TensorFlow model loaded and ready for use along with tensor
     output_side_packets->Tag(kSessionTag).Set<TensorFlowSession>();
     return absl::OkStatus();

@@ -146,9 +154,19 @@ class TensorFlowSessionFromSavedModelGenerator : public PacketGenerator {
     auto session = absl::make_unique<TensorFlowSession>();
     session->session = std::move(saved_model->session);

-    RET_CHECK(!options.signature_name().empty());
+    // Use input side packet to overwrite signature name in options.
+    std::string signature_name =
+        input_side_packets.HasTag(kStringSignatureName)
+            ? input_side_packets.Tag(kStringSignatureName).Get<std::string>()
+            : options.signature_name();
+    RET_CHECK(!signature_name.empty());
     const auto& signature_def_map = saved_model->meta_graph_def.signature_def();
-    const auto& signature_def = signature_def_map.at(options.signature_name());
+    if (signature_def_map.find(signature_name) == signature_def_map.end()) {
+      return absl::NotFoundError(absl::StrFormat(
+          "Signature name '%s' does not exist in the loaded signature def",
+          signature_name));
+    }
+    const auto& signature_def = signature_def_map.at(signature_name);
     for (const auto& input_signature : signature_def.inputs()) {
       session->tag_to_tensor_map[MaybeConvertSignatureToTag(
           input_signature.first, options)] = input_signature.second.name();
@@ -30,11 +30,13 @@

 namespace mediapipe {

+using ::testing::status::StatusIs;
 namespace {

 namespace tf = ::tensorflow;

 constexpr char kStringSavedModelPathTag[] = "STRING_SAVED_MODEL_PATH";
+constexpr char kStringSignatureNameTag[] = "STRING_SIGNATURE_NAME";
 constexpr char kSessionTag[] = "SESSION";

 std::string GetSavedModelDir() {

@@ -124,6 +126,48 @@ TEST_F(TensorFlowSessionFromSavedModelGeneratorTest,
   ASSERT_NE(session.session, nullptr);
 }

+TEST_F(TensorFlowSessionFromSavedModelGeneratorTest,
+       CreateSessionFromSidePacketWithCorrectSignatureName) {
+  generator_options_->clear_saved_model_path();
+  PacketSet input_side_packets(
+      tool::CreateTagMap({"STRING_SAVED_MODEL_PATH:saved_model_dir",
+                          "STRING_SIGNATURE_NAME:signature_name"})
+          .value());
+  input_side_packets.Tag(kStringSavedModelPathTag) =
+      Adopt(new std::string(GetSavedModelDir()));
+  input_side_packets.Tag(kStringSignatureNameTag) =
+      Adopt(new std::string("serving_default"));
+  PacketSet output_side_packets(
+      tool::CreateTagMap({"SESSION:session"}).value());
+  absl::Status run_status = tool::RunGenerateAndValidateTypes(
+      "TensorFlowSessionFromSavedModelGenerator", extendable_options_,
+      input_side_packets, &output_side_packets);
+  MP_EXPECT_OK(run_status) << run_status.message();
+  const TensorFlowSession& session =
+      output_side_packets.Tag(kSessionTag).Get<TensorFlowSession>();
+  // Session must be set.
+  ASSERT_NE(session.session, nullptr);
+}
+
+TEST_F(TensorFlowSessionFromSavedModelGeneratorTest,
+       CreateSessionFromSidePacketWithWrongSignatureName) {
+  generator_options_->clear_saved_model_path();
+  PacketSet input_side_packets(
+      tool::CreateTagMap({"STRING_SAVED_MODEL_PATH:saved_model_dir",
+                          "STRING_SIGNATURE_NAME:signature_name"})
+          .value());
+  input_side_packets.Tag(kStringSavedModelPathTag) =
+      Adopt(new std::string(GetSavedModelDir()));
+  input_side_packets.Tag(kStringSignatureNameTag) =
+      Adopt(new std::string("wrong_signature_name"));
+  PacketSet output_side_packets(
+      tool::CreateTagMap({"SESSION:session"}).value());
+  absl::Status run_status = tool::RunGenerateAndValidateTypes(
+      "TensorFlowSessionFromSavedModelGenerator", extendable_options_,
+      input_side_packets, &output_side_packets);
+  EXPECT_THAT(run_status, StatusIs(absl::StatusCode::kNotFound));
+}
+
 // Integration test. Verifies that TensorFlowInferenceCalculator correctly
 // consumes the Packet emitted by this factory.
 TEST_F(TensorFlowSessionFromSavedModelGeneratorTest,
@@ -21,6 +21,19 @@ package(

 licenses(["notice"])

+py_library(
+    name = "text_classifier_import",
+    srcs = ["__init__.py"],
+    deps = [
+        ":dataset",
+        ":model_options",
+        ":model_spec",
+        ":text_classifier",
+        ":text_classifier_options",
+        "//mediapipe/model_maker/python/core:hyperparameters",
+    ],
+)
+
 py_library(
     name = "model_options",
     srcs = ["model_options.py"],

@@ -114,12 +127,7 @@ py_test(
     ],
     tags = ["requires-net:external"],
     deps = [
-        ":dataset",
-        ":model_options",
-        ":model_spec",
-        ":text_classifier",
-        ":text_classifier_options",
-        "//mediapipe/model_maker/python/core:hyperparameters",
+        ":text_classifier_import",
         "//mediapipe/tasks/python/test:test_utils",
     ],
 )

@@ -128,11 +136,7 @@ py_library(
     name = "text_classifier_demo_lib",
     srcs = ["text_classifier_demo.py"],
     deps = [
-        ":dataset",
-        ":model_spec",
-        ":text_classifier",
-        ":text_classifier_options",
-        "//mediapipe/model_maker/python/core:hyperparameters",
+        ":text_classifier_import",
         "//mediapipe/model_maker/python/core/utils:quantization",
     ],
 )
@@ -11,3 +11,21 @@
 # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 # See the License for the specific language governing permissions and
 # limitations under the License.
+"""MediaPipe Public Python API for Text Classifier."""
+
+from mediapipe.model_maker.python.core import hyperparameters
+from mediapipe.model_maker.python.text.text_classifier import dataset
+from mediapipe.model_maker.python.text.text_classifier import model_options
+from mediapipe.model_maker.python.text.text_classifier import model_spec
+from mediapipe.model_maker.python.text.text_classifier import text_classifier
+from mediapipe.model_maker.python.text.text_classifier import text_classifier_options
+
+HParams = hyperparameters.BaseHParams
+CSVParams = dataset.CSVParameters
+Dataset = dataset.Dataset
+AverageWordEmbeddingClassifierModelOptions = (
+    model_options.AverageWordEmbeddingClassifierModelOptions)
+BertClassifierModelOptions = model_options.BertClassifierModelOptions
+SupportedModels = model_spec.SupportedModels
+TextClassifier = text_classifier.TextClassifier
+TextClassifierOptions = text_classifier_options.TextClassifierOptions
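Aside (not part of the diff): with these re-exports in place, callers can reach the whole Model Maker text-classifier surface through the package itself. A hedged usage sketch, using only the aliases defined above (hyperparameter values are arbitrary); this is the import style the demo and tests below switch to:

from mediapipe.model_maker.python.text import text_classifier

options = text_classifier.TextClassifierOptions(
    supported_model=(
        text_classifier.SupportedModels.AVERAGE_WORD_EMBEDDING_CLASSIFIER),
    hparams=text_classifier.HParams(epochs=10, batch_size=32, learning_rate=0))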
@@ -18,12 +18,12 @@ from typing import Union

 from mediapipe.model_maker.python.text.core import bert_model_options

-# BERT text classifier options inherited from BertModelOptions.
-BertClassifierOptions = bert_model_options.BertModelOptions
+# BERT text classifier model options inherited from BertModelOptions.
+BertClassifierModelOptions = bert_model_options.BertModelOptions


 @dataclasses.dataclass
-class AverageWordEmbeddingClassifierOptions:
+class AverageWordEmbeddingClassifierModelOptions:
   """Configurable model options for an Average Word Embedding classifier.

   Attributes:

@@ -41,5 +41,5 @@ class AverageWordEmbeddingClassifierModelOptions:
   dropout_rate: float = 0.2


-TextClassifierModelOptions = Union[AverageWordEmbeddingClassifierOptions,
-                                   BertClassifierOptions]
+TextClassifierModelOptions = Union[AverageWordEmbeddingClassifierModelOptions,
+                                   BertClassifierModelOptions]
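Aside (not part of the diff): the rename only touches class names; the fields are unchanged. A short illustrative sketch of constructing the renamed options, with the field value taken from the default shown above:

from mediapipe.model_maker.python.text.text_classifier import model_options as mo

awe_options = mo.AverageWordEmbeddingClassifierModelOptions(dropout_rate=0.2)
bert_options = mo.BertClassifierModelOptions()  # alias of BertModelOptions
# Both satisfy the TextClassifierModelOptions union declared above.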
@@ -38,8 +38,8 @@ class AverageWordEmbeddingClassifierSpec:
   # `learning_rate` is unused for the average word embedding model
   hparams: hp.BaseHParams = hp.BaseHParams(
       epochs=10, batch_size=32, learning_rate=0)
-  model_options: mo.AverageWordEmbeddingClassifierOptions = (
-      mo.AverageWordEmbeddingClassifierOptions())
+  model_options: mo.AverageWordEmbeddingClassifierModelOptions = (
+      mo.AverageWordEmbeddingClassifierModelOptions())
   name: str = 'AverageWordEmbedding'
@@ -40,7 +40,7 @@ class ModelSpecTest(tf.test.TestCase):
         })
     self.assertEqual(
         model_spec_obj.model_options,
-        classifier_model_options.BertClassifierOptions(
+        classifier_model_options.BertClassifierModelOptions(
             seq_len=128, do_fine_tuning=True, dropout_rate=0.1))
     self.assertEqual(
         model_spec_obj.hparams,

@@ -57,7 +57,7 @@ class ModelSpecTest(tf.test.TestCase):
     self.assertEqual(model_spec_obj.name, 'AverageWordEmbedding')
     self.assertEqual(
         model_spec_obj.model_options,
-        classifier_model_options.AverageWordEmbeddingClassifierOptions(
+        classifier_model_options.AverageWordEmbeddingClassifierModelOptions(
             seq_len=256,
             wordvec_dim=16,
             do_lower_case=True,

@@ -77,7 +77,7 @@ class ModelSpecTest(tf.test.TestCase):

   def test_custom_bert_spec(self):
     custom_bert_classifier_options = (
-        classifier_model_options.BertClassifierOptions(
+        classifier_model_options.BertClassifierModelOptions(
             seq_len=512, do_fine_tuning=False, dropout_rate=0.3))
     model_spec_obj = (
         ms.SupportedModels.MOBILEBERT_CLASSIFIER.value(

@@ -97,7 +97,7 @@ class ModelSpecTest(tf.test.TestCase):
             num_gpus=3,
             tpu='tpu/address')
     custom_average_word_embedding_model_options = (
-        classifier_model_options.AverageWordEmbeddingClassifierOptions(
+        classifier_model_options.AverageWordEmbeddingClassifierModelOptions(
             seq_len=512,
             wordvec_dim=32,
             do_lower_case=False,
@@ -51,12 +51,12 @@ def _validate(options: text_classifier_options.TextClassifierOptions):
     return

   if (isinstance(options.model_options,
-                 mo.AverageWordEmbeddingClassifierOptions) and
+                 mo.AverageWordEmbeddingClassifierModelOptions) and
       (options.supported_model !=
        ms.SupportedModels.AVERAGE_WORD_EMBEDDING_CLASSIFIER)):
     raise ValueError("Expected AVERAGE_WORD_EMBEDDING_CLASSIFIER,"
                      f" got {options.supported_model}")
-  if (isinstance(options.model_options, mo.BertClassifierOptions) and
+  if (isinstance(options.model_options, mo.BertClassifierModelOptions) and
       (options.supported_model != ms.SupportedModels.MOBILEBERT_CLASSIFIER)):
     raise ValueError(
         f"Expected MOBILEBERT_CLASSIFIER, got {options.supported_model}")

@@ -194,7 +194,7 @@ class _AverageWordEmbeddingClassifier(TextClassifier):
   _DELIM_REGEX_PATTERN = r"[^\w\']+"

   def __init__(self, model_spec: ms.AverageWordEmbeddingClassifierSpec,
-               model_options: mo.AverageWordEmbeddingClassifierOptions,
+               model_options: mo.AverageWordEmbeddingClassifierModelOptions,
                hparams: hp.BaseHParams, label_names: Sequence[str]):
     super().__init__(model_spec, hparams, label_names)
     self._model_options = model_options

@@ -304,8 +304,8 @@ class _BertClassifier(TextClassifier):
   _INITIALIZER_RANGE = 0.02

   def __init__(self, model_spec: ms.BertClassifierSpec,
-               model_options: mo.BertClassifierOptions, hparams: hp.BaseHParams,
-               label_names: Sequence[str]):
+               model_options: mo.BertClassifierModelOptions,
+               hparams: hp.BaseHParams, label_names: Sequence[str]):
     super().__init__(model_spec, hparams, label_names)
     self._model_options = model_options
     self._loss_function = tf.keras.losses.SparseCategoricalCrossentropy()
@@ -23,12 +23,8 @@ from absl import flags
 from absl import logging
 import tensorflow as tf

-from mediapipe.model_maker.python.core import hyperparameters as hp
 from mediapipe.model_maker.python.core.utils import quantization
-from mediapipe.model_maker.python.text.text_classifier import dataset as text_ds
-from mediapipe.model_maker.python.text.text_classifier import model_spec as ms
-from mediapipe.model_maker.python.text.text_classifier import text_classifier
-from mediapipe.model_maker.python.text.text_classifier import text_classifier_options
+from mediapipe.model_maker.python.text import text_classifier

 FLAGS = flags.FLAGS

@@ -53,31 +49,34 @@ def download_demo_data():

 def run(data_dir,
         export_dir=tempfile.mkdtemp(),
-        supported_model=ms.SupportedModels.AVERAGE_WORD_EMBEDDING_CLASSIFIER):
+        supported_model=(
+            text_classifier.SupportedModels.AVERAGE_WORD_EMBEDDING_CLASSIFIER)):
   """Runs demo."""

   # Gets training data and validation data.
-  csv_params = text_ds.CSVParameters(
+  csv_params = text_classifier.CSVParams(
       text_column='sentence', label_column='label', delimiter='\t')
-  train_data = text_ds.Dataset.from_csv(
+  train_data = text_classifier.Dataset.from_csv(
       filename=os.path.join(os.path.join(data_dir, 'train.tsv')),
       csv_params=csv_params)
-  validation_data = text_ds.Dataset.from_csv(
+  validation_data = text_classifier.Dataset.from_csv(
       filename=os.path.join(os.path.join(data_dir, 'dev.tsv')),
       csv_params=csv_params)

   quantization_config = None
-  if supported_model == ms.SupportedModels.AVERAGE_WORD_EMBEDDING_CLASSIFIER:
-    hparams = hp.BaseHParams(
+  if (supported_model ==
+      text_classifier.SupportedModels.AVERAGE_WORD_EMBEDDING_CLASSIFIER):
+    hparams = text_classifier.HParams(
         epochs=10, batch_size=32, learning_rate=0, export_dir=export_dir)
   # Warning: This takes extremely long to run on CPU
-  elif supported_model == ms.SupportedModels.MOBILEBERT_CLASSIFIER:
+  elif (
+      supported_model == text_classifier.SupportedModels.MOBILEBERT_CLASSIFIER):
     quantization_config = quantization.QuantizationConfig.for_dynamic()
-    hparams = hp.BaseHParams(
+    hparams = text_classifier.HParams(
         epochs=3, batch_size=48, learning_rate=3e-5, export_dir=export_dir)

   # Fine-tunes the model.
-  options = text_classifier_options.TextClassifierOptions(
+  options = text_classifier.TextClassifierOptions(
       supported_model=supported_model, hparams=hparams)
   model = text_classifier.TextClassifier.create(train_data, validation_data,
                                                 options)

@@ -96,9 +95,10 @@ def main(_):
   export_dir = os.path.expanduser(FLAGS.export_dir)

   if FLAGS.supported_model == 'average_word_embedding':
-    supported_model = ms.SupportedModels.AVERAGE_WORD_EMBEDDING_CLASSIFIER
+    supported_model = (
+        text_classifier.SupportedModels.AVERAGE_WORD_EMBEDDING_CLASSIFIER)
   elif FLAGS.supported_model == 'bert':
-    supported_model = ms.SupportedModels.MOBILEBERT_CLASSIFIER
+    supported_model = text_classifier.SupportedModels.MOBILEBERT_CLASSIFIER

   run(data_dir, export_dir, supported_model)
@@ -18,12 +18,7 @@ import os

 import tensorflow as tf

-from mediapipe.model_maker.python.core import hyperparameters as hp
-from mediapipe.model_maker.python.text.text_classifier import dataset
-from mediapipe.model_maker.python.text.text_classifier import model_options as mo
-from mediapipe.model_maker.python.text.text_classifier import model_spec as ms
-from mediapipe.model_maker.python.text.text_classifier import text_classifier
-from mediapipe.model_maker.python.text.text_classifier import text_classifier_options
+from mediapipe.model_maker.python.text import text_classifier
 from mediapipe.tasks.python.test import test_utils

@@ -43,18 +38,23 @@ class TextClassifierTest(tf.test.TestCase):
     writer.writeheader()
     for label, text in labels_and_text:
       writer.writerow({'text': text, 'label': label})
-    csv_params = dataset.CSVParameters(text_column='text', label_column='label')
-    all_data = dataset.Dataset.from_csv(
+    csv_params = text_classifier.CSVParams(
+        text_column='text', label_column='label')
+    all_data = text_classifier.Dataset.from_csv(
         filename=csv_file, csv_params=csv_params)
     return all_data.split(0.5)

   def test_create_and_train_average_word_embedding_model(self):
     train_data, validation_data = self._get_data()
-    options = text_classifier_options.TextClassifierOptions(
-        supported_model=ms.SupportedModels.AVERAGE_WORD_EMBEDDING_CLASSIFIER,
-        hparams=hp.BaseHParams(epochs=1, batch_size=1, learning_rate=0))
-    average_word_embedding_classifier = text_classifier.TextClassifier.create(
-        train_data, validation_data, options)
+    options = (
+        text_classifier.TextClassifierOptions(
+            supported_model=(text_classifier.SupportedModels
+                             .AVERAGE_WORD_EMBEDDING_CLASSIFIER),
+            hparams=text_classifier.HParams(
+                epochs=1, batch_size=1, learning_rate=0)))
+    average_word_embedding_classifier = (
+        text_classifier.TextClassifier.create(train_data, validation_data,
+                                              options))

     _, accuracy = average_word_embedding_classifier.evaluate(validation_data)
     self.assertGreaterEqual(accuracy, 0.0)

@@ -77,10 +77,11 @@ class TextClassifierTest(tf.test.TestCase):

   def test_create_and_train_bert(self):
     train_data, validation_data = self._get_data()
-    options = text_classifier_options.TextClassifierOptions(
-        supported_model=ms.SupportedModels.MOBILEBERT_CLASSIFIER,
-        model_options=mo.BertClassifierOptions(do_fine_tuning=False, seq_len=2),
-        hparams=hp.BaseHParams(
+    options = text_classifier.TextClassifierOptions(
+        supported_model=text_classifier.SupportedModels.MOBILEBERT_CLASSIFIER,
+        model_options=text_classifier.BertClassifierModelOptions(
+            do_fine_tuning=False, seq_len=2),
+        hparams=text_classifier.HParams(
             epochs=1,
             batch_size=1,
             learning_rate=3e-5,

@@ -94,12 +95,13 @@ class TextClassifierTest(tf.test.TestCase):

   def test_label_mismatch(self):
     options = (
-        text_classifier_options.TextClassifierOptions(
-            supported_model=ms.SupportedModels.MOBILEBERT_CLASSIFIER))
+        text_classifier.TextClassifierOptions(
+            supported_model=(
+                text_classifier.SupportedModels.MOBILEBERT_CLASSIFIER)))
     train_tf_dataset = tf.data.Dataset.from_tensor_slices([[0]])
-    train_data = dataset.Dataset(train_tf_dataset, 1, ['foo'])
+    train_data = text_classifier.Dataset(train_tf_dataset, 1, ['foo'])
     validation_tf_dataset = tf.data.Dataset.from_tensor_slices([[0]])
-    validation_data = dataset.Dataset(validation_tf_dataset, 1, ['bar'])
+    validation_data = text_classifier.Dataset(validation_tf_dataset, 1, ['bar'])
     with self.assertRaisesRegex(
         ValueError,
         'Training data label names .* not equal to validation data label names'

@@ -111,9 +113,11 @@ class TextClassifierTest(tf.test.TestCase):
     train_data, validation_data = self._get_data()

     avg_options = (
-        text_classifier_options.TextClassifierOptions(
-            supported_model=ms.SupportedModels.MOBILEBERT_CLASSIFIER,
-            model_options=mo.AverageWordEmbeddingClassifierOptions()))
+        text_classifier.TextClassifierOptions(
+            supported_model=(
+                text_classifier.SupportedModels.MOBILEBERT_CLASSIFIER),
+            model_options=(
+                text_classifier.AverageWordEmbeddingClassifierModelOptions())))
     with self.assertRaisesRegex(
         ValueError, 'Expected AVERAGE_WORD_EMBEDDING_CLASSIFIER, got'
         ' SupportedModels.MOBILEBERT_CLASSIFIER'):

@@ -121,10 +125,10 @@ class TextClassifierTest(tf.test.TestCase):
         avg_options)

     bert_options = (
-        text_classifier_options.TextClassifierOptions(
-            supported_model=(
-                ms.SupportedModels.AVERAGE_WORD_EMBEDDING_CLASSIFIER),
-            model_options=mo.BertClassifierOptions()))
+        text_classifier.TextClassifierOptions(
+            supported_model=(text_classifier.SupportedModels
+                             .AVERAGE_WORD_EMBEDDING_CLASSIFIER),
+            model_options=text_classifier.BertClassifierModelOptions()))
     with self.assertRaisesRegex(
         ValueError, 'Expected MOBILEBERT_CLASSIFIER, got'
         ' SupportedModels.AVERAGE_WORD_EMBEDDING_CLASSIFIER'):
@@ -89,6 +89,7 @@ cc_library(
     deps = [
         "//mediapipe/tasks/cc/vision/gesture_recognizer:gesture_recognizer_graph",
         "//mediapipe/tasks/cc/vision/image_classifier:image_classifier_graph",
+        "//mediapipe/tasks/cc/vision/image_embedder:image_embedder_graph",
         "//mediapipe/tasks/cc/vision/image_segmenter:image_segmenter_graph",
         "//mediapipe/tasks/cc/vision/object_detector:object_detector_graph",
     ] + select({
@@ -85,6 +85,7 @@ cc_test(
         "@com_google_absl//absl/status:statusor",
         "@com_google_absl//absl/strings",
         "@com_google_absl//absl/strings:cord",
+        "@com_google_sentencepiece//src:sentencepiece_processor",
         "@org_tensorflow//tensorflow/lite/core/shims:cc_shims_test_util",
     ],
 )

@@ -82,6 +82,7 @@ cc_test(
         "@com_google_absl//absl/flags:flag",
         "@com_google_absl//absl/status",
         "@com_google_absl//absl/status:statusor",
+        "@com_google_sentencepiece//src:sentencepiece_processor",
         "@org_tensorflow//tensorflow/lite/core/shims:cc_shims_test_util",
     ],
 )

@@ -83,6 +83,7 @@ cc_test(
         ":sentencepiece_tokenizer",
         "//mediapipe/framework/port:gtest_main",
         "//mediapipe/tasks/cc/core:utils",
+        "@com_google_sentencepiece//src:sentencepiece_processor",
     ],
 )

@@ -132,6 +133,7 @@ cc_test(
         "@com_google_absl//absl/status:statusor",
         "@com_google_absl//absl/strings",
         "@com_google_absl//absl/strings:cord",
+        "@com_google_sentencepiece//src:sentencepiece_processor",
     ],
 )
@@ -14,7 +14,7 @@
 """Embeddings data class."""

 import dataclasses
-from typing import Any, Optional, List
+from typing import Optional, List

 import numpy as np
 from mediapipe.tasks.cc.components.containers.proto import embeddings_pb2

@@ -36,29 +36,11 @@ class FloatEmbedding:

   values: np.ndarray

-  @doc_controls.do_not_generate_docs
-  def to_pb2(self) -> _FloatEmbeddingProto:
-    """Generates a FloatEmbedding protobuf object."""
-    return _FloatEmbeddingProto(values=self.values)
-
   @classmethod
   @doc_controls.do_not_generate_docs
-  def create_from_pb2(
-      cls, pb2_obj: _FloatEmbeddingProto) -> 'FloatEmbedding':
+  def create_from_pb2(cls, pb2_obj: _FloatEmbeddingProto) -> 'FloatEmbedding':
     """Creates a `FloatEmbedding` object from the given protobuf object."""
-    return FloatEmbedding(values=np.array(pb2_obj.value_float, dtype=float))
-
-  def __eq__(self, other: Any) -> bool:
-    """Checks if this object is equal to the given object.
-
-    Args:
-      other: The object to be compared with.
-
-    Returns:
-      True if the objects are equal.
-    """
-    if not isinstance(other, FloatEmbedding):
-      return False
-
-    return self.to_pb2().__eq__(other.to_pb2())
+    return FloatEmbedding(values=np.array(pb2_obj.values, dtype=float))


 @dataclasses.dataclass

@@ -71,30 +53,13 @@ class QuantizedEmbedding:

   values: np.ndarray

-  @doc_controls.do_not_generate_docs
-  def to_pb2(self) -> _QuantizedEmbeddingProto:
-    """Generates a QuantizedEmbedding protobuf object."""
-    return _QuantizedEmbeddingProto(values=self.values)
-
   @classmethod
   @doc_controls.do_not_generate_docs
   def create_from_pb2(
       cls, pb2_obj: _QuantizedEmbeddingProto) -> 'QuantizedEmbedding':
     """Creates a `QuantizedEmbedding` object from the given protobuf object."""
     return QuantizedEmbedding(
-        values=np.array(bytearray(pb2_obj.value_string), dtype=np.uint8))
-
-  def __eq__(self, other: Any) -> bool:
-    """Checks if this object is equal to the given object.
-
-    Args:
-      other: The object to be compared with.
-
-    Returns:
-      True if the objects are equal.
-    """
-    if not isinstance(other, QuantizedEmbedding):
-      return False
-
-    return self.to_pb2().__eq__(other.to_pb2())
+        values=np.array(bytearray(pb2_obj.values), dtype=np.uint8))


 @dataclasses.dataclass

@@ -113,58 +78,31 @@ class Embedding:
   head_index: Optional[int] = None
   head_name: Optional[str] = None

-  @doc_controls.do_not_generate_docs
-  def to_pb2(self) -> _EmbeddingProto:
-    """Generates a Embedding protobuf object."""
-
-    if self.embedding.dtype == float:
-      return _EmbeddingProto(float_embedding=self.embedding,
-                             head_index=self.head_index,
-                             head_name=self.head_name)
-
-    elif self.embedding.dtype == np.uint8:
-      return _EmbeddingProto(quantized_embedding=bytes(self.embedding),
-                             head_index=self.head_index,
-                             head_name=self.head_name)
-
-    else:
-      raise ValueError("Invalid dtype. Only float and np.uint8 are supported.")
-
   @classmethod
   @doc_controls.do_not_generate_docs
-  def create_from_pb2(
-      cls, pb2_obj: _EmbeddingProto) -> 'Embedding':
+  def create_from_pb2(cls, pb2_obj: _EmbeddingProto) -> 'Embedding':
     """Creates a `Embedding` object from the given protobuf object."""

     quantized_embedding = np.array(
         bytearray(pb2_obj.quantized_embedding.values))
     float_embedding = np.array(pb2_obj.float_embedding.values, dtype=float)

-    if len(quantized_embedding) == 0:
-      return Embedding(embedding=float_embedding,
-                       head_index=pb2_obj.head_index,
-                       head_name=pb2_obj.head_name)
+    if not quantized_embedding:
+      return Embedding(
+          embedding=float_embedding,
+          head_index=pb2_obj.head_index,
+          head_name=pb2_obj.head_name)
     else:
-      return Embedding(embedding=quantized_embedding,
-                       head_index=pb2_obj.head_index,
-                       head_name=pb2_obj.head_name)
-
-  def __eq__(self, other: Any) -> bool:
-    """Checks if this object is equal to the given object.
-
-    Args:
-      other: The object to be compared with.
-
-    Returns:
-      True if the objects are equal.
-    """
-    if not isinstance(other, Embedding):
-      return False
-
-    return self.to_pb2().__eq__(other.to_pb2())
+      return Embedding(
+          embedding=quantized_embedding,
+          head_index=pb2_obj.head_index,
+          head_name=pb2_obj.head_name)


 @dataclasses.dataclass
 class EmbeddingResult:
   """Embedding results for a given embedder model.

   Attributes:
     embeddings: A list of `Embedding` objects.
     timestamp_ms: The optional timestamp (in milliseconds) of the start of the

@@ -178,33 +116,10 @@ class EmbeddingResult:
   embeddings: List[Embedding]
   timestamp_ms: Optional[int] = None

-  @doc_controls.do_not_generate_docs
-  def to_pb2(self) -> _EmbeddingResultProto:
-    """Generates a EmbeddingResult protobuf object."""
-    return _EmbeddingResultProto(
-        embeddings=[
-            embedding.to_pb2() for embedding in self.embeddings
-        ])
-
   @classmethod
   @doc_controls.do_not_generate_docs
-  def create_from_pb2(
-      cls, pb2_obj: _EmbeddingResultProto) -> 'EmbeddingResult':
+  def create_from_pb2(cls, pb2_obj: _EmbeddingResultProto) -> 'EmbeddingResult':
     """Creates a `EmbeddingResult` object from the given protobuf object."""
-    return EmbeddingResult(
-        embeddings=[
-            Embedding.create_from_pb2(embedding)
-            for embedding in pb2_obj.embeddings
-        ])
-
-  def __eq__(self, other: Any) -> bool:
-    """Checks if this object is equal to the given object.
-
-    Args:
-      other: The object to be compared with.
-
-    Returns:
-      True if the objects are equal.
-    """
-    if not isinstance(other, EmbeddingResult):
-      return False
-
-    return self.to_pb2().__eq__(other.to_pb2())
+    return EmbeddingResult(embeddings=[
+        Embedding.create_from_pb2(embedding) for embedding in pb2_obj.embeddings
+    ])
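Aside (not part of the diff): the simplified converters above read `pb2_obj.float_embedding.values` or `pb2_obj.quantized_embedding.values` and choose between them based on whether the quantized bytes are empty. A hedged round-trip sketch; it assumes `_EmbeddingProto` is `embeddings_pb2.Embedding` and that the proto exposes the field names used in the diff:

import numpy as np
from mediapipe.tasks.cc.components.containers.proto import embeddings_pb2

# Hypothetical: build a float-embedding proto by hand.
proto = embeddings_pb2.Embedding(head_index=0, head_name='feature')
proto.float_embedding.values.extend([0.1, 0.2, 0.3])
# Embedding.create_from_pb2(proto) would then take the float branch shown
# above, yielding embedding=np.array([0.1, 0.2, 0.3]), head_index=0,
# head_name='feature'.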
@@ -44,16 +44,14 @@ class EmbedderOptions:
   def to_pb2(self) -> _EmbedderOptionsProto:
     """Generates a EmbedderOptions protobuf object."""
     return _EmbedderOptionsProto(
-        l2_normalize=self.l2_normalize,
-        quantize=self.quantize)
+        l2_normalize=self.l2_normalize, quantize=self.quantize)

   @classmethod
   @doc_controls.do_not_generate_docs
   def create_from_pb2(cls, pb2_obj: _EmbedderOptionsProto) -> 'EmbedderOptions':
     """Creates a `EmbedderOptions` object from the given protobuf object."""
     return EmbedderOptions(
-        l2_normalize=pb2_obj.l2_normalize,
-        quantize=pb2_obj.quantize)
+        l2_normalize=pb2_obj.l2_normalize, quantize=pb2_obj.quantize)

   def __eq__(self, other: Any) -> bool:
     """Checks if this object is equal to the given object.
@@ -14,6 +14,8 @@

 # Placeholder for internal Python strict library compatibility macro.

+# Placeholder for internal Python strict library and test compatibility macro.
+
 package(default_visibility = ["//mediapipe/tasks:internal"])

 licenses(["notice"])
@@ -23,6 +23,8 @@ _EmbedderOptions = embedder_options.EmbedderOptions


 def _compute_cosine_similarity(u, v):
+  """Computes cosine similarity between two embeddings."""
+
   if len(u.embedding) <= 0:
     raise ValueError("Cannot compute cosine similarity on empty embeddings.")

@@ -38,6 +40,7 @@ def _compute_cosine_similarity(u, v):

 def cosine_similarity(u: _Embedding, v: _Embedding) -> float:
   """Utility function to compute cosine similarity between two embeddings.
+
   May return an InvalidArgumentError if e.g. the feature vectors are of
   different types (quantized vs. float), have different sizes, or have an
   L2-norm of 0.

@@ -45,6 +48,9 @@ def cosine_similarity(u: _Embedding, v: _Embedding) -> float:
   Args:
     u: An embedding.
     v: An embedding.
+
+  Returns:
+    Cosine similarity value.
   """
   if len(u.embedding) != len(v.embedding):
     raise ValueError(f"Cannot compute cosine similarity between embeddings "
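Aside (not part of the diff): the utility documented above computes the standard cosine similarity u.v / (||u|| * ||v||). A self-contained numpy sketch of the same computation; the function here is illustrative, not the library implementation:

import numpy as np

def cosine_similarity(u: np.ndarray, v: np.ndarray) -> float:
  # Dot product normalized by the L2 norms; undefined for zero-norm inputs.
  norm_u, norm_v = np.linalg.norm(u), np.linalg.norm(v)
  if norm_u == 0 or norm_v == 0:
    raise ValueError('Cannot compute cosine similarity: zero L2-norm.')
  return float(np.dot(u, v) / (norm_u * norm_v))

# Identical vectors score 1.0; orthogonal vectors score 0.0.
print(cosine_similarity(np.array([1.0, 0.0]), np.array([1.0, 0.0])))  # 1.0
print(cosine_similarity(np.array([1.0, 0.0]), np.array([0.0, 1.0])))  # 0.0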
@@ -125,7 +125,7 @@ class ModelFileType(enum.Enum):
   FILE_NAME = 2


-class ImageClassifierTest(parameterized.TestCase):
+class TextClassifierTest(parameterized.TestCase):

   def setUp(self):
     super().setUp()
@@ -58,6 +58,26 @@ py_test(
     ],
 )

+py_test(
+    name = "image_embedder_test",
+    srcs = ["image_embedder_test.py"],
+    data = [
+        "//mediapipe/tasks/testdata/vision:test_images",
+        "//mediapipe/tasks/testdata/vision:test_models",
+    ],
+    deps = [
+        "//mediapipe/python:_framework_bindings",
+        "//mediapipe/tasks/python/components/containers:embedding_result",
+        "//mediapipe/tasks/python/components/containers:rect",
+        "//mediapipe/tasks/python/components/processors:embedder_options",
+        "//mediapipe/tasks/python/core:base_options",
+        "//mediapipe/tasks/python/test:test_utils",
+        "//mediapipe/tasks/python/vision:image_embedder",
+        "//mediapipe/tasks/python/vision/core:image_processing_options",
+        "//mediapipe/tasks/python/vision/core:vision_task_running_mode",
+    ],
+)
+
 py_test(
     name = "image_segmenter_test",
     srcs = ["image_segmenter_test.py"],
mediapipe/tasks/python/test/vision/image_embedder_test.py (new file, 402 lines)
@ -0,0 +1,402 @@
# Copyright 2022 The MediaPipe Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for image embedder."""

import enum
import os
from unittest import mock

from absl.testing import absltest
from absl.testing import parameterized
import numpy as np

from mediapipe.python._framework_bindings import image as image_module
from mediapipe.tasks.python.components.containers import embedding_result as embedding_result_module
from mediapipe.tasks.python.components.containers import rect
from mediapipe.tasks.python.components.processors import embedder_options as embedder_options_module
from mediapipe.tasks.python.core import base_options as base_options_module
from mediapipe.tasks.python.test import test_utils
from mediapipe.tasks.python.vision import image_embedder
from mediapipe.tasks.python.vision.core import image_processing_options as image_processing_options_module
from mediapipe.tasks.python.vision.core import vision_task_running_mode as running_mode_module

ImageEmbedderResult = embedding_result_module.EmbeddingResult
_Rect = rect.Rect
_BaseOptions = base_options_module.BaseOptions
_EmbedderOptions = embedder_options_module.EmbedderOptions
_FloatEmbedding = embedding_result_module.FloatEmbedding
_QuantizedEmbedding = embedding_result_module.QuantizedEmbedding
_Embedding = embedding_result_module.Embedding
_Image = image_module.Image
_ImageEmbedder = image_embedder.ImageEmbedder
_ImageEmbedderOptions = image_embedder.ImageEmbedderOptions
_RUNNING_MODE = running_mode_module.VisionTaskRunningMode
_ImageProcessingOptions = image_processing_options_module.ImageProcessingOptions

_MODEL_FILE = 'mobilenet_v3_small_100_224_embedder.tflite'
_BURGER_IMAGE_FILE = 'burger.jpg'
_BURGER_CROPPED_IMAGE_FILE = 'burger_crop.jpg'
_TEST_DATA_DIR = 'mediapipe/tasks/testdata/vision'
# Tolerance for embedding vector coordinate values.
_EPSILON = 1e-4
# Tolerance for cosine similarity evaluation.
_SIMILARITY_TOLERANCE = 1e-6

class ModelFileType(enum.Enum):
  FILE_CONTENT = 1
  FILE_NAME = 2


class ImageEmbedderTest(parameterized.TestCase):

  def setUp(self):
    super().setUp()
    self.test_image = _Image.create_from_file(
        test_utils.get_test_data_path(
            os.path.join(_TEST_DATA_DIR, _BURGER_IMAGE_FILE)))
    self.test_cropped_image = _Image.create_from_file(
        test_utils.get_test_data_path(
            os.path.join(_TEST_DATA_DIR, _BURGER_CROPPED_IMAGE_FILE)))
    self.model_path = test_utils.get_test_data_path(
        os.path.join(_TEST_DATA_DIR, _MODEL_FILE))

  def test_create_from_file_succeeds_with_valid_model_path(self):
    # Creates with default option and valid model file successfully.
    with _ImageEmbedder.create_from_model_path(self.model_path) as embedder:
      self.assertIsInstance(embedder, _ImageEmbedder)

  def test_create_from_options_succeeds_with_valid_model_path(self):
    # Creates with options containing model file successfully.
    base_options = _BaseOptions(model_asset_path=self.model_path)
    options = _ImageEmbedderOptions(base_options=base_options)
    with _ImageEmbedder.create_from_options(options) as embedder:
      self.assertIsInstance(embedder, _ImageEmbedder)

  def test_create_from_options_fails_with_invalid_model_path(self):
    with self.assertRaisesRegex(
        RuntimeError, 'Unable to open file at /path/to/invalid/model.tflite'):
      base_options = _BaseOptions(
          model_asset_path='/path/to/invalid/model.tflite')
      options = _ImageEmbedderOptions(base_options=base_options)
      _ImageEmbedder.create_from_options(options)

  def test_create_from_options_succeeds_with_valid_model_content(self):
    # Creates with options containing model content successfully.
    with open(self.model_path, 'rb') as f:
      base_options = _BaseOptions(model_asset_buffer=f.read())
      options = _ImageEmbedderOptions(base_options=base_options)
      embedder = _ImageEmbedder.create_from_options(options)
      self.assertIsInstance(embedder, _ImageEmbedder)

  def _check_embedding_value(self, result, expected_first_value):
    # Check embedding first value.
    self.assertAlmostEqual(
        result.embeddings[0].embedding[0], expected_first_value, delta=_EPSILON)

  def _check_embedding_size(self, result, quantize, expected_embedding_size):
    # Check embedding size.
    self.assertLen(result.embeddings, 1)
    embedding_result = result.embeddings[0]
    self.assertLen(embedding_result.embedding, expected_embedding_size)
    if quantize:
      self.assertEqual(embedding_result.embedding.dtype, np.uint8)
    else:
      self.assertEqual(embedding_result.embedding.dtype, float)

  def _check_cosine_similarity(self, result0, result1, expected_similarity):
    # Checks cosine similarity.
    similarity = _ImageEmbedder.cosine_similarity(result0.embeddings[0],
                                                  result1.embeddings[0])
    self.assertAlmostEqual(
        similarity, expected_similarity, delta=_SIMILARITY_TOLERANCE)

  @parameterized.parameters(
      (False, False, False, ModelFileType.FILE_NAME, 0.925519, 1024,
       (-0.2101883, -0.193027)),
      (True, False, False, ModelFileType.FILE_NAME, 0.925519, 1024,
       (-0.0142344, -0.0131606)),
      # (False, True, False, ModelFileType.FILE_NAME,
      #  0.926791, 1024, (229, 231)),
      (False, False, True, ModelFileType.FILE_CONTENT, 0.999931, 1024,
       (-0.195062, -0.193027)))
  def test_embed(self, l2_normalize, quantize, with_roi, model_file_type,
                 expected_similarity, expected_size, expected_first_values):
    # Creates embedder.
    if model_file_type is ModelFileType.FILE_NAME:
      base_options = _BaseOptions(model_asset_path=self.model_path)
    elif model_file_type is ModelFileType.FILE_CONTENT:
      with open(self.model_path, 'rb') as f:
        model_content = f.read()
      base_options = _BaseOptions(model_asset_buffer=model_content)
    else:
      # Should never happen
      raise ValueError('model_file_type is invalid.')

    embedder_options = _EmbedderOptions(
        l2_normalize=l2_normalize, quantize=quantize)
    options = _ImageEmbedderOptions(
        base_options=base_options, embedder_options=embedder_options)
    embedder = _ImageEmbedder.create_from_options(options)

    image_processing_options = None
    if with_roi:
      # Region-of-interest in "burger.jpg" corresponding to "burger_crop.jpg".
      roi = _Rect(left=0, top=0, right=0.833333, bottom=1)
      image_processing_options = _ImageProcessingOptions(roi)

    # Extracts both embeddings.
    image_result = embedder.embed(self.test_image, image_processing_options)
    crop_result = embedder.embed(self.test_cropped_image)

    # Checks embeddings and cosine similarity.
    expected_result0_value, expected_result1_value = expected_first_values
    self._check_embedding_size(image_result, quantize, expected_size)
    self._check_embedding_size(crop_result, quantize, expected_size)
    self._check_embedding_value(image_result, expected_result0_value)
    self._check_embedding_value(crop_result, expected_result1_value)
    self._check_cosine_similarity(image_result, crop_result,
                                  expected_similarity)
    # Closes the embedder explicitly when the embedder is not used in
    # a context.
    embedder.close()

  @parameterized.parameters(
      (False, False, ModelFileType.FILE_NAME, 0.925519),
      (False, False, ModelFileType.FILE_CONTENT, 0.925519))
  def test_embed_in_context(self, l2_normalize, quantize, model_file_type,
                            expected_similarity):
    # Creates embedder.
    if model_file_type is ModelFileType.FILE_NAME:
      base_options = _BaseOptions(model_asset_path=self.model_path)
    elif model_file_type is ModelFileType.FILE_CONTENT:
      with open(self.model_path, 'rb') as f:
        model_content = f.read()
      base_options = _BaseOptions(model_asset_buffer=model_content)
    else:
      # Should never happen
      raise ValueError('model_file_type is invalid.')

    embedder_options = _EmbedderOptions(
        l2_normalize=l2_normalize, quantize=quantize)
    options = _ImageEmbedderOptions(
        base_options=base_options, embedder_options=embedder_options)

    with _ImageEmbedder.create_from_options(options) as embedder:
      # Extracts both embeddings.
      image_result = embedder.embed(self.test_image)
      crop_result = embedder.embed(self.test_cropped_image)

      # Checks cosine similarity.
      self._check_cosine_similarity(image_result, crop_result,
                                    expected_similarity)

  def test_missing_result_callback(self):
    options = _ImageEmbedderOptions(
        base_options=_BaseOptions(model_asset_path=self.model_path),
        running_mode=_RUNNING_MODE.LIVE_STREAM)
    with self.assertRaisesRegex(ValueError,
                                r'result callback must be provided'):
      with _ImageEmbedder.create_from_options(options) as unused_embedder:
        pass

  @parameterized.parameters((_RUNNING_MODE.IMAGE), (_RUNNING_MODE.VIDEO))
  def test_illegal_result_callback(self, running_mode):
    options = _ImageEmbedderOptions(
        base_options=_BaseOptions(model_asset_path=self.model_path),
        running_mode=running_mode,
        result_callback=mock.MagicMock())
    with self.assertRaisesRegex(ValueError,
                                r'result callback should not be provided'):
      with _ImageEmbedder.create_from_options(options) as unused_embedder:
        pass

  def test_calling_embed_for_video_in_image_mode(self):
    options = _ImageEmbedderOptions(
        base_options=_BaseOptions(model_asset_path=self.model_path),
        running_mode=_RUNNING_MODE.IMAGE)
    with _ImageEmbedder.create_from_options(options) as embedder:
      with self.assertRaisesRegex(ValueError,
                                  r'not initialized with the video mode'):
        embedder.embed_for_video(self.test_image, 0)

  def test_calling_embed_async_in_image_mode(self):
    options = _ImageEmbedderOptions(
        base_options=_BaseOptions(model_asset_path=self.model_path),
        running_mode=_RUNNING_MODE.IMAGE)
    with _ImageEmbedder.create_from_options(options) as embedder:
      with self.assertRaisesRegex(ValueError,
                                  r'not initialized with the live stream mode'):
        embedder.embed_async(self.test_image, 0)

  def test_calling_embed_in_video_mode(self):
    options = _ImageEmbedderOptions(
        base_options=_BaseOptions(model_asset_path=self.model_path),
        running_mode=_RUNNING_MODE.VIDEO)
    with _ImageEmbedder.create_from_options(options) as embedder:
      with self.assertRaisesRegex(ValueError,
                                  r'not initialized with the image mode'):
        embedder.embed(self.test_image)

  def test_calling_embed_async_in_video_mode(self):
    options = _ImageEmbedderOptions(
        base_options=_BaseOptions(model_asset_path=self.model_path),
        running_mode=_RUNNING_MODE.VIDEO)
    with _ImageEmbedder.create_from_options(options) as embedder:
      with self.assertRaisesRegex(ValueError,
                                  r'not initialized with the live stream mode'):
        embedder.embed_async(self.test_image, 0)

  def test_embed_for_video_with_out_of_order_timestamp(self):
    options = _ImageEmbedderOptions(
        base_options=_BaseOptions(model_asset_path=self.model_path),
        running_mode=_RUNNING_MODE.VIDEO)
    with _ImageEmbedder.create_from_options(options) as embedder:
      unused_result = embedder.embed_for_video(self.test_image, 1)
      with self.assertRaisesRegex(
          ValueError, r'Input timestamp must be monotonically increasing'):
        embedder.embed_for_video(self.test_image, 0)

  def test_embed_for_video(self):
    options = _ImageEmbedderOptions(
        base_options=_BaseOptions(model_asset_path=self.model_path),
        running_mode=_RUNNING_MODE.VIDEO)
    with _ImageEmbedder.create_from_options(options) as embedder0, \
         _ImageEmbedder.create_from_options(options) as embedder1:
      for timestamp in range(0, 300, 30):
        # Extracts both embeddings.
        image_result = embedder0.embed_for_video(self.test_image, timestamp)
        crop_result = embedder1.embed_for_video(self.test_cropped_image,
                                                timestamp)
        # Checks cosine similarity.
        self._check_cosine_similarity(
            image_result, crop_result, expected_similarity=0.925519)

  def test_embed_for_video_succeeds_with_region_of_interest(self):
    options = _ImageEmbedderOptions(
        base_options=_BaseOptions(model_asset_path=self.model_path),
        running_mode=_RUNNING_MODE.VIDEO)
    with _ImageEmbedder.create_from_options(options) as embedder0, \
         _ImageEmbedder.create_from_options(options) as embedder1:
      # Region-of-interest in "burger.jpg" corresponding to "burger_crop.jpg".
      roi = _Rect(left=0, top=0, right=0.833333, bottom=1)
      image_processing_options = _ImageProcessingOptions(roi)

      for timestamp in range(0, 300, 30):
        # Extracts both embeddings.
        image_result = embedder0.embed_for_video(self.test_image, timestamp,
                                                 image_processing_options)
        crop_result = embedder1.embed_for_video(self.test_cropped_image,
                                                timestamp)

        # Checks cosine similarity.
        self._check_cosine_similarity(
            image_result, crop_result, expected_similarity=0.999931)

  def test_calling_embed_in_live_stream_mode(self):
    options = _ImageEmbedderOptions(
        base_options=_BaseOptions(model_asset_path=self.model_path),
        running_mode=_RUNNING_MODE.LIVE_STREAM,
        result_callback=mock.MagicMock())
    with _ImageEmbedder.create_from_options(options) as embedder:
      with self.assertRaisesRegex(ValueError,
                                  r'not initialized with the image mode'):
        embedder.embed(self.test_image)

  def test_calling_embed_for_video_in_live_stream_mode(self):
    options = _ImageEmbedderOptions(
        base_options=_BaseOptions(model_asset_path=self.model_path),
        running_mode=_RUNNING_MODE.LIVE_STREAM,
        result_callback=mock.MagicMock())
    with _ImageEmbedder.create_from_options(options) as embedder:
      with self.assertRaisesRegex(ValueError,
                                  r'not initialized with the video mode'):
        embedder.embed_for_video(self.test_image, 0)

  def test_embed_async_calls_with_illegal_timestamp(self):
    options = _ImageEmbedderOptions(
        base_options=_BaseOptions(model_asset_path=self.model_path),
        running_mode=_RUNNING_MODE.LIVE_STREAM,
        result_callback=mock.MagicMock())
    with _ImageEmbedder.create_from_options(options) as embedder:
      embedder.embed_async(self.test_image, 100)
      with self.assertRaisesRegex(
          ValueError, r'Input timestamp must be monotonically increasing'):
        embedder.embed_async(self.test_image, 0)

  def test_embed_async_calls(self):
    # Get the embedding result for the cropped image.
    options = _ImageEmbedderOptions(
        base_options=_BaseOptions(model_asset_path=self.model_path),
        running_mode=_RUNNING_MODE.IMAGE)
    with _ImageEmbedder.create_from_options(options) as embedder:
      crop_result = embedder.embed(self.test_cropped_image)

    self.observed_timestamp_ms = -1

    def check_result(result: ImageEmbedderResult, output_image: _Image,
                     timestamp_ms: int):
      # Checks cosine similarity.
      self._check_cosine_similarity(
          result, crop_result, expected_similarity=0.925519)
      self.assertTrue(
          np.array_equal(output_image.numpy_view(),
                         self.test_image.numpy_view()))
      self.assertLess(self.observed_timestamp_ms, timestamp_ms)
      self.observed_timestamp_ms = timestamp_ms

    options = _ImageEmbedderOptions(
        base_options=_BaseOptions(model_asset_path=self.model_path),
        running_mode=_RUNNING_MODE.LIVE_STREAM,
        result_callback=check_result)
    with _ImageEmbedder.create_from_options(options) as embedder:
      for timestamp in range(0, 300, 30):
        embedder.embed_async(self.test_image, timestamp)

  def test_embed_async_succeeds_with_region_of_interest(self):
    # Get the embedding result for the cropped image.
    options = _ImageEmbedderOptions(
        base_options=_BaseOptions(model_asset_path=self.model_path),
        running_mode=_RUNNING_MODE.IMAGE)
    with _ImageEmbedder.create_from_options(options) as embedder:
      crop_result = embedder.embed(self.test_cropped_image)

    # Region-of-interest in "burger.jpg" corresponding to "burger_crop.jpg".
    roi = _Rect(left=0, top=0, right=0.833333, bottom=1)
    image_processing_options = _ImageProcessingOptions(roi)
    self.observed_timestamp_ms = -1

    def check_result(result: ImageEmbedderResult, output_image: _Image,
                     timestamp_ms: int):
      # Checks cosine similarity.
      self._check_cosine_similarity(
          result, crop_result, expected_similarity=0.999931)
      self.assertTrue(
          np.array_equal(output_image.numpy_view(),
                         self.test_image.numpy_view()))
      self.assertLess(self.observed_timestamp_ms, timestamp_ms)
      self.observed_timestamp_ms = timestamp_ms

    options = _ImageEmbedderOptions(
        base_options=_BaseOptions(model_asset_path=self.model_path),
        running_mode=_RUNNING_MODE.LIVE_STREAM,
        result_callback=check_result)
    with _ImageEmbedder.create_from_options(options) as embedder:
      for timestamp in range(0, 300, 30):
        embedder.embed_async(self.test_image, timestamp,
                             image_processing_options)


if __name__ == '__main__':
  absltest.main()
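
Note: the assertions above delegate to `ImageEmbedder.cosine_similarity`. As a point of reference for what they check, here is a minimal numpy sketch of cosine similarity over two float vectors; the names are illustrative and this is not the task library's actual implementation:

import numpy as np

def cosine_similarity_reference(u: np.ndarray, v: np.ndarray) -> float:
  # Cosine similarity: dot(u, v) / (||u|| * ||v||); undefined for zero norms.
  norm_u, norm_v = np.linalg.norm(u), np.linalg.norm(v)
  if norm_u == 0 or norm_v == 0:
    raise ValueError('Cosine similarity is undefined for zero-norm vectors.')
  return float(np.dot(u, v) / (norm_u * norm_v))

# Identical vectors score 1.0; orthogonal vectors score 0.0.
assert abs(cosine_similarity_reference(
    np.array([1.0, 0.0]), np.array([1.0, 0.0])) - 1.0) < 1e-9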
@@ -79,6 +79,29 @@ py_library(
    ],
)

py_library(
    name = "image_embedder",
    srcs = [
        "image_embedder.py",
    ],
    deps = [
        "//mediapipe/python:_framework_bindings",
        "//mediapipe/python:packet_creator",
        "//mediapipe/python:packet_getter",
        "//mediapipe/tasks/cc/components/containers/proto:embeddings_py_pb2",
        "//mediapipe/tasks/cc/vision/image_embedder/proto:image_embedder_graph_options_py_pb2",
        "//mediapipe/tasks/python/components/containers:embedding_result",
        "//mediapipe/tasks/python/components/processors:embedder_options",
        "//mediapipe/tasks/python/components/utils:cosine_similarity",
        "//mediapipe/tasks/python/core:base_options",
        "//mediapipe/tasks/python/core:optional_dependencies",
        "//mediapipe/tasks/python/core:task_info",
        "//mediapipe/tasks/python/vision/core:base_vision_task_api",
        "//mediapipe/tasks/python/vision/core:image_processing_options",
        "//mediapipe/tasks/python/vision/core:vision_task_running_mode",
    ],
)

py_library(
    name = "gesture_recognizer",
    srcs = [
@@ -18,6 +18,7 @@ import mediapipe.tasks.python.vision.core
import mediapipe.tasks.python.vision.gesture_recognizer
import mediapipe.tasks.python.vision.hand_landmarker
import mediapipe.tasks.python.vision.image_classifier
import mediapipe.tasks.python.vision.image_embedder
import mediapipe.tasks.python.vision.image_segmenter
import mediapipe.tasks.python.vision.object_detector
@@ -29,6 +30,10 @@ HandLandmarkerOptions = hand_landmarker.HandLandmarkerOptions
HandLandmarkerResult = hand_landmarker.HandLandmarkerResult
ImageClassifier = image_classifier.ImageClassifier
ImageClassifierOptions = image_classifier.ImageClassifierOptions
ImageClassifierResult = image_classifier.ImageClassifierResult
ImageEmbedder = image_embedder.ImageEmbedder
ImageEmbedderOptions = image_embedder.ImageEmbedderOptions
ImageEmbedderResult = image_embedder.ImageEmbedderResult
ImageSegmenter = image_segmenter.ImageSegmenter
ImageSegmenterOptions = image_segmenter.ImageSegmenterOptions
ObjectDetector = object_detector.ObjectDetector
@@ -40,6 +45,7 @@ del core
del gesture_recognizer
del hand_landmarker
del image_classifier
del image_embedder
del image_segmenter
del object_detector
del mediapipe
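
With the aliases above, the new task is importable from the `vision` package root. A minimal sketch, assuming a local model file (the path below is a placeholder, not part of this change):

from mediapipe.tasks.python import vision
from mediapipe.tasks.python.core import base_options as base_options_module

options = vision.ImageEmbedderOptions(
    base_options=base_options_module.BaseOptions(
        model_asset_path='/path/to/embedder.tflite'))  # placeholder path
with vision.ImageEmbedder.create_from_options(options) as embedder:
  pass  # embedder.embed(...) can now be called with a mediapipe Image.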
309 mediapipe/tasks/python/vision/image_embedder.py Normal file
@@ -0,0 +1,309 @@
# Copyright 2022 The MediaPipe Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""MediaPipe image embedder task."""

import dataclasses
from typing import Callable, Mapping, Optional

from mediapipe.python import packet_creator
from mediapipe.python import packet_getter
from mediapipe.python._framework_bindings import image as image_module
from mediapipe.python._framework_bindings import packet as packet_module
from mediapipe.tasks.cc.components.containers.proto import embeddings_pb2
from mediapipe.tasks.cc.vision.image_embedder.proto import image_embedder_graph_options_pb2
from mediapipe.tasks.python.components.containers import embedding_result as embedding_result_module
from mediapipe.tasks.python.components.processors import embedder_options
from mediapipe.tasks.python.components.utils import cosine_similarity
from mediapipe.tasks.python.core import base_options as base_options_module
from mediapipe.tasks.python.core import task_info as task_info_module
from mediapipe.tasks.python.core.optional_dependencies import doc_controls
from mediapipe.tasks.python.vision.core import base_vision_task_api
from mediapipe.tasks.python.vision.core import image_processing_options as image_processing_options_module
from mediapipe.tasks.python.vision.core import vision_task_running_mode as running_mode_module

ImageEmbedderResult = embedding_result_module.EmbeddingResult
_BaseOptions = base_options_module.BaseOptions
_ImageEmbedderGraphOptionsProto = image_embedder_graph_options_pb2.ImageEmbedderGraphOptions
_EmbedderOptions = embedder_options.EmbedderOptions
_RunningMode = running_mode_module.VisionTaskRunningMode
_TaskInfo = task_info_module.TaskInfo
_ImageProcessingOptions = image_processing_options_module.ImageProcessingOptions

_EMBEDDINGS_OUT_STREAM_NAME = 'embeddings_out'
_EMBEDDINGS_TAG = 'EMBEDDINGS'
_IMAGE_IN_STREAM_NAME = 'image_in'
_IMAGE_OUT_STREAM_NAME = 'image_out'
_IMAGE_TAG = 'IMAGE'
_NORM_RECT_STREAM_NAME = 'norm_rect_in'
_NORM_RECT_TAG = 'NORM_RECT'
_TASK_GRAPH_NAME = 'mediapipe.tasks.vision.image_embedder.ImageEmbedderGraph'
_MICRO_SECONDS_PER_MILLISECOND = 1000

@dataclasses.dataclass
class ImageEmbedderOptions:
  """Options for the image embedder task.

  Attributes:
    base_options: Base options for the image embedder task.
    running_mode: The running mode of the task. Defaults to the image mode.
      The image embedder task has three running modes: 1) The image mode for
      embedding extraction on single image inputs. 2) The video mode for
      embedding extraction on the decoded frames of a video. 3) The live
      stream mode for embedding extraction on a live stream of input data,
      such as from a camera.
    embedder_options: Options for the image embedder task.
    result_callback: The user-defined result callback for processing live
      stream data. The result callback should only be specified when the
      running mode is set to the live stream mode.
  """
  base_options: _BaseOptions
  running_mode: _RunningMode = _RunningMode.IMAGE
  embedder_options: _EmbedderOptions = dataclasses.field(
      default_factory=_EmbedderOptions)
  result_callback: Optional[Callable[
      [ImageEmbedderResult, image_module.Image, int], None]] = None

  @doc_controls.do_not_generate_docs
  def to_pb2(self) -> _ImageEmbedderGraphOptionsProto:
    """Generates an ImageEmbedderOptions protobuf object."""
    base_options_proto = self.base_options.to_pb2()
    base_options_proto.use_stream_mode = (
        self.running_mode != _RunningMode.IMAGE)
    embedder_options_proto = self.embedder_options.to_pb2()

    return _ImageEmbedderGraphOptionsProto(
        base_options=base_options_proto,
        embedder_options=embedder_options_proto)

class ImageEmbedder(base_vision_task_api.BaseVisionTaskApi):
  """Class that performs embedding extraction on images."""

  @classmethod
  def create_from_model_path(cls, model_path: str) -> 'ImageEmbedder':
    """Creates an `ImageEmbedder` object from a TensorFlow Lite model and the default `ImageEmbedderOptions`.

    Note that the created `ImageEmbedder` instance is in image mode, for
    embedding extraction on single image inputs.

    Args:
      model_path: Path to the model.

    Returns:
      `ImageEmbedder` object that's created from the model file and the
      default `ImageEmbedderOptions`.

    Raises:
      ValueError: If failed to create `ImageEmbedder` object from the
        provided file, such as an invalid file path.
      RuntimeError: If other types of error occurred.
    """
    base_options = _BaseOptions(model_asset_path=model_path)
    options = ImageEmbedderOptions(
        base_options=base_options, running_mode=_RunningMode.IMAGE)
    return cls.create_from_options(options)

  @classmethod
  def create_from_options(cls,
                          options: ImageEmbedderOptions) -> 'ImageEmbedder':
    """Creates the `ImageEmbedder` object from image embedder options.

    Args:
      options: Options for the image embedder task.

    Returns:
      `ImageEmbedder` object that's created from `options`.

    Raises:
      ValueError: If failed to create `ImageEmbedder` object from
        `ImageEmbedderOptions`, such as a missing model.
      RuntimeError: If other types of error occurred.
    """

    def packets_callback(output_packets: Mapping[str, packet_module.Packet]):
      if output_packets[_IMAGE_OUT_STREAM_NAME].is_empty():
        return

      embedding_result_proto = embeddings_pb2.EmbeddingResult()
      embedding_result_proto.CopyFrom(
          packet_getter.get_proto(output_packets[_EMBEDDINGS_OUT_STREAM_NAME]))

      image = packet_getter.get_image(output_packets[_IMAGE_OUT_STREAM_NAME])
      timestamp = output_packets[_IMAGE_OUT_STREAM_NAME].timestamp
      options.result_callback(
          ImageEmbedderResult.create_from_pb2(embedding_result_proto), image,
          timestamp.value // _MICRO_SECONDS_PER_MILLISECOND)

    task_info = _TaskInfo(
        task_graph=_TASK_GRAPH_NAME,
        input_streams=[
            ':'.join([_IMAGE_TAG, _IMAGE_IN_STREAM_NAME]),
            ':'.join([_NORM_RECT_TAG, _NORM_RECT_STREAM_NAME]),
        ],
        output_streams=[
            ':'.join([_EMBEDDINGS_TAG, _EMBEDDINGS_OUT_STREAM_NAME]),
            ':'.join([_IMAGE_TAG, _IMAGE_OUT_STREAM_NAME])
        ],
        task_options=options)
    return cls(
        task_info.generate_graph_config(
            enable_flow_limiting=options.running_mode ==
            _RunningMode.LIVE_STREAM), options.running_mode,
        packets_callback if options.result_callback else None)

  def embed(
      self,
      image: image_module.Image,
      image_processing_options: Optional[_ImageProcessingOptions] = None
  ) -> ImageEmbedderResult:
    """Performs image embedding extraction on the provided MediaPipe Image.

    Extraction is performed on the region of interest specified in
    `image_processing_options` if provided, or on the entire image otherwise.

    Args:
      image: MediaPipe Image.
      image_processing_options: Options for image processing.

    Returns:
      An embedding result object that contains a list of embeddings.

    Raises:
      ValueError: If any of the input arguments is invalid.
      RuntimeError: If image embedder failed to run.
    """
    normalized_rect = self.convert_to_normalized_rect(image_processing_options)
    output_packets = self._process_image_data({
        _IMAGE_IN_STREAM_NAME:
            packet_creator.create_image(image),
        _NORM_RECT_STREAM_NAME:
            packet_creator.create_proto(normalized_rect.to_pb2())
    })

    embedding_result_proto = embeddings_pb2.EmbeddingResult()
    embedding_result_proto.CopyFrom(
        packet_getter.get_proto(output_packets[_EMBEDDINGS_OUT_STREAM_NAME]))

    return ImageEmbedderResult.create_from_pb2(embedding_result_proto)

  def embed_for_video(
      self,
      image: image_module.Image,
      timestamp_ms: int,
      image_processing_options: Optional[_ImageProcessingOptions] = None
  ) -> ImageEmbedderResult:
    """Performs image embedding extraction on the provided video frames.

    Extraction is performed on the region of interest specified in
    `image_processing_options` if provided, or on the entire image otherwise.

    Only use this method when the ImageEmbedder is created with the video
    running mode. It's required to provide the video frame's timestamp (in
    milliseconds) along with the video frame. The input timestamps should be
    monotonically increasing for adjacent calls of this method.

    Args:
      image: MediaPipe Image.
      timestamp_ms: The timestamp of the input video frame in milliseconds.
      image_processing_options: Options for image processing.

    Returns:
      An embedding result object that contains a list of embeddings.

    Raises:
      ValueError: If any of the input arguments is invalid.
      RuntimeError: If image embedder failed to run.
    """
    normalized_rect = self.convert_to_normalized_rect(image_processing_options)
    output_packets = self._process_video_data({
        _IMAGE_IN_STREAM_NAME:
            packet_creator.create_image(image).at(
                timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND),
        _NORM_RECT_STREAM_NAME:
            packet_creator.create_proto(normalized_rect.to_pb2()).at(
                timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND)
    })
    embedding_result_proto = embeddings_pb2.EmbeddingResult()
    embedding_result_proto.CopyFrom(
        packet_getter.get_proto(output_packets[_EMBEDDINGS_OUT_STREAM_NAME]))

    return ImageEmbedderResult.create_from_pb2(embedding_result_proto)

  def embed_async(
      self,
      image: image_module.Image,
      timestamp_ms: int,
      image_processing_options: Optional[_ImageProcessingOptions] = None
  ) -> None:
    """Sends live image data to embedder.

    Embedding extraction is performed on the region of interest specified in
    `image_processing_options` if provided, or on the entire image otherwise.

    Only use this method when the ImageEmbedder is created with the live
    stream running mode. The input timestamps should be monotonically
    increasing for adjacent calls of this method. This method will return
    immediately after the input image is accepted. The results will be
    available via the `result_callback` provided in the
    `ImageEmbedderOptions`. The `embed_async` method is designed to process
    live stream data such as camera input. To lower the overall latency, the
    image embedder may drop input images if needed. In other words, it's not
    guaranteed to have output per input image.

    The `result_callback` provides:
      - An embedding result object that contains a list of embeddings.
      - The input image that the image embedder runs on.
      - The input timestamp in milliseconds.

    Args:
      image: MediaPipe Image.
      timestamp_ms: The timestamp of the input image in milliseconds.
      image_processing_options: Options for image processing.

    Raises:
      ValueError: If the current input timestamp is smaller than what the
        image embedder has already processed.
    """
    normalized_rect = self.convert_to_normalized_rect(image_processing_options)
    self._send_live_stream_data({
        _IMAGE_IN_STREAM_NAME:
            packet_creator.create_image(image).at(
                timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND),
        _NORM_RECT_STREAM_NAME:
            packet_creator.create_proto(normalized_rect.to_pb2()).at(
                timestamp_ms * _MICRO_SECONDS_PER_MILLISECOND)
    })

  @classmethod
  def cosine_similarity(cls, u: embedding_result_module.Embedding,
                        v: embedding_result_module.Embedding) -> float:
    """Utility function to compute cosine similarity between two embedding entries.

    Args:
      u: An embedding entry.
      v: An embedding entry.

    Returns:
      The cosine similarity for the two embeddings.

    Raises:
      ValueError: If the feature vectors are of different types (quantized
        vs. float), have different sizes, or have an L2-norm of 0.
    """
    return cosine_similarity.cosine_similarity(u, v)
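
Taken together, the three running modes map onto three call patterns: `embed` for standalone images, `embed_for_video` for decoded frames with monotonically increasing timestamps, and `embed_async` with a `result_callback` for live streams. A hedged usage sketch, assuming a placeholder model path and a mediapipe `Image` named `mp_image` loaded elsewhere:

from mediapipe.tasks.python.core import base_options as base_options_module
from mediapipe.tasks.python.vision import image_embedder
from mediapipe.tasks.python.vision.core import vision_task_running_mode

_RunningMode = vision_task_running_mode.VisionTaskRunningMode
base_options = base_options_module.BaseOptions(
    model_asset_path='/path/to/embedder.tflite')  # placeholder path

# Image mode (default): synchronous, one result per call.
with image_embedder.ImageEmbedder.create_from_options(
    image_embedder.ImageEmbedderOptions(base_options=base_options)) as task:
  result = task.embed(mp_image)  # mp_image: a mediapipe Image loaded elsewhere

# Live stream mode: results arrive via the callback; frames may be dropped.
def on_result(result, image, timestamp_ms):
  print(timestamp_ms, len(result.embeddings))

options = image_embedder.ImageEmbedderOptions(
    base_options=base_options,
    running_mode=_RunningMode.LIVE_STREAM,
    result_callback=on_result)
with image_embedder.ImageEmbedder.create_from_options(options) as task:
  task.embed_async(mp_image, 0)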
@@ -31,6 +31,7 @@ rollup_bundle(
    name = "audio_bundle",
    config_file = "rollup.config.mjs",
    entry_point = "audio.ts",
    format = "cjs",
    output_dir = False,
    deps = [
        ":audio_lib",
@@ -67,6 +68,7 @@ rollup_bundle(
    name = "text_bundle",
    config_file = "rollup.config.mjs",
    entry_point = "text.ts",
    format = "cjs",
    output_dir = False,
    deps = [
        ":text_lib",
@@ -103,6 +105,7 @@ rollup_bundle(
    name = "vision_bundle",
    config_file = "rollup.config.mjs",
    entry_point = "vision.ts",
    format = "cjs",
    output_dir = False,
    deps = [
        ":vision_lib",
@@ -2,16 +2,15 @@
  "name": "@mediapipe/tasks-__NAME__",
  "version": "__VERSION__",
  "description": "__DESCRIPTION__",
  "main": "__NAME__bundle.js",
  "module": "__NAME__bundle.js",
  "main": "__NAME___bundle.js",
  "module": "__NAME___bundle.js",
  "exports": {
    ".": "./__NAME__bundle.js",
    "./loader": "./wasm/__NAME__wasm_internal.js",
    "./wasm": "./wasm/__NAME__wasm_internal.wasm"
    ".": "./__NAME___bundle.js",
    "./loader": "./wasm/__NAME___wasm_internal.js",
    "./wasm": "./wasm/__NAME___wasm_internal.wasm"
  },
  "author": "mediapipe@google.com",
  "license": "Apache-2.0",
  "type": "module",
  "dependencies": {
    "google-protobuf": "^3.21.2"
  },
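
The added underscore matters because `__NAME__` is substituted by plain string replacement: the old `__NAME__bundle.js` rendered as e.g. `visionbundle.js`, while `__NAME___bundle.js` renders as `vision_bundle.js`, matching the `*_bundle` targets above. A small illustrative sketch of the substitution (not the actual packaging script):

template = '"main": "__NAME___bundle.js"'
print(template.replace('__NAME__', 'vision'))  # -> "main": "vision_bundle.js"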
@@ -32,17 +32,23 @@ namespace mediapipe {

// Thin wrapper over AAssetManager provided by JNI. This class is meant to be
// used as a singleton.
// Usage: Call InitializeFromActivity from a JNI function that has access to the
// Java activity in the Android application. This initializes the asset manager
// and now files bundled in the assets folder can be read using ReadFile().
//
// Usage: Call one of the Initialize* functions from a JNI function that has
// access to a context/activity/etc. This initializes the asset manager, after
// which files bundled in the assets folder can be read using ReadFile().
//
// NOTE: initialization should happen strictly once and be guaranteed to
// complete before any possible use; otherwise it cannot be used safely across
// multiple threads.
class AssetManager {
 public:
  AssetManager(const AssetManager&) = delete;
  AssetManager& operator=(const AssetManager&) = delete;

  // Returns the asset manager if it has been set by a call to
  // InitializeFromActivity, otherwise returns nullptr.
  // Returns the asset manager if it has been set by one of the Initialize*
  // functions, otherwise returns nullptr.
  AAssetManager* GetAssetManager();

  // Returns true if AAssetManager was successfully initialized.
  bool InitializeFromAssetManager(JNIEnv* env, jobject local_asset_manager,
                                  const std::string& cache_dir_path);
@@ -55,7 +61,7 @@ class AssetManager {
                              const std::string& cache_dir_path);

  // Returns true if AAssetManager was successfully initialized.
  ABSL_DEPRECATED("Use InitializeFromActivity instead.")
  ABSL_DEPRECATED("Use one of alternate Initialize* functions instead.")
  bool InitializeFromAssetManager(JNIEnv* env, jobject local_asset_manager);

  // Returns true if AAssetManager was successfully initialized.
@@ -79,12 +85,14 @@ class AssetManager {
                              std::string* output);

  // Returns the path to the Android cache directory. Will be empty if
  // InitializeFromActivity has not been called.
  // AssetManager hasn't been initialized.
  const std::string& GetCacheDirPath();

  // Caches the contents of the given asset as a file, and returns a path to
  // that file. This can be used to pass an asset to APIs that require a path
  // to a filesystem file.
  // NOTE: this is _not_ thread-safe, e.g. if two threads request the
  // same file.
  absl::StatusOr<std::string> CachedFileFromAsset(
      const std::string& asset_path);