Merge branch 'master' into ios-task

This commit is contained in:
Prianka Liz Kariat 2022-12-05 18:24:28 +05:30
commit b2c6d48614
340 changed files with 7192 additions and 4480 deletions

View File

@ -26,7 +26,7 @@ versions.check(minimum_bazel_version = "3.7.2")
http_archive(
name = "com_google_absl",
urls = [
"https://github.com/abseil/abseil-cpp/archive/refs/tags/20210324.2.tar.gz",
"https://github.com/abseil/abseil-cpp/archive/refs/tags/20220623.1.tar.gz",
],
# Remove after https://github.com/abseil/abseil-cpp/issues/326 is solved.
patches = [
@ -35,8 +35,8 @@ http_archive(
patch_args = [
"-p1",
],
strip_prefix = "abseil-cpp-20210324.2",
sha256 = "59b862f50e710277f8ede96f083a5bb8d7c9595376146838b9580be90374ee1f"
strip_prefix = "abseil-cpp-20220623.1",
sha256 = "91ac87d30cc6d79f9ab974c51874a704de9c2647c40f6932597329a282217ba8"
)
http_archive(
@ -212,14 +212,14 @@ http_archive(
sha256 = "75922da3a1bdb417d820398eb03d4e9bd067c4905a4246d35a44c01d62154d91",
)
# Point to the commit that deprecates the usage of Eigen::MappedSparseMatrix.
# 2022-10-20
http_archive(
name = "pybind11",
urls = [
"https://github.com/pybind/pybind11/archive/70a58c577eaf067748c2ec31bfd0b0a614cffba6.zip",
"https://github.com/pybind/pybind11/archive/v2.10.1.zip",
],
sha256 = "b971842fab1b5b8f3815a2302331782b7d137fef0e06502422bc4bc360f4956c",
strip_prefix = "pybind11-70a58c577eaf067748c2ec31bfd0b0a614cffba6",
sha256 = "fcf94065efcfd0a7a828bacf118fa11c43f6390d0c805e3e6342ac119f2e9976",
strip_prefix = "pybind11-2.10.1",
build_file = "@pybind11_bazel//:pybind11.BUILD",
)

View File

@ -17,6 +17,7 @@ py_binary(
name = "build_java_api_docs",
srcs = ["build_java_api_docs.py"],
data = [
"//third_party/android/sdk:api/26.txt",
"//third_party/java/doclava/current:doclava.jar",
"//third_party/java/jsilver:jsilver_jar",
],

View File

@ -20,10 +20,6 @@ from absl import flags
from tensorflow_docs.api_generator import gen_java
_JAVA_ROOT = flags.DEFINE_string('java_src', None,
'Override the Java source path.',
required=False)
_OUT_DIR = flags.DEFINE_string('output_dir', '/tmp/mp_java/',
'Write docs here.')
@ -37,27 +33,30 @@ _ = flags.DEFINE_bool(
'search_hints', True,
'[UNUSED] Include metadata search hints in the generated files')
_ANDROID_SDK = pathlib.Path('android/sdk/api/26.txt')
def main(_) -> None:
if not (java_root := _JAVA_ROOT.value):
# Default to using a relative path to find the Java source.
mp_root = pathlib.Path(__file__)
while (mp_root := mp_root.parent).name != 'mediapipe':
# Find the nearest `mediapipe` dir.
pass
# Default to using a relative path to find the Java source.
mp_root = pathlib.Path(__file__)
while (mp_root := mp_root.parent).name != 'mediapipe':
# Find the nearest `mediapipe` dir.
pass
# Externally, parts of the repo are nested inside a mediapipe/ directory
# that does not exist internally. Support both.
if (mp_root / 'mediapipe').exists():
mp_root = mp_root / 'mediapipe'
# Find the root from which all packages are relative.
root = mp_root.parent
java_root = mp_root / 'tasks/java'
# Externally, parts of the repo are nested inside a mediapipe/ directory
# that does not exist internally. Support both.
if (mp_root / 'mediapipe').exists():
mp_root = mp_root / 'mediapipe'
gen_java.gen_java_docs(
package='com.google.mediapipe',
source_path=pathlib.Path(java_root),
source_path=mp_root / 'tasks/java',
output_dir=pathlib.Path(_OUT_DIR.value),
site_path=pathlib.Path(_SITE_PATH.value))
site_path=pathlib.Path(_SITE_PATH.value),
federated_docs={'https://developer.android.com': root / _ANDROID_SDK})
if __name__ == '__main__':

View File

@ -30,7 +30,7 @@ from tensorflow_docs.api_generator import public_api
try:
# mediapipe has not been set up to work with bazel yet, so catch & report.
import mediapipe # pytype: disable=import-error
import mediapipe as mp # pytype: disable=import-error
except ImportError as e:
raise ImportError('Please `pip install mediapipe`.') from e
@ -58,11 +58,13 @@ _SITE_PATH = flags.DEFINE_string('site_path', '/mediapipe/api_docs/python',
def gen_api_docs():
"""Generates API docs for the mediapipe package."""
if hasattr(mp, 'solutions'):
del mp.solutions
doc_generator = generate_lib.DocGenerator(
root_title=PROJECT_FULL_NAME,
py_modules=[(PROJECT_SHORT_NAME, mediapipe)],
base_dir=os.path.dirname(mediapipe.__file__),
py_modules=[(PROJECT_SHORT_NAME, mp)],
base_dir=os.path.dirname(mp.__file__),
code_url_prefix=_URL_PREFIX.value,
search_hints=_SEARCH_HINTS.value,
site_path=_SITE_PATH.value,

View File

@ -197,7 +197,6 @@ cc_library(
":spectrogram_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:matrix",
"//mediapipe/framework/formats:time_series_header_cc_proto",
"//mediapipe/framework/port:core_proto",
"//mediapipe/framework/port:integral_types",
"//mediapipe/framework/port:logging",

View File

@ -341,7 +341,6 @@ cc_test(
srcs = ["concatenate_proto_list_calculator_test.cc"],
deps = [
":concatenate_proto_list_calculator",
":concatenate_vector_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework:calculator_runner",
"//mediapipe/framework:timestamp",
@ -403,7 +402,6 @@ cc_test(
srcs = ["clip_vector_size_calculator_test.cc"],
deps = [
":clip_vector_size_calculator",
"//mediapipe/calculators/core:packet_resampler_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework:calculator_runner",
"//mediapipe/framework:timestamp",
@ -956,10 +954,10 @@ cc_library(
deps = [
":split_vector_calculator_cc_proto",
"//mediapipe/framework/formats:detection_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:landmark_cc_proto",
"//mediapipe/framework/formats:classification_cc_proto",
"//mediapipe/framework/formats:landmark_cc_proto",
"//mediapipe/framework/formats:rect_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:matrix",
"//mediapipe/framework/formats:tensor",
"//mediapipe/framework/port:ret_check",
@ -1301,6 +1299,7 @@ cc_library(
"//mediapipe/framework/api2:packet",
"//mediapipe/framework/api2:port",
"//mediapipe/framework/formats:classification_cc_proto",
"//mediapipe/framework/formats:detection_cc_proto",
"//mediapipe/framework/formats:landmark_cc_proto",
"//mediapipe/framework/port:ret_check",
"//mediapipe/framework/port:status",

View File

@ -111,6 +111,10 @@ class BypassCalculator : public Node {
cc->Outputs().Get(id).SetAny();
}
}
for (auto id = cc->InputSidePackets().BeginId();
id != cc->InputSidePackets().EndId(); ++id) {
cc->InputSidePackets().Get(id).SetAny();
}
return absl::OkStatus();
}

View File

@ -85,75 +85,6 @@ std::string SourceString(Timestamp t) {
: absl::StrCat("Timestamp(", t.DebugString(), ")");
}
template <typename T>
std::string SourceString(Packet packet) {
std::ostringstream oss;
if (packet.IsEmpty()) {
oss << "Packet()";
} else {
oss << "MakePacket<" << MediaPipeTypeStringOrDemangled<T>() << ">("
<< packet.Get<T>() << ")";
}
oss << ".At(" << SourceString(packet.Timestamp()) << ")";
return oss.str();
}
template <typename PacketContainer, typename PacketContent>
class PacketsEqMatcher
: public ::testing::MatcherInterface<const PacketContainer&> {
public:
PacketsEqMatcher(PacketContainer packets) : packets_(packets) {}
void DescribeTo(::std::ostream* os) const override {
*os << "The expected packet contents: \n";
Print(packets_, os);
}
bool MatchAndExplain(
const PacketContainer& value,
::testing::MatchResultListener* listener) const override {
if (!Equals(packets_, value)) {
if (listener->IsInterested()) {
*listener << "The actual packet contents: \n";
Print(value, listener->stream());
}
return false;
}
return true;
}
private:
bool Equals(const PacketContainer& c1, const PacketContainer& c2) const {
if (c1.size() != c2.size()) {
return false;
}
for (auto i1 = c1.begin(), i2 = c2.begin(); i1 != c1.end(); ++i1, ++i2) {
Packet p1 = *i1, p2 = *i2;
if (p1.Timestamp() != p2.Timestamp() || p1.IsEmpty() != p2.IsEmpty() ||
(!p1.IsEmpty() &&
p1.Get<PacketContent>() != p2.Get<PacketContent>())) {
return false;
}
}
return true;
}
void Print(const PacketContainer& packets, ::std::ostream* os) const {
for (auto it = packets.begin(); it != packets.end(); ++it) {
const Packet& packet = *it;
*os << (it == packets.begin() ? "{" : "");
*os << SourceString<PacketContent>(packet);
*os << (std::next(it) == packets.end() ? "}" : ", ");
}
}
const PacketContainer packets_;
};
template <typename PacketContainer, typename PacketContent>
::testing::Matcher<const PacketContainer&> PacketsEq(
const PacketContainer& packets) {
return MakeMatcher(
new PacketsEqMatcher<PacketContainer, PacketContent>(packets));
}
// A Calculator::Process callback function.
typedef std::function<absl::Status(const InputStreamShardSet&,
OutputStreamShardSet*)>
@ -743,9 +674,6 @@ TEST_F(FlowLimiterCalculatorTest, TwoInputStreams) {
// The processing time "sleep_time" is reduced from 22ms to 12ms to create
// the same frame rate as FlowLimiterCalculatorTest::TwoInputStreams.
TEST_F(FlowLimiterCalculatorTest, ZeroQueue) {
auto BoolPacketsEq = PacketsEq<std::vector<Packet>, bool>;
auto IntPacketsEq = PacketsEq<std::vector<Packet>, int>;
// Configure the test.
SetUpInputData();
SetUpSimulationClock();
@ -839,13 +767,16 @@ TEST_F(FlowLimiterCalculatorTest, ZeroQueue) {
input_packets_[0], input_packets_[2], input_packets_[15],
input_packets_[17], input_packets_[19],
};
EXPECT_THAT(out_1_packets_, IntPacketsEq(expected_output));
EXPECT_THAT(out_1_packets_,
ElementsAreArray(PacketMatchers<int>(expected_output)));
// Exactly the timestamps released by FlowLimiterCalculator for in_1_sampled.
std::vector<Packet> expected_output_2 = {
input_packets_[0], input_packets_[2], input_packets_[4],
input_packets_[15], input_packets_[17], input_packets_[19],
};
EXPECT_THAT(out_2_packets, IntPacketsEq(expected_output_2));
EXPECT_THAT(out_2_packets,
ElementsAreArray(PacketMatchers<int>(expected_output_2)));
// Validate the ALLOW stream output.
std::vector<Packet> expected_allow = {
@ -871,7 +802,8 @@ TEST_F(FlowLimiterCalculatorTest, ZeroQueue) {
MakePacket<bool>(true).At(Timestamp(190000)),
MakePacket<bool>(false).At(Timestamp(200000)),
};
EXPECT_THAT(allow_packets_, BoolPacketsEq(expected_allow));
EXPECT_THAT(allow_packets_,
ElementsAreArray(PacketMatchers<bool>(expected_allow)));
}
std::vector<Packet> StripBoundsUpdates(const std::vector<Packet>& packets,
@ -891,9 +823,6 @@ std::vector<Packet> StripBoundsUpdates(const std::vector<Packet>& packets,
// Shows how FlowLimiterCalculator releases auxiliary input packets.
// In this test, auxiliary input packets arrive at twice the primary rate.
TEST_F(FlowLimiterCalculatorTest, AuxiliaryInputs) {
auto BoolPacketsEq = PacketsEq<std::vector<Packet>, bool>;
auto IntPacketsEq = PacketsEq<std::vector<Packet>, int>;
// Configure the test.
SetUpInputData();
SetUpSimulationClock();
@ -1011,7 +940,8 @@ TEST_F(FlowLimiterCalculatorTest, AuxiliaryInputs) {
MakePacket<int>(6).At(Timestamp(60000)),
Packet().At(Timestamp(80000)),
};
EXPECT_THAT(out_1_packets_, IntPacketsEq(expected_output));
EXPECT_THAT(out_1_packets_,
ElementsAreArray(PacketMatchers<int>(expected_output)));
// Packets following input packets 2 and 6, and not input packets 4 and 8.
std::vector<Packet> expected_auxiliary_output = {
@ -1031,12 +961,13 @@ TEST_F(FlowLimiterCalculatorTest, AuxiliaryInputs) {
};
std::vector<Packet> actual_2 =
StripBoundsUpdates(out_2_packets, Timestamp(90000));
EXPECT_THAT(actual_2, IntPacketsEq(expected_auxiliary_output));
EXPECT_THAT(actual_2,
ElementsAreArray(PacketMatchers<int>(expected_auxiliary_output)));
std::vector<Packet> expected_3 =
StripBoundsUpdates(expected_auxiliary_output, Timestamp(39999));
std::vector<Packet> actual_3 =
StripBoundsUpdates(out_3_packets, Timestamp(39999));
EXPECT_THAT(actual_3, IntPacketsEq(expected_3));
EXPECT_THAT(actual_3, ElementsAreArray(PacketMatchers<int>(expected_3)));
// Validate the ALLOW stream output.
std::vector<Packet> expected_allow = {
@ -1045,7 +976,8 @@ TEST_F(FlowLimiterCalculatorTest, AuxiliaryInputs) {
MakePacket<bool>(true).At(Timestamp(60000)),
MakePacket<bool>(false).At(Timestamp(80000)),
};
EXPECT_THAT(allow_packets_, BoolPacketsEq(expected_allow));
EXPECT_THAT(allow_packets_,
ElementsAreArray(PacketMatchers<bool>(expected_allow)));
}
} // anonymous namespace

View File

@ -15,6 +15,7 @@
#include "mediapipe/calculators/core/get_vector_item_calculator.h"
#include "mediapipe/framework/formats/classification.pb.h"
#include "mediapipe/framework/formats/detection.pb.h"
#include "mediapipe/framework/formats/landmark.pb.h"
namespace mediapipe {
@ -32,5 +33,9 @@ using GetClassificationListVectorItemCalculator =
GetVectorItemCalculator<mediapipe::ClassificationList>;
REGISTER_CALCULATOR(GetClassificationListVectorItemCalculator);
using GetDetectionVectorItemCalculator =
GetVectorItemCalculator<mediapipe::Detection>;
REGISTER_CALCULATOR(GetDetectionVectorItemCalculator);
} // namespace api2
} // namespace mediapipe

View File

@ -41,6 +41,10 @@ class MuxCalculator : public Node {
StreamHandler("MuxInputStreamHandler"));
absl::Status Process(CalculatorContext* cc) final {
if (kSelect(cc).IsStream() && kSelect(cc).IsEmpty()) {
return absl::OkStatus();
}
int select = *kSelect(cc);
RET_CHECK(0 <= select && select < kIn(cc).Count());
if (!kIn(cc)[select].IsEmpty()) {

View File

@ -398,6 +398,95 @@ TEST(MuxCalculatorTest, HandleTimestampBoundUpdates) {
MP_ASSERT_OK(graph.WaitUntilDone());
}
TEST(MuxCalculatorTest, HandlesCloseGracefully) {
CalculatorGraphConfig config =
mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(
R"pb(
input_stream: "select"
input_stream: "value_0"
input_stream: "value_1"
node {
calculator: "MuxCalculator"
input_stream: "SELECT:select"
input_stream: "INPUT:0:value_0"
input_stream: "INPUT:1:value_1"
output_stream: "OUTPUT:output"
}
)pb");
CalculatorGraph graph;
MP_ASSERT_OK(graph.Initialize(config));
// Observe packets.
std::vector<Packet> output_packets;
MP_ASSERT_OK(graph.ObserveOutputStream(
"output",
[&output_packets](const Packet& p) -> absl::Status {
output_packets.push_back(p);
return absl::OkStatus();
},
/*observe_timestamp_bounds=*/true));
// Start graph.
MP_ASSERT_OK(graph.StartRun({}));
// Add single packet wait for completion and close.
MP_ASSERT_OK(graph.AddPacketToInputStream(
"value_0", MakePacket<int>(0).At(Timestamp(1000))));
MP_ASSERT_OK(graph.WaitUntilIdle());
MP_ASSERT_OK(graph.CloseAllInputStreams());
MP_ASSERT_OK(graph.WaitUntilDone());
EXPECT_TRUE(output_packets.empty());
}
TEST(MuxCalculatorTest, HandlesCloseGracefullyWithDeafultInputStreamHandler) {
CalculatorGraphConfig config =
mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(
R"pb(
# This is required in order for EXPECT_DEATH to work everywhere
executor { name: "" type: "ApplicationThreadExecutor" }
input_stream: "select"
input_stream: "value_0"
input_stream: "value_1"
node {
calculator: "MuxCalculator"
input_stream: "SELECT:select"
input_stream: "INPUT:0:value_0"
input_stream: "INPUT:1:value_1"
output_stream: "OUTPUT:output"
input_stream_handler {
input_stream_handler: "DefaultInputStreamHandler"
}
}
)pb");
CalculatorGraph graph;
MP_ASSERT_OK(graph.Initialize(config));
// Observe packets.
std::vector<Packet> output_packets;
MP_ASSERT_OK(graph.ObserveOutputStream(
"output",
[&output_packets](const Packet& p) -> absl::Status {
output_packets.push_back(p);
return absl::OkStatus();
},
/*observe_timestamp_bounds=*/true));
// Start graph.
MP_ASSERT_OK(graph.StartRun({}));
// Add single packet wait for completion and close.
MP_ASSERT_OK(graph.AddPacketToInputStream(
"value_0", MakePacket<int>(0).At(Timestamp(1000))));
MP_ASSERT_OK(graph.WaitUntilIdle());
MP_ASSERT_OK(graph.CloseAllInputStreams());
MP_ASSERT_OK(graph.WaitUntilDone());
ASSERT_EQ(output_packets.size(), 1);
EXPECT_TRUE(output_packets[0].IsEmpty());
}
} // namespace
} // namespace mediapipe

View File

@ -16,12 +16,11 @@ load("//mediapipe/framework/port:build_config.bzl", "mediapipe_proto_library")
licenses(["notice"])
package(default_visibility = ["//visibility:private"])
package(default_visibility = ["//visibility:public"])
mediapipe_proto_library(
name = "opencv_image_encoder_calculator_proto",
srcs = ["opencv_image_encoder_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -31,7 +30,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "scale_image_calculator_proto",
srcs = ["scale_image_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -42,7 +40,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "set_alpha_calculator_proto",
srcs = ["set_alpha_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -52,7 +49,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "image_cropping_calculator_proto",
srcs = ["image_cropping_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -62,7 +58,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "bilateral_filter_calculator_proto",
srcs = ["bilateral_filter_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -72,7 +67,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "recolor_calculator_proto",
srcs = ["recolor_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -83,7 +77,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "segmentation_smoothing_calculator_proto",
srcs = ["segmentation_smoothing_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -93,7 +86,6 @@ mediapipe_proto_library(
cc_library(
name = "color_convert_calculator",
srcs = ["color_convert_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework:timestamp",
@ -112,7 +104,6 @@ cc_library(
cc_library(
name = "opencv_encoded_image_to_image_frame_calculator",
srcs = ["opencv_encoded_image_to_image_frame_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":opencv_encoded_image_to_image_frame_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -127,7 +118,6 @@ cc_library(
cc_library(
name = "opencv_image_encoder_calculator",
srcs = ["opencv_image_encoder_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":opencv_image_encoder_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -142,7 +132,6 @@ cc_library(
cc_library(
name = "opencv_put_text_calculator",
srcs = ["opencv_put_text_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:image_frame_opencv",
@ -156,11 +145,10 @@ cc_library(
cc_library(
name = "set_alpha_calculator",
srcs = ["set_alpha_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":set_alpha_calculator_cc_proto",
"//mediapipe/framework:calculator_options_cc_proto",
"//mediapipe/framework/formats:image_format_cc_proto",
"//mediapipe/framework:calculator_options_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:image_frame",
"//mediapipe/framework/formats:image_frame_opencv",
@ -183,11 +171,10 @@ cc_library(
cc_library(
name = "bilateral_filter_calculator",
srcs = ["bilateral_filter_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":bilateral_filter_calculator_cc_proto",
"//mediapipe/framework:calculator_options_cc_proto",
"//mediapipe/framework/formats:image_format_cc_proto",
"//mediapipe/framework:calculator_options_cc_proto",
"@com_google_absl//absl/strings",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:image_frame",
@ -212,13 +199,11 @@ cc_library(
mediapipe_proto_library(
name = "rotation_mode_proto",
srcs = ["rotation_mode.proto"],
visibility = ["//visibility:public"],
)
mediapipe_proto_library(
name = "image_transformation_calculator_proto",
srcs = ["image_transformation_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
":rotation_mode_proto",
"//mediapipe/framework:calculator_options_proto",
@ -243,7 +228,6 @@ cc_library(
],
"//conditions:default": [],
}),
visibility = ["//visibility:public"],
deps = [
":rotation_mode_cc_proto",
":image_transformation_calculator_cc_proto",
@ -287,13 +271,12 @@ cc_library(
],
"//conditions:default": [],
}),
visibility = ["//visibility:public"],
deps = [
":image_cropping_calculator_cc_proto",
"//mediapipe/framework/formats:rect_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:image_frame",
"//mediapipe/framework/formats:image_frame_opencv",
"//mediapipe/framework/formats:rect_cc_proto",
"//mediapipe/framework/port:opencv_core",
"//mediapipe/framework/port:opencv_imgproc",
"//mediapipe/framework/port:ret_check",
@ -330,7 +313,6 @@ cc_test(
cc_library(
name = "luminance_calculator",
srcs = ["luminance_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework/port:ret_check",
"//mediapipe/framework/port:status",
@ -344,7 +326,6 @@ cc_library(
cc_library(
name = "sobel_edges_calculator",
srcs = ["sobel_edges_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework/port:ret_check",
"//mediapipe/framework/port:status",
@ -358,15 +339,14 @@ cc_library(
cc_library(
name = "recolor_calculator",
srcs = ["recolor_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":recolor_calculator_cc_proto",
"//mediapipe/util:color_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:image_frame",
"//mediapipe/framework/formats:image_frame_opencv",
"//mediapipe/framework/port:status",
"//mediapipe/framework/port:ret_check",
"//mediapipe/util:color_cc_proto",
"//mediapipe/framework/port:opencv_core",
"//mediapipe/framework/port:opencv_imgproc",
] + select({
@ -385,9 +365,6 @@ cc_library(
name = "scale_image_utils",
srcs = ["scale_image_utils.cc"],
hdrs = ["scale_image_utils.h"],
visibility = [
"//mediapipe:__subpackages__",
],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/port:logging",
@ -400,9 +377,6 @@ cc_library(
cc_library(
name = "scale_image_calculator",
srcs = ["scale_image_calculator.cc"],
visibility = [
"//visibility:public",
],
deps = [
":scale_image_utils",
"//mediapipe/calculators/image:scale_image_calculator_cc_proto",
@ -429,7 +403,6 @@ cc_library(
mediapipe_proto_library(
name = "image_clone_calculator_proto",
srcs = ["image_clone_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -439,7 +412,6 @@ mediapipe_proto_library(
cc_library(
name = "image_clone_calculator",
srcs = ["image_clone_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":image_clone_calculator_cc_proto",
"//mediapipe/framework/api2:node",
@ -459,7 +431,6 @@ cc_library(
cc_library(
name = "image_properties_calculator",
srcs = ["image_properties_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework/api2:node",
"//mediapipe/framework:calculator_framework",
@ -524,7 +495,6 @@ cc_test(
mediapipe_proto_library(
name = "mask_overlay_calculator_proto",
srcs = ["mask_overlay_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -534,7 +504,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "opencv_encoded_image_to_image_frame_calculator_proto",
srcs = ["opencv_encoded_image_to_image_frame_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -544,7 +513,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "feature_detector_calculator_proto",
srcs = ["feature_detector_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -554,7 +522,6 @@ mediapipe_proto_library(
cc_library(
name = "mask_overlay_calculator",
srcs = ["mask_overlay_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":mask_overlay_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -570,7 +537,6 @@ cc_library(
cc_library(
name = "feature_detector_calculator",
srcs = ["feature_detector_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":feature_detector_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -597,7 +563,6 @@ cc_library(
cc_library(
name = "image_file_properties_calculator",
srcs = ["image_file_properties_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:image_file_properties_cc_proto",
@ -627,11 +592,10 @@ cc_test(
cc_library(
name = "segmentation_smoothing_calculator",
srcs = ["segmentation_smoothing_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":segmentation_smoothing_calculator_cc_proto",
"//mediapipe/framework:calculator_options_cc_proto",
"//mediapipe/framework/formats:image_format_cc_proto",
"//mediapipe/framework:calculator_options_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:image_frame",
"//mediapipe/framework/formats:image",
@ -724,7 +688,6 @@ cc_library(
mediapipe_proto_library(
name = "warp_affine_calculator_proto",
srcs = ["warp_affine_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -736,7 +699,6 @@ cc_library(
name = "warp_affine_calculator",
srcs = ["warp_affine_calculator.cc"],
hdrs = ["warp_affine_calculator.h"],
visibility = ["//visibility:public"],
deps = [
":affine_transformation",
":warp_affine_calculator_cc_proto",
@ -817,7 +779,6 @@ cc_test(
cc_library(
name = "yuv_to_image_calculator",
srcs = ["yuv_to_image_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_context",
"//mediapipe/framework:calculator_framework",

View File

@ -30,6 +30,7 @@ exports_files(
glob(["testdata/image_to_tensor/*"]),
visibility = [
"//mediapipe/calculators/image:__subpackages__",
"//mediapipe/util:__subpackages__",
],
)
@ -433,6 +434,7 @@ cc_library(
}),
visibility = ["//visibility:public"],
deps = [
":inference_calculator_cc_proto",
":inference_calculator_options_lib",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/api2:node",
@ -463,6 +465,7 @@ cc_library(
"//mediapipe/gpu:gl_calculator_helper",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/status",
"@com_google_absl//absl/strings:str_format",
"@org_tensorflow//tensorflow/lite/delegates/gpu:gl_delegate",
],
alwayslink = 1,
@ -512,6 +515,7 @@ cc_library(
"//mediapipe/objc:mediapipe_framework_ios",
"//mediapipe/util/tflite:config",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings:str_format",
"@org_tensorflow//tensorflow/lite/delegates/gpu:metal_delegate",
"@org_tensorflow//tensorflow/lite/delegates/gpu:metal_delegate_internal",
"@org_tensorflow//tensorflow/lite/delegates/gpu/common:shape",
@ -794,12 +798,12 @@ cc_library(
"@com_google_absl//absl/strings:str_format",
"@com_google_absl//absl/types:span",
"//mediapipe/framework/api2:node",
"//mediapipe/framework/formats/object_detection:anchor_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework:port",
"//mediapipe/framework/deps:file_path",
"//mediapipe/framework/formats:location",
"//mediapipe/framework/formats:tensor",
"//mediapipe/framework/formats/object_detection:anchor_cc_proto",
"//mediapipe/framework/port:ret_check",
] + selects.with_or({
":compute_shader_unavailable": [],
@ -1130,6 +1134,7 @@ cc_test(
"//mediapipe/framework/port:opencv_imgcodecs",
"//mediapipe/framework/port:opencv_imgproc",
"//mediapipe/framework/port:parse_text_proto",
"//mediapipe/util:image_test_utils",
"@com_google_absl//absl/flags:flag",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
@ -1279,7 +1284,6 @@ cc_library(
"//mediapipe/gpu:MPPMetalHelper",
"@com_google_absl//absl/strings",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:rect_cc_proto",
"//mediapipe/framework/formats:tensor",
"//mediapipe/framework/port:ret_check",
"//mediapipe/framework/port:status",
@ -1378,9 +1382,9 @@ cc_library(
"//mediapipe/framework:calculator_context",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework:port",
"//mediapipe/gpu:gpu_origin_cc_proto",
"//mediapipe/util:resource_util",
"@org_tensorflow//tensorflow/lite:framework",
"//mediapipe/gpu:gpu_origin_cc_proto",
"//mediapipe/framework/port:statusor",
] + selects.with_or({
"//mediapipe/gpu:disable_gpu": [],

View File

@ -43,6 +43,7 @@ namespace api2 {
namespace {
using Options = ::mediapipe::AudioToTensorCalculatorOptions;
using DftTensorFormat = Options::DftTensorFormat;
using FlushMode = Options::FlushMode;
std::vector<float> HannWindow(int window_size, bool sqrt_hann) {
@ -188,6 +189,8 @@ class AudioToTensorCalculator : public Node {
int padding_samples_before_;
int padding_samples_after_;
FlushMode flush_mode_;
DftTensorFormat dft_tensor_format_;
Timestamp initial_timestamp_ = Timestamp::Unstarted();
int64 cumulative_input_samples_ = 0;
Timestamp next_output_timestamp_ = Timestamp::Unstarted();
@ -273,6 +276,7 @@ absl::Status AudioToTensorCalculator::Open(CalculatorContext* cc) {
}
padding_samples_before_ = options.padding_samples_before();
padding_samples_after_ = options.padding_samples_after();
dft_tensor_format_ = options.dft_tensor_format();
flush_mode_ = options.flush_mode();
RET_CHECK(kAudioSampleRateIn(cc).IsConnected() ^
@ -492,14 +496,43 @@ absl::Status AudioToTensorCalculator::OutputTensor(const Matrix& block,
kDcAndNyquistOut(cc).Send(std::make_pair(fft_output_[0], fft_output_[1]),
timestamp);
}
Matrix fft_output_matrix =
Eigen::Map<const Matrix>(fft_output_.data() + 2, 1, fft_size_ - 2);
fft_output_matrix.conservativeResize(Eigen::NoChange, fft_size_);
// The last two elements are the DFT Nyquist values.
fft_output_matrix(fft_size_ - 2) = fft_output_[1]; // Nyquist real part
fft_output_matrix(fft_size_ - 1) = 0.0f; // Nyquist imagery part
ASSIGN_OR_RETURN(output_tensor,
ConvertToTensor(fft_output_matrix, {2, fft_size_ / 2}));
switch (dft_tensor_format_) {
case Options::WITH_NYQUIST: {
Matrix fft_output_matrix =
Eigen::Map<const Matrix>(fft_output_.data() + 2, 1, fft_size_ - 2);
fft_output_matrix.conservativeResize(Eigen::NoChange, fft_size_);
// The last two elements are Nyquist component.
fft_output_matrix(fft_size_ - 2) = fft_output_[1]; // Nyquist real part
fft_output_matrix(fft_size_ - 1) = 0.0f; // Nyquist imagery part
ASSIGN_OR_RETURN(output_tensor, ConvertToTensor(fft_output_matrix,
{2, fft_size_ / 2}));
break;
}
case Options::WITH_DC_AND_NYQUIST: {
Matrix fft_output_matrix =
Eigen::Map<const Matrix>(fft_output_.data(), 1, fft_size_);
fft_output_matrix.conservativeResize(Eigen::NoChange, fft_size_ + 2);
fft_output_matrix(1) = 0.0f; // DC imagery part.
// The last two elements are Nyquist component.
fft_output_matrix(fft_size_) = fft_output_[1]; // Nyquist real part
fft_output_matrix(fft_size_ + 1) = 0.0f; // Nyquist imagery part
ASSIGN_OR_RETURN(
output_tensor,
ConvertToTensor(fft_output_matrix, {2, (fft_size_ + 2) / 2}));
break;
}
case Options::WITHOUT_DC_AND_NYQUIST: {
Matrix fft_output_matrix =
Eigen::Map<const Matrix>(fft_output_.data() + 2, 1, fft_size_ - 2);
ASSIGN_OR_RETURN(
output_tensor,
ConvertToTensor(fft_output_matrix, {2, (fft_size_ - 2) / 2}));
break;
}
default:
return absl::InvalidArgumentError("Unsupported dft tensor format.");
}
} else {
ASSIGN_OR_RETURN(output_tensor,
ConvertToTensor(block, {num_channels_, num_samples_}));

View File

@ -68,4 +68,17 @@ message AudioToTensorCalculatorOptions {
}
optional FlushMode flush_mode = 10 [default = ENTIRE_TAIL_AT_TIMESTAMP_MAX];
// Layout of the complex DFT output tensor with respect to the DC (0 Hz)
// and Nyquist frequency components. Each component is stored as two
// consecutive values (real part, imaginary part).
enum DftTensorFormat {
  DFT_TENSOR_FORMAT_UNKNOWN = 0;
  // The output DFT tensor contains neither the DC nor the Nyquist component.
  WITHOUT_DC_AND_NYQUIST = 1;
  // The output DFT tensor contains the Nyquist component as the last
  // two values.
  WITH_NYQUIST = 2;
  // The output DFT tensor contains the DC component as the first two values
  // and the Nyquist component as the last two values.
  WITH_DC_AND_NYQUIST = 3;
}
// Selects how DC/Nyquist components are packed into the DFT output tensor.
optional DftTensorFormat dft_tensor_format = 11 [default = WITH_NYQUIST];
}

View File

@ -36,22 +36,17 @@
#include "mediapipe/framework/port/opencv_imgproc_inc.h"
#include "mediapipe/framework/port/parse_text_proto.h"
#include "mediapipe/framework/port/status_matchers.h"
#include "mediapipe/util/image_test_utils.h"
namespace mediapipe {
namespace {
// Loads the image file at `path` via OpenCV and returns its pixels in RGB
// channel order (cv::imread produces BGR).
cv::Mat GetRgb(absl::string_view path) {
  cv::Mat rgb_mat;
  const cv::Mat bgr_mat = cv::imread(file::JoinPath("./", path));
  cv::cvtColor(bgr_mat, rgb_mat, cv::COLOR_BGR2RGB);
  return rgb_mat;
}
// Workspace-relative directory that holds the input/golden images used by
// the image-to-tensor tests below.
constexpr char kTestDataDir[] =
    "/mediapipe/calculators/tensor/testdata/"
    "image_to_tensor/";
cv::Mat GetRgba(absl::string_view path) {
cv::Mat bgr = cv::imread(file::JoinPath("./", path));
cv::Mat rgb;
cv::cvtColor(bgr, rgb, cv::COLOR_BGR2RGBA);
return rgb;
std::string GetFilePath(absl::string_view filename) {
return file::JoinPath("./", kTestDataDir, filename);
}
// Image to tensor test template.
@ -147,29 +142,34 @@ void RunTestWithInputImagePacket(const Packet& input_image_packet,
ASSERT_THAT(tensor_vec, testing::SizeIs(1));
const Tensor& tensor = tensor_vec[0];
const int channels = tensor.shape().dims[3];
ASSERT_TRUE(channels == 1 || channels == 3);
auto view = tensor.GetCpuReadView();
cv::Mat tensor_mat;
if (output_int_tensor) {
if (range_min < 0) {
EXPECT_EQ(tensor.element_type(), Tensor::ElementType::kInt8);
tensor_mat = cv::Mat(tensor_height, tensor_width, CV_8SC3,
tensor_mat = cv::Mat(tensor_height, tensor_width,
channels == 1 ? CV_8SC1 : CV_8SC3,
const_cast<int8*>(view.buffer<int8>()));
} else {
EXPECT_EQ(tensor.element_type(), Tensor::ElementType::kUInt8);
tensor_mat = cv::Mat(tensor_height, tensor_width, CV_8UC3,
tensor_mat = cv::Mat(tensor_height, tensor_width,
channels == 1 ? CV_8UC1 : CV_8UC3,
const_cast<uint8*>(view.buffer<uint8>()));
}
} else {
EXPECT_EQ(tensor.element_type(), Tensor::ElementType::kFloat32);
tensor_mat = cv::Mat(tensor_height, tensor_width, CV_32FC3,
tensor_mat = cv::Mat(tensor_height, tensor_width,
channels == 1 ? CV_32FC1 : CV_32FC3,
const_cast<float*>(view.buffer<float>()));
}
cv::Mat result_rgb;
auto transformation =
GetValueRangeTransformation(range_min, range_max, 0.0f, 255.0f).value();
tensor_mat.convertTo(result_rgb, CV_8UC3, transformation.scale,
transformation.offset);
tensor_mat.convertTo(result_rgb, channels == 1 ? CV_8UC1 : CV_8UC3,
transformation.scale, transformation.offset);
cv::Mat diff;
cv::absdiff(result_rgb, expected_result, diff);
@ -185,17 +185,27 @@ void RunTestWithInputImagePacket(const Packet& input_image_packet,
MP_ASSERT_OK(graph.WaitUntilDone());
}
// Maps an OpenCV channel count to the corresponding mediapipe image format:
// 4 -> SRGBA, 3 -> SRGB, 1 -> GRAY8. Any other count is a programming error
// and aborts via CHECK.
mediapipe::ImageFormat::Format GetImageFormat(int image_channels) {
  switch (image_channels) {
    case 4:
      return ImageFormat::SRGBA;
    case 3:
      return ImageFormat::SRGB;
    case 1:
      return ImageFormat::GRAY8;
    default:
      // Fixed typo in the original message ("channles" -> "channels").
      CHECK(false) << "Unsupported input image channels: " << image_channels;
  }
}
// Wraps `input` in an ImageFrame packet at Timestamp(0). The ImageFrame
// aliases the cv::Mat's pixel data (the deleter is a no-op), so `input` must
// outlive the packet.
// NOTE(review): diff residue left two conflicting declarations of
// `input_image` here (a redefinition); kept the updated one that supports
// grayscale via GetImageFormat().
Packet MakeImageFramePacket(cv::Mat input) {
  ImageFrame input_image(GetImageFormat(input.channels()), input.cols,
                         input.rows, input.step, input.data, [](uint8*) {});
  return MakePacket<ImageFrame>(std::move(input_image)).At(Timestamp(0));
}
// Wraps `input` in a mediapipe::Image packet at Timestamp(0). The underlying
// ImageFrame aliases the cv::Mat's pixel data via a no-op deleter, so `input`
// must outlive the packet.
// NOTE(review): diff residue interleaved the old hard-coded-format argument
// list with the new GetImageFormat()-based one; kept the updated arguments.
Packet MakeImagePacket(cv::Mat input) {
  mediapipe::Image input_image(std::make_shared<mediapipe::ImageFrame>(
      GetImageFormat(input.channels()), input.cols, input.rows, input.step,
      input.data, [](uint8*) {}));
  return MakePacket<mediapipe::Image>(std::move(input_image)).At(Timestamp(0));
}
@ -237,15 +247,12 @@ TEST(ImageToTensorCalculatorTest, MediumSubRectKeepAspect) {
roi.set_width(0.5f);
roi.set_height(0.5f);
roi.set_rotation(0);
RunTest(
GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/input.jpg"),
GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/medium_sub_rect_keep_aspect.png"),
/*float_ranges=*/{{0.0f, 1.0f}},
/*int_ranges=*/{{0, 255}, {-128, 127}},
/*tensor_width=*/256, /*tensor_height=*/256, /*keep_aspect=*/true,
/*border mode*/ {}, roi);
RunTest(GetRgb(GetFilePath("input.jpg")),
GetRgb(GetFilePath("medium_sub_rect_keep_aspect.png")),
/*float_ranges=*/{{0.0f, 1.0f}},
/*int_ranges=*/{{0, 255}, {-128, 127}},
/*tensor_width=*/256, /*tensor_height=*/256, /*keep_aspect=*/true,
/*border mode*/ {}, roi);
}
TEST(ImageToTensorCalculatorTest, MediumSubRectKeepAspectBorderZero) {
@ -255,11 +262,8 @@ TEST(ImageToTensorCalculatorTest, MediumSubRectKeepAspectBorderZero) {
roi.set_width(0.5f);
roi.set_height(0.5f);
roi.set_rotation(0);
RunTest(GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/input.jpg"),
GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/"
"medium_sub_rect_keep_aspect_border_zero.png"),
RunTest(GetRgb(GetFilePath("input.jpg")),
GetRgb(GetFilePath("medium_sub_rect_keep_aspect_border_zero.png")),
/*float_ranges=*/{{0.0f, 1.0f}},
/*int_ranges=*/{{0, 255}, {-128, 127}},
/*tensor_width=*/256, /*tensor_height=*/256, /*keep_aspect=*/true,
@ -273,11 +277,8 @@ TEST(ImageToTensorCalculatorTest, MediumSubRectKeepAspectWithRotation) {
roi.set_width(0.5f);
roi.set_height(0.5f);
roi.set_rotation(M_PI * 90.0f / 180.0f);
RunTest(GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/input.jpg"),
GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/"
"medium_sub_rect_keep_aspect_with_rotation.png"),
RunTest(GetRgb(GetFilePath("input.jpg")),
GetRgb(GetFilePath("medium_sub_rect_keep_aspect_with_rotation.png")),
/*float_ranges=*/{{0.0f, 1.0f}},
/*int_ranges=*/{{0, 255}},
/*tensor_width=*/256, /*tensor_height=*/256, /*keep_aspect=*/true,
@ -292,11 +293,9 @@ TEST(ImageToTensorCalculatorTest,
roi.set_width(0.5f);
roi.set_height(0.5f);
roi.set_rotation(M_PI * 90.0f / 180.0f);
RunTest(GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/input.jpg"),
GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/"
"medium_sub_rect_keep_aspect_with_rotation_border_zero.png"),
RunTest(GetRgb(GetFilePath("input.jpg")),
GetRgb(GetFilePath(
"medium_sub_rect_keep_aspect_with_rotation_border_zero.png")),
/*float_ranges=*/{{0.0f, 1.0f}},
/*int_ranges=*/{{0, 255}, {-128, 127}},
/*tensor_width=*/256, /*tensor_height=*/256, /*keep_aspect=*/true,
@ -310,16 +309,12 @@ TEST(ImageToTensorCalculatorTest, MediumSubRectWithRotation) {
roi.set_width(0.5f);
roi.set_height(0.5f);
roi.set_rotation(M_PI * -45.0f / 180.0f);
RunTest(
GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/input.jpg"),
GetRgb(
"/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/medium_sub_rect_with_rotation.png"),
/*float_ranges=*/{{-1.0f, 1.0f}},
/*int_ranges=*/{{0, 255}, {-128, 127}},
/*tensor_width=*/256, /*tensor_height=*/256, /*keep_aspect=*/false,
BorderMode::kReplicate, roi);
RunTest(GetRgb(GetFilePath("input.jpg")),
GetRgb(GetFilePath("medium_sub_rect_with_rotation.png")),
/*float_ranges=*/{{-1.0f, 1.0f}},
/*int_ranges=*/{{0, 255}, {-128, 127}},
/*tensor_width=*/256, /*tensor_height=*/256, /*keep_aspect=*/false,
BorderMode::kReplicate, roi);
}
TEST(ImageToTensorCalculatorTest, MediumSubRectWithRotationBorderZero) {
@ -329,11 +324,8 @@ TEST(ImageToTensorCalculatorTest, MediumSubRectWithRotationBorderZero) {
roi.set_width(0.5f);
roi.set_height(0.5f);
roi.set_rotation(M_PI * -45.0f / 180.0f);
RunTest(GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/input.jpg"),
GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/"
"medium_sub_rect_with_rotation_border_zero.png"),
RunTest(GetRgb(GetFilePath("input.jpg")),
GetRgb(GetFilePath("medium_sub_rect_with_rotation_border_zero.png")),
/*float_ranges=*/{{-1.0f, 1.0f}},
/*int_ranges=*/{{0, 255}, {-128, 127}},
/*tensor_width=*/256, /*tensor_height=*/256, /*keep_aspect=*/false,
@ -347,10 +339,8 @@ TEST(ImageToTensorCalculatorTest, LargeSubRect) {
roi.set_width(1.5f);
roi.set_height(1.1f);
roi.set_rotation(0);
RunTest(GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/input.jpg"),
GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/large_sub_rect.png"),
RunTest(GetRgb(GetFilePath("input.jpg")),
GetRgb(GetFilePath("large_sub_rect.png")),
/*float_ranges=*/{{0.0f, 1.0f}},
/*int_ranges=*/{{0, 255}},
/*tensor_width=*/128, /*tensor_height=*/128, /*keep_aspect=*/false,
@ -364,15 +354,12 @@ TEST(ImageToTensorCalculatorTest, LargeSubRectBorderZero) {
roi.set_width(1.5f);
roi.set_height(1.1f);
roi.set_rotation(0);
RunTest(
GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/input.jpg"),
GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/large_sub_rect_border_zero.png"),
/*float_ranges=*/{{0.0f, 1.0f}},
/*int_ranges=*/{{0, 255}, {-128, 127}},
/*tensor_width=*/128, /*tensor_height=*/128, /*keep_aspect=*/false,
BorderMode::kZero, roi);
RunTest(GetRgb(GetFilePath("input.jpg")),
GetRgb(GetFilePath("large_sub_rect_border_zero.png")),
/*float_ranges=*/{{0.0f, 1.0f}},
/*int_ranges=*/{{0, 255}, {-128, 127}},
/*tensor_width=*/128, /*tensor_height=*/128, /*keep_aspect=*/false,
BorderMode::kZero, roi);
}
TEST(ImageToTensorCalculatorTest, LargeSubRectKeepAspect) {
@ -382,15 +369,12 @@ TEST(ImageToTensorCalculatorTest, LargeSubRectKeepAspect) {
roi.set_width(1.5f);
roi.set_height(1.1f);
roi.set_rotation(0);
RunTest(
GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/input.jpg"),
GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/large_sub_rect_keep_aspect.png"),
/*float_ranges=*/{{0.0f, 1.0f}},
/*int_ranges=*/{{0, 255}, {-128, 127}},
/*tensor_width=*/128, /*tensor_height=*/128, /*keep_aspect=*/true,
BorderMode::kReplicate, roi);
RunTest(GetRgb(GetFilePath("input.jpg")),
GetRgb(GetFilePath("large_sub_rect_keep_aspect.png")),
/*float_ranges=*/{{0.0f, 1.0f}},
/*int_ranges=*/{{0, 255}, {-128, 127}},
/*tensor_width=*/128, /*tensor_height=*/128, /*keep_aspect=*/true,
BorderMode::kReplicate, roi);
}
TEST(ImageToTensorCalculatorTest, LargeSubRectKeepAspectBorderZero) {
@ -400,11 +384,8 @@ TEST(ImageToTensorCalculatorTest, LargeSubRectKeepAspectBorderZero) {
roi.set_width(1.5f);
roi.set_height(1.1f);
roi.set_rotation(0);
RunTest(GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/input.jpg"),
GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/"
"large_sub_rect_keep_aspect_border_zero.png"),
RunTest(GetRgb(GetFilePath("input.jpg")),
GetRgb(GetFilePath("large_sub_rect_keep_aspect_border_zero.png")),
/*float_ranges=*/{{0.0f, 1.0f}},
/*int_ranges=*/{{0, 255}, {-128, 127}},
/*tensor_width=*/128, /*tensor_height=*/128, /*keep_aspect=*/true,
@ -418,11 +399,23 @@ TEST(ImageToTensorCalculatorTest, LargeSubRectKeepAspectWithRotation) {
roi.set_width(1.5f);
roi.set_height(1.1f);
roi.set_rotation(M_PI * -15.0f / 180.0f);
RunTest(GetRgba("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/input.jpg"),
GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/"
"large_sub_rect_keep_aspect_with_rotation.png"),
RunTest(GetRgba(GetFilePath("input.jpg")),
GetRgb(GetFilePath("large_sub_rect_keep_aspect_with_rotation.png")),
/*float_ranges=*/{{0.0f, 1.0f}},
/*int_ranges=*/{{0, 255}, {-128, 127}},
/*tensor_width=*/128, /*tensor_height=*/128, /*keep_aspect=*/true,
/*border_mode=*/{}, roi);
}
TEST(ImageToTensorCalculatorTest, LargeSubRectKeepAspectWithRotationGray) {
mediapipe::NormalizedRect roi;
roi.set_x_center(0.5f);
roi.set_y_center(0.5f);
roi.set_width(1.5f);
roi.set_height(1.1f);
roi.set_rotation(M_PI * -15.0f / 180.0f);
RunTest(GetGray(GetFilePath("input.jpg")),
GetGray(GetFilePath("large_sub_rect_keep_aspect_with_rotation.png")),
/*float_ranges=*/{{0.0f, 1.0f}},
/*int_ranges=*/{{0, 255}, {-128, 127}},
/*tensor_width=*/128, /*tensor_height=*/128, /*keep_aspect=*/true,
@ -437,11 +430,26 @@ TEST(ImageToTensorCalculatorTest,
roi.set_width(1.5f);
roi.set_height(1.1f);
roi.set_rotation(M_PI * -15.0f / 180.0f);
RunTest(GetRgba("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/input.jpg"),
GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/"
"large_sub_rect_keep_aspect_with_rotation_border_zero.png"),
RunTest(GetRgba(GetFilePath("input.jpg")),
GetRgb(GetFilePath(
"large_sub_rect_keep_aspect_with_rotation_border_zero.png")),
/*float_ranges=*/{{0.0f, 1.0f}},
/*int_ranges=*/{{0, 255}},
/*tensor_width=*/128, /*tensor_height=*/128, /*keep_aspect=*/true,
/*border_mode=*/BorderMode::kZero, roi);
}
TEST(ImageToTensorCalculatorTest,
LargeSubRectKeepAspectWithRotationBorderZeroGray) {
mediapipe::NormalizedRect roi;
roi.set_x_center(0.5f);
roi.set_y_center(0.5f);
roi.set_width(1.5f);
roi.set_height(1.1f);
roi.set_rotation(M_PI * -15.0f / 180.0f);
RunTest(GetGray(GetFilePath("input.jpg")),
GetGray(GetFilePath(
"large_sub_rect_keep_aspect_with_rotation_border_zero.png")),
/*float_ranges=*/{{0.0f, 1.0f}},
/*int_ranges=*/{{0, 255}},
/*tensor_width=*/128, /*tensor_height=*/128, /*keep_aspect=*/true,
@ -455,10 +463,8 @@ TEST(ImageToTensorCalculatorTest, NoOpExceptRange) {
roi.set_width(1.0f);
roi.set_height(1.0f);
roi.set_rotation(0);
RunTest(GetRgba("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/input.jpg"),
GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/noop_except_range.png"),
RunTest(GetRgba(GetFilePath("input.jpg")),
GetRgb(GetFilePath("noop_except_range.png")),
/*float_ranges=*/{{0.0f, 1.0f}},
/*int_ranges=*/{{0, 255}, {-128, 127}},
/*tensor_width=*/64, /*tensor_height=*/128, /*keep_aspect=*/true,
@ -472,10 +478,8 @@ TEST(ImageToTensorCalculatorTest, NoOpExceptRangeBorderZero) {
roi.set_width(1.0f);
roi.set_height(1.0f);
roi.set_rotation(0);
RunTest(GetRgba("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/input.jpg"),
GetRgb("/mediapipe/calculators/"
"tensor/testdata/image_to_tensor/noop_except_range.png"),
RunTest(GetRgba(GetFilePath("input.jpg")),
GetRgb(GetFilePath("noop_except_range.png")),
/*float_ranges=*/{{0.0f, 1.0f}},
/*int_ranges=*/{{0, 255}, {-128, 127}},
/*tensor_width=*/64, /*tensor_height=*/128, /*keep_aspect=*/true,

View File

@ -48,15 +48,19 @@ class OpenCvProcessor : public ImageToTensorConverter {
switch (tensor_type_) {
case Tensor::ElementType::kInt8:
mat_type_ = CV_8SC3;
mat_gray_type_ = CV_8SC1;
break;
case Tensor::ElementType::kFloat32:
mat_type_ = CV_32FC3;
mat_gray_type_ = CV_32FC1;
break;
case Tensor::ElementType::kUInt8:
mat_type_ = CV_8UC3;
mat_gray_type_ = CV_8UC1;
break;
default:
mat_type_ = -1;
mat_gray_type_ = -1;
}
}
@ -64,36 +68,57 @@ class OpenCvProcessor : public ImageToTensorConverter {
float range_min, float range_max,
int tensor_buffer_offset,
Tensor& output_tensor) override {
if (input.image_format() != mediapipe::ImageFormat::SRGB &&
input.image_format() != mediapipe::ImageFormat::SRGBA) {
return InvalidArgumentError(
absl::StrCat("Only RGBA/RGB formats are supported, passed format: ",
static_cast<uint32_t>(input.image_format())));
const bool is_supported_format =
input.image_format() == mediapipe::ImageFormat::SRGB ||
input.image_format() == mediapipe::ImageFormat::SRGBA ||
input.image_format() == mediapipe::ImageFormat::GRAY8;
if (!is_supported_format) {
return InvalidArgumentError(absl::StrCat(
"Unsupported format: ", static_cast<uint32_t>(input.image_format())));
}
// TODO: Remove the check once tensor_buffer_offset > 0 is
// supported.
RET_CHECK_EQ(tensor_buffer_offset, 0)
<< "The non-zero tensor_buffer_offset input is not supported yet.";
RET_CHECK_GE(tensor_buffer_offset, 0)
<< "The input tensor_buffer_offset needs to be non-negative.";
const auto& output_shape = output_tensor.shape();
MP_RETURN_IF_ERROR(ValidateTensorShape(output_shape));
const int output_height = output_shape.dims[1];
const int output_width = output_shape.dims[2];
const int output_channels = output_shape.dims[3];
const int num_elements_per_img =
output_height * output_width * output_channels;
auto buffer_view = output_tensor.GetCpuWriteView();
cv::Mat dst;
const int dst_data_type = output_channels == 1 ? mat_gray_type_ : mat_type_;
switch (tensor_type_) {
case Tensor::ElementType::kInt8:
dst = cv::Mat(output_height, output_width, mat_type_,
buffer_view.buffer<int8>());
RET_CHECK_GE(output_shape.num_elements(),
tensor_buffer_offset / sizeof(int8) + num_elements_per_img)
<< "The buffer offset + the input image size is larger than the "
"allocated tensor buffer.";
dst = cv::Mat(
output_height, output_width, dst_data_type,
buffer_view.buffer<int8>() + tensor_buffer_offset / sizeof(int8));
break;
case Tensor::ElementType::kFloat32:
dst = cv::Mat(output_height, output_width, mat_type_,
buffer_view.buffer<float>());
RET_CHECK_GE(
output_shape.num_elements(),
tensor_buffer_offset / sizeof(float) + num_elements_per_img)
<< "The buffer offset + the input image size is larger than the "
"allocated tensor buffer.";
dst = cv::Mat(
output_height, output_width, dst_data_type,
buffer_view.buffer<float>() + tensor_buffer_offset / sizeof(float));
break;
case Tensor::ElementType::kUInt8:
dst = cv::Mat(output_height, output_width, mat_type_,
buffer_view.buffer<uint8>());
RET_CHECK_GE(
output_shape.num_elements(),
tensor_buffer_offset / sizeof(uint8) + num_elements_per_img)
<< "The buffer offset + the input image size is larger than the "
"allocated tensor buffer.";
dst = cv::Mat(
output_height, output_width, dst_data_type,
buffer_view.buffer<uint8>() + tensor_buffer_offset / sizeof(uint8));
break;
default:
return InvalidArgumentError(
@ -137,7 +162,8 @@ class OpenCvProcessor : public ImageToTensorConverter {
auto transform,
GetValueRangeTransformation(kInputImageRangeMin, kInputImageRangeMax,
range_min, range_max));
transformed.convertTo(dst, mat_type_, transform.scale, transform.offset);
transformed.convertTo(dst, dst_data_type, transform.scale,
transform.offset);
return absl::OkStatus();
}
@ -145,10 +171,9 @@ class OpenCvProcessor : public ImageToTensorConverter {
// Validates that the output tensor is 4-D (batch, height, width, channels)
// with batch >= 1 and either 1 (grayscale) or 3 (RGB) channels.
// NOTE(review): diff residue interleaved the old batch==1 / channels==3
// checks with the relaxed ones (including a dangling, unterminated
// RET_CHECK_EQ); kept only the updated checks.
absl::Status ValidateTensorShape(const Tensor::Shape& output_shape) {
  RET_CHECK_EQ(output_shape.dims.size(), 4)
      << "Wrong output dims size: " << output_shape.dims.size();
  RET_CHECK_GE(output_shape.dims[0], 1)
      << "The batch dimension needs to be equal or larger than 1.";
  RET_CHECK(output_shape.dims[3] == 3 || output_shape.dims[3] == 1)
      << "Wrong output channel: " << output_shape.dims[3];
  return absl::OkStatus();
}
@ -156,6 +181,7 @@ class OpenCvProcessor : public ImageToTensorConverter {
enum cv::BorderTypes border_mode_;
Tensor::ElementType tensor_type_;
int mat_type_;
int mat_gray_type_;
};
} // namespace

View File

@ -253,7 +253,14 @@ int GetNumOutputChannels(const mediapipe::Image& image) {
}
#endif // MEDIAPIPE_METAL_ENABLED
#endif // !MEDIAPIPE_DISABLE_GPU
// All of the processors except for Metal expect 3 channels.
// TODO: Add a unittest here to test the behavior on GPU, i.e.
// failure.
// Only output channel == 1 when running on CPU and the input image channel
// is 1. Ideally, we want to also support GPU for output channel == 1. But
// setting this on the safer side to prevent unintentional failure.
if (!image.UsesGpu() && image.channels() == 1) {
return 1;
}
return 3;
}

View File

@ -20,6 +20,7 @@
#include "absl/memory/memory.h"
#include "absl/status/status.h"
#include "absl/strings/str_format.h"
#include "mediapipe/calculators/tensor/inference_calculator.h"
#include "mediapipe/calculators/tensor/inference_calculator.pb.h"
#include "mediapipe/framework/calculator_context.h"
@ -154,6 +155,10 @@ absl::Status InferenceCalculatorGlImpl::GpuInferenceRunner::LoadDelegate(
const auto& input_indices = interpreter_->inputs();
for (int i = 0; i < input_indices.size(); ++i) {
const TfLiteTensor* tensor = interpreter_->tensor(input_indices[i]);
RET_CHECK(tensor->dims->size > 0) << absl::StrFormat(
"Input tensor at index [%d] doesn't specify dimensions.",
input_indices[i]);
gpu_buffers_in_.emplace_back(absl::make_unique<Tensor>(
Tensor::ElementType::kFloat32,
Tensor::Shape{std::vector<int>{
@ -171,6 +176,9 @@ absl::Status InferenceCalculatorGlImpl::GpuInferenceRunner::LoadDelegate(
// Create and bind output buffers.
for (int i = 0; i < output_size_; ++i) {
const TfLiteTensor* tensor = interpreter_->tensor(output_indices[i]);
RET_CHECK(tensor->dims->size > 0) << absl::StrFormat(
"Output tensor at index [%d] doesn't specify dimensions.",
output_indices[i]);
gpu_buffers_out_.emplace_back(absl::make_unique<Tensor>(
Tensor::ElementType::kFloat32,
Tensor::Shape{std::vector<int>{

View File

@ -236,14 +236,21 @@ absl::Status InferenceCalculatorGlAdvancedImpl::OnDiskCacheHelper::Init(
const mediapipe::InferenceCalculatorOptions& options,
const mediapipe::InferenceCalculatorOptions::Delegate::Gpu&
gpu_delegate_options) {
use_kernel_caching_ = gpu_delegate_options.has_cached_kernel_path();
// The kernel cache needs a unique filename based on either model_path or the
// model token, to prevent the cache from being overwritten if the graph has
// more than one model.
use_kernel_caching_ =
gpu_delegate_options.has_cached_kernel_path() &&
(options.has_model_path() || gpu_delegate_options.has_model_token());
use_serialized_model_ = gpu_delegate_options.has_serialized_model_dir() &&
gpu_delegate_options.has_model_token();
if (use_kernel_caching_) {
cached_kernel_filename_ = gpu_delegate_options.cached_kernel_path() +
mediapipe::File::Basename(options.model_path()) +
".ker";
std::string basename = options.has_model_path()
? mediapipe::File::Basename(options.model_path())
: gpu_delegate_options.model_token();
cached_kernel_filename_ = mediapipe::file::JoinPath(
gpu_delegate_options.cached_kernel_path(), basename + ".ker");
}
if (use_serialized_model_) {
serialized_model_path_ =
@ -258,9 +265,9 @@ InferenceCalculatorGlAdvancedImpl::OnDiskCacheHelper::SaveGpuCaches(
tflite::gpu::TFLiteGPURunner* gpu_runner) const {
if (use_kernel_caching_) {
// Save kernel file.
auto kernel_cache = absl::make_unique<std::vector<uint8_t>>(
gpu_runner->GetSerializedBinaryCache());
std::string cache_str(kernel_cache->begin(), kernel_cache->end());
ASSIGN_OR_RETURN(std::vector<uint8_t> kernel_cache,
gpu_runner->GetSerializedBinaryCache());
std::string cache_str(kernel_cache.begin(), kernel_cache.end());
MP_RETURN_IF_ERROR(
mediapipe::file::SetContents(cached_kernel_filename_, cache_str));
}

View File

@ -22,6 +22,7 @@
#include <vector>
#include "absl/memory/memory.h"
#include "absl/strings/str_format.h"
#include "mediapipe/calculators/tensor/inference_calculator.h"
#import "mediapipe/gpu/MPPMetalHelper.h"
#include "mediapipe/gpu/MPPMetalUtil.h"
@ -245,6 +246,9 @@ absl::Status InferenceCalculatorMetalImpl::CreateConverters(
const auto& input_indices = interpreter_->inputs();
for (int i = 0; i < input_indices.size(); ++i) {
const TfLiteTensor* tensor = interpreter_->tensor(input_indices[i]);
RET_CHECK(tensor->dims->size > 0) << absl::StrFormat(
"Input tensor at index [%d] doesn't specify dimensions.",
input_indices[i]);
// Create and bind input buffer.
std::vector<int> dims{tensor->dims->data,
tensor->dims->data + tensor->dims->size};
@ -266,6 +270,9 @@ absl::Status InferenceCalculatorMetalImpl::CreateConverters(
output_shapes_.resize(output_indices.size());
for (int i = 0; i < output_shapes_.size(); ++i) {
const TfLiteTensor* tensor = interpreter_->tensor(output_indices[i]);
RET_CHECK(tensor->dims->size > 0) << absl::StrFormat(
"Output tensor at index [%d] doesn't specify dimensions.",
output_indices[i]);
RET_CHECK(tensor->dims->size <= 4);
// Create and bind output buffers.
// Channels are always padded to multiple of 4.

View File

@ -17,12 +17,11 @@ load("//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library"
licenses(["notice"])
# A BUILD file may declare package() at most once; the diff residue kept both
# the old (private) and new (public) defaults. Keep the updated public default.
package(default_visibility = ["//visibility:public"])
proto_library(
name = "graph_tensors_packet_generator_proto",
srcs = ["graph_tensors_packet_generator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_proto",
"//mediapipe/framework:packet_generator_proto",
@ -32,49 +31,42 @@ proto_library(
proto_library(
    name = "matrix_to_tensor_calculator_options_proto",
    srcs = ["matrix_to_tensor_calculator_options.proto"],
    # Per-target visibility dropped: redundant with the package-level
    # default_visibility = ["//visibility:public"].
    deps = ["//mediapipe/framework:calculator_proto"],
)
proto_library(
    name = "lapped_tensor_buffer_calculator_proto",
    srcs = ["lapped_tensor_buffer_calculator.proto"],
    # Visibility inherited from the package-level public default.
    deps = ["//mediapipe/framework:calculator_proto"],
)
proto_library(
    name = "object_detection_tensors_to_detections_calculator_proto",
    srcs = ["object_detection_tensors_to_detections_calculator.proto"],
    # Visibility inherited from the package-level public default.
    deps = ["//mediapipe/framework:calculator_proto"],
)
proto_library(
    name = "tensorflow_inference_calculator_proto",
    srcs = ["tensorflow_inference_calculator.proto"],
    # Visibility inherited from the package-level public default.
    deps = ["//mediapipe/framework:calculator_proto"],
)
proto_library(
    name = "tensor_squeeze_dimensions_calculator_proto",
    srcs = ["tensor_squeeze_dimensions_calculator.proto"],
    # Visibility inherited from the package-level public default.
    deps = ["//mediapipe/framework:calculator_proto"],
)
proto_library(
    name = "tensor_to_image_frame_calculator_proto",
    srcs = ["tensor_to_image_frame_calculator.proto"],
    # Visibility inherited from the package-level public default.
    deps = ["//mediapipe/framework:calculator_proto"],
)
proto_library(
name = "tensor_to_matrix_calculator_proto",
srcs = ["tensor_to_matrix_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_proto",
"//mediapipe/framework/formats:time_series_header_proto",
@ -84,30 +76,24 @@ proto_library(
proto_library(
    name = "tensor_to_vector_float_calculator_options_proto",
    srcs = ["tensor_to_vector_float_calculator_options.proto"],
    # Visibility inherited from the package-level public default.
    deps = ["//mediapipe/framework:calculator_proto"],
)
proto_library(
    name = "tensor_to_vector_int_calculator_options_proto",
    srcs = ["tensor_to_vector_int_calculator_options.proto"],
    # Visibility inherited from the package-level public default.
    deps = ["//mediapipe/framework:calculator_proto"],
)
proto_library(
    name = "tensor_to_vector_string_calculator_options_proto",
    srcs = ["tensor_to_vector_string_calculator_options.proto"],
    # Visibility inherited from the package-level public default.
    deps = ["//mediapipe/framework:calculator_proto"],
)
mediapipe_proto_library(
name = "unpack_media_sequence_calculator_proto",
srcs = ["unpack_media_sequence_calculator.proto"],
visibility = [
"//visibility:public",
],
deps = [
"//mediapipe/calculators/core:packet_resampler_calculator_proto",
"//mediapipe/framework:calculator_proto",
@ -118,14 +104,12 @@ mediapipe_proto_library(
proto_library(
    name = "vector_float_to_tensor_calculator_options_proto",
    srcs = ["vector_float_to_tensor_calculator_options.proto"],
    # Visibility inherited from the package-level public default.
    deps = ["//mediapipe/framework:calculator_proto"],
)
proto_library(
    name = "vector_string_to_tensor_calculator_options_proto",
    srcs = ["vector_string_to_tensor_calculator_options.proto"],
    # Visibility inherited from the package-level public default.
    deps = ["//mediapipe/framework:calculator_proto"],
)
@ -136,7 +120,6 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:calculator_cc_proto",
"//mediapipe/framework:packet_generator_cc_proto",
],
visibility = ["//visibility:public"],
deps = [":graph_tensors_packet_generator_proto"],
)
@ -147,7 +130,6 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:calculator_cc_proto",
"@org_tensorflow//tensorflow/core:protos_all_cc",
],
visibility = ["//visibility:public"],
deps = [":image_frame_to_tensor_calculator_proto"],
)
@ -155,7 +137,6 @@ mediapipe_cc_proto_library(
name = "matrix_to_tensor_calculator_options_cc_proto",
srcs = ["matrix_to_tensor_calculator_options.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//visibility:public"],
deps = [":matrix_to_tensor_calculator_options_proto"],
)
@ -163,7 +144,6 @@ mediapipe_cc_proto_library(
name = "lapped_tensor_buffer_calculator_cc_proto",
srcs = ["lapped_tensor_buffer_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//visibility:public"],
deps = [":lapped_tensor_buffer_calculator_proto"],
)
@ -171,7 +151,6 @@ mediapipe_cc_proto_library(
name = "object_detection_tensors_to_detections_calculator_cc_proto",
srcs = ["object_detection_tensors_to_detections_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//visibility:public"],
deps = [":object_detection_tensors_to_detections_calculator_proto"],
)
@ -179,7 +158,6 @@ mediapipe_cc_proto_library(
name = "tensorflow_inference_calculator_cc_proto",
srcs = ["tensorflow_inference_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//visibility:public"],
deps = [":tensorflow_inference_calculator_proto"],
)
@ -190,7 +168,6 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:packet_generator_cc_proto",
"@org_tensorflow//tensorflow/core:protos_all_cc",
],
visibility = ["//visibility:public"],
deps = [":tensorflow_session_from_frozen_graph_generator_proto"],
)
@ -201,7 +178,6 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:calculator_cc_proto",
"@org_tensorflow//tensorflow/core:protos_all_cc",
],
visibility = ["//visibility:public"],
deps = [":tensorflow_session_from_frozen_graph_calculator_proto"],
)
@ -212,7 +188,6 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:packet_generator_cc_proto",
"@org_tensorflow//tensorflow/core:protos_all_cc",
],
visibility = ["//visibility:public"],
deps = [":tensorflow_session_from_saved_model_generator_proto"],
)
@ -223,7 +198,6 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:calculator_cc_proto",
"@org_tensorflow//tensorflow/core:protos_all_cc",
],
visibility = ["//visibility:public"],
deps = [":tensorflow_session_from_saved_model_calculator_proto"],
)
@ -231,7 +205,6 @@ mediapipe_cc_proto_library(
name = "tensor_squeeze_dimensions_calculator_cc_proto",
srcs = ["tensor_squeeze_dimensions_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//visibility:public"],
deps = [":tensor_squeeze_dimensions_calculator_proto"],
)
@ -239,7 +212,6 @@ mediapipe_cc_proto_library(
name = "tensor_to_image_frame_calculator_cc_proto",
srcs = ["tensor_to_image_frame_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//visibility:public"],
deps = [":tensor_to_image_frame_calculator_proto"],
)
@ -250,7 +222,6 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:calculator_cc_proto",
"//mediapipe/framework/formats:time_series_header_cc_proto",
],
visibility = ["//visibility:public"],
deps = [":tensor_to_matrix_calculator_proto"],
)
@ -258,7 +229,6 @@ mediapipe_cc_proto_library(
name = "tensor_to_vector_float_calculator_options_cc_proto",
srcs = ["tensor_to_vector_float_calculator_options.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//visibility:public"],
deps = [":tensor_to_vector_float_calculator_options_proto"],
)
@ -266,7 +236,6 @@ mediapipe_cc_proto_library(
name = "tensor_to_vector_int_calculator_options_cc_proto",
srcs = ["tensor_to_vector_int_calculator_options.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//visibility:public"],
deps = [":tensor_to_vector_int_calculator_options_proto"],
)
@ -274,7 +243,6 @@ mediapipe_cc_proto_library(
name = "tensor_to_vector_string_calculator_options_cc_proto",
srcs = ["tensor_to_vector_string_calculator_options.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//visibility:public"],
deps = [":tensor_to_vector_string_calculator_options_proto"],
)
@ -285,7 +253,6 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:calculator_cc_proto",
"@org_tensorflow//tensorflow/core:protos_all_cc",
],
visibility = ["//visibility:public"],
deps = [":vector_int_to_tensor_calculator_options_proto"],
)
@ -293,7 +260,6 @@ mediapipe_cc_proto_library(
name = "vector_float_to_tensor_calculator_options_cc_proto",
srcs = ["vector_float_to_tensor_calculator_options.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//visibility:public"],
deps = [":vector_float_to_tensor_calculator_options_proto"],
)
@ -301,14 +267,12 @@ mediapipe_cc_proto_library(
name = "vector_string_to_tensor_calculator_options_cc_proto",
srcs = ["vector_string_to_tensor_calculator_options.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//visibility:public"],
deps = [":vector_string_to_tensor_calculator_options_proto"],
)
cc_library(
name = "graph_tensors_packet_generator",
srcs = ["graph_tensors_packet_generator.cc"],
visibility = ["//visibility:public"],
deps = [
":graph_tensors_packet_generator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -323,7 +287,6 @@ cc_library(
cc_library(
name = "image_frame_to_tensor_calculator",
srcs = ["image_frame_to_tensor_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":image_frame_to_tensor_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -344,10 +307,9 @@ cc_library(
cc_library(
name = "matrix_to_tensor_calculator",
srcs = ["matrix_to_tensor_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework/formats:time_series_header_cc_proto",
":matrix_to_tensor_calculator_options_cc_proto",
"//mediapipe/framework/formats:time_series_header_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:matrix",
"//mediapipe/framework/port:status",
@ -366,7 +328,6 @@ cc_library(
cc_library(
name = "lapped_tensor_buffer_calculator",
srcs = ["lapped_tensor_buffer_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":lapped_tensor_buffer_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -388,9 +349,6 @@ cc_library(
# Layering check doesn't play nicely with portable proto wrappers.
"no_layering_check",
],
visibility = [
"//visibility:public",
],
deps = [
":object_detection_tensors_to_detections_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -407,14 +365,11 @@ cc_library(
cc_library(
name = "pack_media_sequence_calculator",
srcs = ["pack_media_sequence_calculator.cc"],
visibility = [
"//visibility:public",
],
deps = [
"//mediapipe/calculators/image:opencv_image_encoder_calculator_cc_proto",
"//mediapipe/calculators/tensorflow:pack_media_sequence_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:detection_cc_proto", # build_cleaner: keep
"//mediapipe/framework/formats:detection_cc_proto",
"//mediapipe/framework/formats:location",
"//mediapipe/framework/formats:location_opencv",
"//mediapipe/framework/port:opencv_imgcodecs",
@ -432,9 +387,6 @@ cc_library(
cc_library(
name = "string_to_sequence_example_calculator",
srcs = ["string_to_sequence_example_calculator.cc"],
visibility = [
"//visibility:public",
],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/port:ret_check",
@ -449,10 +401,9 @@ cc_library(
cc_library(
name = "tensorflow_inference_calculator",
srcs = ["tensorflow_inference_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":tensorflow_session",
":tensorflow_inference_calculator_cc_proto",
":tensorflow_session",
"@com_google_absl//absl/log:check",
"//mediapipe/framework:timestamp",
"@com_google_absl//absl/base:core_headers",
@ -487,7 +438,6 @@ cc_library(
"tensorflow_session.h",
],
features = ["no_layering_check"],
visibility = ["//visibility:public"],
deps = select({
"//conditions:default": [
"@org_tensorflow//tensorflow/core:core",
@ -505,7 +455,6 @@ cc_library(
name = "tensorflow_session_from_frozen_graph_calculator",
srcs = ["tensorflow_session_from_frozen_graph_calculator.cc"],
features = ["no_layering_check"],
visibility = ["//visibility:public"],
deps = [
":tensorflow_session",
"//mediapipe/calculators/tensorflow:tensorflow_session_from_frozen_graph_calculator_cc_proto",
@ -515,6 +464,7 @@ cc_library(
"//mediapipe/framework/port:logging",
"//mediapipe/framework/port:status",
"//mediapipe/framework/port:ret_check",
"@org_tensorflow//tensorflow/core:protos_all_cc",
] + select({
"//conditions:default": [
"//mediapipe/framework/port:file_helpers",
@ -536,7 +486,6 @@ cc_library(
name = "tensorflow_session_from_frozen_graph_generator",
srcs = ["tensorflow_session_from_frozen_graph_generator.cc"],
features = ["no_layering_check"],
visibility = ["//visibility:public"],
deps = [
":tensorflow_session",
":tensorflow_session_from_frozen_graph_generator_cc_proto",
@ -546,6 +495,7 @@ cc_library(
"//mediapipe/framework/deps:clock",
"//mediapipe/framework/port:logging",
"//mediapipe/framework/port:ret_check",
"@org_tensorflow//tensorflow/core:protos_all_cc",
] + select({
"//conditions:default": [
"//mediapipe/framework/port:file_helpers",
@ -570,7 +520,6 @@ cc_library(
"//mediapipe:android": ["__ANDROID__"],
"//conditions:default": [],
}),
visibility = ["//visibility:public"],
deps = [
":tensorflow_session",
":tensorflow_session_from_saved_model_calculator_cc_proto",
@ -609,7 +558,6 @@ cc_library(
"//mediapipe:android": ["__ANDROID__"],
"//conditions:default": [],
}),
visibility = ["//visibility:public"],
deps = [
":tensorflow_session",
":tensorflow_session_from_saved_model_generator_cc_proto",
@ -635,7 +583,6 @@ cc_library(
cc_library(
name = "tensor_squeeze_dimensions_calculator",
srcs = ["tensor_squeeze_dimensions_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":tensor_squeeze_dimensions_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -649,7 +596,6 @@ cc_library(
cc_library(
name = "tensor_to_image_frame_calculator",
srcs = ["tensor_to_image_frame_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":tensor_to_image_frame_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -664,10 +610,9 @@ cc_library(
cc_library(
name = "tensor_to_matrix_calculator",
srcs = ["tensor_to_matrix_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework/formats:time_series_header_cc_proto",
":tensor_to_matrix_calculator_cc_proto",
"//mediapipe/framework/formats:time_series_header_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:matrix",
"//mediapipe/framework/port:status",
@ -686,7 +631,6 @@ cc_library(
cc_library(
name = "tfrecord_reader_calculator",
srcs = ["tfrecord_reader_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/port:integral_types",
@ -702,12 +646,11 @@ cc_library(
cc_library(
name = "tensor_to_vector_float_calculator",
srcs = ["tensor_to_vector_float_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":tensor_to_vector_float_calculator_options_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/port:status",
"//mediapipe/framework/port:ret_check",
":tensor_to_vector_float_calculator_options_cc_proto",
] + select({
"//conditions:default": [
"@org_tensorflow//tensorflow/core:framework",
@ -722,7 +665,6 @@ cc_library(
cc_library(
name = "tensor_to_vector_int_calculator",
srcs = ["tensor_to_vector_int_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":tensor_to_vector_int_calculator_options_cc_proto",
"@com_google_absl//absl/base:core_headers",
@ -744,7 +686,6 @@ cc_library(
cc_library(
name = "tensor_to_vector_string_calculator",
srcs = ["tensor_to_vector_string_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/port:status",
@ -764,9 +705,6 @@ cc_library(
cc_library(
name = "unpack_media_sequence_calculator",
srcs = ["unpack_media_sequence_calculator.cc"],
visibility = [
"//visibility:public",
],
deps = [
"//mediapipe/calculators/core:packet_resampler_calculator_cc_proto",
"//mediapipe/calculators/tensorflow:unpack_media_sequence_calculator_cc_proto",
@ -784,7 +722,6 @@ cc_library(
cc_library(
name = "vector_int_to_tensor_calculator",
srcs = ["vector_int_to_tensor_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":vector_int_to_tensor_calculator_options_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -798,7 +735,6 @@ cc_library(
cc_library(
name = "vector_float_to_tensor_calculator",
srcs = ["vector_float_to_tensor_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":vector_float_to_tensor_calculator_options_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -812,7 +748,6 @@ cc_library(
cc_library(
name = "vector_string_to_tensor_calculator",
srcs = ["vector_string_to_tensor_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":vector_string_to_tensor_calculator_options_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -826,7 +761,6 @@ cc_library(
cc_library(
name = "unpack_yt8m_sequence_example_calculator",
srcs = ["unpack_yt8m_sequence_example_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":lapped_tensor_buffer_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -1083,7 +1017,6 @@ cc_test(
linkstatic = 1,
deps = [
":tensor_to_image_frame_calculator",
":tensor_to_image_frame_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework:calculator_runner",
"//mediapipe/framework/formats:image_frame",
@ -1236,6 +1169,7 @@ cc_test(
data = [":test_frozen_graph"],
linkstatic = 1,
deps = [
":tensorflow_inference_calculator_cc_proto",
":tensorflow_session",
":tensorflow_inference_calculator",
":tensorflow_session_from_frozen_graph_generator",

View File

@ -18,12 +18,11 @@ load("@bazel_skylib//lib:selects.bzl", "selects")
licenses(["notice"])
package(default_visibility = ["//visibility:private"])
package(default_visibility = ["//visibility:public"])
mediapipe_proto_library(
name = "ssd_anchors_calculator_proto",
srcs = ["ssd_anchors_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -33,7 +32,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "tflite_custom_op_resolver_calculator_proto",
srcs = ["tflite_custom_op_resolver_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -43,7 +41,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "tflite_inference_calculator_proto",
srcs = ["tflite_inference_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -53,7 +50,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "tflite_converter_calculator_proto",
srcs = ["tflite_converter_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -63,7 +59,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "tflite_tensors_to_segmentation_calculator_proto",
srcs = ["tflite_tensors_to_segmentation_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -73,7 +68,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "tflite_tensors_to_detections_calculator_proto",
srcs = ["tflite_tensors_to_detections_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -83,7 +77,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "tflite_tensors_to_classification_calculator_proto",
srcs = ["tflite_tensors_to_classification_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -93,7 +86,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "tflite_tensors_to_landmarks_calculator_proto",
srcs = ["tflite_tensors_to_landmarks_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -103,7 +95,6 @@ mediapipe_proto_library(
cc_library(
name = "ssd_anchors_calculator",
srcs = ["ssd_anchors_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":ssd_anchors_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -117,7 +108,6 @@ cc_library(
cc_library(
name = "tflite_custom_op_resolver_calculator",
srcs = ["tflite_custom_op_resolver_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":tflite_custom_op_resolver_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -208,7 +198,6 @@ cc_library(
],
"//conditions:default": [],
}),
visibility = ["//visibility:public"],
deps = [
":tflite_inference_calculator_cc_proto",
"@com_google_absl//absl/memory",
@ -287,10 +276,9 @@ cc_library(
],
"//conditions:default": [],
}),
visibility = ["//visibility:public"],
deps = [
"//mediapipe/util/tflite:config",
":tflite_converter_calculator_cc_proto",
"//mediapipe/util/tflite:config",
"//mediapipe/util:resource_util",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:image_frame",
@ -326,7 +314,6 @@ cc_library(
cc_library(
name = "tflite_model_calculator",
srcs = ["tflite_model_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework:packet",
@ -340,7 +327,6 @@ cc_library(
cc_library(
name = "tflite_tensors_to_segmentation_calculator",
srcs = ["tflite_tensors_to_segmentation_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":tflite_tensors_to_segmentation_calculator_cc_proto",
"@com_google_absl//absl/strings:str_format",
@ -408,17 +394,16 @@ cc_library(
],
"//conditions:default": [],
}),
visibility = ["//visibility:public"],
deps = [
"//mediapipe/util/tflite:config",
":tflite_tensors_to_detections_calculator_cc_proto",
"//mediapipe/framework/formats:detection_cc_proto",
"//mediapipe/framework/formats/object_detection:anchor_cc_proto",
"//mediapipe/util/tflite:config",
"@com_google_absl//absl/strings:str_format",
"@com_google_absl//absl/types:span",
"//mediapipe/framework/deps:file_path",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:location",
"//mediapipe/framework/formats/object_detection:anchor_cc_proto",
"//mediapipe/framework/port:ret_check",
"@org_tensorflow//tensorflow/lite:framework",
] + selects.with_or({
@ -444,7 +429,6 @@ cc_library(
cc_library(
name = "tflite_tensors_to_classification_calculator",
srcs = ["tflite_tensors_to_classification_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":tflite_tensors_to_classification_calculator_cc_proto",
"@com_google_absl//absl/container:node_hash_map",
@ -476,7 +460,6 @@ cc_library(
cc_library(
name = "tflite_tensors_to_landmarks_calculator",
srcs = ["tflite_tensors_to_landmarks_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":tflite_tensors_to_landmarks_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -490,7 +473,6 @@ cc_library(
cc_library(
name = "tflite_tensors_to_floats_calculator",
srcs = ["tflite_tensors_to_floats_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/port:ret_check",

View File

@ -485,9 +485,9 @@ absl::Status TfLiteInferenceCalculator::WriteKernelsToFile() {
#if MEDIAPIPE_TFLITE_GL_INFERENCE && defined(MEDIAPIPE_ANDROID)
if (use_kernel_caching_) {
// Save kernel file.
auto kernel_cache = absl::make_unique<std::vector<uint8_t>>(
tflite_gpu_runner_->GetSerializedBinaryCache());
std::string cache_str(kernel_cache->begin(), kernel_cache->end());
ASSIGN_OR_RETURN(std::vector<uint8_t> kernel_cache,
tflite_gpu_runner_->GetSerializedBinaryCache());
std::string cache_str(kernel_cache.begin(), kernel_cache.end());
MP_RETURN_IF_ERROR(
mediapipe::file::SetContents(cached_kernel_filename_, cache_str));
}

View File

@ -21,10 +21,9 @@ package(default_visibility = ["//visibility:public"])
cc_library(
name = "alignment_points_to_rects_calculator",
srcs = ["alignment_points_to_rects_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":detections_to_rects_calculator_cc_proto",
"//mediapipe/calculators/util:detections_to_rects_calculator",
"//mediapipe/calculators/util:detections_to_rects_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework:calculator_options_cc_proto",
"//mediapipe/framework/formats:detection_cc_proto",
@ -39,7 +38,6 @@ cc_library(
mediapipe_proto_library(
name = "annotation_overlay_calculator_proto",
srcs = ["annotation_overlay_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -50,7 +48,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "detection_label_id_to_text_calculator_proto",
srcs = ["detection_label_id_to_text_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -61,7 +58,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "filter_detections_calculator_proto",
srcs = ["filter_detections_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -71,7 +67,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "timed_box_list_id_to_label_calculator_proto",
srcs = ["timed_box_list_id_to_label_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -81,13 +76,11 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "latency_proto",
srcs = ["latency.proto"],
visibility = ["//visibility:public"],
)
mediapipe_proto_library(
name = "non_max_suppression_calculator_proto",
srcs = ["non_max_suppression_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -97,13 +90,11 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "packet_frequency_proto",
srcs = ["packet_frequency.proto"],
visibility = ["//visibility:public"],
)
mediapipe_proto_library(
name = "packet_frequency_calculator_proto",
srcs = ["packet_frequency_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -113,7 +104,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "packet_latency_calculator_proto",
srcs = ["packet_latency_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -123,7 +113,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "collection_has_min_size_calculator_proto",
srcs = ["collection_has_min_size_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -133,7 +122,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "association_calculator_proto",
srcs = ["association_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -143,7 +131,6 @@ mediapipe_proto_library(
cc_library(
name = "packet_frequency_calculator",
srcs = ["packet_frequency_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/calculators/util:packet_frequency_calculator_cc_proto",
"//mediapipe/calculators/util:packet_frequency_cc_proto",
@ -188,7 +175,6 @@ cc_test(
cc_library(
name = "packet_latency_calculator",
srcs = ["packet_latency_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/calculators/util:latency_cc_proto",
"//mediapipe/calculators/util:packet_latency_calculator_cc_proto",
@ -228,9 +214,6 @@ cc_test(
cc_library(
name = "clock_timestamp_calculator",
srcs = ["clock_timestamp_calculator.cc"],
visibility = [
"//visibility:public",
],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework:timestamp",
@ -246,9 +229,6 @@ cc_library(
cc_library(
name = "clock_latency_calculator",
srcs = ["clock_latency_calculator.cc"],
visibility = [
"//visibility:public",
],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework:timestamp",
@ -263,11 +243,10 @@ cc_library(
cc_library(
name = "annotation_overlay_calculator",
srcs = ["annotation_overlay_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":annotation_overlay_calculator_cc_proto",
"//mediapipe/framework:calculator_options_cc_proto",
"//mediapipe/framework/formats:image_format_cc_proto",
"//mediapipe/framework:calculator_options_cc_proto",
"//mediapipe/util:color_cc_proto",
"@com_google_absl//absl/strings",
"//mediapipe/framework:calculator_framework",
@ -296,7 +275,6 @@ cc_library(
cc_library(
name = "detection_label_id_to_text_calculator",
srcs = ["detection_label_id_to_text_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":detection_label_id_to_text_calculator_cc_proto",
"//mediapipe/framework/formats:detection_cc_proto",
@ -328,7 +306,6 @@ cc_library(
cc_library(
name = "timed_box_list_id_to_label_calculator",
srcs = ["timed_box_list_id_to_label_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":timed_box_list_id_to_label_calculator_cc_proto",
"@com_google_absl//absl/container:node_hash_map",
@ -357,7 +334,6 @@ cc_library(
cc_library(
name = "detection_transformation_calculator",
srcs = ["detection_transformation_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/api2:node",
@ -391,7 +367,6 @@ cc_test(
cc_library(
name = "non_max_suppression_calculator",
srcs = ["non_max_suppression_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":non_max_suppression_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -408,7 +383,6 @@ cc_library(
cc_library(
name = "thresholding_calculator",
srcs = ["thresholding_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":thresholding_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -421,7 +395,6 @@ cc_library(
cc_library(
name = "detection_to_landmarks_calculator",
srcs = ["detection_to_landmarks_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:detection_cc_proto",
@ -436,7 +409,6 @@ cc_library(
cc_library(
name = "filter_detections_calculator",
srcs = ["filter_detections_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":filter_detections_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -450,7 +422,6 @@ cc_library(
cc_library(
name = "landmarks_to_detection_calculator",
srcs = ["landmarks_to_detection_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":landmarks_to_detection_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -471,7 +442,6 @@ cc_library(
hdrs = [
"detections_to_rects_calculator.h",
],
visibility = ["//visibility:public"],
deps = [
":detections_to_rects_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -489,7 +459,6 @@ cc_library(
cc_library(
name = "rect_transformation_calculator",
srcs = ["rect_transformation_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":rect_transformation_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -504,7 +473,6 @@ cc_library(
cc_library(
name = "rect_projection_calculator",
srcs = ["rect_projection_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:rect_cc_proto",
@ -535,7 +503,6 @@ cc_test(
mediapipe_proto_library(
name = "rect_to_render_data_calculator_proto",
srcs = ["rect_to_render_data_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -547,7 +514,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "rect_to_render_scale_calculator_proto",
srcs = ["rect_to_render_scale_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -557,7 +523,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "detections_to_render_data_calculator_proto",
srcs = ["detections_to_render_data_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -569,7 +534,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "landmarks_to_render_data_calculator_proto",
srcs = ["landmarks_to_render_data_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -581,7 +545,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "timed_box_list_to_render_data_calculator_proto",
srcs = ["timed_box_list_to_render_data_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -593,7 +556,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "labels_to_render_data_calculator_proto",
srcs = ["labels_to_render_data_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -605,7 +567,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "thresholding_calculator_proto",
srcs = ["thresholding_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -617,7 +578,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "detections_to_rects_calculator_proto",
srcs = ["detections_to_rects_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -627,7 +587,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "landmark_projection_calculator_proto",
srcs = ["landmark_projection_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -637,7 +596,6 @@ mediapipe_proto_library(
cc_library(
name = "landmark_visibility_calculator",
srcs = ["landmark_visibility_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:landmark_cc_proto",
@ -649,7 +607,6 @@ cc_library(
cc_library(
name = "set_landmark_visibility_calculator",
srcs = ["set_landmark_visibility_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:landmark_cc_proto",
@ -661,7 +618,6 @@ cc_library(
mediapipe_proto_library(
name = "landmarks_to_floats_calculator_proto",
srcs = ["landmarks_to_floats_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -671,7 +627,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "rect_transformation_calculator_proto",
srcs = ["rect_transformation_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -681,7 +636,6 @@ mediapipe_proto_library(
mediapipe_proto_library(
name = "landmarks_to_detection_calculator_proto",
srcs = ["landmarks_to_detection_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -693,7 +647,6 @@ mediapipe_proto_library(
cc_library(
name = "detections_to_render_data_calculator",
srcs = ["detections_to_render_data_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":detections_to_render_data_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -713,7 +666,6 @@ cc_library(
name = "landmarks_to_render_data_calculator",
srcs = ["landmarks_to_render_data_calculator.cc"],
hdrs = ["landmarks_to_render_data_calculator.h"],
visibility = ["//visibility:public"],
deps = [
":landmarks_to_render_data_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -732,7 +684,6 @@ cc_library(
cc_library(
name = "timed_box_list_to_render_data_calculator",
srcs = ["timed_box_list_to_render_data_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":timed_box_list_to_render_data_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -751,11 +702,9 @@ cc_library(
cc_library(
name = "labels_to_render_data_calculator",
srcs = ["labels_to_render_data_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":labels_to_render_data_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework:calculator_options_cc_proto",
"//mediapipe/framework/formats:classification_cc_proto",
"//mediapipe/framework/formats:video_stream_header",
"//mediapipe/framework/port:ret_check",
@ -771,7 +720,6 @@ cc_library(
cc_library(
name = "rect_to_render_data_calculator",
srcs = ["rect_to_render_data_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":rect_to_render_data_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -786,7 +734,6 @@ cc_library(
cc_library(
name = "rect_to_render_scale_calculator",
srcs = ["rect_to_render_scale_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":rect_to_render_scale_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -821,7 +768,6 @@ cc_test(
cc_library(
name = "detection_letterbox_removal_calculator",
srcs = ["detection_letterbox_removal_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:detection_cc_proto",
@ -835,7 +781,6 @@ cc_library(
cc_library(
name = "detection_projection_calculator",
srcs = ["detection_projection_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:detection_cc_proto",
@ -868,7 +813,6 @@ cc_test(
cc_library(
name = "landmark_letterbox_removal_calculator",
srcs = ["landmark_letterbox_removal_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:landmark_cc_proto",
@ -882,7 +826,6 @@ cc_library(
cc_library(
name = "landmark_projection_calculator",
srcs = ["landmark_projection_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":landmark_projection_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -915,7 +858,6 @@ cc_test(
cc_library(
name = "world_landmark_projection_calculator",
srcs = ["world_landmark_projection_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:landmark_cc_proto",
@ -929,7 +871,6 @@ cc_library(
mediapipe_proto_library(
name = "landmarks_smoothing_calculator_proto",
srcs = ["landmarks_smoothing_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -939,7 +880,6 @@ mediapipe_proto_library(
cc_library(
name = "landmarks_smoothing_calculator",
srcs = ["landmarks_smoothing_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":landmarks_smoothing_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -957,7 +897,6 @@ cc_library(
mediapipe_proto_library(
name = "visibility_smoothing_calculator_proto",
srcs = ["visibility_smoothing_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -967,7 +906,6 @@ mediapipe_proto_library(
cc_library(
name = "visibility_smoothing_calculator",
srcs = ["visibility_smoothing_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":visibility_smoothing_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -983,7 +921,6 @@ cc_library(
mediapipe_proto_library(
name = "visibility_copy_calculator_proto",
srcs = ["visibility_copy_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -993,7 +930,6 @@ mediapipe_proto_library(
cc_library(
name = "visibility_copy_calculator",
srcs = ["visibility_copy_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":visibility_copy_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -1008,7 +944,6 @@ cc_library(
cc_library(
name = "landmarks_to_floats_calculator",
srcs = ["landmarks_to_floats_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":landmarks_to_floats_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -1055,7 +990,6 @@ cc_test(
mediapipe_proto_library(
name = "top_k_scores_calculator_proto",
srcs = ["top_k_scores_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -1065,7 +999,6 @@ mediapipe_proto_library(
cc_library(
name = "top_k_scores_calculator",
srcs = ["top_k_scores_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":top_k_scores_calculator_cc_proto",
"@com_google_absl//absl/container:node_hash_map",
@ -1109,7 +1042,6 @@ cc_test(
mediapipe_proto_library(
name = "local_file_contents_calculator_proto",
srcs = ["local_file_contents_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -1119,7 +1051,6 @@ mediapipe_proto_library(
cc_library(
name = "local_file_contents_calculator",
srcs = ["local_file_contents_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":local_file_contents_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -1133,7 +1064,6 @@ cc_library(
cc_library(
name = "local_file_pattern_contents_calculator",
srcs = ["local_file_pattern_contents_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/port:file_helpers",
@ -1147,7 +1077,6 @@ cc_library(
name = "filter_collection_calculator",
srcs = ["filter_collection_calculator.cc"],
hdrs = ["filter_collection_calculator.h"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:classification_cc_proto",
@ -1165,7 +1094,6 @@ cc_library(
name = "collection_has_min_size_calculator",
srcs = ["collection_has_min_size_calculator.cc"],
hdrs = ["collection_has_min_size_calculator.h"],
visibility = ["//visibility:public"],
deps = [
":collection_has_min_size_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -1193,7 +1121,6 @@ cc_test(
cc_library(
name = "association_calculator",
hdrs = ["association_calculator.h"],
visibility = ["//visibility:public"],
deps = [
":association_calculator_cc_proto",
"//mediapipe/framework:calculator_context",
@ -1210,7 +1137,6 @@ cc_library(
cc_library(
name = "association_norm_rect_calculator",
srcs = ["association_norm_rect_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":association_calculator",
"//mediapipe/framework:calculator_context",
@ -1225,7 +1151,6 @@ cc_library(
cc_library(
name = "association_detection_calculator",
srcs = ["association_detection_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":association_calculator",
"//mediapipe/framework:calculator_context",
@ -1260,7 +1185,6 @@ cc_test(
cc_library(
name = "detections_to_timed_box_list_calculator",
srcs = ["detections_to_timed_box_list_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:detection_cc_proto",
@ -1275,7 +1199,6 @@ cc_library(
cc_library(
name = "detection_unique_id_calculator",
srcs = ["detection_unique_id_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:detection_cc_proto",
@ -1288,7 +1211,6 @@ cc_library(
mediapipe_proto_library(
name = "logic_calculator_proto",
srcs = ["logic_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -1298,7 +1220,6 @@ mediapipe_proto_library(
cc_library(
name = "logic_calculator",
srcs = ["logic_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":logic_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -1311,10 +1232,9 @@ cc_library(
cc_library(
name = "to_image_calculator",
srcs = ["to_image_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_cc_proto",
"//mediapipe/framework/formats:image_format_cc_proto",
"//mediapipe/framework:calculator_options_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/api2:node",
"//mediapipe/framework/formats:image_frame",
@ -1334,10 +1254,9 @@ cc_library(
cc_library(
name = "from_image_calculator",
srcs = ["from_image_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_cc_proto",
"//mediapipe/framework/formats:image_format_cc_proto",
"//mediapipe/framework:calculator_options_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:image_frame",
"//mediapipe/framework/formats:image",
@ -1386,7 +1305,6 @@ cc_test(
mediapipe_proto_library(
name = "refine_landmarks_from_heatmap_calculator_proto",
srcs = ["refine_landmarks_from_heatmap_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -1404,7 +1322,6 @@ cc_library(
],
"//conditions:default": [],
}),
visibility = ["//visibility:public"],
deps = [
":refine_landmarks_from_heatmap_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -1455,7 +1372,6 @@ cc_library(
name = "inverse_matrix_calculator",
srcs = ["inverse_matrix_calculator.cc"],
hdrs = ["inverse_matrix_calculator.h"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/api2:node",

View File

@ -21,19 +21,17 @@ load(
licenses(["notice"])
package(default_visibility = ["//visibility:private"])
package(default_visibility = ["//visibility:public"])
proto_library(
name = "flow_to_image_calculator_proto",
srcs = ["flow_to_image_calculator.proto"],
visibility = ["//visibility:public"],
deps = ["//mediapipe/framework:calculator_proto"],
)
proto_library(
name = "opencv_video_encoder_calculator_proto",
srcs = ["opencv_video_encoder_calculator.proto"],
visibility = ["//visibility:public"],
deps = ["//mediapipe/framework:calculator_proto"],
)
@ -58,7 +56,6 @@ proto_library(
proto_library(
name = "box_tracker_calculator_proto",
srcs = ["box_tracker_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_proto",
"//mediapipe/util/tracking:box_tracker_proto",
@ -68,7 +65,6 @@ proto_library(
proto_library(
name = "tracked_detection_manager_calculator_proto",
srcs = ["tracked_detection_manager_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_proto",
"//mediapipe/util/tracking:tracked_detection_manager_config_proto",
@ -78,7 +74,6 @@ proto_library(
proto_library(
name = "box_detector_calculator_proto",
srcs = ["box_detector_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_proto",
"//mediapipe/util/tracking:box_detector_proto",
@ -88,7 +83,6 @@ proto_library(
proto_library(
name = "video_pre_stream_calculator_proto",
srcs = ["video_pre_stream_calculator.proto"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_proto",
],
@ -101,7 +95,6 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:calculator_cc_proto",
"//mediapipe/util/tracking:motion_analysis_cc_proto",
],
visibility = ["//visibility:public"],
deps = [":motion_analysis_calculator_proto"],
)
@ -112,7 +105,6 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:calculator_cc_proto",
"//mediapipe/util/tracking:flow_packager_cc_proto",
],
visibility = ["//visibility:public"],
deps = [":flow_packager_calculator_proto"],
)
@ -123,7 +115,6 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:calculator_cc_proto",
"//mediapipe/util/tracking:box_tracker_cc_proto",
],
visibility = ["//visibility:public"],
deps = [":box_tracker_calculator_proto"],
)
@ -134,7 +125,6 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:calculator_cc_proto",
"//mediapipe/util/tracking:tracked_detection_manager_config_cc_proto",
],
visibility = ["//visibility:public"],
deps = [":tracked_detection_manager_calculator_proto"],
)
@ -145,7 +135,6 @@ mediapipe_cc_proto_library(
"//mediapipe/framework:calculator_cc_proto",
"//mediapipe/util/tracking:box_detector_cc_proto",
],
visibility = ["//visibility:public"],
deps = [":box_detector_calculator_proto"],
)
@ -155,7 +144,6 @@ mediapipe_cc_proto_library(
cc_deps = [
"//mediapipe/framework:calculator_cc_proto",
],
visibility = ["//visibility:public"],
deps = [":video_pre_stream_calculator_proto"],
)
@ -163,7 +151,6 @@ mediapipe_cc_proto_library(
name = "flow_to_image_calculator_cc_proto",
srcs = ["flow_to_image_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//visibility:public"],
deps = [":flow_to_image_calculator_proto"],
)
@ -171,14 +158,12 @@ mediapipe_cc_proto_library(
name = "opencv_video_encoder_calculator_cc_proto",
srcs = ["opencv_video_encoder_calculator.proto"],
cc_deps = ["//mediapipe/framework:calculator_cc_proto"],
visibility = ["//visibility:public"],
deps = [":opencv_video_encoder_calculator_proto"],
)
cc_library(
name = "flow_to_image_calculator",
srcs = ["flow_to_image_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":flow_to_image_calculator_cc_proto",
"//mediapipe/calculators/video/tool:flow_quantizer_model",
@ -198,7 +183,6 @@ cc_library(
cc_library(
name = "opencv_video_decoder_calculator",
srcs = ["opencv_video_decoder_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:image_format_cc_proto",
@ -217,7 +201,6 @@ cc_library(
cc_library(
name = "opencv_video_encoder_calculator",
srcs = ["opencv_video_encoder_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":opencv_video_encoder_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -240,7 +223,6 @@ cc_library(
cc_library(
name = "tvl1_optical_flow_calculator",
srcs = ["tvl1_optical_flow_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:image_frame",
@ -256,7 +238,6 @@ cc_library(
cc_library(
name = "motion_analysis_calculator",
srcs = ["motion_analysis_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":motion_analysis_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -282,7 +263,6 @@ cc_library(
cc_library(
name = "flow_packager_calculator",
srcs = ["flow_packager_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":flow_packager_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -300,7 +280,6 @@ cc_library(
cc_library(
name = "box_tracker_calculator",
srcs = ["box_tracker_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":box_tracker_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -327,7 +306,6 @@ cc_library(
cc_library(
name = "box_detector_calculator",
srcs = ["box_detector_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":box_detector_calculator_cc_proto",
"@com_google_absl//absl/memory",
@ -342,12 +320,12 @@ cc_library(
"//mediapipe/framework/port:opencv_features2d",
"//mediapipe/framework/port:ret_check",
"//mediapipe/framework/port:status",
"//mediapipe/util/tracking:box_tracker_cc_proto",
"//mediapipe/util/tracking:flow_packager_cc_proto",
"//mediapipe/util:resource_util",
"//mediapipe/util/tracking",
"//mediapipe/util/tracking:box_detector",
"//mediapipe/util/tracking:box_tracker",
"//mediapipe/util/tracking:box_tracker_cc_proto",
"//mediapipe/util/tracking:flow_packager_cc_proto",
"//mediapipe/util/tracking:tracking_visualization_utilities",
] + select({
"//mediapipe:android": [
@ -369,7 +347,6 @@ cc_library(
cc_library(
name = "tracked_detection_manager_calculator",
srcs = ["tracked_detection_manager_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":tracked_detection_manager_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -390,7 +367,6 @@ cc_library(
cc_library(
name = "video_pre_stream_calculator",
srcs = ["video_pre_stream_calculator.cc"],
visibility = ["//visibility:public"],
deps = [
":video_pre_stream_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
@ -407,7 +383,6 @@ filegroup(
"testdata/format_MKV_VP8_VORBIS.video",
"testdata/format_MP4_AVC720P_AAC.video",
],
visibility = ["//visibility:public"],
)
cc_test(
@ -480,7 +455,6 @@ mediapipe_binary_graph(
name = "parallel_tracker_binarypb",
graph = "testdata/parallel_tracker_graph.pbtxt",
output_name = "testdata/parallel_tracker.binarypb",
visibility = ["//visibility:public"],
deps = [
":box_tracker_calculator",
":flow_packager_calculator",
@ -494,7 +468,6 @@ mediapipe_binary_graph(
name = "tracker_binarypb",
graph = "testdata/tracker_graph.pbtxt",
output_name = "testdata/tracker.binarypb",
visibility = ["//visibility:public"],
deps = [
":box_tracker_calculator",
":flow_packager_calculator",

View File

@ -14,12 +14,11 @@
licenses(["notice"])
package(default_visibility = ["//mediapipe/examples:__subpackages__"])
package(default_visibility = ["//visibility:public"])
cc_binary(
name = "hello_world",
srcs = ["hello_world.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/calculators/core:pass_through_calculator",
"//mediapipe/framework:calculator_graph",

View File

@ -139,7 +139,7 @@ mediapipe_proto_library(
name = "test_calculators_proto",
testonly = 1,
srcs = ["test_calculators.proto"],
visibility = ["//visibility:public"],
visibility = [":mediapipe_internal"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@ -1039,7 +1039,6 @@ cc_library(
":graph_service_manager",
":port",
"//mediapipe/framework:calculator_cc_proto",
"//mediapipe/framework:mediapipe_options_cc_proto",
"//mediapipe/framework/deps:registration",
"//mediapipe/framework/port:ret_check",
"//mediapipe/framework/port:status",
@ -1469,6 +1468,7 @@ cc_test(
"//mediapipe/framework/stream_handler:mux_input_stream_handler",
"//mediapipe/framework/stream_handler:sync_set_input_stream_handler",
"//mediapipe/framework/tool:sink",
"//mediapipe/util:packet_test_util",
"@com_google_absl//absl/strings",
],
)
@ -1659,9 +1659,6 @@ cc_test(
"//mediapipe/calculators/core:constant_side_packet_calculator",
"//mediapipe/calculators/core:default_side_packet_calculator",
"//mediapipe/calculators/core:pass_through_calculator",
"//mediapipe/framework:calculator_cc_proto",
"//mediapipe/framework:packet_generator_cc_proto",
"//mediapipe/framework:status_handler_cc_proto",
"//mediapipe/framework/port:gtest_main",
"//mediapipe/framework/port:parse_text_proto",
"//mediapipe/framework/tool:template_parser",

View File

@ -412,11 +412,11 @@ using GenericNode = Node<internal::Generic>;
template <class Calc>
class Node : public NodeBase {
public:
Node() : NodeBase(Calc::kCalculatorName) {}
Node() : NodeBase(std::string(Calc::kCalculatorName)) {}
// Overrides the built-in calculator type string with the provided argument.
// Can be used to create nodes from pure interfaces.
// TODO: only use this for pure interfaces
Node(const std::string& type_override) : NodeBase(type_override) {}
Node(std::string type_override) : NodeBase(std::move(type_override)) {}
// These methods only allow access to ports declared in the contract.
// The argument must be a tag object created with the MPP_TAG macro.

View File

@ -98,14 +98,13 @@ void CalculatorGraph::GraphInputStream::SetHeader(const Packet& header) {
manager_->LockIntroData();
}
void CalculatorGraph::GraphInputStream::SetNextTimestampBound(
Timestamp timestamp) {
shard_.SetNextTimestampBound(timestamp);
}
void CalculatorGraph::GraphInputStream::PropagateUpdatesToMirrors() {
// Since GraphInputStream doesn't allow SetOffset() and
// SetNextTimestampBound(), the timestamp bound to propagate is only
// determined by the timestamp of the output packets.
CHECK(!shard_.IsEmpty()) << "Shard with name \"" << manager_->Name()
<< "\" failed";
manager_->PropagateUpdatesToMirrors(
shard_.LastAddedPacketTimestamp().NextAllowedInStream(), &shard_);
manager_->PropagateUpdatesToMirrors(shard_.NextTimestampBound(), &shard_);
}
void CalculatorGraph::GraphInputStream::Close() {
@ -868,6 +867,19 @@ absl::Status CalculatorGraph::AddPacketToInputStream(
return AddPacketToInputStreamInternal(stream_name, std::move(packet));
}
absl::Status CalculatorGraph::SetInputStreamTimestampBound(
const std::string& stream_name, Timestamp timestamp) {
std::unique_ptr<GraphInputStream>* stream =
mediapipe::FindOrNull(graph_input_streams_, stream_name);
RET_CHECK(stream).SetNoLogging() << absl::Substitute(
"SetInputStreamTimestampBound called on input stream \"$0\" which is not "
"a graph input stream.",
stream_name);
(*stream)->SetNextTimestampBound(timestamp);
(*stream)->PropagateUpdatesToMirrors();
return absl::OkStatus();
}
// We avoid having two copies of this code for AddPacketToInputStream(
// const Packet&) and AddPacketToInputStream(Packet &&) by having this
// internal-only templated version. T&& is a forwarding reference here, so

View File

@ -257,6 +257,10 @@ class CalculatorGraph {
absl::Status AddPacketToInputStream(const std::string& stream_name,
Packet&& packet);
// Indicates that input will arrive no earlier than a certain timestamp.
absl::Status SetInputStreamTimestampBound(const std::string& stream_name,
Timestamp timestamp);
// Sets the queue size of a graph input stream, overriding the graph default.
absl::Status SetInputStreamMaxQueueSize(const std::string& stream_name,
int max_queue_size);
@ -425,6 +429,8 @@ class CalculatorGraph {
void AddPacket(Packet&& packet) { shard_.AddPacket(std::move(packet)); }
void SetNextTimestampBound(Timestamp timestamp);
void PropagateUpdatesToMirrors();
void Close();

View File

@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include <vector>
#include "absl/strings/str_replace.h"
#include "mediapipe/framework/calculator_context.h"
#include "mediapipe/framework/calculator_framework.h"
@ -24,6 +26,7 @@
#include "mediapipe/framework/port/status_matchers.h"
#include "mediapipe/framework/thread_pool_executor.h"
#include "mediapipe/framework/timestamp.h"
#include "mediapipe/util/packet_test_util.h"
namespace mediapipe {
namespace {
@ -1536,7 +1539,7 @@ class EmptyPacketCalculator : public CalculatorBase {
};
REGISTER_CALCULATOR(EmptyPacketCalculator);
// This test shows that an output timestamp bound can be specified by outputing
// This test shows that an output timestamp bound can be specified by outputting
// an empty packet with a settled timestamp.
TEST(CalculatorGraphBoundsTest, EmptyPacketOutput) {
// OffsetAndBoundCalculator runs on parallel threads and sends ts
@ -1580,6 +1583,195 @@ TEST(CalculatorGraphBoundsTest, EmptyPacketOutput) {
EXPECT_EQ(output_0_packets[i].Timestamp(), Timestamp(10 + i * 10));
}
// Shut down the graph.
MP_ASSERT_OK(graph.CloseAllPacketSources());
MP_ASSERT_OK(graph.WaitUntilDone());
}
// This test shows that input timestamp bounds can be specified using
// CalculatorGraph::SetInputStreamTimestampBound.
TEST(CalculatorGraphBoundsTest, SetInputStreamTimestampBound) {
std::string config_str = R"(
input_stream: "input_0"
node {
calculator: "ProcessBoundToPacketCalculator"
input_stream: "input_0"
output_stream: "output_0"
}
)";
CalculatorGraphConfig config =
mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(config_str);
CalculatorGraph graph;
std::vector<Packet> output_0_packets;
MP_ASSERT_OK(graph.Initialize(config));
MP_ASSERT_OK(graph.ObserveOutputStream("output_0", [&](const Packet& p) {
output_0_packets.push_back(p);
return absl::OkStatus();
}));
MP_ASSERT_OK(graph.StartRun({}));
MP_ASSERT_OK(graph.WaitUntilIdle());
// Send in timestamp bounds.
for (int i = 0; i < 9; ++i) {
const int ts = 10 + i * 10;
MP_ASSERT_OK(graph.SetInputStreamTimestampBound(
"input_0", Timestamp(ts).NextAllowedInStream()));
MP_ASSERT_OK(graph.WaitUntilIdle());
}
// 9 timestamp bounds are converted to packets.
EXPECT_EQ(output_0_packets.size(), 9);
for (int i = 0; i < 9; ++i) {
EXPECT_EQ(output_0_packets[i].Timestamp(), Timestamp(10 + i * 10));
}
// Shutdown the graph.
MP_ASSERT_OK(graph.CloseAllPacketSources());
MP_ASSERT_OK(graph.WaitUntilDone());
}
// This test shows how an input stream with infrequent packets, such as
// configuration protobufs, can be consumed while processing more frequent
// packets, such as video frames.
TEST(CalculatorGraphBoundsTest, TimestampBoundsForInfrequentInput) {
// PassThroughCalculator consuming two input streams, with default ISH.
std::string config_str = R"pb(
input_stream: "INFREQUENT:config"
input_stream: "FREQUENT:frame"
node {
calculator: "PassThroughCalculator"
input_stream: "CONFIG:config"
input_stream: "VIDEO:frame"
output_stream: "VIDEO:output_frame"
output_stream: "CONFIG:output_config"
}
)pb";
CalculatorGraphConfig config =
mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(config_str);
CalculatorGraph graph;
std::vector<Packet> frame_packets;
MP_ASSERT_OK(graph.Initialize(config));
MP_ASSERT_OK(graph.ObserveOutputStream(
"output_frame",
[&](const Packet& p) {
frame_packets.push_back(p);
return absl::OkStatus();
},
/*observe_bound_updates=*/true));
std::vector<Packet> config_packets;
MP_ASSERT_OK(graph.ObserveOutputStream(
"output_config",
[&](const Packet& p) {
config_packets.push_back(p);
return absl::OkStatus();
},
/*observe_bound_updates=*/true));
MP_ASSERT_OK(graph.StartRun({}));
MP_ASSERT_OK(graph.WaitUntilIdle());
// Utility functions to send packets or timestamp bounds.
auto send_fn = [&](std::string stream, std::string value, int ts) {
MP_ASSERT_OK(graph.AddPacketToInputStream(
stream,
MakePacket<std::string>(absl::StrCat(value)).At(Timestamp(ts))));
MP_ASSERT_OK(graph.WaitUntilIdle());
};
auto bound_fn = [&](std::string stream, int ts) {
MP_ASSERT_OK(graph.SetInputStreamTimestampBound(stream, Timestamp(ts)));
MP_ASSERT_OK(graph.WaitUntilIdle());
};
// Send in a frame packet.
send_fn("frame", "frame_0", 0);
// The frame is not processed yet.
EXPECT_THAT(frame_packets, ElementsAreArray(PacketMatchers<std::string>({})));
bound_fn("config", 10000);
// The frame is processed after a fresh config timestamp bound arrives.
EXPECT_THAT(frame_packets,
ElementsAreArray(PacketMatchers<std::string>({
MakePacket<std::string>("frame_0").At(Timestamp(0)),
})));
// Send in a frame packet.
send_fn("frame", "frame_1", 20000);
// The frame is not processed yet.
// The PassThroughCalculator with TimestampOffset 0 now propagates
// Timestamp bound 10000 to both "output_frame" and "output_config",
// which appears here as Packet().At(Timestamp(9999)). The timestamp
// bounds at 29999 and 50000 are propagated similarly.
EXPECT_THAT(frame_packets,
ElementsAreArray(PacketMatchers<std::string>({
MakePacket<std::string>("frame_0").At(Timestamp(0)),
Packet().At(Timestamp(9999)),
})));
bound_fn("config", 30000);
// The frame is processed after a fresh config timestamp bound arrives.
EXPECT_THAT(frame_packets,
ElementsAreArray(PacketMatchers<std::string>({
MakePacket<std::string>("frame_0").At(Timestamp(0)),
Packet().At(Timestamp(9999)),
MakePacket<std::string>("frame_1").At(Timestamp(20000)),
})));
// Send in a frame packet.
send_fn("frame", "frame_2", 40000);
// The frame is not processed yet.
EXPECT_THAT(frame_packets,
ElementsAreArray(PacketMatchers<std::string>({
MakePacket<std::string>("frame_0").At(Timestamp(0)),
Packet().At(Timestamp(9999)),
MakePacket<std::string>("frame_1").At(Timestamp(20000)),
Packet().At(Timestamp(29999)),
})));
send_fn("config", "config_1", 50000);
// The frame is processed after a fresh config arrives.
EXPECT_THAT(frame_packets,
ElementsAreArray(PacketMatchers<std::string>({
MakePacket<std::string>("frame_0").At(Timestamp(0)),
Packet().At(Timestamp(9999)),
MakePacket<std::string>("frame_1").At(Timestamp(20000)),
Packet().At(Timestamp(29999)),
MakePacket<std::string>("frame_2").At(Timestamp(40000)),
})));
// Send in a frame packet.
send_fn("frame", "frame_3", 60000);
// The frame is not processed yet.
EXPECT_THAT(frame_packets,
ElementsAreArray(PacketMatchers<std::string>({
MakePacket<std::string>("frame_0").At(Timestamp(0)),
Packet().At(Timestamp(9999)),
MakePacket<std::string>("frame_1").At(Timestamp(20000)),
Packet().At(Timestamp(29999)),
MakePacket<std::string>("frame_2").At(Timestamp(40000)),
Packet().At(Timestamp(50000)),
})));
bound_fn("config", 70000);
// The frame is processed after a fresh config timestamp bound arrives.
EXPECT_THAT(frame_packets,
ElementsAreArray(PacketMatchers<std::string>({
MakePacket<std::string>("frame_0").At(Timestamp(0)),
Packet().At(Timestamp(9999)),
MakePacket<std::string>("frame_1").At(Timestamp(20000)),
Packet().At(Timestamp(29999)),
MakePacket<std::string>("frame_2").At(Timestamp(40000)),
Packet().At(Timestamp(50000)),
MakePacket<std::string>("frame_3").At(Timestamp(60000)),
})));
// One config packet is delivered.
EXPECT_THAT(config_packets,
ElementsAreArray(PacketMatchers<std::string>({
Packet().At(Timestamp(0)),
Packet().At(Timestamp(9999)),
Packet().At(Timestamp(20000)),
Packet().At(Timestamp(29999)),
Packet().At(Timestamp(40000)),
MakePacket<std::string>("config_1").At(Timestamp(50000)),
Packet().At(Timestamp(60000)),
})));
// Shutdown the graph.
MP_ASSERT_OK(graph.CloseAllPacketSources());
MP_ASSERT_OK(graph.WaitUntilDone());

View File

@ -225,6 +225,7 @@ cc_library(
"//mediapipe/framework/port:status",
"//mediapipe/framework/port:statusor",
"@com_google_absl//absl/base:core_headers",
"@com_google_absl//absl/container:flat_hash_map",
"@com_google_absl//absl/container:flat_hash_set",
"@com_google_absl//absl/meta:type_traits",
"@com_google_absl//absl/strings",

View File

@ -26,10 +26,12 @@
#include "absl/base/macros.h"
#include "absl/base/thread_annotations.h"
#include "absl/container/flat_hash_map.h"
#include "absl/container/flat_hash_set.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/str_join.h"
#include "absl/strings/str_split.h"
#include "absl/strings/string_view.h"
#include "absl/synchronization/mutex.h"
#include "mediapipe/framework/deps/registration_token.h"
#include "mediapipe/framework/port/canonical_errors.h"
@ -159,7 +161,7 @@ class FunctionRegistry {
FunctionRegistry(const FunctionRegistry&) = delete;
FunctionRegistry& operator=(const FunctionRegistry&) = delete;
RegistrationToken Register(const std::string& name, Function func)
RegistrationToken Register(absl::string_view name, Function func)
ABSL_LOCKS_EXCLUDED(lock_) {
std::string normalized_name = GetNormalizedName(name);
absl::WriterMutexLock lock(&lock_);
@ -189,14 +191,15 @@ class FunctionRegistry {
absl::enable_if_t<std::is_convertible<std::tuple<Args2...>,
std::tuple<Args...>>::value,
int> = 0>
ReturnType Invoke(const std::string& name, Args2&&... args)
ReturnType Invoke(absl::string_view name, Args2&&... args)
ABSL_LOCKS_EXCLUDED(lock_) {
Function function;
{
absl::ReaderMutexLock lock(&lock_);
auto it = functions_.find(name);
if (it == functions_.end()) {
return absl::NotFoundError("No registered object with name: " + name);
return absl::NotFoundError(
absl::StrCat("No registered object with name: ", name));
}
function = it->second;
}
@ -206,7 +209,7 @@ class FunctionRegistry {
// Invokes the specified factory function and returns the result.
// Namespaces in |name| and |ns| are separated by kNameSep.
template <typename... Args2>
ReturnType Invoke(const std::string& ns, const std::string& name,
ReturnType Invoke(absl::string_view ns, absl::string_view name,
Args2&&... args) ABSL_LOCKS_EXCLUDED(lock_) {
return Invoke(GetQualifiedName(ns, name), args...);
}
@ -214,14 +217,14 @@ class FunctionRegistry {
// Note that it's possible for registered implementations to be subsequently
// unregistered, though this will never happen with registrations made via
// MEDIAPIPE_REGISTER_FACTORY_FUNCTION.
bool IsRegistered(const std::string& name) const ABSL_LOCKS_EXCLUDED(lock_) {
bool IsRegistered(absl::string_view name) const ABSL_LOCKS_EXCLUDED(lock_) {
absl::ReaderMutexLock lock(&lock_);
return functions_.count(name) != 0;
}
// Returns true if the specified factory function is available.
// Namespaces in |name| and |ns| are separated by kNameSep.
bool IsRegistered(const std::string& ns, const std::string& name) const
bool IsRegistered(absl::string_view ns, absl::string_view name) const
ABSL_LOCKS_EXCLUDED(lock_) {
return IsRegistered(GetQualifiedName(ns, name));
}
@ -244,7 +247,7 @@ class FunctionRegistry {
// Normalizes a C++ qualified name. Validates the name qualification.
// The name must be either unqualified or fully qualified with a leading "::".
// The leading "::" in a fully qualified name is stripped.
std::string GetNormalizedName(const std::string& name) {
std::string GetNormalizedName(absl::string_view name) {
using ::mediapipe::registration_internal::kCxxSep;
std::vector<std::string> names = absl::StrSplit(name, kCxxSep);
if (names[0].empty()) {
@ -259,8 +262,8 @@ class FunctionRegistry {
// Returns the registry key for a name specified within a namespace.
// Namespaces are separated by kNameSep.
std::string GetQualifiedName(const std::string& ns,
const std::string& name) const {
std::string GetQualifiedName(absl::string_view ns,
absl::string_view name) const {
using ::mediapipe::registration_internal::kCxxSep;
using ::mediapipe::registration_internal::kNameSep;
std::vector<std::string> names = absl::StrSplit(name, kNameSep);
@ -287,10 +290,10 @@ class FunctionRegistry {
private:
mutable absl::Mutex lock_;
std::unordered_map<std::string, Function> functions_ ABSL_GUARDED_BY(lock_);
absl::flat_hash_map<std::string, Function> functions_ ABSL_GUARDED_BY(lock_);
// For names included in NamespaceAllowlist, strips the namespace.
std::string GetAdjustedName(const std::string& name) {
std::string GetAdjustedName(absl::string_view name) {
using ::mediapipe::registration_internal::kCxxSep;
std::vector<std::string> names = absl::StrSplit(name, kCxxSep);
std::string base_name = names.back();
@ -299,10 +302,10 @@ class FunctionRegistry {
if (NamespaceAllowlist::TopNamespaces().count(ns)) {
return base_name;
}
return name;
return std::string(name);
}
void Unregister(const std::string& name) {
void Unregister(absl::string_view name) {
absl::WriterMutexLock lock(&lock_);
std::string adjusted_name = GetAdjustedName(name);
if (adjusted_name != name) {
@ -317,7 +320,7 @@ class GlobalFactoryRegistry {
using Functions = FunctionRegistry<R, Args...>;
public:
static RegistrationToken Register(const std::string& name,
static RegistrationToken Register(absl::string_view name,
typename Functions::Function func) {
return functions()->Register(name, std::move(func));
}
@ -326,7 +329,7 @@ class GlobalFactoryRegistry {
// If using namespaces with this registry, the variant with a namespace
// argument should be used.
template <typename... Args2>
static typename Functions::ReturnType CreateByName(const std::string& name,
static typename Functions::ReturnType CreateByName(absl::string_view name,
Args2&&... args) {
return functions()->Invoke(name, std::forward<Args2>(args)...);
}
@ -334,7 +337,7 @@ class GlobalFactoryRegistry {
// Returns true if the specified factory function is available.
// If using namespaces with this registry, the variant with a namespace
// argument should be used.
static bool IsRegistered(const std::string& name) {
static bool IsRegistered(absl::string_view name) {
return functions()->IsRegistered(name);
}
@ -350,13 +353,13 @@ class GlobalFactoryRegistry {
std::tuple<Args...>>::value,
int> = 0>
static typename Functions::ReturnType CreateByNameInNamespace(
const std::string& ns, const std::string& name, Args2&&... args) {
absl::string_view ns, absl::string_view name, Args2&&... args) {
return functions()->Invoke(ns, name, std::forward<Args2>(args)...);
}
// Returns true if the specified factory function is available.
// Namespaces in |name| and |ns| are separated by kNameSep.
static bool IsRegistered(const std::string& ns, const std::string& name) {
static bool IsRegistered(absl::string_view ns, absl::string_view name) {
return functions()->IsRegistered(ns, name);
}

View File

@ -97,39 +97,24 @@ absl::Status StatusBuilder::Impl::JoinMessageToStatus() {
}());
}
StatusBuilder::Impl::Impl(const absl::Status& status, const char* file,
int line)
: status(status), line(line), file(file), stream() {}
StatusBuilder::Impl::Impl(absl::Status&& status, const char* file, int line)
: status(std::move(status)), line(line), file(file), stream() {}
StatusBuilder::Impl::Impl(const absl::Status& status,
mediapipe::source_location location)
: status(status),
line(location.line()),
file(location.file_name()),
stream() {}
: status(status), location(location), stream() {}
StatusBuilder::Impl::Impl(absl::Status&& status,
mediapipe::source_location location)
: status(std::move(status)),
line(location.line()),
file(location.file_name()),
stream() {}
: status(std::move(status)), location(location), stream() {}
StatusBuilder::Impl::Impl(const Impl& other)
: status(other.status),
line(other.line),
file(other.file),
location(other.location),
no_logging(other.no_logging),
stream(other.stream.str()),
join_style(other.join_style) {}
StatusBuilder::Impl& StatusBuilder::Impl::operator=(const Impl& other) {
status = other.status;
line = other.line;
file = other.file;
location = other.location;
no_logging = other.no_logging;
stream = std::ostringstream(other.stream.str());
join_style = other.join_style;

View File

@ -60,17 +60,6 @@ class ABSL_MUST_USE_RESULT StatusBuilder {
? nullptr
: std::make_unique<Impl>(absl::Status(code, ""), location)) {}
StatusBuilder(const absl::Status& original_status, const char* file, int line)
: impl_(original_status.ok()
? nullptr
: std::make_unique<Impl>(original_status, file, line)) {}
StatusBuilder(absl::Status&& original_status, const char* file, int line)
: impl_(original_status.ok()
? nullptr
: std::make_unique<Impl>(std::move(original_status), file,
line)) {}
bool ok() const { return !impl_; }
StatusBuilder& SetAppend() &;
@ -109,8 +98,6 @@ class ABSL_MUST_USE_RESULT StatusBuilder {
kPrepend,
};
Impl(const absl::Status& status, const char* file, int line);
Impl(absl::Status&& status, const char* file, int line);
Impl(const absl::Status& status, mediapipe::source_location location);
Impl(absl::Status&& status, mediapipe::source_location location);
Impl(const Impl&);
@ -120,10 +107,8 @@ class ABSL_MUST_USE_RESULT StatusBuilder {
// The status that the result will be based on.
absl::Status status;
// The line to record if this file is logged.
int line;
// Not-owned: The file to record if this status is logged.
const char* file;
// The source location to record if this file is logged.
mediapipe::source_location location;
// Logging disabled if true.
bool no_logging = false;
// The additional messages added with `<<`. This is nullptr when status_ is

View File

@ -33,21 +33,6 @@ TEST(StatusBuilder, OkStatusRvalue) {
ASSERT_EQ(status, absl::OkStatus());
}
TEST(StatusBuilder, OkStatusFileAndLineRvalueStatus) {
absl::Status status = StatusBuilder(absl::OkStatus(), "hello.cc", 1234)
<< "annotated message1 "
<< "annotated message2";
ASSERT_EQ(status, absl::OkStatus());
}
TEST(StatusBuilder, OkStatusFileAndLineLvalueStatus) {
const auto original_status = absl::OkStatus();
absl::Status status = StatusBuilder(original_status, "hello.cc", 1234)
<< "annotated message1 "
<< "annotated message2";
ASSERT_EQ(status, absl::OkStatus());
}
TEST(StatusBuilder, AnnotateMode) {
absl::Status status = StatusBuilder(absl::Status(absl::StatusCode::kNotFound,
"original message"),
@ -60,30 +45,6 @@ TEST(StatusBuilder, AnnotateMode) {
"original message; annotated message1 annotated message2");
}
TEST(StatusBuilder, AnnotateModeFileAndLineRvalueStatus) {
absl::Status status = StatusBuilder(absl::Status(absl::StatusCode::kNotFound,
"original message"),
"hello.cc", 1234)
<< "annotated message1 "
<< "annotated message2";
ASSERT_FALSE(status.ok());
EXPECT_EQ(status.code(), absl::StatusCode::kNotFound);
EXPECT_EQ(status.message(),
"original message; annotated message1 annotated message2");
}
TEST(StatusBuilder, AnnotateModeFileAndLineLvalueStatus) {
const auto original_status =
absl::Status(absl::StatusCode::kNotFound, "original message");
absl::Status status = StatusBuilder(original_status, "hello.cc", 1234)
<< "annotated message1 "
<< "annotated message2";
ASSERT_FALSE(status.ok());
EXPECT_EQ(status.code(), absl::StatusCode::kNotFound);
EXPECT_EQ(status.message(),
"original message; annotated message1 annotated message2");
}
TEST(StatusBuilder, PrependModeLvalue) {
StatusBuilder builder(
absl::Status(absl::StatusCode::kInvalidArgument, "original message"),

View File

@ -81,11 +81,11 @@
// MP_RETURN_IF_ERROR(foo.Method(args...));
// return absl::OkStatus();
// }
#define MP_RETURN_IF_ERROR(expr) \
STATUS_MACROS_IMPL_ELSE_BLOCKER_ \
if (mediapipe::status_macro_internal::StatusAdaptorForMacros \
status_macro_internal_adaptor = {(expr), __FILE__, __LINE__}) { \
} else /* NOLINT */ \
#define MP_RETURN_IF_ERROR(expr) \
STATUS_MACROS_IMPL_ELSE_BLOCKER_ \
if (mediapipe::status_macro_internal::StatusAdaptorForMacros \
status_macro_internal_adaptor = {(expr), MEDIAPIPE_LOC}) { \
} else /* NOLINT */ \
return status_macro_internal_adaptor.Consume()
// Executes an expression `rexpr` that returns a `absl::StatusOr<T>`. On
@ -156,14 +156,14 @@
return mediapipe::StatusBuilder( \
std::move(STATUS_MACROS_IMPL_CONCAT_(_status_or_value, __LINE__)) \
.status(), \
__FILE__, __LINE__))
MEDIAPIPE_LOC))
#define STATUS_MACROS_IMPL_ASSIGN_OR_RETURN_3_(lhs, rexpr, error_expression) \
STATUS_MACROS_IMPL_ASSIGN_OR_RETURN_( \
STATUS_MACROS_IMPL_CONCAT_(_status_or_value, __LINE__), lhs, rexpr, \
mediapipe::StatusBuilder _( \
std::move(STATUS_MACROS_IMPL_CONCAT_(_status_or_value, __LINE__)) \
.status(), \
__FILE__, __LINE__); \
MEDIAPIPE_LOC); \
(void)_; /* error_expression is allowed to not use this variable */ \
return (error_expression))
#define STATUS_MACROS_IMPL_ASSIGN_OR_RETURN_(statusor, lhs, rexpr, \
@ -201,18 +201,17 @@ namespace status_macro_internal {
// that declares a variable.
class StatusAdaptorForMacros {
public:
StatusAdaptorForMacros(const absl::Status& status, const char* file, int line)
: builder_(status, file, line) {}
StatusAdaptorForMacros(const absl::Status& status, source_location location)
: builder_(status, location) {}
StatusAdaptorForMacros(absl::Status&& status, const char* file, int line)
: builder_(std::move(status), file, line) {}
StatusAdaptorForMacros(absl::Status&& status, source_location location)
: builder_(std::move(status), location) {}
StatusAdaptorForMacros(const StatusBuilder& builder, const char* /* file */,
int /* line */)
StatusAdaptorForMacros(const StatusBuilder& builder,
source_location /*location*/)
: builder_(builder) {}
StatusAdaptorForMacros(StatusBuilder&& builder, const char* /* file */,
int /* line */)
StatusAdaptorForMacros(StatusBuilder&& builder, source_location /*location*/)
: builder_(std::move(builder)) {}
StatusAdaptorForMacros(const StatusAdaptorForMacros&) = delete;

View File

@ -17,7 +17,7 @@ load("//mediapipe/framework/port:build_config.bzl", "mediapipe_proto_library")
load("//mediapipe/framework:mediapipe_register_type.bzl", "mediapipe_register_type")
package(
default_visibility = ["//visibility:private"],
default_visibility = ["//visibility:public"],
features = ["-layering_check"],
)
@ -26,7 +26,6 @@ licenses(["notice"])
mediapipe_proto_library(
name = "detection_proto",
srcs = ["detection.proto"],
visibility = ["//visibility:public"],
deps = ["//mediapipe/framework/formats:location_data_proto"],
)
@ -45,7 +44,6 @@ mediapipe_register_type(
mediapipe_proto_library(
name = "classification_proto",
srcs = ["classification.proto"],
visibility = ["//visibility:public"],
)
mediapipe_register_type(
@ -64,46 +62,39 @@ mediapipe_register_type(
mediapipe_proto_library(
name = "image_format_proto",
srcs = ["image_format.proto"],
visibility = ["//visibility:public"],
)
mediapipe_proto_library(
name = "matrix_data_proto",
srcs = ["matrix_data.proto"],
visibility = ["//visibility:public"],
)
mediapipe_proto_library(
name = "location_data_proto",
srcs = ["location_data.proto"],
portable_deps = ["//mediapipe/framework/formats/annotation:rasterization_cc_proto"],
visibility = ["//visibility:public"],
deps = ["//mediapipe/framework/formats/annotation:rasterization_proto"],
)
mediapipe_proto_library(
name = "affine_transform_data_proto",
srcs = ["affine_transform_data.proto"],
visibility = ["//visibility:public"],
)
mediapipe_proto_library(
name = "time_series_header_proto",
srcs = ["time_series_header.proto"],
visibility = ["//visibility:public"],
)
mediapipe_proto_library(
name = "image_file_properties_proto",
srcs = ["image_file_properties.proto"],
visibility = ["//visibility:public"],
)
cc_library(
name = "deleting_file",
srcs = ["deleting_file.cc"],
hdrs = ["deleting_file.h"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework/port:logging",
],
@ -113,7 +104,6 @@ cc_library(
name = "matrix",
srcs = ["matrix.cc"],
hdrs = ["matrix.h"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:port",
"//mediapipe/framework/formats:matrix_data_cc_proto",
@ -129,13 +119,10 @@ cc_library(
name = "affine_transform",
srcs = ["affine_transform.cc"],
hdrs = ["affine_transform.h"],
visibility = [
"//visibility:public",
],
deps = [
":affine_transform_data_cc_proto",
"//mediapipe/framework:port",
"//mediapipe/framework:type_map",
"//mediapipe/framework/formats:affine_transform_data_cc_proto",
"//mediapipe/framework/port:integral_types",
"//mediapipe/framework/port:logging",
"//mediapipe/framework/port:point",
@ -154,7 +141,6 @@ cc_library(
name = "image_frame",
srcs = ["image_frame.cc"],
hdrs = ["image_frame.h"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework/formats:image_format_cc_proto",
"@com_google_absl//absl/base",
@ -179,7 +165,6 @@ cc_library(
name = "image_frame_opencv",
srcs = ["image_frame_opencv.cc"],
hdrs = ["image_frame_opencv.h"],
visibility = ["//visibility:public"],
deps = [
":image_frame",
"//mediapipe/framework/formats:image_format_cc_proto",
@ -206,11 +191,10 @@ cc_library(
name = "location",
srcs = ["location.cc"],
hdrs = ["location.h"],
visibility = ["//visibility:public"],
deps = [
"@com_google_protobuf//:protobuf",
"//mediapipe/framework/formats:location_data_cc_proto",
"//mediapipe/framework/formats/annotation:locus_cc_proto",
"//mediapipe/framework/formats:location_data_cc_proto",
"@com_google_absl//absl/base:core_headers",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/strings",
@ -238,9 +222,9 @@ cc_library(
name = "location_opencv",
srcs = ["location_opencv.cc"],
hdrs = ["location_opencv.h"],
visibility = ["//visibility:public"],
deps = [
":location",
"//mediapipe/framework/formats/annotation:rasterization_cc_proto",
"//mediapipe/framework/port:opencv_imgproc",
],
alwayslink = 1,
@ -251,6 +235,7 @@ cc_test(
srcs = ["location_opencv_test.cc"],
deps = [
":location_opencv",
"//mediapipe/framework/formats/annotation:rasterization_cc_proto",
"//mediapipe/framework/port:gtest_main",
"//mediapipe/framework/port:rectangle",
],
@ -259,7 +244,6 @@ cc_test(
cc_library(
name = "video_stream_header",
hdrs = ["video_stream_header.h"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework/formats:image_format_cc_proto",
],
@ -268,7 +252,6 @@ cc_library(
cc_library(
name = "yuv_image",
hdrs = ["yuv_image.h"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework/port:integral_types",
"@libyuv",
@ -292,7 +275,6 @@ cc_test(
mediapipe_proto_library(
name = "rect_proto",
srcs = ["rect.proto"],
visibility = ["//visibility:public"],
)
mediapipe_register_type(
@ -310,9 +292,6 @@ mediapipe_register_type(
mediapipe_proto_library(
name = "landmark_proto",
srcs = ["landmark.proto"],
visibility = [
"//visibility:public",
],
)
mediapipe_register_type(
@ -344,10 +323,9 @@ cc_library(
],
"//conditions:default": [],
}),
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework/formats:image_frame",
"//mediapipe/framework/formats:image_format_cc_proto",
"//mediapipe/framework/formats:image_frame",
"@com_google_absl//absl/synchronization",
"//mediapipe/framework:port",
"//mediapipe/framework:type_map",
@ -374,7 +352,6 @@ cc_library(
name = "image_multi_pool",
srcs = ["image_multi_pool.cc"],
hdrs = ["image_multi_pool.h"],
visibility = ["//visibility:public"],
deps = [
":image",
"//mediapipe/framework/formats:image_frame_pool",
@ -411,7 +388,6 @@ cc_library(
hdrs = [
"image_opencv.h",
],
visibility = ["//visibility:public"],
deps = [
":image",
"//mediapipe/framework/formats:image_format_cc_proto",
@ -425,7 +401,6 @@ cc_library(
name = "image_frame_pool",
srcs = ["image_frame_pool.cc"],
hdrs = ["image_frame_pool.h"],
visibility = ["//visibility:public"],
deps = [
":image_frame",
"@com_google_absl//absl/memory",
@ -476,7 +451,6 @@ cc_library(
"-landroid",
],
}),
visibility = ["//visibility:public"],
deps = [
"@com_google_absl//absl/memory",
"@com_google_absl//absl/synchronization",

View File

@ -16,7 +16,7 @@
load("//mediapipe/framework/port:build_config.bzl", "mediapipe_proto_library")
package(default_visibility = ["//visibility:private"])
package(default_visibility = ["//visibility:public"])
licenses(["notice"])
@ -24,12 +24,10 @@ mediapipe_proto_library(
name = "locus_proto",
srcs = ["locus.proto"],
portable_deps = ["//mediapipe/framework/formats/annotation:rasterization_cc_proto"],
visibility = ["//visibility:public"],
deps = ["//mediapipe/framework/formats/annotation:rasterization_proto"],
)
mediapipe_proto_library(
name = "rasterization_proto",
srcs = ["rasterization.proto"],
visibility = ["//visibility:public"],
)

View File

@ -16,22 +16,20 @@
# Description:
# Working with dense optical flow in mediapipe.
licenses(["notice"])
load("//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library")
package(default_visibility = ["//visibility:private"])
licenses(["notice"])
package(default_visibility = ["//visibility:public"])
proto_library(
name = "optical_flow_field_data_proto",
srcs = ["optical_flow_field_data.proto"],
visibility = ["//visibility:public"],
)
mediapipe_cc_proto_library(
name = "optical_flow_field_data_cc_proto",
srcs = ["optical_flow_field_data.proto"],
visibility = ["//visibility:public"],
deps = [":optical_flow_field_data_proto"],
)
@ -39,9 +37,6 @@ cc_library(
name = "optical_flow_field",
srcs = ["optical_flow_field.cc"],
hdrs = ["optical_flow_field.h"],
visibility = [
"//visibility:public",
],
deps = [
"//mediapipe/framework:type_map",
"//mediapipe/framework/deps:mathutil",

View File

@ -19,17 +19,15 @@ load("//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library"
licenses(["notice"])
package(default_visibility = ["//visibility:private"])
package(default_visibility = ["//visibility:public"])
proto_library(
name = "anchor_proto",
srcs = ["anchor.proto"],
visibility = ["//visibility:public"],
)
mediapipe_cc_proto_library(
name = "anchor_cc_proto",
srcs = ["anchor.proto"],
visibility = ["//visibility:public"],
deps = [":anchor_proto"],
)

View File

@ -354,7 +354,9 @@ NodeReadiness SyncSet::GetReadiness(Timestamp* min_stream_timestamp) {
}
}
*min_stream_timestamp = std::min(min_packet, min_bound);
if (*min_stream_timestamp == Timestamp::Done()) {
if (*min_stream_timestamp >= Timestamp::OneOverPostStream()) {
// Either OneOverPostStream or Done indicates no more packets.
*min_stream_timestamp = Timestamp::Done();
last_processed_ts_ = Timestamp::Done().PreviousAllowedInStream();
return NodeReadiness::kReadyForClose;
}

View File

@ -311,6 +311,17 @@ cc_library(
],
)
cc_library(
name = "opencv_videoio",
hdrs = ["opencv_videoio_inc.h"],
visibility = ["//visibility:public"],
deps = [
":opencv_core",
"//mediapipe/framework:port",
"//third_party:opencv",
],
)
cc_library(
name = "parse_text_proto",
hdrs = [

View File

@ -0,0 +1,21 @@
// Copyright 2022 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MEDIAPIPE_PORT_OPENCV_VIDEOIO_INC_H_
#define MEDIAPIPE_PORT_OPENCV_VIDEOIO_INC_H_
#include "mediapipe/framework/port/opencv_core_inc.h"
#include "third_party/OpenCV/videoio.hpp"
#endif // MEDIAPIPE_PORT_OPENCV_VIDEOIO_INC_H_

View File

@ -334,6 +334,10 @@ cc_library(
"graph_profiler_stub.h",
],
visibility = ["//mediapipe/framework:__pkg__"],
deps = [
"//mediapipe/framework:calculator_cc_proto",
"//mediapipe/framework:calculator_profile_cc_proto",
],
)
cc_test(

View File

@ -13,40 +13,36 @@
# limitations under the License.
#
load("//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library")
licenses(["notice"])
package(
default_visibility = ["//visibility:private"],
default_visibility = ["//visibility:public"],
features = ["-layering_check"],
)
load("//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library")
proto_library(
name = "default_input_stream_handler_proto",
srcs = ["default_input_stream_handler.proto"],
visibility = ["//visibility:public"],
deps = ["//mediapipe/framework:mediapipe_options_proto"],
)
proto_library(
name = "fixed_size_input_stream_handler_proto",
srcs = ["fixed_size_input_stream_handler.proto"],
visibility = ["//visibility:public"],
deps = ["//mediapipe/framework:mediapipe_options_proto"],
)
proto_library(
name = "sync_set_input_stream_handler_proto",
srcs = ["sync_set_input_stream_handler.proto"],
visibility = ["//visibility:public"],
deps = ["//mediapipe/framework:mediapipe_options_proto"],
)
proto_library(
name = "timestamp_align_input_stream_handler_proto",
srcs = ["timestamp_align_input_stream_handler.proto"],
visibility = ["//visibility:public"],
deps = ["//mediapipe/framework:mediapipe_options_proto"],
)
@ -54,7 +50,6 @@ mediapipe_cc_proto_library(
name = "default_input_stream_handler_cc_proto",
srcs = ["default_input_stream_handler.proto"],
cc_deps = ["//mediapipe/framework:mediapipe_options_cc_proto"],
visibility = ["//visibility:public"],
deps = [":default_input_stream_handler_proto"],
)
@ -62,7 +57,6 @@ mediapipe_cc_proto_library(
name = "fixed_size_input_stream_handler_cc_proto",
srcs = ["fixed_size_input_stream_handler.proto"],
cc_deps = ["//mediapipe/framework:mediapipe_options_cc_proto"],
visibility = ["//visibility:public"],
deps = [":fixed_size_input_stream_handler_proto"],
)
@ -70,7 +64,6 @@ mediapipe_cc_proto_library(
name = "sync_set_input_stream_handler_cc_proto",
srcs = ["sync_set_input_stream_handler.proto"],
cc_deps = ["//mediapipe/framework:mediapipe_options_cc_proto"],
visibility = ["//visibility:public"],
deps = [":sync_set_input_stream_handler_proto"],
)
@ -78,14 +71,12 @@ mediapipe_cc_proto_library(
name = "timestamp_align_input_stream_handler_cc_proto",
srcs = ["timestamp_align_input_stream_handler.proto"],
cc_deps = ["//mediapipe/framework:mediapipe_options_cc_proto"],
visibility = ["//visibility:public"],
deps = [":timestamp_align_input_stream_handler_proto"],
)
cc_library(
name = "barrier_input_stream_handler",
srcs = ["barrier_input_stream_handler.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:input_stream_handler",
],
@ -96,7 +87,6 @@ cc_library(
name = "default_input_stream_handler",
srcs = ["default_input_stream_handler.cc"],
hdrs = ["default_input_stream_handler.h"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:input_stream_handler",
"//mediapipe/framework/stream_handler:default_input_stream_handler_cc_proto",
@ -108,7 +98,6 @@ cc_library(
cc_library(
name = "early_close_input_stream_handler",
srcs = ["early_close_input_stream_handler.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:input_stream_handler",
"@com_google_absl//absl/strings",
@ -119,7 +108,6 @@ cc_library(
cc_library(
name = "fixed_size_input_stream_handler",
srcs = ["fixed_size_input_stream_handler.cc"],
visibility = ["//visibility:public"],
deps = [
":default_input_stream_handler",
"//mediapipe/framework:input_stream_handler",
@ -131,7 +119,6 @@ cc_library(
cc_library(
name = "immediate_input_stream_handler",
srcs = ["immediate_input_stream_handler.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:input_stream_handler",
],
@ -142,7 +129,6 @@ cc_library(
name = "in_order_output_stream_handler",
srcs = ["in_order_output_stream_handler.cc"],
hdrs = ["in_order_output_stream_handler.h"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:collection",
"//mediapipe/framework:collection_item_id",
@ -160,7 +146,6 @@ cc_library(
cc_library(
name = "mux_input_stream_handler",
srcs = ["mux_input_stream_handler.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:input_stream_handler",
"//mediapipe/framework/port:logging",
@ -173,7 +158,6 @@ cc_library(
cc_library(
name = "sync_set_input_stream_handler",
srcs = ["sync_set_input_stream_handler.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:collection",
"//mediapipe/framework:collection_item_id",
@ -192,7 +176,6 @@ cc_library(
cc_library(
name = "timestamp_align_input_stream_handler",
srcs = ["timestamp_align_input_stream_handler.cc"],
visibility = ["//visibility:public"],
deps = [
"//mediapipe/framework:collection_item_id",
"//mediapipe/framework:input_stream_handler",

View File

@ -230,6 +230,43 @@ TEST_F(ImmediateInputStreamHandlerTest, StreamDoneReady) {
input_stream_handler_->ClearCurrentInputs(cc_);
}
// This test checks that the state is ReadyForClose after all streams reach
// Timestamp::Max.
TEST_F(ImmediateInputStreamHandlerTest, ReadyForCloseAfterTimestampMax) {
Timestamp min_stream_timestamp;
std::list<Packet> packets;
// One packet arrives, ready for process.
packets.push_back(Adopt(new std::string("packet 1")).At(Timestamp(10)));
input_stream_handler_->AddPackets(name_to_id_["input_a"], packets);
EXPECT_TRUE(input_stream_handler_->ScheduleInvocations(
/*max_allowance=*/1, &min_stream_timestamp));
EXPECT_EQ(Timestamp(10), cc_->InputTimestamp());
input_stream_handler_->FinalizeInputSet(cc_->InputTimestamp(),
&cc_->Inputs());
input_stream_handler_->ClearCurrentInputs(cc_);
// No packets arrive, not ready.
EXPECT_FALSE(input_stream_handler_->ScheduleInvocations(
/*max_allowance=*/1, &min_stream_timestamp));
EXPECT_EQ(Timestamp::Unset(), cc_->InputTimestamp());
// Timestamp::Max arrives, ready for close.
input_stream_handler_->SetNextTimestampBound(
name_to_id_["input_a"], Timestamp::Max().NextAllowedInStream());
input_stream_handler_->SetNextTimestampBound(
name_to_id_["input_b"], Timestamp::Max().NextAllowedInStream());
input_stream_handler_->SetNextTimestampBound(
name_to_id_["input_c"], Timestamp::Max().NextAllowedInStream());
EXPECT_TRUE(input_stream_handler_->ScheduleInvocations(
/*max_allowance=*/1, &min_stream_timestamp));
EXPECT_EQ(Timestamp::Done(), cc_->InputTimestamp());
input_stream_handler_->FinalizeInputSet(cc_->InputTimestamp(),
&cc_->Inputs());
input_stream_handler_->ClearCurrentInputs(cc_);
}
// This test checks that when any stream is done, the state is ready to close.
TEST_F(ImmediateInputStreamHandlerTest, ReadyForClose) {
Timestamp min_stream_timestamp;

View File

@ -299,12 +299,12 @@ mediapipe_cc_test(
data = [":node_chain_subgraph.proto"],
requires_full_emulation = False,
deps = [
":node_chain_subgraph_cc_proto",
":options_field_util",
":options_registry",
":options_syntax_util",
":options_util",
"//mediapipe/calculators/core:flow_limiter_calculator",
"//mediapipe/calculators/core:flow_limiter_calculator_cc_proto",
"//mediapipe/framework:basic_types_registration",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework:calculator_runner",
@ -312,6 +312,7 @@ mediapipe_cc_test(
"//mediapipe/framework/port:gtest_main",
"//mediapipe/framework/port:parse_text_proto",
"//mediapipe/framework/port:status",
"//mediapipe/framework/testdata:night_light_calculator_cc_proto",
"//mediapipe/framework/testdata:night_light_calculator_options_lib",
"//mediapipe/framework/tool:node_chain_subgraph_options_lib",
"//mediapipe/util:header_util",
@ -486,7 +487,6 @@ cc_library(
deps = [
":proto_util_lite",
"//mediapipe/framework:calculator_cc_proto",
"//mediapipe/framework/deps:proto_descriptor_cc_proto",
"//mediapipe/framework/port:logging",
"//mediapipe/framework/port:numbers",
"//mediapipe/framework/port:ret_check",
@ -738,9 +738,7 @@ cc_test(
"//mediapipe/framework:calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework:graph_service_manager",
"//mediapipe/framework:mediapipe_options_cc_proto",
"//mediapipe/framework:packet",
"//mediapipe/framework:packet_generator_cc_proto",
"//mediapipe/framework:packet_set",
"//mediapipe/framework:packet_type",
"//mediapipe/framework:status_handler",
@ -923,7 +921,6 @@ cc_test(
"//mediapipe/calculators/core:pass_through_calculator",
"//mediapipe/framework:calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework:stream_handler_cc_proto",
"//mediapipe/framework:subgraph",
"//mediapipe/framework:test_calculators",
"//mediapipe/framework/port:gtest_main",

View File

@ -258,11 +258,8 @@ std::string GetTestFilePath(absl::string_view relative_path) {
return file::JoinPath(GetTestRootDir(), relative_path);
}
absl::StatusOr<std::unique_ptr<ImageFrame>> LoadTestImage(
absl::string_view path, ImageFormat::Format format) {
std::string encoded;
MP_RETURN_IF_ERROR(mediapipe::file::GetContents(path, &encoded));
absl::StatusOr<std::unique_ptr<ImageFrame>> DecodeTestImage(
absl::string_view encoded, ImageFormat::Format format) {
// stbi_load determines the output pixel format based on the desired channels.
// 0 means "use whatever's in the file".
int desired_channels = format == ImageFormat::UNKNOWN ? 0
@ -274,10 +271,10 @@ absl::StatusOr<std::unique_ptr<ImageFrame>> LoadTestImage(
<< "unsupported output format requested: " << format;
int width, height, channels_in_file;
auto data = stbi_load_from_memory(reinterpret_cast<stbi_uc*>(encoded.data()),
encoded.size(), &width, &height,
&channels_in_file, desired_channels);
RET_CHECK(data) << "failed to decode image data from: " << path;
auto data = stbi_load_from_memory(
reinterpret_cast<const stbi_uc*>(encoded.data()), encoded.size(), &width,
&height, &channels_in_file, desired_channels);
RET_CHECK(data) << "failed to decode image data";
// If we didn't specify a desired format, it will be determined by what the
// file contains.
@ -295,6 +292,13 @@ absl::StatusOr<std::unique_ptr<ImageFrame>> LoadTestImage(
format, width, height, width * output_channels, data, stbi_image_free);
}
absl::StatusOr<std::unique_ptr<ImageFrame>> LoadTestImage(
absl::string_view path, ImageFormat::Format format) {
std::string encoded;
MP_RETURN_IF_ERROR(mediapipe::file::GetContents(path, &encoded));
return DecodeTestImage(encoded, format);
}
std::unique_ptr<ImageFrame> LoadTestPng(absl::string_view path,
ImageFormat::Format format) {
return nullptr;

View File

@ -81,6 +81,10 @@ std::string GetTestDataDir(absl::string_view package_base_path);
// Loads a binary graph from path. Returns true iff successful.
bool LoadTestGraph(CalculatorGraphConfig* proto, const std::string& path);
// Loads an image from memory.
absl::StatusOr<std::unique_ptr<ImageFrame>> DecodeTestImage(
absl::string_view encoded, ImageFormat::Format format = ImageFormat::SRGBA);
// Loads an image from path.
absl::StatusOr<std::unique_ptr<ImageFrame>> LoadTestImage(
absl::string_view path, ImageFormat::Format format = ImageFormat::SRGBA);

View File

@ -1048,6 +1048,14 @@ absl::Status ValidatedGraphConfig::ValidateRequiredSidePacketTypes(
for (const auto& required_item : required_side_packets_) {
auto iter = side_packet_types.find(required_item.first);
if (iter == side_packet_types.end()) {
bool is_optional = true;
for (int index : required_item.second) {
is_optional &= input_side_packets_[index].packet_type->IsOptional();
}
if (is_optional) {
// Side packets that are optional and not provided are ignored.
continue;
}
statuses.push_back(mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC)
<< "Side packet \"" << required_item.first
<< "\" is required but was not provided.");

View File

@ -176,6 +176,16 @@ cc_library(
"-fobjc-arc", # enable reference-counting
],
}),
linkopts = select({
"//conditions:default": [],
"//mediapipe:ios": [
"-framework OpenGLES",
],
"//mediapipe:macos": [
"-framework OpenGL",
"-framework AppKit",
],
}),
visibility = ["//visibility:public"],
deps = [
":attachments",
@ -204,8 +214,10 @@ cc_library(
}) + select({
"//conditions:default": [
],
"//mediapipe:ios": [],
"//mediapipe:macos": [],
"//mediapipe:ios": [
],
"//mediapipe:macos": [
],
}),
)
@ -221,12 +233,18 @@ cc_library(
":gpu_buffer_format",
":gpu_buffer_storage",
":gpu_buffer_storage_image_frame",
"@com_google_absl//absl/memory",
# TODO: remove this dependency. Some other teams' tests
# depend on having an indirect image_frame dependency, need to be
# fixed first.
"//mediapipe/framework/formats:image_frame",
"@com_google_absl//absl/memory",
],
] + select({
"//conditions:default": [],
":platform_ios_with_gpu": [
":gl_texture_util",
":gpu_buffer_storage_cv_pixel_buffer",
],
}),
)
cc_library(
@ -344,6 +362,60 @@ cc_library(
],
)
mediapipe_cc_test(
name = "gpu_buffer_storage_cv_pixel_buffer_test",
size = "small",
timeout = "moderate",
srcs = ["gpu_buffer_storage_cv_pixel_buffer_test.cc"],
platforms = ["ios"],
deps = [
":gl_texture_buffer",
":gl_texture_util",
":gpu_buffer",
":gpu_buffer_storage_cv_pixel_buffer",
":gpu_test_base",
"//mediapipe/framework/port:gtest_main",
"//mediapipe/framework/tool:test_util",
"//mediapipe/objc:util",
"@com_google_absl//absl/strings",
],
)
cc_library(
name = "cv_texture_cache_manager",
srcs = ["cv_texture_cache_manager.cc"],
hdrs = ["cv_texture_cache_manager.h"],
deps = [
":pixel_buffer_pool_util",
"//mediapipe/framework/port:logging",
"//mediapipe/objc:CFHolder",
"@com_google_absl//absl/synchronization",
],
)
cc_library(
name = "cv_pixel_buffer_pool_wrapper",
srcs = ["cv_pixel_buffer_pool_wrapper.cc"],
hdrs = ["cv_pixel_buffer_pool_wrapper.h"],
copts = select({
"//conditions:default": [],
"//mediapipe:apple": [
"-x objective-c++",
"-fobjc-arc",
],
}),
deps = [
":cv_texture_cache_manager",
":gpu_buffer_format",
":multi_pool",
":pixel_buffer_pool_util",
"//mediapipe/framework/port:logging",
"//mediapipe/objc:CFHolder",
"//mediapipe/objc:util",
"@com_google_absl//absl/synchronization",
],
)
cc_library(
name = "gpu_buffer_storage_image_frame",
hdrs = ["gpu_buffer_storage_image_frame.h"],
@ -410,12 +482,9 @@ objc_library(
)
objc_library(
name = "MPPGraphGPUData",
srcs = [
"MPPGraphGPUData.mm",
"gpu_shared_data_internal.cc",
],
hdrs = ["MPPGraphGPUData.h"],
name = "metal_shared_resources",
srcs = ["metal_shared_resources.mm"],
hdrs = ["metal_shared_resources.h"],
copts = [
"-x objective-c++",
"-Wno-shorten-64-to-32",
@ -424,24 +493,9 @@ objc_library(
sdk_frameworks = [
"CoreVideo",
"Metal",
] + select({
"//conditions:default": [
"OpenGLES",
],
"//mediapipe:macos": [
"OpenGL",
"AppKit",
],
}),
],
visibility = ["//visibility:public"],
deps = [
":gl_base",
":gl_context",
":gpu_buffer_multi_pool",
":gpu_shared_data_header",
":graph_support",
"//mediapipe/gpu:gl_context_options_cc_proto",
"//mediapipe/framework:calculator_context",
"//mediapipe/framework/port:ret_check",
"@google_toolbox_for_mac//:GTM_Defines",
] + [
@ -489,12 +543,7 @@ cc_library(
name = "gpu_shared_data_header",
textual_hdrs = [
"gpu_shared_data_internal.h",
] + select({
"//conditions:default": [],
"//mediapipe:apple": [
"MPPGraphGPUData.h",
],
}),
],
visibility = ["//visibility:private"],
deps = [
":gl_base",
@ -528,16 +577,19 @@ cc_library(
cc_library(
name = "gpu_shared_data_internal_actual",
srcs = select({
"//conditions:default": [
"gpu_shared_data_internal.cc",
],
# iOS uses an Objective-C++ version of this, built in MPPGraphGPUData.
"//mediapipe:apple": [],
}),
srcs = [
"gpu_shared_data_internal.cc",
],
hdrs = [
"gpu_shared_data_internal.h",
],
copts = select({
"//conditions:default": [],
"//mediapipe:apple": [
"-x objective-c++",
"-fobjc-arc", # enable reference-counting
],
}),
visibility = ["//visibility:private"],
deps = [
"//mediapipe/gpu:gl_context_options_cc_proto",
@ -554,7 +606,8 @@ cc_library(
] + select({
"//conditions:default": [],
"//mediapipe:apple": [
":MPPGraphGPUData",
":metal_shared_resources",
":cv_texture_cache_manager",
],
}),
)
@ -569,6 +622,8 @@ cc_library(
":gl_texture_buffer",
":gpu_buffer",
":gpu_shared_data_header",
":multi_pool",
":reusable_pool",
"//mediapipe/framework:calculator_context",
"//mediapipe/framework:calculator_node",
"//mediapipe/framework/port:logging",
@ -577,6 +632,22 @@ cc_library(
],
)
cc_library(
name = "reusable_pool",
hdrs = ["reusable_pool.h"],
deps = [
":multi_pool",
"@com_google_absl//absl/functional:any_invocable",
"@com_google_absl//absl/synchronization",
],
)
cc_library(
name = "multi_pool",
hdrs = ["multi_pool.h"],
deps = ["//mediapipe/util:resource_cache"],
)
cc_library(
name = "gpu_buffer_multi_pool",
srcs = ["gpu_buffer_multi_pool.cc"],
@ -604,6 +675,7 @@ cc_library(
":gl_base",
":gpu_buffer",
":gpu_shared_data_header",
":multi_pool",
"//mediapipe/framework:calculator_context",
"//mediapipe/framework:calculator_node",
"//mediapipe/framework/port:logging",
@ -617,11 +689,15 @@ cc_library(
":gl_texture_buffer_pool",
],
"//mediapipe:ios": [
":cv_pixel_buffer_pool_wrapper",
":cv_texture_cache_manager",
":pixel_buffer_pool_util",
"//mediapipe/objc:CFHolder",
"//mediapipe/objc:util",
],
"//mediapipe:macos": [
":cv_pixel_buffer_pool_wrapper",
":cv_texture_cache_manager",
":pixel_buffer_pool_util",
":gl_texture_buffer",
":gl_texture_buffer_pool",
@ -629,6 +705,17 @@ cc_library(
}),
)
cc_library(
name = "gl_texture_util",
srcs = ["gl_texture_util.cc"],
hdrs = ["gl_texture_util.h"],
visibility = ["//visibility:public"],
deps = [
":gl_base",
":gl_texture_view",
],
)
cc_library(
name = "shader_util",
srcs = ["shader_util.cc"],
@ -653,11 +740,9 @@ cc_library(
name = "gl_calculator_helper",
srcs = [
"gl_calculator_helper.cc",
"gl_calculator_helper_impl_common.cc",
],
hdrs = [
"gl_calculator_helper.h",
"gl_calculator_helper_impl.h",
],
linkopts = select({
"//conditions:default": [],
@ -689,7 +774,7 @@ cc_library(
":image_frame_view",
":shader_util",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework:calculator_cc_proto",
"@com_google_absl//absl/base:core_headers",
"//mediapipe/framework:calculator_context",
"//mediapipe/framework:calculator_node",
"//mediapipe/framework:calculator_contract",
@ -715,20 +800,6 @@ cc_library(
}),
)
# TODO: remove
objc_library(
name = "gl_calculator_helper_ios",
copts = [
"-Wno-shorten-64-to-32",
],
visibility = ["//visibility:public"],
deps = [
":gl_calculator_helper",
"//mediapipe/objc:mediapipe_framework_ios",
"//mediapipe/objc:util",
],
)
objc_library(
name = "MPPMetalHelper",
srcs = ["MPPMetalHelper.mm"],
@ -821,6 +892,8 @@ cc_library(
visibility = ["//visibility:public"],
deps = [
":gl_calculator_helper",
":gpu_buffer_storage_image_frame",
"//mediapipe/framework/api2:node",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:image_frame",
"//mediapipe/framework/port:status",
@ -1062,8 +1135,8 @@ objc_library(
name = "gl_ios_test_lib",
testonly = 1,
srcs = [
"MPPGraphGPUDataTests.mm",
"gl_ios_test.mm",
"metal_shared_resources_test.mm",
],
copts = [
"-Wno-shorten-64-to-32",
@ -1073,7 +1146,7 @@ objc_library(
],
features = ["-layering_check"],
deps = [
":MPPGraphGPUData",
":metal_shared_resources",
":gl_scaler_calculator",
":gpu_buffer_to_image_frame_calculator",
":gpu_shared_data_internal",

View File

@ -1,71 +0,0 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MEDIAPIPE_GPU_MPPGRAPHGPUDATA_H_
#define MEDIAPIPE_GPU_MPPGRAPHGPUDATA_H_
#import <CoreVideo/CVMetalTextureCache.h>
#import <CoreVideo/CoreVideo.h>
#import <Metal/Metal.h>
#import "mediapipe/gpu/gl_base.h"
#import "mediapipe/gpu/gl_context.h"
namespace mediapipe {
class GlContext;
class GpuBufferMultiPool;
} // namespace mediapipe
@interface MPPGraphGPUData : NSObject {
// Shared buffer pool for GPU calculators.
mediapipe::GpuBufferMultiPool* _gpuBufferPool;
mediapipe::GlContext* _glContext;
}
- (instancetype)init NS_UNAVAILABLE;
/// Initialize. The provided multipool pointer must remain valid throughout
/// this object's lifetime.
- (instancetype)initWithContext:(mediapipe::GlContext*)context
multiPool:(mediapipe::GpuBufferMultiPool*)pool NS_DESIGNATED_INITIALIZER;
/// Shared texture pool for GPU calculators.
/// For internal use by GlCalculatorHelper.
@property(readonly) mediapipe::GpuBufferMultiPool* gpuBufferPool;
/// Shared OpenGL context.
#if TARGET_OS_OSX
@property(readonly) NSOpenGLContext* glContext;
@property(readonly) NSOpenGLPixelFormat* glPixelFormat;
#else
@property(readonly) EAGLContext* glContext;
#endif // TARGET_OS_OSX
/// Shared texture cache.
#if TARGET_OS_OSX
@property(readonly) CVOpenGLTextureCacheRef textureCache;
#else
@property(readonly) CVOpenGLESTextureCacheRef textureCache;
#endif // TARGET_OS_OSX
/// Shared Metal resources.
@property(readonly) id<MTLDevice> mtlDevice;
@property(readonly) id<MTLCommandQueue> mtlCommandQueue;
#if COREVIDEO_SUPPORTS_METAL
@property(readonly) CVMetalTextureCacheRef mtlTextureCache;
#endif
@end
#endif // MEDIAPIPE_GPU_MPPGRAPHGPUDATA_H_

View File

@ -1,124 +0,0 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#import "mediapipe/gpu/MPPGraphGPUData.h"
#import "GTMDefines.h"
#include "mediapipe/gpu/gl_context.h"
#include "mediapipe/gpu/gpu_buffer_multi_pool.h"
#if TARGET_OS_OSX
#import <AppKit/NSOpenGL.h>
#else
#import <OpenGLES/EAGL.h>
#endif // TARGET_OS_OSX
@implementation MPPGraphGPUData
@synthesize textureCache = _textureCache;
@synthesize mtlDevice = _mtlDevice;
@synthesize mtlCommandQueue = _mtlCommandQueue;
#if COREVIDEO_SUPPORTS_METAL
@synthesize mtlTextureCache = _mtlTextureCache;
#endif
#if TARGET_OS_OSX
typedef CVOpenGLTextureCacheRef CVTextureCacheType;
#else
typedef CVOpenGLESTextureCacheRef CVTextureCacheType;
#endif // TARGET_OS_OSX
- (instancetype)initWithContext:(mediapipe::GlContext *)context
multiPool:(mediapipe::GpuBufferMultiPool *)pool {
self = [super init];
if (self) {
_gpuBufferPool = pool;
_glContext = context;
}
return self;
}
- (void)dealloc {
if (_textureCache) {
_textureCache = NULL;
}
#if COREVIDEO_SUPPORTS_METAL
if (_mtlTextureCache) {
CFRelease(_mtlTextureCache);
_mtlTextureCache = NULL;
}
#endif
}
#if TARGET_OS_OSX
- (NSOpenGLContext *)glContext {
return _glContext->nsgl_context();
}
- (NSOpenGLPixelFormat *) glPixelFormat {
return _glContext->nsgl_pixel_format();
}
#else
- (EAGLContext *)glContext {
return _glContext->eagl_context();
}
#endif // TARGET_OS_OSX
- (CVTextureCacheType)textureCache {
@synchronized(self) {
if (!_textureCache) {
_textureCache = _glContext->cv_texture_cache();
}
}
return _textureCache;
}
- (mediapipe::GpuBufferMultiPool *)gpuBufferPool {
return _gpuBufferPool;
}
- (id<MTLDevice>)mtlDevice {
@synchronized(self) {
if (!_mtlDevice) {
_mtlDevice = MTLCreateSystemDefaultDevice();
}
}
return _mtlDevice;
}
- (id<MTLCommandQueue>)mtlCommandQueue {
@synchronized(self) {
if (!_mtlCommandQueue) {
_mtlCommandQueue = [self.mtlDevice newCommandQueue];
}
}
return _mtlCommandQueue;
}
#if COREVIDEO_SUPPORTS_METAL
- (CVMetalTextureCacheRef)mtlTextureCache {
@synchronized(self) {
if (!_mtlTextureCache) {
CVReturn __unused err =
CVMetalTextureCacheCreate(NULL, NULL, self.mtlDevice, NULL, &_mtlTextureCache);
NSAssert(err == kCVReturnSuccess, @"Error at CVMetalTextureCacheCreate %d", err);
// TODO: register and flush metal caches too.
}
}
return _mtlTextureCache;
}
#endif
@end

View File

@ -1,86 +0,0 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#import <UIKit/UIKit.h>
#import <XCTest/XCTest.h>
#include <memory>
#include "absl/memory/memory.h"
#include "mediapipe/framework/port/threadpool.h"
#import "mediapipe/gpu/MPPGraphGPUData.h"
#import "mediapipe/gpu/gpu_shared_data_internal.h"
@interface MPPGraphGPUDataTests : XCTestCase {
}
@end
@implementation MPPGraphGPUDataTests
// This test verifies that the internal Objective-C object is correctly
// released when the C++ wrapper is released.
- (void)testCorrectlyReleased {
__weak id gpuData = nil;
std::weak_ptr<mediapipe::GpuResources> gpuRes;
@autoreleasepool {
mediapipe::GpuSharedData gpu_shared;
gpuRes = gpu_shared.gpu_resources;
gpuData = gpu_shared.gpu_resources->ios_gpu_data();
XCTAssertNotEqual(gpuRes.lock(), nullptr);
XCTAssertNotNil(gpuData);
}
XCTAssertEqual(gpuRes.lock(), nullptr);
XCTAssertNil(gpuData);
}
// This test verifies that the lazy initialization of the glContext instance
// variable is thread-safe. All threads should read the same value.
- (void)testGlContextThreadSafeLazyInitialization {
mediapipe::GpuSharedData gpu_shared;
constexpr int kNumThreads = 10;
EAGLContext* ogl_context[kNumThreads];
auto pool = absl::make_unique<mediapipe::ThreadPool>(kNumThreads);
pool->StartWorkers();
for (int i = 0; i < kNumThreads; ++i) {
pool->Schedule([&gpu_shared, &ogl_context, i] {
ogl_context[i] = gpu_shared.gpu_resources->ios_gpu_data().glContext;
});
}
pool.reset();
for (int i = 0; i < kNumThreads - 1; ++i) {
XCTAssertEqual(ogl_context[i], ogl_context[i + 1]);
}
}
// This test verifies that the lazy initialization of the textureCache instance
// variable is thread-safe. All threads should read the same value.
- (void)testTextureCacheThreadSafeLazyInitialization {
mediapipe::GpuSharedData gpu_shared;
constexpr int kNumThreads = 10;
CFHolder<CVOpenGLESTextureCacheRef> texture_cache[kNumThreads];
auto pool = absl::make_unique<mediapipe::ThreadPool>(kNumThreads);
pool->StartWorkers();
for (int i = 0; i < kNumThreads; ++i) {
pool->Schedule([&gpu_shared, &texture_cache, i] {
texture_cache[i].reset(gpu_shared.gpu_resources->ios_gpu_data().textureCache);
});
}
pool.reset();
for (int i = 0; i < kNumThreads - 1; ++i) {
XCTAssertEqual(*texture_cache[i], *texture_cache[i + 1]);
}
}
@end

View File

@ -21,37 +21,35 @@
#include "mediapipe/framework/packet.h"
#include "mediapipe/framework/packet_type.h"
#include "mediapipe/gpu/MPPGraphGPUData.h"
#include "mediapipe/gpu/gpu_shared_data_internal.h"
NS_ASSUME_NONNULL_BEGIN
@interface MPPMetalHelper : NSObject {
MPPGraphGPUData* _gpuShared;
}
- (instancetype)init NS_UNAVAILABLE;
/// Initialize. This initializer is recommended for calculators.
- (instancetype)initWithCalculatorContext:(mediapipe::CalculatorContext*)cc;
- (instancetype)initWithCalculatorContext:(mediapipe::CalculatorContext *)cc;
/// Initialize.
- (instancetype)initWithGpuResources:(mediapipe::GpuResources*)gpuResources
- (instancetype)initWithGpuResources:(mediapipe::GpuResources *)gpuResources
NS_DESIGNATED_INITIALIZER;
/// Configures a calculator's contract for accessing GPU resources.
/// Calculators should use this in GetContract.
+ (absl::Status)updateContract:(mediapipe::CalculatorContract*)cc;
+ (absl::Status)updateContract:(mediapipe::CalculatorContract *)cc;
/// Deprecated initializer.
- (instancetype)initWithSidePackets:(const mediapipe::PacketSet&)inputSidePackets;
- (instancetype)initWithSidePackets:(const mediapipe::PacketSet &)inputSidePackets;
/// Deprecated initializer.
- (instancetype)initWithGpuSharedData:(mediapipe::GpuSharedData*)gpuShared;
- (instancetype)initWithGpuSharedData:(mediapipe::GpuSharedData *)gpuShared;
/// Configures a calculator's side packets for accessing GPU resources.
/// Calculators should use this in FillExpectations.
+ (absl::Status)setupInputSidePackets:(mediapipe::PacketTypeSet*)inputSidePackets;
+ (absl::Status)setupInputSidePackets:(mediapipe::PacketTypeSet *)inputSidePackets;
/// Get a metal command buffer.
/// Calculators should use this method instead of getting a buffer from the
@ -63,23 +61,23 @@ NS_ASSUME_NONNULL_BEGIN
/// Creates a CVMetalTextureRef linked to the provided GpuBuffer.
/// Ownership follows the copy rule, so the caller is responsible for
/// releasing the CVMetalTextureRef.
- (CVMetalTextureRef)copyCVMetalTextureWithGpuBuffer:(const mediapipe::GpuBuffer&)gpuBuffer;
- (CVMetalTextureRef)copyCVMetalTextureWithGpuBuffer:(const mediapipe::GpuBuffer &)gpuBuffer;
/// Creates a CVMetalTextureRef linked to the provided GpuBuffer given a specific plane.
/// Ownership follows the copy rule, so the caller is responsible for
/// releasing the CVMetalTextureRef.
- (CVMetalTextureRef)copyCVMetalTextureWithGpuBuffer:(const mediapipe::GpuBuffer&)gpuBuffer
- (CVMetalTextureRef)copyCVMetalTextureWithGpuBuffer:(const mediapipe::GpuBuffer &)gpuBuffer
plane:(size_t)plane;
/// Returns a MTLTexture linked to the provided GpuBuffer.
/// A calculator can freely use it as a rendering source, but it should not
/// use it as a rendering target if the GpuBuffer was provided as an input.
- (id<MTLTexture>)metalTextureWithGpuBuffer:(const mediapipe::GpuBuffer&)gpuBuffer;
- (id<MTLTexture>)metalTextureWithGpuBuffer:(const mediapipe::GpuBuffer &)gpuBuffer;
/// Returns a MTLTexture linked to the provided GpuBuffer given a specific plane.
/// A calculator can freely use it as a rendering source, but it should not
/// use it as a rendering target if the GpuBuffer was provided as an input.
- (id<MTLTexture>)metalTextureWithGpuBuffer:(const mediapipe::GpuBuffer&)gpuBuffer
- (id<MTLTexture>)metalTextureWithGpuBuffer:(const mediapipe::GpuBuffer &)gpuBuffer
plane:(size_t)plane;
/// Obtains a new GpuBuffer to be used as an output destination.
@ -91,7 +89,7 @@ NS_ASSUME_NONNULL_BEGIN
format:(mediapipe::GpuBufferFormat)format;
/// Convenience method to load a Metal library stored as a bundle resource.
- (id<MTLLibrary>)newLibraryWithResourceName:(NSString*)name error:(NSError* _Nullable*)error;
- (id<MTLLibrary>)newLibraryWithResourceName:(NSString *)name error:(NSError *_Nullable *)error;
/// Shared Metal resources.
@property(readonly) id<MTLDevice> mtlDevice;

View File

@ -14,11 +14,18 @@
#import "mediapipe/gpu/MPPMetalHelper.h"
#import "mediapipe/gpu/gpu_buffer.h"
#import "mediapipe/gpu/graph_support.h"
#import "mediapipe/gpu/metal_shared_resources.h"
#import "GTMDefines.h"
#include "mediapipe/framework/port/ret_check.h"
@interface MPPMetalHelper () {
mediapipe::GpuResources* _gpuResources;
}
@end
namespace mediapipe {
// Using a C++ class so it can be declared as a friend of LegacyCalculatorSupport.
@ -40,7 +47,7 @@ class MetalHelperLegacySupport {
- (instancetype)initWithGpuResources:(mediapipe::GpuResources*)gpuResources {
self = [super init];
if (self) {
_gpuShared = gpuResources->ios_gpu_data();
_gpuResources = gpuResources;
}
return self;
}
@ -105,19 +112,19 @@ class MetalHelperLegacySupport {
}
- (id<MTLDevice>)mtlDevice {
return _gpuShared.mtlDevice;
return _gpuResources->metal_shared().resources().mtlDevice;
}
- (id<MTLCommandQueue>)mtlCommandQueue {
return _gpuShared.mtlCommandQueue;
return _gpuResources->metal_shared().resources().mtlCommandQueue;
}
- (CVMetalTextureCacheRef)mtlTextureCache {
return _gpuShared.mtlTextureCache;
return _gpuResources->metal_shared().resources().mtlTextureCache;
}
- (id<MTLCommandBuffer>)commandBuffer {
return [_gpuShared.mtlCommandQueue commandBuffer];
return [_gpuResources->metal_shared().resources().mtlCommandQueue commandBuffer];
}
- (CVMetalTextureRef)copyCVMetalTextureWithGpuBuffer:(const mediapipe::GpuBuffer&)gpuBuffer
@ -169,8 +176,9 @@ class MetalHelperLegacySupport {
CVMetalTextureRef texture;
CVReturn err = CVMetalTextureCacheCreateTextureFromImage(
NULL, _gpuShared.mtlTextureCache, mediapipe::GetCVPixelBufferRef(gpuBuffer), NULL,
metalPixelFormat, width, height, plane, &texture);
NULL, _gpuResources->metal_shared().resources().mtlTextureCache,
mediapipe::GetCVPixelBufferRef(gpuBuffer), NULL, metalPixelFormat, width, height, plane,
&texture);
CHECK_EQ(err, kCVReturnSuccess);
return texture;
}
@ -191,19 +199,20 @@ class MetalHelperLegacySupport {
}
- (mediapipe::GpuBuffer)mediapipeGpuBufferWithWidth:(int)width height:(int)height {
return _gpuShared.gpuBufferPool->GetBuffer(width, height);
return _gpuResources->gpu_buffer_pool().GetBuffer(width, height);
}
- (mediapipe::GpuBuffer)mediapipeGpuBufferWithWidth:(int)width
height:(int)height
format:(mediapipe::GpuBufferFormat)format {
return _gpuShared.gpuBufferPool->GetBuffer(width, height, format);
return _gpuResources->gpu_buffer_pool().GetBuffer(width, height, format);
}
- (id<MTLLibrary>)newLibraryWithResourceName:(NSString*)name error:(NSError * _Nullable *)error {
return [_gpuShared.mtlDevice newLibraryWithFile:[[NSBundle bundleForClass:[self class]]
pathForResource:name ofType:@"metallib"]
error:error];
return [_gpuResources->metal_shared().resources().mtlDevice
newLibraryWithFile:[[NSBundle bundleForClass:[self class]] pathForResource:name
ofType:@"metallib"]
error:error];
}
@end

View File

@ -31,8 +31,8 @@ class AttachmentBase {};
template <class Context, class T>
class Attachment : public AttachmentBase<Context> {
public:
using FactoryT = std::function<AttachmentPtr<T>(Context&)>;
Attachment(FactoryT factory) : factory_(factory) {}
using FactoryT = AttachmentPtr<T> (*)(Context&);
explicit constexpr Attachment(FactoryT factory) : factory_(factory) {}
Attachment(const Attachment&) = delete;
Attachment(Attachment&&) = delete;

View File

@ -0,0 +1,84 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "mediapipe/gpu/cv_pixel_buffer_pool_wrapper.h"
#include <tuple>
#include "CoreFoundation/CFBase.h"
#include "mediapipe/framework/port/logging.h"
#include "mediapipe/objc/CFHolder.h"
#include "mediapipe/objc/util.h"
namespace mediapipe {
CvPixelBufferPoolWrapper::CvPixelBufferPoolWrapper(
int width, int height, GpuBufferFormat format, CFTimeInterval maxAge,
CvTextureCacheManager* texture_caches) {
OSType cv_format = CVPixelFormatForGpuBufferFormat(format);
CHECK_NE(cv_format, -1) << "unsupported pixel format";
pool_ = MakeCFHolderAdopting(
/* keep count is 0 because the age param keeps buffers around anyway */
CreateCVPixelBufferPool(width, height, cv_format, 0, maxAge));
texture_caches_ = texture_caches;
}
CFHolder<CVPixelBufferRef> CvPixelBufferPoolWrapper::GetBuffer() {
CVPixelBufferRef buffer;
int threshold = 1;
NSMutableDictionary* auxAttributes =
[NSMutableDictionary dictionaryWithCapacity:1];
CVReturn err;
bool tried_flushing = false;
while (1) {
auxAttributes[(id)kCVPixelBufferPoolAllocationThresholdKey] = @(threshold);
err = CVPixelBufferPoolCreatePixelBufferWithAuxAttributes(
kCFAllocatorDefault, *pool_, (__bridge CFDictionaryRef)auxAttributes,
&buffer);
if (err != kCVReturnWouldExceedAllocationThreshold) break;
if (texture_caches_ && !tried_flushing) {
// Call the flush function to potentially release old holds on buffers
// and try again to create a pixel buffer.
// This is used to flush CV texture caches, which may retain buffers until
// flushed.
texture_caches_->FlushTextureCaches();
tried_flushing = true;
} else {
++threshold;
}
}
CHECK(!err) << "Error creating pixel buffer: " << err;
count_ = threshold;
return MakeCFHolderAdopting(buffer);
}
std::string CvPixelBufferPoolWrapper::GetDebugString() const {
auto description = MakeCFHolderAdopting(CFCopyDescription(*pool_));
return [(__bridge NSString*)*description UTF8String];
}
void CvPixelBufferPoolWrapper::Flush() { CVPixelBufferPoolFlush(*pool_, 0); }
CFHolder<CVPixelBufferRef> CvPixelBufferPoolWrapper::CreateBufferWithoutPool(
const internal::GpuBufferSpec& spec) {
OSType cv_format = CVPixelFormatForGpuBufferFormat(spec.format);
CHECK_NE(cv_format, -1) << "unsupported pixel format";
CVPixelBufferRef buffer;
CVReturn err = CreateCVPixelBufferWithoutPool(spec.width, spec.height,
cv_format, &buffer);
CHECK(!err) << "Error creating pixel buffer: " << err;
return MakeCFHolderAdopting(buffer);
}
} // namespace mediapipe

View File

@ -0,0 +1,66 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This class lets calculators allocate GpuBuffers of various sizes, caching
// and reusing them as needed. It does so by automatically creating and using
// platform-specific buffer pools for the requested sizes.
//
// This class is not meant to be used directly by calculators, but is instead
// used by GlCalculatorHelper to allocate buffers.
#ifndef MEDIAPIPE_GPU_CV_PIXEL_BUFFER_POOL_WRAPPER_H_
#define MEDIAPIPE_GPU_CV_PIXEL_BUFFER_POOL_WRAPPER_H_
#include "CoreFoundation/CFBase.h"
#include "mediapipe/gpu/cv_texture_cache_manager.h"
#include "mediapipe/gpu/gpu_buffer_format.h"
#include "mediapipe/gpu/multi_pool.h"
#include "mediapipe/gpu/pixel_buffer_pool_util.h"
#include "mediapipe/objc/CFHolder.h"
namespace mediapipe {
class CvPixelBufferPoolWrapper {
public:
CvPixelBufferPoolWrapper(int width, int height, GpuBufferFormat format,
CFTimeInterval maxAge,
CvTextureCacheManager* texture_caches);
static std::shared_ptr<CvPixelBufferPoolWrapper> Create(
const internal::GpuBufferSpec& spec, const MultiPoolOptions& options,
CvTextureCacheManager* texture_caches = nullptr) {
return std::make_shared<CvPixelBufferPoolWrapper>(
spec.width, spec.height, spec.format, options.max_inactive_buffer_age,
texture_caches);
}
CFHolder<CVPixelBufferRef> GetBuffer();
int GetBufferCount() const { return count_; }
std::string GetDebugString() const;
void Flush();
static CFHolder<CVPixelBufferRef> CreateBufferWithoutPool(
const internal::GpuBufferSpec& spec);
private:
CFHolder<CVPixelBufferPoolRef> pool_;
int count_ = 0;
CvTextureCacheManager* texture_caches_;
};
} // namespace mediapipe
#endif // MEDIAPIPE_GPU_CV_PIXEL_BUFFER_POOL_WRAPPER_H_

View File

@ -0,0 +1,55 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "mediapipe/gpu/cv_texture_cache_manager.h"
#include "mediapipe/framework/port/logging.h"
namespace mediapipe {
void CvTextureCacheManager::FlushTextureCaches() {
absl::MutexLock lock(&mutex_);
for (const auto& cache : texture_caches_) {
#if TARGET_OS_OSX
CVOpenGLTextureCacheFlush(*cache, 0);
#else
CVOpenGLESTextureCacheFlush(*cache, 0);
#endif // TARGET_OS_OSX
}
}
void CvTextureCacheManager::RegisterTextureCache(CVTextureCacheType cache) {
absl::MutexLock lock(&mutex_);
CHECK(std::find(texture_caches_.begin(), texture_caches_.end(), cache) ==
texture_caches_.end())
<< "Attempting to register a texture cache twice";
texture_caches_.emplace_back(cache);
}
void CvTextureCacheManager::UnregisterTextureCache(CVTextureCacheType cache) {
absl::MutexLock lock(&mutex_);
auto it = std::find(texture_caches_.begin(), texture_caches_.end(), cache);
CHECK(it != texture_caches_.end())
<< "Attempting to unregister an unknown texture cache";
texture_caches_.erase(it);
}
CvTextureCacheManager::~CvTextureCacheManager() {
CHECK_EQ(texture_caches_.size(), 0)
<< "Failed to unregister texture caches before deleting manager";
}
} // namespace mediapipe

View File

@ -0,0 +1,49 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MEDIAPIPE_GPU_CV_TEXTURE_CACHE_MANAGER_H_
#define MEDIAPIPE_GPU_CV_TEXTURE_CACHE_MANAGER_H_
#include <vector>
#include "absl/synchronization/mutex.h"
#include "mediapipe/gpu/pixel_buffer_pool_util.h"
#include "mediapipe/objc/CFHolder.h"
namespace mediapipe {
class CvTextureCacheManager {
public:
~CvTextureCacheManager();
// TODO: add tests for the texture cache registration.
// Inform the pool of a cache that should be flushed when it is low on
// reusable buffers.
void RegisterTextureCache(CVTextureCacheType cache);
// Remove a texture cache from the list of caches to be flushed.
void UnregisterTextureCache(CVTextureCacheType cache);
void FlushTextureCaches();
private:
absl::Mutex mutex_;
std::vector<CFHolder<CVTextureCacheType>> texture_caches_
ABSL_GUARDED_BY(mutex_);
};
} // namespace mediapipe
#endif // MEDIAPIPE_GPU_CV_TEXTURE_CACHE_MANAGER_H_

View File

@ -20,38 +20,37 @@
#include "mediapipe/framework/port/canonical_errors.h"
#include "mediapipe/framework/port/ret_check.h"
#include "mediapipe/framework/port/status.h"
#include "mediapipe/gpu/gl_calculator_helper_impl.h"
#include "mediapipe/gpu/gpu_buffer.h"
#include "mediapipe/gpu/gpu_service.h"
namespace mediapipe {
// The constructor and destructor need to be defined here so that
// std::unique_ptr can see the full definition of GlCalculatorHelperImpl.
// In the header, it is an incomplete type.
GlCalculatorHelper::GlCalculatorHelper() {}
GlCalculatorHelper::~GlCalculatorHelper() {}
void GlCalculatorHelper::InitializeInternal(CalculatorContext* cc,
GpuResources* gpu_resources) {
gpu_resources_ = gpu_resources;
gl_context_ = gpu_resources_->gl_context(cc);
}
absl::Status GlCalculatorHelper::Open(CalculatorContext* cc) {
CHECK(cc);
auto gpu_service = cc->Service(kGpuService);
RET_CHECK(gpu_service.IsAvailable())
<< "GPU service not available. Did you forget to call "
"GlCalculatorHelper::UpdateContract?";
// TODO return error from impl_ (needs two-stage init)
impl_ =
absl::make_unique<GlCalculatorHelperImpl>(cc, &gpu_service.GetObject());
InitializeInternal(cc, &gpu_service.GetObject());
return absl::OkStatus();
}
void GlCalculatorHelper::InitializeForTest(GpuSharedData* gpu_shared) {
impl_ = absl::make_unique<GlCalculatorHelperImpl>(
nullptr, gpu_shared->gpu_resources.get());
InitializeInternal(nullptr, gpu_shared->gpu_resources.get());
}
void GlCalculatorHelper::InitializeForTest(GpuResources* gpu_resources) {
impl_ = absl::make_unique<GlCalculatorHelperImpl>(nullptr, gpu_resources);
InitializeInternal(nullptr, gpu_resources);
}
// static
@ -88,44 +87,109 @@ absl::Status GlCalculatorHelper::SetupInputSidePackets(
return absl::OkStatus();
}
absl::Status GlCalculatorHelper::RunInGlContext(
std::function<absl::Status(void)> gl_func,
CalculatorContext* calculator_context) {
if (calculator_context) {
return gl_context_->Run(std::move(gl_func), calculator_context->NodeId(),
calculator_context->InputTimestamp());
} else {
return gl_context_->Run(std::move(gl_func));
}
}
absl::Status GlCalculatorHelper::RunInGlContext(
std::function<absl::Status(void)> gl_func) {
if (!impl_) return absl::InternalError("helper not initialized");
if (!Initialized()) return absl::InternalError("helper not initialized");
// TODO: Remove LegacyCalculatorSupport from MediaPipe OSS.
auto calculator_context =
LegacyCalculatorSupport::Scoped<CalculatorContext>::current();
return impl_->RunInGlContext(gl_func, calculator_context);
return RunInGlContext(gl_func, calculator_context);
}
GLuint GlCalculatorHelper::framebuffer() const { return impl_->framebuffer(); }
GLuint GlCalculatorHelper::framebuffer() const { return framebuffer_; }
void GlCalculatorHelper::CreateFramebuffer() {
// Our framebuffer will have a color attachment but no depth attachment,
// so it's important that the depth test be off. It is disabled by default,
// but we wanted to be explicit.
// TODO: move this to glBindFramebuffer? Or just remove.
glDisable(GL_DEPTH_TEST);
framebuffer_ = kUtilityFramebuffer.Get(*gl_context_);
}
void GlCalculatorHelper::BindFramebuffer(const GlTexture& dst) {
return impl_->BindFramebuffer(dst);
#ifdef __ANDROID__
// On (some?) Android devices, attaching a new texture to the frame buffer
// does not seem to detach the old one. As a result, using that texture
// for texturing can produce incorrect output. See b/32091368 for details.
// To fix this, we have to call either glBindFramebuffer with a FBO id of 0
// or glFramebufferTexture2D with a texture ID of 0.
glBindFramebuffer(GL_FRAMEBUFFER, 0);
#endif
if (!framebuffer_) {
CreateFramebuffer();
}
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer_);
glViewport(0, 0, dst.width(), dst.height());
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, dst.target(),
dst.name(), 0);
#ifndef NDEBUG
GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE) {
VLOG(2) << "incomplete framebuffer: " << status;
}
#endif
}
GlTexture GlCalculatorHelper::CreateSourceTexture(
const GpuBuffer& pixel_buffer) {
return impl_->CreateSourceTexture(pixel_buffer);
GlTexture GlCalculatorHelper::MapGpuBuffer(const GpuBuffer& gpu_buffer,
GlTextureView view) {
if (gpu_buffer.format() != GpuBufferFormat::kUnknown) {
// TODO: do the params need to be reset here??
glBindTexture(view.target(), view.name());
GlTextureInfo info = GlTextureInfoForGpuBufferFormat(
gpu_buffer.format(), view.plane(), GetGlVersion());
gl_context_->SetStandardTextureParams(view.target(),
info.gl_internal_format);
glBindTexture(view.target(), 0);
}
return GlTexture(std::move(view), gpu_buffer);
}
GlTexture GlCalculatorHelper::CreateSourceTexture(const GpuBuffer& gpu_buffer) {
return CreateSourceTexture(gpu_buffer, 0);
}
GlTexture GlCalculatorHelper::CreateSourceTexture(const GpuBuffer& gpu_buffer,
int plane) {
return MapGpuBuffer(gpu_buffer, gpu_buffer.GetReadView<GlTextureView>(plane));
}
GlTexture GlCalculatorHelper::CreateSourceTexture(
const ImageFrame& image_frame) {
return impl_->CreateSourceTexture(image_frame);
}
GlTexture GlCalculatorHelper::CreateSourceTexture(const GpuBuffer& pixel_buffer,
int plane) {
return impl_->CreateSourceTexture(pixel_buffer, plane);
auto gpu_buffer = GpuBufferCopyingImageFrame(image_frame);
return MapGpuBuffer(gpu_buffer, gpu_buffer.GetReadView<GlTextureView>(0));
}
GpuBuffer GlCalculatorHelper::GpuBufferWithImageFrame(
std::shared_ptr<ImageFrame> image_frame) {
return impl_->GpuBufferWithImageFrame(std::move(image_frame));
return GpuBuffer(
std::make_shared<GpuBufferStorageImageFrame>(std::move(image_frame)));
}
GpuBuffer GlCalculatorHelper::GpuBufferCopyingImageFrame(
const ImageFrame& image_frame) {
return impl_->GpuBufferCopyingImageFrame(image_frame);
#if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
auto maybe_buffer = CreateCVPixelBufferCopyingImageFrame(image_frame);
// Converts absl::StatusOr to absl::Status since CHECK_OK() currently only
// deals with absl::Status in MediaPipe OSS.
CHECK_OK(maybe_buffer.status());
return GpuBuffer(std::move(maybe_buffer).value());
#else
return GpuBuffer(GlTextureBuffer::Create(image_frame));
#endif // !MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
}
void GlCalculatorHelper::GetGpuBufferDimensions(const GpuBuffer& pixel_buffer,
@ -136,23 +200,36 @@ void GlCalculatorHelper::GetGpuBufferDimensions(const GpuBuffer& pixel_buffer,
*height = pixel_buffer.height();
}
GlTexture GlCalculatorHelper::CreateDestinationTexture(int output_width,
int output_height,
GlTexture GlCalculatorHelper::CreateDestinationTexture(int width, int height,
GpuBufferFormat format) {
return impl_->CreateDestinationTexture(output_width, output_height, format);
}
if (!framebuffer_) {
CreateFramebuffer();
}
GlContext& GlCalculatorHelper::GetGlContext() const {
return impl_->GetGlContext();
}
GlVersion GlCalculatorHelper::GetGlVersion() const {
return impl_->GetGlVersion();
GpuBuffer gpu_buffer =
gpu_resources_->gpu_buffer_pool().GetBuffer(width, height, format);
return MapGpuBuffer(gpu_buffer, gpu_buffer.GetWriteView<GlTextureView>(0));
}
GlTexture GlCalculatorHelper::CreateSourceTexture(
const mediapipe::Image& image) {
return impl_->CreateSourceTexture(image.GetGpuBuffer());
return CreateSourceTexture(image.GetGpuBuffer());
}
template <>
std::unique_ptr<ImageFrame> GlTexture::GetFrame<ImageFrame>() const {
view_->DoneWriting();
std::shared_ptr<const ImageFrame> view =
gpu_buffer_.GetReadView<ImageFrame>();
auto copy = absl::make_unique<ImageFrame>();
copy->CopyFrom(*view, ImageFrame::kDefaultAlignmentBoundary);
return copy;
}
template <>
std::unique_ptr<GpuBuffer> GlTexture::GetFrame<GpuBuffer>() const {
view_->DoneWriting();
return absl::make_unique<GpuBuffer>(gpu_buffer_);
}
template <>

View File

@ -17,6 +17,7 @@
#include <memory>
#include "absl/base/attributes.h"
#include "absl/memory/memory.h"
#include "mediapipe/framework/calculator_context.h"
#include "mediapipe/framework/calculator_contract.h"
@ -33,7 +34,6 @@
namespace mediapipe {
class GlCalculatorHelperImpl;
class GlTexture;
class GpuResources;
struct GpuSharedData;
@ -62,6 +62,7 @@ class GlCalculatorHelper {
// Can be used to initialize the helper outside of a calculator. Useful for
// testing.
void InitializeForTest(GpuResources* gpu_resources);
ABSL_DEPRECATED("Use InitializeForTest(GpuResources)")
void InitializeForTest(GpuSharedData* gpu_shared);
// This method can be called from GetContract to set up the needed GPU
@ -70,6 +71,7 @@ class GlCalculatorHelper {
// This method can be called from FillExpectations to set the correct types
// for the shared GL input side packet(s).
ABSL_DEPRECATED("Use UpdateContract")
static absl::Status SetupInputSidePackets(PacketTypeSet* input_side_packets);
// Execute the provided function within the helper's GL context. On some
@ -161,15 +163,30 @@ class GlCalculatorHelper {
// TODO: do we need an unbind method too?
void BindFramebuffer(const GlTexture& dst);
GlContext& GetGlContext() const;
GlContext& GetGlContext() const { return *gl_context_; }
GlVersion GetGlVersion() const;
GlVersion GetGlVersion() const { return gl_context_->GetGlVersion(); }
// Check if the calculator helper has been previously initialized.
bool Initialized() { return impl_ != nullptr; }
bool Initialized() { return gpu_resources_ != nullptr; }
private:
std::unique_ptr<GlCalculatorHelperImpl> impl_;
void InitializeInternal(CalculatorContext* cc, GpuResources* gpu_resources);
absl::Status RunInGlContext(std::function<absl::Status(void)> gl_func,
CalculatorContext* calculator_context);
// Makes a GpuBuffer accessible as a texture in the GL context.
GlTexture MapGpuBuffer(const GpuBuffer& gpu_buffer, GlTextureView view);
// Create the framebuffer for rendering.
void CreateFramebuffer();
std::shared_ptr<GlContext> gl_context_;
GLuint framebuffer_ = 0;
GpuResources* gpu_resources_ = nullptr;
};
// Represents an OpenGL texture, and is a 'view' into the memory pool.
@ -201,9 +218,13 @@ class GlTexture {
void Release() { view_ = std::make_shared<GlTextureView>(); }
private:
explicit GlTexture(GlTextureView view)
: view_(std::make_shared<GlTextureView>(std::move(view))) {}
friend class GlCalculatorHelperImpl;
explicit GlTexture(GlTextureView view, GpuBuffer gpu_buffer)
: gpu_buffer_(std::move(gpu_buffer)),
view_(std::make_shared<GlTextureView>(std::move(view))) {}
friend class GlCalculatorHelper;
// We store the GpuBuffer to support GetFrame, and to ensure that the storage
// outlives the view.
GpuBuffer gpu_buffer_;
std::shared_ptr<GlTextureView> view_;
};
@ -217,12 +238,14 @@ class GlTexture {
// it is better to keep const-safety and accept having two versions of the
// same thing.
template <typename T>
ABSL_DEPRECATED("Only for legacy calculators")
auto TagOrIndex(const T& collection, const std::string& tag, int index)
-> decltype(collection.Tag(tag)) {
return collection.UsesTags() ? collection.Tag(tag) : collection.Index(index);
}
template <typename T>
ABSL_DEPRECATED("Only for legacy calculators")
auto TagOrIndex(T* collection, const std::string& tag, int index)
-> decltype(collection->Tag(tag)) {
return collection->UsesTags() ? collection->Tag(tag)
@ -230,12 +253,14 @@ auto TagOrIndex(T* collection, const std::string& tag, int index)
}
template <typename T>
ABSL_DEPRECATED("Only for legacy calculators")
bool HasTagOrIndex(const T& collection, const std::string& tag, int index) {
return collection.UsesTags() ? collection.HasTag(tag)
: index < collection.NumEntries();
}
template <typename T>
ABSL_DEPRECATED("Only for legacy calculators")
bool HasTagOrIndex(T* collection, const std::string& tag, int index) {
return collection->UsesTags() ? collection->HasTag(tag)
: index < collection->NumEntries();

View File

@ -1,82 +0,0 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MEDIAPIPE_GPU_GL_CALCULATOR_HELPER_IMPL_H_
#define MEDIAPIPE_GPU_GL_CALCULATOR_HELPER_IMPL_H_
#include "mediapipe/gpu/gl_calculator_helper.h"
#include "mediapipe/gpu/gpu_shared_data_internal.h"
#ifdef __OBJC__
#import <AVFoundation/AVFoundation.h>
#import <Foundation/Foundation.h>
#endif // __OBJC__
#ifdef __ANDROID__
#include "mediapipe/gpu/gl_texture_buffer_pool.h"
#endif
namespace mediapipe {
// This class implements the GlCalculatorHelper for iOS and Android.
// See GlCalculatorHelper for details on these methods.
class GlCalculatorHelperImpl {
 public:
  // |gpu_resources| must be non-null (it is dereferenced in the
  // constructor) and must outlive this helper.
  explicit GlCalculatorHelperImpl(CalculatorContext* cc,
                                  GpuResources* gpu_resources);
  ~GlCalculatorHelperImpl();

  // Runs |gl_func| on the helper's GL context. |calculator_context| may be
  // null; when provided, its node id and input timestamp are forwarded.
  absl::Status RunInGlContext(std::function<absl::Status(void)> gl_func,
                              CalculatorContext* calculator_context);

  GlTexture CreateSourceTexture(const ImageFrame& image_frame);
  GlTexture CreateSourceTexture(const GpuBuffer& gpu_buffer);

  // Note: multi-plane support is currently only available on iOS.
  GlTexture CreateSourceTexture(const GpuBuffer& gpu_buffer, int plane);

  // Creates a framebuffer and returns the texture that it is bound to.
  GlTexture CreateDestinationTexture(int output_width, int output_height,
                                     GpuBufferFormat format);

  // Wraps an ImageFrame as a GpuBuffer's storage without copying pixels.
  GpuBuffer GpuBufferWithImageFrame(std::shared_ptr<ImageFrame> image_frame);
  // Creates a GpuBuffer holding a copy of image_frame's pixel data.
  GpuBuffer GpuBufferCopyingImageFrame(const ImageFrame& image_frame);

  GLuint framebuffer() const { return framebuffer_; }
  void BindFramebuffer(const GlTexture& dst);

  GlVersion GetGlVersion() const { return gl_context_->GetGlVersion(); }

  GlContext& GetGlContext() const;

  // For internal use.
  static void ReadTexture(const GlTextureView& view, void* output, size_t size);

 private:
  // Makes a GpuBuffer accessible as a texture in the GL context.
  GlTexture MapGpuBuffer(const GpuBuffer& gpu_buffer, GlTextureView view);

  // Create the framebuffer for rendering.
  void CreateFramebuffer();

  std::shared_ptr<GlContext> gl_context_;
  // Lazily created by CreateFramebuffer(); 0 until then.
  GLuint framebuffer_ = 0;
  GpuResources& gpu_resources_;
};
} // namespace mediapipe
#endif // MEDIAPIPE_GPU_GL_CALCULATOR_HELPER_IMPL_H_

View File

@ -1,178 +0,0 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <memory>
#include "absl/memory/memory.h"
#include "mediapipe/framework/formats/image_frame.h"
#include "mediapipe/gpu/gl_calculator_helper_impl.h"
#include "mediapipe/gpu/gpu_buffer_format.h"
#include "mediapipe/gpu/gpu_shared_data_internal.h"
#include "mediapipe/gpu/image_frame_view.h"
namespace mediapipe {
// Binds the helper to |gpu_resources| (must be non-null: dereferenced here)
// and caches the GL context associated with the given calculator context.
GlCalculatorHelperImpl::GlCalculatorHelperImpl(CalculatorContext* cc,
                                               GpuResources* gpu_resources)
    : gpu_resources_(*gpu_resources) {
  gl_context_ = gpu_resources_.gl_context(cc);
}
// Deletes the lazily-created framebuffer (if any) on the GL context.
// The status is ignored: there is no way to report errors from a destructor.
GlCalculatorHelperImpl::~GlCalculatorHelperImpl() {
  RunInGlContext(
      [this] {
        if (framebuffer_) {
          glDeleteFramebuffers(1, &framebuffer_);
          framebuffer_ = 0;
        }
        return absl::OkStatus();
      },
      /*calculator_context=*/nullptr)
      .IgnoreError();
}
// Returns the GL context this helper was initialized with.
GlContext& GlCalculatorHelperImpl::GetGlContext() const { return *gl_context_; }
// Runs |gl_func| on this helper's GL context. When a CalculatorContext is
// supplied, its node id and input timestamp are forwarded to the context so
// the work can be attributed to the calling node.
absl::Status GlCalculatorHelperImpl::RunInGlContext(
    std::function<absl::Status(void)> gl_func,
    CalculatorContext* calculator_context) {
  if (!calculator_context) {
    return gl_context_->Run(std::move(gl_func));
  }
  return gl_context_->Run(std::move(gl_func), calculator_context->NodeId(),
                          calculator_context->InputTimestamp());
}
// Creates the framebuffer object used by BindFramebuffer. Called lazily on
// first use.
void GlCalculatorHelperImpl::CreateFramebuffer() {
  // Our framebuffer will have a color attachment but no depth attachment,
  // so it's important that the depth test be off. It is disabled by default,
  // but we wanted to be explicit.
  // TODO: move this to glBindFramebuffer?
  glDisable(GL_DEPTH_TEST);
  glGenFramebuffers(1, &framebuffer_);
}
// Binds the helper's framebuffer (creating it on first use), attaches |dst|
// as the color attachment, and sets the viewport to |dst|'s dimensions.
void GlCalculatorHelperImpl::BindFramebuffer(const GlTexture& dst) {
#ifdef __ANDROID__
  // On (some?) Android devices, attaching a new texture to the frame buffer
  // does not seem to detach the old one. As a result, using that texture
  // for texturing can produce incorrect output. See b/32091368 for details.
  // To fix this, we have to call either glBindFramebuffer with a FBO id of 0
  // or glFramebufferTexture2D with a texture ID of 0.
  glBindFramebuffer(GL_FRAMEBUFFER, 0);
#endif
  if (!framebuffer_) {
    CreateFramebuffer();
  }
  glBindFramebuffer(GL_FRAMEBUFFER, framebuffer_);
  glViewport(0, 0, dst.width(), dst.height());
  glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, dst.target(),
                         dst.name(), 0);
#ifndef NDEBUG
  // Completeness check is debug-only; it costs a driver round-trip.
  GLenum status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
  if (status != GL_FRAMEBUFFER_COMPLETE) {
    VLOG(2) << "incomplete framebuffer: " << status;
  }
#endif
}
// Wraps |view| into a GlTexture. For known formats, first applies the GL
// context's standard texture parameters for the buffer's format (skipped for
// kUnknown, where no format info is available).
GlTexture GlCalculatorHelperImpl::MapGpuBuffer(const GpuBuffer& gpu_buffer,
                                               GlTextureView view) {
  if (gpu_buffer.format() != GpuBufferFormat::kUnknown) {
    // TODO: do the params need to be reset here??
    glBindTexture(view.target(), view.name());
    GlTextureInfo info = GlTextureInfoForGpuBufferFormat(
        gpu_buffer.format(), view.plane(), GetGlVersion());
    gl_context_->SetStandardTextureParams(view.target(),
                                          info.gl_internal_format);
    // Leave no texture bound after setting the parameters.
    glBindTexture(view.target(), 0);
  }
  return GlTexture(std::move(view));
}
// Convenience overload: maps plane 0 of |gpu_buffer| for reading.
GlTexture GlCalculatorHelperImpl::CreateSourceTexture(
    const GpuBuffer& gpu_buffer) {
  return CreateSourceTexture(gpu_buffer, 0);
}
// Maps the given plane of |gpu_buffer| for reading in the GL context.
GlTexture GlCalculatorHelperImpl::CreateSourceTexture(
    const GpuBuffer& gpu_buffer, int plane) {
  return MapGpuBuffer(gpu_buffer, gpu_buffer.GetReadView<GlTextureView>(plane));
}
// Copies |image_frame| into a new GpuBuffer, then maps plane 0 for reading.
GlTexture GlCalculatorHelperImpl::CreateSourceTexture(
    const ImageFrame& image_frame) {
  auto gpu_buffer = GpuBufferCopyingImageFrame(image_frame);
  return MapGpuBuffer(gpu_buffer, gpu_buffer.GetReadView<GlTextureView>(0));
}
// Wraps |image_frame| as a GpuBuffer's storage without copying pixel data;
// the shared_ptr keeps the frame alive for the buffer's lifetime.
GpuBuffer GlCalculatorHelperImpl::GpuBufferWithImageFrame(
    std::shared_ptr<ImageFrame> image_frame) {
  return GpuBuffer(
      std::make_shared<GpuBufferStorageImageFrame>(std::move(image_frame)));
}
// Creates a GpuBuffer containing a copy of |image_frame|'s pixel data.
// When CVPixelBuffer-backed buffers are enabled the copy goes through a
// CVPixelBuffer; otherwise a GL texture buffer is created.
GpuBuffer GlCalculatorHelperImpl::GpuBufferCopyingImageFrame(
    const ImageFrame& image_frame) {
#if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
  auto maybe_buffer = CreateCVPixelBufferCopyingImageFrame(image_frame);
  // Converts absl::StatusOr to absl::Status since CHECK_OK() currently only
  // deals with absl::Status in MediaPipe OSS.
  CHECK_OK(maybe_buffer.status());
  return GpuBuffer(std::move(maybe_buffer).value());
#else
  return GpuBuffer(GlTextureBuffer::Create(image_frame));
#endif  // !MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
}
// Returns a CPU-side copy of the texture's contents as an ImageFrame.
template <>
std::unique_ptr<ImageFrame> GlTexture::GetFrame<ImageFrame>() const {
  // Signal that writing through this view is complete before reading back.
  view_->DoneWriting();
  std::shared_ptr<const ImageFrame> view =
      view_->gpu_buffer().GetReadView<ImageFrame>();
  auto copy = absl::make_unique<ImageFrame>();
  copy->CopyFrom(*view, ImageFrame::kDefaultAlignmentBoundary);
  return copy;
}
// Returns the underlying GpuBuffer (no pixel copy; shares the storage).
template <>
std::unique_ptr<GpuBuffer> GlTexture::GetFrame<GpuBuffer>() const {
  auto gpu_buffer = view_->gpu_buffer();
#ifdef __EMSCRIPTEN__
  // When WebGL is used, the GL context may be spontaneously lost which can
  // cause GpuBuffer allocations to fail. In that case, return a dummy buffer
  // to allow processing of the current frame complete.
  if (!gpu_buffer) {
    return std::make_unique<GpuBuffer>();
  }
#endif  // __EMSCRIPTEN__
  // Signal that writing through this view is complete before handing out
  // the buffer.
  view_->DoneWriting();
  return absl::make_unique<GpuBuffer>(gpu_buffer);
}
// Gets a buffer of the requested size/format from the GPU buffer pool and
// maps it for writing. Ensures the framebuffer exists first (see the header:
// the returned texture is intended to be bound to it).
GlTexture GlCalculatorHelperImpl::CreateDestinationTexture(
    int width, int height, GpuBufferFormat format) {
  if (!framebuffer_) {
    CreateFramebuffer();
  }
  GpuBuffer gpu_buffer =
      gpu_resources_.gpu_buffer_pool().GetBuffer(width, height, format);
  return MapGpuBuffer(gpu_buffer, gpu_buffer.GetWriteView<GlTextureView>(0));
}
} // namespace mediapipe

View File

@ -290,8 +290,15 @@ absl::Status GlContext::FinishInitialization(bool create_thread) {
// some Emscripten cases), there might be some existing tripped error.
ForceClearExistingGlErrors();
absl::string_view version_string(
reinterpret_cast<const char*>(glGetString(GL_VERSION)));
absl::string_view version_string;
const GLubyte* version_string_ptr = glGetString(GL_VERSION);
if (version_string_ptr != nullptr) {
version_string = reinterpret_cast<const char*>(version_string_ptr);
} else {
// This may happen when using SwiftShader, but the numeric versions are
// available and will be used instead.
LOG(WARNING) << "failed to get GL_VERSION string";
}
// We will decide later whether we want to use the version numbers we query
// for, or instead derive that information from the context creation result,
@ -333,7 +340,7 @@ absl::Status GlContext::FinishInitialization(bool create_thread) {
}
LOG(INFO) << "GL version: " << gl_major_version_ << "." << gl_minor_version_
<< " (" << glGetString(GL_VERSION) << ")";
<< " (" << version_string << ")";
{
auto status = GetGlExtensions();
if (!status.ok()) {
@ -826,10 +833,14 @@ std::shared_ptr<GlSyncPoint> GlContext::CreateSyncToken() {
return token;
}
bool GlContext::IsAnyContextCurrent() {
PlatformGlContext GlContext::GetCurrentNativeContext() {
ContextBinding ctx;
GetCurrentContextBinding(&ctx);
return ctx.context != kPlatformGlContextNone;
return ctx.context;
}
bool GlContext::IsAnyContextCurrent() {
return GetCurrentNativeContext() != kPlatformGlContextNone;
}
std::shared_ptr<GlSyncPoint>
@ -1043,4 +1054,16 @@ void GlContext::SetStandardTextureParams(GLenum target, GLint internal_format) {
glTexParameteri(target, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);
}
const GlContext::Attachment<GLuint> kUtilityFramebuffer(
[](GlContext&) -> GlContext::Attachment<GLuint>::Ptr {
GLuint framebuffer;
glGenFramebuffers(1, &framebuffer);
if (!framebuffer) return nullptr;
return {new GLuint(framebuffer), [](void* ptr) {
GLuint* fb = static_cast<GLuint*>(ptr);
glDeleteFramebuffers(1, fb);
delete fb;
}};
});
} // namespace mediapipe

View File

@ -307,6 +307,10 @@ class GlContext : public std::enable_shared_from_this<GlContext> {
// the GlContext class, is current.
static bool IsAnyContextCurrent();
// Returns the current native context, whether managed by this class or not.
// Useful as a cross-platform way to get the current PlatformGlContext.
static PlatformGlContext GetCurrentNativeContext();
// Creates a synchronization token for the current, non-GlContext-owned
// context. This can be passed to MediaPipe so it can synchronize with the
// commands issued in the external context up to this point.
@ -470,6 +474,12 @@ class GlContext : public std::enable_shared_from_this<GlContext> {
bool destructing_ = false;
};
// A framebuffer that the framework can use to attach textures for rendering
// etc.
// This could just be a member of GlContext, but it serves as a basic example
// of an attachment.
ABSL_CONST_INIT extern const GlContext::Attachment<GLuint> kUtilityFramebuffer;
// For backward compatibility. TODO: migrate remaining callers.
ABSL_DEPRECATED(
"Prefer passing an explicit GlVersion argument (use "

View File

@ -37,7 +37,6 @@ enum { kAttribVertex, kAttribTexturePosition, kNumberOfAttributes };
// VIDEO or index 0: GpuBuffers to be rendered.
// Side inputs:
// SURFACE: unique_ptr to an EglSurfaceHolder to draw to.
// GPU_SHARED: shared GPU resources.
//
// See GlSurfaceSinkCalculatorOptions for options.
class GlSurfaceSinkCalculator : public Node {

View File

@ -15,9 +15,15 @@
#include "mediapipe/gpu/gl_texture_buffer.h"
#include "mediapipe/framework/formats/image_frame.h"
#include "mediapipe/gpu/gl_context.h"
#include "mediapipe/gpu/gl_texture_view.h"
#include "mediapipe/gpu/gpu_buffer_storage_image_frame.h"
#if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
#include "mediapipe/gpu/gl_texture_util.h"
#include "mediapipe/gpu/gpu_buffer_storage_cv_pixel_buffer.h"
#endif // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
namespace mediapipe {
std::unique_ptr<GlTextureBuffer> GlTextureBuffer::Wrap(
@ -250,39 +256,46 @@ void GlTextureBuffer::WaitForConsumersOnGpu() {
// precisely, on only one GL context.
}
GlTextureView GlTextureBuffer::GetReadView(
internal::types<GlTextureView>, std::shared_ptr<GpuBuffer> gpu_buffer,
int plane) const {
GlTextureView GlTextureBuffer::GetReadView(internal::types<GlTextureView>,
int plane) const {
auto gl_context = GlContext::GetCurrent();
CHECK(gl_context);
CHECK_EQ(plane, 0);
// Note that this method is only supposed to be called by GpuBuffer, which
// ensures this condition is satisfied.
DCHECK(!weak_from_this().expired())
<< "GlTextureBuffer must be held in shared_ptr to get a GlTextureView";
// Insert wait call to sync with the producer.
WaitOnGpu();
GlTextureView::DetachFn detach = [this](GlTextureView& texture) {
// Inform the GlTextureBuffer that we have finished accessing its
// contents, and create a consumer sync point.
DidRead(texture.gl_context()->CreateSyncToken());
};
GlTextureView::DetachFn detach =
[texbuf = shared_from_this()](GlTextureView& texture) {
// Inform the GlTextureBuffer that we have finished accessing its
// contents, and create a consumer sync point.
texbuf->DidRead(texture.gl_context()->CreateSyncToken());
};
return GlTextureView(gl_context.get(), target(), name(), width(), height(),
std::move(gpu_buffer), plane, std::move(detach),
nullptr);
plane, std::move(detach), nullptr);
}
GlTextureView GlTextureBuffer::GetWriteView(
internal::types<GlTextureView>, std::shared_ptr<GpuBuffer> gpu_buffer,
int plane) {
GlTextureView GlTextureBuffer::GetWriteView(internal::types<GlTextureView>,
int plane) {
auto gl_context = GlContext::GetCurrent();
CHECK(gl_context);
CHECK_EQ(plane, 0);
// Note that this method is only supposed to be called by GpuBuffer, which
// ensures this condition is satisfied.
DCHECK(!weak_from_this().expired())
<< "GlTextureBuffer must be held in shared_ptr to get a GlTextureView";
// Insert wait call to sync with the producer.
WaitOnGpu();
Reuse(); // TODO: the producer wait should probably be part of Reuse in the
// case when there are no consumers.
GlTextureView::DoneWritingFn done_writing =
[this](const GlTextureView& texture) { ViewDoneWriting(texture); };
[texbuf = shared_from_this()](const GlTextureView& texture) {
texbuf->ViewDoneWriting(texture);
};
return GlTextureView(gl_context.get(), target(), name(), width(), height(),
std::move(gpu_buffer), plane, nullptr,
std::move(done_writing));
plane, nullptr, std::move(done_writing));
}
void GlTextureBuffer::ViewDoneWriting(const GlTextureView& view) {
@ -321,8 +334,8 @@ void GlTextureBuffer::ViewDoneWriting(const GlTextureView& view) {
#endif // __ANDROID__
}
static void ReadTexture(const GlTextureView& view, GpuBufferFormat format,
void* output, size_t size) {
static void ReadTexture(GlContext& ctx, const GlTextureView& view,
GpuBufferFormat format, void* output, size_t size) {
// TODO: check buffer size? We could use glReadnPixels where available
// (OpenGL ES 3.2, i.e. nowhere). Note that, to fully check that the read
// won't overflow the buffer with glReadPixels, we'd also need to check or
@ -332,13 +345,7 @@ static void ReadTexture(const GlTextureView& view, GpuBufferFormat format,
GlTextureInfo info = GlTextureInfoForGpuBufferFormat(
format, view.plane(), view.gl_context()->GetGlVersion());
GLint previous_fbo;
glGetIntegerv(GL_FRAMEBUFFER_BINDING, &previous_fbo);
// We use a temp fbo to avoid depending on the app having an existing one.
// TODO: keep a utility fbo around in the context?
GLuint fbo = 0;
glGenFramebuffers(1, &fbo);
GLuint fbo = kUtilityFramebuffer.Get(ctx);
glBindFramebuffer(GL_FRAMEBUFFER, fbo);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, view.target(),
view.name(), 0);
@ -346,9 +353,7 @@ static void ReadTexture(const GlTextureView& view, GpuBufferFormat format,
output);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, GL_TEXTURE_2D, 0,
0);
// TODO: just set the binding to 0 to avoid the get call?
glBindFramebuffer(GL_FRAMEBUFFER, previous_fbo);
glDeleteFramebuffers(1, &fbo);
glBindFramebuffer(GL_FRAMEBUFFER, 0);
}
static std::shared_ptr<GpuBufferStorageImageFrame> ConvertToImageFrame(
@ -358,9 +363,11 @@ static std::shared_ptr<GpuBufferStorageImageFrame> ConvertToImageFrame(
auto output =
absl::make_unique<ImageFrame>(image_format, buf->width(), buf->height(),
ImageFrame::kGlDefaultAlignmentBoundary);
buf->GetProducerContext()->Run([buf, &output] {
auto view = buf->GetReadView(internal::types<GlTextureView>{}, nullptr, 0);
ReadTexture(view, buf->format(), output->MutablePixelData(),
auto ctx = GlContext::GetCurrent();
if (!ctx) ctx = buf->GetProducerContext();
ctx->Run([buf, &output, &ctx] {
auto view = buf->GetReadView(internal::types<GlTextureView>{}, /*plane=*/0);
ReadTexture(*ctx, view, buf->format(), output->MutablePixelData(),
output->PixelDataSize());
});
return std::make_shared<GpuBufferStorageImageFrame>(std::move(output));
@ -380,4 +387,30 @@ static auto kConverterRegistration2 =
.RegisterConverter<GpuBufferStorageImageFrame, GlTextureBuffer>(
ConvertFromImageFrame);
#if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
static std::shared_ptr<GpuBufferStorageCvPixelBuffer> ConvertToCvPixelBuffer(
std::shared_ptr<GlTextureBuffer> buf) {
auto output = absl::make_unique<GpuBufferStorageCvPixelBuffer>(
buf->width(), buf->height(), buf->format());
auto ctx = GlContext::GetCurrent();
if (!ctx) ctx = buf->GetProducerContext();
ctx->Run([buf, &output] {
TempGlFramebuffer framebuffer;
auto src = buf->GetReadView(internal::types<GlTextureView>{}, /*plane=*/0);
auto dst =
output->GetWriteView(internal::types<GlTextureView>{}, /*plane=*/0);
CopyGlTexture(src, dst);
glFlush();
});
return output;
}
static auto kConverterRegistrationCvpb =
internal::GpuBufferStorageRegistry::Get()
.RegisterConverter<GlTextureBuffer, GpuBufferStorageCvPixelBuffer>(
ConvertToCvPixelBuffer);
#endif // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
} // namespace mediapipe

View File

@ -35,7 +35,8 @@ class GlCalculatorHelperImpl;
// Implements a GPU memory buffer as an OpenGL texture. For internal use.
class GlTextureBuffer
: public internal::GpuBufferStorageImpl<
GlTextureBuffer, internal::ViewProvider<GlTextureView>> {
GlTextureBuffer, internal::ViewProvider<GlTextureView>>,
public std::enable_shared_from_this<GlTextureBuffer> {
public:
// This is called when the texture buffer is deleted. It is passed a sync
// token created at that time on the GlContext. If the GlTextureBuffer has
@ -71,6 +72,11 @@ class GlTextureBuffer
// Create a texture with a copy of the data in image_frame.
static std::unique_ptr<GlTextureBuffer> Create(const ImageFrame& image_frame);
static std::unique_ptr<GlTextureBuffer> Create(
const internal::GpuBufferSpec& spec) {
return Create(spec.width, spec.height, spec.format);
}
// Wraps an existing texture, but does not take ownership of it.
// deletion_callback is invoked when the GlTextureBuffer is released, so
// the caller knows that the texture is no longer in use.
@ -90,10 +96,8 @@ class GlTextureBuffer
GpuBufferFormat format() const { return format_; }
GlTextureView GetReadView(internal::types<GlTextureView>,
std::shared_ptr<GpuBuffer> gpu_buffer,
int plane) const override;
GlTextureView GetWriteView(internal::types<GlTextureView>,
std::shared_ptr<GpuBuffer> gpu_buffer,
int plane) override;
// If this texture is going to be used outside of the context that produced
@ -138,6 +142,10 @@ class GlTextureBuffer
return producer_context_;
}
#if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
static constexpr bool kDisableGpuBufferRegistration = true;
#endif // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
private:
// Creates a texture of dimensions width x height and allocates space for it.
// If data is provided, it is uploaded to the texture; otherwise, it can be

View File

@ -16,79 +16,4 @@
#include "absl/synchronization/mutex.h"
namespace mediapipe {
// Creates a pool that hands out width x height buffers of the given format,
// retaining up to keep_count returned buffers for reuse (see TrimAvailable).
GlTextureBufferPool::GlTextureBufferPool(int width, int height,
                                         GpuBufferFormat format, int keep_count)
    : width_(width),
      height_(height),
      format_(format),
      keep_count_(keep_count) {}
// Returns a buffer of the pool's fixed dimensions/format, reusing one from
// the available list when possible and allocating a new one otherwise.
// Returns nullptr if a fresh allocation fails. The returned shared_ptr's
// deleter returns the buffer to the pool (or frees it if the pool is gone).
GlTextureBufferSharedPtr GlTextureBufferPool::GetBuffer() {
  std::unique_ptr<GlTextureBuffer> buffer;
  bool reuse = false;
  {
    absl::MutexLock lock(&mutex_);
    if (available_.empty()) {
      buffer = GlTextureBuffer::Create(width_, height_, format_);
      if (!buffer) return nullptr;
    } else {
      // Pop the most recently returned buffer.
      buffer = std::move(available_.back());
      available_.pop_back();
      reuse = true;
    }
    ++in_use_count_;
  }
  // This needs to wait on consumer sync points, therefore it should not be
  // done while holding the mutex.
  if (reuse) {
    buffer->Reuse();
  }
  // Return a shared_ptr with a custom deleter that adds the buffer back
  // to our available list. A weak_ptr is captured so a released buffer does
  // not keep the pool alive.
  std::weak_ptr<GlTextureBufferPool> weak_pool(shared_from_this());
  return std::shared_ptr<GlTextureBuffer>(
      buffer.release(), [weak_pool](GlTextureBuffer* buf) {
        auto pool = weak_pool.lock();
        if (pool) {
          pool->Return(absl::WrapUnique(buf));
        } else {
          // Pool destroyed before the buffer: just free it.
          delete buf;
        }
      });
}
std::pair<int, int> GlTextureBufferPool::GetInUseAndAvailableCounts() {
absl::MutexLock lock(&mutex_);
return {in_use_count_, available_.size()};
}
void GlTextureBufferPool::Return(std::unique_ptr<GlTextureBuffer> buf) {
std::vector<std::unique_ptr<GlTextureBuffer>> trimmed;
{
absl::MutexLock lock(&mutex_);
--in_use_count_;
available_.emplace_back(std::move(buf));
TrimAvailable(&trimmed);
}
// The trimmed buffers will be released without holding the lock.
}
void GlTextureBufferPool::TrimAvailable(
std::vector<std::unique_ptr<GlTextureBuffer>>* trimmed) {
int keep = std::max(keep_count_ - in_use_count_, 0);
if (available_.size() > keep) {
auto trim_it = std::next(available_.begin(), keep);
if (trimmed) {
std::move(trim_it, available_.end(), std::back_inserter(*trimmed));
}
available_.erase(trim_it, available_.end());
}
}
} // namespace mediapipe
namespace mediapipe {} // namespace mediapipe

View File

@ -23,11 +23,12 @@
#include "absl/synchronization/mutex.h"
#include "mediapipe/gpu/gl_texture_buffer.h"
#include "mediapipe/gpu/multi_pool.h"
#include "mediapipe/gpu/reusable_pool.h"
namespace mediapipe {
class GlTextureBufferPool
: public std::enable_shared_from_this<GlTextureBufferPool> {
class GlTextureBufferPool : public ReusablePool<GlTextureBuffer> {
public:
// Creates a pool. This pool will manage buffers of the specified dimensions,
// and will keep keep_count buffers around for reuse.
@ -36,42 +37,32 @@ class GlTextureBufferPool
static std::shared_ptr<GlTextureBufferPool> Create(int width, int height,
GpuBufferFormat format,
int keep_count) {
return std::shared_ptr<GlTextureBufferPool>(
new GlTextureBufferPool(width, height, format, keep_count));
return Create({width, height, format}, {.keep_count = keep_count});
}
// Obtains a buffers. May either be reused or created anew.
// A GlContext must be current when this is called.
GlTextureBufferSharedPtr GetBuffer();
static std::shared_ptr<GlTextureBufferPool> Create(
const internal::GpuBufferSpec& spec, const MultiPoolOptions& options) {
return std::shared_ptr<GlTextureBufferPool>(
new GlTextureBufferPool(spec, options));
}
int width() const { return width_; }
int height() const { return height_; }
GpuBufferFormat format() const { return format_; }
int width() const { return spec_.width; }
int height() const { return spec_.height; }
GpuBufferFormat format() const { return spec_.format; }
// This method is meant for testing.
std::pair<int, int> GetInUseAndAvailableCounts();
static GlTextureBufferSharedPtr CreateBufferWithoutPool(
const internal::GpuBufferSpec& spec) {
return GlTextureBuffer::Create(spec);
}
private:
GlTextureBufferPool(int width, int height, GpuBufferFormat format,
int keep_count);
protected:
GlTextureBufferPool(const internal::GpuBufferSpec& spec,
const MultiPoolOptions& options)
: ReusablePool<GlTextureBuffer>(
[this] { return GlTextureBuffer::Create(spec_); }, options),
spec_(spec) {}
// Return a buffer to the pool.
void Return(std::unique_ptr<GlTextureBuffer> buf);
// If the total number of buffers is greater than keep_count, destroys any
// surplus buffers that are no longer in use.
void TrimAvailable(std::vector<std::unique_ptr<GlTextureBuffer>>* trimmed)
ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_);
const int width_;
const int height_;
const GpuBufferFormat format_;
const int keep_count_;
absl::Mutex mutex_;
int in_use_count_ ABSL_GUARDED_BY(mutex_) = 0;
std::vector<std::unique_ptr<GlTextureBuffer>> available_
ABSL_GUARDED_BY(mutex_);
const internal::GpuBufferSpec spec_;
};
} // namespace mediapipe

View File

@ -0,0 +1,30 @@
#include "mediapipe/gpu/gl_texture_util.h"
namespace mediapipe {
void CopyGlTexture(const GlTextureView& src, GlTextureView& dst) {
glViewport(0, 0, src.width(), src.height());
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, src.target(),
src.name(), 0);
glActiveTexture(GL_TEXTURE0);
glBindTexture(dst.target(), dst.name());
glCopyTexSubImage2D(dst.target(), 0, 0, 0, 0, 0, dst.width(), dst.height());
glBindTexture(dst.target(), 0);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, src.target(), 0,
0);
}
void FillGlTextureRgba(GlTextureView& view, float r, float g, float b,
float a) {
glViewport(0, 0, view.width(), view.height());
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, view.target(),
view.name(), 0);
glClearColor(r, g, b, a);
glClear(GL_COLOR_BUFFER_BIT);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, view.target(), 0,
0);
}
} // namespace mediapipe

View File

@ -0,0 +1,34 @@
#ifndef MEDIAPIPE_GPU_GL_TEXTURE_UTIL_H_
#define MEDIAPIPE_GPU_GL_TEXTURE_UTIL_H_
#include "mediapipe/gpu/gl_base.h"
#include "mediapipe/gpu/gl_texture_view.h"
namespace mediapipe {
// Copies a texture to another.
// Assumes a framebuffer is already set up
void CopyGlTexture(const GlTextureView& src, GlTextureView& dst);
// Fills a texture with a color.
void FillGlTextureRgba(GlTextureView& view, float r, float g, float b, float a);
// RAII class to set up a temporary framebuffer. Mainly for test use.
class TempGlFramebuffer {
public:
TempGlFramebuffer() {
glGenFramebuffers(1, &framebuffer_);
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer_);
}
~TempGlFramebuffer() {
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glDeleteFramebuffers(1, &framebuffer_);
}
private:
GLuint framebuffer_;
};
} // namespace mediapipe
#endif // MEDIAPIPE_GPU_GL_TEXTURE_UTIL_H_

View File

@ -7,7 +7,6 @@ void GlTextureView::Release() {
if (detach_) detach_(*this);
detach_ = nullptr;
gl_context_ = nullptr;
gpu_buffer_ = nullptr;
plane_ = 0;
name_ = 0;
width_ = 0;

View File

@ -25,8 +25,6 @@
namespace mediapipe {
class GlContext;
class GlTextureViewManager;
class GpuBuffer;
class GlTextureView {
public:
@ -43,7 +41,6 @@ class GlTextureView {
name_ = other.name_;
width_ = other.width_;
height_ = other.height_;
gpu_buffer_ = std::move(other.gpu_buffer_);
plane_ = other.plane_;
detach_ = std::exchange(other.detach_, nullptr);
done_writing_ = std::exchange(other.done_writing_, nullptr);
@ -55,26 +52,23 @@ class GlTextureView {
int height() const { return height_; }
GLenum target() const { return target_; }
GLuint name() const { return name_; }
const GpuBuffer& gpu_buffer() const { return *gpu_buffer_; }
int plane() const { return plane_; }
using DetachFn = std::function<void(GlTextureView&)>;
using DoneWritingFn = std::function<void(const GlTextureView&)>;
private:
friend class GpuBuffer;
friend class GlTextureBuffer;
friend class GpuBufferStorageCvPixelBuffer;
friend class GpuBufferStorageAhwb;
GlTextureView(GlContext* context, GLenum target, GLuint name, int width,
int height, std::shared_ptr<GpuBuffer> gpu_buffer, int plane,
DetachFn detach, DoneWritingFn done_writing)
int height, int plane, DetachFn detach,
DoneWritingFn done_writing)
: gl_context_(context),
target_(target),
name_(name),
width_(width),
height_(height),
gpu_buffer_(std::move(gpu_buffer)),
plane_(plane),
detach_(std::move(detach)),
done_writing_(std::move(done_writing)) {}
@ -93,7 +87,6 @@ class GlTextureView {
// Note: when scale is not 1, we still give the nominal size of the image.
int width_ = 0;
int height_ = 0;
std::shared_ptr<GpuBuffer> gpu_buffer_; // using shared_ptr temporarily
int plane_ = 0;
DetachFn detach_;
mutable DoneWritingFn done_writing_;
@ -112,12 +105,8 @@ class ViewProvider<GlTextureView> {
// the same view implement the same signature.
// Note that we allow different views to have custom signatures, providing
// additional view-specific arguments that may be needed.
virtual GlTextureView GetReadView(types<GlTextureView>,
std::shared_ptr<GpuBuffer> gpu_buffer,
int plane) const = 0;
virtual GlTextureView GetWriteView(types<GlTextureView>,
std::shared_ptr<GpuBuffer> gpu_buffer,
int plane) = 0;
virtual GlTextureView GetReadView(types<GlTextureView>, int plane) const = 0;
virtual GlTextureView GetWriteView(types<GlTextureView>, int plane) = 0;
};
} // namespace internal

View File

@ -1,6 +1,7 @@
#include "mediapipe/gpu/gpu_buffer.h"
#include <memory>
#include <utility>
#include "absl/strings/str_cat.h"
#include "absl/strings/str_join.h"
@ -29,7 +30,7 @@ std::string GpuBuffer::DebugString() const {
"]");
}
internal::GpuBufferStorage& GpuBuffer::GetStorageForView(
internal::GpuBufferStorage* GpuBuffer::GetStorageForView(
TypeId view_provider_type, bool for_writing) const {
const std::shared_ptr<internal::GpuBufferStorage>* chosen_storage = nullptr;
@ -45,45 +46,58 @@ internal::GpuBufferStorage& GpuBuffer::GetStorageForView(
// TODO: choose best conversion.
if (!chosen_storage) {
for (const auto& s : storages_) {
auto converter = internal::GpuBufferStorageRegistry::Get()
.StorageConverterForViewProvider(view_provider_type,
s->storage_type());
if (converter) {
storages_.push_back(converter(s));
chosen_storage = &storages_.back();
if (auto converter = internal::GpuBufferStorageRegistry::Get()
.StorageConverterForViewProvider(
view_provider_type, s->storage_type())) {
if (auto new_storage = converter(s)) {
storages_.push_back(new_storage);
chosen_storage = &storages_.back();
break;
}
}
}
}
if (for_writing) {
if (!chosen_storage) {
// Allocate a new storage supporting the requested view.
auto factory = internal::GpuBufferStorageRegistry::Get()
.StorageFactoryForViewProvider(view_provider_type);
if (factory) {
storages_ = {factory(width(), height(), format())};
chosen_storage = &storages_.back();
}
} else {
if (chosen_storage) {
// Discard all other storages.
storages_ = {*chosen_storage};
chosen_storage = &storages_.back();
} else {
// Allocate a new storage supporting the requested view.
if (auto factory =
internal::GpuBufferStorageRegistry::Get()
.StorageFactoryForViewProvider(view_provider_type)) {
if (auto new_storage = factory(width(), height(), format())) {
storages_ = {std::move(new_storage)};
chosen_storage = &storages_.back();
}
}
}
}
return chosen_storage ? chosen_storage->get() : nullptr;
}
internal::GpuBufferStorage& GpuBuffer::GetStorageForViewOrDie(
TypeId view_provider_type, bool for_writing) const {
auto* chosen_storage =
GpuBuffer::GetStorageForView(view_provider_type, for_writing);
CHECK(chosen_storage) << "no view provider found for requested view "
<< view_provider_type.name() << "; storages available: "
<< absl::StrJoin(storages_, ", ",
StorageTypeFormatter());
DCHECK((*chosen_storage)->can_down_cast_to(view_provider_type));
return **chosen_storage;
DCHECK(chosen_storage->can_down_cast_to(view_provider_type));
return *chosen_storage;
}
#if !MEDIAPIPE_DISABLE_GPU
#if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
CVPixelBufferRef GetCVPixelBufferRef(const GpuBuffer& buffer) {
auto p = buffer.internal_storage<GpuBufferStorageCvPixelBuffer>();
if (p) return **p;
if (buffer.GetStorageForView(
kTypeId<internal::ViewProvider<CVPixelBufferRef>>,
/*for_writing=*/false) != nullptr) {
return *buffer.GetReadView<CVPixelBufferRef>();
}
return nullptr;
}
#endif // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER

View File

@ -105,18 +105,16 @@ class GpuBuffer {
// specific view type; see the corresponding ViewProvider.
template <class View, class... Args>
decltype(auto) GetReadView(Args... args) const {
return GetViewProvider<View>(false)->GetReadView(
internal::types<View>{}, std::make_shared<GpuBuffer>(*this),
std::forward<Args>(args)...);
return GetViewProviderOrDie<View>(false).GetReadView(
internal::types<View>{}, std::forward<Args>(args)...);
}
// Gets a write view of the specified type. The arguments depend on the
// specific view type; see the corresponding ViewProvider.
template <class View, class... Args>
decltype(auto) GetWriteView(Args... args) {
return GetViewProvider<View>(true)->GetWriteView(
internal::types<View>{}, std::make_shared<GpuBuffer>(*this),
std::forward<Args>(args)...);
return GetViewProviderOrDie<View>(true).GetWriteView(
internal::types<View>{}, std::forward<Args>(args)...);
}
// Attempts to access an underlying storage object of the specified type.
@ -147,13 +145,17 @@ class GpuBuffer {
GpuBufferFormat format_ = GpuBufferFormat::kUnknown;
};
internal::GpuBufferStorage& GetStorageForView(TypeId view_provider_type,
internal::GpuBufferStorage* GetStorageForView(TypeId view_provider_type,
bool for_writing) const;
internal::GpuBufferStorage& GetStorageForViewOrDie(TypeId view_provider_type,
bool for_writing) const;
template <class View>
internal::ViewProvider<View>* GetViewProvider(bool for_writing) const {
internal::ViewProvider<View>& GetViewProviderOrDie(bool for_writing) const {
using VP = internal::ViewProvider<View>;
return GetStorageForView(kTypeId<VP>, for_writing).template down_cast<VP>();
return *GetStorageForViewOrDie(kTypeId<VP>, for_writing)
.template down_cast<VP>();
}
std::shared_ptr<internal::GpuBufferStorage>& no_storage() const {
@ -175,6 +177,10 @@ class GpuBuffer {
// This is mutable because view methods that do not change the contents may
// still need to allocate new storages.
mutable std::vector<std::shared_ptr<internal::GpuBufferStorage>> storages_;
#if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
friend CVPixelBufferRef GetCVPixelBufferRef(const GpuBuffer& buffer);
#endif // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
};
inline bool GpuBuffer::operator==(std::nullptr_t other) const {

View File

@ -153,6 +153,34 @@ inline GpuBufferFormat GpuBufferFormatForCVPixelFormat(OSType format) {
#endif // __APPLE__
namespace internal {
struct GpuBufferSpec {
GpuBufferSpec(int w, int h, GpuBufferFormat f)
: width(w), height(h), format(f) {}
template <typename H>
friend H AbslHashValue(H h, const GpuBufferSpec& spec) {
return H::combine(std::move(h), spec.width, spec.height,
static_cast<uint32_t>(spec.format));
}
int width;
int height;
GpuBufferFormat format;
};
// BufferSpec equality operators
inline bool operator==(const GpuBufferSpec& lhs, const GpuBufferSpec& rhs) {
return lhs.width == rhs.width && lhs.height == rhs.height &&
lhs.format == rhs.format;
}
inline bool operator!=(const GpuBufferSpec& lhs, const GpuBufferSpec& rhs) {
return !operator==(lhs, rhs);
}
} // namespace internal
} // namespace mediapipe
#endif // MEDIAPIPE_GPU_GPU_BUFFER_FORMAT_H_

View File

@ -16,204 +16,7 @@
#include <tuple>
#include "absl/memory/memory.h"
#include "absl/synchronization/mutex.h"
#include "mediapipe/framework/port/logging.h"
#include "mediapipe/gpu/gpu_shared_data_internal.h"
#if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
#include "CoreFoundation/CFBase.h"
#include "mediapipe/objc/CFHolder.h"
#include "mediapipe/objc/util.h"
#endif // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
namespace mediapipe {
// Keep this many buffers allocated for a given frame size.
static constexpr int kKeepCount = 2;
// The maximum size of the GpuBufferMultiPool. When the limit is reached, the
// oldest BufferSpec will be dropped.
static constexpr int kMaxPoolCount = 10;
// Time in seconds after which an inactive buffer can be dropped from the pool.
// Currently only used with CVPixelBufferPool.
static constexpr float kMaxInactiveBufferAge = 0.25;
// Skip allocating a buffer pool until at least this many requests have been
// made for a given BufferSpec.
static constexpr int kMinRequestsBeforePool = 2;
// Do a deeper flush every this many requests.
static constexpr int kRequestCountScrubInterval = 50;
#if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
CvPixelBufferPoolWrapper::CvPixelBufferPoolWrapper(
const GpuBufferMultiPool::BufferSpec& spec, CFTimeInterval maxAge) {
OSType cv_format = CVPixelFormatForGpuBufferFormat(spec.format);
CHECK_NE(cv_format, -1) << "unsupported pixel format";
pool_ = MakeCFHolderAdopting(
/* keep count is 0 because the age param keeps buffers around anyway */
CreateCVPixelBufferPool(spec.width, spec.height, cv_format, 0, maxAge));
}
GpuBuffer CvPixelBufferPoolWrapper::GetBuffer(std::function<void(void)> flush) {
CVPixelBufferRef buffer;
int threshold = 1;
NSMutableDictionary* auxAttributes =
[NSMutableDictionary dictionaryWithCapacity:1];
CVReturn err;
bool tried_flushing = false;
while (1) {
auxAttributes[(id)kCVPixelBufferPoolAllocationThresholdKey] = @(threshold);
err = CVPixelBufferPoolCreatePixelBufferWithAuxAttributes(
kCFAllocatorDefault, *pool_, (__bridge CFDictionaryRef)auxAttributes,
&buffer);
if (err != kCVReturnWouldExceedAllocationThreshold) break;
if (flush && !tried_flushing) {
// Call the flush function to potentially release old holds on buffers
// and try again to create a pixel buffer.
// This is used to flush CV texture caches, which may retain buffers until
// flushed.
flush();
tried_flushing = true;
} else {
++threshold;
}
}
CHECK(!err) << "Error creating pixel buffer: " << err;
count_ = threshold;
return GpuBuffer(MakeCFHolderAdopting(buffer));
}
std::string CvPixelBufferPoolWrapper::GetDebugString() const {
auto description = MakeCFHolderAdopting(CFCopyDescription(*pool_));
return [(__bridge NSString*)*description UTF8String];
}
void CvPixelBufferPoolWrapper::Flush() { CVPixelBufferPoolFlush(*pool_, 0); }
GpuBufferMultiPool::SimplePool GpuBufferMultiPool::MakeSimplePool(
const GpuBufferMultiPool::BufferSpec& spec) {
return std::make_shared<CvPixelBufferPoolWrapper>(spec,
kMaxInactiveBufferAge);
}
GpuBuffer GpuBufferMultiPool::GetBufferWithoutPool(const BufferSpec& spec) {
OSType cv_format = CVPixelFormatForGpuBufferFormat(spec.format);
CHECK_NE(cv_format, -1) << "unsupported pixel format";
CVPixelBufferRef buffer;
CVReturn err = CreateCVPixelBufferWithoutPool(spec.width, spec.height,
cv_format, &buffer);
CHECK(!err) << "Error creating pixel buffer: " << err;
return GpuBuffer(MakeCFHolderAdopting(buffer));
}
void GpuBufferMultiPool::FlushTextureCaches() {
absl::MutexLock lock(&mutex_);
for (const auto& cache : texture_caches_) {
#if TARGET_OS_OSX
CVOpenGLTextureCacheFlush(*cache, 0);
#else
CVOpenGLESTextureCacheFlush(*cache, 0);
#endif // TARGET_OS_OSX
}
}
// Turning this on disables the pixel buffer pools when using the simulator.
// It is no longer necessary, since the helper code now supports non-contiguous
// buffers. We leave the code in for now for the sake of documentation.
#define FORCE_CONTIGUOUS_PIXEL_BUFFER_ON_IPHONE_SIMULATOR 0
GpuBuffer GpuBufferMultiPool::GetBufferFromSimplePool(
BufferSpec spec, const GpuBufferMultiPool::SimplePool& pool) {
#if TARGET_IPHONE_SIMULATOR && FORCE_CONTIGUOUS_PIXEL_BUFFER_ON_IPHONE_SIMULATOR
// On the simulator, syncing the texture with the pixelbuffer does not work,
// and we have to use glReadPixels. Since GL_UNPACK_ROW_LENGTH is not
// available in OpenGL ES 2, we should create the buffer so the pixels are
// contiguous.
//
// TODO: verify if we can use kIOSurfaceBytesPerRow to force the
// pool to give us contiguous data.
return GetBufferWithoutPool(spec);
#else
return pool->GetBuffer([this]() { FlushTextureCaches(); });
#endif // TARGET_IPHONE_SIMULATOR
}
#else
GpuBufferMultiPool::SimplePool GpuBufferMultiPool::MakeSimplePool(
const BufferSpec& spec) {
return GlTextureBufferPool::Create(spec.width, spec.height, spec.format,
kKeepCount);
}
GpuBuffer GpuBufferMultiPool::GetBufferWithoutPool(const BufferSpec& spec) {
return GpuBuffer(
GlTextureBuffer::Create(spec.width, spec.height, spec.format));
}
GpuBuffer GpuBufferMultiPool::GetBufferFromSimplePool(
BufferSpec spec, const GpuBufferMultiPool::SimplePool& pool) {
return GpuBuffer(pool->GetBuffer());
}
#endif // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
GpuBufferMultiPool::SimplePool GpuBufferMultiPool::RequestPool(
const BufferSpec& spec) {
SimplePool pool;
std::vector<SimplePool> evicted;
{
absl::MutexLock lock(&mutex_);
pool =
cache_.Lookup(spec, [this](const BufferSpec& spec, int request_count) {
return (request_count >= kMinRequestsBeforePool)
? MakeSimplePool(spec)
: nullptr;
});
evicted = cache_.Evict(kMaxPoolCount, kRequestCountScrubInterval);
}
// Evicted pools, and their buffers, will be released without holding the
// lock.
return pool;
}
GpuBuffer GpuBufferMultiPool::GetBuffer(int width, int height,
GpuBufferFormat format) {
BufferSpec key(width, height, format);
SimplePool pool = RequestPool(key);
if (pool) {
// Note: we release our multipool lock before accessing the simple pool.
return GetBufferFromSimplePool(key, pool);
} else {
return GetBufferWithoutPool(key);
}
}
GpuBufferMultiPool::~GpuBufferMultiPool() {
#ifdef __APPLE__
CHECK_EQ(texture_caches_.size(), 0)
<< "Failed to unregister texture caches before deleting pool";
#endif // defined(__APPLE__)
}
#ifdef __APPLE__
void GpuBufferMultiPool::RegisterTextureCache(CVTextureCacheType cache) {
absl::MutexLock lock(&mutex_);
CHECK(std::find(texture_caches_.begin(), texture_caches_.end(), cache) ==
texture_caches_.end())
<< "Attempting to register a texture cache twice";
texture_caches_.emplace_back(cache);
}
void GpuBufferMultiPool::UnregisterTextureCache(CVTextureCacheType cache) {
absl::MutexLock lock(&mutex_);
auto it = std::find(texture_caches_.begin(), texture_caches_.end(), cache);
CHECK(it != texture_caches_.end())
<< "Attempting to unregister an unknown texture cache";
texture_caches_.erase(it);
}
#endif // defined(__APPLE__)
} // namespace mediapipe
namespace mediapipe {} // namespace mediapipe

View File

@ -22,120 +22,35 @@
#ifndef MEDIAPIPE_GPU_GPU_BUFFER_MULTI_POOL_H_
#define MEDIAPIPE_GPU_GPU_BUFFER_MULTI_POOL_H_
#include "absl/hash/hash.h"
#include "absl/synchronization/mutex.h"
#include "mediapipe/gpu/gpu_buffer.h"
#include "mediapipe/util/resource_cache.h"
#include "mediapipe/gpu/multi_pool.h"
#ifdef __APPLE__
#include "mediapipe/gpu/pixel_buffer_pool_util.h"
#endif // __APPLE__
#if !MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
#if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
#include "mediapipe/gpu/cv_pixel_buffer_pool_wrapper.h"
#else
#include "mediapipe/gpu/gl_texture_buffer_pool.h"
#endif // !MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
#endif // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
namespace mediapipe {
struct GpuSharedData;
class CvPixelBufferPoolWrapper;
class GpuBufferMultiPool {
public:
GpuBufferMultiPool() {}
explicit GpuBufferMultiPool(void* ignored) {}
~GpuBufferMultiPool();
// Obtains a buffer. May either be reused or created anew.
GpuBuffer GetBuffer(int width, int height,
GpuBufferFormat format = GpuBufferFormat::kBGRA32);
#ifdef __APPLE__
// TODO: add tests for the texture cache registration.
// Inform the pool of a cache that should be flushed when it is low on
// reusable buffers.
void RegisterTextureCache(CVTextureCacheType cache);
// Remove a texture cache from the list of caches to be flushed.
void UnregisterTextureCache(CVTextureCacheType cache);
void FlushTextureCaches();
#endif // defined(__APPLE__)
// This class is not intended as part of the public api of this class. It is
// public only because it is used as a map key type, and the map
// implementation needs access to, e.g., the equality operator.
struct BufferSpec {
BufferSpec(int w, int h, mediapipe::GpuBufferFormat f)
: width(w), height(h), format(f) {}
template <typename H>
friend H AbslHashValue(H h, const BufferSpec& spec) {
return H::combine(std::move(h), spec.width, spec.height,
static_cast<uint32_t>(spec.format));
}
int width;
int height;
mediapipe::GpuBufferFormat format;
};
private:
class GpuBufferMultiPool : public MultiPool<
#if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
using SimplePool = std::shared_ptr<CvPixelBufferPoolWrapper>;
CvPixelBufferPoolWrapper,
#else
using SimplePool = std::shared_ptr<GlTextureBufferPool>;
GlTextureBufferPool,
#endif // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
SimplePool MakeSimplePool(const BufferSpec& spec);
// Requests a simple buffer pool for the given spec. This may return nullptr
// if we have not yet reached a sufficient number of requests to allocate a
// pool, in which case the caller should invoke GetBufferWithoutPool instead
// of GetBufferFromSimplePool.
SimplePool RequestPool(const BufferSpec& spec);
GpuBuffer GetBufferFromSimplePool(BufferSpec spec, const SimplePool& pool);
GpuBuffer GetBufferWithoutPool(const BufferSpec& spec);
absl::Mutex mutex_;
mediapipe::ResourceCache<BufferSpec, SimplePool, absl::Hash<BufferSpec>>
cache_ ABSL_GUARDED_BY(mutex_);
#ifdef __APPLE__
// Texture caches used with this pool.
std::vector<CFHolder<CVTextureCacheType>> texture_caches_
ABSL_GUARDED_BY(mutex_);
#endif // defined(__APPLE__)
};
#if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
class CvPixelBufferPoolWrapper {
internal::GpuBufferSpec, GpuBuffer> {
public:
CvPixelBufferPoolWrapper(const GpuBufferMultiPool::BufferSpec& spec,
CFTimeInterval maxAge);
GpuBuffer GetBuffer(std::function<void(void)> flush);
using MultiPool::MultiPool;
int GetBufferCount() const { return count_; }
std::string GetDebugString() const;
void Flush();
private:
CFHolder<CVPixelBufferPoolRef> pool_;
int count_ = 0;
GpuBuffer GetBuffer(int width, int height,
GpuBufferFormat format = GpuBufferFormat::kBGRA32) {
return Get(internal::GpuBufferSpec(width, height, format));
}
};
#endif // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
// BufferSpec equality operators
inline bool operator==(const GpuBufferMultiPool::BufferSpec& lhs,
const GpuBufferMultiPool::BufferSpec& rhs) {
return lhs.width == rhs.width && lhs.height == rhs.height &&
lhs.format == rhs.format;
}
inline bool operator!=(const GpuBufferMultiPool::BufferSpec& lhs,
const GpuBufferMultiPool::BufferSpec& rhs) {
return !operator==(lhs, rhs);
}
} // namespace mediapipe

View File

@ -13,22 +13,57 @@
#include "mediapipe/gpu/gpu_buffer_format.h"
namespace mediapipe {
class GpuBuffer;
namespace internal {
template <class... T>
struct types {};
// This template must be specialized for each view type V. Each specialization
// should define a pair of virtual methods called GetReadView and GetWriteView,
// whose first argument is a types<V> tag object. The result type and optional
// further arguments will depend on the view type.
//
// Example:
// template <>
// class ViewProvider<MyView> {
// public:
// virtual ~ViewProvider() = default;
// virtual MyView GetReadView(types<MyView>) const = 0;
// virtual MyView GetWriteView(types<MyView>) = 0;
// };
//
// The additional arguments and result type are reflected in GpuBuffer's
// GetReadView and GetWriteView methods.
//
// Using a type tag for the first argument allows the methods to be overloaded,
// so that a single storage can implement provider methods for multiple views.
// Since these methods are not template methods, they can (and should) be
// virtual, which allows storage classes to override them, enforcing that all
// storages providing a given view type implement the same interface.
template <class V>
class ViewProvider;
// Interface for a backing storage for GpuBuffer.
// Generic interface for a backing storage for GpuBuffer.
//
// GpuBuffer is an opaque handle to an image. Its contents are handled by
// Storage classes. Application code does not interact with the storages
// directly; to access the data, it asks the GpuBuffer for a View, and in turn
// GpuBuffer looks for a storage that can provide that view.
// This architecture decouples application code from the underlying storage,
// making it possible to use platform-specific optimized storage systems, e.g.
// for zero-copy data sharing between CPU and GPU.
//
// Storage implementations should inherit from GpuBufferStorageImpl. See that
// class for details.
class GpuBufferStorage {
public:
virtual ~GpuBufferStorage() = default;
// Concrete storage types should override the following three accessors.
virtual int width() const = 0;
virtual int height() const = 0;
virtual GpuBufferFormat format() const = 0;
// We can't use dynamic_cast since we want to support building without RTTI.
// The public methods delegate to the type-erased private virtual method.
template <class T>
@ -72,19 +107,33 @@ class GpuBufferStorageRegistry {
return *registry;
}
// Registers a storage type by automatically creating a factory for it.
// This is normally called by GpuBufferImpl.
template <class Storage>
RegistryToken Register() {
return Register(
return RegisterFactory<Storage>(
[](int width, int height,
GpuBufferFormat format) -> std::shared_ptr<Storage> {
return CreateStorage<Storage>(overload_priority<10>{}, width, height,
format);
},
Storage::GetProviderTypes());
});
}
// Registers a new factory for a storage type.
template <class Storage, class F>
RegistryToken RegisterFactory(F&& factory) {
if constexpr (kDisableRegistration<Storage>) {
return {};
}
return Register(factory, Storage::GetProviderTypes());
}
// Registers a new converter from storage type StorageFrom to StorageTo.
template <class StorageFrom, class StorageTo, class F>
RegistryToken RegisterConverter(F&& converter) {
if constexpr (kDisableRegistration<StorageTo>) {
return {};
}
return Register(
[converter](std::shared_ptr<GpuBufferStorage> source)
-> std::shared_ptr<GpuBufferStorage> {
@ -115,6 +164,13 @@ class GpuBufferStorageRegistry {
return std::make_shared<Storage>(args...);
}
// Temporary workaround: a Storage class can define a static constexpr
// kDisableGpuBufferRegistration member to true to prevent registering any
// factory of converter that would produce it.
// TODO: better solution for storage priorities.
template <class Storage, typename = void>
static constexpr bool kDisableRegistration = false;
RegistryToken Register(StorageFactory factory,
std::vector<TypeId> provider_hashes);
RegistryToken Register(StorageConverter converter,
@ -126,6 +182,13 @@ class GpuBufferStorageRegistry {
converter_for_view_provider_and_existing_storage_;
};
// Putting this outside the class body to work around a GCC bug.
// https://gcc.gnu.org/bugzilla/show_bug.cgi?id=71954
template <class Storage>
constexpr bool GpuBufferStorageRegistry::kDisableRegistration<
Storage, std::void_t<decltype(&Storage::kDisableGpuBufferRegistration)>> =
Storage::kDisableGpuBufferRegistration;
// Defining a member of this type causes P to be ODR-used, which forces its
// instantiation if it's a static member of a template.
template <auto* P>
@ -138,21 +201,41 @@ struct ForceStaticInstantiation {
#endif // _MSC_VER
};
// T: storage type
// U...: ViewProvider<SomeView>
// Inherit from this class to define a new storage type. The storage type itself
// should be passed as the first template argument (CRTP), followed by one or
// more specializations of ViewProvider.
//
// Concrete storage types should implement the basic accessors from
// GpuBufferStorage, plus the view read/write getters for each ViewProvider they
// implement. This class handles the rest.
//
// Arguments:
// T: storage type
// U...: ViewProvider<SomeView>
// Example:
// class MyStorage : public GpuBufferStorageImpl<
// MyStorage, ViewProvider<GlTextureView>>
template <class T, class... U>
class GpuBufferStorageImpl : public GpuBufferStorage, public U... {
public:
static const std::vector<TypeId>& GetProviderTypes() {
static std::vector<TypeId> kHashes{kTypeId<U>...};
return kHashes;
static std::vector<TypeId> kProviderIds{kTypeId<U>...};
return kProviderIds;
}
// Exposing this as a function allows dependent initializers to call this to
// ensure proper ordering.
static GpuBufferStorageRegistry::RegistryToken RegisterOnce() {
static auto registration = GpuBufferStorageRegistry::Get().Register<T>();
return registration;
}
private:
virtual const void* down_cast(TypeId to) const override {
// Allows a down_cast to any of the view provider types in U.
const void* down_cast(TypeId to) const final {
return down_cast_impl(to, types<T, U...>{});
}
TypeId storage_type() const override { return kTypeId<T>; }
TypeId storage_type() const final { return kTypeId<T>; }
const void* down_cast_impl(TypeId to, types<>) const { return nullptr; }
template <class V, class... W>
@ -161,8 +244,7 @@ class GpuBufferStorageImpl : public GpuBufferStorage, public U... {
return down_cast_impl(to, types<W...>{});
}
inline static auto registration =
GpuBufferStorageRegistry::Get().Register<T>();
inline static auto registration = RegisterOnce();
using RequireStatics = ForceStaticInstantiation<&registration>;
};

View File

@ -26,8 +26,7 @@ GpuBufferStorageCvPixelBuffer::GpuBufferStorageCvPixelBuffer(
}
GlTextureView GpuBufferStorageCvPixelBuffer::GetTexture(
std::shared_ptr<GpuBuffer> gpu_buffer, int plane,
GlTextureView::DoneWritingFn done_writing) const {
int plane, GlTextureView::DoneWritingFn done_writing) const {
CVReturn err;
auto gl_context = GlContext::GetCurrent();
CHECK(gl_context);
@ -60,39 +59,20 @@ GlTextureView GpuBufferStorageCvPixelBuffer::GetTexture(
cv_texture.adopt(cv_texture_temp);
return GlTextureView(
gl_context.get(), CVOpenGLESTextureGetTarget(*cv_texture),
CVOpenGLESTextureGetName(*cv_texture), width(), height(),
std::move(gpu_buffer), plane,
CVOpenGLESTextureGetName(*cv_texture), width(), height(), plane,
[cv_texture](mediapipe::GlTextureView&) { /* only retains cv_texture */ },
done_writing);
#endif // TARGET_OS_OSX
}
GlTextureView GpuBufferStorageCvPixelBuffer::GetReadView(
internal::types<GlTextureView>, std::shared_ptr<GpuBuffer> gpu_buffer,
int plane) const {
return GetTexture(std::move(gpu_buffer), plane, nullptr);
internal::types<GlTextureView>, int plane) const {
return GetTexture(plane, nullptr);
}
GlTextureView GpuBufferStorageCvPixelBuffer::GetWriteView(
internal::types<GlTextureView>, std::shared_ptr<GpuBuffer> gpu_buffer,
int plane) {
return GetTexture(
std::move(gpu_buffer), plane,
[this](const mediapipe::GlTextureView& view) { ViewDoneWriting(view); });
}
std::shared_ptr<const ImageFrame> GpuBufferStorageCvPixelBuffer::GetReadView(
internal::types<ImageFrame>, std::shared_ptr<GpuBuffer> gpu_buffer) const {
return CreateImageFrameForCVPixelBuffer(**this);
}
std::shared_ptr<ImageFrame> GpuBufferStorageCvPixelBuffer::GetWriteView(
internal::types<ImageFrame>, std::shared_ptr<GpuBuffer> gpu_buffer) {
return CreateImageFrameForCVPixelBuffer(**this);
}
void GpuBufferStorageCvPixelBuffer::ViewDoneWriting(const GlTextureView& view) {
#if TARGET_IPHONE_SIMULATOR
CVPixelBufferRef pixel_buffer = **this;
static void ViewDoneWritingSimulatorWorkaround(CVPixelBufferRef pixel_buffer,
const GlTextureView& view) {
CHECK(pixel_buffer);
CVReturn err = CVPixelBufferLockBaseAddress(pixel_buffer, 0);
CHECK(err == kCVReturnSuccess)
@ -130,7 +110,30 @@ void GpuBufferStorageCvPixelBuffer::ViewDoneWriting(const GlTextureView& view) {
err = CVPixelBufferUnlockBaseAddress(pixel_buffer, 0);
CHECK(err == kCVReturnSuccess)
<< "CVPixelBufferUnlockBaseAddress failed: " << err;
#endif
}
#endif // TARGET_IPHONE_SIMULATOR
GlTextureView GpuBufferStorageCvPixelBuffer::GetWriteView(
internal::types<GlTextureView>, int plane) {
return GetTexture(plane,
#if TARGET_IPHONE_SIMULATOR
[pixel_buffer = CFHolder<CVPixelBufferRef>(*this)](
const mediapipe::GlTextureView& view) {
ViewDoneWritingSimulatorWorkaround(*pixel_buffer, view);
}
#else
nullptr
#endif // TARGET_IPHONE_SIMULATOR
);
}
std::shared_ptr<const ImageFrame> GpuBufferStorageCvPixelBuffer::GetReadView(
internal::types<ImageFrame>) const {
return CreateImageFrameForCVPixelBuffer(**this);
}
std::shared_ptr<ImageFrame> GpuBufferStorageCvPixelBuffer::GetWriteView(
internal::types<ImageFrame>) {
return CreateImageFrameForCVPixelBuffer(**this);
}
static std::shared_ptr<GpuBufferStorageCvPixelBuffer> ConvertFromImageFrame(

View File

@ -12,10 +12,25 @@ namespace mediapipe {
class GlContext;
namespace internal {
template <>
class ViewProvider<CVPixelBufferRef> {
public:
virtual ~ViewProvider() = default;
virtual CFHolder<CVPixelBufferRef> GetReadView(
internal::types<CVPixelBufferRef>) const = 0;
virtual CFHolder<CVPixelBufferRef> GetWriteView(
internal::types<CVPixelBufferRef>) = 0;
};
} // namespace internal
class GpuBufferStorageCvPixelBuffer
: public internal::GpuBufferStorageImpl<
GpuBufferStorageCvPixelBuffer, internal::ViewProvider<GlTextureView>,
internal::ViewProvider<ImageFrame>>,
internal::ViewProvider<ImageFrame>,
internal::ViewProvider<CVPixelBufferRef>>,
public CFHolder<CVPixelBufferRef> {
public:
using CFHolder<CVPixelBufferRef>::CFHolder;
@ -33,24 +48,32 @@ class GpuBufferStorageCvPixelBuffer
CVPixelBufferGetPixelFormatType(**this));
}
GlTextureView GetReadView(internal::types<GlTextureView>,
std::shared_ptr<GpuBuffer> gpu_buffer,
int plane) const override;
GlTextureView GetWriteView(internal::types<GlTextureView>,
std::shared_ptr<GpuBuffer> gpu_buffer,
int plane) override;
std::shared_ptr<const ImageFrame> GetReadView(
internal::types<ImageFrame>,
std::shared_ptr<GpuBuffer> gpu_buffer) const override;
internal::types<ImageFrame>) const override;
std::shared_ptr<ImageFrame> GetWriteView(
internal::types<ImageFrame>,
std::shared_ptr<GpuBuffer> gpu_buffer) override;
internal::types<ImageFrame>) override;
CFHolder<CVPixelBufferRef> GetReadView(
internal::types<CVPixelBufferRef>) const override;
CFHolder<CVPixelBufferRef> GetWriteView(
internal::types<CVPixelBufferRef>) override;
private:
GlTextureView GetTexture(std::shared_ptr<GpuBuffer> gpu_buffer, int plane,
GlTextureView GetTexture(int plane,
GlTextureView::DoneWritingFn done_writing) const;
void ViewDoneWriting(const GlTextureView& view);
};
inline CFHolder<CVPixelBufferRef> GpuBufferStorageCvPixelBuffer::GetReadView(
internal::types<CVPixelBufferRef>) const {
return *this;
}
inline CFHolder<CVPixelBufferRef> GpuBufferStorageCvPixelBuffer::GetWriteView(
internal::types<CVPixelBufferRef>) {
return *this;
}
namespace internal {
// These functions enable backward-compatible construction of a GpuBuffer from
// CVPixelBufferRef without having to expose that type in the main GpuBuffer

View File

@ -29,13 +29,11 @@ class GpuBufferStorageImageFrame
std::shared_ptr<const ImageFrame> image_frame() const { return image_frame_; }
std::shared_ptr<ImageFrame> image_frame() { return image_frame_; }
std::shared_ptr<const ImageFrame> GetReadView(
internal::types<ImageFrame>,
std::shared_ptr<GpuBuffer> gpu_buffer) const override {
internal::types<ImageFrame>) const override {
return image_frame_;
}
std::shared_ptr<ImageFrame> GetWriteView(
internal::types<ImageFrame>,
std::shared_ptr<GpuBuffer> gpu_buffer) override {
internal::types<ImageFrame>) override {
return image_frame_;
}

View File

@ -14,10 +14,13 @@
#include "mediapipe/gpu/gpu_buffer.h"
#include <utility>
#include "mediapipe/framework/formats/image_format.pb.h"
#include "mediapipe/framework/port/gmock.h"
#include "mediapipe/framework/port/gtest.h"
#include "mediapipe/framework/tool/test_util.h"
#include "mediapipe/gpu/gl_texture_util.h"
#include "mediapipe/gpu/gpu_buffer_storage_ahwb.h"
#include "mediapipe/gpu/gpu_buffer_storage_image_frame.h"
#include "mediapipe/gpu/gpu_test_base.h"
@ -41,47 +44,6 @@ void FillImageFrameRGBA(ImageFrame& image, uint8 r, uint8 g, uint8 b, uint8 a) {
}
}
// Assumes a framebuffer is already set up
void CopyGlTexture(const GlTextureView& src, GlTextureView& dst) {
glViewport(0, 0, src.width(), src.height());
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, src.target(),
src.name(), 0);
glActiveTexture(GL_TEXTURE0);
glBindTexture(dst.target(), dst.name());
glCopyTexSubImage2D(dst.target(), 0, 0, 0, 0, 0, dst.width(), dst.height());
glBindTexture(dst.target(), 0);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, src.target(), 0,
0);
}
void FillGlTextureRgba(GlTextureView& view, float r, float g, float b,
float a) {
glViewport(0, 0, view.width(), view.height());
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, view.target(),
view.name(), 0);
glClearColor(r, g, b, a);
glClear(GL_COLOR_BUFFER_BIT);
glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0, view.target(), 0,
0);
}
class TempGlFramebuffer {
public:
TempGlFramebuffer() {
glGenFramebuffers(1, &framebuffer_);
glBindFramebuffer(GL_FRAMEBUFFER, framebuffer_);
}
~TempGlFramebuffer() {
glBindFramebuffer(GL_FRAMEBUFFER, 0);
glDeleteFramebuffers(1, &framebuffer_);
}
private:
GLuint framebuffer_;
};
class GpuBufferTest : public GpuTestBase {};
TEST_F(GpuBufferTest, BasicTest) {
@ -127,7 +89,7 @@ TEST_F(GpuBufferTest, GlTextureView) {
ImageFrame red(ImageFormat::SRGBA, 300, 200);
FillImageFrameRGBA(red, 255, 0, 0, 255);
EXPECT_TRUE(mediapipe::CompareImageFrames(*view, red, 0.0, 0.0));
EXPECT_TRUE(CompareImageFrames(*view, red, 0.0, 0.0));
MP_EXPECT_OK(SavePngTestOutput(red, "gltv_red_gold"));
MP_EXPECT_OK(SavePngTestOutput(*view, "gltv_red_view"));
}
@ -162,7 +124,7 @@ TEST_F(GpuBufferTest, ImageFrame) {
ImageFrame red(ImageFormat::SRGBA, 300, 200);
FillImageFrameRGBA(red, 255, 0, 0, 255);
EXPECT_TRUE(mediapipe::CompareImageFrames(*view, red, 0.0, 0.0));
EXPECT_TRUE(CompareImageFrames(*view, red, 0.0, 0.0));
MP_EXPECT_OK(SavePngTestOutput(red, "if_red_gold"));
MP_EXPECT_OK(SavePngTestOutput(*view, "if_red_view"));
}
@ -196,7 +158,7 @@ TEST_F(GpuBufferTest, Overwrite) {
ImageFrame red(ImageFormat::SRGBA, 300, 200);
FillImageFrameRGBA(red, 255, 0, 0, 255);
EXPECT_TRUE(mediapipe::CompareImageFrames(*view, red, 0.0, 0.0));
EXPECT_TRUE(CompareImageFrames(*view, red, 0.0, 0.0));
MP_EXPECT_OK(SavePngTestOutput(red, "ow_red_gold"));
MP_EXPECT_OK(SavePngTestOutput(*view, "ow_red_view"));
}
@ -230,7 +192,7 @@ TEST_F(GpuBufferTest, Overwrite) {
ImageFrame green(ImageFormat::SRGBA, 300, 200);
FillImageFrameRGBA(green, 0, 255, 0, 255);
EXPECT_TRUE(mediapipe::CompareImageFrames(*view, green, 0.0, 0.0));
EXPECT_TRUE(CompareImageFrames(*view, green, 0.0, 0.0));
MP_EXPECT_OK(SavePngTestOutput(green, "ow_green_gold"));
MP_EXPECT_OK(SavePngTestOutput(*view, "ow_green_view"));
}
@ -240,11 +202,31 @@ TEST_F(GpuBufferTest, Overwrite) {
ImageFrame blue(ImageFormat::SRGBA, 300, 200);
FillImageFrameRGBA(blue, 0, 0, 255, 255);
EXPECT_TRUE(mediapipe::CompareImageFrames(*view, blue, 0.0, 0.0));
EXPECT_TRUE(CompareImageFrames(*view, blue, 0.0, 0.0));
MP_EXPECT_OK(SavePngTestOutput(blue, "ow_blue_gold"));
MP_EXPECT_OK(SavePngTestOutput(*view, "ow_blue_view"));
}
}
TEST_F(GpuBufferTest, GlTextureViewRetainsWhatItNeeds) {
GpuBuffer buffer(300, 200, GpuBufferFormat::kBGRA32);
{
std::shared_ptr<ImageFrame> view = buffer.GetWriteView<ImageFrame>();
EXPECT_EQ(view->Width(), 300);
EXPECT_EQ(view->Height(), 200);
FillImageFrameRGBA(*view, 255, 0, 0, 255);
}
RunInGlContext([buffer = std::move(buffer)]() mutable {
// This is not a recommended pattern, but let's make sure that we don't
// crash if the buffer is released before the view. The view can hold
// callbacks into its underlying storage.
auto view = buffer.GetReadView<GlTextureView>(0);
buffer = nullptr;
});
// We're really checking that we haven't crashed.
EXPECT_TRUE(true);
}
} // anonymous namespace
} // namespace mediapipe

View File

@ -21,7 +21,7 @@
#include "mediapipe/gpu/graph_support.h"
#if __APPLE__
#import "mediapipe/gpu/MPPGraphGPUData.h"
#include "mediapipe/gpu/metal_shared_resources.h"
#endif // __APPLE__
namespace mediapipe {
@ -80,28 +80,40 @@ GpuResources::StatusOrGpuResources GpuResources::Create(
return gpu_resources;
}
GpuResources::GpuResources(std::shared_ptr<GlContext> gl_context) {
GpuResources::GpuResources(std::shared_ptr<GlContext> gl_context)
#if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
: texture_caches_(std::make_shared<CvTextureCacheManager>()),
gpu_buffer_pool_(
[tc = texture_caches_](const internal::GpuBufferSpec& spec,
const MultiPoolOptions& options) {
return CvPixelBufferPoolWrapper::Create(spec, options, tc.get());
})
#endif // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
{
gl_key_context_[SharedContextKey()] = gl_context;
named_executors_[kGpuExecutorName] =
std::make_shared<GlContextExecutor>(gl_context.get());
#if __APPLE__
gpu_buffer_pool().RegisterTextureCache(gl_context->cv_texture_cache());
ios_gpu_data_ = [[MPPGraphGPUData alloc] initWithContext:gl_context.get()
multiPool:&gpu_buffer_pool_];
#if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
texture_caches_->RegisterTextureCache(gl_context->cv_texture_cache());
#endif // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
metal_shared_ = std::make_unique<MetalSharedResources>();
#endif // __APPLE__
}
GpuResources::~GpuResources() {
#if __APPLE__
// Note: on Apple platforms, this object contains Objective-C objects. The
// destructor will release them, but ARC must be on.
// Note: on Apple platforms, this object contains Objective-C objects.
// The destructor will release them, but ARC must be on.
#if !__has_feature(objc_arc)
#error This file must be built with ARC.
#endif
#if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
for (auto& kv : gl_key_context_) {
gpu_buffer_pool().UnregisterTextureCache(kv.second->cv_texture_cache());
texture_caches_->UnregisterTextureCache(kv.second->cv_texture_cache());
}
#endif
#endif // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
#endif // __APPLE__
}
absl::Status GpuResources::PrepareGpuNode(CalculatorNode* node) {
@ -174,17 +186,43 @@ GlContext::StatusOrGlContext GpuResources::GetOrCreateGlContext(
GlContext::Create(*gl_key_context_[SharedContextKey()],
kGlContextUseDedicatedThread));
it = gl_key_context_.emplace(key, new_context).first;
#if __APPLE__
gpu_buffer_pool_.RegisterTextureCache(it->second->cv_texture_cache());
#endif
#if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
texture_caches_->RegisterTextureCache(it->second->cv_texture_cache());
#endif // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
}
return it->second;
}
GpuSharedData::GpuSharedData() : GpuSharedData(kPlatformGlContextNone) {}
#if __APPLE__
MPPGraphGPUData* GpuResources::ios_gpu_data() { return ios_gpu_data_; }
#endif // __APPLE__
extern const GraphService<GpuResources> kGpuService;
#if !MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
static std::shared_ptr<GlTextureBuffer> GetGlTextureBufferFromPool(
int width, int height, GpuBufferFormat format) {
std::shared_ptr<GlTextureBuffer> texture_buffer;
const auto cc = LegacyCalculatorSupport::Scoped<CalculatorContext>::current();
if (cc && cc->Service(kGpuService).IsAvailable()) {
GpuBufferMultiPool* pool =
&cc->Service(kGpuService).GetObject().gpu_buffer_pool();
// Note that the "gpu_buffer_pool" serves GlTextureBuffers on non-Apple
// platforms. TODO: refactor into storage pools.
texture_buffer = pool->GetBuffer(width, height, format)
.internal_storage<GlTextureBuffer>();
} else {
texture_buffer = GlTextureBuffer::Create(width, height, format);
}
return texture_buffer;
}
static auto kGlTextureBufferPoolRegistration = [] {
// Ensure that the GlTextureBuffer's own factory is already registered, so we
// can override it.
GlTextureBuffer::RegisterOnce();
return internal::GpuBufferStorageRegistry::Get()
.RegisterFactory<GlTextureBuffer>(GetGlTextureBufferFromPool);
}();
#endif // !MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
} // namespace mediapipe

View File

@ -30,15 +30,15 @@
#include "mediapipe/gpu/gpu_buffer_multi_pool.h"
#ifdef __APPLE__
#ifdef __OBJC__
@class MPPGraphGPUData;
#else
struct MPPGraphGPUData;
#endif // __OBJC__
#include "mediapipe/gpu/cv_texture_cache_manager.h"
#endif // defined(__APPLE__)
namespace mediapipe {
#ifdef __APPLE__
class MetalSharedResources;
#endif // defined(__APPLE__)
// TODO: rename to GpuService or GpuManager or something.
class GpuResources {
public:
@ -55,9 +55,7 @@ class GpuResources {
// Shared GL context for calculators.
// TODO: require passing a context or node identifier.
const std::shared_ptr<GlContext>& gl_context() {
return gl_context(nullptr);
};
const std::shared_ptr<GlContext>& gl_context() { return gl_context(nullptr); }
const std::shared_ptr<GlContext>& gl_context(CalculatorContext* cc);
@ -65,7 +63,7 @@ class GpuResources {
GpuBufferMultiPool& gpu_buffer_pool() { return gpu_buffer_pool_; }
#ifdef __APPLE__
MPPGraphGPUData* ios_gpu_data();
MetalSharedResources& metal_shared() { return *metal_shared_; }
#endif // defined(__APPLE__)
absl::Status PrepareGpuNode(CalculatorNode* node);
@ -86,13 +84,16 @@ class GpuResources {
std::map<std::string, std::string> node_key_;
std::map<std::string, std::shared_ptr<GlContext>> gl_key_context_;
#ifdef MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
std::shared_ptr<CvTextureCacheManager> texture_caches_;
#endif // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
// The pool must be destructed before the gl_context, but after the
// ios_gpu_data, so the declaration order is important.
GpuBufferMultiPool gpu_buffer_pool_;
#ifdef __APPLE__
// Note that this is an Objective-C object.
MPPGraphGPUData* ios_gpu_data_;
std::unique_ptr<MetalSharedResources> metal_shared_;
#endif // defined(__APPLE__)
std::map<std::string, std::shared_ptr<Executor>> named_executors_;

View File

@ -24,13 +24,14 @@ namespace mediapipe {
class GpuTestBase : public ::testing::Test {
protected:
GpuTestBase() { helper_.InitializeForTest(&gpu_shared_); }
GpuTestBase() { helper_.InitializeForTest(gpu_resources_.get()); }
void RunInGlContext(std::function<void(void)> gl_func) {
helper_.RunInGlContext(std::move(gl_func));
}
GpuSharedData gpu_shared_;
std::shared_ptr<GpuResources> gpu_resources_ = gpu_shared_.gpu_resources;
GlCalculatorHelper helper_;
};

View File

@ -12,73 +12,63 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#include "mediapipe/framework/api2/node.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/formats/image_frame.h"
#include "mediapipe/framework/port/status.h"
#include "mediapipe/gpu/gl_calculator_helper.h"
#ifdef __APPLE__
#include "mediapipe/objc/util.h"
#endif
namespace mediapipe {
namespace api2 {
// Convert ImageFrame to GpuBuffer.
class ImageFrameToGpuBufferCalculator : public CalculatorBase {
class ImageFrameToGpuBufferCalculator
: public RegisteredNode<ImageFrameToGpuBufferCalculator> {
public:
ImageFrameToGpuBufferCalculator() {}
static constexpr Input<ImageFrame> kIn{""};
static constexpr Output<GpuBuffer> kOut{""};
static absl::Status GetContract(CalculatorContract* cc);
MEDIAPIPE_NODE_INTERFACE(ImageFrameToGpuBufferCalculator, kIn, kOut);
static absl::Status UpdateContract(CalculatorContract* cc);
absl::Status Open(CalculatorContext* cc) override;
absl::Status Process(CalculatorContext* cc) override;
private:
#if !MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
GlCalculatorHelper helper_;
#endif // !MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
};
REGISTER_CALCULATOR(ImageFrameToGpuBufferCalculator);
// static
absl::Status ImageFrameToGpuBufferCalculator::GetContract(
absl::Status ImageFrameToGpuBufferCalculator::UpdateContract(
CalculatorContract* cc) {
cc->Inputs().Index(0).Set<ImageFrame>();
cc->Outputs().Index(0).Set<GpuBuffer>();
// Note: we call this method even on platforms where we don't use the helper,
// to ensure the calculator's contract is the same. In particular, the helper
// enables support for the legacy side packet, which several graphs still use.
MP_RETURN_IF_ERROR(GlCalculatorHelper::UpdateContract(cc));
return absl::OkStatus();
return GlCalculatorHelper::UpdateContract(cc);
}
absl::Status ImageFrameToGpuBufferCalculator::Open(CalculatorContext* cc) {
// Inform the framework that we always output at the same timestamp
// as we receive a packet at.
cc->SetOffset(TimestampDiff(0));
#if !MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
MP_RETURN_IF_ERROR(helper_.Open(cc));
#endif // !MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
return absl::OkStatus();
}
absl::Status ImageFrameToGpuBufferCalculator::Process(CalculatorContext* cc) {
#if MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
CFHolder<CVPixelBufferRef> buffer;
MP_RETURN_IF_ERROR(CreateCVPixelBufferForImageFramePacket(
cc->Inputs().Index(0).Value(), &buffer));
cc->Outputs().Index(0).Add(new GpuBuffer(buffer), cc->InputTimestamp());
#else
const auto& input = cc->Inputs().Index(0).Get<ImageFrame>();
helper_.RunInGlContext([this, &input, &cc]() {
auto src = helper_.CreateSourceTexture(input);
auto output = src.GetFrame<GpuBuffer>();
glFlush();
cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp());
src.Release();
});
#endif // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
auto image_frame = std::const_pointer_cast<ImageFrame>(
mediapipe::SharedPtrWithPacket<ImageFrame>(kIn(cc).packet()));
auto gpu_buffer = api2::MakePacket<GpuBuffer>(
std::make_shared<mediapipe::GpuBufferStorageImageFrame>(
std::move(image_frame)))
.At(cc->InputTimestamp());
// This calculator's behavior has been to do the texture upload eagerly, and
// some graphs may rely on running this on a separate GL context to avoid
// blocking another context with the read operation. So let's request GPU
// access here to ensure that the behavior stays the same.
// TODO: have a better way to do this, or defer until later.
helper_.RunInGlContext(
[&gpu_buffer] { auto view = gpu_buffer->GetReadView<GlTextureView>(0); });
kOut(cc).Send(std::move(gpu_buffer));
return absl::OkStatus();
}
} // namespace api2
} // namespace mediapipe

View File

@ -12,9 +12,8 @@ class ViewProvider<ImageFrame> {
public:
virtual ~ViewProvider() = default;
virtual std::shared_ptr<const ImageFrame> GetReadView(
types<ImageFrame>, std::shared_ptr<GpuBuffer> gpu_buffer) const = 0;
virtual std::shared_ptr<ImageFrame> GetWriteView(
types<ImageFrame>, std::shared_ptr<GpuBuffer> gpu_buffer) = 0;
types<ImageFrame>) const = 0;
virtual std::shared_ptr<ImageFrame> GetWriteView(types<ImageFrame>) = 0;
};
} // namespace internal

119
mediapipe/gpu/multi_pool.h Normal file
View File

@ -0,0 +1,119 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#ifndef MEDIAPIPE_GPU_MULTI_POOL_H_
#define MEDIAPIPE_GPU_MULTI_POOL_H_
#include "mediapipe/util/resource_cache.h"
namespace mediapipe {
// Tuning knobs for a MultiPool: how many items to retain per spec, how many
// per-spec pools to keep alive, and how eagerly to create/evict them. Plain
// aggregate; all defaults are compile-time constants.
struct MultiPoolOptions {
  // Keep this many buffers allocated for a given frame size.
  int keep_count = 2;
  // The maximum size of the GpuBufferMultiPool. When the limit is reached, the
  // oldest BufferSpec will be dropped.
  int max_pool_count = 10;
  // Time in seconds after which an inactive buffer can be dropped from the
  // pool. Currently only used with CVPixelBufferPool.
  float max_inactive_buffer_age = 0.25;
  // Skip allocating a buffer pool until at least this many requests have been
  // made for a given BufferSpec.
  int min_requests_before_pool = 2;
  // Do a deeper flush every this many requests.
  int request_count_scrub_interval = 50;
};
static constexpr MultiPoolOptions kDefaultMultiPoolOptions;
// MultiPool is a generic class for vending reusable resources of type Item,
// which are assumed to be relatively expensive to create, so that reusing them
// is beneficial.
// Items are classified by Spec; when an item with a given Spec is requested,
// an old Item with the same Spec can be reused, if available; otherwise a new
// Item will be created. When user code is done with an Item, it is returned
// to the pool for reuse.
// In order to manage this, a MultiPool contains a map of Specs to SimplePool;
// each SimplePool manages Items with the same Spec, which are thus considered
// interchangeable.
// Item retention and eviction policies are controlled by options.
// A concrete example would be a pool of GlTextureBuffer, grouped by dimensions
// and format.
template <class SimplePool, class Spec, class Item>
class MultiPool {
 public:
  // Factory invoked to build a per-spec SimplePool on demand. May be
  // customized at construction to control how simple pools are created.
  using SimplePoolFactory = std::function<std::shared_ptr<SimplePool>(
      const Spec& spec, const MultiPoolOptions& options)>;

  MultiPool(SimplePoolFactory factory = DefaultMakeSimplePool,
            MultiPoolOptions options = kDefaultMultiPoolOptions)
      : create_simple_pool_(factory), options_(options) {}
  explicit MultiPool(MultiPoolOptions options)
      : MultiPool(DefaultMakeSimplePool, options) {}

  // Obtains an item. May either be reused or created anew.
  Item Get(const Spec& spec);

 private:
  // Default factory: delegates to the simple pool's own Create function.
  static std::shared_ptr<SimplePool> DefaultMakeSimplePool(
      const Spec& spec, const MultiPoolOptions& options) {
    return SimplePool::Create(spec, options);
  }

  // Requests a simple buffer pool for the given spec. This may return nullptr
  // if we have not yet reached a sufficient number of requests to allocate a
  // pool, in which case the caller should invoke CreateBufferWithoutPool.
  std::shared_ptr<SimplePool> RequestPool(const Spec& spec);

  // Guards cache_; per-spec simple pools are looked up and evicted under it.
  absl::Mutex mutex_;
  mediapipe::ResourceCache<Spec, std::shared_ptr<SimplePool>> cache_
      ABSL_GUARDED_BY(mutex_);
  SimplePoolFactory create_simple_pool_ = DefaultMakeSimplePool;
  MultiPoolOptions options_;
};
// Looks up (and possibly lazily creates) the simple pool for `spec`, evicting
// stale pools as a side effect. Returns nullptr while the spec has not yet
// seen enough requests to justify a pool.
template <class SimplePool, class Spec, class Item>
std::shared_ptr<SimplePool> MultiPool<SimplePool, Spec, Item>::RequestPool(
    const Spec& spec) {
  // Only allocate a pool once this spec has been requested often enough.
  auto maybe_create = [this](const Spec& s, int request_count) {
    if (request_count < options_.min_requests_before_pool) {
      return std::shared_ptr<SimplePool>(nullptr);
    }
    return create_simple_pool_(s, options_);
  };
  std::shared_ptr<SimplePool> result;
  std::vector<std::shared_ptr<SimplePool>> retired;
  {
    absl::MutexLock lock(&mutex_);
    result = cache_.Lookup(spec, maybe_create);
    retired = cache_.Evict(options_.max_pool_count,
                           options_.request_count_scrub_interval);
  }
  // `retired` is destroyed here, so evicted pools (and their buffers) are
  // released without holding the lock.
  return result;
}
// Vends an Item for `spec`, drawing from the per-spec simple pool when one
// exists and falling back to a one-off allocation otherwise.
template <class SimplePool, class Spec, class Item>
Item MultiPool<SimplePool, Spec, Item>::Get(const Spec& spec) {
  // Note: RequestPool releases the multipool lock before we touch the
  // simple pool itself.
  if (auto simple_pool = RequestPool(spec)) {
    return Item(simple_pool->GetBuffer());
  }
  // Not enough demand for this spec yet: allocate outside any pool.
  return Item(SimplePool::CreateBufferWithoutPool(spec));
}
} // namespace mediapipe
#endif // MEDIAPIPE_GPU_MULTI_POOL_H_

View File

@ -0,0 +1,145 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Consider this file an implementation detail. None of this is part of the
// public API.
#ifndef MEDIAPIPE_GPU_REUSABLE_POOL_H_
#define MEDIAPIPE_GPU_REUSABLE_POOL_H_
#include <utility>
#include <vector>
#include "absl/functional/any_invocable.h"
#include "absl/synchronization/mutex.h"
#include "mediapipe/gpu/multi_pool.h"
namespace mediapipe {
// A pool of reusable Items. Items are handed out as shared_ptrs whose custom
// deleter returns them to the pool (if it is still alive) instead of freeing
// them. Thread-safe via mutex_.
template <class Item>
class ReusablePool : public std::enable_shared_from_this<ReusablePool<Item>> {
 public:
  // Callable that allocates a fresh Item; may return nullptr on failure.
  using ItemFactory = absl::AnyInvocable<std::unique_ptr<Item>() const>;

  // Creates a pool. This pool will manage buffers of the specified dimensions,
  // and will keep keep_count buffers around for reuse.
  // We enforce creation as a shared_ptr so that we can use a weak reference in
  // the buffers' deleters.
  static std::shared_ptr<ReusablePool<Item>> Create(
      ItemFactory item_factory, const MultiPoolOptions& options) {
    return std::shared_ptr<ReusablePool<Item>>(
        new ReusablePool<Item>(std::move(item_factory), options));
  }

  // Obtains a buffer. May either be reused or created anew.
  // A GlContext must be current when this is called.
  std::shared_ptr<Item> GetBuffer();

  // This method is meant for testing.
  std::pair<int, int> GetInUseAndAvailableCounts();

 protected:
  // Constructor is protected: use Create() so the pool is always owned by a
  // shared_ptr (required by the weak_ptr captured in item deleters).
  ReusablePool(ItemFactory item_factory, const MultiPoolOptions& options)
      : item_factory_(std::move(item_factory)),
        keep_count_(options.keep_count) {}

 private:
  // Return a buffer to the pool.
  void Return(std::unique_ptr<Item> buf);

  // If the total number of buffers is greater than keep_count, destroys any
  // surplus buffers that are no longer in use.
  void TrimAvailable(std::vector<std::unique_ptr<Item>>* trimmed)
      ABSL_EXCLUSIVE_LOCKS_REQUIRED(mutex_);

  const ItemFactory item_factory_;
  const int keep_count_;
  absl::Mutex mutex_;
  // Count of items currently handed out to callers.
  int in_use_count_ ABSL_GUARDED_BY(mutex_) = 0;
  // Idle items awaiting reuse.
  std::vector<std::unique_ptr<Item>> available_ ABSL_GUARDED_BY(mutex_);
};
// Vends an item, recycling an idle one when possible. The returned
// shared_ptr's deleter sends the item back to this pool rather than deleting
// it, as long as the pool is still alive.
template <class Item>
inline std::shared_ptr<Item> ReusablePool<Item>::GetBuffer() {
  std::unique_ptr<Item> item;
  bool recycled = false;
  {
    absl::MutexLock lock(&mutex_);
    if (!available_.empty()) {
      item = std::move(available_.back());
      available_.pop_back();
      recycled = true;
    } else {
      item = item_factory_();
      if (!item) return nullptr;
    }
    ++in_use_count_;
  }
  // Reuse() needs to wait on consumer sync points, therefore it should not be
  // called while holding the mutex.
  if (recycled) {
    item->Reuse();
  }
  // Wrap in a shared_ptr whose deleter hands the item back to the pool if the
  // pool still exists, and frees it otherwise.
  std::weak_ptr<ReusablePool<Item>> weak_pool(this->shared_from_this());
  return std::shared_ptr<Item>(item.release(), [weak_pool](Item* raw) {
    if (auto pool = weak_pool.lock()) {
      pool->Return(absl::WrapUnique(raw));
    } else {
      delete raw;
    }
  });
}
// Snapshot of (items handed out, items idle in the pool). Testing hook.
template <class Item>
inline std::pair<int, int> ReusablePool<Item>::GetInUseAndAvailableCounts() {
  absl::MutexLock lock(&mutex_);
  return std::make_pair(in_use_count_, static_cast<int>(available_.size()));
}
// Deleter callback target: puts `buf` back on the idle list and drops any
// surplus items beyond the retention target.
template <class Item>
void ReusablePool<Item>::Return(std::unique_ptr<Item> buf) {
  std::vector<std::unique_ptr<Item>> surplus;
  {
    absl::MutexLock lock(&mutex_);
    --in_use_count_;
    available_.push_back(std::move(buf));
    TrimAvailable(&surplus);
  }
  // `surplus` is destroyed here, so trimmed items are released without
  // holding the lock.
}
// Drops idle items beyond the retention target, moving them into *trimmed (if
// non-null) so the caller can destroy them outside the lock. Must be called
// with mutex_ held (enforced by ABSL_EXCLUSIVE_LOCKS_REQUIRED on the decl).
template <class Item>
void ReusablePool<Item>::TrimAvailable(
    std::vector<std::unique_ptr<Item>>* trimmed) {
  // How many idle items we may retain; never negative, even when more items
  // are in use than keep_count_.
  const int keep = std::max(keep_count_ - in_use_count_, 0);
  // Cast avoids the implicit signed/unsigned comparison (-Wsign-compare);
  // `keep` is guaranteed >= 0 by the std::max above.
  if (available_.size() > static_cast<size_t>(keep)) {
    auto trim_it = std::next(available_.begin(), keep);
    if (trimmed) {
      std::move(trim_it, available_.end(), std::back_inserter(*trimmed));
    }
    available_.erase(trim_it, available_.end());
  }
}
} // namespace mediapipe
#endif // MEDIAPIPE_GPU_REUSABLE_POOL_H_

View File

@ -55,7 +55,11 @@ public class PacketCreator {
public Packet createRgbImage(ByteBuffer buffer, int width, int height) {
int widthStep = (((width * 3) + 3) / 4) * 4;
if (widthStep * height != buffer.capacity()) {
throw new RuntimeException("The size of the buffer should be: " + widthStep * height);
throw new IllegalArgumentException(
"The size of the buffer should be: "
+ widthStep * height
+ " but is "
+ buffer.capacity());
}
return Packet.create(
nativeCreateRgbImage(mediapipeGraph.getNativeHandle(), buffer, width, height));
@ -123,7 +127,11 @@ public class PacketCreator {
*/
public Packet createRgbImageFromRgba(ByteBuffer buffer, int width, int height) {
if (width * height * 4 != buffer.capacity()) {
throw new RuntimeException("The size of the buffer should be: " + width * height * 4);
throw new IllegalArgumentException(
"The size of the buffer should be: "
+ width * height * 4
+ " but is "
+ buffer.capacity());
}
return Packet.create(
nativeCreateRgbImageFromRgba(mediapipeGraph.getNativeHandle(), buffer, width, height));
@ -136,7 +144,7 @@ public class PacketCreator {
*/
public Packet createGrayscaleImage(ByteBuffer buffer, int width, int height) {
if (width * height != buffer.capacity()) {
throw new RuntimeException(
throw new IllegalArgumentException(
"The size of the buffer should be: " + width * height + " but is " + buffer.capacity());
}
return Packet.create(
@ -150,7 +158,11 @@ public class PacketCreator {
*/
public Packet createRgbaImageFrame(ByteBuffer buffer, int width, int height) {
if (buffer.capacity() != width * height * 4) {
throw new RuntimeException("buffer doesn't have the correct size.");
throw new IllegalArgumentException(
"The size of the buffer should be: "
+ width * height * 4
+ " but is "
+ buffer.capacity());
}
return Packet.create(
nativeCreateRgbaImageFrame(mediapipeGraph.getNativeHandle(), buffer, width, height));
@ -163,7 +175,11 @@ public class PacketCreator {
*/
public Packet createFloatImageFrame(FloatBuffer buffer, int width, int height) {
if (buffer.capacity() != width * height * 4) {
throw new RuntimeException("buffer doesn't have the correct size.");
throw new IllegalArgumentException(
"The size of the buffer should be: "
+ width * height * 4
+ " but is "
+ buffer.capacity());
}
return Packet.create(
nativeCreateFloatImageFrame(mediapipeGraph.getNativeHandle(), buffer, width, height));
@ -354,25 +370,24 @@ public class PacketCreator {
* <p>For 3 and 4 channel images, the pixel rows should have 4-byte alignment.
*/
public Packet createImage(ByteBuffer buffer, int width, int height, int numChannels) {
int widthStep;
if (numChannels == 4) {
if (buffer.capacity() != width * height * 4) {
throw new RuntimeException("buffer doesn't have the correct size.");
}
widthStep = width * 4;
} else if (numChannels == 3) {
int widthStep = (((width * 3) + 3) / 4) * 4;
if (widthStep * height != buffer.capacity()) {
throw new RuntimeException("The size of the buffer should be: " + widthStep * height);
}
widthStep = (((width * 3) + 3) / 4) * 4;
} else if (numChannels == 1) {
if (width * height != buffer.capacity()) {
throw new RuntimeException(
"The size of the buffer should be: " + width * height + " but is " + buffer.capacity());
}
widthStep = width;
} else {
throw new RuntimeException("Channels should be: 1, 3, or 4, but is " + numChannels);
throw new IllegalArgumentException("Channels should be: 1, 3, or 4, but is " + numChannels);
}
int expectedSize = widthStep * height;
if (buffer.capacity() != expectedSize) {
throw new IllegalArgumentException(
"The size of the buffer should be: " + expectedSize + " but is " + buffer.capacity());
}
return Packet.create(
nativeCreateCpuImage(mediapipeGraph.getNativeHandle(), buffer, width, height, numChannels));
nativeCreateCpuImage(
mediapipeGraph.getNativeHandle(), buffer, width, height, widthStep, numChannels));
}
/** Helper callback adaptor to create the Java {@link GlSyncToken}. This is called by JNI code. */
@ -430,7 +445,7 @@ public class PacketCreator {
long context, int name, int width, int height, TextureReleaseCallback releaseCallback);
private native long nativeCreateCpuImage(
long context, ByteBuffer buffer, int width, int height, int numChannels);
long context, ByteBuffer buffer, int width, int height, int rowBytes, int numChannels);
private native long nativeCreateInt32Array(long context, int[] data);

View File

@ -20,9 +20,7 @@ android_library(
name = "image",
srcs = glob(["*.java"]),
manifest = "AndroidManifest.xml",
visibility = [
"//mediapipe:__subpackages__",
],
visibility = ["//visibility:public"],
deps = [
"//third_party:androidx_legacy_support_v4",
"//third_party:autovalue",

View File

@ -231,8 +231,6 @@ int64_t Graph::AddSurfaceOutput(const std::string& output_stream_name) {
*graph_config(), absl::StrCat("egl_surface_sink_", output_stream_name)));
sink_node->set_calculator("GlSurfaceSinkCalculator");
sink_node->add_input_stream(output_stream_name);
sink_node->add_input_side_packet(
absl::StrCat(kGpuSharedTagName, ":", kGpuSharedSidePacketName));
const std::string input_side_packet_name =
mediapipe::tool::GetUnusedSidePacketName(

View File

@ -17,6 +17,8 @@
#include <cstring>
#include <memory>
#include "absl/status/status.h"
#include "absl/strings/str_cat.h"
#include "mediapipe/framework/calculator.pb.h"
#include "mediapipe/framework/camera_intrinsics.h"
#include "mediapipe/framework/formats/image.h"
@ -107,55 +109,31 @@ absl::StatusOr<mediapipe::GpuBuffer> CreateGpuBuffer(
// Create a 1, 3, or 4 channel 8-bit ImageFrame shared pointer from a Java
// ByteBuffer.
std::unique_ptr<mediapipe::ImageFrame> CreateImageFrameFromByteBuffer(
JNIEnv* env, jobject byte_buffer, jint width, jint height,
mediapipe::ImageFormat::Format format) {
switch (format) {
case mediapipe::ImageFormat::SRGBA:
case mediapipe::ImageFormat::SRGB:
case mediapipe::ImageFormat::GRAY8:
break;
default:
LOG(ERROR) << "Format must be either SRGBA, SRGB, or GRAY8.";
return nullptr;
}
auto image_frame = std::make_unique<mediapipe::ImageFrame>(
format, width, height,
mediapipe::ImageFrame::kGlDefaultAlignmentBoundary);
absl::StatusOr<std::unique_ptr<mediapipe::ImageFrame>>
CreateImageFrameFromByteBuffer(JNIEnv* env, jobject byte_buffer, jint width,
jint height, jint width_step,
mediapipe::ImageFormat::Format format) {
const int64_t buffer_size = env->GetDirectBufferCapacity(byte_buffer);
const int num_channels = image_frame->NumberOfChannels();
const int expected_buffer_size =
num_channels == 1 ? width * height : image_frame->PixelDataSize();
if (buffer_size != expected_buffer_size) {
if (num_channels != 1)
LOG(ERROR) << "The input image buffer should have 4 bytes alignment.";
LOG(ERROR) << "Please check the input buffer size.";
LOG(ERROR) << "Buffer size: " << buffer_size
<< ", Buffer size needed: " << expected_buffer_size
<< ", Image width: " << width;
return nullptr;
const void* buffer_data = env->GetDirectBufferAddress(byte_buffer);
if (buffer_data == nullptr || buffer_size < 0) {
return absl::InvalidArgumentError(
"Cannot get direct access to the input buffer. It should be created "
"using allocateDirect.");
}
// Copy buffer data to image frame's pixel_data_.
if (num_channels == 1) {
const int width_step = image_frame->WidthStep();
const char* src_row =
reinterpret_cast<const char*>(env->GetDirectBufferAddress(byte_buffer));
char* dst_row = reinterpret_cast<char*>(image_frame->MutablePixelData());
for (int i = height; i > 0; --i) {
std::memcpy(dst_row, src_row, width);
src_row += width;
dst_row += width_step;
}
} else {
// 3 and 4 channels.
const void* buffer_data = env->GetDirectBufferAddress(byte_buffer);
std::memcpy(image_frame->MutablePixelData(), buffer_data,
image_frame->PixelDataSize());
}
const int expected_buffer_size = height * width_step;
RET_CHECK_EQ(buffer_size, expected_buffer_size)
<< "Input buffer size should be " << expected_buffer_size
<< " but is: " << buffer_size;
auto image_frame = std::make_unique<mediapipe::ImageFrame>();
// TODO: we could retain the buffer with a special deleter and use
// the data directly without a copy. May need a new Java API since existing
// code might expect to be able to overwrite the buffer after creating an
// ImageFrame from it.
image_frame->CopyPixelData(
format, width, height, width_step, static_cast<const uint8*>(buffer_data),
mediapipe::ImageFrame::kGlDefaultAlignmentBoundary);
return image_frame;
}
@ -176,77 +154,83 @@ JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateReferencePacket)(
JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateRgbImage)(
JNIEnv* env, jobject thiz, jlong context, jobject byte_buffer, jint width,
jint height) {
auto image_frame = CreateImageFrameFromByteBuffer(
env, byte_buffer, width, height, mediapipe::ImageFormat::SRGB);
if (nullptr == image_frame) return 0L;
// We require 4-byte alignment. See Java method.
constexpr int kAlignment = 4;
int width_step = ((width * 3 - 1) | (kAlignment - 1)) + 1;
auto image_frame_or =
CreateImageFrameFromByteBuffer(env, byte_buffer, width, height,
width_step, mediapipe::ImageFormat::SRGB);
if (ThrowIfError(env, image_frame_or.status())) return 0L;
mediapipe::Packet packet = mediapipe::Adopt(image_frame.release());
mediapipe::Packet packet = mediapipe::Adopt(image_frame_or->release());
return CreatePacketWithContext(context, packet);
}
absl::StatusOr<std::unique_ptr<mediapipe::ImageFrame>> CreateRgbImageFromRgba(
JNIEnv* env, jobject byte_buffer, jint width, jint height) {
const uint8_t* rgba_data =
static_cast<uint8_t*>(env->GetDirectBufferAddress(byte_buffer));
int64_t buffer_size = env->GetDirectBufferCapacity(byte_buffer);
if (rgba_data == nullptr || buffer_size < 0) {
return absl::InvalidArgumentError(
"Cannot get direct access to the input buffer. It should be created "
"using allocateDirect.");
}
const int expected_buffer_size = width * height * 4;
RET_CHECK_EQ(buffer_size, expected_buffer_size)
<< "Input buffer size should be " << expected_buffer_size
<< " but is: " << buffer_size;
auto image_frame = absl::make_unique<mediapipe::ImageFrame>(
mediapipe::ImageFormat::SRGB, width, height,
mediapipe::ImageFrame::kGlDefaultAlignmentBoundary);
mediapipe::android::RgbaToRgb(rgba_data, width * 4, width, height,
image_frame->MutablePixelData(),
image_frame->WidthStep());
return image_frame;
}
JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateRgbImageFromRgba)(
JNIEnv* env, jobject thiz, jlong context, jobject byte_buffer, jint width,
jint height) {
const uint8_t* rgba_data =
static_cast<uint8_t*>(env->GetDirectBufferAddress(byte_buffer));
auto image_frame = absl::make_unique<mediapipe::ImageFrame>(
mediapipe::ImageFormat::SRGB, width, height,
mediapipe::ImageFrame::kGlDefaultAlignmentBoundary);
int64_t buffer_size = env->GetDirectBufferCapacity(byte_buffer);
if (buffer_size != width * height * 4) {
LOG(ERROR) << "Please check the input buffer size.";
LOG(ERROR) << "Buffer size: " << buffer_size
<< ", Buffer size needed: " << width * height * 4
<< ", Image width: " << width;
return 0L;
}
mediapipe::android::RgbaToRgb(rgba_data, width * 4, width, height,
image_frame->MutablePixelData(),
image_frame->WidthStep());
mediapipe::Packet packet = mediapipe::Adopt(image_frame.release());
auto image_frame_or = CreateRgbImageFromRgba(env, byte_buffer, width, height);
if (ThrowIfError(env, image_frame_or.status())) return 0L;
mediapipe::Packet packet = mediapipe::Adopt(image_frame_or->release());
return CreatePacketWithContext(context, packet);
}
JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateGrayscaleImage)(
JNIEnv* env, jobject thiz, jlong context, jobject byte_buffer, jint width,
jint height) {
auto image_frame = CreateImageFrameFromByteBuffer(
env, byte_buffer, width, height, mediapipe::ImageFormat::GRAY8);
if (nullptr == image_frame) return 0L;
auto image_frame_or = CreateImageFrameFromByteBuffer(
env, byte_buffer, width, height, width, mediapipe::ImageFormat::GRAY8);
if (ThrowIfError(env, image_frame_or.status())) return 0L;
mediapipe::Packet packet = mediapipe::Adopt(image_frame.release());
mediapipe::Packet packet = mediapipe::Adopt(image_frame_or->release());
return CreatePacketWithContext(context, packet);
}
JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateFloatImageFrame)(
JNIEnv* env, jobject thiz, jlong context, jobject byte_buffer, jint width,
jint height) {
const void* data = env->GetDirectBufferAddress(byte_buffer);
auto image_frame = absl::make_unique<mediapipe::ImageFrame>(
mediapipe::ImageFormat::VEC32F1, width, height,
mediapipe::ImageFrame::kGlDefaultAlignmentBoundary);
int64_t buffer_size = env->GetDirectBufferCapacity(byte_buffer);
if (buffer_size != image_frame->PixelDataSize()) {
LOG(ERROR) << "Please check the input buffer size.";
LOG(ERROR) << "Buffer size: " << buffer_size
<< ", Buffer size needed: " << image_frame->PixelDataSize()
<< ", Image width: " << width;
return 0L;
}
std::memcpy(image_frame->MutablePixelData(), data,
image_frame->PixelDataSize());
mediapipe::Packet packet = mediapipe::Adopt(image_frame.release());
auto image_frame_or =
CreateImageFrameFromByteBuffer(env, byte_buffer, width, height, width * 4,
mediapipe::ImageFormat::VEC32F1);
if (ThrowIfError(env, image_frame_or.status())) return 0L;
mediapipe::Packet packet = mediapipe::Adopt(image_frame_or->release());
return CreatePacketWithContext(context, packet);
}
JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateRgbaImageFrame)(
JNIEnv* env, jobject thiz, jlong context, jobject byte_buffer, jint width,
jint height) {
auto image_frame = CreateImageFrameFromByteBuffer(
env, byte_buffer, width, height, mediapipe::ImageFormat::SRGBA);
if (nullptr == image_frame) return 0L;
mediapipe::Packet packet = mediapipe::Adopt(image_frame.release());
auto image_frame_or =
CreateImageFrameFromByteBuffer(env, byte_buffer, width, height, width * 4,
mediapipe::ImageFormat::SRGBA);
if (ThrowIfError(env, image_frame_or.status())) return 0L;
mediapipe::Packet packet = mediapipe::Adopt(image_frame_or->release());
return CreatePacketWithContext(context, packet);
}
@ -291,6 +275,12 @@ JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateAudioPacketDirect)(
jint num_samples) {
const uint8_t* audio_sample =
reinterpret_cast<uint8_t*>(env->GetDirectBufferAddress(data));
if (!audio_sample) {
ThrowIfError(env, absl::InvalidArgumentError(
"Cannot get direct access to the input buffer. It "
"should be created using allocateDirect."));
return 0L;
}
mediapipe::Packet packet =
createAudioPacket(audio_sample, num_samples, num_channels);
return CreatePacketWithContext(context, packet);
@ -360,8 +350,10 @@ JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateMatrix)(
JNIEnv* env, jobject thiz, jlong context, jint rows, jint cols,
jfloatArray data) {
if (env->GetArrayLength(data) != rows * cols) {
LOG(ERROR) << "Please check the matrix data size, has to be rows * cols = "
<< rows * cols;
ThrowIfError(
env, absl::InvalidArgumentError(absl::StrCat(
"Please check the matrix data size, has to be rows * cols = ",
rows * cols)));
return 0L;
}
std::unique_ptr<mediapipe::Matrix> matrix(new mediapipe::Matrix(rows, cols));
@ -379,7 +371,7 @@ JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateMatrix)(
JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateCpuImage)(
JNIEnv* env, jobject thiz, jlong context, jobject byte_buffer, jint width,
jint height, jint num_channels) {
jint height, jint width_step, jint num_channels) {
mediapipe::ImageFormat::Format format;
switch (num_channels) {
case 4:
@ -392,16 +384,18 @@ JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateCpuImage)(
format = mediapipe::ImageFormat::GRAY8;
break;
default:
LOG(ERROR) << "Channels must be either 1, 3, or 4.";
ThrowIfError(env, absl::InvalidArgumentError(absl::StrCat(
"Channels must be either 1, 3, or 4, but are ",
num_channels)));
return 0L;
}
auto image_frame =
CreateImageFrameFromByteBuffer(env, byte_buffer, width, height, format);
if (nullptr == image_frame) return 0L;
auto image_frame_or = CreateImageFrameFromByteBuffer(
env, byte_buffer, width, height, width_step, format);
if (ThrowIfError(env, image_frame_or.status())) return 0L;
mediapipe::Packet packet =
mediapipe::MakePacket<mediapipe::Image>(std::move(image_frame));
mediapipe::MakePacket<mediapipe::Image>(*std::move(image_frame_or));
return CreatePacketWithContext(context, packet);
}
@ -502,7 +496,8 @@ JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateCalculatorOptions)(
jbyte* data_ref = env->GetByteArrayElements(data, nullptr);
auto options = absl::make_unique<mediapipe::CalculatorOptions>();
if (!options->ParseFromArray(data_ref, count)) {
LOG(ERROR) << "Parsing binary-encoded CalculatorOptions failed.";
ThrowIfError(env, absl::InvalidArgumentError(absl::StrCat(
"Parsing binary-encoded CalculatorOptions failed.")));
return 0L;
}
mediapipe::Packet packet = mediapipe::Adopt(options.release());

View File

@ -99,7 +99,7 @@ JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateMatrix)(
JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateCpuImage)(
JNIEnv* env, jobject thiz, jlong context, jobject byte_buffer, jint width,
jint height, jint num_channels);
jint height, jint width_step, jint num_channels);
JNIEXPORT jlong JNICALL PACKET_CREATOR_METHOD(nativeCreateGpuImage)(
JNIEnv* env, jobject thiz, jlong context, jint name, jint width,

Some files were not shown because too many files have changed in this diff Show More