diff --git a/Dockerfile b/Dockerfile
index efa95301d..8adcd6c24 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -30,6 +30,7 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
         unzip \
         python \
         python-pip \
+        python3-pip \
         libopencv-core-dev \
         libopencv-highgui-dev \
         libopencv-imgproc-dev \
@@ -42,9 +43,10 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
 
 RUN pip install --upgrade setuptools
 RUN pip install future
+RUN pip3 install six
 
 # Install bazel
-ARG BAZEL_VERSION=0.26.1
+ARG BAZEL_VERSION=1.1.0
 RUN mkdir /bazel && \
     wget --no-check-certificate -O /bazel/installer.sh "https://github.com/bazelbuild/bazel/releases/download/${BAZEL_VERSION}/b\
azel-${BAZEL_VERSION}-installer-linux-x86_64.sh" && \
diff --git a/README.md b/README.md
index e7a9c10d9..f5a0bceef 100644
--- a/README.md
+++ b/README.md
@@ -10,11 +10,13 @@
 
 ## ML Solutions in MediaPipe
 * [Hand Tracking](mediapipe/docs/hand_tracking_mobile_gpu.md)
+* [Multi-hand Tracking](mediapipe/docs/multi_hand_tracking_mobile_gpu.md)
 * [Face Detection](mediapipe/docs/face_detection_mobile_gpu.md)
 * [Hair Segmentation](mediapipe/docs/hair_segmentation_mobile_gpu.md)
 * [Object Detection](mediapipe/docs/object_detection_mobile_gpu.md)
 
 ![hand_tracking](mediapipe/docs/images/mobile/hand_tracking_3d_android_gpu_small.gif)
+![multi-hand_tracking](mediapipe/docs/images/mobile/multi_hand_tracking_android_gpu_small.gif)
 ![face_detection](mediapipe/docs/images/mobile/face_detection_android_gpu_small.gif)
 ![hair_segmentation](mediapipe/docs/images/mobile/hair_segmentation_android_gpu_small.gif)
 ![object_detection](mediapipe/docs/images/mobile/object_detection_android_gpu_small.gif)
@@ -23,7 +25,7 @@
 Follow these [instructions](mediapipe/docs/install.md).
 
 ## Getting started
-See mobile and desktop [examples](mediapipe/docs/examples.md).
+See mobile, desktop and Google Coral [examples](mediapipe/docs/examples.md).
 
 ## Documentation
 [MediaPipe Read-the-Docs](https://mediapipe.readthedocs.io/) or [docs.mediapipe.dev](https://docs.mediapipe.dev)
@@ -41,6 +43,7 @@ A web-based visualizer is hosted on [viz.mediapipe.dev](https://viz.mediapipe.de
 * [MediaPipe: A Framework for Building Perception Pipelines](https://arxiv.org/abs/1906.08172)
 
 ## Events
+* [AI Nextcon 2020, 12-16 Feb 2020, Seattle](http://aisea20.xnextcon.com/)
 * [MediaPipe Madrid Meetup, 16 Dec 2019](https://www.meetup.com/Madrid-AI-Developers-Group/events/266329088/)
 * [MediaPipe London Meetup, Google 123 Building, 12 Dec 2019](https://www.meetup.com/London-AI-Tech-Talk/events/266329038)
 * [ML Conference, Berlin, 11 Dec 2019](https://mlconference.ai/machine-learning-advanced-development/mediapipe-building-real-time-cross-platform-mobile-web-edge-desktop-video-audio-ml-pipelines/)
diff --git a/WORKSPACE b/WORKSPACE
index ca87c83c9..c09ca03a5 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -12,17 +12,21 @@ http_archive(
 load("@bazel_skylib//lib:versions.bzl", "versions")
 versions.check(minimum_bazel_version = "0.24.1")
 
-# ABSL cpp library.
+# ABSL cpp library lts_2019_08_08.
 http_archive(
     name = "com_google_absl",
-    # Head commit on 2019-04-12.
-    # TODO: Switch to the latest absl version when the problem gets
-    # fixed.
     urls = [
-        "https://github.com/abseil/abseil-cpp/archive/a02f62f456f2c4a7ecf2be3104fe0c6e16fbad9a.tar.gz",
+        "https://github.com/abseil/abseil-cpp/archive/20190808.tar.gz",
     ],
-    sha256 = "d437920d1434c766d22e85773b899c77c672b8b4865d5dc2cd61a29fdff3cf03",
-    strip_prefix = "abseil-cpp-a02f62f456f2c4a7ecf2be3104fe0c6e16fbad9a",
+    # Remove after https://github.com/abseil/abseil-cpp/issues/326 is solved.
+    patches = [
+        "@//third_party:com_google_absl_f863b622fe13612433fdf43f76547d5edda0c93001.diff"
+    ],
+    patch_args = [
+        "-p1",
+    ],
+    strip_prefix = "abseil-cpp-20190808",
+    sha256 = "8100085dada279bf3ee00cd064d43b5f55e5d913be0dfe2906f06f8f28d5b37e"
 )
 
 http_archive(
@@ -103,9 +107,9 @@ http_archive(
     ],
 )
 
-# 2019-11-12
-_TENSORFLOW_GIT_COMMIT = "a5f9bcd64453ff3d1f64cb4da4786db3d2da7f82"
-_TENSORFLOW_SHA256= "f2b6f2ab2ffe63e86eccd3ce4bea6b7197383d726638dfeeebcdc1e7de73f075"
+# 2019-11-21
+_TENSORFLOW_GIT_COMMIT = "f482488b481a799ca07e7e2d153cf47b8e91a60c"
+_TENSORFLOW_SHA256= "8d9118c2ce186c7e1403f04b96982fe72c184060c7f7a93e30a28dca358694f0"
 http_archive(
     name = "org_tensorflow",
     urls = [
@@ -149,11 +153,10 @@ new_local_repository(
 
 http_archive(
     name = "android_opencv",
-    sha256 = "056b849842e4fa8751d09edbb64530cfa7a63c84ccd232d0ace330e27ba55d0b",
     build_file = "@//third_party:opencv_android.BUILD",
     strip_prefix = "OpenCV-android-sdk",
     type = "zip",
-    url = "https://github.com/opencv/opencv/releases/download/4.1.0/opencv-4.1.0-android-sdk.zip",
+    url = "https://github.com/opencv/opencv/releases/download/3.4.3/opencv-3.4.3-android-sdk.zip",
 )
 
 # After OpenCV 3.2.0, the pre-compiled opencv2.framework has google protobuf symbols, which will
@@ -184,13 +187,18 @@ maven_install(
     artifacts = [
         "androidx.annotation:annotation:aar:1.1.0",
         "androidx.appcompat:appcompat:aar:1.1.0-rc01",
+        "androidx.camera:camera-core:aar:1.0.0-alpha06",
+        "androidx.camera:camera-camera2:aar:1.0.0-alpha06",
         "androidx.constraintlayout:constraintlayout:aar:1.1.3",
         "androidx.core:core:aar:1.1.0-rc03",
         "androidx.legacy:legacy-support-v4:aar:1.0.0",
         "androidx.recyclerview:recyclerview:aar:1.1.0-beta02",
         "com.google.android.material:material:aar:1.0.0-rc01",
     ],
-    repositories = ["https://dl.google.com/dl/android/maven2"],
+    repositories = [
+        "https://dl.google.com/dl/android/maven2",
+        "https://repo1.maven.org/maven2",
+    ],
 )
 
 maven_server(
@@ -206,10 +214,10 @@ maven_jar(
 )
 
 maven_jar(
-  name = "androidx_concurrent_futures",
-  artifact = "androidx.concurrent:concurrent-futures:1.0.0-alpha03",
-  sha1 = "b528df95c7e2fefa2210c0c742bf3e491c1818ae",
-  server = "google_server",
+    name = "androidx_concurrent_futures",
+    artifact = "androidx.concurrent:concurrent-futures:1.0.0-alpha03",
+    sha1 = "b528df95c7e2fefa2210c0c742bf3e491c1818ae",
+    server = "google_server",
 )
 
 maven_jar(
diff --git a/mediapipe/calculators/core/BUILD b/mediapipe/calculators/core/BUILD
index 86cb28522..6de3c3828 100644
--- a/mediapipe/calculators/core/BUILD
+++ b/mediapipe/calculators/core/BUILD
@@ -691,6 +691,7 @@ cc_library(
         ":split_vector_calculator_cc_proto",
         "//mediapipe/framework:calculator_framework",
         "//mediapipe/framework/formats:landmark_cc_proto",
+        "//mediapipe/framework/formats:rect_cc_proto",
         "//mediapipe/framework/port:ret_check",
         "//mediapipe/framework/port:status",
         "//mediapipe/util:resource_util",
diff --git a/mediapipe/calculators/core/begin_loop_calculator.cc b/mediapipe/calculators/core/begin_loop_calculator.cc
index cc7f6b85e..6c1ac20bf 100644
--- a/mediapipe/calculators/core/begin_loop_calculator.cc
+++ b/mediapipe/calculators/core/begin_loop_calculator.cc
@@ -21,16 +21,10 @@
 
 namespace mediapipe {
 
-// A calculator to process std::vector<NormalizedLandmark>.
-typedef BeginLoopCalculator<std::vector<::mediapipe::NormalizedLandmark>>
-    BeginLoopNormalizedLandmarkCalculator;
-REGISTER_CALCULATOR(BeginLoopNormalizedLandmarkCalculator);
-
-// A calculator to process std::vector<std::vector<NormalizedLandmark>>.
-typedef BeginLoopCalculator<
-    std::vector<std::vector<::mediapipe::NormalizedLandmark>>>
-    BeginLoopNormalizedLandmarksVectorCalculator;
-REGISTER_CALCULATOR(BeginLoopNormalizedLandmarksVectorCalculator);
+// A calculator to process std::vector<NormalizedLandmarkList>.
+typedef BeginLoopCalculator<std::vector<::mediapipe::NormalizedLandmarkList>>
+    BeginLoopNormalizedLandmarkListVectorCalculator;
+REGISTER_CALCULATOR(BeginLoopNormalizedLandmarkListVectorCalculator);
 
 // A calculator to process std::vector<NormalizedRect>.
 typedef BeginLoopCalculator<std::vector<::mediapipe::NormalizedRect>>
diff --git a/mediapipe/calculators/core/concatenate_vector_calculator.cc b/mediapipe/calculators/core/concatenate_vector_calculator.cc
index c4144990e..0f6bb759c 100644
--- a/mediapipe/calculators/core/concatenate_vector_calculator.cc
+++ b/mediapipe/calculators/core/concatenate_vector_calculator.cc
@@ -19,7 +19,7 @@
 #include "mediapipe/framework/formats/landmark.pb.h"
 #include "tensorflow/lite/interpreter.h"
 
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
 #include "tensorflow/lite/delegates/gpu/gl/gl_buffer.h"
 #endif  // !MEDIAPIPE_DISABLE_GPU
 
@@ -50,7 +50,7 @@ typedef ConcatenateVectorCalculator<::mediapipe::NormalizedLandmark>
     ConcatenateLandmarkVectorCalculator;
 REGISTER_CALCULATOR(ConcatenateLandmarkVectorCalculator);
 
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
 typedef ConcatenateVectorCalculator<::tflite::gpu::gl::GlBuffer>
     ConcatenateGlBufferVectorCalculator;
 REGISTER_CALCULATOR(ConcatenateGlBufferVectorCalculator);
diff --git a/mediapipe/calculators/core/end_loop_calculator.cc b/mediapipe/calculators/core/end_loop_calculator.cc
index 8991e97f1..e27ab11ea 100644
--- a/mediapipe/calculators/core/end_loop_calculator.cc
+++ b/mediapipe/calculators/core/end_loop_calculator.cc
@@ -26,14 +26,9 @@ typedef EndLoopCalculator<std::vector<::mediapipe::NormalizedRect>>
     EndLoopNormalizedRectCalculator;
 REGISTER_CALCULATOR(EndLoopNormalizedRectCalculator);
 
-typedef EndLoopCalculator<std::vector<::mediapipe::NormalizedLandmark>>
-    EndLoopNormalizedLandmarkCalculator;
-REGISTER_CALCULATOR(EndLoopNormalizedLandmarkCalculator);
-
-typedef EndLoopCalculator<
-    std::vector<std::vector<::mediapipe::NormalizedLandmark>>>
-    EndLoopNormalizedLandmarksVectorCalculator;
-REGISTER_CALCULATOR(EndLoopNormalizedLandmarksVectorCalculator);
+typedef EndLoopCalculator<std::vector<::mediapipe::NormalizedLandmarkList>>
+    EndLoopNormalizedLandmarkListVectorCalculator;
+REGISTER_CALCULATOR(EndLoopNormalizedLandmarkListVectorCalculator);
 
 typedef EndLoopCalculator<std::vector<bool>> EndLoopBooleanCalculator;
 REGISTER_CALCULATOR(EndLoopBooleanCalculator);
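Reviewer note: BeginLoopCalculator and EndLoopCalculator are class templates, so supporting the new NormalizedLandmarkList element type is just a typedef plus a registration, as the hunks above show. A minimal sketch of the same pattern for some other element type — MyItem here is hypothetical and only for illustration, not part of this change:

    #include <vector>
    #include "mediapipe/calculators/core/begin_loop_calculator.h"
    #include "mediapipe/calculators/core/end_loop_calculator.h"

    namespace mediapipe {
    // Hypothetical element type; any copyable type usable in a packet works.
    struct MyItem {
      float value = 0.f;
    };
    typedef BeginLoopCalculator<std::vector<MyItem>> BeginLoopMyItemCalculator;
    REGISTER_CALCULATOR(BeginLoopMyItemCalculator);
    typedef EndLoopCalculator<std::vector<MyItem>> EndLoopMyItemCalculator;
    REGISTER_CALCULATOR(EndLoopMyItemCalculator);
    }  // namespace mediapipe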
diff --git a/mediapipe/calculators/core/previous_loopback_calculator_test.cc b/mediapipe/calculators/core/previous_loopback_calculator_test.cc
index 4ac38e9f0..09456514b 100644
--- a/mediapipe/calculators/core/previous_loopback_calculator_test.cc
+++ b/mediapipe/calculators/core/previous_loopback_calculator_test.cc
@@ -93,14 +93,19 @@ TEST(PreviousLoopbackCalculator, CorrectTimestamps) {
   EXPECT_EQ(TimestampValues(in_prev), (std::vector<int64>{1}));
   EXPECT_EQ(pair_values(in_prev.back()), std::make_pair(1, -1));
 
+  send_packet("in", 2);
+  MP_EXPECT_OK(graph_.WaitUntilIdle());
+  EXPECT_EQ(TimestampValues(in_prev), (std::vector<int64>{1, 2}));
+  EXPECT_EQ(pair_values(in_prev.back()), std::make_pair(2, 1));
+
   send_packet("in", 5);
   MP_EXPECT_OK(graph_.WaitUntilIdle());
-  EXPECT_EQ(TimestampValues(in_prev), (std::vector<int64>{1, 5}));
-  EXPECT_EQ(pair_values(in_prev.back()), std::make_pair(5, 1));
+  EXPECT_EQ(TimestampValues(in_prev), (std::vector<int64>{1, 2, 5}));
+  EXPECT_EQ(pair_values(in_prev.back()), std::make_pair(5, 2));
 
   send_packet("in", 15);
   MP_EXPECT_OK(graph_.WaitUntilIdle());
-  EXPECT_EQ(TimestampValues(in_prev), (std::vector<int64>{1, 5, 15}));
+  EXPECT_EQ(TimestampValues(in_prev), (std::vector<int64>{1, 2, 5, 15}));
   EXPECT_EQ(pair_values(in_prev.back()), std::make_pair(15, 5));
 
   MP_EXPECT_OK(graph_.CloseAllInputStreams());
@@ -182,18 +187,22 @@ TEST(PreviousLoopbackCalculator, ClosesCorrectly) {
   MP_EXPECT_OK(graph_.WaitUntilIdle());
   EXPECT_EQ(TimestampValues(outputs), (std::vector<int64>{1}));
 
+  send_packet("in", 2);
+  MP_EXPECT_OK(graph_.WaitUntilIdle());
+  EXPECT_EQ(TimestampValues(outputs), (std::vector<int64>{1, 2}));
+
   send_packet("in", 5);
   MP_EXPECT_OK(graph_.WaitUntilIdle());
-  EXPECT_EQ(TimestampValues(outputs), (std::vector<int64>{1, 5}));
+  EXPECT_EQ(TimestampValues(outputs), (std::vector<int64>{1, 2, 5}));
 
   send_packet("in", 15);
   MP_EXPECT_OK(graph_.WaitUntilIdle());
-  EXPECT_EQ(TimestampValues(outputs), (std::vector<int64>{1, 5, 15}));
+  EXPECT_EQ(TimestampValues(outputs), (std::vector<int64>{1, 2, 5, 15}));
 
   MP_EXPECT_OK(graph_.CloseAllInputStreams());
   MP_EXPECT_OK(graph_.WaitUntilIdle());
   EXPECT_EQ(TimestampValues(outputs),
-            (std::vector<int64>{1, 5, 15, Timestamp::Max().Value()}));
+            (std::vector<int64>{1, 2, 5, 15, Timestamp::Max().Value()}));
 
   MP_EXPECT_OK(graph_.WaitUntilDone());
 }
diff --git a/mediapipe/calculators/core/split_vector_calculator.cc b/mediapipe/calculators/core/split_vector_calculator.cc
index 3e60b8072..2a5a74f8b 100644
--- a/mediapipe/calculators/core/split_vector_calculator.cc
+++ b/mediapipe/calculators/core/split_vector_calculator.cc
@@ -17,6 +17,7 @@
 #include <vector>
 
 #include "mediapipe/framework/formats/landmark.pb.h"
+#include "mediapipe/framework/formats/rect.pb.h"
 #include "tensorflow/lite/interpreter.h"
 
 namespace mediapipe {
@@ -41,4 +42,8 @@ REGISTER_CALCULATOR(SplitTfLiteTensorVectorCalculator);
 typedef SplitVectorCalculator<::mediapipe::NormalizedLandmark>
     SplitLandmarkVectorCalculator;
 REGISTER_CALCULATOR(SplitLandmarkVectorCalculator);
+
+typedef SplitVectorCalculator<::mediapipe::NormalizedRect>
+    SplitNormalizedRectVectorCalculator;
+REGISTER_CALCULATOR(SplitNormalizedRectVectorCalculator);
 }  // namespace mediapipe
diff --git a/mediapipe/calculators/image/BUILD b/mediapipe/calculators/image/BUILD
index 5a0a75619..c6f2b38af 100644
--- a/mediapipe/calculators/image/BUILD
+++ b/mediapipe/calculators/image/BUILD
@@ -356,13 +356,13 @@ cc_library(
         "//mediapipe/framework/port:opencv_imgproc",
         "//mediapipe/framework/port:ret_check",
         "//mediapipe/framework/port:status",
-        "//mediapipe/gpu:gpu_buffer",
     ] + select({
         "//mediapipe/gpu:disable_gpu": [],
         "//conditions:default": [
             "//mediapipe/gpu:gl_calculator_helper",
             "//mediapipe/gpu:gl_simple_shaders",
             "//mediapipe/gpu:gl_quad_renderer",
+            "//mediapipe/gpu:gpu_buffer",
            "//mediapipe/gpu:shader_util",
         ],
     }),
diff --git a/mediapipe/calculators/image/image_transformation_calculator.cc b/mediapipe/calculators/image/image_transformation_calculator.cc
index 5eb34c3c0..c5bf4262a 100644
--- a/mediapipe/calculators/image/image_transformation_calculator.cc
+++ b/mediapipe/calculators/image/image_transformation_calculator.cc
@@ -400,7 +400,7 @@ REGISTER_CALCULATOR(ImageTransformationCalculator);
   QuadRenderer* renderer = nullptr;
   GlTexture src1;
 
-#if defined(__APPLE__) && !TARGET_OS_OSX
+#if defined(MEDIAPIPE_IOS)
   if (input.format() == GpuBufferFormat::kBiPlanar420YpCbCr8VideoRange ||
       input.format() == GpuBufferFormat::kBiPlanar420YpCbCr8FullRange) {
     if (!yuv_renderer_) {
diff --git a/mediapipe/calculators/image/scale_image_calculator.proto b/mediapipe/calculators/image/scale_image_calculator.proto
index 2fc782a4f..0f0d5aae4 100644
--- a/mediapipe/calculators/image/scale_image_calculator.proto
+++ b/mediapipe/calculators/image/scale_image_calculator.proto
@@ -36,7 +36,8 @@ message ScaleImageCalculatorOptions {
 
   // If ratio is positive, crop the image to this minimum and maximum
   // aspect ratio (preserving the center of the frame). This is done
-  // before scaling.
+  // before scaling. The string must contain "/", so to disable cropping,
+  // set both to "0/1".
   // For example, for a min_aspect_ratio of "9/16" and max of "16/9" the
   // following cropping will occur:
   // 1920x1080 (which is 16:9) is not cropped
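Reviewer note: the clarified comment is easier to follow with the arithmetic spelled out. A standalone sketch of the clamping the options describe — this is an illustration of the intended behavior, not the calculator's actual code:

    #include <cstdio>

    int main() {
      int width = 3840, height = 1080;      // 32:9 input frame
      const double kMinRatio = 9.0 / 16.0;  // min_aspect_ratio: "9/16"
      const double kMaxRatio = 16.0 / 9.0;  // max_aspect_ratio: "16/9"
      const double ratio = static_cast<double>(width) / height;
      if (ratio > kMaxRatio) {
        width = static_cast<int>(height * kMaxRatio);   // crop left/right
      } else if (ratio < kMinRatio) {
        height = static_cast<int>(width / kMinRatio);   // crop top/bottom
      }
      std::printf("%dx%d\n", width, height);  // prints 1920x1080
      return 0;
    }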
diff --git a/mediapipe/calculators/tensorflow/tensorflow_inference_calculator.cc b/mediapipe/calculators/tensorflow/tensorflow_inference_calculator.cc
index 80f54d554..d3f77b063 100644
--- a/mediapipe/calculators/tensorflow/tensorflow_inference_calculator.cc
+++ b/mediapipe/calculators/tensorflow/tensorflow_inference_calculator.cc
@@ -34,7 +34,7 @@
 #include "tensorflow/core/framework/tensor_shape.h"
 #include "tensorflow/core/framework/tensor_util.h"
 
-#if !defined(__ANDROID__) && !defined(__APPLE__)
+#if !defined(MEDIAPIPE_MOBILE) && !defined(__APPLE__)
 #include "tensorflow/core/profiler/lib/traceme.h"
 #endif
 
@@ -441,7 +441,7 @@ class TensorFlowInferenceCalculator : public CalculatorBase {
     const int64 run_start_time = absl::ToUnixMicros(clock_->TimeNow());
     tf::Status tf_status;
     {
-#if !defined(__ANDROID__) && !defined(__APPLE__)
+#if !defined(MEDIAPIPE_MOBILE) && !defined(__APPLE__)
       tensorflow::profiler::TraceMe trace(absl::string_view(cc->NodeName()));
 #endif
       tf_status = session_->Run(input_tensors, output_tensor_names,
diff --git a/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_calculator.cc b/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_calculator.cc
index b6d678b6b..77b22571f 100644
--- a/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_calculator.cc
+++ b/mediapipe/calculators/tensorflow/tensorflow_session_from_frozen_graph_calculator.cc
@@ -31,8 +31,7 @@
 #include "mediapipe/framework/tool/status_util.h"
 #include "tensorflow/core/public/session_options.h"
 
-#if defined(MEDIAPIPE_LITE) || defined(__ANDROID__) || \
-    defined(__APPLE__) && !TARGET_OS_OSX
+#if defined(MEDIAPIPE_MOBILE)
 #include "mediapipe/util/android/file/base/helpers.h"
 #else
 #include "mediapipe/framework/port/file_helpers.h"
diff --git a/mediapipe/calculators/tensorflow/tfrecord_reader_calculator.cc b/mediapipe/calculators/tensorflow/tfrecord_reader_calculator.cc
index 5de7b0c0d..eed7ca9dd 100644
--- a/mediapipe/calculators/tensorflow/tfrecord_reader_calculator.cc
+++ b/mediapipe/calculators/tensorflow/tfrecord_reader_calculator.cc
@@ -85,7 +85,7 @@ class TFRecordReaderCalculator : public CalculatorBase {
   tensorflow::io::RecordReader reader(file.get(),
                                       tensorflow::io::RecordReaderOptions());
   tensorflow::uint64 offset = 0;
-  std::string example_str;
+  tensorflow::tstring example_str;
   const int target_idx =
       cc->InputSidePackets().HasTag(kRecordIndex)
           ? cc->InputSidePackets().Tag(kRecordIndex).Get<int>()
@@ -98,7 +98,7 @@ class TFRecordReaderCalculator : public CalculatorBase {
     if (current_idx == target_idx) {
       if (cc->OutputSidePackets().HasTag(kExampleTag)) {
         tensorflow::Example tf_example;
-        tf_example.ParseFromString(example_str);
+        tf_example.ParseFromArray(example_str.data(), example_str.size());
         cc->OutputSidePackets()
             .Tag(kExampleTag)
             .Set(MakePacket<tensorflow::Example>(std::move(tf_example)));
diff --git a/mediapipe/calculators/tflite/BUILD b/mediapipe/calculators/tflite/BUILD
index 89b4d980b..50531a58b 100644
--- a/mediapipe/calculators/tflite/BUILD
+++ b/mediapipe/calculators/tflite/BUILD
@@ -13,12 +13,12 @@
 # limitations under the License.
 #
 
+load("//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library")
+
 licenses(["notice"])  # Apache 2.0
 
 package(default_visibility = ["//visibility:private"])
 
-load("//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library")
-
 proto_library(
     name = "ssd_anchors_calculator_proto",
     srcs = ["ssd_anchors_calculator.proto"],
@@ -249,6 +249,11 @@ cc_library(
             "@org_tensorflow//tensorflow/lite/delegates/gpu/gl:gl_program",
             "@org_tensorflow//tensorflow/lite/delegates/gpu/gl:gl_shader",
         ],
+    }) + select({
+        "//conditions:default": [],
+        "//mediapipe:android": [
+            "@org_tensorflow//tensorflow/lite/delegates/nnapi:nnapi_delegate",
+        ],
     }),
     alwayslink = 1,
 )
diff --git a/mediapipe/calculators/tflite/tflite_converter_calculator.cc b/mediapipe/calculators/tflite/tflite_converter_calculator.cc
index a9dccaed8..b2d69d9bb 100644
--- a/mediapipe/calculators/tflite/tflite_converter_calculator.cc
+++ b/mediapipe/calculators/tflite/tflite_converter_calculator.cc
@@ -25,8 +25,7 @@
 #include "tensorflow/lite/error_reporter.h"
 #include "tensorflow/lite/interpreter.h"
 
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
 #include "mediapipe/gpu/gl_calculator_helper.h"
 #include "mediapipe/gpu/gpu_buffer.h"
 #include "tensorflow/lite/delegates/gpu/gl/gl_buffer.h"
@@ -35,7 +34,7 @@
 #include "tensorflow/lite/delegates/gpu/gl_delegate.h"
 #endif  // !MEDIAPIPE_DISABLE_GPU
 
-#if defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#if defined(MEDIAPIPE_IOS)
 #import <CoreVideo/CoreVideo.h>
 #import <Metal/Metal.h>
 #import <MetalKit/MetalKit.h>
@@ -46,10 +45,9 @@
 #include "tensorflow/lite/delegates/gpu/metal_delegate.h"
 #endif  // iOS
 
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
 typedef ::tflite::gpu::gl::GlBuffer GpuTensor;
-#elif defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#elif defined(MEDIAPIPE_IOS)
 typedef id<MTLBuffer> GpuTensor;
 #endif
 
@@ -69,8 +67,7 @@ typedef Eigen::Matrix
 
 namespace mediapipe {
 
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
 using ::tflite::gpu::gl::CreateReadWriteShaderStorageBuffer;
 using ::tflite::gpu::gl::GlProgram;
 using ::tflite::gpu::gl::GlShader;
@@ -80,7 +77,7 @@ struct GPUData {
   GlShader shader;
   GlProgram program;
 };
-#elif defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#elif defined(MEDIAPIPE_IOS)
 struct GPUData {
   int elements = 1;
   GpuTensor buffer;
@@ -149,11 +146,10 @@ class TfLiteConverterCalculator : public CalculatorBase {
 
  std::unique_ptr<tflite::Interpreter> interpreter_ = nullptr;
 
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
   mediapipe::GlCalculatorHelper gpu_helper_;
   std::unique_ptr<GPUData> gpu_data_out_;
-#elif defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#elif defined(MEDIAPIPE_IOS)
   MPPMetalHelper* gpu_helper_ = nullptr;
   std::unique_ptr<GPUData> gpu_data_out_;
 #endif
@@ -202,10 +198,9 @@ REGISTER_CALCULATOR(TfLiteConverterCalculator);
 #endif  // !MEDIAPIPE_DISABLE_GPU
 
   if (use_gpu) {
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
     MP_RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
-#elif defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#elif defined(MEDIAPIPE_IOS)
     MP_RETURN_IF_ERROR([MPPMetalHelper updateContract:cc]);
 #endif
   }
@@ -236,10 +231,9 @@ REGISTER_CALCULATOR(TfLiteConverterCalculator);
               cc->Outputs().HasTag("TENSORS_GPU"));
     // Cannot use quantization.
     use_quantized_tensors_ = false;
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
     MP_RETURN_IF_ERROR(gpu_helper_.Open(cc));
-#elif defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#elif defined(MEDIAPIPE_IOS)
     gpu_helper_ = [[MPPMetalHelper alloc] initWithCalculatorContext:cc];
     RET_CHECK(gpu_helper_);
 #endif
@@ -270,11 +264,10 @@ REGISTER_CALCULATOR(TfLiteConverterCalculator);
 }
 
 ::mediapipe::Status TfLiteConverterCalculator::Close(CalculatorContext* cc) {
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
   gpu_helper_.RunInGlContext([this] { gpu_data_out_.reset(); });
 #endif
-#if defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#if defined(MEDIAPIPE_IOS)
   gpu_data_out_.reset();
 #endif
   return ::mediapipe::OkStatus();
@@ -390,8 +383,7 @@ REGISTER_CALCULATOR(TfLiteConverterCalculator);
 
 ::mediapipe::Status TfLiteConverterCalculator::ProcessGPU(
     CalculatorContext* cc) {
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
   // GpuBuffer to tflite::gpu::GlBuffer conversion.
   const auto& input = cc->Inputs().Tag("IMAGE_GPU").Get<mediapipe::GpuBuffer>();
   MP_RETURN_IF_ERROR(
@@ -427,7 +419,7 @@ REGISTER_CALCULATOR(TfLiteConverterCalculator);
   cc->Outputs()
       .Tag("TENSORS_GPU")
       .Add(output_tensors.release(), cc->InputTimestamp());
-#elif defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#elif defined(MEDIAPIPE_IOS)
   // GpuBuffer to id<MTLBuffer> conversion.
   const auto& input = cc->Inputs().Tag("IMAGE_GPU").Get<mediapipe::GpuBuffer>();
   {
@@ -493,8 +485,7 @@ REGISTER_CALCULATOR(TfLiteConverterCalculator);
       RET_CHECK_FAIL() << "Num input channels is less than desired output.";
 #endif  // !MEDIAPIPE_DISABLE_GPU
 
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
   MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext(
       [this, &include_alpha, &input, &single_channel]() -> ::mediapipe::Status {
         // Device memory.
@@ -538,7 +529,9 @@ REGISTER_CALCULATOR(TfLiteConverterCalculator);
             &gpu_data_out_->program));
         return ::mediapipe::OkStatus();
       }));
-#elif defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+
+#elif defined(MEDIAPIPE_IOS)
+
   RET_CHECK(include_alpha)
       << "iOS GPU inference currently accepts only RGBA input.";
 
@@ -619,7 +612,7 @@ REGISTER_CALCULATOR(TfLiteConverterCalculator);
   CHECK_GE(max_num_channels_, 1);
   CHECK_LE(max_num_channels_, 4);
   CHECK_NE(max_num_channels_, 2);
-#if defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#if defined(MEDIAPIPE_IOS)
   if (cc->Inputs().HasTag("IMAGE_GPU"))
     // Currently on iOS, tflite gpu input tensor must be 4 channels,
     // so input image must be 4 channels also (checked in InitGpu).
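Reviewer note: the recurring change throughout this patch replaces ad-hoc conditions such as !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && !defined(__APPLE__) with centralized platform macros (MEDIAPIPE_ANDROID, MEDIAPIPE_IOS, MEDIAPIPE_MOBILE, MEDIAPIPE_DISABLE_GL_COMPUTE). A rough, assumed sketch of how such macros could be derived — the real definitions live in MediaPipe's port/build headers and may differ in detail:

    // Assumed illustration only; not the actual MediaPipe header.
    #if defined(__ANDROID__)
    #define MEDIAPIPE_ANDROID 1
    #endif
    #if defined(__APPLE__)
    #include <TargetConditionals.h>
    #if !TARGET_OS_OSX
    #define MEDIAPIPE_IOS 1
    #endif
    #endif
    #if defined(MEDIAPIPE_LITE) || defined(MEDIAPIPE_ANDROID) || \
        defined(MEDIAPIPE_IOS) || defined(__EMSCRIPTEN__)
    #define MEDIAPIPE_MOBILE 1
    #endif
    // GL-compute (SSBO) code paths would then be guarded by a single
    // MEDIAPIPE_DISABLE_GL_COMPUTE macro, defined wherever desktop/Android
    // OpenGL ES compute is unavailable or GPU support is disabled.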
diff --git a/mediapipe/calculators/tflite/tflite_inference_calculator.cc b/mediapipe/calculators/tflite/tflite_inference_calculator.cc
index 0abb48329..de693865b 100644
--- a/mediapipe/calculators/tflite/tflite_inference_calculator.cc
+++ b/mediapipe/calculators/tflite/tflite_inference_calculator.cc
@@ -27,7 +27,7 @@
 #include "tensorflow/lite/kernels/register.h"
 #include "tensorflow/lite/model.h"
 
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
 #include "mediapipe/gpu/gl_calculator_helper.h"
 #include "mediapipe/gpu/gpu_buffer.h"
 #include "tensorflow/lite/delegates/gpu/common/shape.h"
@@ -35,9 +35,9 @@
 #include "tensorflow/lite/delegates/gpu/gl/gl_program.h"
 #include "tensorflow/lite/delegates/gpu/gl/gl_shader.h"
 #include "tensorflow/lite/delegates/gpu/gl_delegate.h"
-#endif  // !MEDIAPIPE_DISABLE_GPU
+#endif  // !MEDIAPIPE_DISABLE_GL_COMPUTE
 
-#if defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#if defined(MEDIAPIPE_IOS)
 #import <CoreVideo/CoreVideo.h>
 #import <Metal/Metal.h>
 #import <MetalKit/MetalKit.h>
@@ -51,12 +51,15 @@
 #include "tensorflow/lite/delegates/gpu/metal_delegate_internal.h"
 #endif  // iOS
 
+#if defined(MEDIAPIPE_ANDROID)
+#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
+#endif  // ANDROID
+
 namespace {
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
 typedef ::tflite::gpu::gl::GlBuffer GpuTensor;
-#elif defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#elif defined(MEDIAPIPE_IOS)
 typedef id<MTLBuffer> GpuTensor;
 #endif
 
@@ -64,14 +67,35 @@ typedef id<MTLBuffer> GpuTensor;
 size_t RoundUp(size_t n, size_t m) { return ((n + m - 1) / m) * m; }  // NOLINT
 }  // namespace
 
+#if defined(MEDIAPIPE_EDGE_TPU)
+#include "edgetpu.h"
+
+// Creates and returns an Edge TPU interpreter to run the given edgetpu model.
+std::unique_ptr<tflite::Interpreter> BuildEdgeTpuInterpreter(
+    const tflite::FlatBufferModel& model,
+    tflite::ops::builtin::BuiltinOpResolver* resolver,
+    edgetpu::EdgeTpuContext* edgetpu_context) {
+  resolver->AddCustom(edgetpu::kCustomOp, edgetpu::RegisterCustomOp());
+  std::unique_ptr<tflite::Interpreter> interpreter;
+  if (tflite::InterpreterBuilder(model, *resolver)(&interpreter) != kTfLiteOk) {
+    std::cerr << "Failed to build edge TPU interpreter." << std::endl;
+  }
+  interpreter->SetExternalContext(kTfLiteEdgeTpuContext, edgetpu_context);
+  interpreter->SetNumThreads(1);
+  if (interpreter->AllocateTensors() != kTfLiteOk) {
+    std::cerr << "Failed to allocate edge TPU tensors." << std::endl;
+  }
+  return interpreter;
+}
+#endif  // MEDIAPIPE_EDGE_TPU
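Reviewer note: a hedged sketch of driving BuildEdgeTpuInterpreter above standalone, assuming the Edge TPU library is linked in; "model_edgetpu.tflite" and the surrounding setup are illustrative, and error handling is elided:

    #include <memory>
    #include "edgetpu.h"
    #include "tensorflow/lite/kernels/register.h"
    #include "tensorflow/lite/model.h"

    int main() {
      auto model =
          tflite::FlatBufferModel::BuildFromFile("model_edgetpu.tflite");
      tflite::ops::builtin::BuiltinOpResolver resolver;
      // One shared context per TPU device, matching the calculator's usage.
      std::shared_ptr<edgetpu::EdgeTpuContext> context =
          edgetpu::EdgeTpuManager::GetSingleton()->OpenDevice();
      std::unique_ptr<tflite::Interpreter> interpreter =
          BuildEdgeTpuInterpreter(*model, &resolver, context.get());
      // Fill interpreter->typed_input_tensor<uint8_t>(0), then run:
      interpreter->Invoke();
      return 0;
    }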
 
 // TfLiteInferenceCalculator File Layout:
 //  * Header
 //  * Core
 //  * Aux
 namespace mediapipe {
 
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
 using ::tflite::gpu::gl::CopyBuffer;
 using ::tflite::gpu::gl::CreateReadWriteShaderStorageBuffer;
 using ::tflite::gpu::gl::GlBuffer;
@@ -150,18 +174,22 @@ class TfLiteInferenceCalculator : public CalculatorBase {
 
   std::unique_ptr<tflite::FlatBufferModel> model_;
   TfLiteDelegate* delegate_ = nullptr;
 
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
   mediapipe::GlCalculatorHelper gpu_helper_;
   std::unique_ptr<GPUData> gpu_data_in_;
   std::vector<std::unique_ptr<GPUData>> gpu_data_out_;
-#elif defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#elif defined(MEDIAPIPE_IOS)
   MPPMetalHelper* gpu_helper_ = nullptr;
   std::unique_ptr<GPUData> gpu_data_in_;
   std::vector<std::unique_ptr<GPUData>> gpu_data_out_;
   TFLBufferConvert* converter_from_BPHWC4_ = nil;
 #endif
 
+#if defined(MEDIAPIPE_EDGE_TPU)
+  std::shared_ptr<edgetpu::EdgeTpuContext> edgetpu_context_ =
+      edgetpu::EdgeTpuManager::GetSingleton()->OpenDevice();
+#endif
+
   std::string model_path_ = "";
   bool gpu_inference_ = false;
   bool gpu_input_ = false;
@@ -210,10 +238,9 @@ REGISTER_CALCULATOR(TfLiteInferenceCalculator);
   use_gpu |= options.use_gpu();
 
   if (use_gpu) {
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
     MP_RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
-#elif defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#elif defined(MEDIAPIPE_IOS)
     MP_RETURN_IF_ERROR([MPPMetalHelper updateContract:cc]);
 #endif
   }
@@ -253,26 +280,24 @@ REGISTER_CALCULATOR(TfLiteInferenceCalculator);
   MP_RETURN_IF_ERROR(LoadModel(cc));
 
   if (gpu_inference_) {
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
     MP_RETURN_IF_ERROR(gpu_helper_.Open(cc));
-#elif defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#elif defined(MEDIAPIPE_IOS)
     gpu_helper_ = [[MPPMetalHelper alloc] initWithCalculatorContext:cc];
     RET_CHECK(gpu_helper_);
 #endif
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
     MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext(
         [this, &cc]() -> ::mediapipe::Status { return LoadDelegate(cc); }));
 #else
     MP_RETURN_IF_ERROR(LoadDelegate(cc));
 #endif
+  } else {
+#if defined(__EMSCRIPTEN__) || defined(MEDIAPIPE_ANDROID)
+    MP_RETURN_IF_ERROR(LoadDelegate(cc));
+#endif  // __EMSCRIPTEN__ || ANDROID
   }
-
-#if defined(__EMSCRIPTEN__)
-  MP_RETURN_IF_ERROR(LoadDelegate(cc));
-#endif  // __EMSCRIPTEN__
-
   return ::mediapipe::OkStatus();
 }
 
@@ -280,8 +305,7 @@ REGISTER_CALCULATOR(TfLiteInferenceCalculator);
   // 1. Receive pre-processed tensor inputs.
   if (gpu_input_) {
     // Read GPU input into SSBO.
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
     const auto& input_tensors =
         cc->Inputs().Tag("TENSORS_GPU").Get<std::vector<GpuTensor>>();
     RET_CHECK_EQ(input_tensors.size(), 1);
@@ -291,7 +315,7 @@ REGISTER_CALCULATOR(TfLiteInferenceCalculator);
           RET_CHECK_CALL(CopyBuffer(input_tensors[0], gpu_data_in_->buffer));
           return ::mediapipe::OkStatus();
         }));
-#elif defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#elif defined(MEDIAPIPE_IOS)
     const auto& input_tensors =
         cc->Inputs().Tag("TENSORS_GPU").Get<std::vector<GpuTensor>>();
     RET_CHECK_EQ(input_tensors.size(), 1);
@@ -327,14 +351,13 @@ REGISTER_CALCULATOR(TfLiteInferenceCalculator);
 
   // 2. Run inference.
   if (gpu_inference_) {
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
     MP_RETURN_IF_ERROR(
         gpu_helper_.RunInGlContext([this]() -> ::mediapipe::Status {
           RET_CHECK_EQ(interpreter_->Invoke(), kTfLiteOk);
           return ::mediapipe::OkStatus();
         }));
-#elif defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#elif defined(MEDIAPIPE_IOS)
     RET_CHECK_EQ(interpreter_->Invoke(), kTfLiteOk);
 #endif
   } else {
@@ -343,8 +366,7 @@ REGISTER_CALCULATOR(TfLiteInferenceCalculator);
 
   // 3. Output processed tensors.
   if (gpu_output_) {
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
     // Output result tensors (GPU).
     auto output_tensors = absl::make_unique<std::vector<GpuTensor>>();
     MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext(
@@ -361,7 +383,7 @@ REGISTER_CALCULATOR(TfLiteInferenceCalculator);
     cc->Outputs()
         .Tag("TENSORS_GPU")
        .Add(output_tensors.release(), cc->InputTimestamp());
-#elif defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#elif defined(MEDIAPIPE_IOS)
     // Output result tensors (GPU).
     auto output_tensors = absl::make_unique<std::vector<GpuTensor>>();
     output_tensors->resize(gpu_data_out_.size());
@@ -406,25 +428,29 @@ REGISTER_CALCULATOR(TfLiteInferenceCalculator);
 
 ::mediapipe::Status TfLiteInferenceCalculator::Close(CalculatorContext* cc) {
   if (delegate_) {
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
-    MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext([this]() -> Status {
-      TfLiteGpuDelegateDelete(delegate_);
+    if (gpu_inference_) {
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
+      MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext([this]() -> Status {
+        TfLiteGpuDelegateDelete(delegate_);
+        gpu_data_in_.reset();
+        for (int i = 0; i < gpu_data_out_.size(); ++i) {
+          gpu_data_out_[i].reset();
+        }
+        return ::mediapipe::OkStatus();
+      }));
+#elif defined(MEDIAPIPE_IOS)
+      TFLGpuDelegateDelete(delegate_);
       gpu_data_in_.reset();
       for (int i = 0; i < gpu_data_out_.size(); ++i) {
         gpu_data_out_[i].reset();
       }
-      return ::mediapipe::OkStatus();
-    }));
-#elif defined(__APPLE__) && !TARGET_OS_OSX  // iOS
-    TFLGpuDelegateDelete(delegate_);
-    gpu_data_in_.reset();
-    for (int i = 0; i < gpu_data_out_.size(); ++i) {
-      gpu_data_out_[i].reset();
-    }
 #endif
+    }
     delegate_ = nullptr;
   }
+#if defined(MEDIAPIPE_EDGE_TPU)
+  edgetpu_context_.reset();
+#endif
   return ::mediapipe::OkStatus();
 }
 
@@ -458,16 +484,18 @@ REGISTER_CALCULATOR(TfLiteInferenceCalculator);
   model_ = tflite::FlatBufferModel::BuildFromFile(model_path_.c_str());
   RET_CHECK(model_);
 
+  tflite::ops::builtin::BuiltinOpResolver op_resolver;
   if (cc->InputSidePackets().HasTag("CUSTOM_OP_RESOLVER")) {
-    const auto& op_resolver =
-        cc->InputSidePackets()
-            .Tag("CUSTOM_OP_RESOLVER")
-            .Get<tflite::ops::builtin::BuiltinOpResolver>();
-    tflite::InterpreterBuilder(*model_, op_resolver)(&interpreter_);
-  } else {
-    const tflite::ops::builtin::BuiltinOpResolver op_resolver;
-    tflite::InterpreterBuilder(*model_, op_resolver)(&interpreter_);
+    op_resolver = cc->InputSidePackets()
+                      .Tag("CUSTOM_OP_RESOLVER")
+                      .Get<tflite::ops::builtin::BuiltinOpResolver>();
   }
+#if defined(MEDIAPIPE_EDGE_TPU)
+  interpreter_ =
+      BuildEdgeTpuInterpreter(*model_, &op_resolver, edgetpu_context_.get());
+#else
+  tflite::InterpreterBuilder(*model_, op_resolver)(&interpreter_);
+#endif  // MEDIAPIPE_EDGE_TPU
 
   RET_CHECK(interpreter_);
 
@@ -490,8 +518,22 @@ REGISTER_CALCULATOR(TfLiteInferenceCalculator);
 
 ::mediapipe::Status TfLiteInferenceCalculator::LoadDelegate(
     CalculatorContext* cc) {
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if defined(MEDIAPIPE_ANDROID)
+  if (!gpu_inference_) {
+    if (cc->Options<mediapipe::TfLiteInferenceCalculatorOptions>()
+            .use_nnapi()) {
+      // Attempt to use NNAPI.
+      // If not supported, the default CPU delegate will be created and used.
+      interpreter_->SetAllowFp16PrecisionForFp32(1);
+      delegate_ = tflite::NnApiDelegate();
+      RET_CHECK_EQ(interpreter_->ModifyGraphWithDelegate(delegate_), kTfLiteOk);
+    }
+    // Return, no need for GPU delegate below.
+    return ::mediapipe::OkStatus();
+  }
+#endif  // ANDROID
+
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
   // Configure and create the delegate.
   TfLiteGpuDelegateOptions options = TfLiteGpuDelegateOptionsDefault();
   options.compile_options.precision_loss_allowed = 1;
@@ -551,7 +593,7 @@ REGISTER_CALCULATOR(TfLiteInferenceCalculator);
   RET_CHECK_EQ(interpreter_->ModifyGraphWithDelegate(delegate_), kTfLiteOk);
 #endif  // OpenGL
 
-#if defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#if defined(MEDIAPIPE_IOS)
   // Configure and create the delegate.
   TFLGpuDelegateOptions options;
   options.allow_precision_loss = false;  // Must match converter, F=float/T=half
diff --git a/mediapipe/calculators/tflite/tflite_inference_calculator.proto b/mediapipe/calculators/tflite/tflite_inference_calculator.proto
index a2950add3..8a862f3df 100644
--- a/mediapipe/calculators/tflite/tflite_inference_calculator.proto
+++ b/mediapipe/calculators/tflite/tflite_inference_calculator.proto
@@ -45,4 +45,9 @@ message TfLiteInferenceCalculatorOptions {
   // input tensors are on CPU. For input tensors on GPU, GPU backend is always
   // used.
   optional bool use_gpu = 2 [default = false];
+
+  // Android only. When true, an NNAPI delegate will be used for inference.
+  // If NNAPI is not available, then the default CPU delegate will be used
+  // automatically.
+  optional bool use_nnapi = 3 [default = false];
 }
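Reviewer note: a hedged sketch of enabling the new use_nnapi option from a graph config; the node wiring and stream names here are illustrative, not from this patch:

    #include "mediapipe/framework/calculator_framework.h"
    #include "mediapipe/framework/port/parse_text_proto.h"

    mediapipe::CalculatorGraphConfig BuildInferenceConfig() {
      return mediapipe::ParseTextProtoOrDie<mediapipe::CalculatorGraphConfig>(R"(
        input_stream: "input_tensors"
        node {
          calculator: "TfLiteInferenceCalculator"
          input_stream: "TENSORS:input_tensors"
          output_stream: "TENSORS:output_tensors"
          options {
            [mediapipe.TfLiteInferenceCalculatorOptions.ext] {
              model_path: "mediapipe/models/model.tflite"
              use_nnapi: true  # Android only; CPU fallback elsewhere.
            }
          }
        }
      )");
    }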
diff --git a/mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator.cc b/mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator.cc
index 906b4242f..6e1c6e1e6 100644
--- a/mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator.cc
+++ b/mediapipe/calculators/tflite/tflite_tensors_to_classification_calculator.cc
@@ -24,8 +24,7 @@
 #include "mediapipe/framework/port/ret_check.h"
 #include "mediapipe/util/resource_util.h"
 #include "tensorflow/lite/interpreter.h"
-#if defined(__EMSCRIPTEN__) || defined(__ANDROID__) || \
-    (defined(__APPLE__) && !TARGET_OS_OSX)
+#if defined(MEDIAPIPE_MOBILE)
 #include "mediapipe/util/android/file/base/file.h"
 #include "mediapipe/util/android/file/base/helpers.h"
 #else
diff --git a/mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator.cc b/mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator.cc
index bac852f44..371c7862e 100644
--- a/mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator.cc
+++ b/mediapipe/calculators/tflite/tflite_tensors_to_detections_calculator.cc
@@ -27,8 +27,7 @@
 #include "mediapipe/framework/port/ret_check.h"
 #include "tensorflow/lite/interpreter.h"
 
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
 #include "mediapipe/gpu/gl_calculator_helper.h"
 #include "tensorflow/lite/delegates/gpu/gl/gl_buffer.h"
 #include "tensorflow/lite/delegates/gpu/gl/gl_program.h"
@@ -36,7 +35,7 @@
 #include "tensorflow/lite/delegates/gpu/gl_delegate.h"
 #endif  // !MEDIAPIPE_DISABLE_GPU
 
-#if defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#if defined(MEDIAPIPE_IOS)
 #import <CoreVideo/CoreVideo.h>
 #import <Metal/Metal.h>
 #import <MetalKit/MetalKit.h>
@@ -56,17 +55,15 @@ constexpr int kNumCoordsPerBox = 4;
 
 namespace mediapipe {
 
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
 using ::tflite::gpu::gl::CreateReadWriteShaderStorageBuffer;
 using ::tflite::gpu::gl::GlShader;
 #endif
 
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
 typedef ::tflite::gpu::gl::GlBuffer GpuTensor;
 typedef ::tflite::gpu::gl::GlProgram GpuProgram;
-#elif defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#elif defined(MEDIAPIPE_IOS)
 typedef id<MTLBuffer> GpuTensor;
 typedef id<MTLComputePipelineState> GpuProgram;
 #endif
@@ -183,11 +180,10 @@ class TfLiteTensorsToDetectionsCalculator : public CalculatorBase {
   std::vector<Anchor> anchors_;
   bool side_packet_anchors_{};
 
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
   mediapipe::GlCalculatorHelper gpu_helper_;
   std::unique_ptr<GPUData> gpu_data_;
-#elif defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#elif defined(MEDIAPIPE_IOS)
   MPPMetalHelper* gpu_helper_ = nullptr;
   std::unique_ptr<GPUData> gpu_data_;
 #endif
@@ -226,10 +222,9 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator);
   }
 
   if (use_gpu) {
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
     MP_RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
-#elif defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#elif defined(MEDIAPIPE_IOS)
     MP_RETURN_IF_ERROR([MPPMetalHelper updateContract:cc]);
 #endif
   }
@@ -243,10 +238,9 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator);
 
   if (cc->Inputs().HasTag("TENSORS_GPU")) {
     gpu_input_ = true;
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
     MP_RETURN_IF_ERROR(gpu_helper_.Open(cc));
-#elif defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#elif defined(MEDIAPIPE_IOS)
     gpu_helper_ = [[MPPMetalHelper alloc] initWithCalculatorContext:cc];
     RET_CHECK(gpu_helper_);
 #endif
@@ -406,8 +400,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator);
 }
 
 ::mediapipe::Status TfLiteTensorsToDetectionsCalculator::ProcessGPU(
     CalculatorContext* cc, std::vector<Detection>* output_detections) {
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
   const auto& input_tensors =
       cc->Inputs().Tag("TENSORS_GPU").Get<std::vector<GpuTensor>>();
   RET_CHECK_GE(input_tensors.size(), 2);
@@ -470,7 +463,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator);
 
     return ::mediapipe::OkStatus();
   }));
-#elif defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#elif defined(MEDIAPIPE_IOS)
   const auto& input_tensors =
       cc->Inputs().Tag("TENSORS_GPU").Get<std::vector<GpuTensor>>();
 
@@ -569,12 +562,11 @@ REGISTER_CALCULATOR(TfLiteTensorsToDetectionsCalculator);
 
 ::mediapipe::Status TfLiteTensorsToDetectionsCalculator::Close(
     CalculatorContext* cc) {
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
   gpu_helper_.RunInGlContext([this] { gpu_data_.reset(); });
-#elif defined(__APPLE__) && !TARGET_OS_OSX  // iOS
+#elif defined(MEDIAPIPE_IOS)
   gpu_data_.reset();
-#endif  // !MEDIAPIPE_DISABLE_GPU
+#endif
 
   return ::mediapipe::OkStatus();
 }
@@ -723,8 +715,7 @@ Detection TfLiteTensorsToDetectionsCalculator::ConvertToDetection(
 
 ::mediapipe::Status TfLiteTensorsToDetectionsCalculator::GpuInit(
     CalculatorContext* cc) {
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
   MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext([this]()
                                                     -> ::mediapipe::Status {
     gpu_data_ = absl::make_unique<GPUData>();
@@ -937,8 +928,7 @@ void main() {
 
     return ::mediapipe::OkStatus();
   }));
-#elif defined(__APPLE__) && !TARGET_OS_OSX  // iOS
-  // TODO consolidate Metal and OpenGL shaders via vulkan.
+#elif defined(MEDIAPIPE_IOS)
   gpu_data_ = absl::make_unique<GPUData>();
   id<MTLDevice> device = gpu_helper_.mtlDevice;
@@ -1168,7 +1158,7 @@ kernel void scoreKernel(
     CHECK_LT(num_classes_, max_wg_size) << "# classes must be < " << max_wg_size;
   }
-#endif  // __ANDROID__ or iOS
+#endif  // !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
 
   return ::mediapipe::OkStatus();
 }
diff --git a/mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator.cc b/mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator.cc
index 996b1fa35..f6cffee40 100644
--- a/mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator.cc
+++ b/mediapipe/calculators/tflite/tflite_tensors_to_landmarks_calculator.cc
@@ -76,11 +76,11 @@ REGISTER_CALCULATOR(TfLiteTensorsToLandmarksCalculator);
   }
 
   if (cc->Outputs().HasTag("LANDMARKS")) {
-    cc->Outputs().Tag("LANDMARKS").Set<std::vector<Landmark>>();
+    cc->Outputs().Tag("LANDMARKS").Set<LandmarkList>();
   }
 
   if (cc->Outputs().HasTag("NORM_LANDMARKS")) {
-    cc->Outputs().Tag("NORM_LANDMARKS").Set<std::vector<NormalizedLandmark>>();
+    cc->Outputs().Tag("NORM_LANDMARKS").Set<NormalizedLandmarkList>();
   }
 
   return ::mediapipe::OkStatus();
@@ -127,54 +127,55 @@ REGISTER_CALCULATOR(TfLiteTensorsToLandmarksCalculator);
 
   const float* raw_landmarks = raw_tensor->data.f;
 
-  auto output_landmarks = absl::make_unique<std::vector<Landmark>>();
+  LandmarkList output_landmarks;
 
   for (int ld = 0; ld < num_landmarks_; ++ld) {
     const int offset = ld * num_dimensions;
-    Landmark landmark;
+    Landmark* landmark = output_landmarks.add_landmark();
 
     if (options_.flip_horizontally()) {
-      landmark.set_x(options_.input_image_width() - raw_landmarks[offset]);
+      landmark->set_x(options_.input_image_width() - raw_landmarks[offset]);
     } else {
-      landmark.set_x(raw_landmarks[offset]);
+      landmark->set_x(raw_landmarks[offset]);
     }
     if (num_dimensions > 1) {
      if (options_.flip_vertically()) {
-        landmark.set_y(options_.input_image_height() -
-                       raw_landmarks[offset + 1]);
+        landmark->set_y(options_.input_image_height() -
+                        raw_landmarks[offset + 1]);
      } else {
-        landmark.set_y(raw_landmarks[offset + 1]);
+        landmark->set_y(raw_landmarks[offset + 1]);
      }
     }
     if (num_dimensions > 2) {
-      landmark.set_z(raw_landmarks[offset + 2]);
+      landmark->set_z(raw_landmarks[offset + 2]);
     }
-    output_landmarks->push_back(landmark);
   }
 
   // Output normalized landmarks if required.
   if (cc->Outputs().HasTag("NORM_LANDMARKS")) {
-    auto output_norm_landmarks =
-        absl::make_unique<std::vector<NormalizedLandmark>>();
-    for (const auto& landmark : *output_landmarks) {
-      NormalizedLandmark norm_landmark;
-      norm_landmark.set_x(static_cast<float>(landmark.x()) /
-                          options_.input_image_width());
-      norm_landmark.set_y(static_cast<float>(landmark.y()) /
-                          options_.input_image_height());
-      norm_landmark.set_z(landmark.z() / options_.normalize_z());
-
-      output_norm_landmarks->push_back(norm_landmark);
+    NormalizedLandmarkList output_norm_landmarks;
+    // for (const auto& landmark : output_landmarks) {
+    for (int i = 0; i < output_landmarks.landmark_size(); ++i) {
+      const Landmark& landmark = output_landmarks.landmark(i);
+      NormalizedLandmark* norm_landmark = output_norm_landmarks.add_landmark();
+      norm_landmark->set_x(static_cast<float>(landmark.x()) /
+                           options_.input_image_width());
+      norm_landmark->set_y(static_cast<float>(landmark.y()) /
+                           options_.input_image_height());
+      norm_landmark->set_z(landmark.z() / options_.normalize_z());
     }
     cc->Outputs()
         .Tag("NORM_LANDMARKS")
-        .Add(output_norm_landmarks.release(), cc->InputTimestamp());
+        .AddPacket(MakePacket<NormalizedLandmarkList>(output_norm_landmarks)
+                       .At(cc->InputTimestamp()));
   }
 
+  // Output absolute landmarks.
   if (cc->Outputs().HasTag("LANDMARKS")) {
     cc->Outputs()
         .Tag("LANDMARKS")
-        .Add(output_landmarks.release(), cc->InputTimestamp());
+        .AddPacket(MakePacket<LandmarkList>(output_landmarks)
+                       .At(cc->InputTimestamp()));
   }
 
   return ::mediapipe::OkStatus();
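Reviewer note: with this change, downstream calculators read one NormalizedLandmarkList proto per packet instead of a std::vector of NormalizedLandmark. A minimal consumer sketch — the tag mirrors this calculator's output, and the function itself is illustrative:

    #include "mediapipe/framework/calculator_framework.h"
    #include "mediapipe/framework/formats/landmark.pb.h"
    #include "mediapipe/framework/port/logging.h"

    ::mediapipe::Status ReadLandmarks(mediapipe::CalculatorContext* cc) {
      const auto& list = cc->Inputs()
                             .Tag("NORM_LANDMARKS")
                             .Get<mediapipe::NormalizedLandmarkList>();
      for (int i = 0; i < list.landmark_size(); ++i) {
        const mediapipe::NormalizedLandmark& lm = list.landmark(i);
        LOG(INFO) << "landmark " << i << ": " << lm.x() << ", " << lm.y()
                  << ", " << lm.z();
      }
      return ::mediapipe::OkStatus();
    }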
diff --git a/mediapipe/calculators/tflite/tflite_tensors_to_segmentation_calculator.cc b/mediapipe/calculators/tflite/tflite_tensors_to_segmentation_calculator.cc
index 55279308a..7fde03224 100644
--- a/mediapipe/calculators/tflite/tflite_tensors_to_segmentation_calculator.cc
+++ b/mediapipe/calculators/tflite/tflite_tensors_to_segmentation_calculator.cc
@@ -28,8 +28,7 @@
 #include "mediapipe/util/resource_util.h"
 #include "tensorflow/lite/interpreter.h"
 
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
 #include "mediapipe/gpu/gl_calculator_helper.h"
 #include "mediapipe/gpu/gl_simple_shaders.h"
 #include "mediapipe/gpu/shader_util.h"
@@ -54,8 +53,7 @@ float Clamp(float val, float min, float max) {
 
 namespace mediapipe {
 
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
 using ::tflite::gpu::gl::CopyBuffer;
 using ::tflite::gpu::gl::CreateReadWriteRgbaImageTexture;
 using ::tflite::gpu::gl::CreateReadWriteShaderStorageBuffer;
@@ -131,8 +129,7 @@ class TfLiteTensorsToSegmentationCalculator : public CalculatorBase {
   int tensor_channels_ = 0;
 
   bool use_gpu_ = false;
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
   mediapipe::GlCalculatorHelper gpu_helper_;
   std::unique_ptr<GlProgram> mask_program_with_prev_;
   std::unique_ptr<GlProgram> mask_program_no_prev_;
@@ -162,8 +159,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToSegmentationCalculator);
   }
 
   // Inputs GPU.
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
   if (cc->Inputs().HasTag("TENSORS_GPU")) {
     cc->Inputs().Tag("TENSORS_GPU").Set<std::vector<GlBuffer>>();
     use_gpu |= true;
   }
@@ -182,8 +178,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToSegmentationCalculator);
   if (cc->Outputs().HasTag("MASK")) {
     cc->Outputs().Tag("MASK").Set<ImageFrame>();
   }
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
   if (cc->Outputs().HasTag("MASK_GPU")) {
     cc->Outputs().Tag("MASK_GPU").Set<mediapipe::GpuBuffer>();
     use_gpu |= true;
   }
 #endif  // !MEDIAPIPE_DISABLE_GPU
 
   if (use_gpu) {
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
     MP_RETURN_IF_ERROR(mediapipe::GlCalculatorHelper::UpdateContract(cc));
 #endif  // !MEDIAPIPE_DISABLE_GPU
   }
@@ -205,8 +199,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToSegmentationCalculator);
 
   if (cc->Inputs().HasTag("TENSORS_GPU")) {
     use_gpu_ = true;
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
     MP_RETURN_IF_ERROR(gpu_helper_.Open(cc));
 #endif  // !MEDIAPIPE_DISABLE_GPU
   }
@@ -214,8 +207,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToSegmentationCalculator);
   MP_RETURN_IF_ERROR(LoadOptions(cc));
 
   if (use_gpu_) {
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
     MP_RETURN_IF_ERROR(
         gpu_helper_.RunInGlContext([this, cc]() -> ::mediapipe::Status {
           MP_RETURN_IF_ERROR(InitGpu(cc));
@@ -232,8 +224,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToSegmentationCalculator);
 ::mediapipe::Status TfLiteTensorsToSegmentationCalculator::Process(
     CalculatorContext* cc) {
   if (use_gpu_) {
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
     MP_RETURN_IF_ERROR(
         gpu_helper_.RunInGlContext([this, cc]() -> ::mediapipe::Status {
          MP_RETURN_IF_ERROR(ProcessGpu(cc));
@@ -249,8 +240,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToSegmentationCalculator);
 
 ::mediapipe::Status TfLiteTensorsToSegmentationCalculator::Close(
     CalculatorContext* cc) {
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
   gpu_helper_.RunInGlContext([this] {
     if (upsample_program_) glDeleteProgram(upsample_program_);
     upsample_program_ = 0;
@@ -377,8 +367,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToSegmentationCalculator);
   if (cc->Inputs().Tag("TENSORS_GPU").IsEmpty()) {
     return ::mediapipe::OkStatus();
   }
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
   // Get input streams.
   const auto& input_tensors =
       cc->Inputs().Tag("TENSORS_GPU").Get<std::vector<GlBuffer>>();
@@ -464,8 +453,7 @@ REGISTER_CALCULATOR(TfLiteTensorsToSegmentationCalculator);
 }
 
 void TfLiteTensorsToSegmentationCalculator::GlRender() {
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
   static const GLfloat square_vertices[] = {
       -1.0f, -1.0f,  // bottom left
       1.0f,  -1.0f,  // bottom right
@@ -537,8 +525,7 @@ void TfLiteTensorsToSegmentationCalculator::GlRender() {
 
 ::mediapipe::Status TfLiteTensorsToSegmentationCalculator::InitGpu(
     CalculatorContext* cc) {
-#if !defined(MEDIAPIPE_DISABLE_GPU) && !defined(__EMSCRIPTEN__) && \
-    !defined(__APPLE__)
+#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE)
   MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext([this]()
                                                     -> ::mediapipe::Status {
     // A shader to process a segmentation tensor into an output mask,
diff --git a/mediapipe/calculators/util/detection_label_id_to_text_calculator.cc b/mediapipe/calculators/util/detection_label_id_to_text_calculator.cc
index 7e8beadf1..4ac09e5af 100644
--- a/mediapipe/calculators/util/detection_label_id_to_text_calculator.cc
+++ b/mediapipe/calculators/util/detection_label_id_to_text_calculator.cc
@@ -19,8 +19,7 @@
 #include "mediapipe/framework/port/status.h"
 #include "mediapipe/util/resource_util.h"
 
-#if defined(MEDIAPIPE_LITE) || defined(__EMSCRIPTEN__) || \
-    defined(__ANDROID__) || (defined(__APPLE__) && !TARGET_OS_OSX)
+#if defined(MEDIAPIPE_MOBILE)
 #include "mediapipe/util/android/file/base/file.h"
 #include "mediapipe/util/android/file/base/helpers.h"
 #else
diff --git a/mediapipe/calculators/util/filter_collection_calculator.cc b/mediapipe/calculators/util/filter_collection_calculator.cc
index f86de04f0..e110afe7d 100644
--- a/mediapipe/calculators/util/filter_collection_calculator.cc
+++ b/mediapipe/calculators/util/filter_collection_calculator.cc
@@ -27,8 +27,8 @@ typedef FilterCollectionCalculator<std::vector<::mediapipe::NormalizedRect>>
 REGISTER_CALCULATOR(FilterNormalizedRectCollectionCalculator);
 
 typedef FilterCollectionCalculator<
-    std::vector<std::vector<::mediapipe::NormalizedLandmark>>>
-    FilterLandmarksCollectionCalculator;
-REGISTER_CALCULATOR(FilterLandmarksCollectionCalculator);
+    std::vector<::mediapipe::NormalizedLandmarkList>>
+    FilterLandmarkListCollectionCalculator;
+REGISTER_CALCULATOR(FilterLandmarkListCollectionCalculator);
 
 }  // namespace mediapipe
diff --git a/mediapipe/calculators/util/labels_to_render_data_calculator.cc b/mediapipe/calculators/util/labels_to_render_data_calculator.cc
index a7f517291..722f4480c 100644
--- a/mediapipe/calculators/util/labels_to_render_data_calculator.cc
+++ b/mediapipe/calculators/util/labels_to_render_data_calculator.cc
@@ -93,6 +93,7 @@ REGISTER_CALCULATOR(LabelsToRenderDataCalculator);
 }
 
 ::mediapipe::Status LabelsToRenderDataCalculator::Open(CalculatorContext* cc) {
+  cc->SetOffset(TimestampDiff(0));
   options_ = cc->Options<LabelsToRenderDataCalculatorOptions>();
   num_colors_ = options_.color_size();
   label_height_px_ = std::ceil(options_.font_height_px() * kFontHeightScale);
diff --git a/mediapipe/calculators/util/landmark_letterbox_removal_calculator.cc b/mediapipe/calculators/util/landmark_letterbox_removal_calculator.cc
index fd22cf191..aca312c30 100644
--- a/mediapipe/calculators/util/landmark_letterbox_removal_calculator.cc
+++ b/mediapipe/calculators/util/landmark_letterbox_removal_calculator.cc
@@ -49,7 +49,7 @@ constexpr char kLetterboxPaddingTag[] = "LETTERBOX_PADDING";
 // corresponding input image before letterboxing.
 //
 // Input:
-//   LANDMARKS: An std::vector<NormalizedLandmark> representing landmarks on an
+//   LANDMARKS: A NormalizedLandmarkList representing landmarks on a
 //   letterboxed image.
 //
 //   LETTERBOX_PADDING: An std::array<float, 4> representing the letterbox
@@ -57,7 +57,7 @@ constexpr char kLetterboxPaddingTag[] = "LETTERBOX_PADDING";
 //   image, normalized to [0.f, 1.f] by the letterboxed image dimensions.
 //
 // Output:
-//   LANDMARKS: An std::vector<NormalizedLandmark> representing landmarks with
+//   LANDMARKS: A NormalizedLandmarkList proto representing landmarks with
 //   their locations adjusted to the letterbox-removed (non-padded) image.
 //
 // Usage example:
@@ -74,10 +74,10 @@ class LandmarkLetterboxRemovalCalculator : public CalculatorBase {
               cc->Inputs().HasTag(kLetterboxPaddingTag))
         << "Missing one or more input streams.";
 
-    cc->Inputs().Tag(kLandmarksTag).Set<std::vector<NormalizedLandmark>>();
+    cc->Inputs().Tag(kLandmarksTag).Set<NormalizedLandmarkList>();
     cc->Inputs().Tag(kLetterboxPaddingTag).Set<std::array<float, 4>>();
 
-    cc->Outputs().Tag(kLandmarksTag).Set<std::vector<NormalizedLandmark>>();
+    cc->Outputs().Tag(kLandmarksTag).Set<NormalizedLandmarkList>();
 
     return ::mediapipe::OkStatus();
   }
@@ -94,8 +94,8 @@ class LandmarkLetterboxRemovalCalculator : public CalculatorBase {
       return ::mediapipe::OkStatus();
     }
 
-    const auto& input_landmarks =
-        cc->Inputs().Tag(kLandmarksTag).Get<std::vector<NormalizedLandmark>>();
+    const NormalizedLandmarkList& input_landmarks =
+        cc->Inputs().Tag(kLandmarksTag).Get<NormalizedLandmarkList>();
     const auto& letterbox_padding =
         cc->Inputs().Tag(kLetterboxPaddingTag).Get<std::array<float, 4>>();
 
@@ -104,24 +104,23 @@ class LandmarkLetterboxRemovalCalculator : public CalculatorBase {
     const float left_and_right = letterbox_padding[0] + letterbox_padding[2];
     const float top_and_bottom = letterbox_padding[1] + letterbox_padding[3];
 
-    auto output_landmarks =
-        absl::make_unique<std::vector<NormalizedLandmark>>();
-    for (const auto& landmark : input_landmarks) {
-      NormalizedLandmark new_landmark;
+    NormalizedLandmarkList output_landmarks;
+    for (int i = 0; i < input_landmarks.landmark_size(); ++i) {
+      const NormalizedLandmark& landmark = input_landmarks.landmark(i);
+      NormalizedLandmark* new_landmark = output_landmarks.add_landmark();
       const float new_x = (landmark.x() - left) / (1.0f - left_and_right);
      const float new_y = (landmark.y() - top) / (1.0f - top_and_bottom);
 
-      new_landmark.set_x(new_x);
-      new_landmark.set_y(new_y);
+      new_landmark->set_x(new_x);
+      new_landmark->set_y(new_y);
       // Keep z-coord as is.
-      new_landmark.set_z(landmark.z());
-
-      output_landmarks->emplace_back(new_landmark);
+      new_landmark->set_z(landmark.z());
     }
 
     cc->Outputs()
         .Tag(kLandmarksTag)
-        .Add(output_landmarks.release(), cc->InputTimestamp());
+        .AddPacket(MakePacket<NormalizedLandmarkList>(output_landmarks)
+                       .At(cc->InputTimestamp()));
     return ::mediapipe::OkStatus();
   }
 };
diff --git a/mediapipe/calculators/util/landmark_letterbox_removal_calculator_test.cc b/mediapipe/calculators/util/landmark_letterbox_removal_calculator_test.cc
index 33724890e..7723c0d89 100644
--- a/mediapipe/calculators/util/landmark_letterbox_removal_calculator_test.cc
+++ b/mediapipe/calculators/util/landmark_letterbox_removal_calculator_test.cc
@@ -43,10 +43,10 @@ CalculatorGraphConfig::Node GetDefaultNode() {
 TEST(LandmarkLetterboxRemovalCalculatorTest, PaddingLeftRight) {
   CalculatorRunner runner(GetDefaultNode());
 
-  auto landmarks = absl::make_unique<std::vector<NormalizedLandmark>>();
-  landmarks->push_back(CreateLandmark(0.5f, 0.5f));
-  landmarks->push_back(CreateLandmark(0.2f, 0.2f));
-  landmarks->push_back(CreateLandmark(0.7f, 0.7f));
+  auto landmarks = absl::make_unique<NormalizedLandmarkList>();
+  *landmarks->add_landmark() = CreateLandmark(0.5f, 0.5f);
+  *landmarks->add_landmark() = CreateLandmark(0.2f, 0.2f);
+  *landmarks->add_landmark() = CreateLandmark(0.7f, 0.7f);
   runner.MutableInputs()
       ->Tag("LANDMARKS")
       .packets.push_back(
@@ -61,26 +61,28 @@ TEST(LandmarkLetterboxRemovalCalculatorTest, PaddingLeftRight) {
   MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
   const std::vector<Packet>& output = runner.Outputs().Tag("LANDMARKS").packets;
   ASSERT_EQ(1, output.size());
-  const auto& output_landmarks =
-      output[0].Get<std::vector<NormalizedLandmark>>();
+  const auto& output_landmarks = output[0].Get<NormalizedLandmarkList>();
 
-  EXPECT_EQ(output_landmarks.size(), 3);
+  EXPECT_EQ(output_landmarks.landmark_size(), 3);
 
-  EXPECT_THAT(output_landmarks[0].x(), testing::FloatNear(0.6f, 1e-5));
-  EXPECT_THAT(output_landmarks[0].y(), testing::FloatNear(0.5f, 1e-5));
-  EXPECT_THAT(output_landmarks[1].x(), testing::FloatNear(0.0f, 1e-5));
-  EXPECT_THAT(output_landmarks[1].y(), testing::FloatNear(0.2f, 1e-5));
-  EXPECT_THAT(output_landmarks[2].x(), testing::FloatNear(1.0f, 1e-5));
-  EXPECT_THAT(output_landmarks[2].y(), testing::FloatNear(0.7f, 1e-5));
+  EXPECT_THAT(output_landmarks.landmark(0).x(), testing::FloatNear(0.6f, 1e-5));
+  EXPECT_THAT(output_landmarks.landmark(0).y(), testing::FloatNear(0.5f, 1e-5));
+  EXPECT_THAT(output_landmarks.landmark(1).x(), testing::FloatNear(0.0f, 1e-5));
+  EXPECT_THAT(output_landmarks.landmark(1).y(), testing::FloatNear(0.2f, 1e-5));
+  EXPECT_THAT(output_landmarks.landmark(2).x(), testing::FloatNear(1.0f, 1e-5));
+  EXPECT_THAT(output_landmarks.landmark(2).y(), testing::FloatNear(0.7f, 1e-5));
 }
 
 TEST(LandmarkLetterboxRemovalCalculatorTest, PaddingTopBottom) {
   CalculatorRunner runner(GetDefaultNode());
 
-  auto landmarks = absl::make_unique<std::vector<NormalizedLandmark>>();
-  landmarks->push_back(CreateLandmark(0.5f, 0.5f));
-  landmarks->push_back(CreateLandmark(0.2f, 0.2f));
-  landmarks->push_back(CreateLandmark(0.7f, 0.7f));
+  auto landmarks = absl::make_unique<NormalizedLandmarkList>();
+  NormalizedLandmark* landmark = landmarks->add_landmark();
+  *landmark = CreateLandmark(0.5f, 0.5f);
+  landmark = landmarks->add_landmark();
+  *landmark = CreateLandmark(0.2f, 0.2f);
+  landmark = landmarks->add_landmark();
+  *landmark = CreateLandmark(0.7f, 0.7f);
   runner.MutableInputs()
       ->Tag("LANDMARKS")
      .packets.push_back(
@@ -95,17 +97,16 @@ TEST(LandmarkLetterboxRemovalCalculatorTest, PaddingTopBottom) {
   MP_ASSERT_OK(runner.Run()) << "Calculator execution failed.";
runner.Outputs().Tag("LANDMARKS").packets; ASSERT_EQ(1, output.size()); - const auto& output_landmarks = - output[0].Get>(); + const auto& output_landmarks = output[0].Get(); - EXPECT_EQ(output_landmarks.size(), 3); + EXPECT_EQ(output_landmarks.landmark_size(), 3); - EXPECT_THAT(output_landmarks[0].x(), testing::FloatNear(0.5f, 1e-5)); - EXPECT_THAT(output_landmarks[0].y(), testing::FloatNear(0.6f, 1e-5)); - EXPECT_THAT(output_landmarks[1].x(), testing::FloatNear(0.2f, 1e-5)); - EXPECT_THAT(output_landmarks[1].y(), testing::FloatNear(0.0f, 1e-5)); - EXPECT_THAT(output_landmarks[2].x(), testing::FloatNear(0.7f, 1e-5)); - EXPECT_THAT(output_landmarks[2].y(), testing::FloatNear(1.0f, 1e-5)); + EXPECT_THAT(output_landmarks.landmark(0).x(), testing::FloatNear(0.5f, 1e-5)); + EXPECT_THAT(output_landmarks.landmark(0).y(), testing::FloatNear(0.6f, 1e-5)); + EXPECT_THAT(output_landmarks.landmark(1).x(), testing::FloatNear(0.2f, 1e-5)); + EXPECT_THAT(output_landmarks.landmark(1).y(), testing::FloatNear(0.0f, 1e-5)); + EXPECT_THAT(output_landmarks.landmark(2).x(), testing::FloatNear(0.7f, 1e-5)); + EXPECT_THAT(output_landmarks.landmark(2).y(), testing::FloatNear(1.0f, 1e-5)); } } // namespace mediapipe diff --git a/mediapipe/calculators/util/landmark_projection_calculator.cc b/mediapipe/calculators/util/landmark_projection_calculator.cc index 39ac61f2e..9c868fd50 100644 --- a/mediapipe/calculators/util/landmark_projection_calculator.cc +++ b/mediapipe/calculators/util/landmark_projection_calculator.cc @@ -47,13 +47,13 @@ constexpr char kRectTag[] = "NORM_RECT"; // Projects normalized landmarks in a rectangle to its original coordinates. The // rectangle must also be in normalized coordinates. // Input: -// NORM_LANDMARKS: An std::vector representing landmarks +// NORM_LANDMARKS: A NormalizedLandmarkList representing landmarks // in a normalized rectangle. // NORM_RECT: An NormalizedRect representing a normalized rectangle in image // coordinates. // // Output: -// NORM_LANDMARKS: An std::vector representing landmarks +// NORM_LANDMARKS: A NormalizedLandmarkList representing landmarks // with their locations adjusted to the image. 
// // Usage example: @@ -70,10 +70,10 @@ class LandmarkProjectionCalculator : public CalculatorBase { cc->Inputs().HasTag(kRectTag)) << "Missing one or more input streams."; - cc->Inputs().Tag(kLandmarksTag).Set>(); + cc->Inputs().Tag(kLandmarksTag).Set(); cc->Inputs().Tag(kRectTag).Set(); - cc->Outputs().Tag(kLandmarksTag).Set>(); + cc->Outputs().Tag(kLandmarksTag).Set(); return ::mediapipe::OkStatus(); } @@ -92,14 +92,14 @@ class LandmarkProjectionCalculator : public CalculatorBase { return ::mediapipe::OkStatus(); } - const auto& input_landmarks = - cc->Inputs().Tag(kLandmarksTag).Get>(); + const NormalizedLandmarkList& input_landmarks = + cc->Inputs().Tag(kLandmarksTag).Get(); const auto& input_rect = cc->Inputs().Tag(kRectTag).Get(); - auto output_landmarks = - absl::make_unique>(); - for (const auto& landmark : input_landmarks) { - NormalizedLandmark new_landmark; + NormalizedLandmarkList output_landmarks; + for (int i = 0; i < input_landmarks.landmark_size(); ++i) { + const NormalizedLandmark& landmark = input_landmarks.landmark(i); + NormalizedLandmark* new_landmark = output_landmarks.add_landmark(); const float x = landmark.x() - 0.5f; const float y = landmark.y() - 0.5f; @@ -110,17 +110,16 @@ class LandmarkProjectionCalculator : public CalculatorBase { new_x = new_x * input_rect.width() + input_rect.x_center(); new_y = new_y * input_rect.height() + input_rect.y_center(); - new_landmark.set_x(new_x); - new_landmark.set_y(new_y); + new_landmark->set_x(new_x); + new_landmark->set_y(new_y); // Keep z-coord as is. - new_landmark.set_z(landmark.z()); - - output_landmarks->emplace_back(new_landmark); + new_landmark->set_z(landmark.z()); } cc->Outputs() .Tag(kLandmarksTag) - .Add(output_landmarks.release(), cc->InputTimestamp()); + .AddPacket(MakePacket(output_landmarks) + .At(cc->InputTimestamp())); return ::mediapipe::OkStatus(); } }; diff --git a/mediapipe/calculators/util/landmarks_to_detection_calculator.cc b/mediapipe/calculators/util/landmarks_to_detection_calculator.cc index ca71ac377..5f429cabf 100644 --- a/mediapipe/calculators/util/landmarks_to_detection_calculator.cc +++ b/mediapipe/calculators/util/landmarks_to_detection_calculator.cc @@ -28,8 +28,7 @@ namespace { constexpr char kDetectionTag[] = "DETECTION"; constexpr char kNormalizedLandmarksTag[] = "NORM_LANDMARKS"; -Detection ConvertLandmarksToDetection( - const std::vector& landmarks) { +Detection ConvertLandmarksToDetection(const NormalizedLandmarkList& landmarks) { Detection detection; LocationData* location_data = detection.mutable_location_data(); @@ -37,7 +36,8 @@ Detection ConvertLandmarksToDetection( float x_max = std::numeric_limits::min(); float y_min = std::numeric_limits::max(); float y_max = std::numeric_limits::min(); - for (const auto& landmark : landmarks) { + for (int i = 0; i < landmarks.landmark_size(); ++i) { + const NormalizedLandmark& landmark = landmarks.landmark(i); x_min = std::min(x_min, landmark.x()); x_max = std::max(x_max, landmark.x()); y_min = std::min(y_min, landmark.y()); @@ -67,7 +67,7 @@ Detection ConvertLandmarksToDetection( // to specify a subset of landmarks for creating the detection. // // Input: -// NOMR_LANDMARKS: A vector of NormalizedLandmark. +// NOMR_LANDMARKS: A NormalizedLandmarkList proto. // // Output: // DETECTION: A Detection proto. @@ -95,9 +95,7 @@ REGISTER_CALCULATOR(LandmarksToDetectionCalculator); RET_CHECK(cc->Inputs().HasTag(kNormalizedLandmarksTag)); RET_CHECK(cc->Outputs().HasTag(kDetectionTag)); // TODO: Also support converting Landmark to Detection. 
- cc->Inputs() - .Tag(kNormalizedLandmarksTag) - .Set>(); + cc->Inputs().Tag(kNormalizedLandmarksTag).Set(); cc->Outputs().Tag(kDetectionTag).Set(); return ::mediapipe::OkStatus(); @@ -113,19 +111,20 @@ REGISTER_CALCULATOR(LandmarksToDetectionCalculator); ::mediapipe::Status LandmarksToDetectionCalculator::Process( CalculatorContext* cc) { - const auto& landmarks = cc->Inputs() - .Tag(kNormalizedLandmarksTag) - .Get>(); - RET_CHECK_GT(landmarks.size(), 0) << "Input landmark vector is empty."; + const auto& landmarks = + cc->Inputs().Tag(kNormalizedLandmarksTag).Get(); + RET_CHECK_GT(landmarks.landmark_size(), 0) + << "Input landmark vector is empty."; auto detection = absl::make_unique(); if (options_.selected_landmark_indices_size()) { - std::vector subset_landmarks( - options_.selected_landmark_indices_size()); - for (int i = 0; i < subset_landmarks.size(); ++i) { - RET_CHECK_LT(options_.selected_landmark_indices(i), landmarks.size()) + NormalizedLandmarkList subset_landmarks; + for (int i = 0; i < options_.selected_landmark_indices_size(); ++i) { + RET_CHECK_LT(options_.selected_landmark_indices(i), + landmarks.landmark_size()) << "Index of landmark subset is out of range."; - subset_landmarks[i] = landmarks[options_.selected_landmark_indices(i)]; + *subset_landmarks.add_landmark() = + landmarks.landmark(options_.selected_landmark_indices(i)); } *detection = ConvertLandmarksToDetection(subset_landmarks); } else { diff --git a/mediapipe/calculators/util/landmarks_to_floats_calculator.cc b/mediapipe/calculators/util/landmarks_to_floats_calculator.cc index 09ab4b575..b86542dd5 100644 --- a/mediapipe/calculators/util/landmarks_to_floats_calculator.cc +++ b/mediapipe/calculators/util/landmarks_to_floats_calculator.cc @@ -48,7 +48,7 @@ constexpr char kMatrixTag[] = "MATRIX"; // Converts a vector of landmarks to a vector of floats or a matrix. // Input: -// NORM_LANDMARKS: An std::vector. +// NORM_LANDMARKS: A NormalizedLandmarkList proto. // // Output: // FLOATS(optional): A vector of floats from flattened landmarks. 
@@ -63,7 +63,7 @@ constexpr char kMatrixTag[] = "MATRIX";
 class LandmarksToFloatsCalculator : public CalculatorBase {
  public:
   static ::mediapipe::Status GetContract(CalculatorContract* cc) {
-    cc->Inputs().Tag(kLandmarksTag).Set<std::vector<NormalizedLandmark>>();
+    cc->Inputs().Tag(kLandmarksTag).Set<NormalizedLandmarkList>();
     RET_CHECK(cc->Outputs().HasTag(kFloatsTag) ||
               cc->Outputs().HasTag(kMatrixTag));
     if (cc->Outputs().HasTag(kFloatsTag)) {
@@ -94,11 +94,12 @@ class LandmarksToFloatsCalculator : public CalculatorBase {
     }

     const auto& input_landmarks =
-        cc->Inputs().Tag(kLandmarksTag).Get<std::vector<NormalizedLandmark>>();
+        cc->Inputs().Tag(kLandmarksTag).Get<NormalizedLandmarkList>();

     if (cc->Outputs().HasTag(kFloatsTag)) {
       auto output_floats = absl::make_unique<std::vector<float>>();
-      for (const auto& landmark : input_landmarks) {
+      for (int i = 0; i < input_landmarks.landmark_size(); ++i) {
+        const NormalizedLandmark& landmark = input_landmarks.landmark(i);
         output_floats->emplace_back(landmark.x());
         if (num_dimensions_ > 1) {
           output_floats->emplace_back(landmark.y());
@@ -113,14 +114,14 @@ class LandmarksToFloatsCalculator : public CalculatorBase {
           .Add(output_floats.release(), cc->InputTimestamp());
     } else {
       auto output_matrix = absl::make_unique<Matrix>();
-      output_matrix->setZero(num_dimensions_, input_landmarks.size());
-      for (int i = 0; i < input_landmarks.size(); ++i) {
-        (*output_matrix)(0, i) = input_landmarks[i].x();
+      output_matrix->setZero(num_dimensions_, input_landmarks.landmark_size());
+      for (int i = 0; i < input_landmarks.landmark_size(); ++i) {
+        (*output_matrix)(0, i) = input_landmarks.landmark(i).x();
         if (num_dimensions_ > 1) {
-          (*output_matrix)(1, i) = input_landmarks[i].y();
+          (*output_matrix)(1, i) = input_landmarks.landmark(i).y();
         }
         if (num_dimensions_ > 2) {
-          (*output_matrix)(2, i) = input_landmarks[i].z();
+          (*output_matrix)(2, i) = input_landmarks.landmark(i).z();
         }
       }
       cc->Outputs()
diff --git a/mediapipe/calculators/util/landmarks_to_render_data_calculator.cc b/mediapipe/calculators/util/landmarks_to_render_data_calculator.cc
index 25ffb67ef..c2b318a3d 100644
--- a/mediapipe/calculators/util/landmarks_to_render_data_calculator.cc
+++ b/mediapipe/calculators/util/landmarks_to_render_data_calculator.cc
@@ -46,12 +46,13 @@ inline float Remap(float x, float lo, float hi, float scale) {
   return (x - lo) / (hi - lo + 1e-6) * scale;
 }

-template <class LandmarkType>
-inline void GetMinMaxZ(const std::vector<LandmarkType>& landmarks,
-                       float* z_min, float* z_max) {
+template <class LandmarkListType, class LandmarkType>
+inline void GetMinMaxZ(const LandmarkListType& landmarks, float* z_min,
                        float* z_max) {
   *z_min = std::numeric_limits<float>::max();
   *z_max = std::numeric_limits<float>::min();
-  for (const auto& landmark : landmarks) {
+  for (int i = 0; i < landmarks.landmark_size(); ++i) {
+    const LandmarkType& landmark = landmarks.landmark(i);
     *z_min = std::min(landmark.z(), *z_min);
     *z_max = std::max(landmark.z(), *z_max);
   }
@@ -73,7 +74,7 @@ void SetColorSizeValueFromZ(float z, float z_min, float z_max,
 }  // namespace

 // A calculator that converts Landmark proto to RenderData proto for
-// visualization. The input should be std::vector<Landmark>. It is also possible
+// visualization. The input should be a LandmarkList proto. It is also possible
 // to specify the connections between landmarks.
 //
 // Example config:
@@ -121,11 +122,11 @@ class LandmarksToRenderDataCalculator : public CalculatorBase {
       const LandmarksToRenderDataCalculatorOptions& options, bool normalized,
       int gray_val1, int gray_val2, RenderData* render_data);

-  template <class LandmarkType>
-  void AddConnections(const std::vector<LandmarkType>& landmarks,
-                      bool normalized, RenderData* render_data);
-  template <class LandmarkType>
-  void AddConnectionsWithDepth(const std::vector<LandmarkType>& landmarks,
+  template <class LandmarkListType>
+  void AddConnections(const LandmarkListType& landmarks, bool normalized,
+                      RenderData* render_data);
+  template <class LandmarkListType>
+  void AddConnectionsWithDepth(const LandmarkListType& landmarks,
                                bool normalized, float min_z, float max_z,
                                RenderData* render_data);
@@ -144,10 +145,10 @@ REGISTER_CALCULATOR(LandmarksToRenderDataCalculator);
         "normalized landmarks.";

   if (cc->Inputs().HasTag(kLandmarksTag)) {
-    cc->Inputs().Tag(kLandmarksTag).Set<std::vector<Landmark>>();
+    cc->Inputs().Tag(kLandmarksTag).Set<LandmarkList>();
   }
   if (cc->Inputs().HasTag(kNormLandmarksTag)) {
-    cc->Inputs().Tag(kNormLandmarksTag).Set<std::vector<NormalizedLandmark>>();
+    cc->Inputs().Tag(kNormLandmarksTag).Set<NormalizedLandmarkList>();
   }
   cc->Outputs().Tag(kRenderDataTag).Set<RenderData>();
   return ::mediapipe::OkStatus();
@@ -169,16 +170,17 @@ REGISTER_CALCULATOR(LandmarksToRenderDataCalculator);
   float z_max = 0.f;

   if (cc->Inputs().HasTag(kLandmarksTag)) {
-    const auto& landmarks =
-        cc->Inputs().Tag(kLandmarksTag).Get<std::vector<Landmark>>();
+    const LandmarkList& landmarks =
+        cc->Inputs().Tag(kLandmarksTag).Get<LandmarkList>();
     RET_CHECK_EQ(options_.landmark_connections_size() % 2, 0)
         << "Number of entries in landmark connections must be a multiple of 2";
     if (visualize_depth) {
-      GetMinMaxZ(landmarks, &z_min, &z_max);
+      GetMinMaxZ<LandmarkList, Landmark>(landmarks, &z_min, &z_max);
     }
     // Only change rendering if there are actually z values other than 0.
     visualize_depth &= ((z_max - z_min) > 1e-3);
-    for (const auto& landmark : landmarks) {
+    for (int i = 0; i < landmarks.landmark_size(); ++i) {
+      const Landmark& landmark = landmarks.landmark(i);
       auto* landmark_data_render =
           AddPointRenderData(options_, render_data.get());
       if (visualize_depth) {
@@ -191,25 +193,27 @@ REGISTER_CALCULATOR(LandmarksToRenderDataCalculator);
       landmark_data->set_y(landmark.y());
     }
     if (visualize_depth) {
-      AddConnectionsWithDepth(landmarks, /*normalized=*/false, z_min, z_max,
-                              render_data.get());
+      AddConnectionsWithDepth<LandmarkList>(landmarks, /*normalized=*/false,
+                                            z_min, z_max, render_data.get());
     } else {
-      AddConnections(landmarks, /*normalized=*/false, render_data.get());
+      AddConnections<LandmarkList>(landmarks, /*normalized=*/false,
+                                   render_data.get());
     }
   }

   if (cc->Inputs().HasTag(kNormLandmarksTag)) {
-    const auto& landmarks = cc->Inputs()
-                                .Tag(kNormLandmarksTag)
-                                .Get<std::vector<NormalizedLandmark>>();
+    const NormalizedLandmarkList& landmarks =
+        cc->Inputs().Tag(kNormLandmarksTag).Get<NormalizedLandmarkList>();
     RET_CHECK_EQ(options_.landmark_connections_size() % 2, 0)
         << "Number of entries in landmark connections must be a multiple of 2";
     if (visualize_depth) {
-      GetMinMaxZ(landmarks, &z_min, &z_max);
+      GetMinMaxZ<NormalizedLandmarkList, NormalizedLandmark>(landmarks, &z_min,
+                                                             &z_max);
     }
     // Only change rendering if there are actually z values other than 0.
     visualize_depth &= ((z_max - z_min) > 1e-3);
-    for (const auto& landmark : landmarks) {
+    for (int i = 0; i < landmarks.landmark_size(); ++i) {
+      const NormalizedLandmark& landmark = landmarks.landmark(i);
       auto* landmark_data_render =
           AddPointRenderData(options_, render_data.get());
       if (visualize_depth) {
@@ -222,10 +226,11 @@ REGISTER_CALCULATOR(LandmarksToRenderDataCalculator);
       landmark_data->set_y(landmark.y());
     }
     if (visualize_depth) {
-      AddConnectionsWithDepth(landmarks, /*normalized=*/true, z_min, z_max,
-                              render_data.get());
+      AddConnectionsWithDepth<NormalizedLandmarkList>(
+          landmarks, /*normalized=*/true, z_min, z_max, render_data.get());
     } else {
-      AddConnections(landmarks, /*normalized=*/true, render_data.get());
+      AddConnections<NormalizedLandmarkList>(landmarks, /*normalized=*/true,
+                                             render_data.get());
     }
   }

@@ -235,13 +240,13 @@ REGISTER_CALCULATOR(LandmarksToRenderDataCalculator);
   return ::mediapipe::OkStatus();
 }

-template <class LandmarkType>
+template <class LandmarkListType>
 void LandmarksToRenderDataCalculator::AddConnectionsWithDepth(
-    const std::vector<LandmarkType>& landmarks, bool normalized, float min_z,
+    const LandmarkListType& landmarks, bool normalized, float min_z,
     float max_z, RenderData* render_data) {
   for (int i = 0; i < options_.landmark_connections_size(); i += 2) {
-    const auto& ld0 = landmarks[options_.landmark_connections(i)];
-    const auto& ld1 = landmarks[options_.landmark_connections(i + 1)];
+    const auto& ld0 = landmarks.landmark(options_.landmark_connections(i));
+    const auto& ld1 = landmarks.landmark(options_.landmark_connections(i + 1));
     const int gray_val1 =
         255 - static_cast<int>(Remap(ld0.z(), min_z, max_z, 255));
     const int gray_val2 =
@@ -272,13 +277,13 @@ void LandmarksToRenderDataCalculator::AddConnectionToRenderData(
   connection_annotation->set_thickness(options.thickness());
 }

-template <class LandmarkType>
+template <class LandmarkListType>
 void LandmarksToRenderDataCalculator::AddConnections(
-    const std::vector<LandmarkType>& landmarks, bool normalized,
+    const LandmarkListType& landmarks, bool normalized,
     RenderData* render_data) {
   for (int i = 0; i < options_.landmark_connections_size(); i += 2) {
-    const auto& ld0 = landmarks[options_.landmark_connections(i)];
-    const auto& ld1 = landmarks[options_.landmark_connections(i + 1)];
+    const auto& ld0 = landmarks.landmark(options_.landmark_connections(i));
+    const auto& ld1 = landmarks.landmark(options_.landmark_connections(i + 1));
     AddConnectionToRenderData(ld0.x(), ld0.y(), ld1.x(), ld1.y(), options_,
                               normalized, render_data);
   }
diff --git a/mediapipe/calculators/util/top_k_scores_calculator.cc b/mediapipe/calculators/util/top_k_scores_calculator.cc
index bc8d30f87..8465c2391 100644
--- a/mediapipe/calculators/util/top_k_scores_calculator.cc
+++ b/mediapipe/calculators/util/top_k_scores_calculator.cc
@@ -29,8 +29,7 @@
 #include "mediapipe/framework/port/statusor.h"
 #include "mediapipe/util/resource_util.h"

-#if defined(MEDIAPIPE_LITE) || defined(__EMSCRIPTEN__) || \
-    defined(__ANDROID__) || (defined(__APPLE__) && !TARGET_OS_OSX)
+#if defined(MEDIAPIPE_MOBILE)
 #include "mediapipe/util/android/file/base/file.h"
 #include "mediapipe/util/android/file/base/helpers.h"
 #else
diff --git a/mediapipe/calculators/video/opencv_video_decoder_calculator.cc b/mediapipe/calculators/video/opencv_video_decoder_calculator.cc
index 49c7f0556..a5016d3dd 100644
--- a/mediapipe/calculators/video/opencv_video_decoder_calculator.cc
+++ b/mediapipe/calculators/video/opencv_video_decoder_calculator.cc
@@ -12,6 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+#include <cstdlib>
+
 #include "mediapipe/framework/calculator_framework.h"
 #include "mediapipe/framework/formats/image_format.pb.h"
 #include "mediapipe/framework/formats/image_frame.h"
@@ -66,6 +68,20 @@ ImageFormat::Format GetImageFormat(int num_channels) {
 //   output_stream: "VIDEO:video_frames"
 //   output_stream: "VIDEO_PRESTREAM:video_header"
 // }
+//
+// OpenCV's VideoCapture doesn't decode audio tracks. If the audio tracks need
+// to be saved, specify an output side packet with tag "SAVED_AUDIO_PATH".
+// The calculator will call the FFmpeg binary to save the audio tracks as an
+// AAC file.
+//
+// Example config:
+// node {
+//   calculator: "OpenCvVideoDecoderCalculator"
+//   input_side_packet: "INPUT_FILE_PATH:input_file_path"
+//   output_side_packet: "SAVED_AUDIO_PATH:audio_path"
+//   output_stream: "VIDEO:video_frames"
+//   output_stream: "VIDEO_PRESTREAM:video_header"
+// }
+//
 class OpenCvVideoDecoderCalculator : public CalculatorBase {
  public:
   static ::mediapipe::Status GetContract(CalculatorContract* cc) {
@@ -74,6 +90,9 @@ class OpenCvVideoDecoderCalculator : public CalculatorBase {
     if (cc->Outputs().HasTag("VIDEO_PRESTREAM")) {
       cc->Outputs().Tag("VIDEO_PRESTREAM").Set<VideoHeader>();
     }
+    if (cc->OutputSidePackets().HasTag("SAVED_AUDIO_PATH")) {
+      cc->OutputSidePackets().Tag("SAVED_AUDIO_PATH").Set<std::string>();
+    }
     return ::mediapipe::OkStatus();
   }
@@ -127,6 +146,25 @@ class OpenCvVideoDecoderCalculator : public CalculatorBase {
     }
     // Rewind to the very first frame.
     cap_->set(cv::CAP_PROP_POS_AVI_RATIO, 0);
+
+    if (cc->OutputSidePackets().HasTag("SAVED_AUDIO_PATH")) {
+#ifdef HAVE_FFMPEG
+      std::string saved_audio_path = std::tmpnam(nullptr);
+      system(absl::StrCat("ffmpeg -nostats -loglevel 0 -i ", input_file_path,
+                          " -vn -f adts ", saved_audio_path)
+                 .c_str());
+      cc->OutputSidePackets()
+          .Tag("SAVED_AUDIO_PATH")
+          .Set(MakePacket<std::string>(saved_audio_path));
+#else
+      return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC)
+             << "OpenCVVideoDecoderCalculator can't save the audio file "
+                "because FFmpeg is not installed. Please remove "
+                "output_side_packet: \"SAVED_AUDIO_PATH\" from the node "
+                "config.";
+#endif
+    }
     return ::mediapipe::OkStatus();
   }
diff --git a/mediapipe/calculators/video/opencv_video_encoder_calculator.cc b/mediapipe/calculators/video/opencv_video_encoder_calculator.cc
index 6ac11d933..3bf29be42 100644
--- a/mediapipe/calculators/video/opencv_video_encoder_calculator.cc
+++ b/mediapipe/calculators/video/opencv_video_encoder_calculator.cc
@@ -12,6 +12,8 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+#include <cstdlib>
+
 #include <memory>
 #include <string>
 #include <vector>
@@ -39,8 +41,7 @@ namespace mediapipe {
 // packet. Currently, the calculator only supports one video stream (in
 // mediapipe::ImageFrame).
 //
-// Example config to generate the output video file:
-//
+// Example config:
 // node {
 //   calculator: "OpenCvVideoEncoderCalculator"
 //   input_stream: "VIDEO:video"
@@ -53,6 +54,26 @@ namespace mediapipe {
 //     }
 //   }
 // }
+//
+// OpenCV's VideoWriter doesn't encode audio. If an input side packet with tag
+// "AUDIO_FILE_PATH" is specified, the calculator will call the FFmpeg binary
+// to attach the audio file to the video as the last step in Close().
+//
+// Example config:
+// node {
+//   calculator: "OpenCvVideoEncoderCalculator"
+//   input_stream: "VIDEO:video"
+//   input_stream: "VIDEO_PRESTREAM:video_header"
+//   input_side_packet: "OUTPUT_FILE_PATH:output_file_path"
+//   input_side_packet: "AUDIO_FILE_PATH:audio_path"
+//   node_options {
+//     [type.googleapis.com/mediapipe.OpenCvVideoEncoderCalculatorOptions]: {
+//       codec: "avc1"
+//       video_format: "mp4"
+//     }
+//   }
+// }
+//
 class OpenCvVideoEncoderCalculator : public CalculatorBase {
  public:
   static ::mediapipe::Status GetContract(CalculatorContract* cc);
@@ -77,6 +98,9 @@ class OpenCvVideoEncoderCalculator : public CalculatorBase {
   }
   RET_CHECK(cc->InputSidePackets().HasTag("OUTPUT_FILE_PATH"));
   cc->InputSidePackets().Tag("OUTPUT_FILE_PATH").Set<std::string>();
+  if (cc->InputSidePackets().HasTag("AUDIO_FILE_PATH")) {
+    cc->InputSidePackets().Tag("AUDIO_FILE_PATH").Set<std::string>();
+  }
   return ::mediapipe::OkStatus();
 }
@@ -155,6 +179,27 @@ class OpenCvVideoEncoderCalculator : public CalculatorBase {
     if (writer_ && writer_->isOpened()) {
       writer_->release();
     }
+    if (cc->InputSidePackets().HasTag("AUDIO_FILE_PATH")) {
+#ifdef HAVE_FFMPEG
+      const std::string& audio_file_path =
+          cc->InputSidePackets().Tag("AUDIO_FILE_PATH").Get<std::string>();
+      // A temp output file is needed because FFmpeg can't do in-place editing.
+      const std::string temp_file_path = std::tmpnam(nullptr);
+      system(absl::StrCat("mv ", output_file_path_, " ", temp_file_path,
+                          " && ffmpeg -nostats -loglevel 0 -i ", temp_file_path,
+                          " -i ", audio_file_path,
+                          " -c copy -map 0:v:0 -map 1:a:0 ", output_file_path_,
+                          " && rm ", temp_file_path)
+                 .c_str());
+#else
+      return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC)
+             << "OpenCVVideoEncoderCalculator can't attach the audio tracks to "
+                "the video because FFmpeg is not installed. Please remove "
+                "input_side_packet: \"AUDIO_FILE_PATH\" from the node "
+                "config.";
+#endif
+    }
     return ::mediapipe::OkStatus();
   }
diff --git a/mediapipe/docs/android_archive_library.md b/mediapipe/docs/android_archive_library.md
index 5351e58f9..0b6ccdf2b 100644
--- a/mediapipe/docs/android_archive_library.md
+++ b/mediapipe/docs/android_archive_library.md
@@ -92,7 +92,7 @@ project.
    MediaPipe depends on OpenCV; you will need to copy the precompiled OpenCV so
    files into app/src/main/jniLibs. You can download the official OpenCV
    Android SDK from
-   [here](https://github.com/opencv/opencv/releases/download/4.1.0/opencv-4.1.0-android-sdk.zip)
+   [here](https://github.com/opencv/opencv/releases/download/3.4.3/opencv-3.4.3-android-sdk.zip)
    and run:

    ```bash
@@ -126,6 +126,6 @@ project.
    ```

 6. Follow our Android app examples to use MediaPipe in Android Studio for your
-   use case. If you are looking for an example, a working face detection
+   use case. If you are looking for an example, a face detection
    example can be found
-   [here](https://github.com/jiuqiant/mediapipe_aar_example).
+   [here](https://github.com/jiuqiant/mediapipe_face_detection_aar_example)
+   and a multi-hand tracking example can be found
+   [here](https://github.com/jiuqiant/mediapipe_multi_hands_tracking_aar_example).
diff --git a/mediapipe/docs/examples.md b/mediapipe/docs/examples.md
index 4e279e935..846f4f7e7 100644
--- a/mediapipe/docs/examples.md
+++ b/mediapipe/docs/examples.md
@@ -157,3 +157,20 @@
 how to use MediaPipe with a TFLite model for hair segmentation on desktop using
 GPU with live video from a webcam.
 * [Desktop GPU](./hair_segmentation_desktop.md)
+
+## Google Coral (machine learning acceleration with Google EdgeTPU)
+
+Below are code samples showing how to run MediaPipe on the Google Coral Dev
+Board.
+
+### Object Detection on Coral
+
+[Object Detection on Coral with Webcam](./object_detection_coral_devboard.md)
+shows how to run a quantized object detection TFLite model accelerated with
+the Edge TPU on the
+[Google Coral Dev Board](https://coral.withgoogle.com/products/dev-board).
+
+### Face Detection on Coral
+
+[Face Detection on Coral with Webcam](./face_detection_coral_devboard.md)
+shows how to run a quantized face detection TFLite model accelerated with the
+Edge TPU on the
+[Google Coral Dev Board](https://coral.withgoogle.com/products/dev-board).
diff --git a/mediapipe/docs/face_detection_coral_devboard.md b/mediapipe/docs/face_detection_coral_devboard.md
new file mode 100644
index 000000000..e7656c2ec
--- /dev/null
+++ b/mediapipe/docs/face_detection_coral_devboard.md
@@ -0,0 +1,20 @@
+## Face Detection on Coral with Webcam
+
+MediaPipe runs cross-platform across desktop, mobile, and edge devices. Here
+is an example of running the MediaPipe
+[face detection pipeline](./face_detection_desktop.md) on an edge device like
+the [Google Coral Dev Board](https://coral.withgoogle.com/products/dev-board)
+with an [Edge TPU](https://cloud.google.com/edge-tpu/). This MediaPipe Coral
+face detection pipeline runs a
+[Coral-specific quantized version](https://github.com/google/mediapipe/blob/master/mediapipe/examples/coral/models/face-detector-quantized_edgetpu.tflite)
+of the
+[MediaPipe face detection TFLite model](https://github.com/google/mediapipe/blob/master/mediapipe/models/face_detection_front.tflite),
+accelerated on the Edge TPU.
+
+### Cross-compilation of MediaPipe Coral binaries in Docker
+
+We recommend not building the MediaPipe binaries on the edge device itself:
+its limited compute makes for long build times. Instead, build the MediaPipe
+binaries in Docker containers on a more powerful host machine. For
+step-by-step details of cross-compiling and running MediaPipe binaries on the
+Coral Dev Board, please refer to the
+[README.md in the MediaPipe Coral example folder](https://github.com/google/mediapipe/blob/master/mediapipe/examples/coral/README.md).
+
+![Face Detection running on Coral](images/face_detection_demo_coral.jpg)
diff --git a/mediapipe/docs/gpu.md b/mediapipe/docs/gpu.md
index d17733266..63c97e4bc 100644
--- a/mediapipe/docs/gpu.md
+++ b/mediapipe/docs/gpu.md
@@ -1,10 +1,10 @@
 ## Running on GPUs

 - [Overview](#overview)
-- [OpenGL Support](#graphconfig)
+- [OpenGL Support](#opengl-support)
 - [Life of a GPU calculator](#life-of-a-gpu-calculator)
 - [GpuBuffer to ImageFrame converters](#gpubuffer-to-imageframe-converters)
-
+- [Disable GPU support](#disable-gpu-support)

 ### Overview

 MediaPipe supports calculator nodes for GPU compute and rendering, and allows combining multiple GPU nodes, as well as mixing them with CPU-based calculator nodes. There exist several GPU APIs on mobile platforms (e.g., OpenGL ES, Metal and Vulkan). MediaPipe does not attempt to offer a single cross-API GPU abstraction. Individual nodes can be written using different APIs, allowing them to take advantage of platform-specific features when needed.
@@ -23,6 +23,7 @@ Below are the design principles for GPU support in MediaPipe
 * A calculator should be allowed maximum flexibility in using the GPU for all or part of its operation, combining it with the CPU if necessary.
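The "Life of a GPU calculator" section linked in the table of contents describes the pattern these principles lead to. Condensed, it looks like the sketch below — a sketch only, not the doc's full example: it assumes MediaPipe's `GlCalculatorHelper` API, and `GlExampleCalculator` is an illustrative name:

```cpp
// All GL work is funneled through GlCalculatorHelper, so it executes on the
// calculator's GL context, serialized with that context's other tasks.
class GlExampleCalculator : public CalculatorBase {
 public:
  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
    cc->Inputs().Index(0).Set<GpuBuffer>();
    cc->Outputs().Index(0).Set<GpuBuffer>();
    // Tells the framework that this node needs GPU services.
    return GlCalculatorHelper::UpdateContract(cc);
  }

  ::mediapipe::Status Open(CalculatorContext* cc) override {
    // Binds the helper to this calculator's GPU resources.
    return helper_.Open(cc);
  }

  ::mediapipe::Status Process(CalculatorContext* cc) override {
    // The lambda runs with the GL context made current.
    return helper_.RunInGlContext([this, cc]() -> ::mediapipe::Status {
      // GL rendering or compute on the input GpuBuffer goes here.
      return ::mediapipe::OkStatus();
    });
  }

 private:
  GlCalculatorHelper helper_;
};
```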
 ### OpenGL support
+
 MediaPipe supports OpenGL ES up to version 3.2 on Android and up to ES 3.0 on iOS. In addition, MediaPipe also supports Metal on iOS.

 * MediaPipe allows graphs to run OpenGL in multiple GL contexts. For example, this can be very useful in graphs that combine a slower GPU inference path (e.g., at 10 FPS) with a faster GPU rendering path (e.g., at 30 FPS): since one GL context corresponds to one sequential command queue, using the same context for both tasks would reduce the rendering frame rate. One challenge that MediaPipe's use of multiple contexts solves is the ability to communicate across them. An example scenario is one with an input video that is sent to both the rendering and inference paths, and rendering needs to have access to the latest output from inference.
@@ -128,3 +129,26 @@ The below diagram shows the data flow in a mobile application that captures video
 |:--:|
 | *Video frames from the camera are fed into the graph as `GpuBuffer` packets. The input stream is accessed by two calculators in parallel. `GpuBufferToImageFrameCalculator` converts the buffer into an `ImageFrame`, which is then sent through a grayscale converter and a canny filter (both based on OpenCV and running on the CPU), whose output is then converted into a `GpuBuffer` again. A multi-input GPU calculator, GlOverlayCalculator, takes as input both the original `GpuBuffer` and the one coming out of the edge detector, and overlays them using a shader. The output is then sent back to the application using a callback calculator, and the application renders the image to the screen using OpenGL.* |
+
+### Disable GPU support
+
+By default, building MediaPipe (with no special bazel flags) attempts to compile
+and link against OpenGL/Metal libraries.
+
+There are some command-line build flags available to disable/enable GPU support
+within the MediaPipe framework:
+
+```
+# To disable *all* GPU support
+bazel build --define MEDIAPIPE_DISABLE_GPU=1
+
+# To enable full GPU support (OpenGL ES 3.1+ & Metal)
+bazel build --copt -DMESA_EGL_NO_X11_HEADERS
+
+# To enable only OpenGL ES 3.0 and below (no GLES 3.1+ features)
+bazel build --copt -DMESA_EGL_NO_X11_HEADERS --copt -DMEDIAPIPE_DISABLE_GL_COMPUTE
+```
+
+Note: *MEDIAPIPE_DISABLE_GL_COMPUTE* is automatically defined on all Apple
+systems (Apple doesn't support OpenGL ES 3.1+).
+
+Note: on iOS and Android, it is assumed that GPU support will be enabled.
diff --git a/mediapipe/docs/hello_world_android.md b/mediapipe/docs/hello_world_android.md
index 35f628349..eb186d657 100644
--- a/mediapipe/docs/hello_world_android.md
+++ b/mediapipe/docs/hello_world_android.md
@@ -629,7 +629,7 @@ to load both dependencies:
   static {
     // Load all native libraries needed by the app.
System.loadLibrary("mediapipe_jni"); - System.loadLibrary("opencv_java4"); + System.loadLibrary("opencv_java3"); } ``` diff --git a/mediapipe/docs/images/face_detection_demo_coral.jpg b/mediapipe/docs/images/face_detection_demo_coral.jpg new file mode 100644 index 000000000..e66aecc25 Binary files /dev/null and b/mediapipe/docs/images/face_detection_demo_coral.jpg differ diff --git a/mediapipe/docs/images/mobile/multi_hand_tracking_android_gpu_small.gif b/mediapipe/docs/images/mobile/multi_hand_tracking_android_gpu_small.gif new file mode 100644 index 000000000..f844fc59b Binary files /dev/null and b/mediapipe/docs/images/mobile/multi_hand_tracking_android_gpu_small.gif differ diff --git a/mediapipe/docs/images/multi_hand_tracking_android_gpu.gif b/mediapipe/docs/images/multi_hand_tracking_android_gpu.gif new file mode 100644 index 000000000..2cc920c86 Binary files /dev/null and b/mediapipe/docs/images/multi_hand_tracking_android_gpu.gif differ diff --git a/mediapipe/docs/images/multi_hand_tracking_android_gpu_small.gif b/mediapipe/docs/images/multi_hand_tracking_android_gpu_small.gif new file mode 100644 index 000000000..572b3658f Binary files /dev/null and b/mediapipe/docs/images/multi_hand_tracking_android_gpu_small.gif differ diff --git a/mediapipe/docs/images/object_detection_demo_coral.jpg b/mediapipe/docs/images/object_detection_demo_coral.jpg new file mode 100644 index 000000000..901242f3a Binary files /dev/null and b/mediapipe/docs/images/object_detection_demo_coral.jpg differ diff --git a/mediapipe/docs/install.md b/mediapipe/docs/install.md index 02b6fc149..97eb8ca0b 100644 --- a/mediapipe/docs/install.md +++ b/mediapipe/docs/install.md @@ -245,19 +245,23 @@ To build and run iOS apps: $ cd mediapipe ``` -3. Install Bazel (0.24.1 and above required). +3. Install Bazel (version between 0.24.1 and 1.1.0). - Option 1. Use package manager tool to install the latest version of Bazel. + Option 1. Use package manager tool to install Bazel 1.1.0 ```bash - $ brew install bazel - - # Run 'bazel version' to check version of bazel installed + # If Bazel 1.1.0+ was installed. + $ brew uninstall bazel + # Install Bazel 1.1.0 + $ brew install https://raw.githubusercontent.com/bazelbuild/homebrew-tap/f8a0fa981bcb1784a0d0823e14867b844e94fb3d/Formula/bazel.rb + $ brew link bazel + # Run 'bazel version' to check version of bazel ``` Option 2. Follow the official [Bazel documentation](https://docs.bazel.build/versions/master/install-os-x.html#install-with-installer-mac-os-x) - to install any version of Bazel manually. + to install any version of Bazel manually. Note that MediaPipe doesn't + support Bazel 1.1.0+ on macOS yet. 4. Install OpenCV and FFmpeg. @@ -526,7 +530,7 @@ This will use a Docker image that will isolate mediapipe's installation from the ```bash $ docker run -it --name mediapipe mediapipe:latest - root@bca08b91ff63:/mediapipe# bash ./setup_android_sdk_and_ndk + root@bca08b91ff63:/mediapipe# bash ./setup_android_sdk_and_ndk.sh # Should print: # Android NDK is now installed. Consider setting $ANDROID_NDK_HOME environment variable to be /root/Android/Sdk/ndk-bundle/android-ndk-r18b diff --git a/mediapipe/docs/mediapipe_ios_setup.md b/mediapipe/docs/mediapipe_ios_setup.md index 1ac531c8c..533e075d6 100644 --- a/mediapipe/docs/mediapipe_ios_setup.md +++ b/mediapipe/docs/mediapipe_ios_setup.md @@ -7,7 +7,8 @@ 2. Install [Bazel](https://bazel.build/). - See their [instructions](https://docs.bazel.build/versions/master/install-os-x.html). 
+   See their
+   [instructions](https://docs.bazel.build/versions/master/install-os-x.html).
    We recommend using [Homebrew](https://brew.sh/):

    ```bash
    brew install bazelbuild/tap/bazel
    ```

-3. Clone the MediaPipe repository.
+3. Install the Python "future" and "six" packages.
+
+   To make MediaPipe work with TensorFlow, please install the Python "future"
+   and "six" libraries:
+
+   ```bash
+   pip install --user future six
+   ```
+
+4. Clone the MediaPipe repository.

    ```bash
    git clone https://github.com/google/mediapipe.git
    ```

-4. Symlink or copy your provisioning profile to `mediapipe/mediapipe/provisioning_profile.mobileprovision`.
+5. Symlink or copy your provisioning profile to
+   `mediapipe/mediapipe/provisioning_profile.mobileprovision`.

    ```bash
    cd mediapipe
diff --git a/mediapipe/docs/multi_hand_tracking_desktop.md b/mediapipe/docs/multi_hand_tracking_desktop.md
index d80707f9b..295c10169 100644
--- a/mediapipe/docs/multi_hand_tracking_desktop.md
+++ b/mediapipe/docs/multi_hand_tracking_desktop.md
@@ -156,7 +156,7 @@ node {
   output_stream: "multi_hand_rects"
   node_options: {
     [type.googleapis.com/mediapipe.AssociationCalculatorOptions] {
-      min_similarity_threshold: 0.1
+      min_similarity_threshold: 0.5
     }
   }
 }
diff --git a/mediapipe/docs/multi_hand_tracking_mobile_gpu.md b/mediapipe/docs/multi_hand_tracking_mobile_gpu.md
index b57a6631d..111cd894d 100644
--- a/mediapipe/docs/multi_hand_tracking_mobile_gpu.md
+++ b/mediapipe/docs/multi_hand_tracking_mobile_gpu.md
@@ -219,7 +219,7 @@ node {
   output_stream: "multi_hand_rects"
   node_options: {
     [type.googleapis.com/mediapipe.AssociationCalculatorOptions] {
-      min_similarity_threshold: 0.1
+      min_similarity_threshold: 0.5
     }
   }
 }
@@ -560,7 +560,7 @@ node {
 # BATCH_END timestamp, outputs the vector of landmarks at the BATCH_END
 # timestamp.
 node {
-  calculator: "EndLoopNormalizedLandmarksVectorCalculator"
+  calculator: "EndLoopNormalizedLandmarkListVectorCalculator"
   input_stream: "ITEM:single_hand_landmarks"
   input_stream: "BATCH_END:single_hand_rect_timestamp"
   output_stream: "ITERABLE:multi_hand_landmarks"
@@ -580,7 +580,7 @@ node {
 # hand. If the hand presence for hand #i is false, the set of landmarks
 # corresponding to that hand are dropped from the vector.
 node {
-  calculator: "FilterLandmarksCollectionCalculator"
+  calculator: "FilterLandmarkListCollectionCalculator"
   input_stream: "ITERABLE:multi_hand_landmarks"
   input_stream: "CONDITION:multi_hand_presence"
   output_stream: "ITERABLE:filtered_multi_hand_landmarks"
@@ -669,7 +669,7 @@ node {
 # timestamp for downstream calculators to inform them that all elements in the
 # vector have been processed.
 node {
-  calculator: "BeginLoopNormalizedLandmarksVectorCalculator"
+  calculator: "BeginLoopNormalizedLandmarkListVectorCalculator"
   input_stream: "ITERABLE:multi_hand_landmarks"
   output_stream: "ITEM:single_hand_landmarks"
   output_stream: "BATCH_END:landmark_timestamp"
diff --git a/mediapipe/docs/object_detection_coral_devboard.md b/mediapipe/docs/object_detection_coral_devboard.md
new file mode 100644
index 000000000..f0ae1aa6b
--- /dev/null
+++ b/mediapipe/docs/object_detection_coral_devboard.md
@@ -0,0 +1,20 @@
+## Object Detection on Coral with Webcam
+
+MediaPipe runs cross-platform across desktop, mobile, and edge devices. Here
+is an example of running the MediaPipe
+[object detection pipeline](./object_detection_desktop.md) on an edge device
+like the
+[Google Coral Dev Board](https://coral.withgoogle.com/products/dev-board)
+with an [Edge TPU](https://cloud.google.com/edge-tpu/). This MediaPipe Coral
+object detection pipeline runs a
+[Coral-specific quantized version](https://github.com/google/mediapipe/blob/master/mediapipe/examples/coral/models/object-detector-quantized_edgetpu.tflite)
+of the
+[MediaPipe object detection TFLite model](https://github.com/google/mediapipe/blob/master/mediapipe/models/object_detection_front.tflite),
+accelerated on the Edge TPU.
+
+### Cross-compilation of MediaPipe Coral binaries in Docker
+
+We recommend not building the MediaPipe binaries on the edge device itself:
+its limited compute makes for long build times. Instead, build the MediaPipe
+binaries in Docker containers on a more powerful host machine. For
+step-by-step details of cross-compiling and running MediaPipe binaries on the
+Coral Dev Board, please refer to the
+[README.md in the MediaPipe Coral example folder](https://github.com/google/mediapipe/blob/master/mediapipe/examples/coral/README.md).
+
+![Object Detection running on Coral](images/object_detection_demo_coral.jpg)
diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/edgedetectiongpu/MainActivity.java b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/edgedetectiongpu/MainActivity.java
index 9c45cbd39..15708d6a1 100644
--- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/edgedetectiongpu/MainActivity.java
+++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/edgedetectiongpu/MainActivity.java
@@ -47,7 +47,7 @@ public class MainActivity extends AppCompatActivity {
   static {
     // Load all native libraries needed by the app.
     System.loadLibrary("mediapipe_jni");
-    System.loadLibrary("opencv_java4");
+    System.loadLibrary("opencv_java3");
   }

   // {@link SurfaceTexture} where the camera-preview frames can be accessed.
diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectioncpu/MainActivity.java b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectioncpu/MainActivity.java
index 05ef0e756..fab132214 100644
--- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectioncpu/MainActivity.java
+++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectioncpu/MainActivity.java
@@ -48,7 +48,7 @@ public class MainActivity extends AppCompatActivity {
   static {
     // Load all native libraries needed by the app.
     System.loadLibrary("mediapipe_jni");
-    System.loadLibrary("opencv_java4");
+    System.loadLibrary("opencv_java3");
   }

   // {@link SurfaceTexture} where the camera-preview frames can be accessed.
diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectiongpu/MainActivity.java b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectiongpu/MainActivity.java
index 91e533455..5990c6c49 100644
--- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectiongpu/MainActivity.java
+++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/facedetectiongpu/MainActivity.java
@@ -48,7 +48,7 @@ public class MainActivity extends AppCompatActivity {
   static {
     // Load all native libraries needed by the app.
     System.loadLibrary("mediapipe_jni");
-    System.loadLibrary("opencv_java4");
+    System.loadLibrary("opencv_java3");
   }

   // {@link SurfaceTexture} where the camera-preview frames can be accessed.
diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/hairsegmentationgpu/MainActivity.java b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/hairsegmentationgpu/MainActivity.java index 16022d9d2..11e1fe1a6 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/hairsegmentationgpu/MainActivity.java +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/hairsegmentationgpu/MainActivity.java @@ -48,7 +48,7 @@ public class MainActivity extends AppCompatActivity { static { // Load all native libraries needed by the app. System.loadLibrary("mediapipe_jni"); - System.loadLibrary("opencv_java4"); + System.loadLibrary("opencv_java3"); } // {@link SurfaceTexture} where the camera-preview frames can be accessed. diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handdetectiongpu/MainActivity.java b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handdetectiongpu/MainActivity.java index cc95bbbfb..2d7c84a2a 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handdetectiongpu/MainActivity.java +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handdetectiongpu/MainActivity.java @@ -48,7 +48,7 @@ public class MainActivity extends AppCompatActivity { static { // Load all native libraries needed by the app. System.loadLibrary("mediapipe_jni"); - System.loadLibrary("opencv_java4"); + System.loadLibrary("opencv_java3"); } // {@link SurfaceTexture} where the camera-preview frames can be accessed. diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu/BUILD b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu/BUILD index 9dd6b475d..9846a9c45 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu/BUILD +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu/BUILD @@ -75,6 +75,7 @@ android_library( resource_files = glob(["res/**"]), deps = [ ":mediapipe_jni_lib", + "//mediapipe/framework/formats:landmark_java_proto_lite", "//mediapipe/java/com/google/mediapipe/components:android_camerax_helper", "//mediapipe/java/com/google/mediapipe/components:android_components", "//mediapipe/java/com/google/mediapipe/framework:android_framework", diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu/MainActivity.java b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu/MainActivity.java index 5e69f62e3..a5fa8c674 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu/MainActivity.java +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/handtrackinggpu/MainActivity.java @@ -17,18 +17,23 @@ package com.google.mediapipe.apps.handtrackinggpu; import android.graphics.SurfaceTexture; import android.os.Bundle; import androidx.appcompat.app.AppCompatActivity; +import android.util.Log; import android.util.Size; import android.view.SurfaceHolder; import android.view.SurfaceView; import android.view.View; import android.view.ViewGroup; +import com.google.mediapipe.formats.proto.LandmarkProto.NormalizedLandmark; +import com.google.mediapipe.formats.proto.LandmarkProto.NormalizedLandmarkList; import com.google.mediapipe.components.CameraHelper; import com.google.mediapipe.components.CameraXPreviewHelper; import com.google.mediapipe.components.ExternalTextureConverter; import com.google.mediapipe.components.FrameProcessor; import 
com.google.mediapipe.components.PermissionHelper; import com.google.mediapipe.framework.AndroidAssetUtil; +import com.google.mediapipe.framework.PacketGetter; import com.google.mediapipe.glutil.EglManager; +import com.google.protobuf.InvalidProtocolBufferException; /** Main activity of MediaPipe example apps. */ public class MainActivity extends AppCompatActivity { @@ -37,6 +42,8 @@ public class MainActivity extends AppCompatActivity { private static final String BINARY_GRAPH_NAME = "handtrackinggpu.binarypb"; private static final String INPUT_VIDEO_STREAM_NAME = "input_video"; private static final String OUTPUT_VIDEO_STREAM_NAME = "output_video"; + private static final String OUTPUT_HAND_PRESENCE_STREAM_NAME = "hand_presence"; + private static final String OUTPUT_LANDMARKS_STREAM_NAME = "hand_landmarks"; private static final CameraHelper.CameraFacing CAMERA_FACING = CameraHelper.CameraFacing.FRONT; // Flips the camera-preview frames vertically before sending them into FrameProcessor to be @@ -48,7 +55,7 @@ public class MainActivity extends AppCompatActivity { static { // Load all native libraries needed by the app. System.loadLibrary("mediapipe_jni"); - System.loadLibrary("opencv_java4"); + System.loadLibrary("opencv_java3"); } // {@link SurfaceTexture} where the camera-preview frames can be accessed. @@ -90,6 +97,41 @@ public class MainActivity extends AppCompatActivity { OUTPUT_VIDEO_STREAM_NAME); processor.getVideoSurfaceOutput().setFlipY(FLIP_FRAMES_VERTICALLY); + processor.addPacketCallback( + OUTPUT_HAND_PRESENCE_STREAM_NAME, + (packet) -> { + Boolean handPresence = PacketGetter.getBool(packet); + if (!handPresence) { + Log.d( + TAG, + "[TS:" + packet.getTimestamp() + "] Hand presence is false, no hands detected."); + } + }); + + processor.addPacketCallback( + OUTPUT_LANDMARKS_STREAM_NAME, + (packet) -> { + byte[] landmarksRaw = PacketGetter.getProtoBytes(packet); + try { + NormalizedLandmarkList landmarks = NormalizedLandmarkList.parseFrom(landmarksRaw); + if (landmarks == null) { + Log.d(TAG, "[TS:" + packet.getTimestamp() + "] No hand landmarks."); + return; + } + // Note: If hand_presence is false, these landmarks are useless. 
+                Log.d(
+                    TAG,
+                    "[TS:"
+                        + packet.getTimestamp()
+                        + "] #Landmarks for hand: "
+                        + landmarks.getLandmarkCount());
+                Log.d(TAG, getLandmarksDebugString(landmarks));
+              } catch (InvalidProtocolBufferException e) {
+                Log.e(TAG, "Failed to parse the landmarks packet: " + e);
+                return;
+              }
+            });
+
     PermissionHelper.checkAndRequestCameraPermissions(this);
   }
@@ -164,4 +206,23 @@ public class MainActivity extends AppCompatActivity {
         });
     cameraHelper.startCamera(this, CAMERA_FACING, /*surfaceTexture=*/ null);
   }
+
+  private static String getLandmarksDebugString(NormalizedLandmarkList landmarks) {
+    int landmarkIndex = 0;
+    String landmarksString = "";
+    for (NormalizedLandmark landmark : landmarks.getLandmarkList()) {
+      landmarksString +=
+          "\t\tLandmark["
+              + landmarkIndex
+              + "]: ("
+              + landmark.getX()
+              + ", "
+              + landmark.getY()
+              + ", "
+              + landmark.getZ()
+              + ")\n";
+      ++landmarkIndex;
+    }
+    return landmarksString;
+  }
 }
diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/multihandtrackinggpu/BUILD b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/multihandtrackinggpu/BUILD
index 61c2065dd..80beaf37b 100644
--- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/multihandtrackinggpu/BUILD
+++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/multihandtrackinggpu/BUILD
@@ -75,6 +75,7 @@ android_library(
     resource_files = glob(["res/**"]),
     deps = [
         ":mediapipe_jni_lib",
+        "//mediapipe/framework/formats:landmark_java_proto_lite",
         "//mediapipe/java/com/google/mediapipe/components:android_camerax_helper",
         "//mediapipe/java/com/google/mediapipe/components:android_components",
         "//mediapipe/java/com/google/mediapipe/framework:android_framework",
diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/multihandtrackinggpu/MainActivity.java b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/multihandtrackinggpu/MainActivity.java
index cef138546..f7fe06c82 100644
--- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/multihandtrackinggpu/MainActivity.java
+++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/multihandtrackinggpu/MainActivity.java
@@ -17,18 +17,23 @@ package com.google.mediapipe.apps.multihandtrackinggpu;
 import android.graphics.SurfaceTexture;
 import android.os.Bundle;
 import androidx.appcompat.app.AppCompatActivity;
+import android.util.Log;
 import android.util.Size;
 import android.view.SurfaceHolder;
 import android.view.SurfaceView;
 import android.view.View;
 import android.view.ViewGroup;
+import com.google.mediapipe.formats.proto.LandmarkProto.NormalizedLandmark;
+import com.google.mediapipe.formats.proto.LandmarkProto.NormalizedLandmarkList;
 import com.google.mediapipe.components.CameraHelper;
 import com.google.mediapipe.components.CameraXPreviewHelper;
 import com.google.mediapipe.components.ExternalTextureConverter;
 import com.google.mediapipe.components.FrameProcessor;
 import com.google.mediapipe.components.PermissionHelper;
 import com.google.mediapipe.framework.AndroidAssetUtil;
+import com.google.mediapipe.framework.PacketGetter;
 import com.google.mediapipe.glutil.EglManager;
+import java.util.List;

 /** Main activity of MediaPipe example apps.
  */
 public class MainActivity extends AppCompatActivity {
@@ -37,6 +42,7 @@ public class MainActivity extends AppCompatActivity {
   private static final String BINARY_GRAPH_NAME = "multihandtrackinggpu.binarypb";
   private static final String INPUT_VIDEO_STREAM_NAME = "input_video";
   private static final String OUTPUT_VIDEO_STREAM_NAME = "output_video";
+  private static final String OUTPUT_LANDMARKS_STREAM_NAME = "multi_hand_landmarks";
   private static final CameraHelper.CameraFacing CAMERA_FACING = CameraHelper.CameraFacing.FRONT;

   // Flips the camera-preview frames vertically before sending them into FrameProcessor to be
@@ -48,7 +54,7 @@ public class MainActivity extends AppCompatActivity {
   static {
     // Load all native libraries needed by the app.
     System.loadLibrary("mediapipe_jni");
-    System.loadLibrary("opencv_java4");
+    System.loadLibrary("opencv_java3");
   }

   // {@link SurfaceTexture} where the camera-preview frames can be accessed.
@@ -90,6 +96,20 @@ public class MainActivity extends AppCompatActivity {
             OUTPUT_VIDEO_STREAM_NAME);
     processor.getVideoSurfaceOutput().setFlipY(FLIP_FRAMES_VERTICALLY);

+    processor.addPacketCallback(
+        OUTPUT_LANDMARKS_STREAM_NAME,
+        (packet) -> {
+          Log.d(TAG, "Received multi-hand landmarks packet.");
+          List<NormalizedLandmarkList> multiHandLandmarks =
+              PacketGetter.getProtoVector(packet, NormalizedLandmarkList.parser());
+          Log.d(
+              TAG,
+              "[TS:"
+                  + packet.getTimestamp()
+                  + "] "
+                  + getMultiHandLandmarksDebugString(multiHandLandmarks));
+        });
+
     PermissionHelper.checkAndRequestCameraPermissions(this);
   }
@@ -164,4 +184,32 @@ public class MainActivity extends AppCompatActivity {
         });
     cameraHelper.startCamera(this, CAMERA_FACING, /*surfaceTexture=*/ null);
   }
+
+  private String getMultiHandLandmarksDebugString(List<NormalizedLandmarkList> multiHandLandmarks) {
+    if (multiHandLandmarks.isEmpty()) {
+      return "No hand landmarks";
+    }
+    String multiHandLandmarksStr = "Number of hands detected: " + multiHandLandmarks.size() + "\n";
+    int handIndex = 0;
+    for (NormalizedLandmarkList landmarks : multiHandLandmarks) {
+      multiHandLandmarksStr +=
+          "\t#Hand landmarks for hand[" + handIndex + "]: " + landmarks.getLandmarkCount() + "\n";
+      int landmarkIndex = 0;
+      for (NormalizedLandmark landmark : landmarks.getLandmarkList()) {
+        multiHandLandmarksStr +=
+            "\t\tLandmark ["
+                + landmarkIndex
+                + "]: ("
+                + landmark.getX()
+                + ", "
+                + landmark.getY()
+                + ", "
+                + landmark.getZ()
+                + ")\n";
+        ++landmarkIndex;
+      }
+      ++handIndex;
+    }
+    return multiHandLandmarksStr;
+  }
 }
diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectioncpu/MainActivity.java b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectioncpu/MainActivity.java
index c6bdb8d58..de7f478a4 100644
--- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectioncpu/MainActivity.java
+++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectioncpu/MainActivity.java
@@ -48,7 +48,7 @@ public class MainActivity extends AppCompatActivity {
   static {
     // Load all native libraries needed by the app.
     System.loadLibrary("mediapipe_jni");
-    System.loadLibrary("opencv_java4");
+    System.loadLibrary("opencv_java3");
   }

   // {@link SurfaceTexture} where the camera-preview frames can be accessed.
diff --git a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectiongpu/MainActivity.java b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectiongpu/MainActivity.java index c436ef7f1..39d51d585 100644 --- a/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectiongpu/MainActivity.java +++ b/mediapipe/examples/android/src/java/com/google/mediapipe/apps/objectdetectiongpu/MainActivity.java @@ -48,7 +48,7 @@ public class MainActivity extends AppCompatActivity { static { // Load all native libraries needed by the app. System.loadLibrary("mediapipe_jni"); - System.loadLibrary("opencv_java4"); + System.loadLibrary("opencv_java3"); } // {@link SurfaceTexture} where the camera-preview frames can be accessed. diff --git a/mediapipe/examples/coral/BUILD b/mediapipe/examples/coral/BUILD new file mode 100644 index 000000000..1515140d3 --- /dev/null +++ b/mediapipe/examples/coral/BUILD @@ -0,0 +1,56 @@ +# Copyright 2019 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +licenses(["notice"]) # Apache 2.0 + +package(default_visibility = [ + "//visibility:public", +]) + +# Graph Runner + +cc_library( + name = "demo_run_graph_main", + srcs = ["demo_run_graph_main.cc"], + deps = [ + "//mediapipe/framework:calculator_framework", + "//mediapipe/framework/formats:image_frame", + "//mediapipe/framework/formats:image_frame_opencv", + "//mediapipe/framework/port:commandlineflags", + "//mediapipe/framework/port:file_helpers", + "//mediapipe/framework/port:opencv_highgui", + "//mediapipe/framework/port:opencv_imgproc", + "//mediapipe/framework/port:opencv_video", + "//mediapipe/framework/port:parse_text_proto", + "//mediapipe/framework/port:status", + ], +) + +# Demos + +cc_binary( + name = "object_detection_cpu", + deps = [ + "//mediapipe/examples/coral:demo_run_graph_main", + "//mediapipe/graphs/object_detection:desktop_tflite_calculators", + ], +) + +cc_binary( + name = "face_detection_cpu", + deps = [ + "//mediapipe/examples/coral:demo_run_graph_main", + "//mediapipe/graphs/face_detection:desktop_tflite_calculators", + ], +) diff --git a/mediapipe/examples/coral/Dockerfile b/mediapipe/examples/coral/Dockerfile new file mode 100644 index 000000000..1640f57df --- /dev/null +++ b/mediapipe/examples/coral/Dockerfile @@ -0,0 +1,87 @@ +# Copyright 2019 The MediaPipe Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +#==== ! Prerequisite ! 
====
+# $ sh mediapipe/examples/coral/setup.sh
+#====
+
+# For OpenCV 3.2 (the default in Ubuntu 18.04)
+FROM ubuntu:18.04
+
+MAINTAINER
+
+WORKDIR /mediapipe
+
+ENV DEBIAN_FRONTEND=noninteractive
+
+# Install MediaPipe & Coral deps
+
+COPY update_sources.sh /
+RUN /update_sources.sh
+
+RUN dpkg --add-architecture armhf
+RUN dpkg --add-architecture arm64
+RUN apt-get update && apt-get install -y \
+    build-essential \
+    crossbuild-essential-arm64 \
+    libusb-1.0-0-dev \
+    libusb-1.0-0-dev:arm64 \
+    zlib1g-dev \
+    zlib1g-dev:arm64 \
+    pkg-config \
+    zip \
+    unzip \
+    curl \
+    wget \
+    git \
+    python \
+    python-pip \
+    python3-pip \
+    vim-common \
+    ca-certificates \
+    emacs \
+    software-properties-common && \
+    add-apt-repository -y ppa:openjdk-r/ppa && \
+    apt-get update && apt-get install -y openjdk-8-jdk
+
+RUN pip install --upgrade setuptools
+RUN pip install future
+RUN pip3 install six
+
+COPY . /mediapipe/
+
+# Install bazel
+
+ARG BAZEL_VERSION=0.29.1
+RUN mkdir /bazel && \
+    wget --no-check-certificate -O /bazel/installer.sh "https://github.com/bazelbuild/bazel/releases/download/${BAZEL_VERSION}/bazel-${BAZEL_VERSION}-installer-linux-x86_64.sh" && \
+    wget --no-check-certificate -O /bazel/LICENSE.txt "https://raw.githubusercontent.com/bazelbuild/bazel/master/LICENSE" && \
+    chmod +x /bazel/installer.sh && \
+    /bazel/installer.sh && \
+    rm -f /bazel/installer.sh
+
+# OpenCV (3.2 default in 18.04)
+
+RUN apt-get update && apt-get install -y libopencv-dev
+
+# OpenCV libs copied from the Coral device into opencv32_arm64_libs
+
+RUN cp opencv32_arm64_libs/* /usr/lib/aarch64-linux-gnu/.
+
+# Edge TPU header and lib
+
+RUN git clone https://github.com/google-coral/edgetpu.git /edgetpu
+RUN cp /edgetpu/libedgetpu/direct/aarch64/libedgetpu.so.1.0 /usr/lib/aarch64-linux-gnu/libedgetpu.so
+
+# See mediapipe/examples/coral/README.md to finish setup
diff --git a/mediapipe/examples/coral/README.md b/mediapipe/examples/coral/README.md
new file mode 100644
index 000000000..6712a1e0a
--- /dev/null
+++ b/mediapipe/examples/coral/README.md
@@ -0,0 +1,137 @@
+# Coral Dev Board Setup (experimental)
+
+**Disclaimer**: Running MediaPipe on Coral is experimental; this process may
+not be exact and is subject to change. These instructions have only been
+tested on the Coral Dev Board with OS version _mendel day_, and may vary for
+different devices and workstations.
+
+This file describes how to prepare a Google Coral Dev Board and set up a Linux
+Docker container for building MediaPipe applications that run on the Edge TPU.
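+At a high level, the workflow boils down to the loop sketched below; every
+command is detailed in the sections that follow, and the `<container-id>`
+placeholder is whatever `docker ps` reports:
+
+      # on host: stage OpenCV libs pulled from the device, then build the image
+      sh mediapipe/examples/coral/setup.sh
+      docker build -t coral .
+
+      # inside docker: cross-compile a demo for aarch64
+      bazel build -c opt --crosstool_top=@crosstool//:toolchains --compiler=gcc --cpu=aarch64 --define MEDIAPIPE_DISABLE_GPU=1 --copt -DMEDIAPIPE_EDGE_TPU --copt=-flax-vector-conversions mediapipe/examples/coral:object_detection_cpu
+
+      # on host: copy the binary out of the container and push it to the board
+      docker cp <container-id>:/mediapipe/bazel-bin/mediapipe/examples/coral/object_detection_cpu /tmp/.
+      mdt push /tmp/object_detection_cpu /home/mendel/mediapipe/bazel-bin/.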
+ +## Before creating the Docker + +* (on host machine) run _setup.sh_ from MediaPipe root directory + + sh mediapipe/examples/coral/setup.sh + +* Set up the Coral device by following the instructions [here](https://coral.withgoogle.com/docs/dev-board/get-started/), and ensure the _mdt_ command works + +* (on coral device) prepare MediaPipe + + cd ~ + sudo apt-get install git + git clone https://github.com/google/mediapipe.git + mkdir mediapipe/bazel-bin + +* (on coral device) install opencv 3.2 + + sudo apt-get update && sudo apt-get install -y libopencv-dev + +* (on coral device) find all opencv libs + + find /usr/lib/aarch64-linux-gnu/ -name 'libopencv*so' + +* (on host machine) copy core opencv libs from coral device to a local folder inside MediaPipe checkout: + + # in root level mediapipe folder # + mdt pull /usr/lib/aarch64-linux-gnu/libopencv_core.so opencv32_arm64_libs + mdt pull /usr/lib/aarch64-linux-gnu/libopencv_calib3d.so opencv32_arm64_libs + mdt pull /usr/lib/aarch64-linux-gnu/libopencv_features2d.so opencv32_arm64_libs + mdt pull /usr/lib/aarch64-linux-gnu/libopencv_highgui.so opencv32_arm64_libs + mdt pull /usr/lib/aarch64-linux-gnu/libopencv_imgcodecs.so opencv32_arm64_libs + mdt pull /usr/lib/aarch64-linux-gnu/libopencv_imgproc.so opencv32_arm64_libs + mdt pull /usr/lib/aarch64-linux-gnu/libopencv_video.so opencv32_arm64_libs + mdt pull /usr/lib/aarch64-linux-gnu/libopencv_videoio.so opencv32_arm64_libs + +* (on host machine) Create and start the docker environment + + # from mediapipe root level directory # + docker build -t coral . + docker run -it --name coral coral:latest + +## Inside the Docker environment + +* Update library paths in /mediapipe/third_party/opencv_linux.BUILD + + (replace 'x86_64-linux-gnu' with 'aarch64-linux-gnu') + + "lib/aarch64-linux-gnu/libopencv_core.so", + "lib/aarch64-linux-gnu/libopencv_calib3d.so", + "lib/aarch64-linux-gnu/libopencv_features2d.so", + "lib/aarch64-linux-gnu/libopencv_highgui.so", + "lib/aarch64-linux-gnu/libopencv_imgcodecs.so", + "lib/aarch64-linux-gnu/libopencv_imgproc.so", + "lib/aarch64-linux-gnu/libopencv_video.so", + "lib/aarch64-linux-gnu/libopencv_videoio.so", + +* Attempt to build hello world (to download external deps) + + bazel build -c opt --define MEDIAPIPE_DISABLE_GPU=1 mediapipe/examples/desktop/hello_world:hello_world + +* Edit /mediapipe/bazel-mediapipe/external/com_github_glog_glog/src/signalhandler.cc + + on line 78, replace + + return (void*)context->PC_FROM_UCONTEXT; + + with + + return NULL; + +* Edit /edgetpu/libedgetpu/BUILD + + to add this build target + + cc_library( + name = "lib", + srcs = [ + "libedgetpu.so", + ], + visibility = ["//visibility:public"], + ) + +* Edit *tflite_inference_calculator.cc* BUILD rules: + + sed -i 's/\":tflite_inference_calculator_cc_proto\",/\":tflite_inference_calculator_cc_proto\",\n\t\"@edgetpu\/\/:header\",\n\t\"@libedgetpu\/\/:lib\",/g' mediapipe/calculators/tflite/BUILD + + The above command should add + + "@edgetpu//:header", + "@libedgetpu//:lib", + + to the _deps_ of the _tflite_inference_calculator_ library rule + +#### Now try cross-compiling for device + +* Object detection demo + + bazel build -c opt --crosstool_top=@crosstool//:toolchains --compiler=gcc --cpu=aarch64 --define MEDIAPIPE_DISABLE_GPU=1 --copt -DMEDIAPIPE_EDGE_TPU --copt=-flax-vector-conversions mediapipe/examples/coral:object_detection_cpu + + Copy object_detection_cpu binary to the MediaPipe checkout on the coral device + + # outside docker env, open new terminal on host machine # + docker ps + docker cp <container-id>:/mediapipe/bazel-bin/mediapipe/examples/coral/object_detection_cpu /tmp/.
mdt push /tmp/object_detection_cpu /home/mendel/mediapipe/bazel-bin/. + +* Face detection demo + + bazel build -c opt --crosstool_top=@crosstool//:toolchains --compiler=gcc --cpu=aarch64 --define MEDIAPIPE_DISABLE_GPU=1 --copt -DMEDIAPIPE_EDGE_TPU --copt=-flax-vector-conversions mediapipe/examples/coral:face_detection_cpu + + Copy face_detection_cpu binary to the MediaPipe checkout on the coral device + + # outside docker env, open new terminal on host machine # + docker ps + docker cp <container-id>:/mediapipe/bazel-bin/mediapipe/examples/coral/face_detection_cpu /tmp/. + mdt push /tmp/face_detection_cpu /home/mendel/mediapipe/bazel-bin/. + +## On the Coral device (with display) + + # Object detection + cd ~/mediapipe + chmod +x bazel-bin/object_detection_cpu + export GLOG_logtostderr=1 + bazel-bin/object_detection_cpu --calculator_graph_config_file=mediapipe/examples/coral/graphs/object_detection_desktop_live.pbtxt + + # Face detection + cd ~/mediapipe + chmod +x bazel-bin/face_detection_cpu + export GLOG_logtostderr=1 + bazel-bin/face_detection_cpu --calculator_graph_config_file=mediapipe/examples/coral/graphs/face_detection_desktop_live.pbtxt + diff --git a/mediapipe/examples/coral/WORKSPACE b/mediapipe/examples/coral/WORKSPACE new file mode 100644 index 000000000..28112f958 --- /dev/null +++ b/mediapipe/examples/coral/WORKSPACE @@ -0,0 +1,313 @@ +workspace(name = "mediapipe") + +load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive") + +skylib_version = "0.8.0" +http_archive( + name = "bazel_skylib", + type = "tar.gz", + url = "https://github.com/bazelbuild/bazel-skylib/releases/download/{}/bazel-skylib.{}.tar.gz".format (skylib_version, skylib_version), + sha256 = "2ef429f5d7ce7111263289644d233707dba35e39696377ebab8b0bc701f7818e", +) +load("@bazel_skylib//lib:versions.bzl", "versions") +versions.check(minimum_bazel_version = "0.24.1") + +# ABSL cpp library. +http_archive( + name = "com_google_absl", + # Head commit on 2019-04-12. + # TODO: Switch to the latest absl version when the problem gets + # fixed. + urls = [ + "https://github.com/abseil/abseil-cpp/archive/a02f62f456f2c4a7ecf2be3104fe0c6e16fbad9a.tar.gz", + ], + sha256 = "d437920d1434c766d22e85773b899c77c672b8b4865d5dc2cd61a29fdff3cf03", + strip_prefix = "abseil-cpp-a02f62f456f2c4a7ecf2be3104fe0c6e16fbad9a", +) + +http_archive( + name = "rules_cc", + strip_prefix = "rules_cc-master", + urls = ["https://github.com/bazelbuild/rules_cc/archive/master.zip"], +) + +# GoogleTest/GoogleMock framework. Used by most unit-tests. +http_archive( + name = "com_google_googletest", + urls = ["https://github.com/google/googletest/archive/master.zip"], + strip_prefix = "googletest-master", +) + +# Google Benchmark library.
+http_archive( + name = "com_google_benchmark", + urls = ["https://github.com/google/benchmark/archive/master.zip"], + strip_prefix = "benchmark-master", + build_file = "@//third_party:benchmark.BUILD", +) + +# gflags needed by glog +http_archive( + name = "com_github_gflags_gflags", + sha256 = "6e16c8bc91b1310a44f3965e616383dbda48f83e8c1eaa2370a215057b00cabe", + strip_prefix = "gflags-77592648e3f3be87d6c7123eb81cbad75f9aef5a", + urls = [ + "https://mirror.bazel.build/github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz", + "https://github.com/gflags/gflags/archive/77592648e3f3be87d6c7123eb81cbad75f9aef5a.tar.gz", + ], +) + +# glog +http_archive( + name = "com_github_glog_glog", + url = "https://github.com/google/glog/archive/v0.3.5.zip", + sha256 = "267103f8a1e9578978aa1dc256001e6529ef593e5aea38193d31c2872ee025e8", + strip_prefix = "glog-0.3.5", + build_file = "@//third_party:glog.BUILD", + patches = [ + "@//third_party:com_github_glog_glog_9779e5ea6ef59562b030248947f787d1256132ae.diff" + ], + patch_args = [ + "-p1", + ], +) + +# libyuv +http_archive( + name = "libyuv", + urls = ["https://chromium.googlesource.com/libyuv/libyuv/+archive/refs/heads/master.tar.gz"], + build_file = "@//third_party:libyuv.BUILD", +) + +http_archive( + name = "com_google_protobuf_javalite", + sha256 = "79d102c61e2a479a0b7e5fc167bcfaa4832a0c6aad4a75fa7da0480564931bcc", + strip_prefix = "protobuf-384989534b2246d413dbcd750744faab2607b516", + urls = ["https://github.com/google/protobuf/archive/384989534b2246d413dbcd750744faab2607b516.zip"], +) + +http_archive( + name = "com_google_audio_tools", + strip_prefix = "multichannel-audio-tools-master", + urls = ["https://github.com/google/multichannel-audio-tools/archive/master.zip"], +) + +# Needed by TensorFlow +http_archive( + name = "io_bazel_rules_closure", + sha256 = "e0a111000aeed2051f29fcc7a3f83be3ad8c6c93c186e64beb1ad313f0c7f9f9", + strip_prefix = "rules_closure-cf1e44edb908e9616030cc83d085989b8e6cd6df", + urls = [ + "http://mirror.tensorflow.org/github.com/bazelbuild/rules_closure/archive/cf1e44edb908e9616030cc83d085989b8e6cd6df.tar.gz", + "https://github.com/bazelbuild/rules_closure/archive/cf1e44edb908e9616030cc83d085989b8e6cd6df.tar.gz", # 2019-04-04 + ], +) + +# 2019-11-12 +_TENSORFLOW_GIT_COMMIT = "a5f9bcd64453ff3d1f64cb4da4786db3d2da7f82" +_TENSORFLOW_SHA256= "f2b6f2ab2ffe63e86eccd3ce4bea6b7197383d726638dfeeebcdc1e7de73f075" +http_archive( + name = "org_tensorflow", + urls = [ + "https://mirror.bazel.build/github.com/tensorflow/tensorflow/archive/%s.tar.gz" % _TENSORFLOW_GIT_COMMIT, + "https://github.com/tensorflow/tensorflow/archive/%s.tar.gz" % _TENSORFLOW_GIT_COMMIT, + ], + strip_prefix = "tensorflow-%s" % _TENSORFLOW_GIT_COMMIT, + sha256 = _TENSORFLOW_SHA256, +) + +load("@org_tensorflow//tensorflow:workspace.bzl", "tf_workspace") +tf_workspace(tf_repo_name = "org_tensorflow") + +# Please run +# $ sudo apt-get install libopencv-core-dev libopencv-highgui-dev \ +# libopencv-imgproc-dev libopencv-video-dev +new_local_repository( + name = "linux_opencv", + build_file = "@//third_party:opencv_linux.BUILD", + path = "/usr", +) + +new_local_repository( + name = "linux_ffmpeg", + build_file = "@//third_party:ffmpeg_linux.BUILD", + path = "/usr" +) + +# Please run $ brew install opencv@3 +new_local_repository( + name = "macos_opencv", + build_file = "@//third_party:opencv_macos.BUILD", + path = "/usr", +) + +new_local_repository( + name = "macos_ffmpeg", + build_file = "@//third_party:ffmpeg_macos.BUILD", + path = "/usr", +) + 
+http_archive( + name = "android_opencv", + sha256 = "056b849842e4fa8751d09edbb64530cfa7a63c84ccd232d0ace330e27ba55d0b", + build_file = "@//third_party:opencv_android.BUILD", + strip_prefix = "OpenCV-android-sdk", + type = "zip", + url = "https://github.com/opencv/opencv/releases/download/4.1.0/opencv-4.1.0-android-sdk.zip", +) + +# After OpenCV 3.2.0, the pre-compiled opencv2.framework has google protobuf symbols, which will +# trigger duplicate symbol errors in the linking stage of building a mediapipe ios app. +# To get a higher version of OpenCV for iOS, opencv2.framework needs to be built from source with +# '-DBUILD_PROTOBUF=OFF -DBUILD_opencv_dnn=OFF'. +http_archive( + name = "ios_opencv", + sha256 = "7dd536d06f59e6e1156b546bd581523d8df92ce83440002885ec5abc06558de2", + build_file = "@//third_party:opencv_ios.BUILD", + type = "zip", + url = "https://github.com/opencv/opencv/releases/download/3.2.0/opencv-3.2.0-ios-framework.zip", +) + +RULES_JVM_EXTERNAL_TAG = "2.2" +RULES_JVM_EXTERNAL_SHA = "f1203ce04e232ab6fdd81897cf0ff76f2c04c0741424d192f28e65ae752ce2d6" + +http_archive( + name = "rules_jvm_external", + strip_prefix = "rules_jvm_external-%s" % RULES_JVM_EXTERNAL_TAG, + sha256 = RULES_JVM_EXTERNAL_SHA, + url = "https://github.com/bazelbuild/rules_jvm_external/archive/%s.zip" % RULES_JVM_EXTERNAL_TAG, +) + +load("@rules_jvm_external//:defs.bzl", "maven_install") + +maven_install( + artifacts = [ + "androidx.annotation:annotation:aar:1.1.0", + "androidx.appcompat:appcompat:aar:1.1.0-rc01", + "androidx.constraintlayout:constraintlayout:aar:1.1.3", + "androidx.core:core:aar:1.1.0-rc03", + "androidx.legacy:legacy-support-v4:aar:1.0.0", + "androidx.recyclerview:recyclerview:aar:1.1.0-beta02", + "com.google.android.material:material:aar:1.0.0-rc01", + ], + repositories = ["https://dl.google.com/dl/android/maven2"], +) + +maven_server( + name = "google_server", + url = "https://dl.google.com/dl/android/maven2", +) + +maven_jar( + name = "androidx_lifecycle", + artifact = "androidx.lifecycle:lifecycle-common:2.0.0", + sha1 = "e070ffae07452331bc5684734fce6831d531785c", + server = "google_server", +) + +maven_jar( + name = "androidx_concurrent_futures", + artifact = "androidx.concurrent:concurrent-futures:1.0.0-alpha03", + sha1 = "b528df95c7e2fefa2210c0c742bf3e491c1818ae", + server = "google_server", +) + +maven_jar( + name = "com_google_guava_android", + artifact = "com.google.guava:guava:27.0.1-android", + sha1 = "b7e1c37f66ef193796ccd7ea6e80c2b05426182d", +) + +maven_jar( + name = "com_google_common_flogger", + artifact = "com.google.flogger:flogger:0.3.1", + sha1 = "585030fe1ec709760cbef997a459729fb965df0e", +) + +maven_jar( + name = "com_google_common_flogger_system_backend", + artifact = "com.google.flogger:flogger-system-backend:0.3.1", + sha1 = "287b569d76abcd82f9de87fe41829fbc7ebd8ac9", +) + +maven_jar( + name = "com_google_code_findbugs", + artifact = "com.google.code.findbugs:jsr305:3.0.2", + sha1 = "25ea2e8b0c338a877313bd4672d3fe056ea78f0d", +) + +# You may run setup_android.sh to install Android SDK and NDK. +android_ndk_repository( + name = "androidndk", +) + +android_sdk_repository( + name = "androidsdk", +) + +# iOS basic build deps. 
+ +http_archive( + name = "build_bazel_rules_apple", + sha256 = "bdc8e66e70b8a75da23b79f1f8c6207356df07d041d96d2189add7ee0780cf4e", + strip_prefix = "rules_apple-b869b0d3868d78a1d4ffd866ccb304fb68aa12c3", + url = "https://github.com/bazelbuild/rules_apple/archive/b869b0d3868d78a1d4ffd866ccb304fb68aa12c3.tar.gz", +) + +load( + "@build_bazel_rules_apple//apple:repositories.bzl", + "apple_rules_dependencies", +) + +apple_rules_dependencies() + +load( + "@build_bazel_rules_swift//swift:repositories.bzl", + "swift_rules_dependencies", +) + +swift_rules_dependencies() + +load( + "@build_bazel_apple_support//lib:repositories.bzl", + "apple_support_dependencies", +) + +apple_support_dependencies() + +# More iOS deps. + +http_archive( + name = "google_toolbox_for_mac", + url = "https://github.com/google/google-toolbox-for-mac/archive/v2.2.1.zip", + sha256 = "e3ac053813c989a88703556df4dc4466e424e30d32108433ed6beaec76ba4fdc", + strip_prefix = "google-toolbox-for-mac-2.2.1", + build_file = "@//third_party:google_toolbox_for_mac.BUILD", +) + + +# Coral +#COMMIT=$(git ls-remote https://github.com/google-coral/crosstool master | awk '{print $1}') +#SHA256=$(curl -L "https://github.com/google-coral/crosstool/archive/${COMMIT}.tar.gz" | sha256sum | awk '{print $1}') +http_archive( + name = "coral_crosstool", + sha256 = "cb31b1417ccdcf7dd9fca5ec63e1571672372c30427730255997a547569d2feb", + strip_prefix = "crosstool-9e00d5be43bf001f883b5700f5d04882fea00229", + urls = [ + "https://github.com/google-coral/crosstool/archive/9e00d5be43bf001f883b5700f5d04882fea00229.tar.gz", + ], +) +load("@coral_crosstool//:configure.bzl", "cc_crosstool") +cc_crosstool(name = "crosstool") + +# EdgeTPU +new_local_repository( + name = "edgetpu", + path = "/edgetpu/libedgetpu", + build_file = "/edgetpu/libedgetpu/BUILD" +) +new_local_repository( + name = "libedgetpu", + path = "/usr/lib/aarch64-linux-gnu", + build_file = "/edgetpu/libedgetpu/BUILD" +) diff --git a/mediapipe/examples/coral/demo_run_graph_main.cc b/mediapipe/examples/coral/demo_run_graph_main.cc new file mode 100644 index 000000000..fb5e86923 --- /dev/null +++ b/mediapipe/examples/coral/demo_run_graph_main.cc @@ -0,0 +1,151 @@ +// Copyright 2019 The MediaPipe Authors. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +// +// An example of sending OpenCV webcam frames into a MediaPipe graph. 
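+// This runner is linked into the coral demo binaries (see the cc_binary rules
+// in mediapipe/examples/coral/BUILD). A typical on-device invocation, per the
+// README in this directory:
+//
+//   export GLOG_logtostderr=1
+//   bazel-bin/object_detection_cpu \
+//     --calculator_graph_config_file=mediapipe/examples/coral/graphs/object_detection_desktop_live.pbtxt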
+ +#include "mediapipe/framework/calculator_framework.h" +#include "mediapipe/framework/formats/image_frame.h" +#include "mediapipe/framework/formats/image_frame_opencv.h" +#include "mediapipe/framework/port/commandlineflags.h" +#include "mediapipe/framework/port/file_helpers.h" +#include "mediapipe/framework/port/opencv_highgui_inc.h" +#include "mediapipe/framework/port/opencv_imgproc_inc.h" +#include "mediapipe/framework/port/opencv_video_inc.h" +#include "mediapipe/framework/port/parse_text_proto.h" +#include "mediapipe/framework/port/status.h" + +constexpr char kInputStream[] = "input_video"; +constexpr char kOutputStream[] = "output_video"; +constexpr char kWindowName[] = "MediaPipe"; + +DEFINE_string( + calculator_graph_config_file, "", + "Name of file containing text format CalculatorGraphConfig proto."); +DEFINE_string(input_video_path, "", + "Full path of video to load. " + "If not provided, attempt to use a webcam."); +DEFINE_string(output_video_path, "", + "Full path of where to save result (.mp4 only). " + "If not provided, show result in a window."); + +::mediapipe::Status RunMPPGraph() { + std::string calculator_graph_config_contents; + MP_RETURN_IF_ERROR(mediapipe::file::GetContents( + FLAGS_calculator_graph_config_file, &calculator_graph_config_contents)); + LOG(INFO) << "Get calculator graph config contents: " + << calculator_graph_config_contents; + mediapipe::CalculatorGraphConfig config = + mediapipe::ParseTextProtoOrDie<mediapipe::CalculatorGraphConfig>( + calculator_graph_config_contents); + + LOG(INFO) << "Initialize the calculator graph."; + mediapipe::CalculatorGraph graph; + MP_RETURN_IF_ERROR(graph.Initialize(config)); + + LOG(INFO) << "Initialize the camera or load the video."; + cv::VideoCapture capture; + const bool load_video = !FLAGS_input_video_path.empty(); + if (load_video) { + capture.open(FLAGS_input_video_path); + } else { + capture.open(0); + } + RET_CHECK(capture.isOpened()); + + cv::VideoWriter writer; + const bool save_video = !FLAGS_output_video_path.empty(); + if (save_video) { + LOG(INFO) << "Prepare video writer."; + cv::Mat test_frame; + capture.read(test_frame); // Consume first frame. + capture.set(cv::CAP_PROP_POS_AVI_RATIO, 0); // Rewind to beginning. + writer.open(FLAGS_output_video_path, + mediapipe::fourcc('a', 'v', 'c', '1'), // .mp4 + capture.get(cv::CAP_PROP_FPS), test_frame.size()); + RET_CHECK(writer.isOpened()); + } else { + cv::namedWindow(kWindowName, /*flags=WINDOW_AUTOSIZE*/ 1); + capture.set(cv::CAP_PROP_FRAME_WIDTH, 640); + capture.set(cv::CAP_PROP_FRAME_HEIGHT, 480); + capture.set(cv::CAP_PROP_AUTOFOCUS, 0); + capture.set(cv::CAP_PROP_FOCUS, 1); + capture.set(cv::CAP_PROP_FPS, 30); + } + + LOG(INFO) << "Start running the calculator graph."; + ASSIGN_OR_RETURN(mediapipe::OutputStreamPoller poller, + graph.AddOutputStreamPoller(kOutputStream)); + MP_RETURN_IF_ERROR(graph.StartRun({})); + + LOG(INFO) << "Start grabbing and processing frames."; + size_t frame_timestamp = 0; + bool grab_frames = true; + while (grab_frames) { + // Capture opencv camera or video frame. + cv::Mat camera_frame_raw; + capture >> camera_frame_raw; + if (camera_frame_raw.empty()) break; // End of video. + cv::Mat camera_frame; + cv::cvtColor(camera_frame_raw, camera_frame, cv::COLOR_BGR2RGB); + if (!load_video) { + cv::flip(camera_frame, camera_frame, /*flipcode=HORIZONTAL*/ 1); + } + + // Wrap Mat into an ImageFrame.
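+ // mediapipe::formats::MatView below returns a cv::Mat that aliases the
+ // ImageFrame's own pixel buffer, so copying the camera frame into that Mat
+ // writes the pixels directly into the ImageFrame with no extra staging copy.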
+ auto input_frame = absl::make_unique<mediapipe::ImageFrame>( + mediapipe::ImageFormat::SRGB, camera_frame.cols, camera_frame.rows, + mediapipe::ImageFrame::kDefaultAlignmentBoundary); + cv::Mat input_frame_mat = mediapipe::formats::MatView(input_frame.get()); + camera_frame.copyTo(input_frame_mat); + + // Send image packet into the graph. + MP_RETURN_IF_ERROR(graph.AddPacketToInputStream( + kInputStream, mediapipe::Adopt(input_frame.release()) + .At(mediapipe::Timestamp(frame_timestamp++)))); + + // Get the graph result packet, or stop if that fails. + mediapipe::Packet packet; + if (!poller.Next(&packet)) break; + auto& output_frame = packet.Get<mediapipe::ImageFrame>(); + + // Convert back to opencv for display or saving. + cv::Mat output_frame_mat = mediapipe::formats::MatView(&output_frame); + cv::cvtColor(output_frame_mat, output_frame_mat, cv::COLOR_RGB2BGR); + if (save_video) { + writer.write(output_frame_mat); + } else { + cv::imshow(kWindowName, output_frame_mat); + // Press any key to exit. + const int pressed_key = cv::waitKey(5); + if (pressed_key >= 0 && pressed_key != 255) grab_frames = false; + } + } + + LOG(INFO) << "Shutting down."; + if (writer.isOpened()) writer.release(); + MP_RETURN_IF_ERROR(graph.CloseInputStream(kInputStream)); + return graph.WaitUntilDone(); +} + +int main(int argc, char** argv) { + google::InitGoogleLogging(argv[0]); + gflags::ParseCommandLineFlags(&argc, &argv, true); + ::mediapipe::Status run_status = RunMPPGraph(); + if (!run_status.ok()) { + LOG(ERROR) << "Failed to run the graph: " << run_status.message(); + } else { + LOG(INFO) << "Success!"; + } + return 0; +} diff --git a/mediapipe/examples/coral/graphs/face_detection_desktop_live.pbtxt b/mediapipe/examples/coral/graphs/face_detection_desktop_live.pbtxt new file mode 100644 index 000000000..eaae2f90d --- /dev/null +++ b/mediapipe/examples/coral/graphs/face_detection_desktop_live.pbtxt @@ -0,0 +1,189 @@ +# MediaPipe graph that performs face detection with TensorFlow Lite on CPU. +# Used in the examples in +# mediapipe/examples/coral:face_detection_cpu. + +# Images on CPU coming into and out of the graph. +input_stream: "input_video" +output_stream: "output_video" + +# Throttles the images flowing downstream for flow control. It passes through +# the very first incoming image unaltered, and waits for +# TfLiteTensorsToDetectionsCalculator downstream in the graph to finish +# generating the corresponding detections before it passes through another +# image. All images that come in while waiting are dropped, limiting the number +# of in-flight images between this calculator and +# TfLiteTensorsToDetectionsCalculator to 1. This prevents the nodes in between +# from queuing up incoming images and data excessively, which leads to increased +# latency and memory usage, unwanted in real-time mobile applications. It also +# eliminates unnecessary computation, e.g., a transformed image produced by +# ImageTransformationCalculator may get dropped downstream if the subsequent +# TfLiteConverterCalculator or TfLiteInferenceCalculator is still busy +# processing previous inputs. +node { + calculator: "FlowLimiterCalculator" + input_stream: "input_video" + input_stream: "FINISHED:detections" + input_stream_info: { + tag_index: "FINISHED" + back_edge: true + } + output_stream: "throttled_input_video" +} + +# Transforms the input image on CPU to a 128x128 image. To scale the input +# image, the scale_mode option is set to FIT to preserve the aspect ratio, +# resulting in potential letterboxing in the transformed image.
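+# For example, a 640x480 camera frame scaled with FIT into 128x128 becomes a
+# 128x96 region of image content with 16-pixel letterbox bands above and below;
+# the LETTERBOX_PADDING output below reports that padding so detections can be
+# mapped back onto the unpadded frame later in the graph.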
+node: { + calculator: "ImageTransformationCalculator" + input_stream: "IMAGE:throttled_input_video" + output_stream: "IMAGE:transformed_input_video_cpu" + output_stream: "LETTERBOX_PADDING:letterbox_padding" + options: { + [mediapipe.ImageTransformationCalculatorOptions.ext] { + output_width: 128 + output_height: 128 + scale_mode: FIT + } + } +} + +# Converts the transformed input image on CPU into an image tensor stored as a +# TfLiteTensor. +node { + calculator: "TfLiteConverterCalculator" + input_stream: "IMAGE:transformed_input_video_cpu" + output_stream: "TENSORS:image_tensor" + options: { + [mediapipe.TfLiteConverterCalculatorOptions.ext] { + use_quantized_tensors: true + } + } +} + +# Runs a TensorFlow Lite model on CPU that takes an image tensor and outputs a +# vector of tensors representing, for instance, detection boxes/keypoints and +# scores. +node { + calculator: "TfLiteInferenceCalculator" + input_stream: "TENSORS:image_tensor" + output_stream: "TENSORS:detection_tensors" + options: { + [mediapipe.TfLiteInferenceCalculatorOptions.ext] { + model_path: "mediapipe/examples/coral/models/face-detector-quantized_edgetpu.tflite" + } + } +} + +# Generates a single side packet containing a vector of SSD anchors based on +# the specification in the options. +node { + calculator: "SsdAnchorsCalculator" + output_side_packet: "anchors" + options: { + [mediapipe.SsdAnchorsCalculatorOptions.ext] { + num_layers: 4 + min_scale: 0.1484375 + max_scale: 0.75 + input_size_height: 128 + input_size_width: 128 + anchor_offset_x: 0.5 + anchor_offset_y: 0.5 + strides: 8 + strides: 16 + strides: 16 + strides: 16 + aspect_ratios: 1.0 + fixed_anchor_size: true + } + } +} + +# Decodes the detection tensors generated by the TensorFlow Lite model, based on +# the SSD anchors and the specification in the options, into a vector of +# detections. Each detection describes a detected object. +node { + calculator: "TfLiteTensorsToDetectionsCalculator" + input_stream: "TENSORS:detection_tensors" + input_side_packet: "ANCHORS:anchors" + output_stream: "DETECTIONS:detections" + options: { + [mediapipe.TfLiteTensorsToDetectionsCalculatorOptions.ext] { + num_classes: 1 + num_boxes: 896 + num_coords: 16 + box_coord_offset: 0 + keypoint_coord_offset: 4 + num_keypoints: 6 + num_values_per_keypoint: 2 + sigmoid_score: true + score_clipping_thresh: 100.0 + reverse_output_order: true + x_scale: 128.0 + y_scale: 128.0 + h_scale: 128.0 + w_scale: 128.0 + min_score_thresh: 0.75 + } + } +} + +# Performs non-max suppression to remove excessive detections. +node { + calculator: "NonMaxSuppressionCalculator" + input_stream: "detections" + output_stream: "filtered_detections" + options: { + [mediapipe.NonMaxSuppressionCalculatorOptions.ext] { + min_suppression_threshold: 0.3 + overlap_type: INTERSECTION_OVER_UNION + algorithm: WEIGHTED + return_empty_detections: true + } + } +} + +# Maps detection label IDs to the corresponding label text ("Face"). The label +# map is provided in the label_map_path option. 
+node { + calculator: "DetectionLabelIdToTextCalculator" + input_stream: "filtered_detections" + output_stream: "labeled_detections" + options: { + [mediapipe.DetectionLabelIdToTextCalculatorOptions.ext] { + label_map_path: "mediapipe/models/face_detection_front_labelmap.txt" + } + } +} + +# Adjusts detection locations (already normalized to [0.f, 1.f]) on the +# letterboxed image (after image transformation with the FIT scale mode) to the +# corresponding locations on the same image with the letterbox removed (the +# input image to the graph before image transformation). +node { + calculator: "DetectionLetterboxRemovalCalculator" + input_stream: "DETECTIONS:labeled_detections" + input_stream: "LETTERBOX_PADDING:letterbox_padding" + output_stream: "DETECTIONS:output_detections" +} + +# Converts the detections to drawing primitives for annotation overlay. +node { + calculator: "DetectionsToRenderDataCalculator" + input_stream: "DETECTIONS:output_detections" + output_stream: "RENDER_DATA:render_data" + options: { + [mediapipe.DetectionsToRenderDataCalculatorOptions.ext] { + thickness: 4.0 + color { r: 255 g: 0 b: 0 } + } + } +} + +# Draws annotations and overlays them on top of the input images. +node { + calculator: "AnnotationOverlayCalculator" + input_stream: "INPUT_FRAME:throttled_input_video" + input_stream: "render_data" + output_stream: "OUTPUT_FRAME:output_video" +} + diff --git a/mediapipe/examples/coral/graphs/object_detection_desktop_live.pbtxt b/mediapipe/examples/coral/graphs/object_detection_desktop_live.pbtxt new file mode 100644 index 000000000..52de19705 --- /dev/null +++ b/mediapipe/examples/coral/graphs/object_detection_desktop_live.pbtxt @@ -0,0 +1,179 @@ +# MediaPipe graph that performs object detection with TensorFlow Lite on CPU. +# Used in the examples in +# mediapipe/examples/coral:object_detection_cpu. + +# Images on CPU coming into and out of the graph. +input_stream: "input_video" +output_stream: "output_video" + +# Throttles the images flowing downstream for flow control. It passes through +# the very first incoming image unaltered, and waits for +# TfLiteTensorsToDetectionsCalculator downstream in the graph to finish +# generating the corresponding detections before it passes through another +# image. All images that come in while waiting are dropped, limiting the number +# of in-flight images between this calculator and +# TfLiteTensorsToDetectionsCalculator to 1. This prevents the nodes in between +# from queuing up incoming images and data excessively, which leads to increased +# latency and memory usage, unwanted in real-time mobile applications. It also +# eliminates unnecessary computation, e.g., a transformed image produced by +# ImageTransformationCalculator may get dropped downstream if the subsequent +# TfLiteConverterCalculator or TfLiteInferenceCalculator is still busy +# processing previous inputs. +node { + calculator: "FlowLimiterCalculator" + input_stream: "input_video" + input_stream: "FINISHED:detections" + input_stream_info: { + tag_index: "FINISHED" + back_edge: true + } + output_stream: "throttled_input_video" +} + +# Transforms the input image on CPU to a 300x300 image. To scale the image, by +# default it uses the STRETCH scale mode that maps the entire input image to the +# entire transformed image. As a result, image aspect ratio may be changed and +# objects in the image may be deformed (stretched or squeezed), but the object +# detection model used in this graph is agnostic to that deformation.
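+# For example, a 640x480 camera frame maps to the full 300x300 square, so
+# circular objects render as ellipses in the transformed image. Because STRETCH
+# covers the whole frame, normalized detection coordinates transfer directly
+# back to the original image, which is why this graph needs no
+# letterbox-removal step.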
+node: { + calculator: "ImageTransformationCalculator" + input_stream: "IMAGE:throttled_input_video" + output_stream: "IMAGE:transformed_input_video" + options: { + [mediapipe.ImageTransformationCalculatorOptions.ext] { + output_width: 300 + output_height: 300 + } + } +} + +# Converts the transformed input image on CPU into an image tensor stored as a +# TfLiteTensor. +node { + calculator: "TfLiteConverterCalculator" + input_stream: "IMAGE:transformed_input_video" + output_stream: "TENSORS:image_tensor" + options: { + [mediapipe.TfLiteConverterCalculatorOptions.ext] { + use_quantized_tensors: true + } + } +} + +# Runs a TensorFlow Lite model on CPU that takes an image tensor and outputs a +# vector of tensors representing, for instance, detection boxes/keypoints and +# scores. +node { + calculator: "TfLiteInferenceCalculator" + input_stream: "TENSORS:image_tensor" + output_stream: "TENSORS:detection_tensors" + options: { + [mediapipe.TfLiteInferenceCalculatorOptions.ext] { + model_path: "mediapipe/examples/coral/models/object-detector-quantized_edgetpu.tflite" + } + } +} + +# Generates a single side packet containing a vector of SSD anchors based on +# the specification in the options. +node { + calculator: "SsdAnchorsCalculator" + output_side_packet: "anchors" + options: { + [mediapipe.SsdAnchorsCalculatorOptions.ext] { + num_layers: 6 + min_scale: 0.2 + max_scale: 0.95 + input_size_height: 300 + input_size_width: 300 + anchor_offset_x: 0.5 + anchor_offset_y: 0.5 + strides: 16 + strides: 32 + strides: 64 + strides: 128 + strides: 256 + strides: 512 + aspect_ratios: 1.0 + aspect_ratios: 2.0 + aspect_ratios: 0.5 + aspect_ratios: 3.0 + aspect_ratios: 0.3333 + reduce_boxes_in_lowest_layer: true + } + } +} + +# Decodes the detection tensors generated by the TensorFlow Lite model, based on +# the SSD anchors and the specification in the options, into a vector of +# detections. Each detection describes a detected object. +node { + calculator: "TfLiteTensorsToDetectionsCalculator" + input_stream: "TENSORS:detection_tensors" + input_side_packet: "ANCHORS:anchors" + output_stream: "DETECTIONS:detections" + options: { + [mediapipe.TfLiteTensorsToDetectionsCalculatorOptions.ext] { + num_classes: 91 + num_boxes: 2034 + num_coords: 4 + ignore_classes: 0 + sigmoid_score: true + apply_exponential_on_box_size: true + x_scale: 10.0 + y_scale: 10.0 + h_scale: 5.0 + w_scale: 5.0 + min_score_thresh: 0.6 + } + } +} + +# Performs non-max suppression to remove excessive detections. +node { + calculator: "NonMaxSuppressionCalculator" + input_stream: "detections" + output_stream: "filtered_detections" + options: { + [mediapipe.NonMaxSuppressionCalculatorOptions.ext] { + min_suppression_threshold: 0.4 + max_num_detections: 3 + overlap_type: INTERSECTION_OVER_UNION + return_empty_detections: true + } + } +} + +# Maps detection label IDs to the corresponding label text. The label map is +# provided in the label_map_path option. +node { + calculator: "DetectionLabelIdToTextCalculator" + input_stream: "filtered_detections" + output_stream: "output_detections" + options: { + [mediapipe.DetectionLabelIdToTextCalculatorOptions.ext] { + label_map_path: "mediapipe/examples/coral/models/object_detection_labelmap.txt" + } + } +} + +# Converts the detections to drawing primitives for annotation overlay. 
+node { + calculator: "DetectionsToRenderDataCalculator" + input_stream: "DETECTIONS:output_detections" + output_stream: "RENDER_DATA:render_data" + options: { + [mediapipe.DetectionsToRenderDataCalculatorOptions.ext] { + thickness: 4.0 + color { r: 255 g: 0 b: 0 } + } + } +} + +# Draws annotations and overlays them on top of the input images. +node { + calculator: "AnnotationOverlayCalculator" + input_stream: "INPUT_FRAME:throttled_input_video" + input_stream: "render_data" + output_stream: "OUTPUT_FRAME:output_video" +} diff --git a/mediapipe/examples/coral/models/face-detector-quantized_edgetpu.tflite b/mediapipe/examples/coral/models/face-detector-quantized_edgetpu.tflite new file mode 100644 index 000000000..2f2b4b55e Binary files /dev/null and b/mediapipe/examples/coral/models/face-detector-quantized_edgetpu.tflite differ diff --git a/mediapipe/examples/coral/models/object-detector-quantized_edgetpu.tflite b/mediapipe/examples/coral/models/object-detector-quantized_edgetpu.tflite new file mode 100644 index 000000000..7e6cf12f6 Binary files /dev/null and b/mediapipe/examples/coral/models/object-detector-quantized_edgetpu.tflite differ diff --git a/mediapipe/examples/coral/models/object_detection_labelmap.txt b/mediapipe/examples/coral/models/object_detection_labelmap.txt new file mode 100644 index 000000000..695772dcd --- /dev/null +++ b/mediapipe/examples/coral/models/object_detection_labelmap.txt @@ -0,0 +1,90 @@ +person +bicycle +car +motorcycle +airplane +bus +train +truck +boat +traffic light +fire hydrant +??? +stop sign +parking meter +bench +bird +cat +dog +horse +sheep +cow +elephant +bear +zebra +giraffe +??? +backpack +umbrella +??? +??? +handbag +tie +suitcase +frisbee +skis +snowboard +sports ball +kite +baseball bat +baseball glove +skateboard +surfboard +tennis racket +bottle +??? +wine glass +cup +fork +knife +spoon +bowl +banana +apple +sandwich +orange +broccoli +carrot +hot dog +pizza +donut +cake +chair +couch +potted plant +bed +??? +dining table +??? +??? +toilet +??? +tv +laptop +mouse +remote +keyboard +cell phone +microwave +oven +toaster +sink +refrigerator +??? +book +clock +vase +scissors +teddy bear +hair drier +toothbrush diff --git a/mediapipe/examples/coral/setup.sh b/mediapipe/examples/coral/setup.sh new file mode 100755 index 000000000..d8680e49c --- /dev/null +++ b/mediapipe/examples/coral/setup.sh @@ -0,0 +1,21 @@ +#!/bin/sh + +set -e +set -v + +echo 'Please run this from root level mediapipe directory! \n Ex:' +echo ' sh mediapipe/examples/coral/setup.sh ' + +sleep 3 + +mkdir opencv32_arm64_libs + +cp mediapipe/examples/coral/update_sources.sh update_sources.sh +chmod +x update_sources.sh + +mv Dockerfile Dockerfile.orig +cp mediapipe/examples/coral/Dockerfile Dockerfile + +cp WORKSPACE WORKSPACE.orig +cp mediapipe/examples/coral/WORKSPACE WORKSPACE + diff --git a/mediapipe/examples/coral/update_sources.sh b/mediapipe/examples/coral/update_sources.sh new file mode 100755 index 000000000..dcd336e67 --- /dev/null +++ b/mediapipe/examples/coral/update_sources.sh @@ -0,0 +1,11 @@ +#!/bin/bash + +# To run in the Coral Docker environment. + +. 
/etc/os-release + +sed -i "s/deb\ /deb \[arch=amd64\]\ /g" /etc/apt/sources.list + +echo "deb [arch=arm64,armhf] http://ports.ubuntu.com/ubuntu-ports ${UBUNTU_CODENAME} main universe" >> /etc/apt/sources.list +echo "deb [arch=arm64,armhf] http://ports.ubuntu.com/ubuntu-ports ${UBUNTU_CODENAME}-updates main universe" >> /etc/apt/sources.list +echo "deb [arch=arm64,armhf] http://ports.ubuntu.com/ubuntu-ports ${UBUNTU_CODENAME}-security main universe" >> /etc/apt/sources.list diff --git a/mediapipe/examples/desktop/README.md b/mediapipe/examples/desktop/README.md index 95e40e49d..8e36e42eb 100644 --- a/mediapipe/examples/desktop/README.md +++ b/mediapipe/examples/desktop/README.md @@ -9,7 +9,9 @@ bazel build -c opt mediapipe/examples/desktop/hello_world:hello_world and then run it using: ``` -bazel-bin/mediapipe/examples/desktop/hello_world/hello_world --logtostderr +export GLOG_logtostderr=1 + +bazel-bin/mediapipe/examples/desktop/hello_world/hello_world ``` **TFlite Object Detection** @@ -23,10 +25,11 @@ bazel build -c opt mediapipe/examples/desktop/object_detection:object_detection_ and run it using: ``` +export GLOG_logtostderr=1 + bazel-bin/mediapipe/examples/desktop/object_detection/object_detection_tflite \ --calculator_graph_config_file=mediapipe/graphs/object_detection/object_detection_desktop_tflite_graph.pbtxt \ - --input_side_packets=input_video_path=/path/to/input/file,output_video_path=/path/to/output/file \ - --alsologtostderr + --input_side_packets=input_video_path=/path/to/input/file,output_video_path=/path/to/output/file ``` **TensorFlow Object Detection** @@ -34,6 +37,8 @@ bazel-bin/mediapipe/examples/desktop/object_detection/object_detection_tflite \ To build the object detection demo using a TensorFlow model on desktop, use: ``` +export GLOG_logtostderr=1 + bazel build -c opt mediapipe/examples/desktop/object_detection:object_detection_tensorflow \ --define MEDIAPIPE_DISABLE_GPU=1 ``` @@ -41,8 +46,68 @@ bazel build -c opt mediapipe/examples/desktop/object_detection:object_detection_ and run it using: ``` +export GLOG_logtostderr=1 + bazel-bin/mediapipe/examples/desktop/object_detection/object_detection_tensorflow \ --calculator_graph_config_file=mediapipe/graphs/object_detection/object_detection_desktop_tensorflow_graph.pbtxt \ --input_side_packets=input_video_path=/path/to/input/file,output_video_path=/path/to/output/file - --alsologtostderr ``` + +**TFlite Hand Detection** + +To build the hand detection demo using a TFLite model on desktop, use: + +``` +bazel build -c opt mediapipe/examples/desktop/hand_tracking:hand_tracking_tflite --define MEDIAPIPE_DISABLE_GPU=1 +``` + +and run it using: + +``` +export GLOG_logtostderr=1 + +bazel-bin/mediapipe/examples/desktop/hand_tracking/hand_tracking_tflite \ + --calculator_graph_config_file=mediapipe/graphs/hand_tracking/hand_detection_desktop.pbtxt \ + --input_side_packets=input_video_path=/path/to/input/file,output_video_path=/path/to/output/file +``` + +**TFlite Hand Tracking** + +To build the hand tracking demo using a TFLite model on desktop, use: + +``` +bazel build -c opt mediapipe/examples/desktop/hand_tracking:hand_tracking_tflite --define MEDIAPIPE_DISABLE_GPU=1 +``` + +and run it using: + +``` +export GLOG_logtostderr=1 + +bazel-bin/mediapipe/examples/desktop/hand_tracking/hand_tracking_tflite \ + --calculator_graph_config_file=mediapipe/graphs/hand_tracking/hand_tracking_desktop.pbtxt \ + --input_side_packets=input_video_path=/path/to/input/file,output_video_path=/path/to/output/file +``` + +**TFlite Multi-Hand 
Tracking** + +To build the multi-hand tracking demo using a TFLite model on desktop, use: + +``` +bazel build -c opt mediapipe/examples/desktop/multi_hand_tracking:multi_hand_tracking_tflite --define MEDIAPIPE_DISABLE_GPU=1 +``` + +and run it using: + +``` +export GLOG_logtostderr=1 + +bazel-bin/mediapipe/examples/desktop/multi_hand_tracking/multi_hand_tracking_tflite \ + --calculator_graph_config_file=mediapipe/graphs/hand_tracking/multi_hand_tracking_desktop.pbtxt \ + --input_side_packets=input_video_path=/path/to/input/file,output_video_path=/path/to/output/file +``` + +To change the number of hands to `x` in this application, change: + +1. `min_size:x` in `CollectionHasMinSizeCalculatorOptions` in `mediapipe/graphs/hand_tracking/multi_hand_tracking_desktop.pbtxt`. +2. `max_vec_size:x` in `ClipVectorSizeCalculatorOptions` in `mediapipe/graphs/hand_tracking/subgraphs/multi_hand_detection_cpu.pbtxt`. diff --git a/mediapipe/examples/desktop/demo_run_graph_main.cc b/mediapipe/examples/desktop/demo_run_graph_main.cc index 14136560c..d650cec53 100644 --- a/mediapipe/examples/desktop/demo_run_graph_main.cc +++ b/mediapipe/examples/desktop/demo_run_graph_main.cc @@ -76,6 +76,11 @@ DEFINE_string(output_video_path, "", RET_CHECK(writer.isOpened()); } else { cv::namedWindow(kWindowName, /*flags=WINDOW_AUTOSIZE*/ 1); +#if (CV_MAJOR_VERSION >= 3) && (CV_MINOR_VERSION >= 2) + capture.set(cv::CAP_PROP_FRAME_WIDTH, 640); + capture.set(cv::CAP_PROP_FRAME_HEIGHT, 480); + capture.set(cv::CAP_PROP_FPS, 30); +#endif } LOG(INFO) << "Start running the calculator graph."; diff --git a/mediapipe/examples/desktop/demo_run_graph_main_gpu.cc b/mediapipe/examples/desktop/demo_run_graph_main_gpu.cc index 4bf8cf97a..687a704eb 100644 --- a/mediapipe/examples/desktop/demo_run_graph_main_gpu.cc +++ b/mediapipe/examples/desktop/demo_run_graph_main_gpu.cc @@ -86,6 +86,11 @@ DEFINE_string(output_video_path, "", RET_CHECK(writer.isOpened()); } else { cv::namedWindow(kWindowName, /*flags=WINDOW_AUTOSIZE*/ 1); +#if (CV_MAJOR_VERSION >= 3) && (CV_MINOR_VERSION >= 2) + capture.set(cv::CAP_PROP_FRAME_WIDTH, 640); + capture.set(cv::CAP_PROP_FRAME_HEIGHT, 480); + capture.set(cv::CAP_PROP_FPS, 30); +#endif } LOG(INFO) << "Start running the calculator graph."; diff --git a/mediapipe/examples/desktop/media_sequence/kinetics_dataset.py b/mediapipe/examples/desktop/media_sequence/kinetics_dataset.py index 1d7f5b69e..93c62d54e 100644 --- a/mediapipe/examples/desktop/media_sequence/kinetics_dataset.py +++ b/mediapipe/examples/desktop/media_sequence/kinetics_dataset.py @@ -93,15 +93,15 @@ FILEPATTERN = "kinetics_700_%s_25fps_rgb_flow" SPLITS = { "train": { "shards": 1000, - "examples": 541632 + "examples": 541490 }, "validate": { "shards": 100, - "examples": 34727 + "examples": 34715 }, "test": { "shards": 100, - "examples": 69347 + "examples": 69321 }, "custom": { "csv": None, # Add a CSV for your own data here. diff --git a/mediapipe/examples/ios/handtrackinggpu/BUILD b/mediapipe/examples/ios/handtrackinggpu/BUILD index f84008fc1..7481db221 100644 --- a/mediapipe/examples/ios/handtrackinggpu/BUILD +++ b/mediapipe/examples/ios/handtrackinggpu/BUILD @@ -12,15 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License.
-licenses(["notice"]) # Apache 2.0 - -MIN_IOS_VERSION = "10.0" - load( "@build_bazel_rules_apple//apple:ios.bzl", "ios_application", ) +licenses(["notice"]) # Apache 2.0 + +MIN_IOS_VERSION = "10.0" + # To use the 3D model instead of the default 2D model, add "--define 3D=true" to the # bazel build command. config_setting( @@ -90,6 +90,7 @@ objc_library( "//mediapipe:ios_x86_64": [], "//conditions:default": [ "//mediapipe/graphs/hand_tracking:mobile_calculators", + "//mediapipe/framework/formats:landmark_cc_proto", ], }), ) diff --git a/mediapipe/examples/ios/handtrackinggpu/ViewController.mm b/mediapipe/examples/ios/handtrackinggpu/ViewController.mm index ca587ed88..a15de9d43 100644 --- a/mediapipe/examples/ios/handtrackinggpu/ViewController.mm +++ b/mediapipe/examples/ios/handtrackinggpu/ViewController.mm @@ -18,10 +18,13 @@ #import "mediapipe/objc/MPPCameraInputSource.h" #import "mediapipe/objc/MPPLayerRenderer.h" +#include "mediapipe/framework/formats/landmark.pb.h" + static NSString* const kGraphName = @"hand_tracking_mobile_gpu"; static const char* kInputStream = "input_video"; static const char* kOutputStream = "output_video"; +static const char* kLandmarksOutputStream = "hand_landmarks"; static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue"; @interface ViewController () <MPPGraphDelegate, MPPInputSourceDelegate> @@ -80,6 +83,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue"; // Create MediaPipe graph with mediapipe::CalculatorGraphConfig proto object. MPPGraph* newGraph = [[MPPGraph alloc] initWithGraphConfig:config]; [newGraph addFrameOutputStream:kOutputStream outputPacketType:MPPPacketTypePixelBuffer]; + [newGraph addFrameOutputStream:kLandmarksOutputStream outputPacketType:MPPPacketTypeRaw]; return newGraph; } @@ -160,6 +164,25 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue"; } } +// Receives a raw packet from the MediaPipe graph. Invoked on a MediaPipe worker thread. +- (void)mediapipeGraph:(MPPGraph*)graph + didOutputPacket:(const ::mediapipe::Packet&)packet + fromStream:(const std::string&)streamName { + if (streamName == kLandmarksOutputStream) { + if (packet.IsEmpty()) { + NSLog(@"[TS:%lld] No hand landmarks", packet.Timestamp().Value()); + return; + } + const auto& landmarks = packet.Get<::mediapipe::NormalizedLandmarkList>(); + NSLog(@"[TS:%lld] Number of landmarks on hand: %d", packet.Timestamp().Value(), + landmarks.landmark_size()); + for (int i = 0; i < landmarks.landmark_size(); ++i) { + NSLog(@"\tLandmark[%d]: (%f, %f, %f)", i, landmarks.landmark(i).x(), + landmarks.landmark(i).y(), landmarks.landmark(i).z()); + } + } +} + #pragma mark - MPPInputSourceDelegate methods // Must be invoked on _videoQueue. diff --git a/mediapipe/examples/ios/multihandtrackinggpu/BUILD b/mediapipe/examples/ios/multihandtrackinggpu/BUILD index edfd5bb54..cda589e2d 100644 --- a/mediapipe/examples/ios/multihandtrackinggpu/BUILD +++ b/mediapipe/examples/ios/multihandtrackinggpu/BUILD @@ -12,15 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. -licenses(["notice"]) # Apache 2.0 - -MIN_IOS_VERSION = "10.0" - load( "@build_bazel_rules_apple//apple:ios.bzl", "ios_application", ) +licenses(["notice"]) # Apache 2.0 + +MIN_IOS_VERSION = "10.0" + # To use the 3D model instead of the default 2D model, add "--define 3D=true" to the # bazel build command.
config_setting( @@ -90,6 +90,7 @@ objc_library( "//mediapipe:ios_x86_64": [], "//conditions:default": [ "//mediapipe/graphs/hand_tracking:multi_hand_mobile_calculators", + "//mediapipe/framework/formats:landmark_cc_proto", ], }), ) diff --git a/mediapipe/examples/ios/multihandtrackinggpu/ViewController.mm b/mediapipe/examples/ios/multihandtrackinggpu/ViewController.mm index 2d7c5d7a5..66a3c9aff 100644 --- a/mediapipe/examples/ios/multihandtrackinggpu/ViewController.mm +++ b/mediapipe/examples/ios/multihandtrackinggpu/ViewController.mm @@ -18,10 +18,13 @@ #import "mediapipe/objc/MPPCameraInputSource.h" #import "mediapipe/objc/MPPLayerRenderer.h" +#include "mediapipe/framework/formats/landmark.pb.h" + static NSString* const kGraphName = @"multi_hand_tracking_mobile_gpu"; static const char* kInputStream = "input_video"; static const char* kOutputStream = "output_video"; +static const char* kLandmarksOutputStream = "multi_hand_landmarks"; static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue"; @interface ViewController () <MPPGraphDelegate, MPPInputSourceDelegate> @@ -80,6 +83,7 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue"; // Create MediaPipe graph with mediapipe::CalculatorGraphConfig proto object. MPPGraph* newGraph = [[MPPGraph alloc] initWithGraphConfig:config]; [newGraph addFrameOutputStream:kOutputStream outputPacketType:MPPPacketTypePixelBuffer]; + [newGraph addFrameOutputStream:kLandmarksOutputStream outputPacketType:MPPPacketTypeRaw]; return newGraph; } @@ -160,6 +164,29 @@ static const char* kVideoQueueLabel = "com.google.mediapipe.example.videoQueue"; } } +// Receives a raw packet from the MediaPipe graph. Invoked on a MediaPipe worker thread. +- (void)mediapipeGraph:(MPPGraph*)graph + didOutputPacket:(const ::mediapipe::Packet&)packet + fromStream:(const std::string&)streamName { + if (streamName == kLandmarksOutputStream) { + if (packet.IsEmpty()) { + NSLog(@"[TS:%lld] No hand landmarks", packet.Timestamp().Value()); + return; + } + const auto& multi_hand_landmarks = packet.Get<std::vector<::mediapipe::NormalizedLandmarkList>>(); + NSLog(@"[TS:%lld] Number of hand instances with landmarks: %lu", packet.Timestamp().Value(), + multi_hand_landmarks.size()); + for (int hand_index = 0; hand_index < multi_hand_landmarks.size(); ++hand_index) { + const auto& landmarks = multi_hand_landmarks[hand_index]; + NSLog(@"\tNumber of landmarks for hand[%d]: %d", hand_index, landmarks.landmark_size()); + for (int i = 0; i < landmarks.landmark_size(); ++i) { + NSLog(@"\t\tLandmark[%d]: (%f, %f, %f)", i, landmarks.landmark(i).x(), + landmarks.landmark(i).y(), landmarks.landmark(i).z()); + } + } + } +} + #pragma mark - MPPInputSourceDelegate methods // Must be invoked on _videoQueue.
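Note on the callback above: the multi-hand stream carries one NormalizedLandmarkList per detected hand. As a minimal sketch (not part of this change), the same packet could be used to read a single keypoint, assuming the 21-keypoint hand topology in which landmark index 8 is the index fingertip:

    // Sketch only; reuses the packet type logged above. Index 8 is assumed to
    // be the index fingertip in the 21-landmark hand model.
    const auto& hands = packet.Get<std::vector<::mediapipe::NormalizedLandmarkList>>();
    for (int h = 0; h < static_cast<int>(hands.size()); ++h) {
      if (hands[h].landmark_size() > 8) {
        const auto& tip = hands[h].landmark(8);
        NSLog(@"hand[%d] index fingertip (normalized): (%f, %f)", h, tip.x(), tip.y());
      }
    }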
diff --git a/mediapipe/framework/BUILD b/mediapipe/framework/BUILD index d5586ae9a..d9471d948 100644 --- a/mediapipe/framework/BUILD +++ b/mediapipe/framework/BUILD @@ -1524,7 +1524,9 @@ cc_test( "//mediapipe/framework/stream_handler:fixed_size_input_stream_handler", "//mediapipe/framework/stream_handler:immediate_input_stream_handler", "//mediapipe/framework/stream_handler:mux_input_stream_handler", + "//mediapipe/framework/stream_handler:sync_set_input_stream_handler", "//mediapipe/framework/tool:sink", + "@com_google_absl//absl/strings", ], ) diff --git a/mediapipe/framework/calculator_graph.cc b/mediapipe/framework/calculator_graph.cc index 32a790add..9bd6ac6fa 100644 --- a/mediapipe/framework/calculator_graph.cc +++ b/mediapipe/framework/calculator_graph.cc @@ -1094,6 +1094,19 @@ bool CalculatorGraph::IsNodeThrottled(int node_id) { return max_queue_size_ != -1 && !full_input_streams_[node_id].empty(); } +// Returns true if an input stream serves as a graph-output-stream. +bool IsGraphOutputStream( + InputStreamManager* stream, + const std::vector<std::shared_ptr<internal::GraphOutputStream>>& + graph_output_streams) { + for (auto& graph_output_stream : graph_output_streams) { + if (stream == graph_output_stream->input_stream()) { + return true; + } + } + return false; +} + bool CalculatorGraph::UnthrottleSources() { // NOTE: We can be sure that this function will grow input streams enough // to unthrottle at least one source node. The current stream queue sizes @@ -1105,25 +1118,17 @@ bool CalculatorGraph::UnthrottleSources() { { absl::MutexLock lock(&full_input_streams_mutex_); for (absl::flat_hash_set<InputStreamManager*>& s : full_input_streams_) { - if (!s.empty()) { - full_streams.insert(s.begin(), s.end()); + for (auto& stream : s) { + // The queue size of a graph output stream shouldn't change. Throttling + // should continue until the caller of the graph output stream consumes + // enough packets. + if (!IsGraphOutputStream(stream, graph_output_streams_)) { + full_streams.insert(stream); + } } } } for (InputStreamManager* stream : full_streams) { - // The queue size of a graph output stream shouldn't change. Throttling - // should continue until the caller of the graph output stream consumes - // enough packets. - bool is_graph_output_stream = false; - for (auto& graph_output_stream : graph_output_streams_) { - if (stream == graph_output_stream->input_stream()) { - is_graph_output_stream = true; - break; - } - } - if (is_graph_output_stream) { - continue; - } if (Config().report_deadlock()) { RecordError(::mediapipe::UnavailableError(absl::StrCat( "Detected a deadlock due to input throttling for: \"", stream->Name(), diff --git a/mediapipe/framework/calculator_graph_bounds_test.cc b/mediapipe/framework/calculator_graph_bounds_test.cc index b6144c0ae..17998a1ff 100644 --- a/mediapipe/framework/calculator_graph_bounds_test.cc +++ b/mediapipe/framework/calculator_graph_bounds_test.cc @@ -12,6 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. +#include "absl/strings/str_replace.h" #include "mediapipe/framework/calculator_context.h" #include "mediapipe/framework/calculator_framework.h" #include "mediapipe/framework/port/canonical_errors.h" @@ -642,6 +643,36 @@ REGISTER_CALCULATOR(OffsetBoundCalculator); // A Calculator that produces a packet for each call to Process.
class BoundToPacketCalculator : public CalculatorBase { + public: + static ::mediapipe::Status GetContract(CalculatorContract* cc) { + for (int i = 0; i < cc->Inputs().NumEntries(); ++i) { + cc->Inputs().Index(i).SetAny(); + } + for (int i = 0; i < cc->Outputs().NumEntries(); ++i) { + cc->Outputs().Index(i).Set<Timestamp>(); + } + return ::mediapipe::OkStatus(); + } + + ::mediapipe::Status Open(CalculatorContext* cc) final { + return ::mediapipe::OkStatus(); + } + + ::mediapipe::Status Process(CalculatorContext* cc) final { + for (int i = 0; i < cc->Outputs().NumEntries(); ++i) { + Timestamp t = cc->Inputs().Index(i).Value().Timestamp(); + cc->Outputs().Index(i).AddPacket( + mediapipe::MakePacket<Timestamp>(t).At(cc->InputTimestamp())); + } + return ::mediapipe::OkStatus(); + } +}; +REGISTER_CALCULATOR(BoundToPacketCalculator); + +// A Calculator that produces packets at timestamps beyond the input timestamp. +class FuturePacketCalculator : public CalculatorBase { + static constexpr int64 kOutputFutureMicros = 3; + + public: + static ::mediapipe::Status GetContract(CalculatorContract* cc) { + cc->Inputs().Index(0).Set<int>(); @@ -654,11 +685,14 @@ class BoundToPacketCalculator : public CalculatorBase { } ::mediapipe::Status Process(CalculatorContext* cc) final { - cc->Outputs().Index(0).AddPacket(Adopt(new int(33))); + const Packet& packet = cc->Inputs().Index(0).Value(); + Timestamp timestamp = + Timestamp(packet.Timestamp().Value() + kOutputFutureMicros); + cc->Outputs().Index(0).AddPacket(packet.At(timestamp)); return ::mediapipe::OkStatus(); } }; -REGISTER_CALCULATOR(BoundToPacketCalculator); +REGISTER_CALCULATOR(FuturePacketCalculator); // Verifies that SetOffset still propagates when Process is called and // produces no output packets. @@ -964,5 +998,111 @@ TEST(CalculatorGraphBoundsTest, LastPacketCheck) { MP_ASSERT_OK(graph.WaitUntilDone()); } +// Shows that bounds are indicated for input streams without input packets. +void TestBoundsForEmptyInputs(std::string input_stream_handler) { + // FuturePacketCalculator and OffsetBoundCalculator produce future ts bounds. + // BoundToPacketCalculator reports all of its bounds, including empty inputs. + std::string config_str = R"( + input_stream: 'input' + node { + calculator: 'FuturePacketCalculator' + input_stream: 'input' + output_stream: 'futures' + } + node { + calculator: 'OffsetBoundCalculator' + input_stream: 'futures' + output_stream: 'bounds' + } + node { + calculator: 'BoundToPacketCalculator' + input_stream: 'input' + input_stream: 'bounds' + output_stream: 'input_ts' + output_stream: 'bounds_ts' + input_stream_handler { $input_stream_handler } + } + )"; + absl::StrReplaceAll({{"$input_stream_handler", input_stream_handler}}, + &config_str); + CalculatorGraphConfig config = + ::mediapipe::ParseTextProtoOrDie<CalculatorGraphConfig>(config_str); + CalculatorGraph graph; + std::vector<Packet> input_ts_packets; + std::vector<Packet> bounds_ts_packets; + MP_ASSERT_OK(graph.Initialize(config)); + MP_ASSERT_OK(graph.ObserveOutputStream("input_ts", [&](const Packet& p) { + input_ts_packets.push_back(p); + return ::mediapipe::OkStatus(); + })); + MP_ASSERT_OK(graph.ObserveOutputStream("bounds_ts", [&](const Packet& p) { + bounds_ts_packets.push_back(p); + return ::mediapipe::OkStatus(); + })); + MP_ASSERT_OK(graph.StartRun({})); + MP_ASSERT_OK(graph.WaitUntilIdle()); + + // Add four packets into the graph, with timestamps 0, 10, 20, 30.
+ constexpr int kNumInputs = 4; + for (int i = 0; i < kNumInputs; ++i) { + Packet p = MakePacket<int>(33).At(Timestamp(i * 10)); + MP_ASSERT_OK(graph.AddPacketToInputStream("input", p)); + MP_ASSERT_OK(graph.WaitUntilIdle()); + } + + // Packets arrive. The input packet timestamps are: 0, 10, 20, 30. + // The corresponding empty packet timestamps are: 3, 13, 23, 33. + MP_ASSERT_OK(graph.WaitUntilIdle()); + EXPECT_EQ(input_ts_packets.size(), 4); + EXPECT_EQ(bounds_ts_packets.size(), 4); + + // The timestamp bounds from OffsetBoundCalculator are: 3, 13, 23, 33. + // Because the process call waits for the input packets and not for + // the empty packets, the first empty packet timestamp can be + // either Timestamp::Unstarted() or Timestamp(3). + std::vector<Timestamp> expected = {Timestamp::Unstarted(), Timestamp(3), + Timestamp(13), Timestamp(23), + Timestamp(33)}; + for (int i = 0; i < bounds_ts_packets.size(); ++i) { + Timestamp ts = bounds_ts_packets[i].Get<Timestamp>(); + EXPECT_GE(ts, expected[i]); + EXPECT_LE(ts, expected[i + 1]); + } + + // Shutdown the graph. + MP_ASSERT_OK(graph.CloseAllPacketSources()); + MP_ASSERT_OK(graph.WaitUntilDone()); +} + +// Shows that bounds are indicated for input streams without input packets. +TEST(CalculatorGraphBoundsTest, BoundsForEmptyInputs_Immediate) { + TestBoundsForEmptyInputs(R"( + input_stream_handler: "ImmediateInputStreamHandler")"); +} + +// Shows that bounds are indicated for input streams without input packets. +TEST(CalculatorGraphBoundsTest, BoundsForEmptyInputs_Default) { + TestBoundsForEmptyInputs(R"( + input_stream_handler: "DefaultInputStreamHandler")"); +} + +// Shows that bounds are indicated for input streams without input packets. +TEST(CalculatorGraphBoundsTest, BoundsForEmptyInputs_SyncSet) { + TestBoundsForEmptyInputs(R"( + input_stream_handler: "SyncSetInputStreamHandler")"); +} + +// Shows that bounds are indicated for input streams without input packets. +TEST(CalculatorGraphBoundsTest, BoundsForEmptyInputs_SyncSets) { + TestBoundsForEmptyInputs(R"( + input_stream_handler: "SyncSetInputStreamHandler" + options { + [mediapipe.SyncSetInputStreamHandlerOptions.ext] { + sync_set { tag_index: ":0" } + } + } + )"); +} + } // namespace } // namespace mediapipe diff --git a/mediapipe/framework/formats/BUILD b/mediapipe/framework/formats/BUILD index c7ab46bb3..f831bb3fd 100644 --- a/mediapipe/framework/formats/BUILD +++ b/mediapipe/framework/formats/BUILD @@ -13,10 +13,6 @@ # limitations under the License.
# -licenses(["notice"]) # Apache 2.0 - -exports_files(["LICENSE"]) - load("//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library") package( @@ -24,75 +20,74 @@ package( features = ["-layering_check"], ) +licenses(["notice"]) # Apache 2.0 + +exports_files(["LICENSE"]) + proto_library( name = "detection_proto", srcs = ["detection.proto"], - visibility = ["//mediapipe:__subpackages__"], + visibility = ["//visibility:public"], deps = ["//mediapipe/framework/formats:location_data_proto"], ) proto_library( name = "classification_proto", srcs = ["classification.proto"], - visibility = ["//mediapipe:__subpackages__"], + visibility = ["//visibility:public"], ) proto_library( name = "image_format_proto", srcs = ["image_format.proto"], - visibility = ["//mediapipe:__subpackages__"], + visibility = ["//visibility:public"], ) proto_library( name = "matrix_data_proto", srcs = ["matrix_data.proto"], - visibility = ["//mediapipe:__subpackages__"], + visibility = ["//visibility:public"], ) proto_library( name = "location_data_proto", srcs = ["location_data.proto"], + visibility = ["//visibility:public"], deps = ["//mediapipe/framework/formats/annotation:rasterization_proto"], ) proto_library( name = "time_series_header_proto", srcs = ["time_series_header.proto"], - visibility = ["//mediapipe:__subpackages__"], + visibility = ["//visibility:public"], ) mediapipe_cc_proto_library( name = "detection_cc_proto", srcs = ["detection.proto"], cc_deps = [":location_data_cc_proto"], - visibility = ["//mediapipe:__subpackages__"], + visibility = ["//visibility:public"], deps = [":detection_proto"], ) mediapipe_cc_proto_library( name = "classification_cc_proto", srcs = ["classification.proto"], - visibility = ["//mediapipe:__subpackages__"], + visibility = ["//visibility:public"], deps = [":classification_proto"], ) mediapipe_cc_proto_library( name = "image_format_cc_proto", srcs = ["image_format.proto"], - visibility = [ - "//mediapipe:__subpackages__", - "//mediapipe/java/com/google/mediapipe/framework:__subpackages__", - ], + visibility = ["//visibility:public"], deps = [":image_format_proto"], ) mediapipe_cc_proto_library( name = "matrix_data_cc_proto", srcs = ["matrix_data.proto"], - visibility = [ - "//mediapipe:__subpackages__", - "//mediapipe/java/com/google/mediapipe/framework:__subpackages__", - ], + visibility = ["//visibility:public"], deps = [":matrix_data_proto"], ) @@ -100,17 +95,14 @@ mediapipe_cc_proto_library( name = "location_data_cc_proto", srcs = ["location_data.proto"], cc_deps = ["//mediapipe/framework/formats/annotation:rasterization_cc_proto"], - visibility = ["//mediapipe:__subpackages__"], + visibility = ["//visibility:public"], deps = [":location_data_proto"], ) mediapipe_cc_proto_library( name = "time_series_header_cc_proto", srcs = ["time_series_header.proto"], - visibility = [ - "//mediapipe:__subpackages__", - "//mediapipe/java/com/google/mediapipe/framework:__subpackages__", - ], + visibility = ["//visibility:public"], deps = [":time_series_header_proto"], ) @@ -249,14 +241,14 @@ cc_test( proto_library( name = "rect_proto", srcs = ["rect.proto"], - visibility = ["//mediapipe:__subpackages__"], + visibility = ["//visibility:public"], ) mediapipe_cc_proto_library( name = "rect_cc_proto", srcs = ["rect.proto"], visibility = [ - "//mediapipe:__subpackages__", + "//visibility:public", ], deps = [":rect_proto"], ) @@ -264,12 +256,26 @@ mediapipe_cc_proto_library( proto_library( name = "landmark_proto", srcs = ["landmark.proto"], - visibility = 
["//mediapipe:__subpackages__"], + visibility = ["//visibility:public"], ) mediapipe_cc_proto_library( name = "landmark_cc_proto", srcs = ["landmark.proto"], + visibility = ["//visibility:public"], + deps = [":landmark_proto"], +) + +java_lite_proto_library( + name = "landmark_java_proto_lite", + strict_deps = 0, visibility = ["//mediapipe:__subpackages__"], deps = [":landmark_proto"], ) + +# Expose the proto source files for building mediapipe AAR. +filegroup( + name = "protos_src", + srcs = glob(["*.proto"]), + visibility = ["//mediapipe:__subpackages__"], +) diff --git a/mediapipe/framework/formats/annotation/BUILD b/mediapipe/framework/formats/annotation/BUILD index 939e4d95a..5ea495abe 100644 --- a/mediapipe/framework/formats/annotation/BUILD +++ b/mediapipe/framework/formats/annotation/BUILD @@ -25,26 +25,27 @@ package(default_visibility = ["//visibility:private"]) proto_library( name = "locus_proto", srcs = ["locus.proto"], + visibility = ["//visibility:public"], deps = ["//mediapipe/framework/formats/annotation:rasterization_proto"], ) proto_library( name = "rasterization_proto", srcs = ["rasterization.proto"], - visibility = ["//mediapipe:__subpackages__"], + visibility = ["//visibility:public"], ) mediapipe_cc_proto_library( name = "locus_cc_proto", srcs = ["locus.proto"], cc_deps = [":rasterization_cc_proto"], - visibility = ["//mediapipe:__subpackages__"], + visibility = ["//visibility:public"], deps = [":locus_proto"], ) mediapipe_cc_proto_library( name = "rasterization_cc_proto", srcs = ["rasterization.proto"], - visibility = ["//mediapipe:__subpackages__"], + visibility = ["//visibility:public"], deps = [":rasterization_proto"], ) diff --git a/mediapipe/framework/formats/annotation/rasterization.proto b/mediapipe/framework/formats/annotation/rasterization.proto index 728c593c0..9aad7e88f 100644 --- a/mediapipe/framework/formats/annotation/rasterization.proto +++ b/mediapipe/framework/formats/annotation/rasterization.proto @@ -27,7 +27,7 @@ message Rasterization { required int32 right_x = 3; } - // Intervals are always sorted by y-corrdinate. + // Intervals are always sorted by y-coordinate. // Therefore, a region occupies a set of scanlines ranging // from interval(0).y() to interval(interval_size() - 1)).y(). // Note: In video, at some scanlines no interval might be present. diff --git a/mediapipe/framework/formats/landmark.proto b/mediapipe/framework/formats/landmark.proto index 220b3725d..708a34000 100644 --- a/mediapipe/framework/formats/landmark.proto +++ b/mediapipe/framework/formats/landmark.proto @@ -16,6 +16,9 @@ syntax = "proto2"; package mediapipe; +option java_package = "com.google.mediapipe.formats.proto"; +option java_outer_classname = "LandmarkProto"; + // A landmark that can have 1 to 3 dimensions. Use x for 1D points, (x, y) for // 2D points and (x, y, z) for 3D points. For more dimensions, consider using // matrix_data.proto. @@ -25,6 +28,11 @@ message Landmark { optional float z = 3; } +// Group of Landmark protos. +message LandmarkList { + repeated Landmark landmark = 1; +} + // A normalized version of above Landmark proto. All coordiates should be within // [0, 1]. 
message NormalizedLandmark { diff --git a/mediapipe/framework/formats/motion/BUILD b/mediapipe/framework/formats/motion/BUILD index c065eae02..a77034c50 100644 --- a/mediapipe/framework/formats/motion/BUILD +++ b/mediapipe/framework/formats/motion/BUILD @@ -27,12 +27,13 @@ package(default_visibility = ["//visibility:private"]) proto_library( name = "optical_flow_field_data_proto", srcs = ["optical_flow_field_data.proto"], + visibility = ["//visibility:public"], ) mediapipe_cc_proto_library( name = "optical_flow_field_data_cc_proto", srcs = ["optical_flow_field_data.proto"], - visibility = ["//mediapipe:__subpackages__"], + visibility = ["//visibility:public"], deps = [":optical_flow_field_data_proto"], ) @@ -41,7 +42,7 @@ cc_library( srcs = ["optical_flow_field.cc"], hdrs = ["optical_flow_field.h"], visibility = [ - "//mediapipe:__subpackages__", + "//visibility:public", ], deps = [ "//mediapipe/framework:type_map", diff --git a/mediapipe/framework/formats/object_detection/BUILD b/mediapipe/framework/formats/object_detection/BUILD index 060281b66..c073afa07 100644 --- a/mediapipe/framework/formats/object_detection/BUILD +++ b/mediapipe/framework/formats/object_detection/BUILD @@ -24,12 +24,12 @@ package(default_visibility = ["//visibility:private"]) proto_library( name = "anchor_proto", srcs = ["anchor.proto"], - visibility = ["//mediapipe:__subpackages__"], + visibility = ["//visibility:public"], ) mediapipe_cc_proto_library( name = "anchor_cc_proto", srcs = ["anchor.proto"], - visibility = ["//mediapipe:__subpackages__"], + visibility = ["//visibility:public"], deps = [":anchor_proto"], ) diff --git a/mediapipe/framework/input_stream_manager.cc b/mediapipe/framework/input_stream_manager.cc index edbe6a689..70d67a557 100644 --- a/mediapipe/framework/input_stream_manager.cc +++ b/mediapipe/framework/input_stream_manager.cc @@ -229,6 +229,11 @@ Timestamp InputStreamManager::MinTimestampOrBound(bool* is_empty) const { if (is_empty) { *is_empty = queue_.empty(); } + return MinTimestampOrBoundHelper(); +} + +Timestamp InputStreamManager::MinTimestampOrBoundHelper() const + EXCLUSIVE_LOCKS_REQUIRED(stream_mutex_) { return queue_.empty() ? next_timestamp_bound_ : queue_.front().Timestamp(); } @@ -271,7 +276,9 @@ Packet InputStreamManager::PopPacketAtTimestamp(Timestamp timestamp, } // Clear value_ if it doesn't have exactly the right timestamp. if (current_timestamp != timestamp) { - packet = Packet(); + // The timestamp bound reported when no packet is sent. + Timestamp bound = MinTimestampOrBoundHelper(); + packet = Packet().At(bound.PreviousAllowedInStream()); ++(*num_packets_dropped); } diff --git a/mediapipe/framework/input_stream_manager.h b/mediapipe/framework/input_stream_manager.h index c0a37403b..48dfcf369 100644 --- a/mediapipe/framework/input_stream_manager.h +++ b/mediapipe/framework/input_stream_manager.h @@ -189,6 +189,9 @@ class InputStreamManager { // Returns true if the next timestamp bound reaches Timestamp::Done(). bool IsDone() const EXCLUSIVE_LOCKS_REQUIRED(stream_mutex_); + // Returns the smallest timestamp at which this stream might see an input. + Timestamp MinTimestampOrBoundHelper() const; + mutable absl::Mutex stream_mutex_; std::deque<Packet> queue_ GUARDED_BY(stream_mutex_); // The number of packets added to queue_.
Used to verify a packet at diff --git a/mediapipe/framework/packet.cc b/mediapipe/framework/packet.cc index 274e5ad23..f19f8ca6f 100644 --- a/mediapipe/framework/packet.cc +++ b/mediapipe/framework/packet.cc @@ -107,6 +107,14 @@ const proto_ns::MessageLite& Packet::GetProtoMessageLite() const { return *proto; } +StatusOr<std::vector<const proto_ns::MessageLite*>> +Packet::GetVectorOfProtoMessageLitePtrs() { + if (holder_ == nullptr) { + return ::mediapipe::InternalError("Packet is empty."); + } + return holder_->GetVectorOfProtoMessageLite(); +} + MEDIAPIPE_REGISTER_TYPE(::mediapipe::Packet, "::mediapipe::Packet", nullptr, nullptr); MEDIAPIPE_REGISTER_TYPE(::std::vector<::mediapipe::Packet>, diff --git a/mediapipe/framework/packet.h b/mediapipe/framework/packet.h index 11cfb5cc0..9e1946eaa 100644 --- a/mediapipe/framework/packet.h +++ b/mediapipe/framework/packet.h @@ -163,6 +163,13 @@ class Packet { // object type is protocol buffer, crashes otherwise. const proto_ns::MessageLite& GetProtoMessageLite() const; + // Returns a vector of pointers to MessageLite data, if the underlying + // object type is a vector of MessageLite data, returns an error otherwise. + // Note: This function is meant to be used internally within the MediaPipe + // framework only. + StatusOr<std::vector<const proto_ns::MessageLite*>> + GetVectorOfProtoMessageLitePtrs(); + // Returns an error if the packet does not contain data of type T. template <typename T> ::mediapipe::Status ValidateAsType() const; @@ -347,6 +354,12 @@ class HolderBase { // underlying object is protocol buffer type, otherwise, nullptr is returned. virtual const proto_ns::MessageLite* GetProtoMessageLite() = 0; + // Returns a vector<MessageLite*> for the data in the holder, if the + // underlying object is a vector of protocol buffer objects, otherwise, + // returns an error. + virtual StatusOr<std::vector<const proto_ns::MessageLite*>> + GetVectorOfProtoMessageLite() = 0; + private: size_t type_id_; }; @@ -364,6 +377,37 @@ const proto_ns::MessageLite* ConvertToProtoMessageLite(const T* data, return data; } +// Helper structs for determining if a type is an std::vector. +template <typename ItemT> +struct is_proto_vector : public std::false_type {}; + +template <typename ItemT, typename Allocator> +struct is_proto_vector<std::vector<ItemT, Allocator>> + : public std::is_base_of<proto_ns::MessageLite, ItemT>::type {}; + +// Helper function to create and return a vector of pointers to proto message +// elements of the vector passed into the function. +template <typename T> +StatusOr<std::vector<const proto_ns::MessageLite*>> +ConvertToVectorOfProtoMessageLitePtrs(const T* data, + /*is_proto_vector=*/std::false_type) { + return ::mediapipe::InvalidArgumentError(absl::StrCat( + "The Packet stores \"", typeid(T).name(), "\"", + "which is not convertible to vector<proto_ns::MessageLite*>.")); +} + +template <typename T> +StatusOr<std::vector<const proto_ns::MessageLite*>> +ConvertToVectorOfProtoMessageLitePtrs(const T* data, + /*is_proto_vector=*/std::true_type) { + std::vector<const proto_ns::MessageLite*> result; + for (auto it = data->begin(); it != data->end(); ++it) { + const proto_ns::MessageLite* element = &(*it); + result.push_back(element); + } + return result; +} + template <typename T> class Holder : public HolderBase { public: @@ -421,6 +465,14 @@ class Holder : public HolderBase { ptr_, std::is_base_of<proto_ns::MessageLite, T>()); } + // Returns a vector<MessageLite*> for the data in the holder, if the + // underlying object is a vector of protocol buffer objects, otherwise, + // returns an error. + StatusOr<std::vector<const proto_ns::MessageLite*>> + GetVectorOfProtoMessageLite() override { + return ConvertToVectorOfProtoMessageLitePtrs(ptr_, is_proto_vector<T>()); + } + private: // Call delete[] if T is an array, delete otherwise.
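A short usage sketch of the accessor added above (illustrative only; as the packet.h comment notes, the function is meant for internal framework use, and the error cases follow the packet.cc and packet.h hunks):

```cpp
#include <string>
#include <vector>

#include "mediapipe/framework/formats/landmark.pb.h"
#include "mediapipe/framework/packet.h"

void DemoProtoVectorAccess() {
  // A packet holding std::vector<SomeProto> satisfies is_proto_vector.
  mediapipe::Packet packet =
      mediapipe::MakePacket<std::vector<mediapipe::Landmark>>(
          std::vector<mediapipe::Landmark>(2));
  auto ptrs_or = packet.GetVectorOfProtoMessageLitePtrs();
  if (ptrs_or.ok()) {
    for (const auto* message : ptrs_or.ValueOrDie()) {
      // Each element is a const proto_ns::MessageLite*, ready to be
      // serialized, which is what the JNI bridge later in this change does.
      std::string serialized;
      message->SerializeToString(&serialized);
    }
  }
  // An empty Packet yields InternalError; any other payload that is not a
  // vector of protos yields InvalidArgumentError.
}
```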
template diff --git a/mediapipe/framework/port.h b/mediapipe/framework/port.h index 275f8ca98..a1aeae893 100644 --- a/mediapipe/framework/port.h +++ b/mediapipe/framework/port.h @@ -37,12 +37,16 @@ #if !defined(MEDIAPIPE_IOS) && !TARGET_OS_OSX #define MEDIAPIPE_IOS #endif +#if !defined(MEDIAPIPE_OSX) && TARGET_OS_OSX +#define MEDIAPIPE_OSX +#endif #endif // These platforms do not support OpenGL ES Compute Shaders (v3.1 and up), -// but can still run OpenGL ES 3.0 and below. -#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE) && \ - (defined(__APPLE__) || defined(__EMSCRIPTEN__)) +// but may or may not still be able to run other OpenGL code. +#if !defined(MEDIAPIPE_DISABLE_GL_COMPUTE) && \ + (defined(__APPLE__) || defined(__EMSCRIPTEN__) || \ + defined(MEDIAPIPE_DISABLE_GPU)) #define MEDIAPIPE_DISABLE_GL_COMPUTE #endif diff --git a/mediapipe/framework/port/opencv_core_inc.h b/mediapipe/framework/port/opencv_core_inc.h index 653d8c677..a358ad90b 100644 --- a/mediapipe/framework/port/opencv_core_inc.h +++ b/mediapipe/framework/port/opencv_core_inc.h @@ -20,6 +20,8 @@ #ifdef CV_VERSION_EPOCH // for OpenCV 2.x #include #else +#include + #include #endif diff --git a/mediapipe/framework/scheduler.cc b/mediapipe/framework/scheduler.cc index ce76f6530..c8263ec0d 100644 --- a/mediapipe/framework/scheduler.cc +++ b/mediapipe/framework/scheduler.cc @@ -158,9 +158,11 @@ void Scheduler::HandleIdle() { if (!active_sources_.empty() || throttled_graph_input_stream_count_ > 0) { VLOG(2) << "HandleIdle: unthrottling"; state_mutex_.Unlock(); - graph_->UnthrottleSources(); + bool did_unthrottle = graph_->UnthrottleSources(); state_mutex_.Lock(); - continue; + if (did_unthrottle) { + continue; + } } // Nothing left to do. diff --git a/mediapipe/framework/stream_handler/immediate_input_stream_handler.cc b/mediapipe/framework/stream_handler/immediate_input_stream_handler.cc index 8dd5c53d2..97b0ad782 100644 --- a/mediapipe/framework/stream_handler/immediate_input_stream_handler.cc +++ b/mediapipe/framework/stream_handler/immediate_input_stream_handler.cc @@ -115,9 +115,10 @@ void ImmediateInputStreamHandler::FillInputSet(Timestamp input_timestamp, AddPacketToShard(&input_set->Get(id), std::move(current_packet), stream_is_done); } else { - bool empty = false; - bool is_done = stream->MinTimestampOrBound(&empty) == Timestamp::Done(); - AddPacketToShard(&input_set->Get(id), Packet(), is_done); + Timestamp bound = stream->MinTimestampOrBound(nullptr); + AddPacketToShard(&input_set->Get(id), + Packet().At(bound.PreviousAllowedInStream()), + bound == Timestamp::Done()); } } } diff --git a/mediapipe/framework/stream_handler/sync_set_input_stream_handler.cc b/mediapipe/framework/stream_handler/sync_set_input_stream_handler.cc index 26bfe5aef..4175a7673 100644 --- a/mediapipe/framework/stream_handler/sync_set_input_stream_handler.cc +++ b/mediapipe/framework/stream_handler/sync_set_input_stream_handler.cc @@ -56,9 +56,16 @@ class SyncSetInputStreamHandler : public InputStreamHandler { NodeReadiness GetNodeReadiness(Timestamp* min_stream_timestamp) override; // Only invoked when associated GetNodeReadiness() returned kReadyForProcess. + // Populates packets for the ready sync-set, and populates timestamp bounds + // for all sync-sets. void FillInputSet(Timestamp input_timestamp, InputStreamShardSet* input_set) override; + // Populates timestamp bounds for streams outside the ready sync-set. 
+ void FillInputBounds(Timestamp input_timestamp, + InputStreamShardSet* input_set) + EXCLUSIVE_LOCKS_REQUIRED(mutex_); + private: absl::Mutex mutex_; // The ids of each set of inputs. @@ -181,6 +188,22 @@ NodeReadiness SyncSetInputStreamHandler::GetNodeReadiness( return NodeReadiness::kNotReady; } +void SyncSetInputStreamHandler::FillInputBounds( + Timestamp input_timestamp, InputStreamShardSet* input_set) { + for (int i = 0; i < sync_sets_.size(); ++i) { + if (i != ready_sync_set_index_) { + // Set the input streams for the not-ready sync sets. + for (CollectionItemId id : sync_sets_[i]) { + const auto stream = input_stream_managers_.Get(id); + Timestamp bound = stream->MinTimestampOrBound(nullptr); + AddPacketToShard(&input_set->Get(id), + Packet().At(bound.PreviousAllowedInStream()), + bound == Timestamp::Done()); + } + } + } +} + void SyncSetInputStreamHandler::FillInputSet(Timestamp input_timestamp, InputStreamShardSet* input_set) { // Assume that all current packets are already cleared. @@ -202,6 +225,7 @@ void SyncSetInputStreamHandler::FillInputSet(Timestamp input_timestamp, AddPacketToShard(&input_set->Get(id), std::move(current_packet), stream_is_done); } + FillInputBounds(input_timestamp, input_set); ready_sync_set_index_ = -1; ready_timestamp_ = Timestamp::Done(); } diff --git a/mediapipe/framework/timestamp.cc b/mediapipe/framework/timestamp.cc index bacbc3f4b..99d48b14b 100644 --- a/mediapipe/framework/timestamp.cc +++ b/mediapipe/framework/timestamp.cc @@ -123,13 +123,23 @@ std::string TimestampDiff::DebugString() const { Timestamp Timestamp::NextAllowedInStream() const { CHECK(IsAllowedInStream()) << "Timestamp is: " << DebugString(); - if (IsRangeValue() && *this != Max()) { - return *this + 1; - } else { - // Returning this value effectively means no futher timestamps may - // occur (however, the stream is not yet closed). + if (*this >= Max() || *this == PreStream()) { + // Indicates that no further timestamps may occur. return OneOverPostStream(); + } else if (*this < Min()) { + return Min(); } + return *this + 1; +} + +Timestamp Timestamp::PreviousAllowedInStream() const { + if (*this <= Min() || *this == PostStream()) { + // Indicates that no previous timestamps may occur. + return Unstarted(); + } else if (*this > Max()) { + return Max(); + } + return *this - 1; } std::ostream& operator<<(std::ostream& os, Timestamp arg) { diff --git a/mediapipe/framework/timestamp.h b/mediapipe/framework/timestamp.h index d6525c5c8..a79c4fd7b 100644 --- a/mediapipe/framework/timestamp.h +++ b/mediapipe/framework/timestamp.h @@ -182,12 +182,15 @@ class Timestamp { Timestamp operator++(int); Timestamp operator--(int); - // Returns the next timestamp at which a Packet may arrive in a stream, given - // that the current Packet is at *this timestamp. CHECKs that - // this->IsAllowedInStream()==true. Returns Timestamp::OneOverPostStream() if - // no Packets may follow one with the given timestamp. + // Returns the next timestamp in the range [Min .. Max], or + // OneOverPostStream() if no Packets may follow one with this timestamp. + // CHECKs that this->IsAllowedInStream(). Timestamp NextAllowedInStream() const; + // Returns the previous timestamp in the range [Min .. Max], or + // Unstarted() if no Packets may precede one with this timestamp.
+ Timestamp PreviousAllowedInStream() const; + private: TimestampBaseType timestamp_; }; diff --git a/mediapipe/framework/tool/BUILD b/mediapipe/framework/tool/BUILD index 61ec92e1e..a75cda32c 100644 --- a/mediapipe/framework/tool/BUILD +++ b/mediapipe/framework/tool/BUILD @@ -13,10 +13,6 @@ # limitations under the License. # -licenses(["notice"]) # Apache 2.0 - -package(default_visibility = ["//visibility:private"]) - load( "//mediapipe/framework/port:build_config.bzl", "mediapipe_cc_proto_library", @@ -27,6 +23,10 @@ load( "mediapipe_binary_graph", ) +licenses(["notice"]) # Apache 2.0 + +package(default_visibility = ["//visibility:private"]) + exports_files([ "simple_subgraph_template.cc", ]) diff --git a/mediapipe/gpu/metal.bzl b/mediapipe/gpu/metal.bzl index 2b45c6614..0f19ec2c5 100644 --- a/mediapipe/gpu/metal.bzl +++ b/mediapipe/gpu/metal.bzl @@ -132,7 +132,7 @@ def _metal_library_impl(ctx): ), ) - # This ridiculous circumlocution is needed because new_objc_provider rejects + # This circumlocution is needed because new_objc_provider rejects # an empty depset, with the error: # "Value for key header must be a set of File, instead found set of unknown." # It also rejects an explicit "None". diff --git a/mediapipe/graphs/hand_tracking/multi_hand_tracking_desktop.pbtxt b/mediapipe/graphs/hand_tracking/multi_hand_tracking_desktop.pbtxt index 1a554629c..aa4b4ae4d 100644 --- a/mediapipe/graphs/hand_tracking/multi_hand_tracking_desktop.pbtxt +++ b/mediapipe/graphs/hand_tracking/multi_hand_tracking_desktop.pbtxt @@ -94,7 +94,7 @@ node { output_stream: "multi_hand_rects" node_options: { [type.googleapis.com/mediapipe.AssociationCalculatorOptions] { - min_similarity_threshold: 0.1 + min_similarity_threshold: 0.5 } } } diff --git a/mediapipe/graphs/hand_tracking/multi_hand_tracking_desktop_live.pbtxt b/mediapipe/graphs/hand_tracking/multi_hand_tracking_desktop_live.pbtxt index 6e4a0331f..f009f49c3 100644 --- a/mediapipe/graphs/hand_tracking/multi_hand_tracking_desktop_live.pbtxt +++ b/mediapipe/graphs/hand_tracking/multi_hand_tracking_desktop_live.pbtxt @@ -85,7 +85,7 @@ node { output_stream: "multi_hand_rects" node_options: { [type.googleapis.com/mediapipe.AssociationCalculatorOptions] { - min_similarity_threshold: 0.1 + min_similarity_threshold: 0.5 } } } diff --git a/mediapipe/graphs/hand_tracking/multi_hand_tracking_mobile.pbtxt b/mediapipe/graphs/hand_tracking/multi_hand_tracking_mobile.pbtxt index c47bc3d8a..87f651177 100644 --- a/mediapipe/graphs/hand_tracking/multi_hand_tracking_mobile.pbtxt +++ b/mediapipe/graphs/hand_tracking/multi_hand_tracking_mobile.pbtxt @@ -105,7 +105,7 @@ node { output_stream: "multi_hand_rects" node_options: { [type.googleapis.com/mediapipe.AssociationCalculatorOptions] { - min_similarity_threshold: 0.1 + min_similarity_threshold: 0.5 } } } diff --git a/mediapipe/graphs/hand_tracking/subgraphs/hand_landmark_cpu.pbtxt b/mediapipe/graphs/hand_tracking/subgraphs/hand_landmark_cpu.pbtxt index ad52a5716..8865ea22c 100644 --- a/mediapipe/graphs/hand_tracking/subgraphs/hand_landmark_cpu.pbtxt +++ b/mediapipe/graphs/hand_tracking/subgraphs/hand_landmark_cpu.pbtxt @@ -101,7 +101,7 @@ node { } } -# Decodes the landmark tensors into a vector of lanmarks, where the landmark +# Decodes the landmark tensors into a list of landmarks, where the landmark # coordinates are normalized by the size of the input image to the model. 
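Returning to the Timestamp helpers above: the following assertions sketch how the new PreviousAllowedInStream() mirrors NextAllowedInStream(), with values taken from the timestamp.cc hunk (a standalone illustration, not part of the change):

```cpp
#include "mediapipe/framework/port/gtest.h"
#include "mediapipe/framework/timestamp.h"

TEST(TimestampExample, PreviousAllowedInStream) {
  using mediapipe::Timestamp;
  // Interior values simply step by one microsecond tick.
  EXPECT_EQ(Timestamp(101), Timestamp(100).NextAllowedInStream());
  EXPECT_EQ(Timestamp(99), Timestamp(100).PreviousAllowedInStream());
  // At the stream boundaries, no packet may precede or follow.
  EXPECT_EQ(Timestamp::Unstarted(),
            Timestamp::Min().PreviousAllowedInStream());
  EXPECT_EQ(Timestamp::Unstarted(),
            Timestamp::PostStream().PreviousAllowedInStream());
  EXPECT_EQ(Timestamp::OneOverPostStream(),
            Timestamp::Max().NextAllowedInStream());
  // Values beyond Max() clamp back to Max().
  EXPECT_EQ(Timestamp::Max(),
            Timestamp::OneOverPostStream().PreviousAllowedInStream());
}
```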
node { calculator: "TfLiteTensorsToLandmarksCalculator" diff --git a/mediapipe/graphs/hand_tracking/subgraphs/hand_landmark_gpu.pbtxt b/mediapipe/graphs/hand_tracking/subgraphs/hand_landmark_gpu.pbtxt index 283ce459c..229463454 100644 --- a/mediapipe/graphs/hand_tracking/subgraphs/hand_landmark_gpu.pbtxt +++ b/mediapipe/graphs/hand_tracking/subgraphs/hand_landmark_gpu.pbtxt @@ -96,7 +96,7 @@ node { } } -# Decodes the landmark tensors into a vector of lanmarks, where the landmark +# Decodes the landmark tensors into a list of landmarks, where the landmark # coordinates are normalized by the size of the input image to the model. node { calculator: "TfLiteTensorsToLandmarksCalculator" diff --git a/mediapipe/graphs/hand_tracking/subgraphs/multi_hand_detection_cpu.pbtxt b/mediapipe/graphs/hand_tracking/subgraphs/multi_hand_detection_cpu.pbtxt index aa0557318..928e75213 100644 --- a/mediapipe/graphs/hand_tracking/subgraphs/multi_hand_detection_cpu.pbtxt +++ b/mediapipe/graphs/hand_tracking/subgraphs/multi_hand_detection_cpu.pbtxt @@ -104,7 +104,7 @@ node { y_scale: 256.0 h_scale: 256.0 w_scale: 256.0 - min_score_thresh: 0.5 + min_score_thresh: 0.7 } } } @@ -117,7 +117,6 @@ node { node_options: { [type.googleapis.com/mediapipe.NonMaxSuppressionCalculatorOptions] { min_suppression_threshold: 0.3 - min_score_threshold: 0.5 overlap_type: INTERSECTION_OVER_UNION algorithm: WEIGHTED return_empty_detections: true diff --git a/mediapipe/graphs/hand_tracking/subgraphs/multi_hand_landmark.pbtxt b/mediapipe/graphs/hand_tracking/subgraphs/multi_hand_landmark.pbtxt index a380966ca..08b283a80 100644 --- a/mediapipe/graphs/hand_tracking/subgraphs/multi_hand_landmark.pbtxt +++ b/mediapipe/graphs/hand_tracking/subgraphs/multi_hand_landmark.pbtxt @@ -47,7 +47,7 @@ node { # BATCH_END timestamp, outputs the vector of landmarks at the BATCH_END # timestamp. node { - calculator: "EndLoopNormalizedLandmarksVectorCalculator" + calculator: "EndLoopNormalizedLandmarkListVectorCalculator" input_stream: "ITEM:single_hand_landmarks" input_stream: "BATCH_END:single_hand_rect_timestamp" output_stream: "ITERABLE:multi_hand_landmarks" @@ -67,7 +67,7 @@ node { # hand. If the hand presence for hand #i is false, the set of landmarks # corresponding to that hand are dropped from the vector. node { - calculator: "FilterLandmarksCollectionCalculator" + calculator: "FilterLandmarkListCollectionCalculator" input_stream: "ITERABLE:multi_hand_landmarks" input_stream: "CONDITION:multi_hand_presence" output_stream: "ITERABLE:filtered_multi_hand_landmarks" diff --git a/mediapipe/graphs/hand_tracking/subgraphs/multi_hand_renderer_cpu.pbtxt b/mediapipe/graphs/hand_tracking/subgraphs/multi_hand_renderer_cpu.pbtxt index 2dcd6b478..8406712e9 100644 --- a/mediapipe/graphs/hand_tracking/subgraphs/multi_hand_renderer_cpu.pbtxt +++ b/mediapipe/graphs/hand_tracking/subgraphs/multi_hand_renderer_cpu.pbtxt @@ -59,7 +59,7 @@ node { # timestamp for downstream calculators to inform them that all elements in the # vector have been processed. 
node { - calculator: "BeginLoopNormalizedLandmarksVectorCalculator" + calculator: "BeginLoopNormalizedLandmarkListVectorCalculator" input_stream: "ITERABLE:multi_hand_landmarks" output_stream: "ITEM:single_hand_landmarks" output_stream: "BATCH_END:landmark_timestamp" diff --git a/mediapipe/graphs/hand_tracking/subgraphs/multi_hand_renderer_gpu.pbtxt b/mediapipe/graphs/hand_tracking/subgraphs/multi_hand_renderer_gpu.pbtxt index 3ea9275dc..d7e300c02 100644 --- a/mediapipe/graphs/hand_tracking/subgraphs/multi_hand_renderer_gpu.pbtxt +++ b/mediapipe/graphs/hand_tracking/subgraphs/multi_hand_renderer_gpu.pbtxt @@ -59,7 +59,7 @@ node { # timestamp for downstream calculators to inform them that all elements in the # vector have been processed. node { - calculator: "BeginLoopNormalizedLandmarksVectorCalculator" + calculator: "BeginLoopNormalizedLandmarkListVectorCalculator" input_stream: "ITERABLE:multi_hand_landmarks" output_stream: "ITEM:single_hand_landmarks" output_stream: "BATCH_END:landmark_timestamp" diff --git a/mediapipe/java/com/google/mediapipe/components/FrameProcessor.java b/mediapipe/java/com/google/mediapipe/components/FrameProcessor.java index c63f0495a..8c901606e 100644 --- a/mediapipe/java/com/google/mediapipe/components/FrameProcessor.java +++ b/mediapipe/java/com/google/mediapipe/components/FrameProcessor.java @@ -148,6 +148,11 @@ public class FrameProcessor implements TextureFrameProcessor { hybridPath = true; } + /** Adds a callback to the graph to process packets from the specified output stream. */ + public void addPacketCallback(String outputStream, PacketCallback callback) { + mediapipeGraph.addPacketCallback(outputStream, callback); + } + public void addConsumer(TextureFrameConsumer listener) { synchronized (this) { List<TextureFrameConsumer> newConsumers = new ArrayList<>(consumers); diff --git a/mediapipe/java/com/google/mediapipe/framework/PacketGetter.java b/mediapipe/java/com/google/mediapipe/framework/PacketGetter.java index a1a05b175..d87bc8945 100644 --- a/mediapipe/java/com/google/mediapipe/framework/PacketGetter.java +++ b/mediapipe/java/com/google/mediapipe/framework/PacketGetter.java @@ -14,7 +14,10 @@ package com.google.mediapipe.framework; +import com.google.common.base.Preconditions; import com.google.common.flogger.FluentLogger; +import com.google.protobuf.InvalidProtocolBufferException; +import com.google.protobuf.Parser; import java.nio.ByteBuffer; import java.util.ArrayList; import java.util.List; @@ -134,6 +137,22 @@ public final class PacketGetter { return nativeGetFloat64Vector(packet.getNativeHandle()); } + public static <T> List<T> getProtoVector(final Packet packet, Parser<T> messageParser) { + byte[][] protoVector = nativeGetProtoVector(packet.getNativeHandle()); + Preconditions.checkNotNull( + protoVector, "Vector of protocol buffer objects should not be null!"); + try { + List<T> parsedMessageList = new ArrayList<>(); + for (byte[] message : protoVector) { + T parsedMessage = messageParser.parseFrom(message); + parsedMessageList.add(parsedMessage); + } + return parsedMessageList; + } catch (InvalidProtocolBufferException e) { + throw new IllegalArgumentException(e); + } + } + public static int getImageWidth(final Packet packet) { return nativeGetImageWidth(packet.getNativeHandle()); } @@ -277,6 +296,9 @@ public final class PacketGetter { private static native long[] nativeGetInt64Vector(long nativePacketHandle); private static native float[] nativeGetFloat32Vector(long nativePacketHandle); private static native double[] nativeGetFloat64Vector(long nativePacketHandle); + +
private static native byte[][] nativeGetProtoVector(long nativePacketHandle); + private static native int nativeGetImageWidth(long nativePacketHandle); private static native int nativeGetImageHeight(long nativePacketHandle); private static native boolean nativeGetImageData(long nativePacketHandle, ByteBuffer buffer); diff --git a/mediapipe/java/com/google/mediapipe/framework/jni/BUILD b/mediapipe/java/com/google/mediapipe/framework/jni/BUILD index 182226cbb..0e6e71815 100644 --- a/mediapipe/java/com/google/mediapipe/framework/jni/BUILD +++ b/mediapipe/java/com/google/mediapipe/framework/jni/BUILD @@ -134,6 +134,7 @@ cc_library( deps = [ "@com_google_absl//absl/synchronization", "//mediapipe/framework/port:logging", + "//mediapipe/framework/port:status", ] + select({ "//conditions:default": [ ], diff --git a/mediapipe/java/com/google/mediapipe/framework/jni/graph_jni.cc b/mediapipe/java/com/google/mediapipe/framework/jni/graph_jni.cc index d968ff5d0..e53b97235 100644 --- a/mediapipe/java/com/google/mediapipe/framework/jni/graph_jni.cc +++ b/mediapipe/java/com/google/mediapipe/framework/jni/graph_jni.cc @@ -69,25 +69,11 @@ mediapipe::Status AddStreamHeadersIntoGraph( return mediapipe::OkStatus(); } -// Creates a java MediaPipeException object for a mediapipe::Status. -jthrowable CreateMediaPipeException(JNIEnv* env, mediapipe::Status status) { - jclass status_cls = - env->FindClass("com/google/mediapipe/framework/MediaPipeException"); - jmethodID status_ctr = env->GetMethodID(status_cls, "<init>", "(I[B)V"); - int length = status.message().length(); - jbyteArray message_bytes = env->NewByteArray(length); - env->SetByteArrayRegion(message_bytes, 0, length, - reinterpret_cast<jbyte*>(const_cast<char*>( - std::string(status.message()).c_str()))); - return reinterpret_cast<jthrowable>( - env->NewObject(status_cls, status_ctr, status.code(), message_bytes)); -} - // Throws a MediaPipeException for any non-ok mediapipe::Status. // Note that the exception is thrown after execution returns to Java.
bool ThrowIfError(JNIEnv* env, mediapipe::Status status) { if (!status.ok()) { - env->Throw(CreateMediaPipeException(env, status)); + env->Throw(mediapipe::android::CreateMediaPipeException(env, status)); return true; } return false; diff --git a/mediapipe/java/com/google/mediapipe/framework/jni/jni_util.cc b/mediapipe/java/com/google/mediapipe/framework/jni/jni_util.cc index d383a7b0b..cb9453b6f 100644 --- a/mediapipe/java/com/google/mediapipe/framework/jni/jni_util.cc +++ b/mediapipe/java/com/google/mediapipe/framework/jni/jni_util.cc @@ -110,6 +110,19 @@ std::string JStringToStdString(JNIEnv* env, jstring jstr) { return str; } +jthrowable CreateMediaPipeException(JNIEnv* env, mediapipe::Status status) { + jclass status_cls = + env->FindClass("com/google/mediapipe/framework/MediaPipeException"); + jmethodID status_ctr = env->GetMethodID(status_cls, "<init>", "(I[B)V"); + int length = status.message().length(); + jbyteArray message_bytes = env->NewByteArray(length); + env->SetByteArrayRegion(message_bytes, 0, length, + reinterpret_cast<jbyte*>(const_cast<char*>( + std::string(status.message()).c_str()))); + return reinterpret_cast<jthrowable>( + env->NewObject(status_cls, status_ctr, status.code(), message_bytes)); +} + } // namespace android namespace java { diff --git a/mediapipe/java/com/google/mediapipe/framework/jni/jni_util.h b/mediapipe/java/com/google/mediapipe/framework/jni/jni_util.h index 81a44919d..9efa28304 100644 --- a/mediapipe/java/com/google/mediapipe/framework/jni/jni_util.h +++ b/mediapipe/java/com/google/mediapipe/framework/jni/jni_util.h @@ -19,12 +19,17 @@ #include <jni.h> +#include "mediapipe/framework/port/status.h" + namespace mediapipe { namespace android { std::string JStringToStdString(JNIEnv* env, jstring jstr); +// Creates a java MediaPipeException object for a mediapipe::Status.
+jthrowable CreateMediaPipeException(JNIEnv* env, mediapipe::Status status); + } // namespace android namespace java { diff --git a/mediapipe/java/com/google/mediapipe/framework/jni/packet_getter_jni.cc b/mediapipe/java/com/google/mediapipe/framework/jni/packet_getter_jni.cc index 9940d186e..1cab1aca7 100644 --- a/mediapipe/java/com/google/mediapipe/framework/jni/packet_getter_jni.cc +++ b/mediapipe/java/com/google/mediapipe/framework/jni/packet_getter_jni.cc @@ -19,8 +19,10 @@ #include "mediapipe/framework/formats/time_series_header.pb.h" #include "mediapipe/framework/formats/video_stream_header.h" #include "mediapipe/framework/port/core_proto_inc.h" +#include "mediapipe/framework/port/proto_ns.h" #include "mediapipe/java/com/google/mediapipe/framework/jni/colorspace.h" #include "mediapipe/java/com/google/mediapipe/framework/jni/graph.h" +#include "mediapipe/java/com/google/mediapipe/framework/jni/jni_util.h" #ifndef MEDIAPIPE_DISABLE_GPU #include "mediapipe/gpu/gl_calculator_helper.h" #endif // !defined(MEDIAPIPE_DISABLE_GPU) @@ -141,6 +143,37 @@ JNIEXPORT jbyteArray JNICALL PACKET_GETTER_METHOD(nativeGetProtoBytes)( return data; } +JNIEXPORT jobjectArray JNICALL PACKET_GETTER_METHOD(nativeGetProtoVector)( + JNIEnv* env, jobject thiz, jlong packet) { + mediapipe::Packet mediapipe_packet = + mediapipe::android::Graph::GetPacketFromHandle(packet); + auto get_proto_vector = mediapipe_packet.GetVectorOfProtoMessageLitePtrs(); + if (!get_proto_vector.ok()) { + env->Throw(mediapipe::android::CreateMediaPipeException( + env, get_proto_vector.status())); + } + const std::vector<const ::mediapipe::proto_ns::MessageLite*>& proto_vector = + get_proto_vector.ValueOrDie(); + jobjectArray proto_array = + env->NewObjectArray(proto_vector.size(), env->FindClass("[B"), nullptr); + for (int i = 0; i < proto_vector.size(); ++i) { + const ::mediapipe::proto_ns::MessageLite* proto_message = proto_vector[i]; + + // Convert the proto object into a Java byte array. + std::string serialized; + proto_message->SerializeToString(&serialized); + jbyteArray byte_array = env->NewByteArray(serialized.size()); + env->SetByteArrayRegion(byte_array, 0, serialized.size(), + reinterpret_cast<const jbyte*>(serialized.c_str())); + + // Add the serialized proto byte_array to the output array.
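The relocation above means any JNI entry point can surface a non-ok mediapipe::Status to Java the same way graph_jni.cc and the new nativeGetProtoVector do. A compressed sketch of the reuse pattern (`MaybeThrow` is a hypothetical name used here for illustration):

```cpp
#include <jni.h>

#include "mediapipe/framework/port/status.h"
#include "mediapipe/java/com/google/mediapipe/framework/jni/jni_util.h"

// Converts a non-ok Status into a Java MediaPipeException via the shared
// jni_util helper; the exception surfaces once control returns to Java.
bool MaybeThrow(JNIEnv* env, const mediapipe::Status& status) {
  if (!status.ok()) {
    env->Throw(mediapipe::android::CreateMediaPipeException(env, status));
    return true;
  }
  return false;
}
```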
+ env->SetObjectArrayElement(proto_array, i, byte_array); + env->DeleteLocalRef(byte_array); + } + + return proto_array; +} + JNIEXPORT jshortArray JNICALL PACKET_GETTER_METHOD(nativeGetInt16Vector)( JNIEnv* env, jobject thiz, jlong packet) { const std::vector<int16>& values = diff --git a/mediapipe/java/com/google/mediapipe/framework/jni/packet_getter_jni.h b/mediapipe/java/com/google/mediapipe/framework/jni/packet_getter_jni.h index cb35bac66..72c55935d 100644 --- a/mediapipe/java/com/google/mediapipe/framework/jni/packet_getter_jni.h +++ b/mediapipe/java/com/google/mediapipe/framework/jni/packet_getter_jni.h @@ -69,6 +69,9 @@ JNIEXPORT jbyteArray JNICALL PACKET_GETTER_METHOD(nativeGetBytes)(JNIEnv* env, JNIEXPORT jbyteArray JNICALL PACKET_GETTER_METHOD(nativeGetProtoBytes)( JNIEnv* env, jobject thiz, jlong packet); +JNIEXPORT jobjectArray JNICALL PACKET_GETTER_METHOD(nativeGetProtoVector)( + JNIEnv* env, jobject thiz, jlong packet); + JNIEXPORT jshortArray JNICALL PACKET_GETTER_METHOD(nativeGetInt16Vector)( JNIEnv* env, jobject thiz, jlong packet); diff --git a/mediapipe/java/com/google/mediapipe/mediapipe_aar.bzl b/mediapipe/java/com/google/mediapipe/mediapipe_aar.bzl index eaf4612cf..0c0d2aac4 100644 --- a/mediapipe/java/com/google/mediapipe/mediapipe_aar.bzl +++ b/mediapipe/java/com/google/mediapipe/mediapipe_aar.bzl @@ -64,23 +64,18 @@ cat > $(OUTS) < $(OUTS) < $(OUTS) <_dummy_app target below) diff --git a/mediapipe/util/audio_decoder.cc b/mediapipe/util/audio_decoder.cc index 4f284f106..b101e1c3e 100644 --- a/mediapipe/util/audio_decoder.cc +++ b/mediapipe/util/audio_decoder.cc @@ -340,9 +340,7 @@ AudioPacketProcessor::AudioPacketProcessor(const AudioStreamOptions& options) DCHECK(absl::little_endian::IsLittleEndian()); } -mediapipe::Status AudioPacketProcessor::Open(int id, - - AVStream* stream) { +mediapipe::Status AudioPacketProcessor::Open(int id, AVStream* stream) { id_ = id; avcodec_ = avcodec_find_decoder(stream->codecpar->codec_id); if (!avcodec_) { diff --git a/mediapipe/util/sequence/media_sequence.cc b/mediapipe/util/sequence/media_sequence.cc index d50055da6..9f00be3cd 100644 --- a/mediapipe/util/sequence/media_sequence.cc +++ b/mediapipe/util/sequence/media_sequence.cc @@ -274,10 +274,11 @@ float TimestampsToRate(int64 first_timestamp, int64 second_timestamp) { // overwriting with modified values. if (!GetUnmodifiedBBoxTimestampSize(prefix, *sequence)) { for (int i = 0; i < num_frames; ++i) { - if (bbox_index_if_annotated[i] >= 0 && - GetBBoxIsAnnotatedAt(prefix, *sequence, i)) { - AddUnmodifiedBBoxTimestamp( - prefix, box_timestamps[bbox_index_if_annotated[i]], sequence); + const int bbox_index = bbox_index_if_annotated[i]; + if (bbox_index >= 0 && + GetBBoxIsAnnotatedAt(prefix, *sequence, bbox_index)) { + AddUnmodifiedBBoxTimestamp(prefix, box_timestamps[bbox_index], + sequence); } } } diff --git a/mediapipe/util/sequence/media_sequence.py b/mediapipe/util/sequence/media_sequence.py index fc1f15d32..7a443afe8 100644 --- a/mediapipe/util/sequence/media_sequence.py +++ b/mediapipe/util/sequence/media_sequence.py @@ -284,7 +284,7 @@ REGION_TRACK_CONFIDENCE_KEY = "region/track/confidence" # have overlapping track ids. REGION_CLASS_INDEX_KEY = "region/class/index" REGION_CLASS_STRING_KEY = "region/class/string" -REGION_CLASS_CONFIDENCE_KEY = "region/class/confidencee" +REGION_CLASS_CONFIDENCE_KEY = "region/class/confidence" # The timestamp of the region annotation in microseconds.
REGION_TIMESTAMP_KEY = "region/timestamp" # The original timestamp in microseconds for region annotations. diff --git a/mediapipe/util/sequence/media_sequence_test.cc b/mediapipe/util/sequence/media_sequence_test.cc index 26bfb6fc0..93b3267bd 100644 --- a/mediapipe/util/sequence/media_sequence_test.cc +++ b/mediapipe/util/sequence/media_sequence_test.cc @@ -738,10 +738,11 @@ TEST(MediaSequenceTest, AddImageTimestamp(10, &sequence); AddImageTimestamp(20, &sequence); AddImageTimestamp(30, &sequence); + AddImageTimestamp(40, &sequence); - AddBBoxTimestamp(9, &sequence); - AddBBoxTimestamp(21, &sequence); - AddBBoxTimestamp(22, &sequence); // Will be dropped in the output. + AddBBoxTimestamp(11, &sequence); + AddBBoxTimestamp(12, &sequence); // Will be dropped in the output. + AddBBoxTimestamp(39, &sequence); std::vector<std::vector<Location>> bboxes = { {Location::CreateRelativeBBoxLocation(0.1, 0.2, 0.7, 0.7)}, @@ -753,32 +754,35 @@ TEST(MediaSequenceTest, MP_ASSERT_OK(ReconcileMetadata(true, false, &sequence)); - ASSERT_EQ(GetBBoxTimestampSize(sequence), 3); + ASSERT_EQ(GetBBoxTimestampSize(sequence), 4); ASSERT_EQ(GetBBoxTimestampAt(sequence, 0), 10); ASSERT_EQ(GetBBoxTimestampAt(sequence, 1), 20); ASSERT_EQ(GetBBoxTimestampAt(sequence, 2), 30); + ASSERT_EQ(GetBBoxTimestampAt(sequence, 3), 40); - ASSERT_EQ(GetBBoxIsAnnotatedSize(sequence), 3); + ASSERT_EQ(GetBBoxIsAnnotatedSize(sequence), 4); ASSERT_EQ(GetBBoxIsAnnotatedAt(sequence, 0), true); - ASSERT_EQ(GetBBoxIsAnnotatedAt(sequence, 1), true); + ASSERT_EQ(GetBBoxIsAnnotatedAt(sequence, 1), false); ASSERT_EQ(GetBBoxIsAnnotatedAt(sequence, 2), false); + ASSERT_EQ(GetBBoxIsAnnotatedAt(sequence, 3), true); // Unmodified timestamp is only stored for is_annotated == true. ASSERT_EQ(GetUnmodifiedBBoxTimestampSize(sequence), 2); - ASSERT_EQ(GetUnmodifiedBBoxTimestampAt(sequence, 0), 9); - ASSERT_EQ(GetUnmodifiedBBoxTimestampAt(sequence, 1), 21); + ASSERT_EQ(GetUnmodifiedBBoxTimestampAt(sequence, 0), 11); + ASSERT_EQ(GetUnmodifiedBBoxTimestampAt(sequence, 1), 39); // A second reconciliation should not corrupt unmodified bbox timestamps. MP_ASSERT_OK(ReconcileMetadata(true, false, &sequence)); - ASSERT_EQ(GetBBoxTimestampSize(sequence), 3); + ASSERT_EQ(GetBBoxTimestampSize(sequence), 4); ASSERT_EQ(GetBBoxTimestampAt(sequence, 0), 10); ASSERT_EQ(GetBBoxTimestampAt(sequence, 1), 20); ASSERT_EQ(GetBBoxTimestampAt(sequence, 2), 30); + ASSERT_EQ(GetBBoxTimestampAt(sequence, 3), 40); ASSERT_EQ(GetUnmodifiedBBoxTimestampSize(sequence), 2); - ASSERT_EQ(GetUnmodifiedBBoxTimestampAt(sequence, 0), 9); - ASSERT_EQ(GetUnmodifiedBBoxTimestampAt(sequence, 1), 21); + ASSERT_EQ(GetUnmodifiedBBoxTimestampAt(sequence, 0), 11); + ASSERT_EQ(GetUnmodifiedBBoxTimestampAt(sequence, 1), 39); } TEST(MediaSequenceTest, ReconcileMetadataBoxAnnotationsFillsMissing) { diff --git a/third_party/BUILD b/third_party/BUILD index 65fda9d22..4c74553b1 100644 --- a/third_party/BUILD +++ b/third_party/BUILD @@ -118,13 +118,16 @@ android_library( ], ) -# TODO: Get the AARs from Google's Maven Repository.
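The updated expectations above encode the reconciliation rule: each bbox timestamp snaps to the nearest image frame, and a later annotation landing on an already-claimed frame is dropped. A standalone sketch of that pairing rule under those assumptions (the real logic lives in media_sequence.cc; `PairBBoxesToFrames` is our illustrative name):

```cpp
#include <cstdint>
#include <cstdlib>
#include <map>
#include <vector>

// Maps each bbox timestamp to the nearest image timestamp, keeping only the
// first bbox that claims a given frame. For the test above, images
// {10, 20, 30, 40} and bboxes {11, 12, 39} yield {10: 11, 40: 39}, with 12
// dropped because frame 10 is already claimed by 11.
std::map<int64_t, int64_t> PairBBoxesToFrames(
    const std::vector<int64_t>& image_ts,  // assumed non-empty
    const std::vector<int64_t>& bbox_ts) {
  std::map<int64_t, int64_t> frame_to_bbox;
  for (int64_t bt : bbox_ts) {
    int64_t best = image_ts.front();
    for (int64_t it : image_ts) {
      if (std::llabs(it - bt) < std::llabs(best - bt)) best = it;
    }
    frame_to_bbox.emplace(best, bt);  // emplace keeps the first claimant
  }
  return frame_to_bbox;
}
```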
-aar_import( +android_library( name = "camerax_core", - aar = "camera-core-1.0.0-alpha01.aar", + exports = [ + "@maven//:androidx_camera_camera_core", + ], ) -aar_import( +android_library( name = "camera2", - aar = "camera-camera2-1.0.0-alpha01.aar", + exports = [ + "@maven//:androidx_camera_camera_camera2", + ], ) diff --git a/third_party/camera-camera2-1.0.0-alpha01.aar b/third_party/camera-camera2-1.0.0-alpha01.aar deleted file mode 100644 index ee89bb96d..000000000 Binary files a/third_party/camera-camera2-1.0.0-alpha01.aar and /dev/null differ diff --git a/third_party/camera-core-1.0.0-alpha01.aar b/third_party/camera-core-1.0.0-alpha01.aar deleted file mode 100644 index 593c3bad2..000000000 Binary files a/third_party/camera-core-1.0.0-alpha01.aar and /dev/null differ diff --git a/third_party/com_github_glog_glog_9779e5ea6ef59562b030248947f787d1256132ae.diff b/third_party/com_github_glog_glog_9779e5ea6ef59562b030248947f787d1256132ae.diff index 776e6d671..89e80a9c3 100644 --- a/third_party/com_github_glog_glog_9779e5ea6ef59562b030248947f787d1256132ae.diff +++ b/third_party/com_github_glog_glog_9779e5ea6ef59562b030248947f787d1256132ae.diff @@ -1,9 +1,3 @@ -commit 9779e5ea6ef59562b030248947f787d1256132ae -Author: jqtang -Date: Wed Sep 18 11:43:48 2019 -0700 - - Add glog Android support for MediaPipe. - diff --git a/src/logging.cc b/src/logging.cc index 0b5e6ee..be5a506 100644 --- a/src/logging.cc diff --git a/third_party/com_google_absl_f863b622fe13612433fdf43f76547d5edda0c93001.diff b/third_party/com_google_absl_f863b622fe13612433fdf43f76547d5edda0c93001.diff new file mode 100644 index 000000000..0cd2dffa4 --- /dev/null +++ b/third_party/com_google_absl_f863b622fe13612433fdf43f76547d5edda0c93001.diff @@ -0,0 +1,14 @@ +diff --git a/absl/time/internal/cctz/BUILD.bazel b/absl/time/internal/cctz/BUILD.bazel +index 9fceffe..e7f9d01 100644 +--- a/absl/time/internal/cctz/BUILD.bazel ++++ b/absl/time/internal/cctz/BUILD.bazel +@@ -69,8 +69,5 @@ cc_library( + "include/cctz/zone_info_source.h", + ], + linkopts = select({ +- ":osx": [ +- "-framework Foundation", +- ], + ":ios": [ + "-framework Foundation", + ], \ No newline at end of file diff --git a/third_party/opencv_android.BUILD b/third_party/opencv_android.BUILD index e0e6bd607..3bdfc88d3 100644 --- a/third_party/opencv_android.BUILD +++ b/third_party/opencv_android.BUILD @@ -5,7 +5,7 @@ licenses(["notice"]) # BSD license exports_files(["LICENSE"]) -OPENCV_LIBRARY_NAME = "libopencv_java4.so" +OPENCV_LIBRARY_NAME = "libopencv_java3.so" OPENCVANDROIDSDK_NATIVELIBS_PATH = "sdk/native/libs/"