my setup for mediapipe

parent 4c68eb4a70
commit 928ab96ee1
36  WORKSPACE

@@ -2,6 +2,42 @@ workspace(name = "mediapipe")

load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive")

# System libs files
new_local_repository(
    name = "system_libs",
    # pkg-config --variable=libdir x11
    path = "/usr/lib/x86_64-linux-gnu",
    build_file_content = """
cc_library(
    name = "libncurses",
    srcs = ["libncurses.so"],
    visibility = ["//visibility:public"],
)
cc_library(
    name = "libmosquittopp-dev",
    srcs = ["libmosquittopp.so"],
    visibility = ["//visibility:public"],
)
""",
)

# System header files
new_local_repository(
    name = "system_headers",
    # pkg-config --variable=libdir x11
    path = "/usr/include",
    build_file_content = """
cc_library(
    name = "headers",
    hdrs = glob([
        "*.h", "*.hpp",
    ]),
    visibility = ["//visibility:public"],
)
""",
)

skylib_version = "0.9.0"
http_archive(
    name = "bazel_skylib",
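The two new_local_repository rules above only register the system library directory and /usr/include as external Bazel repositories; a BUILD target still has to depend on them explicitly. A minimal sketch of such a dependency, assuming a hypothetical mqtt_example target that is not part of this commit:

cc_binary(
    name = "mqtt_example",        # hypothetical target, for illustration only
    srcs = ["mqtt_example.cc"],   # hypothetical source file
    deps = [
        "@system_libs//:libmosquittopp-dev",  # links libmosquittopp.so from /usr/lib/x86_64-linux-gnu
        "@system_headers//:headers",          # exposes the top-level *.h / *.hpp files under /usr/include
    ],
)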
3  bldDynamicGestures.sh  (Executable file)

@@ -0,0 +1,3 @@

bazel build -c opt --define MEDIAPIPE_DISABLE_GPU=1 \
    --define 3D=true myMediapipe/projects/dynamicGestures:dynamic_gestures_cpu_tflite
3  bldHandsLMtoFile.sh  (Executable file)

@@ -0,0 +1,3 @@

bazel build -c opt --define MEDIAPIPE_DISABLE_GPU=1 \
    --define 3D=true myMediapipe/projects/staticGestures/staticGesturesCaptureToFile:static_gestures_cpu_tflite
3  bldStaticGestures.sh  (Executable file)

@@ -0,0 +1,3 @@

bazel build -c opt --define MEDIAPIPE_DISABLE_GPU=1 \
    --define 3D=true myMediapipe/projects/staticGestures/staticGestures:static_gestures_cpu_tflite
3  buildHandLandmarks.sh  (Executable file)

@@ -0,0 +1,3 @@

bazel build -c opt --define MEDIAPIPE_DISABLE_GPU=1 \
    --define 3D=true mediapipe/examples/desktop/hand_landmark:hand_landmark_cpu__tflite
3  build_simple_io.sh  (Executable file)

@@ -0,0 +1,3 @@

bazel build -c opt --define MEDIAPIPE_DISABLE_GPU=1 \
    mediapipe/examples/desktop/simpleIO:simple_io_tflite
@@ -12,8 +12,6 @@

// See the License for the specific language governing permissions and
// limitations under the License.

#include <stdlib.h>

#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/formats/image_format.pb.h"
#include "mediapipe/framework/formats/image_frame.h"

@@ -68,22 +66,6 @@ ImageFormat::Format GetImageFormat(int num_channels) {

//   output_stream: "VIDEO:video_frames"
//   output_stream: "VIDEO_PRESTREAM:video_header"
// }
//
// OpenCV's VideoCapture doesn't decode audio tracks. If the audio tracks need
// to be saved, specify an output side packet with tag "SAVED_AUDIO_PATH".
// The calculator will call FFmpeg binary to save audio tracks as an aac file.
// If the audio tracks can't be extracted by FFmpeg, the output side packet
// will contain an empty std::string.
//
// Example config:
// node {
//   calculator: "OpenCvVideoDecoderCalculator"
//   input_side_packet: "INPUT_FILE_PATH:input_file_path"
//   output_side_packet: "SAVED_AUDIO_PATH:audio_path"
//   output_stream: "VIDEO:video_frames"
//   output_stream: "VIDEO_PRESTREAM:video_header"
// }
//
class OpenCvVideoDecoderCalculator : public CalculatorBase {
 public:
  static ::mediapipe::Status GetContract(CalculatorContract* cc) {

@@ -92,9 +74,6 @@ class OpenCvVideoDecoderCalculator : public CalculatorBase {

    if (cc->Outputs().HasTag("VIDEO_PRESTREAM")) {
      cc->Outputs().Tag("VIDEO_PRESTREAM").Set<VideoHeader>();
    }
    if (cc->OutputSidePackets().HasTag("SAVED_AUDIO_PATH")) {
      cc->OutputSidePackets().Tag("SAVED_AUDIO_PATH").Set<std::string>();
    }
    return ::mediapipe::OkStatus();
  }

@@ -106,15 +85,17 @@ class OpenCvVideoDecoderCalculator : public CalculatorBase {

      return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC)
             << "Fail to open video file at " << input_file_path;
    }
    width_ = static_cast<int>(cap_->get(cv::CAP_PROP_FRAME_WIDTH));
    height_ = static_cast<int>(cap_->get(cv::CAP_PROP_FRAME_HEIGHT));
    double fps = static_cast<double>(cap_->get(cv::CAP_PROP_FPS));
    frame_count_ = static_cast<int>(cap_->get(cv::CAP_PROP_FRAME_COUNT));

    // Unfortunately, cap_->get(cv::CAP_PROP_FORMAT) always returns CV_8UC1
    // back. To get correct image format, we read the first frame from the video
    // and get the number of channels.
    cv::Mat frame;
    cap_->read(frame);
    width_ = static_cast<int>(cap_->get(cv::CAP_PROP_FRAME_WIDTH));
    height_ = static_cast<int>(cap_->get(cv::CAP_PROP_FRAME_HEIGHT));
    double fps = static_cast<double>(cap_->get(cv::CAP_PROP_FPS));
    frame_count_ = static_cast<int>(cap_->get(cv::CAP_PROP_FRAME_COUNT));

    if (frame.empty()) {
      return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC)
             << "Fail to read any frames from the video file at "

@@ -127,11 +108,16 @@ class OpenCvVideoDecoderCalculator : public CalculatorBase {

             << input_file_path;
    }

    if (fps <= 0 || frame_count_ <= 0 || width_ <= 0 || height_ <= 0) {
    if (fps <= 0 || frame_count_ == 0 || width_ <= 0 || height_ <= 0) {
      return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC)
             << "Fail to make video header due to the incorrect metadata from "
                "the video file at "
             << input_file_path;
             << input_file_path
             << ", format:" << format_
             << ", fps:" << fps
             << ", frame count:" << frame_count_
             << ", width:" << width_
             << ", height:" << height_;
    }
    auto header = absl::make_unique<VideoHeader>();
    header->format = format_;

@@ -144,39 +130,9 @@ class OpenCvVideoDecoderCalculator : public CalculatorBase {

      cc->Outputs()
          .Tag("VIDEO_PRESTREAM")
          .Add(header.release(), Timestamp::PreStream());
      cc->Outputs().Tag("VIDEO_PRESTREAM").Close();
    }
    // Rewind to the very first frame.
    cap_->set(cv::CAP_PROP_POS_AVI_RATIO, 0);

    if (cc->OutputSidePackets().HasTag("SAVED_AUDIO_PATH")) {
#ifdef HAVE_FFMPEG
      std::string saved_audio_path = std::tmpnam(nullptr);
      std::string ffmpeg_command =
          absl::StrCat("ffmpeg -nostats -loglevel 0 -i ", input_file_path,
                       " -vn -f adts ", saved_audio_path);
      system(ffmpeg_command.c_str());
      int status_code = system(absl::StrCat("ls ", saved_audio_path).c_str());
      if (status_code == 0) {
        cc->OutputSidePackets()
            .Tag("SAVED_AUDIO_PATH")
            .Set(MakePacket<std::string>(saved_audio_path));
      } else {
        LOG(WARNING) << "FFmpeg can't extract audio from " << input_file_path
                     << " by executing the following command: "
                     << ffmpeg_command;
        cc->OutputSidePackets()
            .Tag("SAVED_AUDIO_PATH")
            .Set(MakePacket<std::string>(std::string()));
      }
#else
      return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC)
             << "OpenCVVideoDecoderCalculator can't save the audio file "
                "because FFmpeg is not installed. Please remove "
                "output_side_packet: \"SAVED_AUDIO_PATH\" from the node "
                "config.";
#endif
    }
    return ::mediapipe::OkStatus();
  }

@@ -205,14 +161,8 @@ class OpenCvVideoDecoderCalculator : public CalculatorBase {

                   cv::COLOR_BGRA2RGBA);
      }
    }
    // If the timestamp of the current frame is not greater than the one of the
    // previous frame, the new frame will be discarded.
    if (prev_timestamp_ < timestamp) {
      cc->Outputs().Tag("VIDEO").Add(image_frame.release(), timestamp);
      prev_timestamp_ = timestamp;
      decoded_frames_++;
    }

    cc->Outputs().Tag("VIDEO").Add(image_frame.release(), timestamp);
    decoded_frames_++;
    return ::mediapipe::OkStatus();
  }

@@ -235,7 +185,6 @@ class OpenCvVideoDecoderCalculator : public CalculatorBase {

  int frame_count_;
  int decoded_frames_ = 0;
  ImageFormat::Format format_;
  Timestamp prev_timestamp_ = Timestamp::Unset();
};

REGISTER_CALCULATOR(OpenCvVideoDecoderCalculator);
@@ -0,0 +1,250 @@

// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <stdlib.h>

#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/formats/image_format.pb.h"
#include "mediapipe/framework/formats/image_frame.h"
#include "mediapipe/framework/formats/image_frame_opencv.h"
#include "mediapipe/framework/formats/video_stream_header.h"
#include "mediapipe/framework/port/opencv_imgproc_inc.h"
#include "mediapipe/framework/port/opencv_video_inc.h"
#include "mediapipe/framework/port/status.h"
#include "mediapipe/framework/tool/status_util.h"

namespace mediapipe {

namespace {
// cv::VideoCapture set data type to unsigned char by default. Therefore, the
// image format is only related to the number of channles the cv::Mat has.
ImageFormat::Format GetImageFormat(int num_channels) {
  ImageFormat::Format format;
  switch (num_channels) {
    case 1:
      format = ImageFormat::GRAY8;
      break;
    case 3:
      format = ImageFormat::SRGB;
      break;
    case 4:
      format = ImageFormat::SRGBA;
      break;
    default:
      format = ImageFormat::UNKNOWN;
      break;
  }
  return format;
}
}  // namespace

// This Calculator takes no input streams and produces video packets.
// All streams and input side packets are specified using tags and all of them
// are optional.
//
// Output Streams:
//   VIDEO: Output video frames (ImageFrame).
//   VIDEO_PRESTREAM:
//       Optional video header information output at
//       Timestamp::PreStream() for the corresponding stream.
// Input Side Packets:
//   INPUT_FILE_PATH: The input file path.
//
// Example config:
// node {
//   calculator: "OpenCvVideoDecoderCalculator"
//   input_side_packet: "INPUT_FILE_PATH:input_file_path"
//   output_stream: "VIDEO:video_frames"
//   output_stream: "VIDEO_PRESTREAM:video_header"
// }
//
// OpenCV's VideoCapture doesn't decode audio tracks. If the audio tracks need
// to be saved, specify an output side packet with tag "SAVED_AUDIO_PATH".
// The calculator will call FFmpeg binary to save audio tracks as an aac file.
// If the audio tracks can't be extracted by FFmpeg, the output side packet
// will contain an empty std::string.
//
// Example config:
// node {
//   calculator: "OpenCvVideoDecoderCalculator"
//   input_side_packet: "INPUT_FILE_PATH:input_file_path"
//   output_side_packet: "SAVED_AUDIO_PATH:audio_path"
//   output_stream: "VIDEO:video_frames"
//   output_stream: "VIDEO_PRESTREAM:video_header"
// }
//
class OpenCvVideoDecoderCalculator : public CalculatorBase {
 public:
  static ::mediapipe::Status GetContract(CalculatorContract* cc) {
    cc->InputSidePackets().Tag("INPUT_FILE_PATH").Set<std::string>();
    cc->Outputs().Tag("VIDEO").Set<ImageFrame>();
    if (cc->Outputs().HasTag("VIDEO_PRESTREAM")) {
      cc->Outputs().Tag("VIDEO_PRESTREAM").Set<VideoHeader>();
    }
    if (cc->OutputSidePackets().HasTag("SAVED_AUDIO_PATH")) {
      cc->OutputSidePackets().Tag("SAVED_AUDIO_PATH").Set<std::string>();
    }
    return ::mediapipe::OkStatus();
  }

  ::mediapipe::Status Open(CalculatorContext* cc) override {
    const std::string& input_file_path =
        cc->InputSidePackets().Tag("INPUT_FILE_PATH").Get<std::string>();
    cap_ = absl::make_unique<cv::VideoCapture>(input_file_path);
    if (!cap_->isOpened()) {
      return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC)
             << "Fail to open video file at " << input_file_path;
    }

    // Unfortunately, cap_->get(cv::CAP_PROP_FORMAT) always returns CV_8UC1
    // back. To get correct image format, we read the first frame from the video
    // and get the number of channels.
    cv::Mat frame;
    cap_->read(frame);
    width_ = static_cast<int>(cap_->get(cv::CAP_PROP_FRAME_WIDTH));
    height_ = static_cast<int>(cap_->get(cv::CAP_PROP_FRAME_HEIGHT));
    double fps = static_cast<double>(cap_->get(cv::CAP_PROP_FPS));
    frame_count_ = static_cast<int>(cap_->get(cv::CAP_PROP_FRAME_COUNT));

    if (frame.empty()) {
      return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC)
             << "Fail to read any frames from the video file at "
             << input_file_path;
    }
    format_ = GetImageFormat(frame.channels());
    if (format_ == ImageFormat::UNKNOWN) {
      return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC)
             << "Unsupported video format of the video file at "
             << input_file_path;
    }

    if (fps <= 0 || frame_count_ == 0 || width_ <= 0 || height_ <= 0) {
      return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC)
             << "Fail to make video header due to the incorrect metadata from "
                "the video file at "
             << input_file_path
             << ", format:" << format_
             << ", fps:" << fps
             << ", frame count:" << frame_count_
             << ", width:" << width_
             << ", height:" << height_;
    }
    auto header = absl::make_unique<VideoHeader>();
    header->format = format_;
    header->width = width_;
    header->height = height_;
    header->frame_rate = fps;
    header->duration = frame_count_ / fps;

    if (cc->Outputs().HasTag("VIDEO_PRESTREAM")) {
      cc->Outputs()
          .Tag("VIDEO_PRESTREAM")
          .Add(header.release(), Timestamp::PreStream());
      // cc->Outputs().Tag("VIDEO_PRESTREAM").Close();
    }
    // Rewind to the very first frame.
    cap_->set(cv::CAP_PROP_POS_AVI_RATIO, 0);

    if (cc->OutputSidePackets().HasTag("SAVED_AUDIO_PATH")) {
#ifdef HAVE_FFMPEG
      std::string saved_audio_path = std::tmpnam(nullptr);
      std::string ffmpeg_command =
          absl::StrCat("ffmpeg -nostats -loglevel 0 -i ", input_file_path,
                       " -vn -f adts ", saved_audio_path);
      system(ffmpeg_command.c_str());
      int status_code = system(absl::StrCat("ls ", saved_audio_path).c_str());
      if (status_code == 0) {
        cc->OutputSidePackets()
            .Tag("SAVED_AUDIO_PATH")
            .Set(MakePacket<std::string>(saved_audio_path));
      } else {
        LOG(WARNING) << "FFmpeg can't extract audio from " << input_file_path
                     << " by executing the following command: "
                     << ffmpeg_command;
        cc->OutputSidePackets()
            .Tag("SAVED_AUDIO_PATH")
            .Set(MakePacket<std::string>(std::string()));
      }
#else
      return ::mediapipe::InvalidArgumentErrorBuilder(MEDIAPIPE_LOC)
             << "OpenCVVideoDecoderCalculator can't save the audio file "
                "because FFmpeg is not installed. Please remove "
                "output_side_packet: \"SAVED_AUDIO_PATH\" from the node "
                "config.";
#endif
    }
    return ::mediapipe::OkStatus();
  }

  ::mediapipe::Status Process(CalculatorContext* cc) override {
    auto image_frame = absl::make_unique<ImageFrame>(format_, width_, height_,
                                                     /*alignment_boundary=*/1);
    // Use microsecond as the unit of time.
    Timestamp timestamp(cap_->get(cv::CAP_PROP_POS_MSEC) * 1000);
    if (format_ == ImageFormat::GRAY8) {
      cv::Mat frame = formats::MatView(image_frame.get());
      cap_->read(frame);
      if (frame.empty()) {
        return tool::StatusStop();
      }
    } else {
      cv::Mat tmp_frame;
      cap_->read(tmp_frame);
      if (tmp_frame.empty()) {
        return tool::StatusStop();
      }
      if (format_ == ImageFormat::SRGB) {
        cv::cvtColor(tmp_frame, formats::MatView(image_frame.get()),
                     cv::COLOR_BGR2RGB);
      } else if (format_ == ImageFormat::SRGBA) {
        cv::cvtColor(tmp_frame, formats::MatView(image_frame.get()),
                     cv::COLOR_BGRA2RGBA);
      }
    }
    // If the timestamp of the current frame is not greater than the one of the
    // previous frame, the new frame will be discarded.
    if (prev_timestamp_ < timestamp) {
      cc->Outputs().Tag("VIDEO").Add(image_frame.release(), timestamp);
      prev_timestamp_ = timestamp;
      decoded_frames_++;
    }

    return ::mediapipe::OkStatus();
  }

  ::mediapipe::Status Close(CalculatorContext* cc) override {
    if (cap_ && cap_->isOpened()) {
      cap_->release();
    }
    if (decoded_frames_ != frame_count_) {
      LOG(WARNING) << "Not all the frames are decoded (total frames: "
                   << frame_count_ << " vs decoded frames: " << decoded_frames_
                   << ").";
    }
    return ::mediapipe::OkStatus();
  }

 private:
  std::unique_ptr<cv::VideoCapture> cap_;
  int width_;
  int height_;
  int frame_count_;
  int decoded_frames_ = 0;
  ImageFormat::Format format_;
  Timestamp prev_timestamp_ = Timestamp::Unset();
};

REGISTER_CALCULATOR(OpenCvVideoDecoderCalculator);
}  // namespace mediapipe
27  mediapipe/examples/desktop/simpleIO/BUILD  (Normal file)

@@ -0,0 +1,27 @@

# Copyright 2019 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

licenses(["notice"])  # Apache 2.0

package(default_visibility = ["//mediapipe/examples:__subpackages__"])

cc_binary(
    name = "simple_io_tflite",
    deps = [
        "//mediapipe/examples/desktop:simple_run_graph_main",
        "//mediapipe/graphs/simple_io:desktop_tflite_calculators",
    ],
)
33  mediapipe/examples/desktop/simpleIO/README.md  (Normal file)

@@ -0,0 +1,33 @@

$ bazel build -c opt --define MEDIAPIPE_DISABLE_GPU=1 \
    mediapipe/examples/desktop/simpleIO:simple_io_tflite

# It should print:
# Target //mediapipe/examples/desktop/simpleIO:simple_io_tflite up-to-date:
#   bazel-bin/mediapipe/examples/desktop/simpleIO/simple_io_tflite
# INFO: Elapsed time: 36.417s, Critical Path: 23.22s
# INFO: 711 processes: 710 linux-sandbox, 1 local.
# INFO: Build completed successfully, 734 total actions

$ export GLOG_logtostderr=1

# INPUT= file, OUTPUT=file
# Replace <input video path> and <output video path>.
# You can find a test video in mediapipe/examples/desktop/simpleIO.
$ bazel-bin/mediapipe/examples/desktop/simpleIO/simple_io_tflite \
    --calculator_graph_config_file=mediapipe/graphs/simple_io/simple_io_graph.pbtxt \
    --input_side_packets=input_video_path=./mediapipe/examples/desktop/simpleIO/test_video.mp4,output_video_path=./mediapipe/examples/desktop/simpleIO/output_video.mp4

# INPUT= file, OUTPUT=screen
$ bazel-bin/mediapipe/examples/desktop/simpleIO/simple_io_tflite \
    --calculator_graph_config_file=mediapipe/graphs/simple_io/simple_media_to_screen_graph.pbtxt \
    --input_side_packets=input_video_path=./mediapipe/examples/desktop/simpleIO/test_video.mp4

# INPUT= Stream , OUTPUT=screen
$ bazel-bin/mediapipe/examples/desktop/simpleIO/simple_io_tflite \
    --calculator_graph_config_file=mediapipe/graphs/simple_io/simple_media_to_screen_graph.pbtxt \
    --input_side_packets=input_video_path=rtp://0.0.0.0:5000
70  mediapipe/examples/desktop/simpleIO/simple_run_graph_main.cc  (Normal file)

@@ -0,0 +1,70 @@

// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// A simple main function to run a MediaPipe graph.

#include "absl/strings/str_split.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/port/commandlineflags.h"
#include "mediapipe/framework/port/file_helpers.h"
#include "mediapipe/framework/port/map_util.h"
#include "mediapipe/framework/port/parse_text_proto.h"
#include "mediapipe/framework/port/status.h"

DEFINE_string(
    calculator_graph_config_file, "",
    "Name of file containing text format CalculatorGraphConfig proto.");

DEFINE_string(input_side_packets, "",
              "Comma-separated list of key=value pairs specifying side packets "
              "for the CalculatorGraph. All values will be treated as the "
              "string type even if they represent doubles, floats, etc.");

::mediapipe::Status RunMPPGraph() {
  std::string calculator_graph_config_contents;
  RETURN_IF_ERROR(mediapipe::file::GetContents(
      FLAGS_calculator_graph_config_file, &calculator_graph_config_contents));
  LOG(INFO) << "Get calculator graph config contents: "
            << calculator_graph_config_contents;
  mediapipe::CalculatorGraphConfig config =
      mediapipe::ParseTextProtoOrDie<mediapipe::CalculatorGraphConfig>(
          calculator_graph_config_contents);
  std::map<std::string, ::mediapipe::Packet> input_side_packets;
  std::vector<std::string> kv_pairs =
      absl::StrSplit(FLAGS_input_side_packets, ',');
  for (const std::string& kv_pair : kv_pairs) {
    std::vector<std::string> name_and_value = absl::StrSplit(kv_pair, '=');
    RET_CHECK(name_and_value.size() == 2);
    RET_CHECK(!::mediapipe::ContainsKey(input_side_packets, name_and_value[0]));
    input_side_packets[name_and_value[0]] =
        ::mediapipe::MakePacket<std::string>(name_and_value[1]);
  }
  LOG(INFO) << "Initialize the calculator graph.";
  mediapipe::CalculatorGraph graph;
  RETURN_IF_ERROR(graph.Initialize(config, input_side_packets));
  LOG(INFO) << "Start running the calculator graph.";
  return graph.Run();
}

int main(int argc, char** argv) {
  google::InitGoogleLogging(argv[0]);
  gflags::ParseCommandLineFlags(&argc, &argv, true);
  ::mediapipe::Status run_status = RunMPPGraph();
  if (!run_status.ok()) {
    LOG(ERROR) << "Failed to run the graph: " << run_status.message();
  } else {
    LOG(INFO) << "Success!";
  }
  return 0;
}
92  mediapipe/graphs/simple_io/BUILD  (Normal file)

@@ -0,0 +1,92 @@

# Copyright 2019 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

licenses(["notice"])  # Apache 2.0

package(default_visibility = ["//visibility:public"])

cc_library(
    name = "mobile_calculators",
    deps = [
        "//mediapipe/calculators/core:flow_limiter_calculator",
        "//mediapipe/calculators/image:image_transformation_calculator",
        "//mediapipe/calculators/tflite:ssd_anchors_calculator",
        "//mediapipe/calculators/tflite:tflite_converter_calculator",
        "//mediapipe/calculators/tflite:tflite_inference_calculator",
        "//mediapipe/calculators/tflite:tflite_tensors_to_detections_calculator",
        "//mediapipe/calculators/util:annotation_overlay_calculator",
        "//mediapipe/calculators/util:detection_label_id_to_text_calculator",
        "//mediapipe/calculators/util:detection_letterbox_removal_calculator",
        "//mediapipe/calculators/util:detections_to_render_data_calculator",
        "//mediapipe/calculators/util:non_max_suppression_calculator",
        "//mediapipe/gpu:gpu_buffer_to_image_frame_calculator",
        "//mediapipe/gpu:image_frame_to_gpu_buffer_calculator",
    ],
)

cc_library(
    name = "desktop_tensorflow_calculators",
    deps = [
        "//mediapipe/calculators/tensorflow:image_frame_to_tensor_calculator",
        "//mediapipe/calculators/tensorflow:lapped_tensor_buffer_calculator",
        "//mediapipe/calculators/tensorflow:object_detection_tensors_to_detections_calculator",
        "//mediapipe/calculators/tensorflow:tensor_squeeze_dimensions_calculator",
        "//mediapipe/calculators/tensorflow:tensorflow_inference_calculator",
        "//mediapipe/calculators/tensorflow:tensorflow_session_from_saved_model_calculator",
        "//mediapipe/calculators/util:annotation_overlay_calculator",
        "//mediapipe/calculators/util:detection_label_id_to_text_calculator",
        "//mediapipe/calculators/util:detections_to_render_data_calculator",
        "//mediapipe/calculators/util:non_max_suppression_calculator",
        "//mediapipe/calculators/video:opencv_video_decoder_calculator",
        "//mediapipe/calculators/video:opencv_video_encoder_calculator",
        "//myMediaPipe/calculators/video:opencv_video_imshow_calculator",
    ],
)

cc_library(
    name = "desktop_tflite_calculators",
    deps = [
        "//mediapipe/calculators/image:image_transformation_calculator",
        "//mediapipe/calculators/tflite:ssd_anchors_calculator",
        "//mediapipe/calculators/tflite:tflite_converter_calculator",
        "//mediapipe/calculators/tflite:tflite_inference_calculator",
        "//mediapipe/calculators/tflite:tflite_tensors_to_detections_calculator",
        "//mediapipe/calculators/util:annotation_overlay_calculator",
        "//mediapipe/calculators/util:detection_label_id_to_text_calculator",
        "//mediapipe/calculators/util:detections_to_render_data_calculator",
        "//mediapipe/calculators/util:non_max_suppression_calculator",
        "//mediapipe/calculators/video:opencv_video_decoder_calculator",
        "//mediapipe/calculators/video:opencv_video_encoder_calculator",
        "//myMediaPipe/calculators/video:opencv_video_imshow_calculator",
    ],
)

load(
    "//mediapipe/framework/tool:mediapipe_graph.bzl",
    "mediapipe_binary_graph",
)

mediapipe_binary_graph(
    name = "mobile_cpu_binary_graph",
    graph = "object_detection_mobile_cpu.pbtxt",
    output_name = "mobile_cpu.binarypb",
    deps = [":mobile_calculators"],
)

mediapipe_binary_graph(
    name = "mobile_gpu_binary_graph",
    graph = "object_detection_mobile_gpu.pbtxt",
    output_name = "mobile_gpu.binarypb",
    deps = [":mobile_calculators"],
)
27  mediapipe/graphs/simple_io/simple_io_graph.pbtxt  (Normal file)

@@ -0,0 +1,27 @@

# MediaPipe graph, simple input and output video
# on CPU.
# Used in the example in
# mediapipie/examples/desktop/object_detection:object_detection_tensorflow.

# Decodes an input video file into images and a video header.
node {
  calculator: "OpenCvVideoDecoderCalculator"
  input_side_packet: "INPUT_FILE_PATH:input_video_path"
  output_stream: "VIDEO:input_video"
  output_stream: "VIDEO_PRESTREAM:input_video_header"
}

# Encodes the annotated images into a video file, adopting properties specified
# in the input video header, e.g., video framerate.
node {
  calculator: "OpenCvVideoEncoderCalculator"
  input_stream: "VIDEO:input_video"
  input_stream: "VIDEO_PRESTREAM:input_video_header"
  input_side_packet: "OUTPUT_FILE_PATH:output_video_path"
  node_options: {
    [type.googleapis.com/mediapipe.OpenCvVideoEncoderCalculatorOptions]: {
      codec: "avc1"
      video_format: "mp4"
    }
  }
}
@@ -0,0 +1,26 @@

# MediaPipe graph, simple input and output video
# on CPU.
# Used in the example in
# mediapipie/examples/desktop/object_detection:object_detection_tensorflow.

# Decodes an input video file into images and a video header.
node {
  calculator: "OpenCvVideoDecoderCalculator"
  input_side_packet: "INPUT_FILE_PATH:input_video_path"
  output_stream: "VIDEO:input_video"
  output_stream: "VIDEO_PRESTREAM:input_video_header"
}

# Encodes the annotated images into a video file, adopting properties specified
# in the input video header, e.g., video framerate.
node {
  calculator: "OpenCvVideoImShowCalculator"
  input_stream: "VIDEO:input_video"
  input_stream: "VIDEO_PRESTREAM:input_video_header"
  node_options: {
    [type.googleapis.com/mediapipe.OpenCvVideoEncoderCalculatorOptions]: {
      codec: "avc1"
      video_format: "mp4"
    }
  }
}
1  myMediaPipe  (Submodule)

@@ -0,0 +1 @@

Subproject commit be990c5c2f4fc7bdad7a203a7bcdbb272af73d99
3  rnDynamicGestures.sh  (Executable file)

@@ -0,0 +1,3 @@

bazel-bin/myMediapipe/projects/dynamicGestures/dynamic_gestures_cpu_tflite \
    --calculator_graph_config_file=myMediapipe/graphs/dynamicGestures/mainGraph_desktop.pbtxt \
    --input_side_packets=input_video_path=rtp://0.0.0.0:5000
3  rnDynamicGestures_moving_volume.sh  (Executable file)

@@ -0,0 +1,3 @@

bazel-bin/myMediapipe/projects/dynamicGestures/dynamic_gestures_cpu_tflite \
    --calculator_graph_config_file=myMediapipe/graphs/dynamicGestures/mainGraph_desktop.pbtxt \
    --input_side_packets=input_video_path=myMediapipe/projects/dynamicGestures/videos/test1.mp4
3  rnStaticGestures.sh  (Executable file)

@@ -0,0 +1,3 @@

bazel-bin/myMediapipe/projects/staticGestures/staticGestures/static_gestures_cpu_tflite \
    --calculator_graph_config_file=myMediapipe/graphs/staticGestures/mainGraph_desktop.pbtxt \
    --input_side_packets=input_video_path=rtp://0.0.0.0:5000
3  runHandDetection.sh  (Executable file)

@@ -0,0 +1,3 @@

bazel-bin/mediapipe/examples/desktop/hand_detection/hand_detection_tflite \
    --calculator_graph_config_file=mediapipe/graphs/hand_tracking/hand_detection_desktop.pbtxt \
    --input_side_packets=input_video_path=rtp://0.0.0.0:5000
3  runHandLMtoFile.sh  (Executable file)

@@ -0,0 +1,3 @@

bazel-bin/myMediapipe/projects/staticGestures/staticGesturesCaptureToFile/static_gestures_cpu_tflite \
    --calculator_graph_config_file=myMediapipe/graphs/staticGesturesCaptureToFile/mainGraph_desktop.pbtxt \
    --input_side_packets=input_video_path=rtp://0.0.0.0:5000
3  runTensorFlowObjectDetectionDemo.sh  (Executable file)

@@ -0,0 +1,3 @@

bazel-bin/mediapipe/examples/desktop/object_detection/object_detection_tensorflow \
    --calculator_graph_config_file=mediapipe/graphs/object_detection/object_detection_desktop_tensorflow_to_screen_graph.pbtxt \
    --input_side_packets=input_video_path=udp://0.0.0.0:5000