face style pipeline

parent 33ed0f7c23
commit fcfe31a67b

mediapipe/calculators/image_style/BUILD  (new file, 50 lines)
@@ -0,0 +1,50 @@
# Copyright 2019 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

load("//mediapipe/framework/port:build_config.bzl", "mediapipe_proto_library")

licenses(["notice"])

package(default_visibility = ["//visibility:public"])

cc_library(
    name = "fast_utils_calculator",
    srcs = ["fast_utils_calculator.cc"],
    visibility = ["//visibility:public"],
    deps = [
        "//mediapipe/framework:calculator_options_cc_proto",
        "//mediapipe/framework/formats:image_format_cc_proto",
        "//mediapipe/util:color_cc_proto",
        "@com_google_absl//absl/strings",
        "//mediapipe/framework:calculator_framework",
        "//mediapipe/framework/formats:image_frame",
        "//mediapipe/framework/formats:image_frame_opencv",
        "//mediapipe/framework/formats:video_stream_header",
        "//mediapipe/framework/port:logging",
        "//mediapipe/framework/port:opencv_core",
        "//mediapipe/framework/port:opencv_imgproc",
        "//mediapipe/framework/port:opencv_highgui",
        "//mediapipe/framework/port:status",
        "//mediapipe/framework/port:vector",
        "//mediapipe/util:annotation_renderer",
        "//mediapipe/util:render_data_cc_proto",
    ],
    alwayslink = 1,
)
mediapipe/calculators/image_style/fast_utils_calculator.cc  (new file, 399 lines)
@@ -0,0 +1,399 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <math.h>

#include <algorithm>
#include <cmath>
#include <map>
#include <memory>
#include <string>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <vector>
// #include <android/log.h>

#include "absl/strings/str_cat.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/calculator_options.pb.h"
#include "mediapipe/framework/formats/image_format.pb.h"
#include "mediapipe/framework/formats/image_frame.h"
#include "mediapipe/framework/formats/image_frame_opencv.h"
#include "mediapipe/framework/formats/video_stream_header.h"
#include "mediapipe/framework/port/logging.h"
#include "mediapipe/framework/port/opencv_core_inc.h"
#include "mediapipe/framework/port/opencv_imgproc_inc.h"
#include "mediapipe/framework/port/status.h"
#include "mediapipe/framework/port/vector.h"
#include "mediapipe/util/annotation_renderer.h"
#include "mediapipe/util/color.pb.h"
#include "mediapipe/util/render_data.pb.h"

namespace mediapipe {
namespace {

// FFHQ face-alignment template landmarks (left eye, right eye, nose, and the
// two mouth corners), normalized by the 1024-px FFHQ image size.
static const std::vector<cv::Point2f> FFHQ_NORM_LM = {
    {638.68525475 / 1024, 486.24604922 / 1024},
    {389.31496114 / 1024, 485.8921848 / 1024},
    {513.67979275 / 1024, 620.8915371 / 1024},
    {405.50932642 / 1024, 756.52797927 / 1024},
    {622.55630397 / 1024, 756.15509499 / 1024}};

constexpr char kImageFrameTag[] = "IMAGE";
constexpr char kVectorTag[] = "VECTOR";

// Converts a normalized coordinate pair to pixel coordinates.
std::tuple<int, int> _normalized_to_pixel_coordinates(float normalized_x,
                                                      float normalized_y,
                                                      int image_width,
                                                      int image_height) {
  int x_px = std::min<int>(floor(normalized_x * image_width), image_width - 1);
  int y_px =
      std::min<int>(floor(normalized_y * image_height), image_height - 1);
  return {x_px, y_px};
}

// Landmark-index pairs tracing the face-oval contour of the MediaPipe face
// mesh.
static const std::vector<cv::Point> FACEMESH_FACE_OVAL = {
    {10, 338},  {338, 297}, {297, 332}, {332, 284}, {284, 251}, {251, 389},
    {389, 356}, {356, 454}, {454, 323}, {323, 361}, {361, 288}, {288, 397},
    {397, 365}, {365, 379}, {379, 378}, {378, 400}, {400, 377}, {377, 152},
    {152, 148}, {148, 176}, {176, 149}, {149, 150}, {150, 136}, {136, 172},
    {172, 58},  {58, 132},  {132, 93},  {93, 234},  {234, 127}, {127, 162},
    {162, 21},  {21, 54},   {54, 103},  {103, 67},  {67, 109},  {109, 10}};

enum { ATTRIB_VERTEX, ATTRIB_TEXTURE_POSITION, NUM_ATTRIBUTES };

// Round up n to next multiple of m.
size_t RoundUp(size_t n, size_t m) { return ((n + m - 1) / m) * m; }  // NOLINT

inline bool HasImageTag(mediapipe::CalculatorContext *cc) { return false; }

using Point = RenderAnnotation::Point;

bool NormalizedtoPixelCoordinates(double normalized_x, double normalized_y,
                                  int image_width, int image_height, int *x_px,
                                  int *y_px) {
  CHECK(x_px != nullptr);
  CHECK(y_px != nullptr);
  CHECK_GT(image_width, 0);
  CHECK_GT(image_height, 0);

  if (normalized_x < 0 || normalized_x > 1.0 || normalized_y < 0 ||
      normalized_y > 1.0) {
    VLOG(1) << "Normalized coordinates must be between 0.0 and 1.0";
  }

  *x_px = static_cast<int32>(round(normalized_x * image_width));
  *y_px = static_cast<int32>(round(normalized_y * image_height));

  return true;
}

}  // namespace

class FastUtilsCalculator : public CalculatorBase {
 public:
  FastUtilsCalculator() = default;
  ~FastUtilsCalculator() override = default;

  static absl::Status GetContract(CalculatorContract *cc);

  // From Calculator.
  absl::Status Open(CalculatorContext *cc) override;
  absl::Status Process(CalculatorContext *cc) override;
  absl::Status Close(CalculatorContext *cc) override;

 private:
  absl::Status CreateRenderTargetCpu(CalculatorContext *cc,
                                     std::unique_ptr<cv::Mat> &image_mat,
                                     ImageFormat::Format *target_format);

  absl::Status RenderToCpu(CalculatorContext *cc,
                           const ImageFormat::Format &target_format,
                           uchar *data_image,
                           std::unique_ptr<cv::Mat> &image_mat);

  absl::Status Call(CalculatorContext *cc,
                    std::unique_ptr<cv::Mat> &image_mat,
                    ImageFormat::Format *target_format,
                    const RenderData &render_data,
                    std::unordered_map<std::string, cv::Mat> &all_masks);

  // Indicates if image frame is available as input.
  bool image_frame_available_ = false;

  // Face-mesh landmark indices for each facial region of interest.
  std::unordered_map<std::string, const std::vector<int>> index_dict = {
      {"leftEye", {384, 385, 386, 387, 388, 390, 263, 362, 398, 466, 373, 374,
                   249, 380, 381, 382}},
      {"rightEye", {160, 33, 161, 163, 133, 7, 173, 144, 145, 246, 153, 154,
                    155, 157, 158, 159}},
      {"nose", {4}},
      {"lips", {0, 13, 14, 17, 84}},
      {"leftLips", {61, 146}},
      {"rightLips", {291, 375}},
  };

  int width_ = 0;
  int height_ = 0;
  int width_canvas_ = 0;  // Size of overlay drawing texture canvas.
  int height_canvas_ = 0;

  int max_num_faces = 1;
  bool refine_landmarks = true;
  double min_detection_confidence = 0.5;
  double min_tracking_confidence = 0.5;
};
REGISTER_CALCULATOR(FastUtilsCalculator);

absl::Status FastUtilsCalculator::GetContract(CalculatorContract *cc) {
  CHECK_GE(cc->Inputs().NumEntries(), 1);

  if (cc->Inputs().HasTag(kImageFrameTag)) {
    cc->Inputs().Tag(kImageFrameTag).Set<ImageFrame>();
    CHECK(cc->Outputs().HasTag(kImageFrameTag));
  }

  if (cc->Outputs().HasTag(kImageFrameTag)) {
    cc->Outputs().Tag(kImageFrameTag).Set<ImageFrame>();
  }

  return absl::OkStatus();
}

absl::Status FastUtilsCalculator::Open(CalculatorContext *cc) {
  cc->SetOffset(TimestampDiff(0));

  if (cc->Inputs().HasTag(kImageFrameTag) || HasImageTag(cc)) {
    image_frame_available_ = true;
  }

  // Set the output header based on the input header (if present).
  const char *tag = kImageFrameTag;
  if (image_frame_available_ && !cc->Inputs().Tag(tag).Header().IsEmpty()) {
    const auto &input_header =
        cc->Inputs().Tag(tag).Header().Get<VideoHeader>();
    auto *output_video_header = new VideoHeader(input_header);
    cc->Outputs().Tag(tag).SetHeader(Adopt(output_video_header));
  }

  return absl::OkStatus();
}

absl::Status FastUtilsCalculator::Process(CalculatorContext *cc) {
  if (cc->Inputs().HasTag(kImageFrameTag) &&
      cc->Inputs().Tag(kImageFrameTag).IsEmpty()) {
    return absl::OkStatus();
  }

  // Initialize render target, drawn with OpenCV.
  std::unique_ptr<cv::Mat> image_mat;
  ImageFormat::Format target_format;
  std::unordered_map<std::string, cv::Mat> all_masks;

  if (cc->Outputs().HasTag(kImageFrameTag)) {
    MP_RETURN_IF_ERROR(CreateRenderTargetCpu(cc, image_mat, &target_format));
  }

  // Render streams onto render target.
  for (CollectionItemId id = cc->Inputs().BeginId(); id < cc->Inputs().EndId();
       ++id) {
    auto tag_and_index = cc->Inputs().TagAndIndexFromId(id);
    std::string tag = tag_and_index.first;
    if (!tag.empty() && tag != kVectorTag) {
      continue;
    }
    if (cc->Inputs().Get(id).IsEmpty()) {
      continue;
    }
    if (tag.empty()) {
      // Empty tag defaults to accepting a single object of RenderData type.
      const RenderData &render_data = cc->Inputs().Get(id).Get<RenderData>();
      MP_RETURN_IF_ERROR(
          Call(cc, image_mat, &target_format, render_data, all_masks));
    } else {
      RET_CHECK_EQ(kVectorTag, tag);
      const std::vector<RenderData> &render_data_vec =
          cc->Inputs().Get(id).Get<std::vector<RenderData>>();
      for (const RenderData &render_data : render_data_vec) {
        MP_RETURN_IF_ERROR(
            Call(cc, image_mat, &target_format, render_data, all_masks));
      }
    }
  }

  // Copy the rendered image to output.
  uchar *image_mat_ptr = image_mat->data;
  MP_RETURN_IF_ERROR(RenderToCpu(cc, target_format, image_mat_ptr, image_mat));

  return absl::OkStatus();
}

absl::Status FastUtilsCalculator::Close(CalculatorContext *cc) {
  return absl::OkStatus();
}

absl::Status FastUtilsCalculator::RenderToCpu(
    CalculatorContext *cc, const ImageFormat::Format &target_format,
    uchar *data_image, std::unique_ptr<cv::Mat> &image_mat) {
  cv::Mat mat_image_ = *image_mat.get();

  auto output_frame = absl::make_unique<ImageFrame>(
      target_format, mat_image_.cols, mat_image_.rows);

  output_frame->CopyPixelData(target_format, mat_image_.cols, mat_image_.rows,
                              data_image,
                              ImageFrame::kDefaultAlignmentBoundary);

  if (cc->Outputs().HasTag(kImageFrameTag)) {
    cc->Outputs()
        .Tag(kImageFrameTag)
        .Add(output_frame.release(), cc->InputTimestamp());
  }

  return absl::OkStatus();
}

absl::Status FastUtilsCalculator::CreateRenderTargetCpu(
    CalculatorContext *cc, std::unique_ptr<cv::Mat> &image_mat,
    ImageFormat::Format *target_format) {
  if (image_frame_available_) {
    const auto &input_frame =
        cc->Inputs().Tag(kImageFrameTag).Get<ImageFrame>();

    int target_mat_type;
    switch (input_frame.Format()) {
      case ImageFormat::SRGBA:
        *target_format = ImageFormat::SRGBA;
        target_mat_type = CV_8UC4;
        break;
      case ImageFormat::SRGB:
        *target_format = ImageFormat::SRGB;
        target_mat_type = CV_8UC3;
        break;
      case ImageFormat::GRAY8:
        *target_format = ImageFormat::SRGB;
        target_mat_type = CV_8UC3;
        break;
      default:
        return absl::UnknownError("Unexpected image frame format.");
    }

    image_mat = absl::make_unique<cv::Mat>(
        input_frame.Height(), input_frame.Width(), target_mat_type);

    auto input_mat = formats::MatView(&input_frame);

    if (input_frame.Format() == ImageFormat::GRAY8) {
      cv::Mat rgb_mat;
      cv::cvtColor(input_mat, rgb_mat, CV_GRAY2RGB);
      rgb_mat.copyTo(*image_mat);
    } else {
      input_mat.copyTo(*image_mat);
    }
  } else {
    image_mat = absl::make_unique<cv::Mat>(150, 150, CV_8UC4,
                                           cv::Scalar(255, 255, 255));
    *target_format = ImageFormat::SRGBA;
  }

  return absl::OkStatus();
}

absl::Status FastUtilsCalculator::Call(
    CalculatorContext *cc, std::unique_ptr<cv::Mat> &image_mat,
    ImageFormat::Format *target_format, const RenderData &render_data,
    std::unordered_map<std::string, cv::Mat> &all_masks) {
  cv::Mat mat_image_ = *image_mat.get();

  int image_width_ = image_mat->cols;
  int image_height_ = image_mat->rows;

  cv::Mat mask;
  std::vector<cv::Point> kps, landmarks;
  std::vector<std::vector<cv::Point>> lms_out;

  int c = 0;

  // For each facial region, collect the pixel positions of its landmark
  // points and average them into a single keypoint.
  for (const auto &[key, value] : index_dict) {
    for (auto order : value) {
      c = 0;
      for (auto &annotation : render_data.render_annotations()) {
        if (annotation.data_case() == RenderAnnotation::kPoint) {
          if (order == c) {
            const auto &point = annotation.point();
            int x = -1;
            int y = -1;
            CHECK(NormalizedtoPixelCoordinates(point.x(), point.y(),
                                               image_width_, image_height_,
                                               &x, &y));
            kps.push_back(cv::Point(x, y));
          }
          c += 1;
        }
      }
    }
    double sumx = 0, sumy = 0, meanx, meany;

    for (auto p : kps) {
      sumx += p.x;
      sumy += p.y;
    }
    meanx = sumx / kps.size();
    meany = sumy / kps.size();

    landmarks.push_back(cv::Point(meanx, meany));

    kps.clear();
  }

  lms_out.push_back(landmarks);

  return absl::OkStatus();
}

}  // namespace mediapipe
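
For orientation, a minimal sketch (not part of this commit) of driving the new calculator from C++: it wires FastUtilsCalculator into a one-input graph, feeds a single 256x256 SRGB frame, and observes the copied output frame. The stream names are hypothetical, and the RenderData inputs are left out because GetContract() above only declares the IMAGE stream.

#include "absl/memory/memory.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/formats/image_frame.h"
#include "mediapipe/framework/port/logging.h"
#include "mediapipe/framework/port/parse_text_proto.h"
#include "mediapipe/framework/port/status.h"

absl::Status RunFastUtilsOnce() {
  // Inline graph: one IMAGE input, one IMAGE output, hypothetical names.
  auto config =
      mediapipe::ParseTextProtoOrDie<mediapipe::CalculatorGraphConfig>(R"pb(
        input_stream: "input_image"
        output_stream: "output_image"
        node {
          calculator: "FastUtilsCalculator"
          input_stream: "IMAGE:input_image"
          output_stream: "IMAGE:output_image"
        }
      )pb");

  mediapipe::CalculatorGraph graph;
  MP_RETURN_IF_ERROR(graph.Initialize(config));
  MP_RETURN_IF_ERROR(graph.ObserveOutputStream(
      "output_image", [](const mediapipe::Packet &packet) {
        const auto &frame = packet.Get<mediapipe::ImageFrame>();
        LOG(INFO) << "rendered frame: " << frame.Width() << "x"
                  << frame.Height();
        return absl::OkStatus();
      }));
  MP_RETURN_IF_ERROR(graph.StartRun({}));

  // A 256x256 RGB frame stands in for a real camera image.
  auto frame = absl::make_unique<mediapipe::ImageFrame>(
      mediapipe::ImageFormat::SRGB, 256, 256);
  MP_RETURN_IF_ERROR(graph.AddPacketToInputStream(
      "input_image",
      mediapipe::Adopt(frame.release()).At(mediapipe::Timestamp(0))));
  MP_RETURN_IF_ERROR(graph.CloseAllInputStreams());
  return graph.WaitUntilDone();
}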
File diff suppressed because it is too large

@@ -0,0 +1,60 @@
# Copyright 2019 The MediaPipe Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

licenses(["notice"])

package(default_visibility = ["//visibility:private"])

cc_binary(
    name = "libmediapipe_jni.so",
    linkshared = 1,
    linkstatic = 1,
    deps = [
        "//mediapipe/graphs/image_style:mobile_calculators",
        "//mediapipe/java/com/google/mediapipe/framework/jni:mediapipe_framework_jni",
    ],
)

cc_library(
    name = "mediapipe_jni_lib",
    srcs = [":libmediapipe_jni.so"],
    alwayslink = 1,
)

android_binary(
    name = "imagestylegpu",
    srcs = glob(["*.java"]),
    assets = [
        "//mediapipe/graphs/image_style:mobile_gpu.binarypb",
        "//mediapipe/models:model_float32.tflite",
    ],
    assets_dir = "",
    manifest = "//mediapipe/examples/android/src/java/com/google/mediapipe/apps/basic:AndroidManifest.xml",
    manifest_values = {
        "applicationId": "com.google.mediapipe.apps.imagestylegpu",
        "appName": "Image Style",
        "mainActivity": "com.google.mediapipe.apps.basic.MainActivity",
        "cameraFacingFront": "True",
        "binaryGraphName": "mobile_gpu.binarypb",
        "inputVideoStreamName": "input_video",
        "outputVideoStreamName": "output_video",
        "flipFramesVertically": "True",
        "converterNumBuffers": "2",
    },
    multidex = "native",
    deps = [
        ":mediapipe_jni_lib",
        "//mediapipe/examples/android/src/java/com/google/mediapipe/apps/basic:basic_lib",
    ],
)
@@ -24,22 +24,14 @@ package(default_visibility = ["//visibility:public"])
 cc_library(
     name = "mobile_calculators",
     deps = [
-        "//mediapipe/calculators/tensorflow:tensor_to_image_frame_calculator",
-        "//mediapipe/calculators/tensorflow:vector_float_to_tensor_calculator",
-        "//mediapipe/calculators/tensor:tensors_to_floats_calculator",
-        "//mediapipe/calculators/tensor:tensors_to_segmentation_calculator",
-        "//mediapipe/calculators/util:from_image_calculator",
-        "//mediapipe/calculators/tensor:image_to_tensor_calculator",
-        "//mediapipe/calculators/tensor:inference_calculator",
         "//mediapipe/calculators/core:flow_limiter_calculator",
-        "//mediapipe/calculators/image:image_transformation_calculator",
-        "//mediapipe/calculators/tflite:tflite_converter_calculator",
-        "//mediapipe/calculators/tflite:tflite_custom_op_resolver_calculator",
-        "//mediapipe/calculators/tflite:tflite_inference_calculator",
-        "//mediapipe/gpu:gpu_buffer_to_image_frame_calculator",
-        "//mediapipe/gpu:image_frame_to_gpu_buffer_calculator",
-        "//mediapipe/calculators/tflite:tflite_tensors_to_segmentation_calculator",
-        "//mediapipe/calculators/image:image_properties_calculator",
+        "//mediapipe/calculators/tensor:image_to_tensor_calculator",
+        "//mediapipe/calculators/tensor:inference_calculator",
+        "//mediapipe/calculators/tensor:tensors_to_segmentation_calculator",
+        "//mediapipe/calculators/util:to_image_calculator",
+        "//mediapipe/calculators/util:from_image_calculator",
+        "//mediapipe/calculators/image:image_properties_calculator",
+        "//mediapipe/calculators/tflite:tflite_custom_op_resolver_calculator",
     ],
 )

@@ -47,18 +39,17 @@ cc_library(
     name = "desktop_calculators",
     deps = [
         "//mediapipe/calculators/core:flow_limiter_calculator",
-        "//mediapipe/calculators/image:image_transformation_calculator",
-        "//mediapipe/calculators/tflite:tflite_converter_calculator",
-        "//mediapipe/calculators/tflite:tflite_inference_calculator",
-        "//mediapipe/calculators/tflite:tflite_tensors_to_gpuimage_calculator",
-        "//mediapipe/calculators/tflite:tflite_custom_op_resolver_calculator",
-        "//mediapipe/calculators/tflite:tflite_tensors_to_segmentation_calculator",
+        "//mediapipe/calculators/tensor:image_to_tensor_calculator",
+        "//mediapipe/calculators/tensor:inference_calculator",
+        "//mediapipe/calculators/tensor:tensors_to_segmentation_calculator",
+        "//mediapipe/calculators/util:to_image_calculator",
+        "//mediapipe/calculators/util:from_image_calculator",
     ],
 )

 mediapipe_binary_graph(
     name = "mobile_gpu_binary_graph",
-    graph = "image_style.pbtxt",
+    graph = "image_style_gpu.pbtxt",
     output_name = "mobile_gpu.binarypb",
     deps = [":mobile_calculators"],
 )
@@ -42,8 +42,8 @@ node {
   options {
     [mediapipe.TfLiteConverterCalculatorOptions.ext] {
       output_tensor_float_range {
-        min: 0
-        max: 255
+        min: -1
+        max: 1
       }
     }
   }
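
The hunk above re-scales the converter's float output from raw [0, 255] to zero-centered [-1, 1], matching a model trained on normalized inputs. As a rough illustration (not code from this commit), the per-pixel mapping that output_tensor_float_range { min: -1 max: 1 } requests is:

#include <cstdint>

// Linear rescaling of an 8-bit pixel into [-1, 1]: 0 -> -1.0, 255 -> +1.0.
inline float NormalizePixel(std::uint8_t v) {
  return static_cast<float>(v) / 127.5f - 1.0f;
}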
@@ -1,19 +1,30 @@
-# MediaPipe graph that performs object detection on desktop with TensorFlow Lite
-# on CPU.
-# Used in the example in
-# mediapipe/examples/desktop/object_detection:object_detection_tflite.
+# MediaPipe graph that performs face mesh with TensorFlow Lite on CPU.

-# max_queue_size limits the number of packets enqueued on any input stream
-# by throttling inputs to the graph. This makes the graph only process one
-# frame per time.
-max_queue_size: 1
+# Input image. (ImageFrame)
+input_stream: "input_video"

-# Decodes an input video file into images and a video header.
+# Output image with rendered results. (ImageFrame)
+output_stream: "output_video"
+
+# Throttles the images flowing downstream for flow control. It passes through
+# the very first incoming image unaltered, and waits for downstream nodes
+# (calculators and subgraphs) in the graph to finish their tasks before it
+# passes through another image. All images that come in while waiting are
+# dropped, limiting the number of in-flight images in most parts of the graph
+# to 1. This prevents the downstream nodes from queuing up incoming images and
+# data excessively, which leads to increased latency and memory usage, unwanted
+# in real-time mobile applications. It also eliminates unnecessary computation,
+# e.g., the output produced by a node may get dropped downstream if the
+# subsequent nodes are still busy processing previous inputs.
 node {
-  calculator: "OpenCvVideoDecoderCalculator"
-  input_side_packet: "INPUT_FILE_PATH:input_video_path"
-  output_stream: "VIDEO:input_video"
-  output_stream: "VIDEO_PRESTREAM:input_video_header"
+  calculator: "FlowLimiterCalculator"
+  input_stream: "input_video"
+  input_stream: "FINISHED:output_video"
+  input_stream_info: {
+    tag_index: "FINISHED"
+    back_edge: true
+  }
+  output_stream: "throttled_input_video"
 }

 # Transforms the input image on CPU to a 320x320 image. To scale the image, by
@@ -23,12 +34,12 @@ node {
 # detection model used in this graph is agnostic to that deformation.
 node: {
   calculator: "ImageTransformationCalculator"
-  input_stream: "IMAGE:input_video"
+  input_stream: "IMAGE:throttled_input_video"
   output_stream: "IMAGE:transformed_input_video"
   node_options: {
     [type.googleapis.com/mediapipe.ImageTransformationCalculatorOptions] {
-      output_width: 512
-      output_height: 512
+      output_width: 256
+      output_height: 256
     }
   }
 }
@@ -39,58 +50,45 @@ node: {
 node {
   calculator: "TfLiteConverterCalculator"
   input_stream: "IMAGE:transformed_input_video"
-  output_stream: "TENSORS:image_tensor"
-  node_options: {
-    [type.googleapis.com/mediapipe.TfLiteConverterCalculatorOptions] {
-      zero_center: true
-    }
-  }
+  output_stream: "TENSORS:input_tensors"
+  options {
+    [mediapipe.TfLiteConverterCalculatorOptions.ext] {
+      zero_center: false
+      max_num_channels: 3
+      output_tensor_float_range {
+        min: 0.0
+        max: 255.0
+      }
+    }
+  }
 }

 # Runs a TensorFlow Lite model on CPU that takes an image tensor and outputs a
 # vector of tensors representing, for instance, detection boxes/keypoints and
 # scores.
 node {
   calculator: "TfLiteInferenceCalculator"
-  input_stream: "TENSORS:image_tensor"
-  output_stream: "TENSORS:stylized_tensor"
+  input_stream: "TENSORS:input_tensors"
+  output_stream: "TENSORS:output_tensors"
   node_options: {
     [type.googleapis.com/mediapipe.TfLiteInferenceCalculatorOptions] {
-      model_path: "mediapipe/models/metaf-512-mobile3.tflite"
+      model_path: "mediapipe/models/model_float32.tflite"
     }
   }
 }

-node {
-  calculator: "TfliteTensorsToGpuImageCalculator"
-  input_stream: "TENSORS:stylized_tensor"
-  output_stream: "IMAGE:image"
-}
-
-#node {
-#  calculator: "TfLiteTensorsToSegmentationCalculator"
-#  input_stream: "TENSORS:stylized_tensor"
-#  output_stream: "MASK:mask_image"
-#  node_options: {
-#    [type.googleapis.com/mediapipe.TfLiteTensorsToSegmentationCalculatorOptions] {
-#      tensor_width: 512
-#      tensor_height: 512
-#      tensor_channels: 3
-#    }
-#  }
-#}
-
-# Encodes the annotated images into a video file, adopting properties specified
-# in the input video header, e.g., video framerate.
 node {
-  calculator: "OpenCvVideoEncoderCalculator"
-  input_stream: "VIDEO:image"
-  input_stream: "VIDEO_PRESTREAM:input_video_header"
-  input_side_packet: "OUTPUT_FILE_PATH:output_video_path"
+  calculator: "TfLiteTensorsToSegmentationCalculator"
+  input_stream: "TENSORS:output_tensors"
+  output_stream: "MASK:output_video"
   node_options: {
-    [type.googleapis.com/mediapipe.OpenCvVideoEncoderCalculatorOptions]: {
-      codec: "avc1"
-      video_format: "mp4"
+    [type.googleapis.com/mediapipe.TfLiteTensorsToSegmentationCalculatorOptions] {
+      tensor_width: 256
+      tensor_height: 256
+      tensor_channels: 3
     }
   }
 }
@@ -6,16 +6,7 @@ input_stream: "input_video"
 # Output image with rendered results. (ImageFrame)
 output_stream: "output_video"

-# Throttles the images flowing downstream for flow control. It passes through
-# the very first incoming image unaltered, and waits for downstream nodes
-# (calculators and subgraphs) in the graph to finish their tasks before it
-# passes through another image. All images that come in while waiting are
-# dropped, limiting the number of in-flight images in most part of the graph to
-# 1. This prevents the downstream nodes from queuing up incoming images and data
-# excessively, which leads to increased latency and memory usage, unwanted in
-# real-time mobile applications. It also eliminates unnecessarily computation,
-# e.g., the output produced by a node may get dropped downstream if the
-# subsequent nodes are still busy processing previous inputs.
 node {
   calculator: "FlowLimiterCalculator"
   input_stream: "input_video"
|
@ -27,67 +18,59 @@ node {
|
||||||
output_stream: "throttled_input_video"
|
output_stream: "throttled_input_video"
|
||||||
}
|
}
|
||||||
|
|
||||||
# Transforms the input image on CPU to a 320x320 image. To scale the image, by
|
|
||||||
# default it uses the STRETCH scale mode that maps the entire input image to the
|
|
||||||
# entire transformed image. As a result, image aspect ratio may be changed and
|
|
||||||
# objects in the image may be deformed (stretched or squeezed), but the object
|
|
||||||
# detection model used in this graph is agnostic to that deformation.
|
|
||||||
node: {
|
node: {
|
||||||
calculator: "ImageTransformationCalculator"
|
calculator: "ToImageCalculator"
|
||||||
input_stream: "IMAGE:throttled_input_video"
|
input_stream: "IMAGE_CPU:throttled_input_video"
|
||||||
output_stream: "IMAGE:transformed_input_video"
|
output_stream: "IMAGE:image_input_video"
|
||||||
node_options: {
|
|
||||||
[type.googleapis.com/mediapipe.ImageTransformationCalculatorOptions] {
|
|
||||||
output_width: 256
|
|
||||||
output_height: 256
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
# Converts the transformed input image on CPU into an image tensor as a
|
|
||||||
# TfLiteTensor. The zero_center option is set to true to normalize the
|
|
||||||
# pixel values to [-1.f, 1.f] as opposed to [0.f, 1.f].
|
|
||||||
node {
|
node {
|
||||||
calculator: "TfLiteConverterCalculator"
|
calculator: "ImageToTensorCalculator"
|
||||||
input_stream: "IMAGE:transformed_input_video"
|
input_stream: "IMAGE:image_input_video"
|
||||||
output_stream: "TENSORS:input_tensors"
|
output_stream: "TENSORS:input_tensor"
|
||||||
options {
|
options: {
|
||||||
[mediapipe.TfLiteConverterCalculatorOptions.ext] {
|
[mediapipe.ImageToTensorCalculatorOptions.ext] {
|
||||||
output_tensor_float_range {
|
output_tensor_width: 256
|
||||||
min: 0
|
output_tensor_height: 256
|
||||||
max: 255
|
keep_aspect_ratio: true
|
||||||
}
|
output_tensor_float_range {
|
||||||
max_num_channels: 3
|
min: -1.0
|
||||||
}
|
max: 1.0
|
||||||
}
|
}
|
||||||
}
|
border_mode: BORDER_ZERO
|
||||||
|
|
||||||
|
|
||||||
# Runs a TensorFlow Lite model on CPU that takes an image tensor and outputs a
|
|
||||||
# vector of tensors representing, for instance, detection boxes/keypoints and
|
|
||||||
# scores.
|
|
||||||
node {
|
|
||||||
calculator: "TfLiteInferenceCalculator"
|
|
||||||
input_stream: "TENSORS:input_tensors"
|
|
||||||
output_stream: "TENSORS:output_tensors"
|
|
||||||
node_options: {
|
|
||||||
[type.googleapis.com/mediapipe.TfLiteInferenceCalculatorOptions] {
|
|
||||||
model_path: "mediapipe/models/model_float32.tflite"
|
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
node {
|
node {
|
||||||
calculator: "TfLiteTensorsToSegmentationCalculator"
|
calculator: "InferenceCalculator"
|
||||||
input_stream: "TENSORS:output_tensors"
|
input_stream: "TENSORS:input_tensor"
|
||||||
output_stream: "MASK:output_video"
|
output_stream: "TENSORS:output_tensor"
|
||||||
node_options: {
|
options: {
|
||||||
[type.googleapis.com/mediapipe.TfLiteTensorsToSegmentationCalculatorOptions] {
|
[mediapipe.InferenceCalculatorOptions.ext] {
|
||||||
tensor_width: 256
|
model_path: "mediapipe/models/model_float32.tflite"
|
||||||
tensor_height: 256
|
delegate { xnnpack {} }
|
||||||
tensor_channels: 3
|
}
|
||||||
}
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
node {
|
||||||
|
calculator: "TensorsToSegmentationCalculator"
|
||||||
|
input_stream: "TENSORS:output_tensor"
|
||||||
|
output_stream: "MASK:output"
|
||||||
|
options: {
|
||||||
|
[mediapipe.TensorsToSegmentationCalculatorOptions.ext] {
|
||||||
|
activation: NONE
|
||||||
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
node{
|
||||||
|
calculator: "FromImageCalculator"
|
||||||
|
input_stream: "IMAGE:output"
|
||||||
|
output_stream: "IMAGE_CPU:output_video"
|
||||||
|
}
|
||||||
|
|
||||||
|
|
|
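
With activation: NONE, the segmentation calculator above passes the stylized float values through unchanged, so whatever consumes the mask has to undo the [-1, 1] input normalization itself. A hedged sketch of that inverse mapping, assuming the stylization model emits values in [-1, 1]:

#include <algorithm>
#include <cstdint>

// Clamps a model output value to [-1, 1] and rescales it to a displayable
// 8-bit intensity: -1.0 -> 0, +1.0 -> 255.
inline std::uint8_t DenormalizePixel(float v) {
  const float clamped = std::min(1.0f, std::max(-1.0f, v));
  return static_cast<std::uint8_t>((clamped + 1.0f) * 127.5f);
}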
@@ -18,30 +18,18 @@ node {
   output_stream: "throttled_input_video"
 }

-node: {
-  calculator: "ImageTransformationCalculator"
-  input_stream: "IMAGE_GPU:throttled_input_video"
-  output_stream: "IMAGE_GPU:transformed_input_video"
-  node_options: {
-    [type.googleapis.com/mediapipe.ImageTransformationCalculatorOptions] {
-      output_width: 512
-      output_height: 512
-    }
-  }
-}
-
 node: {
   calculator: "ImageToTensorCalculator"
-  input_stream: "IMAGE_GPU:transformed_input_video"
+  input_stream: "IMAGE_GPU:throttled_input_video"
   output_stream: "TENSORS:input_tensors"
   options {
     [mediapipe.ImageToTensorCalculatorOptions.ext] {
-      output_tensor_width: 512
-      output_tensor_height: 512
-      keep_aspect_ratio: true
+      output_tensor_width: 256
+      output_tensor_height: 256
+      keep_aspect_ratio: false
       output_tensor_float_range {
-        min: 0.0
-        max: 255.0
+        min: -1.0
+        max: 1.0
       }
       gpu_origin: TOP_LEFT
       border_mode: BORDER_REPLICATE
@@ -49,32 +37,42 @@ node: {
     }
   }
 }

 node {
   calculator: "InferenceCalculator"
-  input_stream: "TENSORS_GPU:input_tensors"
-  output_stream: "TENSORS_GPU:output_tensors"
+  input_stream: "TENSORS:input_tensors"
+  output_stream: "TENSORS:output_tensors"
   options: {
     [mediapipe.InferenceCalculatorOptions.ext] {
-      model_path: "mediapipe/models/metaf-512-mobile3.tflite"
-      delegate { gpu {} }
+      model_path: "mediapipe/models/model_float32.tflite"
+      delegate { xnnpack {} }
     }
   }
 }

+# Retrieves the size of the input image.
+node {
+  calculator: "ImagePropertiesCalculator"
+  input_stream: "IMAGE_GPU:input_video"
+  output_stream: "SIZE:input_size"
+}
+
 # Processes the output tensors into a segmentation mask that has the same size
 # as the input image into the graph.
 node {
   calculator: "TensorsToSegmentationCalculator"
   input_stream: "TENSORS:output_tensors"
+  input_stream: "OUTPUT_SIZE:input_size"
   output_stream: "MASK:mask_image"
   options: {
     [mediapipe.TensorsToSegmentationCalculatorOptions.ext] {
       activation: NONE
+      gpu_origin: TOP_LEFT
     }
   }
 }

 node: {
   calculator: "FromImageCalculator"
   input_stream: "IMAGE:mask_image"