Merge pull request #4 from HFVladimir/AS-bilateral-calc

feat: added bilateral filter calculator
Vladimir 2022-07-28 01:00:34 +03:00 committed by GitHub
commit 331c01b918
23 changed files with 963 additions and 104 deletions

View File

@@ -1 +1 @@
-5.0.0
+5.2.0

View File

@@ -205,13 +205,13 @@ new_local_repository(
     # For local MacOS builds, the path should point to an opencv@3 installation.
     # If you edit the path here, you will also need to update the corresponding
     # prefix in "opencv_macos.BUILD".
-    path = "/usr/local",
+    path = "/opt/homebrew/Cellar/",
 )
 new_local_repository(
     name = "macos_ffmpeg",
     build_file = "@//third_party:ffmpeg_macos.BUILD",
-    path = "/usr/local/opt/ffmpeg",
+    path = "/opt/homebrew/Cellar/ffmpeg",
 )
 new_local_repository(

BIN mediapipe/.DS_Store (new binary file; contents not shown)

View File

@@ -2,6 +2,7 @@
   "additionalFilePaths" : [
     "/BUILD",
     "mediapipe/BUILD",
+    "mediapipe/examples/ios/beauty/BUILD",
     "mediapipe/examples/ios/common/BUILD",
     "mediapipe/examples/ios/facedetectioncpu/BUILD",
     "mediapipe/examples/ios/facedetectiongpu/BUILD",
@@ -23,6 +24,7 @@
     "mediapipe/objc/testing/app/BUILD"
   ],
   "buildTargets" : [
+    "//mediapipe/examples/ios/beauty:BeautyApp",
     "//mediapipe/examples/ios/facedetectioncpu:FaceDetectionCpuApp",
     "//mediapipe/examples/ios/facedetectiongpu:FaceDetectionGpuApp",
     "//mediapipe/examples/ios/faceeffect:FaceEffectApp",
@@ -93,6 +95,7 @@
     "mediapipe/examples/ios",
     "mediapipe/examples/ios/common",
     "mediapipe/examples/ios/common/Base.lproj",
+    "mediapipe/examples/ios/beauty",
     "mediapipe/examples/ios/facedetectioncpu",
     "mediapipe/examples/ios/facedetectiongpu",
     "mediapipe/examples/ios/faceeffect",

View File

@@ -3,6 +3,9 @@
   "optionSet" : {
     "CLANG_CXX_LANGUAGE_STANDARD" : {
       "p" : "c++14"
+    },
+    "EnvironmentVariables" : {
+      "p" : "MEDIAPIPE_PROFILING=1"
     }
   }
 },
@@ -10,6 +13,7 @@
   "",
   "mediapipe",
   "mediapipe/examples/ios",
+  "mediapipe/examples/ios/beauty",
   "mediapipe/examples/ios/facedetectioncpu",
   "mediapipe/examples/ios/facedetectiongpu",
   "mediapipe/examples/ios/faceeffect",

View File

@@ -69,8 +69,52 @@ cc_library(
 cc_library(
-    name = "smooth_face_calculator",
-    srcs = ["smooth_face_calculator.cc"],
+    name = "smooth_face_calculator1",
+    srcs = ["smooth_face1_calculator.cc"],
+    visibility = ["//visibility:public"],
+    deps = [
+        "//mediapipe/framework:calculator_options_cc_proto",
+        "//mediapipe/framework/formats:image_format_cc_proto",
+        "//mediapipe/util:color_cc_proto",
+        "@com_google_absl//absl/strings",
+        "//mediapipe/framework:calculator_framework",
+        "//mediapipe/framework/formats:image_frame",
+        "//mediapipe/framework/formats:image_frame_opencv",
+        "//mediapipe/framework/formats:video_stream_header",
+        "//mediapipe/framework/port:logging",
+        "//mediapipe/framework/port:opencv_core",
+        "//mediapipe/framework/port:opencv_imgproc",
+        "//mediapipe/framework/port:status",
+        "//mediapipe/framework/port:vector",
+    ],
+    alwayslink = 1,
+)
+
+cc_library(
+    name = "bilateral_filter_calculator",
+    srcs = ["bilateral_filter_calculator.cc"],
+    visibility = ["//visibility:public"],
+    deps = [
+        "//mediapipe/framework:calculator_options_cc_proto",
+        "//mediapipe/framework/formats:image_format_cc_proto",
+        "//mediapipe/util:color_cc_proto",
+        "@com_google_absl//absl/strings",
+        "//mediapipe/framework:calculator_framework",
+        "//mediapipe/framework/formats:image_frame",
+        "//mediapipe/framework/formats:image_frame_opencv",
+        "//mediapipe/framework/formats:video_stream_header",
+        "//mediapipe/framework/port:logging",
+        "//mediapipe/framework/port:opencv_core",
+        "//mediapipe/framework/port:opencv_imgproc",
+        "//mediapipe/framework/port:status",
+        "//mediapipe/framework/port:vector",
+    ],
+    alwayslink = 1,
+)
+
+cc_library(
+    name = "smooth_face_calculator2",
+    srcs = ["smooth_face2_calculator.cc"],
     visibility = ["//visibility:public"],
     deps = [
         "//mediapipe/framework:calculator_options_cc_proto",

View File

@@ -0,0 +1,281 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <math.h>
#include <algorithm>
#include <cmath>
#include <iostream>
#include <memory>
#include "absl/strings/str_cat.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/calculator_options.pb.h"
#include "mediapipe/framework/formats/image_format.pb.h"
#include "mediapipe/framework/formats/image_frame.h"
#include "mediapipe/framework/formats/image_frame_opencv.h"
#include "mediapipe/framework/formats/video_stream_header.h"
#include "mediapipe/framework/port/logging.h"
#include "mediapipe/framework/port/opencv_core_inc.h"
#include "mediapipe/framework/port/opencv_imgproc_inc.h"
#include "mediapipe/framework/port/status.h"
#include "mediapipe/framework/port/logging.h"
#include "mediapipe/framework/port/vector.h"
namespace mediapipe
{
namespace
{
constexpr char kMaskTag[] = "MASK";
constexpr char kFaceBoxTag[] = "FACEBOX";
constexpr char kImageFrameTag[] = "IMAGE";
constexpr char kImageNewTag[] = "IMAGE2";
enum
{
ATTRIB_VERTEX,
ATTRIB_TEXTURE_POSITION,
NUM_ATTRIBUTES
};
inline bool HasImageTag(mediapipe::CalculatorContext *cc) { return false; }
} // namespace
class BilateralCalculator : public CalculatorBase
{
public:
BilateralCalculator() = default;
~BilateralCalculator() override = default;
static absl::Status GetContract(CalculatorContract *cc);
// From Calculator.
absl::Status Open(CalculatorContext *cc) override;
absl::Status Process(CalculatorContext *cc) override;
absl::Status Close(CalculatorContext *cc) override;
private:
absl::Status CreateRenderTargetCpu(CalculatorContext *cc,
std::unique_ptr<cv::Mat> &image_mat,
ImageFormat::Format *target_format);
absl::Status RenderToCpu(
CalculatorContext *cc, const ImageFormat::Format &target_format,
uchar *data_image);
absl::Status BilateralFilter(CalculatorContext *cc,
const std::vector<double> &face_box);
// Indicates if image frame is available as input.
bool image_frame_available_ = false;
int image_width_;
int image_height_;
cv::Mat mat_image_;
std::unique_ptr<cv::Mat> image_mat;
};
REGISTER_CALCULATOR(BilateralCalculator);
absl::Status BilateralCalculator::GetContract(CalculatorContract *cc)
{
CHECK_GE(cc->Inputs().NumEntries(), 1);
if (cc->Inputs().HasTag(kImageFrameTag))
{
cc->Inputs().Tag(kImageFrameTag).Set<ImageFrame>();
CHECK(cc->Outputs().HasTag(kImageNewTag));
}
// Data streams to render.
for (CollectionItemId id = cc->Inputs().BeginId(); id < cc->Inputs().EndId();
++id)
{
auto tag_and_index = cc->Inputs().TagAndIndexFromId(id);
std::string tag = tag_and_index.first;
if (tag == kFaceBoxTag)
{
cc->Inputs().Get(id).Set<std::vector<double>>();
}
else if (tag.empty())
{
// Empty tag defaults to accepting a single object of Mat type.
cc->Inputs().Get(id).Set<cv::Mat>();
}
}
if (cc->Outputs().HasTag(kImageNewTag))
{
cc->Outputs().Tag(kImageNewTag).Set<ImageFrame>();
}
return absl::OkStatus();
}
absl::Status BilateralCalculator::Open(CalculatorContext *cc)
{
cc->SetOffset(TimestampDiff(0));
if (cc->Inputs().HasTag(kImageFrameTag) || HasImageTag(cc))
{
image_frame_available_ = true;
}
// Set the output header based on the input header (if present).
const char *tag = kImageFrameTag;
const char *out_tag = kImageNewTag;
if (image_frame_available_ && !cc->Inputs().Tag(tag).Header().IsEmpty())
{
const auto &input_header =
cc->Inputs().Tag(tag).Header().Get<VideoHeader>();
auto *output_video_header = new VideoHeader(input_header);
cc->Outputs().Tag(out_tag).SetHeader(Adopt(output_video_header));
}
return absl::OkStatus();
}
absl::Status BilateralCalculator::Process(CalculatorContext *cc)
{
if (cc->Inputs().HasTag(kImageFrameTag) &&
cc->Inputs().Tag(kImageFrameTag).IsEmpty())
{
return absl::OkStatus();
}
if (cc->Inputs().HasTag(kFaceBoxTag) &&
cc->Inputs().Tag(kFaceBoxTag).IsEmpty())
{
return absl::OkStatus();
}
// Initialize render target, drawn with OpenCV.
ImageFormat::Format target_format;
if (cc->Outputs().HasTag(kImageNewTag))
{
MP_RETURN_IF_ERROR(CreateRenderTargetCpu(cc, image_mat, &target_format));
}
mat_image_ = *image_mat.get();
image_width_ = image_mat->cols;
image_height_ = image_mat->rows;
const std::vector<double> &face_box =
cc->Inputs().Tag(kFaceBoxTag).Get<std::vector<double>>();
MP_RETURN_IF_ERROR(BilateralFilter(cc, face_box));
// Copy the rendered image to output.
uchar *image_mat_ptr = image_mat->data;
MP_RETURN_IF_ERROR(RenderToCpu(cc, target_format, image_mat_ptr));
return absl::OkStatus();
}
absl::Status BilateralCalculator::Close(CalculatorContext *cc)
{
return absl::OkStatus();
}
absl::Status BilateralCalculator::RenderToCpu(
CalculatorContext *cc, const ImageFormat::Format &target_format,
uchar *data_image)
{
auto output_frame = absl::make_unique<ImageFrame>(
target_format, image_width_, image_height_);
output_frame->CopyPixelData(target_format, image_width_, image_height_, data_image,
ImageFrame::kDefaultAlignmentBoundary);
if (cc->Outputs().HasTag(kImageNewTag))
{
cc->Outputs()
.Tag(kImageNewTag)
.Add(output_frame.release(), cc->InputTimestamp());
}
return absl::OkStatus();
}
absl::Status BilateralCalculator::CreateRenderTargetCpu(
CalculatorContext *cc, std::unique_ptr<cv::Mat> &image_mat,
ImageFormat::Format *target_format)
{
if (image_frame_available_)
{
const auto &input_frame =
cc->Inputs().Tag(kImageFrameTag).Get<ImageFrame>();
int target_mat_type;
switch (input_frame.Format())
{
case ImageFormat::SRGBA:
*target_format = ImageFormat::SRGBA;
target_mat_type = CV_8UC4;
break;
case ImageFormat::SRGB:
*target_format = ImageFormat::SRGB;
target_mat_type = CV_8UC3;
break;
case ImageFormat::GRAY8:
*target_format = ImageFormat::SRGB;
target_mat_type = CV_8UC3;
break;
default:
return absl::UnknownError("Unexpected image frame format.");
break;
}
image_mat = absl::make_unique<cv::Mat>(
input_frame.Height(), input_frame.Width(), target_mat_type);
auto input_mat = formats::MatView(&input_frame);
if (input_frame.Format() == ImageFormat::GRAY8)
{
cv::Mat rgb_mat;
cv::cvtColor(input_mat, rgb_mat, CV_GRAY2RGB);
rgb_mat.copyTo(*image_mat);
}
else
{
input_mat.copyTo(*image_mat);
}
}
else
{
image_mat = absl::make_unique<cv::Mat>(
150, 150, CV_8UC4,
cv::Scalar::all(255));
*target_format = ImageFormat::SRGBA;
}
return absl::OkStatus();
}
absl::Status BilateralCalculator::BilateralFilter(CalculatorContext *cc,
const std::vector<double> &face_box)
{
cv::Mat patch_face = mat_image_(cv::Range(face_box[1], face_box[3]),
cv::Range(face_box[0], face_box[2]));
cv::Mat patch_new, patch_wow;
cv::cvtColor(patch_face, patch_wow, cv::COLOR_RGBA2RGB);
cv::bilateralFilter(patch_wow, patch_new, 12, 50, 50);
cv::cvtColor(patch_new, patch_new, cv::COLOR_RGB2RGBA);
// Write the filtered patch back into the frame; patch_face is a view into
// mat_image_, so without this copy the filter output would be discarded.
patch_new.copyTo(patch_face);
return absl::OkStatus();
}
} // namespace mediapipe
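Note: cv::bilateralFilter only accepts 1- or 3-channel images, which is why the RGBA patch is converted to RGB and back around the filter call. A minimal standalone sketch of the same ROI round trip outside MediaPipe (the file name and box coordinates are illustrative, not from this commit):

#include <vector>
#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>

int main() {
    cv::Mat frame = cv::imread("face.png", cv::IMREAD_COLOR);  // 8-bit, 3 channels
    if (frame.empty()) return 1;
    cv::cvtColor(frame, frame, cv::COLOR_BGR2BGRA);  // 4 channels, like the calculator's frames

    // Face box as {min_x, min_y, max_x, max_y}, as produced upstream.
    const std::vector<double> face_box = {100, 80, 300, 320};
    cv::Mat patch_face = frame(cv::Range((int)face_box[1], (int)face_box[3]),
                               cv::Range((int)face_box[0], (int)face_box[2]));

    // Drop alpha: bilateralFilter supports only 1- or 3-channel inputs.
    cv::Mat patch_rgb, patch_filtered;
    cv::cvtColor(patch_face, patch_rgb, cv::COLOR_BGRA2BGR);
    cv::bilateralFilter(patch_rgb, patch_filtered, /*d=*/12,
                        /*sigmaColor=*/50, /*sigmaSpace=*/50);
    cv::cvtColor(patch_filtered, patch_filtered, cv::COLOR_BGR2BGRA);

    // patch_face is a view into frame, so this copy writes the result back.
    patch_filtered.copyTo(patch_face);
    cv::imwrite("face_smoothed.png", frame);
    return 0;
}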

View File

@@ -17,6 +17,7 @@
 #include <algorithm>
 #include <cmath>
 #include <iostream>
+#include <tuple>
 #include <memory>
@@ -53,11 +54,11 @@ namespace mediapipe
     inline bool HasImageTag(mediapipe::CalculatorContext *cc) { return false; }
 } // namespace
 
-class SmoothFaceCalculator : public CalculatorBase
+class SmoothFaceCalculator1 : public CalculatorBase
 {
 public:
-    SmoothFaceCalculator() = default;
-    ~SmoothFaceCalculator() override = default;
+    SmoothFaceCalculator1() = default;
+    ~SmoothFaceCalculator1() override = default;
 
     static absl::Status GetContract(CalculatorContract *cc);
@@ -76,7 +77,6 @@ namespace mediapipe
                             uchar *data_image);
 
     absl::Status SmoothFace(CalculatorContext *cc,
-                            ImageFormat::Format *target_format,
                             const std::unordered_map<std::string, cv::Mat> &mask_vec,
                             const std::tuple<double, double, double, double> &face_box);
@@ -88,11 +88,13 @@ namespace mediapipe
     int image_width_;
     int image_height_;
     cv::Mat mat_image_;
+    cv::Mat not_full_face;
+    std::vector<double> face_box;
     std::unique_ptr<cv::Mat> image_mat;
 };
 
-REGISTER_CALCULATOR(SmoothFaceCalculator);
+REGISTER_CALCULATOR(SmoothFaceCalculator1);
 
-absl::Status SmoothFaceCalculator::GetContract(CalculatorContract *cc)
+absl::Status SmoothFaceCalculator1::GetContract(CalculatorContract *cc)
 {
     CHECK_GE(cc->Inputs().NumEntries(), 1);
@@ -100,6 +102,7 @@ namespace mediapipe
     {
         cc->Inputs().Tag(kImageFrameTag).Set<ImageFrame>();
         CHECK(cc->Outputs().HasTag(kImageFrameTag));
+        CHECK(cc->Outputs().HasTag(kMaskTag));
     }
 
     // Data streams to render.
@@ -128,11 +131,19 @@ namespace mediapipe
     {
         cc->Outputs().Tag(kImageFrameTag).Set<ImageFrame>();
     }
+    if (cc->Outputs().HasTag(kMaskTag))
+    {
+        cc->Outputs().Tag(kMaskTag).Set<cv::Mat>();
+    }
+    if (cc->Outputs().HasTag(kFaceBoxTag))
+    {
+        cc->Outputs().Tag(kFaceBoxTag).Set<std::vector<double>>();
+    }
 
     return absl::OkStatus();
 }
 
-absl::Status SmoothFaceCalculator::Open(CalculatorContext *cc)
+absl::Status SmoothFaceCalculator1::Open(CalculatorContext *cc)
 {
     cc->SetOffset(TimestampDiff(0));
@@ -140,9 +151,6 @@ namespace mediapipe
     {
         image_frame_available_ = true;
     }
-    else
-    {
-    }
 
     // Set the output header based on the input header (if present).
     const char *tag = kImageFrameTag;
@@ -157,7 +165,7 @@ namespace mediapipe
     return absl::OkStatus();
 }
 
-absl::Status SmoothFaceCalculator::Process(CalculatorContext *cc)
+absl::Status SmoothFaceCalculator1::Process(CalculatorContext *cc)
 {
     if (cc->Inputs().HasTag(kImageFrameTag) &&
         cc->Inputs().Tag(kImageFrameTag).IsEmpty())
@@ -185,14 +193,13 @@ namespace mediapipe
     const std::vector<std::unordered_map<std::string, cv::Mat>> &mask_vec =
         cc->Inputs().Tag(kMaskTag).Get<std::vector<std::unordered_map<std::string, cv::Mat>>>();
 
-    const std::vector<std::tuple<double, double, double, double>> &face_box =
+    const std::vector<std::tuple<double, double, double, double>> &face_boxes =
         cc->Inputs().Tag(kFaceBoxTag).Get<std::vector<std::tuple<double, double, double, double>>>();
 
-    if (mask_vec.size() > 0 && face_box.size() > 0)
+    if (mask_vec.size() > 0 && face_boxes.size() > 0)
     {
         for (int i = 0; i < mask_vec.size(); i++)
-            MP_RETURN_IF_ERROR(SmoothFace(cc, &target_format, mask_vec[i], face_box[i]));
-        }
+            MP_RETURN_IF_ERROR(SmoothFace(cc, mask_vec[i], face_boxes[i]));
     }
 
     // Copy the rendered image to output.
     uchar *image_mat_ptr = image_mat->data;
@@ -201,32 +208,50 @@ namespace mediapipe
     return absl::OkStatus();
 }
 
-absl::Status SmoothFaceCalculator::Close(CalculatorContext *cc)
+absl::Status SmoothFaceCalculator1::Close(CalculatorContext *cc)
 {
     return absl::OkStatus();
 }
 
-absl::Status SmoothFaceCalculator::RenderToCpu(
+absl::Status SmoothFaceCalculator1::RenderToCpu(
     CalculatorContext *cc, const ImageFormat::Format &target_format,
     uchar *data_image)
 {
-    auto output_frame = absl::make_unique<ImageFrame>(
+    auto output_frame1 = absl::make_unique<ImageFrame>(
         target_format, image_width_, image_height_);
 
-    output_frame->CopyPixelData(target_format, image_width_, image_height_, data_image,
-                                ImageFrame::kDefaultAlignmentBoundary);
+    output_frame1->CopyPixelData(target_format, image_width_, image_height_, data_image,
+                                 ImageFrame::kDefaultAlignmentBoundary);
 
     if (cc->Outputs().HasTag(kImageFrameTag))
     {
         cc->Outputs()
             .Tag(kImageFrameTag)
-            .Add(output_frame.release(), cc->InputTimestamp());
+            .Add(output_frame1.release(), cc->InputTimestamp());
     }
+
+    auto output_frame2 = absl::make_unique<cv::Mat>(not_full_face);
+    if (cc->Outputs().HasTag(kMaskTag))
+    {
+        cc->Outputs()
+            .Tag(kMaskTag)
+            .Add(output_frame2.release(), cc->InputTimestamp());
+    }
+
+    auto output_frame3 = absl::make_unique<std::vector<double>>(face_box);
+    if (cc->Outputs().HasTag(kFaceBoxTag))
+    {
+        cc->Outputs()
+            .Tag(kFaceBoxTag)
+            .Add(output_frame3.release(), cc->InputTimestamp());
+    }
 
     return absl::OkStatus();
 }
 
-absl::Status SmoothFaceCalculator::CreateRenderTargetCpu(
+absl::Status SmoothFaceCalculator1::CreateRenderTargetCpu(
     CalculatorContext *cc, std::unique_ptr<cv::Mat> &image_mat,
     ImageFormat::Format *target_format)
 {
@@ -282,9 +307,8 @@ namespace mediapipe
     return absl::OkStatus();
 }
 
-cv::Mat SmoothFaceCalculator::predict_forehead_mask(const std::unordered_map<std::string, cv::Mat> &mask_vec, double face_box_min_y)
+cv::Mat SmoothFaceCalculator1::predict_forehead_mask(const std::unordered_map<std::string, cv::Mat> &mask_vec, double face_box_min_y)
 {
     cv::Mat part_forehead_mask = mask_vec.find("PART_FOREHEAD_B")->second.clone();
     part_forehead_mask.convertTo(part_forehead_mask, CV_32F, 1.0 / 255);
     part_forehead_mask.convertTo(part_forehead_mask, CV_8U);
@@ -353,15 +377,12 @@ namespace mediapipe
         return new_skin_mask;
     }
 
-    absl::Status SmoothFaceCalculator::SmoothFace(CalculatorContext *cc,
-                                                  ImageFormat::Format *target_format,
+    absl::Status SmoothFaceCalculator1::SmoothFace(CalculatorContext *cc,
                                                   const std::unordered_map<std::string, cv::Mat> &mask_vec,
-                                                  const std::tuple<double, double, double, double> &face_box)
+                                                  const std::tuple<double, double, double, double> &face_boxx)
     {
-        cv::Mat mouth_mask, mouth;
-        cv::Mat not_full_face = mask_vec.find("FACE_OVAL")->second.clone() +
-                                predict_forehead_mask(mask_vec, std::get<1>(face_box)) -
+        not_full_face = mask_vec.find("FACE_OVAL")->second.clone() -
+        // predict_forehead_mask(mask_vec, std::get<1>(face_boxx)) -
                                 mask_vec.find("LEFT_EYE")->second.clone() -
                                 mask_vec.find("RIGHT_EYE")->second.clone() -
                                 mask_vec.find("LEFT_BROW")->second.clone() -
@@ -388,27 +409,10 @@ namespace mediapipe
         cv::minMaxLoc(x, &min_x, &max_x);
         cv::minMaxLoc(y, &min_y, &max_y);
 
-        cv::Mat patch_face = mat_image_(cv::Range(min_y, max_y), cv::Range(min_x, max_x));
-        cv::Mat patch_nff = not_full_face(cv::Range(min_y, max_y), cv::Range(min_x, max_x));
-        cv::Mat patch_new, patch_wow;
-        cv::cvtColor(patch_face, patch_wow, cv::COLOR_RGBA2RGB);
-        cv::bilateralFilter(patch_wow, patch_new, 12, 50, 50);
-        cv::Mat patch_new_nff, patch_new_mask, patch, patch_face_nff;
-        patch_new.copyTo(patch_new_nff, patch_nff);
-        patch_face.copyTo(patch_face_nff, patch_nff);
-        cv::cvtColor(patch_face_nff, patch_face_nff, cv::COLOR_RGBA2RGB);
-        patch_new_mask = 0.85 * patch_new_nff + 0.15 * patch_face_nff;
-        patch = cv::min(255, patch_new_mask);
-        cv::cvtColor(patch, patch, cv::COLOR_RGB2RGBA);
-        patch.copyTo(patch_face, patch_nff);
+        face_box.push_back(min_x);
+        face_box.push_back(min_y);
+        face_box.push_back(max_x);
+        face_box.push_back(max_y);
 
         return absl::OkStatus();
     }
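Note: the face box that SmoothFace now exports is simply the bounding box of the mask's nonzero pixels (the min/max of the coordinate Mats above). With OpenCV this can also be computed directly; a minimal sketch under that assumption (the helper name is hypothetical, not part of this commit):

#include <vector>
#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>

// Bounding box {min_x, min_y, max_x, max_y} of a binary mask, equivalent to
// the minMaxLoc-based scan in SmoothFaceCalculator1::SmoothFace.
std::vector<double> MaskFaceBox(const cv::Mat &mask /* CV_8U, nonzero = face */) {
    std::vector<cv::Point> nonzero;
    cv::findNonZero(mask, nonzero);                  // coordinates of all set pixels
    const cv::Rect box = cv::boundingRect(nonzero);  // tight bounding rectangle
    return {static_cast<double>(box.x), static_cast<double>(box.y),
            static_cast<double>(box.x + box.width),
            static_cast<double>(box.y + box.height)};
}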

View File

@@ -0,0 +1,342 @@
// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include <math.h>
#include <algorithm>
#include <cmath>
#include <iostream>
#include <tuple>
#include <memory>
#include "absl/strings/str_cat.h"
#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/calculator_options.pb.h"
#include "mediapipe/framework/formats/image_format.pb.h"
#include "mediapipe/framework/formats/image_frame.h"
#include "mediapipe/framework/formats/image_frame_opencv.h"
#include "mediapipe/framework/formats/video_stream_header.h"
#include "mediapipe/framework/port/logging.h"
#include "mediapipe/framework/port/opencv_core_inc.h"
#include "mediapipe/framework/port/opencv_imgproc_inc.h"
#include "mediapipe/framework/port/status.h"
#include "mediapipe/framework/port/logging.h"
#include "mediapipe/framework/port/vector.h"
namespace mediapipe
{
namespace
{
constexpr char kMaskTag[] = "MASK";
constexpr char kFaceBoxTag[] = "FACEBOX";
constexpr char kImageFrameTag[] = "IMAGE";
constexpr char kImageNewTag[] = "IMAGE2";
enum
{
ATTRIB_VERTEX,
ATTRIB_TEXTURE_POSITION,
NUM_ATTRIBUTES
};
inline bool HasImageTag(mediapipe::CalculatorContext *cc) { return false; }
} // namespace
class SmoothFaceCalculator2 : public CalculatorBase
{
public:
SmoothFaceCalculator2() = default;
~SmoothFaceCalculator2() override = default;
static absl::Status GetContract(CalculatorContract *cc);
// From Calculator.
absl::Status Open(CalculatorContext *cc) override;
absl::Status Process(CalculatorContext *cc) override;
absl::Status Close(CalculatorContext *cc) override;
private:
absl::Status CreateRenderTargetCpu(CalculatorContext *cc,
std::unique_ptr<cv::Mat> &image_mat,
std::unique_ptr<cv::Mat> &new_mat,
ImageFormat::Format *target_format);
absl::Status RenderToCpu(
CalculatorContext *cc, const ImageFormat::Format &target_format,
uchar *data_image);
absl::Status SmoothEnd(CalculatorContext *cc,
const std::vector<double> &face_box);
// Indicates if image frame is available as input.
bool image_frame_available_ = false;
int image_width_;
int image_height_;
cv::Mat mat_image_;
cv::Mat new_image_;
cv::Mat not_full_face;
std::unique_ptr<cv::Mat> image_mat;
std::unique_ptr<cv::Mat> new_mat;
std::unique_ptr<cv::Mat> nff_mat;
};
REGISTER_CALCULATOR(SmoothFaceCalculator2);
absl::Status SmoothFaceCalculator2::GetContract(CalculatorContract *cc)
{
CHECK_GE(cc->Inputs().NumEntries(), 1);
if (cc->Inputs().HasTag(kImageFrameTag))
{
cc->Inputs().Tag(kImageFrameTag).Set<ImageFrame>();
CHECK(cc->Outputs().HasTag(kImageFrameTag));
}
if (cc->Inputs().HasTag(kImageNewTag))
{
cc->Inputs().Tag(kImageNewTag).Set<ImageFrame>();
}
if (cc->Inputs().HasTag(kMaskTag))
{
cc->Inputs().Tag(kMaskTag).Set<cv::Mat>();
}
// Data streams to render.
for (CollectionItemId id = cc->Inputs().BeginId(); id < cc->Inputs().EndId();
++id)
{
auto tag_and_index = cc->Inputs().TagAndIndexFromId(id);
std::string tag = tag_and_index.first;
if (tag == kFaceBoxTag)
{
cc->Inputs().Get(id).Set<std::vector<double>>();
}
else if (tag.empty())
{
// Empty tag defaults to accepting a single object of Mat type.
cc->Inputs().Get(id).Set<cv::Mat>();
}
}
if (cc->Outputs().HasTag(kImageFrameTag))
{
cc->Outputs().Tag(kImageFrameTag).Set<ImageFrame>();
}
return absl::OkStatus();
}
absl::Status SmoothFaceCalculator2::Open(CalculatorContext *cc)
{
cc->SetOffset(TimestampDiff(0));
if (cc->Inputs().HasTag(kImageFrameTag) || HasImageTag(cc))
{
image_frame_available_ = true;
}
// Set the output header based on the input header (if present).
const char *tag = kImageFrameTag;
if (image_frame_available_ && !cc->Inputs().Tag(tag).Header().IsEmpty())
{
const auto &input_header =
cc->Inputs().Tag(tag).Header().Get<VideoHeader>();
auto *output_video_header = new VideoHeader(input_header);
cc->Outputs().Tag(tag).SetHeader(Adopt(output_video_header));
}
return absl::OkStatus();
}
absl::Status SmoothFaceCalculator2::Process(CalculatorContext *cc)
{
if (cc->Inputs().HasTag(kImageFrameTag) &&
cc->Inputs().Tag(kImageFrameTag).IsEmpty())
{
return absl::OkStatus();
}
if (cc->Inputs().HasTag(kImageNewTag) &&
cc->Inputs().Tag(kImageNewTag).IsEmpty())
{
return absl::OkStatus();
}
if (cc->Inputs().HasTag(kMaskTag) &&
cc->Inputs().Tag(kMaskTag).IsEmpty())
{
return absl::OkStatus();
}
if (cc->Inputs().HasTag(kFaceBoxTag) &&
cc->Inputs().Tag(kFaceBoxTag).IsEmpty())
{
return absl::OkStatus();
}
// Initialize render target, drawn with OpenCV.
ImageFormat::Format target_format;
if (cc->Outputs().HasTag(kImageFrameTag))
{
MP_RETURN_IF_ERROR(CreateRenderTargetCpu(cc, image_mat, new_mat, &target_format));
}
not_full_face = cc->Inputs().Tag(kMaskTag).Get<cv::Mat>();
new_image_ = *new_mat.get();
mat_image_ = *image_mat.get();
image_width_ = image_mat->cols;
image_height_ = image_mat->rows;
const std::vector<double> &face_box =
cc->Inputs().Tag(kFaceBoxTag).Get<std::vector<double>>();
MP_RETURN_IF_ERROR(SmoothEnd(cc, face_box));
// Copy the rendered image to output.
uchar *image_mat_ptr = image_mat->data;
MP_RETURN_IF_ERROR(RenderToCpu(cc, target_format, image_mat_ptr));
return absl::OkStatus();
}
absl::Status SmoothFaceCalculator2::Close(CalculatorContext *cc)
{
return absl::OkStatus();
}
absl::Status SmoothFaceCalculator2::RenderToCpu(
CalculatorContext *cc, const ImageFormat::Format &target_format,
uchar *data_image)
{
auto output_frame = absl::make_unique<ImageFrame>(
target_format, image_width_, image_height_);
output_frame->CopyPixelData(target_format, image_width_, image_height_, data_image,
ImageFrame::kDefaultAlignmentBoundary);
if (cc->Outputs().HasTag(kImageFrameTag))
{
cc->Outputs()
.Tag(kImageFrameTag)
.Add(output_frame.release(), cc->InputTimestamp());
}
return absl::OkStatus();
}
absl::Status SmoothFaceCalculator2::CreateRenderTargetCpu(
CalculatorContext *cc,
std::unique_ptr<cv::Mat> &image_mat,
std::unique_ptr<cv::Mat> &new_mat,
ImageFormat::Format *target_format)
{
if (image_frame_available_)
{
const auto &input_frame =
cc->Inputs().Tag(kImageFrameTag).Get<ImageFrame>();
const auto &new_frame =
cc->Inputs().Tag(kImageNewTag).Get<ImageFrame>();
int target_mat_type;
switch (input_frame.Format())
{
case ImageFormat::SRGBA:
*target_format = ImageFormat::SRGBA;
target_mat_type = CV_8UC4;
break;
case ImageFormat::SRGB:
*target_format = ImageFormat::SRGB;
target_mat_type = CV_8UC3;
break;
case ImageFormat::GRAY8:
*target_format = ImageFormat::SRGB;
target_mat_type = CV_8UC3;
break;
default:
return absl::UnknownError("Unexpected image frame format.");
break;
}
image_mat = absl::make_unique<cv::Mat>(
input_frame.Height(), input_frame.Width(), target_mat_type);
auto input_mat = formats::MatView(&input_frame);
new_mat = absl::make_unique<cv::Mat>(
new_frame.Height(), new_frame.Width(), target_mat_type);
auto new_input_mat = formats::MatView(&new_frame);
if (input_frame.Format() == ImageFormat::GRAY8)
{
cv::Mat rgb_mat;
cv::Mat rgb_mat2;
cv::cvtColor(input_mat, rgb_mat, CV_GRAY2RGB);
rgb_mat.copyTo(*image_mat);
cv::cvtColor(new_input_mat, rgb_mat2, CV_GRAY2RGB);
rgb_mat2.copyTo(*new_mat);
}
else
{
input_mat.copyTo(*image_mat);
new_input_mat.copyTo(*new_mat);
}
}
else
{
image_mat = absl::make_unique<cv::Mat>(
150, 150, CV_8UC4,
cv::Scalar::all(255));
nff_mat = absl::make_unique<cv::Mat>(
150, 150, CV_8UC4,
cv::Scalar::all(255));
new_mat = absl::make_unique<cv::Mat>(
150, 150, CV_8UC4,
cv::Scalar::all(255));
*target_format = ImageFormat::SRGBA;
}
return absl::OkStatus();
}
absl::Status SmoothFaceCalculator2::SmoothEnd(CalculatorContext *cc,
const std::vector<double> &face_box)
{
cv::Mat patch_face = mat_image_(cv::Range(face_box[1], face_box[3]),
cv::Range(face_box[0], face_box[2]));
cv::Mat patch_new = new_image_(cv::Range(face_box[1], face_box[3]),
cv::Range(face_box[0], face_box[2]));
cv::Mat patch_nff = not_full_face(cv::Range(face_box[1], face_box[3]),
cv::Range(face_box[0], face_box[2]));
cv::Mat patch_new_nff, patch_new_mask, patch, patch_face_nff;
patch_new.copyTo(patch_new_nff, patch_nff);
patch_face.copyTo(patch_face_nff, patch_nff);
cv::cvtColor(patch_face_nff, patch_face_nff, cv::COLOR_RGBA2RGB);
cv::cvtColor(patch_new_nff, patch_new_nff, cv::COLOR_RGBA2RGB);
patch_new_mask = 0.85 * patch_new_nff + 0.15 * patch_face_nff;
patch = cv::min(255, patch_new_mask);
cv::cvtColor(patch, patch, cv::COLOR_RGB2RGBA);
patch.copyTo(patch_face, patch_nff);
return absl::OkStatus();
}
} // namespace mediapipe
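Note: the 0.85/0.15 blend in SmoothEnd is written with cv::Mat expressions plus an explicit cv::min clamp; cv::addWeighted is the one-call equivalent and saturates 8-bit results automatically. A sketch under that assumption (the helper name is hypothetical, not part of this commit):

#include <opencv2/core.hpp>

// Masked 85/15 blend of a filtered patch over the original, mirroring
// SmoothFaceCalculator2::SmoothEnd. `filtered` and `original` are CV_8UC3
// patches of equal size; `mask` is a CV_8U mask of the face region.
cv::Mat BlendPatch(const cv::Mat &filtered, const cv::Mat &original,
                   const cv::Mat &mask) {
    cv::Mat blended;
    // addWeighted saturates to [0, 255] for 8-bit inputs, so no explicit
    // cv::min(255, ...) clamp is needed here.
    cv::addWeighted(filtered, 0.85, original, 0.15, 0.0, blended);

    // Keep the blend only where the mask is set; original pixels elsewhere.
    cv::Mat out = original.clone();
    blended.copyTo(out, mask);
    return out;
}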

View File

@@ -32,6 +32,14 @@ cc_binary(
     ],
 )
 
+cc_binary(
+    name = "beauty_mobile",
+    deps = [
+        "//mediapipe/examples/desktop:demo_run_graph_main",
+        "//mediapipe/graphs/beauty:mobile_calculators",
+    ],
+)
+
 cc_binary(
     name = "beauty_cpu_single",
     deps = [

View File

@@ -58,6 +58,7 @@ cc_library(
         "//mediapipe/gpu:image_frame_to_gpu_buffer_calculator",
         "//mediapipe/calculators/core:flow_limiter_calculator",
         "//mediapipe/graphs/beauty/subgraphs:face_renderer_cpu",
+        "//mediapipe/graphs/beauty/subgraphs:face_renderer_gpu",
         "//mediapipe/modules/face_landmark:face_landmark_front_gpu",
     ],
 )

View File

@@ -10,6 +10,25 @@ output_stream: "output_video"
 output_stream: "multi_face_landmarks"
 
+profiler_config {
+  trace_enabled: true
+  enable_profiler: true
+  trace_log_interval_count: 200
+  trace_log_path: "/Users/alena/Workdir/mediapipe/logs/beauty/"
+}
+
+# Throttles the images flowing downstream for flow control. It passes through
+# the very first incoming image unaltered, and waits for downstream nodes
+# (calculators and subgraphs) in the graph to finish their tasks before it
+# passes through another image. All images that come in while waiting are
+# dropped, limiting the number of in-flight images in most parts of the graph
+# to 1. This prevents the downstream nodes from queuing up incoming images and
+# data excessively, which leads to increased latency and memory usage, unwanted
+# in real-time mobile applications. It also eliminates unnecessary computation,
+# e.g., the output produced by a node may get dropped downstream if the
+# subsequent nodes are still busy processing previous inputs.
 node {
   calculator: "FlowLimiterCalculator"
   input_stream: "input_video"

View File

@@ -10,6 +10,13 @@ output_stream: "output_video"
 output_stream: "multi_face_landmarks"
 
+profiler_config {
+  trace_enabled: true
+  enable_profiler: true
+  trace_log_interval_count: 200
+  trace_log_path: "/Users/alena/Workdir/mediapipe/logs/beauty/"
+}
+
 node {
   calculator: "FlowLimiterCalculator"
   input_stream: "input_video"

View File

@@ -12,6 +12,12 @@ output_stream: "output_video"
 # landmarks. (std::vector<NormalizedLandmarkList>)
 output_stream: "multi_face_landmarks"
 
+profiler_config {
+  trace_enabled: true
+  enable_profiler: true
+  trace_log_interval_count: 200
+}
+
 # Throttles the images flowing downstream for flow control. It passes through
 # the very first incoming image unaltered, and waits for downstream nodes
 # (calculators and subgraphs) in the graph to finish their tasks before it
@@ -58,6 +64,9 @@ node {
   input_side_packet: "NUM_FACES:num_faces"
   input_side_packet: "WITH_ATTENTION:with_attention"
   output_stream: "LANDMARKS:multi_face_landmarks"
+  output_stream: "ROIS_FROM_LANDMARKS:face_rects_from_landmarks"
+  output_stream: "DETECTIONS:face_detections"
+  output_stream: "ROIS_FROM_DETECTIONS:face_rects_from_detections"
 }
 
 # Subgraph that renders face-landmark annotation onto the input image.
@@ -68,7 +77,6 @@ node {
   output_stream: "IMAGE:output_video_cpu"
 }
 
-# Defines side packets for further use in the graph.
 node {
   calculator: "ImageFrameToGpuBufferCalculator"
   input_stream: "output_video_cpu"

View File

@@ -27,7 +27,9 @@ cc_library(
         "//mediapipe/calculators/core:split_proto_list_calculator",
         "//mediapipe/util:annotation_renderer",
         "//mediapipe/calculators/util:annotation_overlay_calculator",
-        "//mediapipe/calculators/beauty:smooth_face_calculator",
+        "//mediapipe/calculators/beauty:smooth_face_calculator1",
+        "//mediapipe/calculators/beauty:bilateral_filter_calculator",
+        "//mediapipe/calculators/beauty:smooth_face_calculator2",
         "//mediapipe/calculators/beauty:draw_lipstick_calculator",
         "//mediapipe/calculators/beauty:whiten_teeth_calculator",
         "//mediapipe/calculators/util:landmarks_to_render_data_calculator",

View File

@@ -68,12 +68,29 @@ node {
 #Smoothes face on the IMAGE using MASK.
 node {
-  calculator: "SmoothFaceCalculator"
+  calculator: "SmoothFaceCalculator1"
   input_stream: "IMAGE:input_image_2"
   input_stream: "MASK:0:multi_mask"
   input_stream: "FACEBOX:0:multi_face_box"
-  output_stream: "IMAGE:output_image"
+  output_stream: "IMAGE:input_image_3"
+  output_stream: "MASK:not_full_face"
+  output_stream: "FACEBOX:box1"
 }
+
+#Applies the bilateral filter to the face region of the IMAGE.
+node {
+  calculator: "BilateralCalculator"
+  input_stream: "IMAGE:input_image_3"
+  input_stream: "FACEBOX:box1"
+  output_stream: "IMAGE2:input_image_4"
+}
+
+#Blends the filtered face back into the IMAGE using MASK.
+node {
+  calculator: "SmoothFaceCalculator2"
+  input_stream: "IMAGE:input_image_2"
+  input_stream: "IMAGE2:input_image_4"
+  input_stream: "MASK:not_full_face"
+  input_stream: "FACEBOX:box1"
+  output_stream: "IMAGE:output_image"
+}

View File

@@ -0,0 +1,106 @@
# MediaPipe face mesh rendering subgraph.
type: "FaceRendererGpu"
# GPU image. (GpuBuffer)
input_stream: "IMAGE:input_image"
# Collection of detected/predicted faces, each represented as a list of
# landmarks. (std::vector<NormalizedLandmarkList>)
input_stream: "LANDMARKS:multi_face_landmarks"
# Regions of interest calculated based on face detections.
# (std::vector<NormalizedRect>)
input_stream: "NORM_RECTS:rects"
# Detected faces. (std::vector<Detection>)
input_stream: "DETECTIONS:detections"
# GPU image with rendered data. (GpuBuffer)
output_stream: "IMAGE:output_image"
node {
calculator: "ImagePropertiesCalculator"
input_stream: "IMAGE_GPU:input_image"
output_stream: "SIZE:image_size"
}
# Outputs each element of multi_face_landmarks at a fake timestamp for the rest
# of the graph to process. At the end of the loop, outputs the BATCH_END
# timestamp for downstream calculators to inform them that all elements in the
# vector have been processed.
node {
calculator: "BeginLoopNormalizedLandmarkListVectorCalculator"
input_stream: "ITERABLE:multi_face_landmarks"
input_stream: "IMAGE_GPU:input_image"
output_stream: "ITEM:face_landmarks"
output_stream: "IMAGE_GPU:loop_image"
output_stream: "BATCH_END:landmark_timestamp"
}
# Converts landmarks to face part masks.
node {
calculator: "LandmarksToMaskCalculator"
input_stream: "IMAGE_GPU:loop_image"
input_stream: "NORM_LANDMARKS:face_landmarks"
output_stream: "FACEBOX:face_box"
output_stream: "MASK:mask"
}
# Collects a MapMask object for each face into a vector. Upon receiving the
# BATCH_END timestamp, outputs the vector of masks at the BATCH_END
# timestamp.
node {
calculator: "EndLoopMapMaskCalculator"
input_stream: "ITEM:mask"
input_stream: "BATCH_END:landmark_timestamp"
output_stream: "ITERABLE:multi_mask"
}
node {
calculator: "EndLoopFaceBoxCalculator"
input_stream: "ITEM:face_box"
input_stream: "BATCH_END:landmark_timestamp"
output_stream: "ITERABLE:multi_face_box"
}
#Applies lipstick to the face on the IMAGE using MASK.
node {
calculator: "DrawLipstickCalculator"
input_stream: "IMAGE_GPU:input_image"
input_stream: "MASK:0:multi_mask"
output_stream: "IMAGE_GPU:input_image_1"
}
#Whitens teeth of the face on the IMAGE using MASK.
node {
calculator: "WhitenTeethCalculator"
input_stream: "IMAGE_GPU:input_image_1"
input_stream: "MASK:0:multi_mask"
output_stream: "IMAGE_GPU:input_image_2"
}
#Smoothes face on the IMAGE using MASK.
node {
calculator: "SmoothFaceCalculator1"
input_stream: "IMAGE:input_image_2"
input_stream: "MASK:0:multi_mask"
input_stream: "FACEBOX:0:multi_face_box"
output_stream: "IMAGE:input_image_3"
output_stream: "MASK:not_full_face"
output_stream: "FACEBOX:box1"
}
#Applies the bilateral filter to the face region of the IMAGE.
node {
calculator: "BilateralCalculator"
input_stream: "IMAGE:input_image_3"
input_stream: "FACEBOX:box1"
output_stream: "IMAGE2:input_image_4"
}
#Blends the filtered face back into the IMAGE using MASK.
node {
calculator: "SmoothFaceCalculator2"
input_stream: "IMAGE:input_image_2"
input_stream: "IMAGE2:input_image_4"
input_stream: "MASK:not_full_face"
input_stream: "FACEBOX:box1"
output_stream: "IMAGE:output_image"
}

View File

@@ -9,6 +9,13 @@ output_stream: "output_video"
 # landmarks. (std::vector<NormalizedLandmarkList>)
 output_stream: "multi_face_landmarks"
 
+profiler_config {
+  trace_enabled: true
+  enable_profiler: true
+  trace_log_interval_count: 200
+  trace_log_path: "/Users/alena/Workdir/mediapipe/logs"
+}
+
 # Throttles the images flowing downstream for flow control. It passes through
 # the very first incoming image unaltered, and waits for downstream nodes
 # (calculators and subgraphs) in the graph to finish their tasks before it

View File

@@ -12,6 +12,12 @@ output_stream: "output_video"
 # landmarks. (std::vector<NormalizedLandmarkList>)
 output_stream: "multi_face_landmarks"
 
+profiler_config {
+  trace_enabled: true
+  enable_profiler: true
+  trace_log_interval_count: 200
+}
+
 # Throttles the images flowing downstream for flow control. It passes through
 # the very first incoming image unaltered, and waits for downstream nodes
 # (calculators and subgraphs) in the graph to finish their tasks before it

View File

@@ -12,6 +12,12 @@ output_stream: "output_video"
 # landmarks. (std::vector<NormalizedLandmarkList>)
 output_stream: "multi_face_landmarks"
 
+profiler_config {
+  trace_enabled: true
+  enable_profiler: true
+  trace_log_interval_count: 200
+}
+
 # Throttles the images flowing downstream for flow control. It passes through
 # the very first incoming image unaltered, and waits for downstream nodes
 # (calculators and subgraphs) in the graph to finish their tasks before it

View File

@@ -108,7 +108,7 @@ void AnnotationRenderer::RenderDataOnImage(const RenderData &render_data)
     if (render_data.render_annotations().size()){
         DrawLipstick(render_data);
         WhitenTeeth(render_data);
-        // SmoothFace(render_data);
+        //SmoothFace(render_data);
     }
     else
     {
@@ -170,6 +170,7 @@ cv::Mat AnnotationRenderer::FormFacePartMask(std::vector<int> orderList, const R
     }
 
     if (points_array.size() != orderList.size()){
+        mask.convertTo(mask, CV_8U);
        return mask;
     }
@@ -290,7 +291,6 @@ cv::Mat AnnotationRenderer::predict_forehead_mask(const RenderData &render_data,
 void AnnotationRenderer::SmoothFace(const RenderData &render_data)
 {
     cv::Mat not_full_face = cv::Mat(FormFacePartMask(FACE_OVAL, render_data)) +
-                            cv::Mat(predict_forehead_mask(render_data, std::get<1>(GetFaceBox(render_data)))) -
                             cv::Mat(FormFacePartMask(LEFT_EYE, render_data)) -
@@ -324,7 +324,9 @@ void AnnotationRenderer::SmoothFace(const RenderData &render_data)
     cv::Mat patch_nff = not_full_face(cv::Range(min_y, max_y), cv::Range(min_x, max_x));
     cv::Mat patch_new, patch_wow;
     cv::cvtColor(patch_face, patch_wow, cv::COLOR_RGBA2RGB);
+    if (patch_wow.data != patch_new.data) {
     cv::bilateralFilter(patch_wow, patch_new, 12, 50, 50);
+    }
 
     cv::Mat patch_new_nff, patch_new_mask, patch, patch_face_nff;
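Note: the guard added around cv::bilateralFilter above is presumably meant to avoid in-place filtering, which OpenCV documents as unsupported for this filter. Since patch_new is default-constructed, its data pointer is null and the condition is always true here; what actually matters is keeping source and destination distinct. A minimal sketch (the helper name is hypothetical):

#include <opencv2/imgproc.hpp>

// cv::bilateralFilter does not work in place, so filter into a separate
// destination Mat and copy the result back.
void BilateralInPlace(cv::Mat &img /* CV_8UC1 or CV_8UC3 */) {
    cv::Mat dst;
    cv::bilateralFilter(img, dst, /*d=*/12, /*sigmaColor=*/50, /*sigmaSpace=*/50);
    dst.copyTo(img);
}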

View File

@@ -1,41 +1,33 @@
 # Description:
-#   OpenCV libraries for video/image processing on Linux
+#   OpenCV libraries for video/image processing on MacOS
 
 licenses(["notice"])  # BSD license
 
 exports_files(["LICENSE"])
 
-# The following build rule assumes that OpenCV is installed by
-# 'apt-get install libopencv-core-dev libopencv-highgui-dev \'
-# '                libopencv-calib3d-dev libopencv-features2d-dev \'
-# '                libopencv-imgproc-dev libopencv-video-dev'
-# on Debian Buster/Ubuntu 18.04.
-# If you install OpenCV separately, please modify the build rule accordingly.
+load("@bazel_skylib//lib:paths.bzl", "paths")
+
+# The path to OpenCV is a combination of the path set for "macos_opencv"
+# in the WORKSPACE file and the prefix here.
+PREFIX = "opencv@3/3.4.16_3/"
+
 cc_library(
     name = "opencv",
-    hdrs = glob([
-        # For OpenCV 4.x
-        #"include/aarch64-linux-gnu/opencv4/opencv2/cvconfig.h",
-        #"include/arm-linux-gnueabihf/opencv4/opencv2/cvconfig.h",
-        #"include/x86_64-linux-gnu/opencv4/opencv2/cvconfig.h",
-        #"include/opencv4/opencv2/**/*.h*",
-    ]),
-    includes = [
-        # For OpenCV 4.x
-        #"include/aarch64-linux-gnu/opencv4/",
-        #"include/arm-linux-gnueabihf/opencv4/",
-        #"include/x86_64-linux-gnu/opencv4/",
-        #"include/opencv4/",
-    ],
-    linkopts = [
-        "-l:libopencv_core.so",
-        "-l:libopencv_calib3d.so",
-        "-l:libopencv_features2d.so",
-        "-l:libopencv_highgui.so",
-        "-l:libopencv_imgcodecs.so",
-        "-l:libopencv_imgproc.so",
-        "-l:libopencv_video.so",
-        "-l:libopencv_videoio.so",
-    ],
+    srcs = glob(
+        [
+            paths.join(PREFIX, "lib/libopencv_core.dylib"),
+            paths.join(PREFIX, "lib/libopencv_calib3d.dylib"),
+            paths.join(PREFIX, "lib/libopencv_features2d.dylib"),
+            paths.join(PREFIX, "lib/libopencv_highgui.dylib"),
+            paths.join(PREFIX, "lib/libopencv_imgcodecs.dylib"),
+            paths.join(PREFIX, "lib/libopencv_imgproc.dylib"),
+            paths.join(PREFIX, "lib/libopencv_video.dylib"),
+            paths.join(PREFIX, "lib/libopencv_videoio.dylib"),
+        ],
+    ),
+    hdrs = glob([paths.join(PREFIX, "include/opencv2/**/*.h*")]),
+    includes = [paths.join(PREFIX, "include/")],
+    linkstatic = 1,
     visibility = ["//visibility:public"],
 )

View File

@@ -9,7 +9,7 @@ load("@bazel_skylib//lib:paths.bzl", "paths")
 # The path to OpenCV is a combination of the path set for "macos_opencv"
 # in the WORKSPACE file and the prefix here.
-PREFIX = "opt/opencv@3"
+PREFIX = "opencv@3/3.4.16_3/"
 
 cc_library(
     name = "opencv",