initial commit beautifier (disable face_beauty)
parent 4a20e9909d
commit 1a4ba20427
@@ -96,14 +96,14 @@ absl::Status RunMPPGraph() {
       break;
     }
     cv::Mat camera_frame;
-    cv::cvtColor(camera_frame_raw, camera_frame, cv::COLOR_BGR2RGB);
+    cv::cvtColor(camera_frame_raw, camera_frame, cv::COLOR_BGR2RGBA);
     if (!load_video) {
       cv::flip(camera_frame, camera_frame, /*flipcode=HORIZONTAL*/ 1);
     }

     // Wrap Mat into an ImageFrame.
     auto input_frame = absl::make_unique<mediapipe::ImageFrame>(
-        mediapipe::ImageFormat::SRGB, camera_frame.cols, camera_frame.rows,
+        mediapipe::ImageFormat::SRGBA, camera_frame.cols, camera_frame.rows,
         mediapipe::ImageFrame::kDefaultAlignmentBoundary);
     cv::Mat input_frame_mat = mediapipe::formats::MatView(input_frame.get());
     camera_frame.copyTo(input_frame_mat);

@@ -122,7 +122,7 @@ absl::Status RunMPPGraph() {

     // Convert back to opencv for display or saving.
     cv::Mat output_frame_mat = mediapipe::formats::MatView(&output_frame);
-    cv::cvtColor(output_frame_mat, output_frame_mat, cv::COLOR_RGB2BGR);
+    cv::cvtColor(output_frame_mat, output_frame_mat, cv::COLOR_RGBA2BGR);
     if (save_video) {
       if (!writer.isOpened()) {
         LOG(INFO) << "Prepare video writer.";
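Note: the switch from SRGB to SRGBA means every frame pushed into the graph must carry four channels that match the declared format. A minimal sketch of the wrapping step under that assumption, using the same MediaPipe ImageFrame/MatView helpers seen above (the helper name WrapBgrFrameAsSrgba is ours, not part of the commit):

#include <memory>
#include <opencv2/imgproc.hpp>
#include "absl/memory/memory.h"
#include "mediapipe/framework/formats/image_frame.h"
#include "mediapipe/framework/formats/image_frame_opencv.h"

// Converts a BGR capture frame to RGBA and wraps it in an SRGBA ImageFrame,
// mirroring the modified loop body above.
std::unique_ptr<mediapipe::ImageFrame> WrapBgrFrameAsSrgba(const cv::Mat& bgr) {
  cv::Mat rgba;
  cv::cvtColor(bgr, rgba, cv::COLOR_BGR2RGBA);  // 4 channels to match SRGBA.
  auto frame = absl::make_unique<mediapipe::ImageFrame>(
      mediapipe::ImageFormat::SRGBA, rgba.cols, rgba.rows,
      mediapipe::ImageFrame::kDefaultAlignmentBoundary);
  cv::Mat view = mediapipe::formats::MatView(frame.get());
  rgba.copyTo(view);
  return frame;
}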
@@ -55,8 +55,10 @@ cc_library(
 cc_library(
     name = "mobile_calculators",
     deps = [
+        "//mediapipe/gpu:gpu_buffer_to_image_frame_calculator",
+        "//mediapipe/gpu:image_frame_to_gpu_buffer_calculator",
         "//mediapipe/calculators/core:flow_limiter_calculator",
-        "//mediapipe/graphs/face_mesh/subgraphs:face_renderer_gpu",
+        "//mediapipe/graphs/face_mesh/subgraphs:face_renderer_cpu",
         "//mediapipe/modules/face_landmark:face_landmark_front_gpu",
     ],
 )
@@ -44,6 +44,13 @@ node {
   }
 }

+# Converts the throttled GPU buffer into a CPU ImageFrame for the CPU renderer.
+node {
+  calculator: "GpuBufferToImageFrameCalculator"
+  input_stream: "throttled_input_video"
+  output_stream: "throttled_input_video_cpu"
+}
+
 # Subgraph that detects faces and corresponding landmarks.
 node {
   calculator: "FaceLandmarkFrontGpu"

@@ -58,10 +65,17 @@ node {

 # Subgraph that renders face-landmark annotation onto the input image.
 node {
-  calculator: "FaceRendererGpu"
-  input_stream: "IMAGE:throttled_input_video"
+  calculator: "FaceRendererCpu"
+  input_stream: "IMAGE:throttled_input_video_cpu"
   input_stream: "LANDMARKS:multi_face_landmarks"
   input_stream: "NORM_RECTS:face_rects_from_landmarks"
   input_stream: "DETECTIONS:face_detections"
-  output_stream: "IMAGE:output_video"
+  output_stream: "IMAGE:output_video_cpu"
 }
+
+# Converts the rendered CPU image back into a GPU buffer for output.
+node {
+  calculator: "ImageFrameToGpuBufferCalculator"
+  input_stream: "output_video_cpu"
+  output_stream: "output_video"
+}
mediapipe/graphs/face_mesh/face_mesh_mobile_gpu.pbtxt (new file, 67 lines)
@@ -0,0 +1,67 @@
# MediaPipe graph that performs face mesh with TensorFlow Lite on GPU.

# GPU buffer. (GpuBuffer)
input_stream: "input_video"

# Max number of faces to detect/process. (int)
input_side_packet: "num_faces"

# Output image with rendered results. (GpuBuffer)
output_stream: "output_video"
# Collection of detected/processed faces, each represented as a list of
# landmarks. (std::vector<NormalizedLandmarkList>)
output_stream: "multi_face_landmarks"

# Throttles the images flowing downstream for flow control. It passes through
# the very first incoming image unaltered, and waits for downstream nodes
# (calculators and subgraphs) in the graph to finish their tasks before it
# passes through another image. All images that come in while waiting are
# dropped, limiting the number of in-flight images in most parts of the graph to
# 1. This prevents the downstream nodes from queuing up incoming images and data
# excessively, which leads to increased latency and memory usage, unwanted in
# real-time mobile applications. It also eliminates unnecessary computation,
# e.g., the output produced by a node may get dropped downstream if the
# subsequent nodes are still busy processing previous inputs.
node {
  calculator: "FlowLimiterCalculator"
  input_stream: "input_video"
  input_stream: "FINISHED:output_video"
  input_stream_info: {
    tag_index: "FINISHED"
    back_edge: true
  }
  output_stream: "throttled_input_video"
}

# Defines side packets for further use in the graph.
node {
  calculator: "ConstantSidePacketCalculator"
  output_side_packet: "PACKET:with_attention"
  node_options: {
    [type.googleapis.com/mediapipe.ConstantSidePacketCalculatorOptions]: {
      packet { bool_value: true }
    }
  }
}

# Subgraph that detects faces and corresponding landmarks.
node {
  calculator: "FaceLandmarkFrontGpu"
  input_stream: "IMAGE:throttled_input_video"
  input_side_packet: "NUM_FACES:num_faces"
  input_side_packet: "WITH_ATTENTION:with_attention"
  output_stream: "LANDMARKS:multi_face_landmarks"
  output_stream: "ROIS_FROM_LANDMARKS:face_rects_from_landmarks"
  output_stream: "DETECTIONS:face_detections"
  output_stream: "ROIS_FROM_DETECTIONS:face_rects_from_detections"
}

# Subgraph that renders face-landmark annotation onto the input image.
node {
  calculator: "FaceRendererGpu"
  input_stream: "IMAGE:throttled_input_video"
  input_stream: "LANDMARKS:multi_face_landmarks"
  input_stream: "NORM_RECTS:face_rects_from_landmarks"
  input_stream: "DETECTIONS:face_detections"
  output_stream: "IMAGE:output_video"
}
@@ -18,6 +18,7 @@

 #include <algorithm>
 #include <cmath>
+//#include <android/log.h>

 #include "mediapipe/framework/port/logging.h"
 #include "mediapipe/framework/port/vector.h"
@@ -38,6 +39,21 @@ using Rectangle = RenderAnnotation::Rectangle;
 using RoundedRectangle = RenderAnnotation::RoundedRectangle;
 using Text = RenderAnnotation::Text;

+static const std::vector<int> UPPER_LIP = {61, 185, 40, 39, 37, 0, 267, 269, 270, 409, 291, 308, 415, 310, 311, 312, 13, 82, 81, 80, 191, 78};
+static const std::vector<int> LOWER_LIP = {61, 78, 95, 88, 178, 87, 14, 317, 402, 318, 324, 308, 291, 375, 321, 405, 314, 17, 84, 181, 91, 146};
+static const std::vector<int> FACE_OVAL = {10, 338, 338, 297, 297, 332, 332, 284, 284, 251, 251, 389, 389, 356, 356,
+                                           454, 454, 323, 323, 361, 361, 288, 288, 397, 397, 365, 365, 379, 379, 378,
+                                           378, 400, 400, 377, 377, 152, 152, 148, 148, 176, 176, 149, 149, 150, 150,
+                                           136, 136, 172, 172, 58, 58, 132, 132, 93, 93, 234, 234, 127, 127, 162, 162,
+                                           21, 21, 54, 54, 103, 103, 67, 67, 109, 109, 10};
+static const std::vector<int> MOUTH_INSIDE = {78, 191, 80, 81, 13, 312, 311, 310, 415, 308, 324, 318, 402, 317, 14, 87, 178, 88, 95};
+static const std::vector<int> PART_FOREHEAD_B = {21, 54, 103, 67, 109, 10, 338, 297, 332, 284, 251, 301, 293, 334, 296, 336, 9, 107, 66, 105, 63, 71};
+static const std::vector<int> LEFT_EYE = {130, 33, 246, 161, 160, 159, 157, 173, 133, 155, 154, 153, 145, 144, 163, 7};
+static const std::vector<int> RIGHT_EYE = {362, 398, 384, 385, 386, 387, 388, 466, 263, 249, 390, 373, 374, 380, 381, 382};
+static const std::vector<int> LIPS = {61, 185, 40, 39, 37, 0, 267, 269, 270, 409, 291, 375, 321, 405, 314, 17, 84, 181, 91, 146};
+static const std::vector<int> LEFT_BROW = {70, 63, 105, 66, 107, 55, 65, 52, 53, 46};
+static const std::vector<int> RIGHT_BROW = {336, 296, 334, 293, 301, 300, 283, 282, 295, 285};
+
 int ClampThickness(int thickness) {
   constexpr int kMaxThickness = 32767;  // OpenCV MAX_THICKNESS
   return std::clamp(thickness, 1, kMaxThickness);
@@ -87,34 +103,16 @@ void cv_line2(cv::Mat& img, const cv::Point& start, const cv::Point& end,

 }  // namespace

-void AnnotationRenderer::RenderDataOnImage(const RenderData& render_data) {
-  for (const auto& annotation : render_data.render_annotations()) {
-    if (annotation.data_case() == RenderAnnotation::kRectangle) {
-      DrawRectangle(annotation);
-    } else if (annotation.data_case() == RenderAnnotation::kRoundedRectangle) {
-      DrawRoundedRectangle(annotation);
-    } else if (annotation.data_case() == RenderAnnotation::kFilledRectangle) {
-      DrawFilledRectangle(annotation);
-    } else if (annotation.data_case() ==
-               RenderAnnotation::kFilledRoundedRectangle) {
-      DrawFilledRoundedRectangle(annotation);
-    } else if (annotation.data_case() == RenderAnnotation::kOval) {
-      DrawOval(annotation);
-    } else if (annotation.data_case() == RenderAnnotation::kFilledOval) {
-      DrawFilledOval(annotation);
-    } else if (annotation.data_case() == RenderAnnotation::kText) {
-      DrawText(annotation);
-    } else if (annotation.data_case() == RenderAnnotation::kPoint) {
-      DrawPoint(annotation);
-    } else if (annotation.data_case() == RenderAnnotation::kLine) {
-      DrawLine(annotation);
-    } else if (annotation.data_case() == RenderAnnotation::kGradientLine) {
-      DrawGradientLine(annotation);
-    } else if (annotation.data_case() == RenderAnnotation::kArrow) {
-      DrawArrow(annotation);
-    } else {
-      LOG(FATAL) << "Unknown annotation type: " << annotation.data_case();
-    }
-  }
-}
+void AnnotationRenderer::RenderDataOnImage(const RenderData &render_data)
+{
+  if (render_data.render_annotations().size()) {
+    DrawLipstick(render_data);
+    WhitenTeeth(render_data);
+    // smooth_face(render_data);
+  }
+  else
+  {
+    LOG(FATAL) << "Unknown annotation type: ";
+  }
+}
@@ -137,6 +135,405 @@ void AnnotationRenderer::SetScaleFactor(float scale_factor) {
   if (scale_factor > 0.0f) scale_factor_ = std::min(scale_factor, 1.0f);
 }

cv::Mat AnnotationRenderer::FormFacePartMask(std::vector<int> orderList, const RenderData &render_data)
{
  int c = 0;
  std::vector<cv::Point> points_array;
  cv::Mat mask = cv::Mat::zeros(mat_image_.size(), CV_32F);
  for (auto order : orderList)
  {
    c = 0;
    for (auto &annotation : render_data.render_annotations())
    {
      if (annotation.data_case() == RenderAnnotation::kPoint)
      {
        if (order == c)
        {
          const auto &point = annotation.point();
          int x = -1;
          int y = -1;
          if (point.normalized())
          {
            CHECK(NormalizedtoPixelCoordinates(point.x(), point.y(), image_width_,
                                               image_height_, &x, &y));
          }
          else
          {
            x = static_cast<int>(point.x() * scale_factor_);
            y = static_cast<int>(point.y() * scale_factor_);
          }
          points_array.push_back(cv::Point(x, y));
        }
        c += 1;
      }
    }
  }

  if (points_array.size() != orderList.size())
  {
    return mask;
  }

  std::vector<std::vector<cv::Point>> points_array_wrapper;
  points_array_wrapper.push_back(points_array);

  cv::fillPoly(mask, points_array_wrapper, cv::Scalar::all(255), cv::LINE_AA);
  mask.convertTo(mask, CV_8U);

  return mask;
}
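FormFacePartMask rescans the full annotation list once per requested index, which is O(|orderList| x |annotations|). A hedged sketch of an equivalent single-pass variant, using the same RenderData point accessors and the NormalizedtoPixelCoordinates helper that appear in this file (the helper name CollectLandmarkPixels and the omission of scale_factor_ are our assumptions, not part of the commit):

// Collect all kPoint annotations once, in encounter order, as pixel coords.
// Returns an empty vector if any point fails to convert.
static std::vector<cv::Point> CollectLandmarkPixels(const RenderData& render_data,
                                                    int image_width, int image_height) {
  std::vector<cv::Point> pixels;
  for (const auto& annotation : render_data.render_annotations()) {
    if (annotation.data_case() != RenderAnnotation::kPoint) continue;
    const auto& point = annotation.point();
    int x = -1, y = -1;
    if (point.normalized()) {
      if (!NormalizedtoPixelCoordinates(point.x(), point.y(), image_width,
                                        image_height, &x, &y)) {
        return {};
      }
    } else {
      x = static_cast<int>(point.x());
      y = static_cast<int>(point.y());
    }
    pixels.emplace_back(x, y);
  }
  return pixels;
}

// A part mask is then one fillPoly over the selected indices, e.g.:
//   std::vector<cv::Point> poly;
//   for (int idx : LIPS) poly.push_back(pixels[idx]);
//   cv::fillPoly(mask, std::vector<std::vector<cv::Point>>{poly}, cv::Scalar::all(255));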
std::tuple<double, double, double, double> AnnotationRenderer::GetFaceBox(const RenderData &render_data)
{
  std::vector<int> x_s, y_s;
  double box_min_y, box_max_y, box_max_x, box_min_x;

  for (auto &annotation : render_data.render_annotations())
  {
    if (annotation.data_case() == RenderAnnotation::kPoint)
    {
      const auto &point = annotation.point();
      int x = -1;
      int y = -1;
      if (point.normalized())
      {
        CHECK(NormalizedtoPixelCoordinates(point.x(), point.y(), image_width_,
                                           image_height_, &x, &y));
      }
      else
      {
        x = static_cast<int32>(point.x() * scale_factor_);
        y = static_cast<int32>(point.y() * scale_factor_);
      }
      // Collect the pixel coordinates of every landmark point.
      x_s.push_back(x);
      y_s.push_back(y);
    }
  }
  cv::minMaxLoc(y_s, &box_min_y, &box_max_y);
  cv::minMaxLoc(x_s, &box_min_x, &box_max_x);
  box_min_y = box_min_y * 0.9;

  return std::make_tuple(box_min_x, box_min_y, box_max_x, box_max_y);
}
cv::Mat AnnotationRenderer::predict_forehead_mask(const RenderData &render_data, double face_box_min_y)
{
  cv::Mat part_forehead_mask = AnnotationRenderer::FormFacePartMask(PART_FOREHEAD_B, render_data);
  part_forehead_mask.convertTo(part_forehead_mask, CV_32F, 1.0 / 255);
  part_forehead_mask.convertTo(part_forehead_mask, CV_8U);

  cv::Mat image_sm, image_sm_hsv, skinMask;

  cv::resize(mat_image_, image_sm, cv::Size(mat_image_.size().width, mat_image_.size().height));
  cv::cvtColor(image_sm, image_sm_hsv, cv::COLOR_BGR2HSV);

  std::vector<int> x, y;
  std::vector<cv::Point> location;
  // std::cout << "R (numpy) = " << std::endl << cv::format(part_forehead_mask, cv::Formatter::FMT_NUMPY) << std::endl << std::endl;

  cv::Vec3d hsv_min, hsv_max;

  // Per-channel min/max of the HSV values under the known-skin forehead patch.
  std::vector<cv::Mat> channels(3);
  cv::split(image_sm_hsv, channels);
  std::vector<std::vector<double>> minx(3), maxx(3);
  int c = 0;
  for (auto ch : channels)
  {
    cv::Mat row, mask_row;
    double min, max;
    for (int i = 0; i < ch.rows; i++)
    {
      row = ch.row(i);
      mask_row = part_forehead_mask.row(i);
      cv::minMaxLoc(row, &min, &max, 0, 0, mask_row);
      minx[c].push_back(min);
      maxx[c].push_back(max);
    }
    c++;
  }
  for (int i = 0; i < 3; i++)
  {
    hsv_min[i] = *std::min_element(minx[i].begin(), minx[i].end());
  }
  for (int i = 0; i < 3; i++)
  {
    hsv_max[i] = *std::max_element(maxx[i].begin(), maxx[i].end());
  }

  // Select every pixel whose HSV value falls into the sampled skin range.
  cv::Mat _forehead_kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(1, 1));
  cv::inRange(image_sm_hsv, hsv_min, hsv_max, skinMask);
  cv::erode(skinMask, skinMask, _forehead_kernel, cv::Point(-1, -1), 2);
  cv::dilate(skinMask, skinMask, _forehead_kernel, cv::Point(-1, -1), 2);
  skinMask.convertTo(skinMask, CV_8U, 1.0 / 255);

  cv::findNonZero(skinMask, location);

  double max_part_f, x_min_part, x_max_part;

  for (auto &i : location)
  {
    x.push_back(i.x);
    y.push_back(i.y);
  }

  cv::minMaxLoc(y, NULL, &max_part_f);
  cv::minMaxLoc(x, &x_min_part, &x_max_part);

  // Keep only the band between the top of the face box and the lowest skin pixel.
  cv::Mat new_skin_mask = cv::Mat::zeros(skinMask.size(), CV_8U);
  cv::Range rows(static_cast<int>(face_box_min_y), static_cast<int>(max_part_f));
  cv::Range cols(static_cast<int>(x_min_part), static_cast<int>(x_max_part));
  skinMask(rows, cols).copyTo(new_skin_mask(rows, cols));

  return new_skin_mask;
}
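The forehead prediction above samples the HSV range of a patch that is already known to be skin (PART_FOREHEAD_B) and then selects every pixel in that range. The per-row loop can be collapsed into a single masked minMaxLoc per channel; a condensed sketch of the same idea (SkinRangeMask is a hypothetical name, not part of the commit):

// Returns a binary mask of pixels whose HSV values fall inside the range
// observed under sample_mask (a CV_8U mask over a known-skin region).
cv::Mat SkinRangeMask(const cv::Mat& bgr, const cv::Mat& sample_mask) {
  cv::Mat hsv;
  cv::cvtColor(bgr, hsv, cv::COLOR_BGR2HSV);
  std::vector<cv::Mat> ch(3);
  cv::split(hsv, ch);
  cv::Scalar lo, hi;
  for (int i = 0; i < 3; ++i) {
    double mn, mx;
    cv::minMaxLoc(ch[i], &mn, &mx, nullptr, nullptr, sample_mask);  // masked min/max
    lo[i] = mn;
    hi[i] = mx;
  }
  cv::Mat skin;
  cv::inRange(hsv, lo, hi, skin);  // 255 where HSV lies inside the sampled range
  return skin;
}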
void AnnotationRenderer::smooth_face(const RenderData &render_data)
{
  cv::Mat not_full_face = cv::Mat(FormFacePartMask(FACE_OVAL, render_data)) +
                          cv::Mat(predict_forehead_mask(render_data, std::get<1>(GetFaceBox(render_data)))) -
                          cv::Mat(FormFacePartMask(LEFT_EYE, render_data)) -
                          cv::Mat(FormFacePartMask(RIGHT_EYE, render_data)) -
                          cv::Mat(FormFacePartMask(LEFT_BROW, render_data)) -
                          cv::Mat(FormFacePartMask(RIGHT_BROW, render_data)) -
                          cv::Mat(FormFacePartMask(LIPS, render_data));

  cv::resize(not_full_face,
             not_full_face,
             mat_image_.size(), 0, 0,
             cv::INTER_LINEAR);

  std::vector<int> x, y;
  std::vector<cv::Point> location;

  cv::findNonZero(not_full_face, location);

  double min_y, min_x, max_x, max_y;

  for (auto &i : location)
  {
    x.push_back(i.x);
    y.push_back(i.y);
  }

  cv::minMaxLoc(x, &min_x, &max_x);
  cv::minMaxLoc(y, &min_y, &max_y);

  cv::Mat patch_face = mat_image_(cv::Range(min_y, max_y), cv::Range(min_x, max_x));
  cv::Mat patch_nff = not_full_face(cv::Range(min_y, max_y), cv::Range(min_x, max_x));
  cv::Mat patch_new, patch_wow;
  cv::cvtColor(patch_face, patch_wow, cv::COLOR_RGBA2RGB);
  cv::bilateralFilter(patch_wow, patch_new, 12, 50, 50);

  cv::Mat patch_new_nff, patch_new_mask, patch, patch_face_nff;

  patch_new.copyTo(patch_new_nff, patch_nff);

  patch_face.copyTo(patch_face_nff, patch_nff);
  cv::cvtColor(patch_face_nff, patch_face_nff, cv::COLOR_RGBA2RGB);

  patch_new_mask = 0.85 * patch_new_nff + 0.15 * patch_face_nff;

  patch = cv::min(255, patch_new_mask);
  patch.copyTo(patch_face, patch_nff);
}
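The smoothing step (left disabled in RenderDataOnImage) boils down to: bilateral-filter the face crop, then blend 85% filtered with 15% original inside the face mask. A minimal sketch of that blend, assuming a CV_8UC3 crop and a CV_8U mask (SmoothInsideMask is a hypothetical helper, not part of the commit):

// Bilateral-filters `crop` and writes the 0.85/0.15 blend back, but only
// where `mask` is non-zero.
void SmoothInsideMask(cv::Mat& crop, const cv::Mat& mask) {
  cv::Mat filtered;
  cv::bilateralFilter(crop, filtered, /*d=*/12, /*sigmaColor=*/50, /*sigmaSpace=*/50);
  cv::Mat blended;
  cv::addWeighted(filtered, 0.85, crop, 0.15, 0.0, blended);
  blended.copyTo(crop, mask);  // pixels outside the mask keep the original texture
}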
cv::Mat matmul32F(cv::Mat& bgr, cv::Mat& mask)
{
  assert(bgr.type() == CV_32FC3 && mask.type() == CV_32FC1 && bgr.size() == mask.size());
  int H = bgr.rows;
  int W = bgr.cols;
  cv::Mat dst(bgr.size(), bgr.type());

  if (bgr.isContinuous() && mask.isContinuous())
  {
    // Treat the whole image as a single row when the data is contiguous.
    W *= H;
    H = 1;
  }

  for (int i = 0; i < H; ++i)
  {
    // Use row pointers so that non-contiguous ROIs are addressed correctly.
    float* pdst = dst.ptr<float>(i);
    float* pbgr = bgr.ptr<float>(i);
    float* pmask = mask.ptr<float>(i);
    for (int j = 0; j < W; ++j)
    {
      (*pdst++) = (*pbgr++) * (*pmask);
      (*pdst++) = (*pbgr++) * (*pmask);
      (*pdst++) = (*pbgr++) * (*pmask);
      pmask += 1;
    }
  }
  return dst;
}
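matmul32F is a hand-rolled per-pixel multiply of a 3-channel float image by a 1-channel float mask. The same result can be obtained from OpenCV primitives, which also handle ROIs; a small sketch of that design choice (MultiplyByMask is our name, not part of the commit):

// dst(p) = bgr(p) * mask(p) per channel; bgr is CV_32FC3, mask is CV_32FC1.
cv::Mat MultiplyByMask(const cv::Mat& bgr, const cv::Mat& mask) {
  cv::Mat mask3;
  cv::merge(std::vector<cv::Mat>{mask, mask, mask}, mask3);  // replicate to 3 channels
  cv::Mat dst;
  cv::multiply(bgr, mask3, dst);  // element-wise product, same type as the inputs
  return dst;
}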
void AnnotationRenderer::DrawLipstick(const RenderData &render_data)
{
  cv::Mat spec_lips_mask, upper_lips_mask, lower_lips_mask;
  spec_lips_mask = cv::Mat::zeros(mat_image_.size(), CV_32F);
  upper_lips_mask = cv::Mat::zeros(mat_image_.size(), CV_32F);
  lower_lips_mask = cv::Mat::zeros(mat_image_.size(), CV_32F);

  upper_lips_mask = AnnotationRenderer::FormFacePartMask(UPPER_LIP, render_data);
  lower_lips_mask = AnnotationRenderer::FormFacePartMask(LOWER_LIP, render_data);

  spec_lips_mask = upper_lips_mask + lower_lips_mask;
  spec_lips_mask.convertTo(spec_lips_mask, CV_8U);
  cv::resize(spec_lips_mask, spec_lips_mask, mat_image_.size(), 0, 0, cv::INTER_LINEAR);

  std::vector<int> x, y;
  std::vector<cv::Point> location;

  cv::findNonZero(spec_lips_mask, location);

  for (auto &i : location)
  {
    x.push_back(i.x);
    y.push_back(i.y);
  }

  if (!(x.empty()) && !(y.empty()))
  {
    double min_y, max_y, max_x, min_x;
    cv::minMaxLoc(y, &min_y, &max_y);
    cv::minMaxLoc(x, &min_x, &max_x);

    cv::Mat lips_crop_mask = spec_lips_mask(cv::Range(min_y, max_y), cv::Range(min_x, max_x));
    lips_crop_mask.convertTo(lips_crop_mask, CV_32F, 1.0 / 255);

    cv::Mat lips_crop = cv::Mat(mat_image_(cv::Range(min_y, max_y), cv::Range(min_x, max_x)));

    // Build a red RGBA overlay whose alpha channel follows the lips mask.
    cv::Mat lips_blend = cv::Mat(lips_crop.size().height, lips_crop.size().width, CV_32FC4, cv::Scalar(255.0, 0, 0, 0));

    std::vector<cv::Mat> channels(4);

    cv::split(lips_blend, channels);
    channels[3] = lips_crop_mask * 20;

    cv::merge(channels, lips_blend);

    cv::Mat tmp_lip_mask;

    channels[3].convertTo(tmp_lip_mask, CV_32FC1, 1.0 / 255);

    // overlay * alpha
    cv::split(lips_blend, channels);
    for (auto &ch : channels)
    {
      cv::multiply(ch, tmp_lip_mask, ch, 1.0, CV_32F);
    }
    cv::merge(channels, lips_blend);

    cv::subtract(1.0, tmp_lip_mask, tmp_lip_mask, cv::noArray(), CV_32F);

    // original * (1 - alpha)
    cv::split(lips_crop, channels);
    for (auto &ch : channels)
    {
      cv::multiply(ch, tmp_lip_mask, ch, 1.0, CV_8U);
    }
    cv::merge(channels, lips_crop);

    cv::add(lips_blend, lips_crop, lips_crop, cv::noArray(), CV_8U);

    lips_crop = cv::abs(lips_crop);

    cvtColor(lips_crop, lips_crop, cv::COLOR_RGBA2RGB);

    // Composite the tinted lips back into the working image.
    cv::Mat slice = mat_image_(cv::Range(min_y, max_y), cv::Range(min_x, max_x));
    lips_crop_mask.convertTo(lips_crop_mask, slice.type());
    slice.copyTo(slice, lips_crop_mask);

    cv::Mat masked_lips_crop, slice_gray;
    lips_crop.copyTo(masked_lips_crop, lips_crop_mask);

    cv::cvtColor(masked_lips_crop, slice_gray, cv::COLOR_RGB2GRAY);

    masked_lips_crop.copyTo(slice, slice_gray);
  }
}
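The channel arithmetic above is a manual alpha composite: the red overlay is weighted by a small alpha derived from the lips mask (roughly 20/255, about 0.08) and the original pixels by one minus that alpha. A compact sketch of the formula with hypothetical 32-bit float inputs (BlendOverlay is not part of the commit):

// out = alpha * overlay + (1 - alpha) * base, applied per channel.
// base, overlay: CV_32FC3; alpha: CV_32FC1 with values in [0, 1].
cv::Mat BlendOverlay(const cv::Mat& base, const cv::Mat& overlay, const cv::Mat& alpha) {
  cv::Mat alpha3;
  cv::merge(std::vector<cv::Mat>{alpha, alpha, alpha}, alpha3);
  cv::Mat inv_alpha3 = cv::Scalar::all(1.0) - alpha3;
  return overlay.mul(alpha3) + base.mul(inv_alpha3);
}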
void AnnotationRenderer::WhitenTeeth(const RenderData &render_data)
{
  cv::Mat mouth_mask, mouth;

  mouth_mask = cv::Mat::zeros(mat_image_.size(), CV_32F);
  mouth_mask = AnnotationRenderer::FormFacePartMask(MOUTH_INSIDE, render_data);

  cv::resize(mouth_mask, mouth, mat_image_.size(), 0, 0, cv::INTER_LINEAR);
  mouth.convertTo(mouth, CV_8U);

  std::vector<int> x, y;
  std::vector<cv::Point> location;

  cv::findNonZero(mouth, location);

  for (auto &i : location)
  {
    x.push_back(i.x);
    y.push_back(i.y);
  }

  if (!(x.empty()) && !(y.empty()))
  {
    double mouth_min_y, mouth_max_y, mouth_max_x, mouth_min_x;
    cv::minMaxLoc(y, &mouth_min_y, &mouth_max_y);
    cv::minMaxLoc(x, &mouth_min_x, &mouth_max_x);
    double mh = mouth_max_y - mouth_min_y;
    double mw = mouth_max_x - mouth_min_x;
    cv::Mat mouth_crop_mask;
    mouth.convertTo(mouth, CV_32F, 1.0 / 255);
    if (mh / mw > 0.17)
    {
      mouth_min_y = static_cast<int>(std::max(mouth_min_y - mh * 0.1, 0.0));
      mouth_max_y = static_cast<int>(std::min(mouth_max_y + mh * 0.1, (double)image_height_));
      mouth_min_x = static_cast<int>(std::max(mouth_min_x - mw * 0.1, 0.0));
      mouth_max_x = static_cast<int>(std::min(mouth_max_x + mw * 0.1, (double)image_width_));
      mouth_crop_mask = mouth(cv::Range(mouth_min_y, mouth_max_y), cv::Range(mouth_min_x, mouth_max_x));
      cv::Mat img_hsv, tmp_mask, img_hls;
      cv::cvtColor(mat_image_(cv::Range(mouth_min_y, mouth_max_y), cv::Range(mouth_min_x, mouth_max_x)), img_hsv,
                   cv::COLOR_RGBA2RGB);
      cv::cvtColor(img_hsv, img_hsv,
                   cv::COLOR_RGB2HSV);

      cv::Mat _mouth_erode_kernel = cv::getStructuringElement(
          cv::MORPH_ELLIPSE, cv::Size(7, 7));

      cv::erode(mouth_crop_mask * 255, tmp_mask, _mouth_erode_kernel, cv::Point(-1, -1), 3);
      cv::GaussianBlur(tmp_mask, tmp_mask, cv::Size(51, 51), 0);

      img_hsv.convertTo(img_hsv, CV_8U);

      // Reduce saturation (HSV) inside the softened mouth mask.
      std::vector<cv::Mat> channels(3);
      cv::split(img_hsv, channels);

      cv::Mat tmp;
      cv::multiply(channels[1], tmp_mask, tmp, 0.3, CV_8U);
      cv::subtract(channels[1], tmp, channels[1], cv::noArray(), CV_8U);
      channels[1] = cv::min(255, channels[1]);
      cv::merge(channels, img_hsv);

      // Then raise lightness (HLS) to brighten the teeth.
      cv::cvtColor(img_hsv, img_hsv, cv::COLOR_HSV2RGB);
      cv::cvtColor(img_hsv, img_hls, cv::COLOR_RGB2HLS);

      cv::split(img_hls, channels);
      cv::multiply(channels[1], tmp_mask, tmp, 0.3, CV_8U);
      cv::add(channels[1], tmp, channels[1], cv::noArray(), CV_8U);
      channels[1] = cv::min(255, channels[1]);
      cv::merge(channels, img_hls);

      cv::cvtColor(img_hls, img_hls, cv::COLOR_HLS2RGB);
      cv::cvtColor(img_hls, img_hls, cv::COLOR_RGB2RGBA);
      // std::cout << "R (numpy) = " << std::endl << cv::format(img_hls, cv::Formatter::FMT_NUMPY) << std::endl << std::endl;

      cv::Mat slice = mat_image_(cv::Range(mouth_min_y, mouth_max_y), cv::Range(mouth_min_x, mouth_max_x));
      img_hls.copyTo(slice);
    }
  }
}
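In short, the whitening applies, inside the blurred mouth mask m (values in [0, 1]), a desaturation in HSV followed by a lightness boost in HLS. A hedged sketch of the per-channel update on a CV_8UC3 RGB crop with a CV_32F mask, mirroring the arithmetic above (WhitenCrop is our name, not part of the commit):

// S' = S - 0.3 * m * S  (HSV saturation); L' = L + 0.3 * m * L  (HLS lightness).
void WhitenCrop(cv::Mat& rgb, const cv::Mat& mask01) {
  cv::Mat hsv, hls, tmp;
  std::vector<cv::Mat> ch;

  cv::cvtColor(rgb, hsv, cv::COLOR_RGB2HSV);
  cv::split(hsv, ch);
  cv::multiply(ch[1], mask01, tmp, 0.3, CV_8U);            // 0.3 * m * S
  cv::subtract(ch[1], tmp, ch[1], cv::noArray(), CV_8U);   // desaturate
  cv::merge(ch, hsv);
  cv::cvtColor(hsv, rgb, cv::COLOR_HSV2RGB);

  cv::cvtColor(rgb, hls, cv::COLOR_RGB2HLS);
  cv::split(hls, ch);
  cv::multiply(ch[1], mask01, tmp, 0.3, CV_8U);            // 0.3 * m * L
  cv::add(ch[1], tmp, ch[1], cv::noArray(), CV_8U);        // brighten
  cv::merge(ch, hls);
  cv::cvtColor(hls, rgb, cv::COLOR_HLS2RGB);
}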
void AnnotationRenderer::DrawRectangle(const RenderAnnotation& annotation) {
  int left = -1;
  int top = -1;
@@ -125,6 +125,27 @@ class AnnotationRenderer {
   // Computes the font scale from font_face, size and thickness.
   double ComputeFontScale(int font_face, int font_size, int thickness);

+  // Draws lipstick on the image based on the face mesh lip landmarks.
+  void DrawLipstick(const RenderData& render_data);
+
+  // Whitens the teeth by desaturating and brightening the mouth region.
+  void WhitenTeeth(const RenderData& render_data);
+
+  // Smooths facial skin with a bilateral filter (currently disabled).
+  void smooth_face(const RenderData& render_data);
+
+  // Builds a binary mask for the face part described by the landmark indices.
+  cv::Mat FormFacePartMask(std::vector<int> orderList, const RenderData& render_data);
+
+  // Multiplies each channel of a CV_32FC3 image by a CV_32FC1 mask.
+  cv::Mat matmul32F(cv::Mat& bgr, cv::Mat& mask);
+
+  // Estimates a forehead mask by extending the detected skin tone upward.
+  cv::Mat predict_forehead_mask(const RenderData& render_data, double face_box_min_y);
+
+  // Returns the face bounding box as (min_x, min_y, max_x, max_y).
+  std::tuple<double, double, double, double> GetFaceBox(const RenderData& render_data);
+
   // Width and Height of the image (in pixels).
   int image_width_ = -1;
   int image_height_ = -1;