Merge pull request #5 from HFVladimir/revert-3-AS-beautifier

Revert "As beautifier"

commit 672cd3a689
@@ -1 +1 @@
-5.2.0
+5.0.0
@@ -205,13 +205,13 @@ new_local_repository(
     # For local MacOS builds, the path should point to an opencv@3 installation.
     # If you edit the path here, you will also need to update the corresponding
     # prefix in "opencv_macos.BUILD".
-    path = "/opt/homebrew/Cellar/",
+    path = "/usr/local",
 )
 
 new_local_repository(
     name = "macos_ffmpeg",
     build_file = "@//third_party:ffmpeg_macos.BUILD",
-    path = "/opt/homebrew/Cellar/ffmpeg",
+    path = "/usr/local/opt/ffmpeg",
 )
 
 new_local_repository(
mediapipe/.DS_Store (vendored): binary file not shown
@@ -2,7 +2,6 @@
   "additionalFilePaths" : [
     "/BUILD",
     "mediapipe/BUILD",
-    "mediapipe/examples/ios/beauty/BUILD",
     "mediapipe/examples/ios/common/BUILD",
     "mediapipe/examples/ios/facedetectioncpu/BUILD",
     "mediapipe/examples/ios/facedetectiongpu/BUILD",
@@ -24,7 +23,6 @@
     "mediapipe/objc/testing/app/BUILD"
   ],
   "buildTargets" : [
-    "//mediapipe/examples/ios/beauty:BeautyApp",
     "//mediapipe/examples/ios/facedetectioncpu:FaceDetectionCpuApp",
     "//mediapipe/examples/ios/facedetectiongpu:FaceDetectionGpuApp",
     "//mediapipe/examples/ios/faceeffect:FaceEffectApp",
@@ -95,7 +93,6 @@
     "mediapipe/examples/ios",
     "mediapipe/examples/ios/common",
     "mediapipe/examples/ios/common/Base.lproj",
-    "mediapipe/examples/ios/beauty",
    "mediapipe/examples/ios/facedetectioncpu",
     "mediapipe/examples/ios/facedetectiongpu",
     "mediapipe/examples/ios/faceeffect",
@@ -3,9 +3,6 @@
   "optionSet" : {
     "CLANG_CXX_LANGUAGE_STANDARD" : {
       "p" : "c++14"
-    },
-    "EnvironmentVariables" : {
-      "p" : "MEDIAPIPE_PROFILING=1"
     }
   }
 },
@@ -13,7 +10,6 @@
     "",
     "mediapipe",
     "mediapipe/examples/ios",
-    "mediapipe/examples/ios/beauty",
     "mediapipe/examples/ios/facedetectioncpu",
     "mediapipe/examples/ios/facedetectiongpu",
     "mediapipe/examples/ios/faceeffect",
@@ -284,6 +284,7 @@ namespace mediapipe
 
 cv::Mat SmoothFaceCalculator::predict_forehead_mask(const std::unordered_map<std::string, cv::Mat> &mask_vec, double face_box_min_y)
 {
+
   cv::Mat part_forehead_mask = mask_vec.find("PART_FOREHEAD_B")->second.clone();
   part_forehead_mask.convertTo(part_forehead_mask, CV_32F, 1.0 / 255);
   part_forehead_mask.convertTo(part_forehead_mask, CV_8U);
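Note (reviewer sketch, not part of the diff): the two convertTo calls kept as context above turn a 0/255 byte mask into a 0/1 byte mask before it enters the mask arithmetic further down. A minimal, self-contained illustration, assuming only OpenCV core:

    #include <opencv2/core.hpp>
    #include <cassert>

    int main() {
      // A mask as produced upstream: background 0, foreground 255 (CV_8U).
      cv::Mat mask(4, 4, CV_8U, cv::Scalar(255));
      mask.convertTo(mask, CV_32F, 1.0 / 255);  // scale to 0.0 / 1.0
      mask.convertTo(mask, CV_8U);              // back to bytes: values 0 / 1
      assert(mask.at<uchar>(0, 0) == 1);
      return 0;
    }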
@@ -359,8 +360,8 @@ namespace mediapipe
 {
   cv::Mat mouth_mask, mouth;
 
-  cv::Mat not_full_face = mask_vec.find("FACE_OVAL")->second.clone() -
-                          // predict_forehead_mask(mask_vec, std::get<1>(face_box)) -
+  cv::Mat not_full_face = mask_vec.find("FACE_OVAL")->second.clone() +
+                          predict_forehead_mask(mask_vec, std::get<1>(face_box)) -
                           mask_vec.find("LEFT_EYE")->second.clone() -
                           mask_vec.find("RIGHT_EYE")->second.clone() -
                           mask_vec.find("LEFT_BROW")->second.clone() -
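Note (reviewer sketch, not from the repository): the restored lines build the smoothing region by adding the predicted forehead mask to the face oval and then subtracting the features that must stay sharp. A minimal sketch of that cv::Mat arithmetic with illustrative names (the real code pulls each mask out of mask_vec):

    #include <opencv2/core.hpp>

    // All masks are same-size single-channel CV_8U images; the saturating
    // cv::Mat operators keep the result in range.
    cv::Mat ComposeSkinMask(const cv::Mat &face_oval, const cv::Mat &forehead,
                            const cv::Mat &left_eye, const cv::Mat &right_eye,
                            const cv::Mat &left_brow, const cv::Mat &right_brow) {
      cv::Mat skin = face_oval + forehead;  // widen the oval upward
      skin = skin - left_eye - right_eye;   // carve out regions to keep sharp
      skin = skin - left_brow - right_brow;
      return skin;
    }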
@@ -393,7 +394,6 @@ namespace mediapipe
   cv::Mat patch_new, patch_wow;
   cv::cvtColor(patch_face, patch_wow, cv::COLOR_RGBA2RGB);
   cv::bilateralFilter(patch_wow, patch_new, 12, 50, 50);
-  //patch_wow.copyTo(patch_new);
 
   cv::Mat patch_new_nff, patch_new_mask, patch, patch_face_nff;
 
@@ -32,14 +32,6 @@ cc_binary(
     ],
 )
 
-cc_binary(
-    name = "beauty_mobile",
-    deps = [
-        "//mediapipe/examples/desktop:demo_run_graph_main",
-        "//mediapipe/graphs/beauty:mobile_calculators",
-    ],
-)
-
 cc_binary(
     name = "beauty_cpu_single",
     deps = [
@@ -58,7 +58,6 @@ cc_library(
         "//mediapipe/gpu:image_frame_to_gpu_buffer_calculator",
         "//mediapipe/calculators/core:flow_limiter_calculator",
         "//mediapipe/graphs/beauty/subgraphs:face_renderer_cpu",
-        "//mediapipe/graphs/beauty/subgraphs:face_renderer_gpu",
         "//mediapipe/modules/face_landmark:face_landmark_front_gpu",
     ],
 )
@@ -10,25 +10,6 @@ output_stream: "output_video"
 output_stream: "multi_face_landmarks"
 
 
-profiler_config {
-  trace_enabled: true
-  enable_profiler: true
-  trace_log_interval_count: 200
-  trace_log_path: "/Users/alena/Workdir/mediapipe/logs/beauty/"
-}
-
-# Throttles the images flowing downstream for flow control. It passes through
-# the very first incoming image unaltered, and waits for downstream nodes
-# (calculators and subgraphs) in the graph to finish their tasks before it
-# passes through another image. All images that come in while waiting are
-# dropped, limiting the number of in-flight images in most part of the graph to
-# 1. This prevents the downstream nodes from queuing up incoming images and data
-# excessively, which leads to increased latency and memory usage, unwanted in
-# real-time mobile applications. It also eliminates unnecessarily computation,
-# e.g., the output produced by a node may get dropped downstream if the
-# subsequent nodes are still busy processing previous inputs.
-
-
 node {
   calculator: "FlowLimiterCalculator"
   input_stream: "input_video"
@@ -10,14 +10,6 @@ output_stream: "output_video"
 output_stream: "multi_face_landmarks"
 
 
-profiler_config {
-  trace_enabled: true
-  enable_profiler: true
-  trace_log_interval_count: 200
-  trace_log_path: "/Users/alena/Workdir/mediapipe/logs/beauty/"
-}
-
-
 node {
   calculator: "FlowLimiterCalculator"
   input_stream: "input_video"
@@ -12,12 +12,6 @@ output_stream: "output_video"
 # landmarks. (std::vector<NormalizedLandmarkList>)
 output_stream: "multi_face_landmarks"
 
-profiler_config {
-  trace_enabled: true
-  enable_profiler: true
-  trace_log_interval_count: 200
-}
-
 # Throttles the images flowing downstream for flow control. It passes through
 # the very first incoming image unaltered, and waits for downstream nodes
 # (calculators and subgraphs) in the graph to finish their tasks before it
@@ -64,9 +58,6 @@ node {
   input_side_packet: "NUM_FACES:num_faces"
   input_side_packet: "WITH_ATTENTION:with_attention"
   output_stream: "LANDMARKS:multi_face_landmarks"
-  output_stream: "ROIS_FROM_LANDMARKS:face_rects_from_landmarks"
-  output_stream: "DETECTIONS:face_detections"
-  output_stream: "ROIS_FROM_DETECTIONS:face_rects_from_detections"
 }
 
 # Subgraph that renders face-landmark annotation onto the input image.
@@ -77,6 +68,7 @@ node {
   output_stream: "IMAGE:output_video_cpu"
 }
 
+# Defines side packets for further use in the graph.
 node {
   calculator: "ImageFrameToGpuBufferCalculator"
   input_stream: "output_video_cpu"
@@ -74,3 +74,6 @@ node {
   input_stream: "FACEBOX:0:multi_face_box"
   output_stream: "IMAGE:output_image"
 }
+
+
+
@@ -1,86 +0,0 @@
-# MediaPipe face mesh rendering subgraph.
-
-type: "FaceRendererGpu"
-
-# GPU image. (GpuBuffer)
-input_stream: "IMAGE:input_image"
-# Collection of detected/predicted faces, each represented as a list of
-# landmarks. (std::vector<NormalizedLandmarkList>)
-input_stream: "LANDMARKS:multi_face_landmarks"
-# Regions of interest calculated based on palm detections.
-# (std::vector<NormalizedRect>)
-input_stream: "NORM_RECTS:rects"
-# Detected palms. (std::vector<Detection>)
-input_stream: "DETECTIONS:detections"
-
-# GPU image with rendered data. (GpuBuffer)
-output_stream: "IMAGE:output_image"
-
-node {
-  calculator: "ImagePropertiesCalculator"
-  input_stream: "IMAGE_GPU:input_image"
-  output_stream: "SIZE:image_size"
-}
-# Outputs each element of multi_face_landmarks at a fake timestamp for the rest
-# of the graph to process. At the end of the loop, outputs the BATCH_END
-# timestamp for downstream calculators to inform them that all elements in the
-# vector have been processed.
-node {
-  calculator: "BeginLoopNormalizedLandmarkListVectorCalculator"
-  input_stream: "ITERABLE:multi_face_landmarks"
-  input_stream: "IMAGE_GPU:input_image"
-  output_stream: "ITEM:face_landmarks"
-  output_stream: "IMAGE_GPU:loop_image"
-  output_stream: "BATCH_END:landmark_timestamp"
-}
-
-# Converts landmarks to face part masks.
-node {
-  calculator: "LandmarksToMaskCalculator"
-  input_stream: "IMAGE_GPU:loop_image"
-  input_stream: "NORM_LANDMARKS:face_landmarks"
-  output_stream: "FACEBOX:face_box"
-  output_stream: "MASK:mask"
-}
-
-# Collects a MapMask object for each hand into a vector. Upon receiving the
-# BATCH_END timestamp, outputs the vector of RenderData at the BATCH_END
-# timestamp.
-node {
-  calculator: "EndLoopMapMaskCalculator"
-  input_stream: "ITEM:mask"
-  input_stream: "BATCH_END:landmark_timestamp"
-  output_stream: "ITERABLE:multi_mask"
-}
-
-node {
-  calculator: "EndLoopFaceBoxCalculator"
-  input_stream: "ITEM:face_box"
-  input_stream: "BATCH_END:landmark_timestamp"
-  output_stream: "ITERABLE:multi_face_box"
-}
-
-#Applies lipstick to the face on the IMAGE using MASK.
-node {
-  calculator: "DrawLipstickCalculator"
-  input_stream: "IMAGE_GPU:input_image"
-  input_stream: "MASK:0:multi_mask"
-  output_stream: "IMAGE_GPU:input_image_1"
-}
-
-#Whitens teeth of the face on the IMAGE using MASK.
-node {
-  calculator: "WhitenTeethCalculator"
-  input_stream: "IMAGE_GPU:input_image_1"
-  input_stream: "MASK:0:multi_mask"
-  output_stream: "IMAGE_GPU:input_image_2"
-}
-
-#Smoothes face on the IMAGE using MASK.
-node {
-  calculator: "SmoothFaceCalculator"
-  input_stream: "IMAGE_GPU:input_image_2"
-  input_stream: "MASK:0:multi_mask"
-  input_stream: "FACEBOX:0:multi_face_box"
-  output_stream: "IMAGE_GPU:output_image"
-}
@@ -9,13 +9,6 @@ output_stream: "output_video"
 # landmarks. (std::vector<NormalizedLandmarkList>)
 output_stream: "multi_face_landmarks"
 
-profiler_config {
-  trace_enabled: true
-  enable_profiler: true
-  trace_log_interval_count: 200
-  trace_log_path: "/Users/alena/Workdir/mediapipe/logs"
-}
-
 # Throttles the images flowing downstream for flow control. It passes through
 # the very first incoming image unaltered, and waits for downstream nodes
 # (calculators and subgraphs) in the graph to finish their tasks before it
@@ -12,12 +12,6 @@ output_stream: "output_video"
 # landmarks. (std::vector<NormalizedLandmarkList>)
 output_stream: "multi_face_landmarks"
 
-profiler_config {
-  trace_enabled: true
-  enable_profiler: true
-  trace_log_interval_count: 200
-}
-
 # Throttles the images flowing downstream for flow control. It passes through
 # the very first incoming image unaltered, and waits for downstream nodes
 # (calculators and subgraphs) in the graph to finish their tasks before it
@@ -12,12 +12,6 @@ output_stream: "output_video"
 # landmarks. (std::vector<NormalizedLandmarkList>)
 output_stream: "multi_face_landmarks"
 
-profiler_config {
-  trace_enabled: true
-  enable_profiler: true
-  trace_log_interval_count: 200
-}
-
 # Throttles the images flowing downstream for flow control. It passes through
 # the very first incoming image unaltered, and waits for downstream nodes
 # (calculators and subgraphs) in the graph to finish their tasks before it
@@ -170,7 +170,6 @@ cv::Mat AnnotationRenderer::FormFacePartMask(std::vector<int> orderList, const R
 }
 
 if (points_array.size() != orderList.size()){
-  mask.convertTo(mask, CV_8U);
   return mask;
 }
 
@@ -291,6 +290,7 @@ cv::Mat AnnotationRenderer::predict_forehead_mask(const RenderData &render_data,
 
 void AnnotationRenderer::SmoothFace(const RenderData &render_data)
 {
+
   cv::Mat not_full_face = cv::Mat(FormFacePartMask(FACE_OVAL, render_data)) +
                           cv::Mat(predict_forehead_mask(render_data, std::get<1>(GetFaceBox(render_data)))) -
                           cv::Mat(FormFacePartMask(LEFT_EYE, render_data)) -
@@ -324,9 +324,7 @@ void AnnotationRenderer::SmoothFace(const RenderData &render_data)
   cv::Mat patch_nff = not_full_face(cv::Range(min_y, max_y), cv::Range(min_x, max_x));
   cv::Mat patch_new, patch_wow;
   cv::cvtColor(patch_face, patch_wow, cv::COLOR_RGBA2RGB);
-  if (patch_wow.data != patch_new.data) {
   cv::bilateralFilter(patch_wow, patch_new, 12, 50, 50);
-  }
 
   cv::Mat patch_new_nff, patch_new_mask, patch, patch_face_nff;
 
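Note (reviewer sketch, not part of the diff): cv::bilateralFilter does not support in-place filtering, so the source and destination Mats must be distinct; the removed guard compared the two buffers before calling it. A standalone example of the call that remains, assuming OpenCV core and imgproc:

    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>

    int main() {
      // 3-channel 8-bit input; dst is a separate Mat allocated by the filter.
      cv::Mat src(64, 64, CV_8UC3, cv::Scalar(128, 128, 128));
      cv::Mat dst;
      cv::bilateralFilter(src, dst, /*d=*/12, /*sigmaColor=*/50, /*sigmaSpace=*/50);
      return 0;
    }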
third_party/opencv_linux.BUILD (vendored, 52 lines changed)
@@ -1,33 +1,41 @@
 # Description:
-#   OpenCV libraries for video/image processing on MacOS
+#   OpenCV libraries for video/image processing on Linux
 
 licenses(["notice"])  # BSD license
 
 exports_files(["LICENSE"])
 
-load("@bazel_skylib//lib:paths.bzl", "paths")
-
-# The path to OpenCV is a combination of the path set for "macos_opencv"
-# in the WORKSPACE file and the prefix here.
-PREFIX = "opencv@3/3.4.16_3/"
-
+# The following build rule assumes that OpenCV is installed by
+# 'apt-get install libopencv-core-dev libopencv-highgui-dev \'
+# '                libopencv-calib3d-dev libopencv-features2d-dev \'
+# '                libopencv-imgproc-dev libopencv-video-dev'
+# on Debian Buster/Ubuntu 18.04.
+# If you install OpenCV separately, please modify the build rule accordingly.
 cc_library(
     name = "opencv",
-    srcs = glob(
-        [
-            paths.join(PREFIX, "lib/libopencv_core.dylib"),
-            paths.join(PREFIX, "lib/libopencv_calib3d.dylib"),
-            paths.join(PREFIX, "lib/libopencv_features2d.dylib"),
-            paths.join(PREFIX, "lib/libopencv_highgui.dylib"),
-            paths.join(PREFIX, "lib/libopencv_imgcodecs.dylib"),
-            paths.join(PREFIX, "lib/libopencv_imgproc.dylib"),
-            paths.join(PREFIX, "lib/libopencv_video.dylib"),
-            paths.join(PREFIX, "lib/libopencv_videoio.dylib"),
-        ],
-    ),
-    hdrs = glob([paths.join(PREFIX, "include/opencv2/**/*.h*")]),
-    includes = [paths.join(PREFIX, "include/")],
-    linkstatic = 1,
+    hdrs = glob([
+        # For OpenCV 4.x
+        #"include/aarch64-linux-gnu/opencv4/opencv2/cvconfig.h",
+        #"include/arm-linux-gnueabihf/opencv4/opencv2/cvconfig.h",
+        #"include/x86_64-linux-gnu/opencv4/opencv2/cvconfig.h",
+        #"include/opencv4/opencv2/**/*.h*",
+    ]),
+    includes = [
+        # For OpenCV 4.x
+        #"include/aarch64-linux-gnu/opencv4/",
+        #"include/arm-linux-gnueabihf/opencv4/",
+        #"include/x86_64-linux-gnu/opencv4/",
+        #"include/opencv4/",
+    ],
+    linkopts = [
+        "-l:libopencv_core.so",
+        "-l:libopencv_calib3d.so",
+        "-l:libopencv_features2d.so",
+        "-l:libopencv_highgui.so",
+        "-l:libopencv_imgcodecs.so",
+        "-l:libopencv_imgproc.so",
+        "-l:libopencv_video.so",
+        "-l:libopencv_videoio.so",
+    ],
     visibility = ["//visibility:public"],
 )
third_party/opencv_macos.BUILD (vendored, 2 lines changed)
@@ -9,7 +9,7 @@ load("@bazel_skylib//lib:paths.bzl", "paths")
 
 # The path to OpenCV is a combination of the path set for "macos_opencv"
 # in the WORKSPACE file and the prefix here.
-PREFIX = "opencv@3/3.4.16_3/"
+PREFIX = "opt/opencv@3"
 
 cc_library(
     name = "opencv",