feat: fixed facemesh app and added profiling

Алена Пивень 2022-07-18 21:33:44 -05:00
parent dfce9154f1
commit 205dae176b
19 changed files with 230 additions and 96 deletions

View File

@@ -1 +1 @@
-5.0.0
+5.2.0

View File

@@ -205,13 +205,13 @@ new_local_repository(
     # For local MacOS builds, the path should point to an opencv@3 installation.
     # If you edit the path here, you will also need to update the corresponding
     # prefix in "opencv_macos.BUILD".
-    path = "/usr/local",
+    path = "/opt/homebrew/Cellar/",
 )

 new_local_repository(
     name = "macos_ffmpeg",
     build_file = "@//third_party:ffmpeg_macos.BUILD",
-    path = "/usr/local/opt/ffmpeg",
+    path = "/opt/homebrew/Cellar/ffmpeg",
 )

 new_local_repository(

BIN
mediapipe/.DS_Store vendored Normal file

Binary file not shown.

View File

@@ -2,6 +2,7 @@
   "additionalFilePaths" : [
     "/BUILD",
     "mediapipe/BUILD",
+    "mediapipe/examples/ios/beauty/BUILD",
     "mediapipe/examples/ios/common/BUILD",
     "mediapipe/examples/ios/facedetectioncpu/BUILD",
     "mediapipe/examples/ios/facedetectiongpu/BUILD",
@@ -23,6 +24,7 @@
     "mediapipe/objc/testing/app/BUILD"
   ],
   "buildTargets" : [
+    "//mediapipe/examples/ios/beauty:BeautyApp",
     "//mediapipe/examples/ios/facedetectioncpu:FaceDetectionCpuApp",
     "//mediapipe/examples/ios/facedetectiongpu:FaceDetectionGpuApp",
     "//mediapipe/examples/ios/faceeffect:FaceEffectApp",
@@ -93,6 +95,7 @@
     "mediapipe/examples/ios",
     "mediapipe/examples/ios/common",
     "mediapipe/examples/ios/common/Base.lproj",
+    "mediapipe/examples/ios/beauty",
     "mediapipe/examples/ios/facedetectioncpu",
     "mediapipe/examples/ios/facedetectiongpu",
     "mediapipe/examples/ios/faceeffect",

View File

@@ -3,6 +3,9 @@
     "optionSet" : {
       "CLANG_CXX_LANGUAGE_STANDARD" : {
         "p" : "c++14"
+      },
+      "EnvironmentVariables" : {
+        "p" : "MEDIAPIPE_PROFILING=1"
       }
     }
   },
@@ -10,6 +13,7 @@
     "",
     "mediapipe",
     "mediapipe/examples/ios",
+    "mediapipe/examples/ios/beauty",
     "mediapipe/examples/ios/facedetectioncpu",
     "mediapipe/examples/ios/facedetectiongpu",
     "mediapipe/examples/ios/faceeffect",

View File

@@ -295,65 +295,65 @@ namespace mediapipe
         part_forehead_mask.convertTo(part_forehead_mask, CV_8U);
         cv::Mat image_sm, image_sm_hsv, skinMask;
-
-        cv::resize(mat_image_, image_sm, cv::Size(image_width_, image_height_));
-        cv::cvtColor(image_sm, image_sm_hsv, cv::COLOR_BGR2HSV);
-
-        std::vector<int> x, y;
-        std::vector<cv::Point> location;
-
+        //
+        // cv::resize(mat_image_, image_sm, cv::Size(image_width_, image_height_));
+        // cv::cvtColor(image_sm, image_sm_hsv, cv::COLOR_BGR2HSV);
+        //
+        // std::vector<int> x, y;
+        // std::vector<cv::Point> location;
+        //
         cv::Vec3d hsv_min, hsv_max;
-
-        std::vector<cv::Mat> channels(3);
-        cv::split(image_sm_hsv, channels);
-        std::vector<std::vector<double>> minx(3), maxx(3);
-        int c = 0;
-        for (auto ch : channels)
-        {
-            cv::Mat row, mask_row;
-            double min, max;
-            for (int i = 0; i < ch.rows; i++)
-            {
-                row = ch.row(i);
-                mask_row = part_forehead_mask.row(i);
-                cv::minMaxLoc(row, &min, &max, 0, 0, mask_row);
-                minx[c].push_back(min);
-                maxx[c].push_back(max);
-            }
-            c++;
-        }
-        for (int i = 0; i < 3; i++)
-        {
-            hsv_min[i] = *std::min_element(minx[i].begin(), minx[i].end());
-        }
-        for (int i = 0; i < 3; i++)
-        {
-            hsv_max[i] = *std::max_element(maxx[i].begin(), maxx[i].end());
-        }
-
+        //
+        // std::vector<cv::Mat> channels(3);
+        // cv::split(image_sm_hsv, channels);
+        // std::vector<std::vector<double>> minx(3), maxx(3);
+        // int c = 0;
+        // for (auto ch : channels)
+        // {
+        //     cv::Mat row, mask_row;
+        //     double min, max;
+        //     for (int i = 0; i < ch.rows; i++)
+        //     {
+        //         row = ch.row(i);
+        //         mask_row = part_forehead_mask.row(i);
+        //         cv::minMaxLoc(row, &min, &max, 0, 0, mask_row);
+        //         minx[c].push_back(min);
+        //         maxx[c].push_back(max);
+        //     }
+        //     c++;
+        // }
+        // for (int i = 0; i < 3; i++)
+        // {
+        //     hsv_min[i] = *std::min_element(minx[i].begin(), minx[i].end());
+        // }
+        // for (int i = 0; i < 3; i++)
+        // {
+        //     hsv_max[i] = *std::max_element(maxx[i].begin(), maxx[i].end());
+        // }
+        //
         cv::Mat _forehead_kernel = cv::getStructuringElement(cv::MORPH_ELLIPSE, cv::Size(1, 1));
-        cv::inRange(image_sm_hsv, hsv_min, hsv_max, skinMask);
+        // cv::inRange(image_sm_hsv, hsv_min, hsv_max, skinMask);
         cv::erode(skinMask, skinMask, _forehead_kernel, cv::Point(-1, -1), 2);
         cv::dilate(skinMask, skinMask, _forehead_kernel, cv::Point(-1, -1), 2);
-        skinMask.convertTo(skinMask, CV_8U, 1.0 / 255);
-
-        cv::findNonZero(skinMask, location);
-
-        double max_part_f, x_min_part, x_max_part;
-
-        for (auto &i : location)
-        {
-            x.push_back(i.x);
-            y.push_back(i.y);
-        }
-
-        cv::minMaxLoc(y, NULL, &max_part_f);
-        cv::minMaxLoc(x, &x_min_part, &x_max_part);
+        // skinMask.convertTo(skinMask, CV_8U, 1.0 / 255);
+        //
+        // cv::findNonZero(skinMask, location);
+        //
+        // double max_part_f, x_min_part, x_max_part;
+        //
+        // for (auto &i : location)
+        // {
+        //     x.push_back(i.x);
+        //     y.push_back(i.y);
+        // }
+        //
+        // cv::minMaxLoc(y, NULL, &max_part_f);
+        // cv::minMaxLoc(x, &x_min_part, &x_max_part);
         cv::Mat new_skin_mask = cv::Mat::zeros(skinMask.size(), CV_8U);
-        new_skin_mask(cv::Range(face_box_min_y, max_part_f), cv::Range(x_min_part, x_max_part)) =
-            skinMask(cv::Range(face_box_min_y, max_part_f), cv::Range(x_min_part, x_max_part));
+        // new_skin_mask(cv::Range(face_box_min_y, max_part_f), cv::Range(x_min_part, x_max_part)) =
+        //     skinMask(cv::Range(face_box_min_y, max_part_f), cv::Range(x_min_part, x_max_part));
         return new_skin_mask;
     }
@@ -399,6 +399,7 @@ namespace mediapipe
         cv::Mat patch_new, patch_wow;
         cv::cvtColor(patch_face, patch_wow, cv::COLOR_RGBA2RGB);
         cv::bilateralFilter(patch_wow, patch_new, 12, 50, 50);
+        //patch_wow.copyTo(patch_new);
         cv::Mat patch_new_nff, patch_new_mask, patch, patch_face_nff;
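Context for the smoothing code above: cv::bilateralFilter does not support in-place operation, so the destination Mat must be a separate buffer from the source (the guard later added around the same call in annotation_renderer.cc points at the same constraint). A minimal self-contained sketch of this patch-smoothing step; the file names are placeholders, not part of the commit:

#include <opencv2/imgcodecs.hpp>
#include <opencv2/imgproc.hpp>

int main() {
  // Placeholder input; any 8-bit 3-channel image works.
  cv::Mat patch = cv::imread("face_patch.png", cv::IMREAD_COLOR);
  if (patch.empty()) return 1;

  // Same parameters as the calculator: neighbourhood diameter 12,
  // sigmaColor = sigmaSpace = 50. The destination must not alias the source.
  cv::Mat smoothed;
  cv::bilateralFilter(patch, smoothed, 12, 50, 50);

  cv::imwrite("face_patch_smoothed.png", smoothed);
  return 0;
}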

View File

@@ -32,6 +32,14 @@ cc_binary(
     ],
 )

+cc_binary(
+    name = "beauty_mobile",
+    deps = [
+        "//mediapipe/examples/desktop:demo_run_graph_main",
+        "//mediapipe/graphs/beauty:mobile_calculators",
+    ],
+)
+
 cc_binary(
     name = "beauty_cpu_over",
     deps = [

View File

@@ -68,8 +68,8 @@ cc_library(
         "//mediapipe/gpu:gpu_buffer_to_image_frame_calculator",
         "//mediapipe/gpu:image_frame_to_gpu_buffer_calculator",
         "//mediapipe/calculators/core:flow_limiter_calculator",
-        #"//mediapipe/graphs/beauty/subgraphs:face_renderer_cpu",
-        "//mediapipe/graphs/face_mesh/subgraphs:face_renderer_cpu",
+        "//mediapipe/graphs/beauty/subgraphs:face_renderer_cpu",
+        "//mediapipe/graphs/beauty/subgraphs:face_renderer_gpu",
         "//mediapipe/modules/face_landmark:face_landmark_front_gpu",
     ],
 )

View File

@@ -9,6 +9,13 @@ output_stream: "output_video"
 # landmarks. (std::vector<NormalizedLandmarkList>)
 output_stream: "multi_face_landmarks"

+profiler_config {
+  trace_enabled: true
+  enable_profiler: true
+  trace_log_interval_count: 200
+  trace_log_path: "/Users/alena/Workdir/mediapipe/logs/beauty/"
+}
+
 node {
   calculator: "FlowLimiterCalculator"
   input_stream: "input_video"

View File

@@ -9,6 +9,13 @@ output_stream: "output_video"
 # landmarks. (std::vector<NormalizedLandmarkList>)
 output_stream: "multi_face_landmarks"

+profiler_config {
+  trace_enabled: true
+  enable_profiler: true
+  trace_log_interval_count: 200
+  trace_log_path: "/Users/alena/Workdir/mediapipe/logs/beauty/"
+}
+
 # Throttles the images flowing downstream for flow control. It passes through
 # the very first incoming image unaltered, and waits for downstream nodes
 # (calculators and subgraphs) in the graph to finish their tasks before it

View File

@@ -12,6 +12,12 @@ output_stream: "output_video"
 # landmarks. (std::vector<NormalizedLandmarkList>)
 output_stream: "multi_face_landmarks"

+profiler_config {
+  trace_enabled: true
+  enable_profiler: true
+  trace_log_interval_count: 200
+}
+
 # Throttles the images flowing downstream for flow control. It passes through
 # the very first incoming image unaltered, and waits for downstream nodes
 # (calculators and subgraphs) in the graph to finish their tasks before it
@@ -58,6 +64,9 @@ node {
   input_side_packet: "NUM_FACES:num_faces"
   input_side_packet: "WITH_ATTENTION:with_attention"
   output_stream: "LANDMARKS:multi_face_landmarks"
+  output_stream: "ROIS_FROM_LANDMARKS:face_rects_from_landmarks"
+  output_stream: "DETECTIONS:face_detections"
+  output_stream: "ROIS_FROM_DETECTIONS:face_rects_from_detections"
 }

 # Subgraph that renders face-landmark annotation onto the input image.
@@ -68,7 +77,6 @@ node {
   output_stream: "IMAGE:output_video_cpu"
 }

-# Defines side packets for further use in the graph.
 node {
   calculator: "ImageFrameToGpuBufferCalculator"
   input_stream: "output_video_cpu"

View File

@@ -80,6 +80,3 @@ node {
   input_stream: "FACEBOX:0:multi_face_box"
   output_stream: "IMAGE:output_image"
 }
-
-
-

View File

@@ -0,0 +1,86 @@
# MediaPipe face mesh rendering subgraph.
type: "FaceRendererGpu"
# GPU image. (GpuBuffer)
input_stream: "IMAGE:input_image"
# Collection of detected/predicted faces, each represented as a list of
# landmarks. (std::vector<NormalizedLandmarkList>)
input_stream: "LANDMARKS:multi_face_landmarks"
# Regions of interest calculated based on face detections.
# (std::vector<NormalizedRect>)
input_stream: "NORM_RECTS:rects"
# Detected faces. (std::vector<Detection>)
input_stream: "DETECTIONS:detections"
# GPU image with rendered data. (GpuBuffer)
output_stream: "IMAGE:output_image"
node {
calculator: "ImagePropertiesCalculator"
input_stream: "IMAGE_GPU:input_image"
output_stream: "SIZE:image_size"
}
# Outputs each element of multi_face_landmarks at a fake timestamp for the rest
# of the graph to process. At the end of the loop, outputs the BATCH_END
# timestamp for downstream calculators to inform them that all elements in the
# vector have been processed.
node {
calculator: "BeginLoopNormalizedLandmarkListVectorCalculator"
input_stream: "ITERABLE:multi_face_landmarks"
input_stream: "IMAGE_GPU:input_image"
output_stream: "ITEM:face_landmarks"
output_stream: "IMAGE_GPU:loop_image"
output_stream: "BATCH_END:landmark_timestamp"
}
# Converts landmarks to face part masks.
node {
calculator: "LandmarksToMaskCalculator"
input_stream: "IMAGE_GPU:loop_image"
input_stream: "NORM_LANDMARKS:face_landmarks"
output_stream: "FACEBOX:face_box"
output_stream: "MASK:mask"
}
# Collects a MapMask object for each face into a vector. Upon receiving the
# BATCH_END timestamp, outputs the vector of masks at the BATCH_END
# timestamp.
node {
calculator: "EndLoopMapMaskCalculator"
input_stream: "ITEM:mask"
input_stream: "BATCH_END:landmark_timestamp"
output_stream: "ITERABLE:multi_mask"
}
node {
calculator: "EndLoopFaceBoxCalculator"
input_stream: "ITEM:face_box"
input_stream: "BATCH_END:landmark_timestamp"
output_stream: "ITERABLE:multi_face_box"
}
# Applies lipstick to the face on the IMAGE using MASK.
node {
calculator: "DrawLipstickCalculator"
input_stream: "IMAGE_GPU:input_image"
input_stream: "MASK:0:multi_mask"
output_stream: "IMAGE_GPU:input_image_1"
}
# Whitens teeth of the face on the IMAGE using MASK.
node {
calculator: "WhitenTeethCalculator"
input_stream: "IMAGE_GPU:input_image_1"
input_stream: "MASK:0:multi_mask"
output_stream: "IMAGE_GPU:input_image_2"
}
# Smooths the face on the IMAGE using MASK.
node {
calculator: "SmoothFaceCalculator"
input_stream: "IMAGE_GPU:input_image_2"
input_stream: "MASK:0:multi_mask"
input_stream: "FACEBOX:0:multi_face_box"
output_stream: "IMAGE_GPU:output_image"
}
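EndLoopMapMaskCalculator and EndLoopFaceBoxCalculator are not stock MediaPipe calculators; like BeginLoopNormalizedLandmarkListVectorCalculator, they would be instantiations of the framework's templated loop calculators, registered somewhere in this fork. A sketch of what those instantiations plausibly look like — the element types are assumptions inferred from the MASK and FACEBOX streams, not confirmed by this commit:

#include <string>
#include <tuple>
#include <unordered_map>
#include <vector>

#include "mediapipe/calculators/core/end_loop_calculator.h"
#include "mediapipe/framework/calculator_framework.h"
#include "opencv2/core.hpp"

namespace mediapipe {

// Assumed per-face payloads: a name -> cv::Mat map for MASK, and a
// min/max box tuple for FACEBOX; the real types live elsewhere in the fork.
using EndLoopMapMaskCalculator =
    EndLoopCalculator<std::vector<std::unordered_map<std::string, cv::Mat>>>;
REGISTER_CALCULATOR(EndLoopMapMaskCalculator);

using EndLoopFaceBoxCalculator =
    EndLoopCalculator<std::vector<std::tuple<double, double, double, double>>>;
REGISTER_CALCULATOR(EndLoopFaceBoxCalculator);

}  // namespace mediapipe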

View File

@@ -9,6 +9,13 @@ output_stream: "output_video"
 # landmarks. (std::vector<NormalizedLandmarkList>)
 output_stream: "multi_face_landmarks"

+profiler_config {
+  trace_enabled: true
+  enable_profiler: true
+  trace_log_interval_count: 200
+  trace_log_path: "/Users/alena/Workdir/mediapipe/logs"
+}
+
 # Throttles the images flowing downstream for flow control. It passes through
 # the very first incoming image unaltered, and waits for downstream nodes
 # (calculators and subgraphs) in the graph to finish their tasks before it

View File

@@ -12,6 +12,12 @@ output_stream: "output_video"
 # landmarks. (std::vector<NormalizedLandmarkList>)
 output_stream: "multi_face_landmarks"

+profiler_config {
+  trace_enabled: true
+  enable_profiler: true
+  trace_log_interval_count: 200
+}
+
 # Throttles the images flowing downstream for flow control. It passes through
 # the very first incoming image unaltered, and waits for downstream nodes
 # (calculators and subgraphs) in the graph to finish their tasks before it

View File

@@ -12,6 +12,12 @@ output_stream: "output_video"
 # landmarks. (std::vector<NormalizedLandmarkList>)
 output_stream: "multi_face_landmarks"

+profiler_config {
+  trace_enabled: true
+  enable_profiler: true
+  trace_log_interval_count: 200
+}
+
 # Throttles the images flowing downstream for flow control. It passes through
 # the very first incoming image unaltered, and waits for downstream nodes
 # (calculators and subgraphs) in the graph to finish their tasks before it

View File

@@ -108,7 +108,7 @@ void AnnotationRenderer::RenderDataOnImage(const RenderData &render_data)
     if (render_data.render_annotations().size()){
         DrawLipstick(render_data);
         WhitenTeeth(render_data);
-        // SmoothFace(render_data);
+        //SmoothFace(render_data);
     }
     else
     {
@@ -170,6 +170,7 @@ cv::Mat AnnotationRenderer::FormFacePartMask(std::vector<int> orderList, const R
     }

     if (points_array.size() != orderList.size()){
+        mask.convertTo(mask, CV_8U);
         return mask;
     }
@@ -290,7 +291,6 @@ cv::Mat AnnotationRenderer::predict_forehead_mask(const RenderData &render_data,
-

 void AnnotationRenderer::SmoothFace(const RenderData &render_data)
 {
     cv::Mat not_full_face = cv::Mat(FormFacePartMask(FACE_OVAL, render_data)) +
                             cv::Mat(predict_forehead_mask(render_data, std::get<1>(GetFaceBox(render_data)))) -
                             cv::Mat(FormFacePartMask(LEFT_EYE, render_data)) -
@@ -324,7 +324,9 @@ void AnnotationRenderer::SmoothFace(const RenderData &render_data)
     cv::Mat patch_nff = not_full_face(cv::Range(min_y, max_y), cv::Range(min_x, max_x));
     cv::Mat patch_new, patch_wow;
     cv::cvtColor(patch_face, patch_wow, cv::COLOR_RGBA2RGB);
+    if (patch_wow.data != patch_new.data) {
     cv::bilateralFilter(patch_wow, patch_new, 12, 50, 50);
+    }
     cv::Mat patch_new_nff, patch_new_mask, patch, patch_face_nff;

View File

@@ -1,41 +1,33 @@
 # Description:
-#   OpenCV libraries for video/image processing on Linux
+#   OpenCV libraries for video/image processing on MacOS

 licenses(["notice"])  # BSD license

 exports_files(["LICENSE"])

-# The following build rule assumes that OpenCV is installed by
-# 'apt-get install libopencv-core-dev libopencv-highgui-dev \'
-# '                libopencv-calib3d-dev libopencv-features2d-dev \'
-# '                libopencv-imgproc-dev libopencv-video-dev'
-# on Debian Buster/Ubuntu 18.04.
-# If you install OpenCV separately, please modify the build rule accordingly.
+load("@bazel_skylib//lib:paths.bzl", "paths")
+
+# The path to OpenCV is a combination of the path set for "macos_opencv"
+# in the WORKSPACE file and the prefix here.
+PREFIX = "opencv@3/3.4.16_3/"
+
 cc_library(
     name = "opencv",
-    hdrs = glob([
-        # For OpenCV 4.x
-        #"include/aarch64-linux-gnu/opencv4/opencv2/cvconfig.h",
-        #"include/arm-linux-gnueabihf/opencv4/opencv2/cvconfig.h",
-        #"include/x86_64-linux-gnu/opencv4/opencv2/cvconfig.h",
-        #"include/opencv4/opencv2/**/*.h*",
-    ]),
-    includes = [
-        # For OpenCV 4.x
-        #"include/aarch64-linux-gnu/opencv4/",
-        #"include/arm-linux-gnueabihf/opencv4/",
-        #"include/x86_64-linux-gnu/opencv4/",
-        #"include/opencv4/",
-    ],
-    linkopts = [
-        "-l:libopencv_core.so",
-        "-l:libopencv_calib3d.so",
-        "-l:libopencv_features2d.so",
-        "-l:libopencv_highgui.so",
-        "-l:libopencv_imgcodecs.so",
-        "-l:libopencv_imgproc.so",
-        "-l:libopencv_video.so",
-        "-l:libopencv_videoio.so",
-    ],
+    srcs = glob(
+        [
+            paths.join(PREFIX, "lib/libopencv_core.dylib"),
+            paths.join(PREFIX, "lib/libopencv_calib3d.dylib"),
+            paths.join(PREFIX, "lib/libopencv_features2d.dylib"),
+            paths.join(PREFIX, "lib/libopencv_highgui.dylib"),
+            paths.join(PREFIX, "lib/libopencv_imgcodecs.dylib"),
+            paths.join(PREFIX, "lib/libopencv_imgproc.dylib"),
+            paths.join(PREFIX, "lib/libopencv_video.dylib"),
+            paths.join(PREFIX, "lib/libopencv_videoio.dylib"),
+        ],
+    ),
+    hdrs = glob([paths.join(PREFIX, "include/opencv2/**/*.h*")]),
+    includes = [paths.join(PREFIX, "include/")],
+    linkstatic = 1,
     visibility = ["//visibility:public"],
 )

View File

@@ -9,7 +9,7 @@ load("@bazel_skylib//lib:paths.bzl", "paths")
 # The path to OpenCV is a combination of the path set for "macos_opencv"
 # in the WORKSPACE file and the prefix here.
-PREFIX = "opt/opencv@3"
+PREFIX = "opencv@3/3.4.16_3/"

 cc_library(
     name = "opencv",