diff --git a/WORKSPACE b/WORKSPACE
index a131001a1..19d988f0a 100644
--- a/WORKSPACE
+++ b/WORKSPACE
@@ -528,11 +528,11 @@ load("@build_bazel_rules_android//android:rules.bzl", "android_ndk_repository",
 android_sdk_repository(
     name = "androidsdk",
     build_tools_version = "30.0.3",
-    path = "/Users/tj/Library/Android/sdk",  # Path to Android SDK, optional if $ANDROID_HOME is set
+    # path = "/Users/tj/Library/Android/sdk",  # Path to Android SDK, optional if $ANDROID_HOME is set
 )
 
 android_ndk_repository(
     name = "androidndk",  # Required. Name *must* be "androidndk".
     api_level = 21,
-    path = "/Users/tj/Library/Android/sdk/ndk/21.4.7075529",  # Optional. Can be omitted if `ANDROID_NDK_HOME` environment variable is set.
+    # path = "/Users/tj/Library/Android/sdk/ndk/21.4.7075529",  # Optional. Can be omitted if `ANDROID_NDK_HOME` environment variable is set.
 )
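Note: with the hardcoded paths commented out above, Bazel now resolves the Android SDK and NDK from the environment variables named in the comments. A minimal sketch of the required setup, assuming the default macOS install locations (the paths are assumptions; adjust for your machine):

```shell
# Assumed install locations; point these at your own SDK/NDK.
export ANDROID_HOME="$HOME/Library/Android/sdk"
export ANDROID_NDK_HOME="$ANDROID_HOME/ndk/21.4.7075529"
```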
"//mediapipe:ios_i386": [], + "//mediapipe:ios_x86_64": [], + "//conditions:default": [], + }), hdrs = MPP_HEADERS + MP_GEN_IOS_HEADERS, copts = [ "-Wno-shorten-64-to-32", @@ -168,13 +189,13 @@ objc_library( "//mediapipe/objc:mediapipe_framework_ios", "//mediapipe/objc:mediapipe_input_sources_ios", "//mediapipe/objc:mediapipe_layer_renderer", + "//mediapipe/graphs/pose_tracking:pose_tracking_gpu_deps", + "//mediapipe/framework/formats:landmark_cc_proto", + "calculator_registry", ] + select({ "//mediapipe:ios_i386": [], "//mediapipe:ios_x86_64": [], "//conditions:default": [ - "//mediapipe/graphs/pose_tracking:pose_tracking_gpu_deps", - "//mediapipe/framework/formats:landmark_cc_proto", - "calculator_registry", ], }), alwayslink = True, diff --git a/mediapipe/swift/solutions/lindera/BUILD b/mediapipe/swift/solutions/lindera/BUILD index 9185b3750..962d19f29 100644 --- a/mediapipe/swift/solutions/lindera/BUILD +++ b/mediapipe/swift/solutions/lindera/BUILD @@ -20,10 +20,13 @@ genrule( srcs = [ "//mediapipe/objc/solutions/posetracking_gpu:MPPoseTrackingHeaderPatched", "LinderaDetection.podspec", + "@ios_opencv//:OpencvFrameworkContents", ] + glob(["*.swift"]), outs = ["LinderaDetection.zip"], cmd = """ + mkdir mediapipe/swift/solutions/lindera/frameworks + cp -r external/ios_opencv/opencv2.framework mediapipe/swift/solutions/lindera/frameworks unzip $(location //mediapipe/objc/solutions/posetracking_gpu:MPPoseTrackingHeaderPatched) -d mediapipe/swift/solutions/lindera/frameworks cd mediapipe/swift/solutions/lindera/ diff --git a/mediapipe/swift/solutions/lindera/Lindera.swift b/mediapipe/swift/solutions/lindera/Lindera.swift index 2943c2416..3a380c923 100644 --- a/mediapipe/swift/solutions/lindera/Lindera.swift +++ b/mediapipe/swift/solutions/lindera/Lindera.swift @@ -1,31 +1,32 @@ // This is the copperlabs posetracking api built in objective c -import MPPoseTracking import UIKit +#if arch(arm64) +import MPPoseTracking /// A helper class to run the Pose Tracking API /// TFLite models are also loaded when you initialize this class public final class Lindera{ - - + + //MARK: - Public Class API - - + + // A delegate to handle results public weak var delegate: LinderaDelegate? - + /// This function sets up your callback function to happen whenver there is an fps update public func setFpsDelegate(fpsDelegate: @escaping (_ fps:Double)->Void){ fpsHelper.onFpsUpdate = fpsDelegate; } - + // Get the camera UI View that may contain landmarks drawing public var cameraView: UIView { return self.linderaExerciseSession } - - + + // Show Landmarks - works instantaneously! public func showLandmarks(value:Bool){ self.poseTracking.showLandmarks(value) @@ -38,50 +39,50 @@ public final class Lindera{ public func getModelComplexity() -> Int { return Int(self.poseTracking.poseTrackingOptions.modelComplexity); } - + // Set the model complexity and restart detection to load new models public func setModelComplexityNow(complexity:Int){ let poseTrackingOptions = poseTracking.poseTrackingOptions - + poseTrackingOptions?.modelComplexity = Int32(complexity) - + poseTracking = PoseTracking(poseTrackingOptions: poseTrackingOptions) startPoseTracking() startCamera() - + } - + public required init(){ - + startPoseTracking() } - - + + public func startCamera(_ completion: ((Result) -> Void)? 
diff --git a/mediapipe/swift/solutions/lindera/BUILD b/mediapipe/swift/solutions/lindera/BUILD
index 9185b3750..962d19f29 100644
--- a/mediapipe/swift/solutions/lindera/BUILD
+++ b/mediapipe/swift/solutions/lindera/BUILD
@@ -20,10 +20,13 @@ genrule(
     srcs = [
         "//mediapipe/objc/solutions/posetracking_gpu:MPPoseTrackingHeaderPatched",
         "LinderaDetection.podspec",
+        "@ios_opencv//:OpencvFrameworkContents",
     ] + glob(["*.swift"]),
     outs = ["LinderaDetection.zip"],
     cmd = """
+    mkdir mediapipe/swift/solutions/lindera/frameworks
+    cp -r external/ios_opencv/opencv2.framework mediapipe/swift/solutions/lindera/frameworks
     unzip $(location //mediapipe/objc/solutions/posetracking_gpu:MPPoseTrackingHeaderPatched) -d mediapipe/swift/solutions/lindera/frameworks
 
     cd mediapipe/swift/solutions/lindera/
diff --git a/mediapipe/swift/solutions/lindera/Lindera.swift b/mediapipe/swift/solutions/lindera/Lindera.swift
index 2943c2416..3a380c923 100644
--- a/mediapipe/swift/solutions/lindera/Lindera.swift
+++ b/mediapipe/swift/solutions/lindera/Lindera.swift
@@ -1,31 +1,32 @@
 // This is the copperlabs posetracking api built in objective c
-import MPPoseTracking
 import UIKit
+#if arch(arm64)
+import MPPoseTracking
 
 /// A helper class to run the Pose Tracking API
 /// TFLite models are also loaded when you initialize this class
 public final class Lindera{
-    
-    
+
+
     //MARK: - Public Class API
-    
-    
+
+
     // A delegate to handle results
     public weak var delegate: LinderaDelegate?
-    
+
     /// This function sets up your callback function to happen whenever there is an FPS update
     public func setFpsDelegate(fpsDelegate: @escaping (_ fps:Double)->Void){
         fpsHelper.onFpsUpdate = fpsDelegate;
     }
-    
+
     // Get the camera UI View that may contain landmarks drawing
     public var cameraView: UIView {
         return self.linderaExerciseSession
     }
-    
-    
+
+
     // Show Landmarks - works instantaneously!
     public func showLandmarks(value:Bool){
         self.poseTracking.showLandmarks(value)
     }
@@ -38,50 +39,50 @@ public final class Lindera{
     public func getModelComplexity() -> Int {
         return Int(self.poseTracking.poseTrackingOptions.modelComplexity);
     }
-    
+
     // Set the model complexity and restart detection to load new models
     public func setModelComplexityNow(complexity:Int){
         let poseTrackingOptions = poseTracking.poseTrackingOptions
-        
+
         poseTrackingOptions?.modelComplexity = Int32(complexity)
-        
+
         poseTracking = PoseTracking(poseTrackingOptions: poseTrackingOptions)
         startPoseTracking()
         startCamera()
-        
+
     }
-    
+
     public required init(){
-        
+
         startPoseTracking()
     }
-    
-    
+
+
     public func startCamera(_ completion: ((Result<Void, Error>) -> Void)? = nil) {
         // set our rendering layer frame according to cameraView boundary
         self.poseTracking.renderer.layer.frame = cameraView.layer.bounds
         // attach render CALayer on cameraView to render output to
         self.cameraView.layer.addSublayer(self.poseTracking.renderer.layer)
-        
+
         self.cameraSource.requestCameraAccess(
-            completionHandler: {(granted:Bool)->Void in
-            if (granted){
-                self.poseTracking.videoQueue.async(execute:{ [weak self] in
-                    
-                    self?.cameraSource.start()
-                    
-                } )
-                completion?(.success(Void()))
-            }else{
-                
-                completion?(.failure(preconditionFailure("Camera Access Not Granted")))
-                
-            }
-        })
-        
-        
-        
-        
+            completionHandler: {(granted:Bool)->Void in
+                if (granted){
+                    self.poseTracking.videoQueue.async(execute:{ [weak self] in
+
+                        self?.cameraSource.start()
+
+                    } )
+                    completion?(.success(Void()))
+                }else{
+
+                    completion?(.failure(preconditionFailure("Camera Access Not Granted")))
+
+                }
+            })
+
+
+
     }
     /// Choose front or back camera. Must restart camera after use if already started
     public func selectCamera(_ position: AVCaptureDevice.Position, _ completion: ((Result<Void, Error>) -> Void)? = nil) {
@@ -89,23 +90,23 @@ public final class Lindera{
             self?.cameraSource.cameraPosition = position
             completion?(.success(Void()))
         }
-        
+
     }
-    
-    
+
+
     // MARK: - Private Class Functions
-    
+
     // Set your custom view here
     private lazy var linderaExerciseSession: UIView = {
-        
+
         // this will be the main camera view; change it to a custom view class to get desired results
         let liveView = UIView()
-        
+
         return liveView
-        
+
     }()
-    
-    
+
+
     private func startPoseTracking(){
         // set camera preferences
         self.cameraSource.sessionPreset = AVCaptureSession.Preset.high.rawValue
@@ -116,43 +117,43 @@ public final class Lindera{
         }
         // call LinderaDelegate on pose tracking results
         self.poseTracking.poseTrackingResultsListener = {[weak self] results in
-            
-            
+
+
             guard let self = self, let results = results else { return }
-            
+
             self.delegate?.lindera(self, didDetect: .init(pose: Asensei3DPose.init(results), timestamp: CMTimeGetSeconds(self.poseTracking.timeStamp)))
         }
         self.poseTracking.graphOutputStreamListener = {[weak self] in
             self?.fpsHelper.logTime()
         }
-        
+
         self.poseTracking.startGraph()
         // attach camera's output with poseTracking object and its videoQueue
         self.cameraSource.setDelegate(self.poseTracking, queue: self.poseTracking.videoQueue)
     }
-    
-    
+
+
     func stopCamera(){
         if (self.cameraSource.isRunning){
             self.poseTracking.videoQueue.async { [weak self] in
                 self?.cameraSource.stop()
             }
-            
+
         }
     }
-    
+
     /// switches camera from front to back and vice versa
     func switchCamera(_ completion: ((Result<Void, Error>) -> Void)? = nil) {
         self.poseTracking.videoQueue.async { [weak self] in
             if let self = self {
-                
+
                 self.stopCamera()
                 self.startCamera(completion)
-                
+
                 switch(self.cameraSource.cameraPosition){
-                    
+
                 case .unspecified:
                     completion?(.failure(preconditionFailure("Unknown Camera Position")))
                 case .back:
@@ -161,34 +162,34 @@ public final class Lindera{
                     self.selectCamera(AVCaptureDevice.Position.back,completion)
                 @unknown default:
                     completion?(.failure(preconditionFailure("Unknown Camera Position")))
-                    
+
                 }
-                
-                
+
+
             }
-            
+
         }
     }
-    
-    
+
+
     // MARK: - Private Class Objects
     // initialize the PoseTracking api and load models
     var poseTracking:PoseTracking = PoseTracking(poseTrackingOptions: PoseTrackingOptions(showLandmarks: true,modelComplexity: 1))
-    
+
     // Needed to get fps of model
     let fpsHelper = FPSHelper(smoothingFactor: 0.95)
-    
+
     // attach Mediapipe camera helper to our class
     let cameraSource = MPPCameraInputSource()
-    
-    
-    
+
+
+
 }
 
 public protocol LinderaDelegate: AnyObject {
-    
+
     func lindera(_ lindera: Lindera, didDetect event: Asensei3DPose.Event)
 }
@@ -199,61 +200,61 @@ func landmarkToBodyJointDetails(landmark: PoseLandmark) -> Asensei3DPose.BodyJoi
 }
 // MARK: - Helpers
 extension Asensei3DPose {
-    
+
     init(_ pose: PoseTrackingResults) {
-        
+
         self.nose = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_NOSE])
-        
+
         self.leftEyeInner = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_EYE_INNER])
         self.leftEye = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_EYE])
         self.leftEyeOuter = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_EYE_OUTER])
-        
+
         self.rightEyeInner = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_EYE_INNER])
         self.rightEye = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_EYE])
         self.rightEyeOuter = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_EYE_OUTER])
-        
+
         self.leftEar = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_EAR])
         self.rightEar = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_EAR])
-        
+
         self.mouthLeft = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_MOUTH_LEFT])
         self.mouthRight = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_MOUTH_RIGHT])
-        
+
         self.leftShoulder = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_SHOULDER])
         self.rightShoulder = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_SHOULDER])
-        
+
         self.leftElbow = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_ELBOW])
         self.rightElbow = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_ELBOW])
-        
+
         self.leftWrist = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_WRIST])
         self.rightWrist = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_WRIST])
-        
+
         self.leftPinky = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_PINKY])
         self.rightPinky = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_PINKY])
-        
+
         self.leftIndex = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_INDEX])
         self.rightIndex = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_INDEX])
-        
+
         self.leftThumb = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_THUMB])
         self.rightThumb = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_THUMB])
-        
+
         self.leftHip = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_HIP])
         self.rightHip = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_HIP])
-        
+
         self.leftKnee = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_KNEE])
         self.rightKnee = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_KNEE])
-        
+
         self.rightAnkle = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_ANKLE])
         self.leftAnkle = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_ANKLE])
-        
-        
+
+
         self.rightHeel = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_HEEL])
         self.leftHeel = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_HEEL])
-        
+
         self.rightFoot = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_FOOT])
         self.leftFoot = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_FOOT])
-        
-        
-        
+
+
+
     }
 }
@@ -265,3 +266,6 @@ extension Asensei3DPose {
 //        self.z = vector.y
 //    }
 //}
+#else
+final public class Lindera{}
+#endif
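Note: the public API guarded by `#if arch(arm64)` above can be driven from a view controller roughly as follows. This is a minimal sketch using only the calls shown in this file; `MyViewController` and the embedding details are illustrative, not part of the library:

```swift
import UIKit

// Illustrative consumer of the Lindera API (hypothetical view controller).
final class MyViewController: UIViewController, LinderaDelegate {
    private let lindera = Lindera()

    override func viewDidLoad() {
        super.viewDidLoad()
        lindera.delegate = self
        lindera.setFpsDelegate { fps in print("FPS: \(fps)") }
        lindera.showLandmarks(value: true)

        // Embed the camera preview and start tracking.
        lindera.cameraView.frame = view.bounds
        view.addSubview(lindera.cameraView)
        lindera.startCamera { result in
            if case .failure(let error) = result {
                print("Camera failed to start: \(error)")
            }
        }
    }

    func lindera(_ lindera: Lindera, didDetect event: Asensei3DPose.Event) {
        // Per-frame pose landmarks arrive here.
    }
}
```

On the simulator, where the `#else` branch compiles only an empty `Lindera` stub, none of these members exist; this pairs with the `EXCLUDED_ARCHS` settings added to the podspec below.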
diff --git a/mediapipe/swift/solutions/lindera/LinderaDetection.podspec b/mediapipe/swift/solutions/lindera/LinderaDetection.podspec
index d7389d204..d0f2961de 100644
--- a/mediapipe/swift/solutions/lindera/LinderaDetection.podspec
+++ b/mediapipe/swift/solutions/lindera/LinderaDetection.podspec
@@ -16,15 +16,15 @@ Pod::Spec.new do |spec|
   #
   spec.name         = "LinderaDetection"
-  spec.version      = "0.0.1"
-  spec.summary      = "LinderaDetection is a simple yet powerful interface to run AI Health Solutions"
+  spec.version      = "0.0.2"
+  spec.summary      = "LinderaDetection is a simple yet powerful interface to run AI Fitness Solutions"
 
   # This description is used to generate tags and improve search results.
   #   * Think: What does it do? Why did you write it? What is the focus?
   #   * Try to keep it short, snappy and to the point.
   #   * Write the description between the DESC delimiters below.
   #   * Finally, don't worry about the indent, CocoaPods strips it!
-  spec.description  = "LinderaDetection is a simple yet powerful interface to run AI Health Solutions"
+  spec.description  = "LinderaDetection is a simple yet powerful interface to run AI Fitness Solutions. It is powered by Mediapipe."
 
   spec.homepage     = "https://github.com/udamaster/mediapipe"
   # spec.screenshots = "www.example.com/screenshots_1.gif", "www.example.com/screenshots_2.gif"
@@ -37,8 +37,11 @@ Pod::Spec.new do |spec|
   #  Popular ones are 'MIT', 'BSD' and 'Apache License, Version 2.0'.
   #
-  spec.license      = "MIT (example)"
-  spec.license      = { :type => "MIT"}
+  spec.license      = { :type => 'MIT', :text => <<-LICENSE
+                Copyright 2012
+                Permission is granted to...
+              LICENSE
+            }
 
   # ――― Author Metadata  ――――――――――――――――――――――――――――――――――――――――――――――――――――――――― #
@@ -59,7 +62,7 @@ Pod::Spec.new do |spec|
   #  If this Pod runs only on iOS or OS X, then specify the platform and
   #  the deployment target. You can optionally include the target after the platform.
   #
-
+  spec.swift_versions = ["4.0"]
   # spec.platform     = :ios
   spec.platform      = :ios, "12.0"
@@ -74,9 +77,12 @@ Pod::Spec.new do |spec|
   #
   #  Specify the location from where the source should be retrieved.
   #  Supports git, hg, bzr, svn and HTTP.
-  #
-  spec.source       = { :http => 'https://edge-engine-store.s3.amazonaws.com/libs/ios/EdgeEngine/pod/EdgeEngine.zip' }
+
+  spec.source       = { :http => 'https://github.com/copper-labs/iOSFramework/releases/download/0.1.0/LinderaDetection.zip' }
+  # for quickly testing locally
+  # spec.source     = { :http => 'http://127.0.0.1:8000/LinderaDetection.zip' }
+
 
   # ――― Source Code ―――――――――――――――――――――――――――――――――――――――――――――――――――――――――――――― #
   #
@@ -128,14 +134,21 @@ Pod::Spec.new do |spec|
   #  spec.requires_arc = true
   #  spec.xcconfig = { "HEADER_SEARCH_PATHS" => "$(SDKROOT)/usr/include/libxml2" }
-  spec.dependency "OpenCV", "3.2"
+  # spec.dependency "OpenCV", "3.2"
   spec.static_framework = true
-
+  # spec.preserve_paths = "frameworks/**/*"
   spec.ios.vendored_frameworks = 'frameworks/*.framework'
-  # spec.pod_target_xcconfig = { 'OTHER_LDFLAGS' => '-lc++' }
-  # spec.user_target_xcconfig = {'OTHER_LDFLAGS' => '-lc++' }
+  spec.pod_target_xcconfig = { 'EXCLUDED_ARCHS[sdk=iphonesimulator*]' => 'arm64' ,
+    'OTHER_LDFLAGS' => '$(inherited) -force_load $(PODS_ROOT)/LinderaDetection/frameworks/MPPoseTracking.framework/MPPoseTracking' }
+  spec.user_target_xcconfig = {
+    'EXCLUDED_ARCHS[sdk=iphonesimulator*]' => 'arm64' ,
+    'OTHER_LDFLAGS' => '$(inherited) -force_load $(PODS_ROOT)/LinderaDetection/frameworks/MPPoseTracking.framework/MPPoseTracking' }
   spec.libraries = 'stdc++'
-  # ――― Temporary Architecture fixes
-  spec.user_target_xcconfig = { 'EXCLUDED_ARCHS[sdk=iphonesimulator*]' => 'arm64' }
-  spec.pod_target_xcconfig = { 'EXCLUDED_ARCHS[sdk=iphonesimulator*]' => 'arm64' }
+
+
+  # spec.xcconfig = {
+  #   'FRAMEWORK_SEARCH_PATH[sdk=iphoneos*]' => '$(inherited) "$(PODS_ROOT)/frameworks"',
+  #   'OTHERCFLAGS[sdk=iphoneos*]' => '$(inherited) -iframework "$(PODS_ROOT)/frameworks"',
+  #   'OTHER_LDFLAGS[sdk=iphoneos*]' => '$(inherited) -framework frameworks'
+  # }
 end
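Note: before pushing, the spec can be sanity-checked locally. A sketch, mirroring the `--skip-import-validation` flag used in the README's push step below; flags as understood from the CocoaPods CLI:

```shell
# Lint the podspec without full import validation (mirrors the push flags).
pod spec lint LinderaDetection.podspec --skip-import-validation --allow-warnings
```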
diff --git a/mediapipe/swift/solutions/lindera/README.md b/mediapipe/swift/solutions/lindera/README.md
new file mode 100644
index 000000000..7cfe9b747
--- /dev/null
+++ b/mediapipe/swift/solutions/lindera/README.md
@@ -0,0 +1,13 @@
+## CocoaPods
+
+### Building Pod zipfile
+```shell
+bazel build -c opt --config=ios_fat --cxxopt=--std=c++17 --copt=-fembed-bitcode //mediapipe/swift/solutions/lindera:podgen
+```
+
+### Pushing Pods
+
+Here `clspecs` is the name of the pod specs repository:
+```shell
+pod repo push clspecs LinderaDetection.podspec --skip-import-validation
+```
diff --git a/third_party/opencv_ios.BUILD b/third_party/opencv_ios.BUILD
index c9f112075..6a20a2f45 100644
--- a/third_party/opencv_ios.BUILD
+++ b/third_party/opencv_ios.BUILD
@@ -10,6 +10,12 @@ load(
     "apple_static_framework_import",
 )
 
+filegroup(
+    name = "OpencvFrameworkContents",
+    srcs = glob(["opencv2.framework/**"]),
+    visibility = ["//visibility:public"],
+)
+
 apple_static_framework_import(
     name = "OpencvFramework",
     framework_imports = glob(["opencv2.framework/**"]),
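Note: to exercise the commented-out local `spec.source` (`http://127.0.0.1:8000/LinderaDetection.zip`) in the podspec above, the generated zip can be served over plain HTTP. A sketch, assuming Python 3 is available and that the `podgen` genrule writes its output to the usual `bazel-bin` location:

```shell
# Serve the pod zip locally for the podspec's 127.0.0.1 test source.
mkdir -p /tmp/podserve
cp bazel-bin/mediapipe/swift/solutions/lindera/LinderaDetection.zip /tmp/podserve/
cd /tmp/podserve && python3 -m http.server 8000
```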