basic lindera api implementation
parent 09f98fbad0
commit a96dab057a
@@ -24,8 +24,7 @@
],
"buildTargets" : [
"//mediapipe/examples/ios/posetracking-lindera/PoseTrackingLindera:posetracking-lindera",
"//mediapipe/examples/ios/posetrackingsolutiongpu:PoseTrackingSolutionGpuApp",
"//mediapipe/swift/solutions/lindera:Lindera"
"//mediapipe/swift/solutions/lindera:lindera"
],
"optionSet" : {
"BazelBuildOptionsDebug" : {
@@ -30,18 +30,23 @@ swift_library(
deps = [
"@ios_opencv//:OpencvFramework",
] + [
"//mediapipe/objc/solutions/posetracking_gpu:posetracking_gpu_solution",
"//mediapipe/objc:mediapipe_framework_ios",
"//mediapipe/objc:mediapipe_input_sources_ios",
"//mediapipe/objc:mediapipe_layer_renderer",
] + select({
"//mediapipe:ios_i386": [],
"//mediapipe:ios_x86_64": [],
"//conditions:default": [
"//mediapipe/graphs/pose_tracking:pose_tracking_gpu_deps",
"//mediapipe/framework/formats:landmark_cc_proto",
],
}),
"//mediapipe/swift/solutions/lindera:lindera",
],

# +
# [
# "//mediapipe/objc/solutions/posetracking_gpu:posetracking_gpu_solution",
# "//mediapipe/objc:mediapipe_framework_ios",
# "//mediapipe/objc:mediapipe_input_sources_ios",
# "//mediapipe/objc:mediapipe_layer_renderer",
# ] + select({
# "//mediapipe:ios_i386": [],
# "//mediapipe:ios_x86_64": [],
# "//conditions:default": [
# "//mediapipe/graphs/pose_tracking:pose_tracking_gpu_deps",
# "//mediapipe/framework/formats:landmark_cc_proto",
# ],
# })
)

apple_resource_bundle(
@@ -60,9 +65,10 @@ ios_application(
"iphone",
"ipad",
],
infoplists = ["Info.plist",
"//mediapipe/examples/ios/common:Info.plist",
],
infoplists = [
"Info.plist",
"//mediapipe/examples/ios/common:Info.plist",
],
linkopts = [
"-lc++",
],
@@ -73,6 +79,7 @@ ios_application(
deps = [
":lindera_app_lib",
":app_resources",

# "@ios_opencv//:OpencvFramework",
],
)
@@ -4,35 +4,45 @@
//
// Created by Mautisim Munir on 17/10/2022.
//

import UIKit
import MPPoseTracking
import LinderaDetection

class ViewController: UIViewController {
let poseTracking:PoseTracking = PoseTracking(poseTrackingOptions: PoseTrackingOptions(showLandmarks: true));
let cameraSource = MPPCameraInputSource();

@IBOutlet var liveView:UIView?;

let lindera = Lindera()

/// A simple LinderaDelegate implementation that prints nose coordinates if detected
class LinderaDelegateImpl: LinderaDelegate {
func lindera(_ lindera: Lindera, didDetect event: Asensei3DPose.Event) {
if let kpt = event.pose.nose {
print("LinderaDelegateImpl: Nose Keypoint (\(String(describing: kpt.position.x)),\(String(describing: kpt.position.y)),\(kpt.position.z)) with confidence \(kpt.confidence)")
}
}
}

let linderaDelegate = LinderaDelegateImpl()

override func viewDidLoad() {
super.viewDidLoad()

// Do any additional setup after loading the view.

self.poseTracking.renderer.layer.frame = self.liveView!.layer.bounds
self.liveView?.layer.addSublayer(self.poseTracking.renderer.layer)
self.cameraSource.sessionPreset = AVCaptureSession.Preset.high.rawValue;
self.cameraSource.cameraPosition = AVCaptureDevice.Position.front;
self.cameraSource.orientation = AVCaptureVideoOrientation.portrait;
if (self.cameraSource.orientation == AVCaptureVideoOrientation.portrait){
self.cameraSource.videoMirrored = true;
}
self.cameraSource.requestCameraAccess(
completionHandler: {(granted:Bool)->Void in
if (granted){
self.poseTracking.start(withCamera: self.cameraSource)
}
})

self.lindera.delegate = linderaDelegate

// add lindera camera view to our app's UIView i.e. liveView
self.liveView?.addSubview(lindera.cameraView)
// Expand our cameraView frame to liveView frame
lindera.cameraView.frame = self.liveView!.bounds;

lindera.startCamera()

}
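Distilled from the ViewController changes above, here is a minimal sketch of driving the new Swift API from a host view controller. Lindera, LinderaDelegate, and Asensei3DPose.Event come from this commit; the conforming class itself is illustrative only.

import UIKit
import LinderaDetection

// Minimal host that mirrors what the ViewController above does with the new API.
final class MinimalLinderaViewController: UIViewController, LinderaDelegate {
    private let lindera = Lindera()

    override func viewDidLoad() {
        super.viewDidLoad()
        // `delegate` is weak, so `self` (kept alive by UIKit) is a safe owner here.
        lindera.delegate = self
        // Show the camera feed and size it to this controller's view.
        view.addSubview(lindera.cameraView)
        lindera.cameraView.frame = view.bounds
        lindera.startCamera()
    }

    // Called for every detected pose; log the nose keypoint if present.
    func lindera(_ lindera: Lindera, didDetect event: Asensei3DPose.Event) {
        if let nose = event.pose.nose {
            print("nose (\(nose.position.x), \(nose.position.y), \(nose.position.z)) confidence \(nose.confidence)")
        }
    }
}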
@@ -22,7 +22,7 @@
// Render frames in a layer.
@property(nonatomic) MPPLayerRenderer* renderer;

@property (nonatomic) CMTime timeStamp;
// Graph name.
@property(nonatomic) NSString* graphName;
@@ -43,6 +43,7 @@
@property(nonatomic) void(^poseTrackingResultsListener)(PoseTrackingResults*);

- (instancetype) initWithPoseTrackingOptions: (PoseTrackingOptions*) poseTrackingOptions;
- (void)startGraph;
- (void) startWithCamera: (MPPCameraInputSource*) cameraSource;
@end
@@ -196,6 +196,8 @@ static const char* kLandmarksOutputStream = "pose_landmarks";
- (void)processVideoFrame:(CVPixelBufferRef)imageBuffer
timestamp:(CMTime)timestamp
fromSource:(MPPInputSource*)source {

self.timeStamp = timestamp;

[self->mediapipeGraph sendPixelBuffer:imageBuffer
intoStream:self.graphInputStream
@@ -2,6 +2,44 @@
#define MEDIAPIPE_POSETRACKINGRESULTS_H

#import <Foundation/Foundation.h>

static const NSInteger POSE_NOSE = 0;
static const NSInteger POSE_LEFT_EYE_INNER = 1;
static const NSInteger POSE_LEFT_EYE = 2;
static const NSInteger POSE_LEFT_EYE_OUTER = 3;
static const NSInteger POSE_RIGHT_EYE_INNER = 4;
static const NSInteger POSE_RIGHT_EYE = 5;
static const NSInteger POSE_RIGHT_EYE_OUTER = 6;
static const NSInteger POSE_LEFT_EAR = 7;
static const NSInteger POSE_RIGHT_EAR = 8;
static const NSInteger POSE_MOUTH_LEFT = 9;
static const NSInteger POSE_MOUTH_RIGHT = 10;
static const NSInteger POSE_LEFT_SHOULDER = 11;
static const NSInteger POSE_RIGHT_SHOULDER = 12;
static const NSInteger POSE_LEFT_ELBOW = 13;
static const NSInteger POSE_RIGHT_ELBOW = 14;
static const NSInteger POSE_LEFT_WRIST = 15;
static const NSInteger POSE_RIGHT_WRIST = 16;
static const NSInteger POSE_LEFT_PINKY = 17;
static const NSInteger POSE_RIGHT_PINKY = 18;
static const NSInteger POSE_LEFT_INDEX = 19;
static const NSInteger POSE_RIGHT_INDEX = 20;
static const NSInteger POSE_LEFT_THUMB = 21;
static const NSInteger POSE_RIGHT_THUMB = 22;
static const NSInteger POSE_LEFT_HIP = 23;
static const NSInteger POSE_RIGHT_HIP = 24;
static const NSInteger POSE_LEFT_KNEE = 25;
static const NSInteger POSE_RIGHT_KNEE = 26;
static const NSInteger POSE_LEFT_ANKLE = 27;
static const NSInteger POSE_RIGHT_ANKLE = 28;
static const NSInteger POSE_LEFT_HEEL = 29;
static const NSInteger POSE_RIGHT_HEEL = 30;
static const NSInteger POSE_LEFT_FOOT = 31;
static const NSInteger POSE_RIGHT_FOOT = 32;


@interface PoseLandmark: NSObject

@property float x;
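The POSE_* indices above address the landmark array carried by PoseTrackingResults; the Swift helpers added later in this commit index it the same way. A rough sketch of reading a single landmark by index, assuming a results object delivered by the poseTrackingResultsListener callback:

import MPPoseTracking

// Sketch only: `results` is assumed to come from the poseTrackingResultsListener
// callback wired up elsewhere in this commit.
func logNose(_ results: PoseTrackingResults) {
    // `landmarks` is indexed with the POSE_* constants declared above.
    let nose = results.landmarks[POSE_NOSE]
    print("nose: x=\(nose.x) y=\(nose.y) z=\(nose.z) visibility=\(nose.visibility)")
}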
@@ -7,6 +7,8 @@
self.x = x;
self.y = y;
self.z = z;
self.presence = presence;
self.visibility = visibility;
return self;
}
mediapipe/swift/solutions/lindera/Asensei3D.swift (new file, 224 lines)
@@ -0,0 +1,224 @@
import Foundation

public struct Asensei3DPose {

    public let nose: BodyJointDetails?

    public let leftEyeInner: BodyJointDetails?
    public let leftEye: BodyJointDetails?
    public let leftEyeOuter: BodyJointDetails?

    public let rightEyeInner: BodyJointDetails?
    public let rightEye: BodyJointDetails?
    public let rightEyeOuter: BodyJointDetails?

    public let leftEar: BodyJointDetails?
    public let rightEar: BodyJointDetails?

    public let mouthLeft: BodyJointDetails?
    public let mouthRight: BodyJointDetails?

    public let leftShoulder: BodyJointDetails?
    public let rightShoulder: BodyJointDetails?

    public let leftElbow: BodyJointDetails?
    public let rightElbow: BodyJointDetails?

    public let leftWrist: BodyJointDetails?
    public let rightWrist: BodyJointDetails?

    public let leftPinky: BodyJointDetails?
    public let rightPinky: BodyJointDetails?

    public let leftIndex: BodyJointDetails?
    public let rightIndex: BodyJointDetails?

    public let leftThumb: BodyJointDetails?
    public let rightThumb: BodyJointDetails?

    public let leftHip: BodyJointDetails?
    public let rightHip: BodyJointDetails?

    public let leftKnee: BodyJointDetails?
    public let rightKnee: BodyJointDetails?

    public let rightAnkle: BodyJointDetails?
    public let leftAnkle: BodyJointDetails?

    public let rightHeel: BodyJointDetails?
    public let leftHeel: BodyJointDetails?

    public let rightFoot: BodyJointDetails?
    public let leftFoot: BodyJointDetails?
}

extension Asensei3DPose: Encodable {

    private enum CodingKeys: String, CodingKey {
        case nose

        case leftEyeInner
        case leftEye
        case leftEyeOuter

        case rightEyeInner
        case rightEye
        case rightEyeOuter

        case leftEar
        case rightEar

        case mouthLeft
        case mouthRight

        case leftShoulder
        case rightShoulder

        case leftElbow
        case rightElbow

        case leftWrist
        case rightWrist

        case leftPinky
        case rightPinky

        case leftIndex
        case rightIndex

        case leftThumb
        case rightThumb

        case leftHip
        case rightHip

        case leftKnee
        case rightKnee

        case rightAnkle
        case leftAnkle

        case rightHeel
        case leftHeel

        case rightFoot
        case leftFoot
    }

    public func encode(to encoder: Encoder) throws {
        var container = encoder.container(keyedBy: CodingKeys.self)

        try container.encodeIfPresent(self.nose, forKey: .nose)

        try container.encodeIfPresent(self.leftEyeInner, forKey: .leftEyeInner)
        try container.encodeIfPresent(self.leftEye, forKey: .leftEye)
        try container.encodeIfPresent(self.leftEyeOuter, forKey: .leftEyeOuter)

        try container.encodeIfPresent(self.rightEyeInner, forKey: .rightEyeInner)
        try container.encodeIfPresent(self.rightEye, forKey: .rightEye)
        try container.encodeIfPresent(self.rightEyeOuter, forKey: .rightEyeOuter)

        try container.encodeIfPresent(self.leftEar, forKey: .leftEar)
        try container.encodeIfPresent(self.rightEar, forKey: .rightEar)

        try container.encodeIfPresent(self.mouthLeft, forKey: .mouthLeft)
        try container.encodeIfPresent(self.mouthRight, forKey: .mouthRight)

        try container.encodeIfPresent(self.leftShoulder, forKey: .leftShoulder)
        try container.encodeIfPresent(self.rightShoulder, forKey: .rightShoulder)

        try container.encodeIfPresent(self.leftElbow, forKey: .leftElbow)
        try container.encodeIfPresent(self.rightElbow, forKey: .rightElbow)

        try container.encodeIfPresent(self.leftWrist, forKey: .leftWrist)
        try container.encodeIfPresent(self.rightWrist, forKey: .rightWrist)

        try container.encodeIfPresent(self.leftPinky, forKey: .leftPinky)
        try container.encodeIfPresent(self.rightPinky, forKey: .rightPinky)

        try container.encodeIfPresent(self.leftIndex, forKey: .leftIndex)
        try container.encodeIfPresent(self.rightIndex, forKey: .rightIndex)

        try container.encodeIfPresent(self.leftThumb, forKey: .leftThumb)
        try container.encodeIfPresent(self.rightThumb, forKey: .rightThumb)

        try container.encodeIfPresent(self.leftHip, forKey: .leftHip)
        try container.encodeIfPresent(self.rightHip, forKey: .rightHip)

        try container.encodeIfPresent(self.leftKnee, forKey: .leftKnee)
        try container.encodeIfPresent(self.rightKnee, forKey: .rightKnee)

        try container.encodeIfPresent(self.rightAnkle, forKey: .rightAnkle)
        try container.encodeIfPresent(self.leftAnkle, forKey: .leftAnkle)

        try container.encodeIfPresent(self.rightHeel, forKey: .rightHeel)
        try container.encodeIfPresent(self.leftHeel, forKey: .leftHeel)

        try container.encodeIfPresent(self.rightFoot, forKey: .rightFoot)
        try container.encodeIfPresent(self.leftFoot, forKey: .leftFoot)
    }
}

extension Asensei3DPose {

    public struct BodyJointDetails: Encodable {

        public let position: Vector3D
        public let confidence: Float

        private enum CodingKeys: String, CodingKey {
            case x
            case y
            case z
            case c
        }

        public func encode(to encoder: Encoder) throws {
            var container = encoder.container(keyedBy: CodingKeys.self)
            try container.encode(self.position.x, forKey: .x)
            try container.encode(self.position.y, forKey: .y)
            try container.encode(self.position.z, forKey: .z)
            try container.encode(self.confidence, forKey: .c)
        }
    }
}

extension Asensei3DPose {

    public struct Vector3D {
        public let x: Float
        public let y: Float
        public let z: Float

        public init(x: Float, y: Float, z: Float) {
            self.x = x
            self.y = y
            self.z = z
        }
    }
}

extension Asensei3DPose {

    public struct Event: Encodable {
        public let pose: Asensei3DPose
        let timestamp: TimeInterval

        private enum CodingKeys: String, CodingKey {
            case bodyJoints
            case timestamp
        }

        public func encode(to encoder: Encoder) throws {
            var container = encoder.container(keyedBy: CodingKeys.self)
            try container.encode(self.pose, forKey: .bodyJoints)
            try container.encode(self.timestamp * 1000, forKey: .timestamp)
        }
    }
}
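Because Asensei3DPose.Event is Encodable (each joint serialized under x/y/z/c, the whole pose nested under bodyJoints with a millisecond timestamp), an event received from the delegate can be turned straight into JSON. A small sketch, assuming the delegate wiring shown elsewhere in this commit; the class name is illustrative:

import Foundation
import LinderaDetection

// Illustrative delegate that JSON-encodes every detected pose. Only the Encodable
// conformance and key layout come from Asensei3D.swift above; the rest is a sketch.
final class JSONLoggingDelegate: LinderaDelegate {
    private let encoder = JSONEncoder()

    func lindera(_ lindera: Lindera, didDetect event: Asensei3DPose.Event) {
        guard let data = try? encoder.encode(event),
              let json = String(data: data, encoding: .utf8) else { return }
        // Produces {"bodyJoints":{"nose":{"x":...,"y":...,"z":...,"c":...},...},"timestamp":...}
        print(json)
    }
}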
@@ -1,9 +1,26 @@
load("@build_bazel_rules_swift//swift:swift.bzl", "swift_library")

swift_library(
name = "Lindera",
srcs = glob(["*.swift"]),
name = "lindera",
srcs = ["Lindera.swift","Asensei3D.swift"],
linkopts = [
"-lc++",
"-std=c++17",
"-lstdc++",
],
module_name = "LinderaDetection",
visibility = ["//visibility:public"],
deps = [
"//mediapipe/objc/solutions/posetracking_gpu:posetracking_gpu_solution",
]
"//mediapipe/objc/solutions/posetracking_gpu:posetracking_gpu_solution",
"//mediapipe/objc:mediapipe_framework_ios",
"//mediapipe/objc:mediapipe_input_sources_ios",
"//mediapipe/objc:mediapipe_layer_renderer",
] + select({
"//mediapipe:ios_i386": [],
"//mediapipe:ios_x86_64": [],
"//conditions:default": [
"//mediapipe/graphs/pose_tracking:pose_tracking_gpu_deps",
"//mediapipe/framework/formats:landmark_cc_proto",
],
}),
)
@@ -1,96 +1,208 @@
final class Lindera {
// This is the copperlabs posetracking api built in objective c
import MPPoseTracking
import UIKit

var cameraView: UIView {
self.linderaExerciseSession

/// A helper class to run the Pose Tracking API
/// TFLite models are also loaded when you initialize this class
public final class Lindera{
// initialize the PoseTracking api and load models
let poseTracking:PoseTracking = PoseTracking(poseTrackingOptions: PoseTrackingOptions(showLandmarks: true))
// attach Mediapipe camera helper to our class
let cameraSource = MPPCameraInputSource()

// A delegate to handle results
public weak var delegate: LinderaDelegate?

public var cameraView: UIView {
return self.linderaExerciseSession
}

weak var delegate: LinderaDelegate?

private lazy var linderaExerciseSession: LinderaExerciseSessionView = {
let session = LinderaExerciseSessionView()
session.detectionSpeed = .high
session.processCameraFrames = true
session.enable3DPoseDetection = true
session.pose3DDetectionHandler = { [weak self] event in

guard let self = self, let pose = event.pose.map({ Asensei3DPose($0) }) else {

// Initializes pipeline parameters and starts mediapipe graph
private lazy var linderaExerciseSession: UIView = {

// this will be the main camera view
let liveView = UIView()

// set camera preferences
self.cameraSource.sessionPreset = AVCaptureSession.Preset.high.rawValue
self.cameraSource.cameraPosition = AVCaptureDevice.Position.front
self.cameraSource.orientation = AVCaptureVideoOrientation.portrait
if (self.cameraSource.orientation == AVCaptureVideoOrientation.portrait){
self.cameraSource.videoMirrored = true
}
// call LinderaDelegate on pose tracking results
self.poseTracking.poseTrackingResultsListener = {[weak self] results in
guard let self = self, let results = results else {
return
}

self.delegate?.lindera(self, didDetect: .init(pose: pose, timestamp: event.sourceTimestamp))
self.delegate?.lindera(self, didDetect: .init(pose: Asensei3DPose.init(results), timestamp: CMTimeGetSeconds(self.poseTracking.timeStamp)))
}

self.poseTracking.startGraph()
// attach camera's output with poseTracking object and its videoQueue
self.cameraSource.setDelegate(self.poseTracking, queue: self.poseTracking.videoQueue)

return session
return liveView

}()

public required init(){}

public func startCamera(_ completion: ((Result<Void, Error>) -> Void)? = nil) {
if (!self.cameraSource.isRunning){
// set our rendering layer frame according to cameraView boundary
self.poseTracking.renderer.layer.frame = cameraView.layer.bounds
// attach render CALayer on cameraView to render output to
self.cameraView.layer.addSublayer(self.poseTracking.renderer.layer)

self.cameraSource.requestCameraAccess(
completionHandler: {(granted:Bool)->Void in
if (granted){
self.poseTracking.videoQueue.async(execute:{ [weak self] in

self?.cameraSource.start()

required init () { }

func startCamera(_ completion: ((Result<Void, Error>) -> Void)? = nil) {
DispatchQueue.main.async { [weak self] in
self?.linderaExerciseSession.startCamera { result in
switch result {
case .success:
} )
completion?(.success(Void()))
case .failure(let error):
completion?(.failure(error))
}else{

completion?(.failure(preconditionFailure("Camera Access Not Granted")))

}
}
})

}



}

func stopCamera(){
if (self.cameraSource.isRunning){
self.poseTracking.videoQueue.async { [weak self] in
self?.cameraSource.stop()
}

}
}

func stopCamera() {
DispatchQueue.main.async { [weak self] in
self?.linderaExerciseSession.stopCamera()
}
}

/// switches camera from front to back and vice versa
func switchCamera(_ completion: ((Result<Void, Error>) -> Void)? = nil) {
DispatchQueue.main.async { [weak self] in
self?.linderaExerciseSession.switchCamera(completionHandler: completion)
}
}
self.poseTracking.videoQueue.async { [weak self] in
if let self = self {

self.stopCamera()
self.startCamera(completion)

switch(self.cameraSource.cameraPosition){

func selectCamera(_ position: AVCaptureDevice.Position, _ completion: ((Result<Void, Error>) -> Void)? = nil) {
DispatchQueue.main.async { [weak self] in
self?.linderaExerciseSession.setUseFrontCamera(position == .front, completionHandler: completion)
case .unspecified:
completion?(.failure(preconditionFailure("Unknown Camera Position")))
case .back:
self.selectCamera(AVCaptureDevice.Position.front,completion)
case .front:
self.selectCamera(AVCaptureDevice.Position.back,completion)
@unknown default:
completion?(.failure(preconditionFailure("Unknown Camera Position")))

}


}

}
}

/// Choose front or back camera. Must restart camera after use if already started
public func selectCamera(_ position: AVCaptureDevice.Position, _ completion: ((Result<Void, Error>) -> Void)? = nil) {
self.poseTracking.videoQueue.async { [weak self] in
self?.cameraSource.cameraPosition = position
completion?(.success(Void()))
}

}

}

protocol LinderaDelegate: AnyObject {
public protocol LinderaDelegate: AnyObject {

func lindera(_ lindera: Lindera, didDetect event: Asensei3DPose.Event)
}


/// Convert PoseLandmarks from PoseTrackingAPI to BodyJointDetails
func landmarkToBodyJointDetails(landmark: PoseLandmark) -> Asensei3DPose.BodyJointDetails{
return Asensei3DPose.BodyJointDetails(position: .init(x: landmark.x, y: landmark.y, z: landmark.z), confidence: landmark.visibility)
}
// MARK: - Helpers
extension Asensei3DPose {

init(_ pose: Lindera3DPose) {
self.pelvis = pose.landmarks[.pelvis].map { .init(position: .init($0.position), confidence: $0.confidence) }
self.rightHip = pose.landmarks[.rightHip].map { .init(position: .init($0.position), confidence: $0.confidence) }
self.rightKnee = pose.landmarks[.rightKnee].map { .init(position: .init($0.position), confidence: $0.confidence) }
self.rightAnkle = pose.landmarks[.rightAnkle].map { .init(position: .init($0.position), confidence: $0.confidence) }
self.leftHip = pose.landmarks[.leftHip].map { .init(position: .init($0.position), confidence: $0.confidence) }
self.leftKnee = pose.landmarks[.leftKnee].map { .init(position: .init($0.position), confidence: $0.confidence) }
self.leftAnkle = pose.landmarks[.leftAnkle].map { .init(position: .init($0.position), confidence: $0.confidence) }
self.spine = pose.landmarks[.spine].map { .init(position: .init($0.position), confidence: $0.confidence) }
self.thorax = pose.landmarks[.thorax].map { .init(position: .init($0.position), confidence: $0.confidence) }
self.neckNose = pose.landmarks[.neckToNose].map { .init(position: .init($0.position), confidence: $0.confidence) }
self.headTop = pose.landmarks[.headTop].map { .init(position: .init($0.position), confidence: $0.confidence) }
self.leftShoulder = pose.landmarks[.leftShoulder].map { .init(position: .init($0.position), confidence: $0.confidence) }
self.leftElbow = pose.landmarks[.leftElbow].map { .init(position: .init($0.position), confidence: $0.confidence) }
self.leftWrist = pose.landmarks[.leftWrist].map { .init(position: .init($0.position), confidence: $0.confidence) }
self.rightShoulder = pose.landmarks[.rightShoulder].map { .init(position: .init($0.position), confidence: $0.confidence) }
self.rightElbow = pose.landmarks[.rightElbow].map { .init(position: .init($0.position), confidence: $0.confidence) }
self.rightWrist = pose.landmarks[.rightWrist].map { .init(position: .init($0.position), confidence: $0.confidence) }
init(_ pose: PoseTrackingResults) {

self.nose = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_NOSE])

self.leftEyeInner = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_EYE_INNER])
self.leftEye = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_EYE])
self.leftEyeOuter = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_EYE_OUTER])

self.rightEyeInner = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_EYE_INNER])
self.rightEye = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_EYE])
self.rightEyeOuter = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_EYE_OUTER])

self.leftEar = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_EAR])
self.rightEar = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_EAR])

self.mouthLeft = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_MOUTH_LEFT])
self.mouthRight = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_MOUTH_RIGHT])

self.leftShoulder = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_SHOULDER])
self.rightShoulder = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_SHOULDER])

self.leftElbow = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_ELBOW])
self.rightElbow = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_ELBOW])

self.leftWrist = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_WRIST])
self.rightWrist = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_WRIST])

self.leftPinky = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_PINKY])
self.rightPinky = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_PINKY])

self.leftIndex = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_INDEX])
self.rightIndex = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_INDEX])

self.leftThumb = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_THUMB])
self.rightThumb = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_THUMB])

self.leftHip = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_HIP])
self.rightHip = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_HIP])

self.leftKnee = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_KNEE])
self.rightKnee = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_KNEE])

self.rightAnkle = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_ANKLE])
self.leftAnkle = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_ANKLE])

self.rightHeel = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_HEEL])
self.leftHeel = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_HEEL])

self.rightFoot = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_RIGHT_FOOT])
self.leftFoot = landmarkToBodyJointDetails(landmark: pose.landmarks[POSE_LEFT_FOOT])

}
}

extension Asensei3DPose.Vector3D {

init(_ vector: Lindera3DVector) {
self.x = -vector.x
self.y = vector.z
self.z = vector.y
}
}
//extension Asensei3DPose.Vector3D {
//
//    init(_ vector: Lindera3DVector) {
//        self.x = -vector.x
//        self.y = vector.z
//        self.z = vector.y
//    }
//}
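As the doc comment on the new public selectCamera(_:_:) notes, selecting a camera only updates the capture position, so the camera has to be started (or restarted) afterwards. A rough usage sketch under that reading; the dispatch back to the main queue and the error handling are assumptions, not part of the commit:

import Foundation
import AVFoundation
import LinderaDetection

// Sketch only: `lindera` is assumed to be a configured instance whose cameraView
// is already installed in the view hierarchy.
func useBackCamera(with lindera: Lindera) {
    lindera.selectCamera(.back) { result in
        switch result {
        case .success:
            // selectCamera completes on a background queue; hop back to the main
            // queue before startCamera touches the camera view's layer.
            DispatchQueue.main.async {
                lindera.startCamera()
            }
        case .failure(let error):
            print("selecting the back camera failed: \(error)")
        }
    }
}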