diff --git a/mediapipe/tasks/python/test/vision/face_landmarker_test.py b/mediapipe/tasks/python/test/vision/face_landmarker_test.py
index 5028f8545..590cca750 100644
--- a/mediapipe/tasks/python/test/vision/face_landmarker_test.py
+++ b/mediapipe/tasks/python/test/vision/face_landmarker_test.py
@@ -51,9 +51,9 @@ _PORTRAIT_IMAGE = 'portrait.jpg'
 _CAT_IMAGE = 'cat.jpg'
 _PORTRAIT_EXPECTED_FACE_LANDMARKS = 'portrait_expected_face_landmarks.pbtxt'
 _PORTRAIT_EXPECTED_BLENDSHAPES = 'portrait_expected_blendshapes.pbtxt'
-_LANDMARKS_DIFF_MARGIN = 0.03
-_BLENDSHAPES_DIFF_MARGIN = 0.13
-_FACIAL_TRANSFORMATION_MATRIX_DIFF_MARGIN = 0.02
+_LANDMARKS_MARGIN = 0.03
+_BLENDSHAPES_MARGIN = 0.13
+_FACIAL_TRANSFORMATION_MATRIX_MARGIN = 0.02


 def _get_expected_face_landmarks(file_path: str):
@@ -126,10 +126,10 @@ class FaceLandmarkerTest(parameterized.TestCase):
     for i, _ in enumerate(actual_landmarks):
       for j, elem in enumerate(actual_landmarks[i]):
         self.assertAlmostEqual(
-            elem.x, expected_landmarks[i][j].x, delta=_LANDMARKS_DIFF_MARGIN
+            elem.x, expected_landmarks[i][j].x, delta=_LANDMARKS_MARGIN
         )
         self.assertAlmostEqual(
-            elem.y, expected_landmarks[i][j].y, delta=_LANDMARKS_DIFF_MARGIN
+            elem.y, expected_landmarks[i][j].y, delta=_LANDMARKS_MARGIN
         )

   def _expect_blendshapes_correct(
@@ -144,7 +144,7 @@ class FaceLandmarkerTest(parameterized.TestCase):
         self.assertAlmostEqual(
             elem.score,
             expected_blendshapes[i][j].score,
-            delta=_BLENDSHAPES_DIFF_MARGIN,
+            delta=_BLENDSHAPES_MARGIN,
         )

   def _expect_facial_transformation_matrixes_correct(
@@ -158,7 +158,7 @@ class FaceLandmarkerTest(parameterized.TestCase):
         self.assertSequenceAlmostEqual(
             elem.flatten(),
             expected_matrix_list[i].flatten(),
-            delta=_FACIAL_TRANSFORMATION_MATRIX_DIFF_MARGIN,
+            delta=_FACIAL_TRANSFORMATION_MATRIX_MARGIN,
         )

   def test_create_from_file_succeeds_with_valid_model_path(self):
diff --git a/mediapipe/tasks/python/test/vision/hand_landmarker_test.py b/mediapipe/tasks/python/test/vision/hand_landmarker_test.py
index a7aea1cb2..6b8e4ec32 100644
--- a/mediapipe/tasks/python/test/vision/hand_landmarker_test.py
+++ b/mediapipe/tasks/python/test/vision/hand_landmarker_test.py
@@ -54,7 +54,7 @@ _POINTING_UP_IMAGE = 'pointing_up.jpg'
 _POINTING_UP_LANDMARKS = 'pointing_up_landmarks.pbtxt'
 _POINTING_UP_ROTATED_IMAGE = 'pointing_up_rotated.jpg'
 _POINTING_UP_ROTATED_LANDMARKS = 'pointing_up_rotated_landmarks.pbtxt'
-_LANDMARKS_ERROR_TOLERANCE = 0.03
+_LANDMARKS_MARGIN = 0.03
 _HANDEDNESS_MARGIN = 0.05


@@ -89,39 +89,52 @@ class HandLandmarkerTest(parameterized.TestCase):
     self.model_path = test_utils.get_test_data_path(
         _HAND_LANDMARKER_BUNDLE_ASSET_FILE)

-  def _assert_actual_result_approximately_matches_expected_result(
-      self, actual_result: _HandLandmarkerResult,
-      expected_result: _HandLandmarkerResult):
+  def _expect_hand_landmarks_correct(
+      self, actual_landmarks, expected_landmarks, margin
+  ):
     # Expects to have the same number of hands detected.
-    self.assertLen(actual_result.hand_landmarks,
-                   len(expected_result.hand_landmarks))
-    self.assertLen(actual_result.hand_world_landmarks,
-                   len(expected_result.hand_world_landmarks))
-    self.assertLen(actual_result.handedness, len(expected_result.handedness))
-    # Actual landmarks match expected landmarks.
-    self.assertLen(actual_result.hand_landmarks[0],
-                   len(expected_result.hand_landmarks[0]))
-    actual_landmarks = actual_result.hand_landmarks[0]
-    expected_landmarks = expected_result.hand_landmarks[0]
-    for i, rename_me in enumerate(actual_landmarks):
-      self.assertAlmostEqual(
-          rename_me.x,
-          expected_landmarks[i].x,
-          delta=_LANDMARKS_ERROR_TOLERANCE)
-      self.assertAlmostEqual(
-          rename_me.y,
-          expected_landmarks[i].y,
-          delta=_LANDMARKS_ERROR_TOLERANCE)
-    # Actual handedness matches expected handedness.
-    actual_top_handedness = actual_result.handedness[0][0]
-    expected_top_handedness = expected_result.handedness[0][0]
+    self.assertLen(actual_landmarks, len(expected_landmarks))
+
+    for i, _ in enumerate(actual_landmarks):
+      for j, elem in enumerate(actual_landmarks[i]):
+        self.assertAlmostEqual(
+            elem.x,
+            expected_landmarks[i][j].x,
+            delta=margin
+        )
+        self.assertAlmostEqual(
+            elem.y,
+            expected_landmarks[i][j].y,
+            delta=margin
+        )
+
+  def _expect_handedness_correct(
+      self, actual_handedness, expected_handedness, margin
+  ):
+    # Actual top handedness matches expected top handedness.
+    actual_top_handedness = actual_handedness[0][0]
+    expected_top_handedness = expected_handedness[0][0]
     self.assertEqual(actual_top_handedness.index,
                      expected_top_handedness.index)
     self.assertEqual(actual_top_handedness.category_name,
                      expected_top_handedness.category_name)
     self.assertAlmostEqual(
-        actual_top_handedness.score,
-        expected_top_handedness.score,
-        delta=_HANDEDNESS_MARGIN)
+        actual_top_handedness.score,
+        expected_top_handedness.score,
+        delta=margin)
+
+  def _expect_hand_landmarker_results_correct(
+      self,
+      actual_result: _HandLandmarkerResult,
+      expected_result: _HandLandmarkerResult
+  ):
+    self._expect_hand_landmarks_correct(
+        actual_result.hand_landmarks, expected_result.hand_landmarks,
+        _LANDMARKS_MARGIN
+    )
+    self._expect_handedness_correct(
+        actual_result.handedness, expected_result.handedness,
+        _HANDEDNESS_MARGIN
+    )

   def test_create_from_file_succeeds_with_valid_model_path(self):
     # Creates with default option and valid model file successfully.
@@ -175,7 +188,7 @@ class HandLandmarkerTest(parameterized.TestCase):
     # Performs hand landmarks detection on the input.
     detection_result = landmarker.detect(self.test_image)
     # Comparing results.
-    self._assert_actual_result_approximately_matches_expected_result(
+    self._expect_hand_landmarker_results_correct(
         detection_result, expected_detection_result)
     # Closes the hand landmarker explicitly when the hand landmarker is not used
     # in a context.
@@ -203,7 +216,7 @@ class HandLandmarkerTest(parameterized.TestCase):
       # Performs hand landmarks detection on the input.
       detection_result = landmarker.detect(self.test_image)
       # Comparing results.
-      self._assert_actual_result_approximately_matches_expected_result(
+      self._expect_hand_landmarker_results_correct(
          detection_result, expected_detection_result)

   def test_detect_succeeds_with_num_hands(self):
@@ -234,7 +247,7 @@ class HandLandmarkerTest(parameterized.TestCase):
       expected_detection_result = _get_expected_hand_landmarker_result(
           _POINTING_UP_ROTATED_LANDMARKS)
       # Comparing results.
-      self._assert_actual_result_approximately_matches_expected_result(
+      self._expect_hand_landmarker_results_correct(
           detection_result, expected_detection_result)

   def test_detect_fails_with_region_of_interest(self):
@@ -351,7 +364,7 @@ class HandLandmarkerTest(parameterized.TestCase):
         result = landmarker.detect_for_video(test_image, timestamp,
                                              image_processing_options)
         if result.hand_landmarks and result.hand_world_landmarks and result.handedness:
-          self._assert_actual_result_approximately_matches_expected_result(
+          self._expect_hand_landmarker_results_correct(
               result, expected_result)
         else:
           self.assertEqual(result, expected_result)
@@ -406,7 +419,7 @@ class HandLandmarkerTest(parameterized.TestCase):
     def check_result(result: _HandLandmarkerResult, output_image: _Image,
                      timestamp_ms: int):
       if result.hand_landmarks and result.hand_world_landmarks and result.handedness:
-        self._assert_actual_result_approximately_matches_expected_result(
+        self._expect_hand_landmarker_results_correct(
             result, expected_result)
       else:
         self.assertEqual(result, expected_result)
diff --git a/mediapipe/tasks/python/test/vision/pose_landmarker_test.py b/mediapipe/tasks/python/test/vision/pose_landmarker_test.py
index 974389a0b..07d7f06a4 100644
--- a/mediapipe/tasks/python/test/vision/pose_landmarker_test.py
+++ b/mediapipe/tasks/python/test/vision/pose_landmarker_test.py
@@ -50,8 +50,7 @@ _POSE_LANDMARKER_BUNDLE_ASSET_FILE = 'pose_landmarker.task'
 _BURGER_IMAGE = 'burger.jpg'
 _POSE_IMAGE = 'pose.jpg'
 _POSE_LANDMARKS = 'pose_landmarks.pbtxt'
-_LANDMARKS_DIFF_MARGIN = 0.03
-_LANDMARKS_ON_VIDEO_DIFF_MARGIN = 0.03
+_LANDMARKS_MARGIN = 0.03


 def _get_expected_pose_landmarker_result(
@@ -87,10 +86,7 @@ class PoseLandmarkerTest(parameterized.TestCase):
         _POSE_LANDMARKER_BUNDLE_ASSET_FILE)

   def _expect_pose_landmarks_correct(
-      self,
-      actual_landmarks: List[List[landmark_module.NormalizedLandmark]],
-      expected_landmarks: List[List[landmark_module.NormalizedLandmark]],
-      diff_margin: float
+      self, actual_landmarks, expected_landmarks, margin
   ):
     # Expects to have the same number of poses detected.
     self.assertLen(actual_landmarks, len(expected_landmarks))
@@ -98,21 +94,21 @@ class PoseLandmarkerTest(parameterized.TestCase):
     for i, _ in enumerate(actual_landmarks):
       for j, elem in enumerate(actual_landmarks[i]):
         self.assertAlmostEqual(
-            elem.x, expected_landmarks[i][j].x, delta=diff_margin
+            elem.x, expected_landmarks[i][j].x, delta=margin
         )
         self.assertAlmostEqual(
-            elem.y, expected_landmarks[i][j].y, delta=diff_margin
+            elem.y, expected_landmarks[i][j].y, delta=margin
         )

   def _expect_pose_landmarker_results_correct(
       self,
       actual_result: PoseLandmarkerResult,
       expected_result: PoseLandmarkerResult,
-      diff_margin: float
+      margin: float
   ):
     self._expect_pose_landmarks_correct(
         actual_result.pose_landmarks, expected_result.pose_landmarks,
-        diff_margin
+        margin
     )

   def test_create_from_file_succeeds_with_valid_model_path(self):
@@ -170,7 +166,7 @@ class PoseLandmarkerTest(parameterized.TestCase):

       # Comparing results.
       self._expect_pose_landmarker_results_correct(
-          detection_result, expected_detection_result, _LANDMARKS_DIFF_MARGIN
+          detection_result, expected_detection_result, _LANDMARKS_MARGIN
       )
     # Closes the pose landmarker explicitly when the pose landmarker is not used
     # in a context.
@@ -201,7 +197,7 @@ class PoseLandmarkerTest(parameterized.TestCase):

       # Comparing results.
       self._expect_pose_landmarker_results_correct(
-          detection_result, expected_detection_result, _LANDMARKS_DIFF_MARGIN
+          detection_result, expected_detection_result, _LANDMARKS_MARGIN
       )

   def test_detect_fails_with_region_of_interest(self):
@@ -319,7 +315,7 @@ class PoseLandmarkerTest(parameterized.TestCase):
                                              image_processing_options)
         if result.pose_landmarks:
           self._expect_pose_landmarker_results_correct(
-              result, expected_result, _LANDMARKS_ON_VIDEO_DIFF_MARGIN
+              result, expected_result, _LANDMARKS_MARGIN
           )
         else:
           self.assertEqual(result, expected_result)
@@ -373,7 +369,7 @@ class PoseLandmarkerTest(parameterized.TestCase):
                      timestamp_ms: int):
       if result.pose_landmarks:
         self._expect_pose_landmarker_results_correct(
-            result, expected_result, _LANDMARKS_DIFF_MARGIN
+            result, expected_result, _LANDMARKS_MARGIN
         )
       else:
         self.assertEqual(result, expected_result)
diff --git a/mediapipe/tasks/python/vision/face_detector.py b/mediapipe/tasks/python/vision/face_detector.py
index cf09a378d..5a9487eec 100644
--- a/mediapipe/tasks/python/vision/face_detector.py
+++ b/mediapipe/tasks/python/vision/face_detector.py
@@ -71,8 +71,8 @@ class FaceDetectorOptions:

   base_options: _BaseOptions
   running_mode: _RunningMode = _RunningMode.IMAGE
-  min_detection_confidence: Optional[float] = None
-  min_suppression_threshold: Optional[float] = None
+  min_detection_confidence: float = 0.5
+  min_suppression_threshold: float = 0.3
   result_callback: Optional[
       Callable[
           [detections_module.DetectionResult, image_module.Image, int], None
diff --git a/mediapipe/tasks/python/vision/face_landmarker.py b/mediapipe/tasks/python/vision/face_landmarker.py
index c5b24499f..3776637fa 100644
--- a/mediapipe/tasks/python/vision/face_landmarker.py
+++ b/mediapipe/tasks/python/vision/face_landmarker.py
@@ -2966,12 +2966,12 @@ class FaceLandmarkerOptions:

   base_options: _BaseOptions
   running_mode: _RunningMode = _RunningMode.IMAGE
-  num_faces: Optional[int] = 1
-  min_face_detection_confidence: Optional[float] = 0.5
-  min_face_presence_confidence: Optional[float] = 0.5
-  min_tracking_confidence: Optional[float] = 0.5
-  output_face_blendshapes: Optional[bool] = False
-  output_facial_transformation_matrixes: Optional[bool] = False
+  num_faces: int = 1
+  min_face_detection_confidence: float = 0.5
+  min_face_presence_confidence: float = 0.5
+  min_tracking_confidence: float = 0.5
+  output_face_blendshapes: bool = False
+  output_facial_transformation_matrixes: bool = False
   result_callback: Optional[
       Callable[[FaceLandmarkerResult, image_module.Image, int], None]
   ] = None
diff --git a/mediapipe/tasks/python/vision/gesture_recognizer.py b/mediapipe/tasks/python/vision/gesture_recognizer.py
index 7d480c95f..9ae5285df 100644
--- a/mediapipe/tasks/python/vision/gesture_recognizer.py
+++ b/mediapipe/tasks/python/vision/gesture_recognizer.py
@@ -194,14 +194,14 @@ class GestureRecognizerOptions:

   base_options: _BaseOptions
   running_mode: _RunningMode = _RunningMode.IMAGE
-  num_hands: Optional[int] = 1
-  min_hand_detection_confidence: Optional[float] = 0.5
-  min_hand_presence_confidence: Optional[float] = 0.5
-  min_tracking_confidence: Optional[float] = 0.5
-  canned_gesture_classifier_options: Optional[_ClassifierOptions] = (
+  num_hands: int = 1
+  min_hand_detection_confidence: float = 0.5
+  min_hand_presence_confidence: float = 0.5
+  min_tracking_confidence: float = 0.5
+  canned_gesture_classifier_options: _ClassifierOptions = (
       dataclasses.field(default_factory=_ClassifierOptions)
   )
-  custom_gesture_classifier_options: Optional[_ClassifierOptions] = (
+  custom_gesture_classifier_options: _ClassifierOptions = (
       dataclasses.field(default_factory=_ClassifierOptions)
   )
   result_callback: Optional[
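Aside (illustrative, not part of the patch): a minimal standard-library sketch of why the two classifier-options fields above still use dataclasses.field(default_factory=...) after dropping Optional. The class names below are stand-ins, not MediaPipe symbols.

import dataclasses


@dataclasses.dataclass
class ClassifierOptions:  # stand-in for _ClassifierOptions
  score_threshold: float = 0.0


@dataclasses.dataclass
class RecognizerOptions:  # stand-in for GestureRecognizerOptions
  # A plain `ClassifierOptions()` class-level default would either be shared by
  # every instance or rejected outright on newer Python versions;
  # default_factory builds a fresh ClassifierOptions per instance.
  canned_gesture_classifier_options: ClassifierOptions = dataclasses.field(
      default_factory=ClassifierOptions
  )


a, b = RecognizerOptions(), RecognizerOptions()
assert (
    a.canned_gesture_classifier_options
    is not b.canned_gesture_classifier_options
)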
diff --git a/mediapipe/tasks/python/vision/hand_landmarker.py b/mediapipe/tasks/python/vision/hand_landmarker.py
index e6fcca2e2..616f0b724 100644
--- a/mediapipe/tasks/python/vision/hand_landmarker.py
+++ b/mediapipe/tasks/python/vision/hand_landmarker.py
@@ -182,10 +182,10 @@ class HandLandmarkerOptions:

   base_options: _BaseOptions
   running_mode: _RunningMode = _RunningMode.IMAGE
-  num_hands: Optional[int] = 1
-  min_hand_detection_confidence: Optional[float] = 0.5
-  min_hand_presence_confidence: Optional[float] = 0.5
-  min_tracking_confidence: Optional[float] = 0.5
+  num_hands: int = 1
+  min_hand_detection_confidence: float = 0.5
+  min_hand_presence_confidence: float = 0.5
+  min_tracking_confidence: float = 0.5
   result_callback: Optional[
       Callable[[HandLandmarkerResult, image_module.Image, int], None]
   ] = None
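Not part of the patch, a hedged usage sketch: with the concrete defaults above in place, an options object can be built without passing any thresholds explicitly. The import style follows the public MediaPipe Tasks Python API; the model and image paths are placeholders.

# Sketch only: FaceDetectorOptions now defaults to
# min_detection_confidence=0.5 and min_suppression_threshold=0.3,
# so only the model asset needs to be specified.
import mediapipe as mp
from mediapipe.tasks import python as mp_python
from mediapipe.tasks.python import vision

options = vision.FaceDetectorOptions(
    base_options=mp_python.BaseOptions(
        model_asset_path='face_detector.tflite'  # placeholder asset path
    )
)
with vision.FaceDetector.create_from_options(options) as detector:
  result = detector.detect(mp.Image.create_from_file('portrait.jpg'))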