diff --git a/mediapipe/tasks/python/vision/__init__.py b/mediapipe/tasks/python/vision/__init__.py
index 4ff367f2b..17593357b 100644
--- a/mediapipe/tasks/python/vision/__init__.py
+++ b/mediapipe/tasks/python/vision/__init__.py
@@ -23,6 +23,7 @@ import mediapipe.tasks.python.vision.object_detector
 
 GestureRecognizer = gesture_recognizer.GestureRecognizer
 GestureRecognizerOptions = gesture_recognizer.GestureRecognizerOptions
+GestureRecognizerResult = gesture_recognizer.GestureRecognizerResult
 HandLandmarker = hand_landmarker.HandLandmarker
 HandLandmarkerOptions = hand_landmarker.HandLandmarkerOptions
 HandLandmarkerResult = hand_landmarker.HandLandmarkerResult
diff --git a/mediapipe/tasks/python/vision/gesture_recognizer.py b/mediapipe/tasks/python/vision/gesture_recognizer.py
index 2e61e36eb..d118a315a 100644
--- a/mediapipe/tasks/python/vision/gesture_recognizer.py
+++ b/mediapipe/tasks/python/vision/gesture_recognizer.py
@@ -59,7 +59,7 @@ _GESTURE_DEFAULT_INDEX = -1
 
 @dataclasses.dataclass
-class GestureRecognitionResult:
+class GestureRecognizerResult:
   """The gesture recognition result from GestureRecognizer, where each vector
   element represents a single hand detected in the image.
 
   Attributes:
@@ -79,8 +79,8 @@ class GestureRecognitionResult:
 
 def _build_recognition_result(
     output_packets: Mapping[str,
-                            packet_module.Packet]) -> GestureRecognitionResult:
-  """Consturcts a `GestureRecognitionResult` from output packets."""
+                            packet_module.Packet]) -> GestureRecognizerResult:
+  """Constructs a `GestureRecognizerResult` from output packets."""
   gestures_proto_list = packet_getter.get_proto_list(
       output_packets[_HAND_GESTURE_STREAM_NAME])
   handedness_proto_list = packet_getter.get_proto_list(
@@ -138,9 +138,9 @@ def _build_recognition_result(
           landmark_module.Landmark.create_from_pb2(hand_world_landmark))
     hand_world_landmarks_results.append(hand_world_landmarks_list)
 
-  return GestureRecognitionResult(gesture_results, handedness_results,
-                                  hand_landmarks_results,
-                                  hand_world_landmarks_results)
+  return GestureRecognizerResult(gesture_results, handedness_results,
+                                 hand_landmarks_results,
+                                 hand_world_landmarks_results)
 
 
 @dataclasses.dataclass
@@ -185,7 +185,7 @@ class GestureRecognizerOptions:
   custom_gesture_classifier_options: Optional[
       _ClassifierOptions] = _ClassifierOptions()
   result_callback: Optional[Callable[
-      [GestureRecognitionResult, image_module.Image, int], None]] = None
+      [GestureRecognizerResult, image_module.Image, int], None]] = None
 
   @doc_controls.do_not_generate_docs
   def to_pb2(self) -> _GestureRecognizerGraphOptionsProto:
@@ -266,7 +266,7 @@ class GestureRecognizer(base_vision_task_api.BaseVisionTaskApi):
       if output_packets[_HAND_GESTURE_STREAM_NAME].is_empty():
         empty_packet = output_packets[_HAND_GESTURE_STREAM_NAME]
         options.result_callback(
-            GestureRecognitionResult([], [], [], []), image,
+            GestureRecognizerResult([], [], [], []), image,
             empty_packet.timestamp.value // _MICRO_SECONDS_PER_MILLISECOND)
         return
 
@@ -301,7 +301,7 @@ class GestureRecognizer(base_vision_task_api.BaseVisionTaskApi):
       self,
       image: image_module.Image,
       image_processing_options: Optional[_ImageProcessingOptions] = None
-  ) -> GestureRecognitionResult:
+  ) -> GestureRecognizerResult:
     """Performs hand gesture recognition on the given image.
 
     Only use this method when the GestureRecognizer is created with the image
@@ -332,7 +332,7 @@ class GestureRecognizer(base_vision_task_api.BaseVisionTaskApi):
     })
 
     if output_packets[_HAND_GESTURE_STREAM_NAME].is_empty():
-      return GestureRecognitionResult([], [], [], [])
+      return GestureRecognizerResult([], [], [], [])
 
     return _build_recognition_result(output_packets)
@@ -341,7 +341,7 @@ class GestureRecognizer(base_vision_task_api.BaseVisionTaskApi):
       image: image_module.Image,
       timestamp_ms: int,
       image_processing_options: Optional[_ImageProcessingOptions] = None
-  ) -> GestureRecognitionResult:
+  ) -> GestureRecognizerResult:
     """Performs gesture recognition on the provided video frame.
 
     Only use this method when the GestureRecognizer is created with the video
@@ -376,7 +376,7 @@ class GestureRecognizer(base_vision_task_api.BaseVisionTaskApi):
     })
 
     if output_packets[_HAND_GESTURE_STREAM_NAME].is_empty():
-      return GestureRecognitionResult([], [], [], [])
+      return GestureRecognizerResult([], [], [], [])
 
     return _build_recognition_result(output_packets)
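
For reference, a minimal usage sketch (not part of this diff) of the renamed result type as it is exported from mediapipe.tasks.python.vision after this change; the model bundle path 'gesture_recognizer.task' and the image file 'hand.jpg' below are placeholders.

# Minimal sketch, assuming a locally available gesture recognizer model bundle.
import mediapipe as mp
from mediapipe.tasks import python as mp_tasks
from mediapipe.tasks.python import vision

options = vision.GestureRecognizerOptions(
    base_options=mp_tasks.BaseOptions(
        model_asset_path='gesture_recognizer.task'),  # placeholder path
    running_mode=vision.RunningMode.IMAGE)

with vision.GestureRecognizer.create_from_options(options) as recognizer:
  image = mp.Image.create_from_file('hand.jpg')  # placeholder input image
  # recognize() now returns the renamed GestureRecognizerResult dataclass;
  # its list attributes are empty when no hands are detected.
  result = recognizer.recognize(image)
  assert isinstance(result, vision.GestureRecognizerResult)
  for hand_gestures in result.gestures:
    print(hand_gestures[0].category_name, hand_gestures[0].score)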