diff --git a/mediapipe/tasks/cc/vision/face_landmarker/face_landmarker_test.cc b/mediapipe/tasks/cc/vision/face_landmarker/face_landmarker_test.cc
index 411693ecf..97af42da7 100644
--- a/mediapipe/tasks/cc/vision/face_landmarker/face_landmarker_test.cc
+++ b/mediapipe/tasks/cc/vision/face_landmarker/face_landmarker_test.cc
@@ -67,7 +67,7 @@ constexpr char kPortraitExpectedFaceLandmarksName[] =
     "portrait_expected_face_landmarks.pbtxt";
 constexpr char kPortraitExpectedBlendshapesName[] =
     "portrait_expected_blendshapes.pbtxt";
-constexpr char kPortaitExpectedFaceGeomertyName[] =
+constexpr char kPortraitExpectedFaceGeometryName[] =
     "portrait_expected_face_geometry.pbtxt";
 
 constexpr float kLandmarksDiffMargin = 0.03;
@@ -100,7 +100,7 @@ struct FaceLandmarkerTestParams {
 
 mediapipe::MatrixData MakePortraitExpectedFacialTransformationMatrix() {
   auto face_geometry = GetExpectedProto(
-      kPortaitExpectedFaceGeomertyName);
+      kPortraitExpectedFaceGeometryName);
   return face_geometry.pose_transform_matrix();
 }
diff --git a/mediapipe/tasks/ios/components/containers/utils/sources/MPPCategory+Helpers.mm b/mediapipe/tasks/ios/components/containers/utils/sources/MPPCategory+Helpers.mm
index ff0983139..12cfa5627 100644
--- a/mediapipe/tasks/ios/components/containers/utils/sources/MPPCategory+Helpers.mm
+++ b/mediapipe/tasks/ios/components/containers/utils/sources/MPPCategory+Helpers.mm
@@ -21,20 +21,20 @@ using ClassificationProto = ::mediapipe::Classification;
 
 @implementation MPPCategory (Helpers)
 
-+ (MPPCategory *)categoryWithProto:(const ClassificationProto &)clasificationProto {
++ (MPPCategory *)categoryWithProto:(const ClassificationProto &)classificationProto {
   NSString *categoryName;
   NSString *displayName;
 
-  if (clasificationProto.has_label()) {
-    categoryName = [NSString stringWithCppString:clasificationProto.label()];
+  if (classificationProto.has_label()) {
+    categoryName = [NSString stringWithCppString:classificationProto.label()];
   }
 
-  if (clasificationProto.has_display_name()) {
-    displayName = [NSString stringWithCppString:clasificationProto.display_name()];
+  if (classificationProto.has_display_name()) {
+    displayName = [NSString stringWithCppString:classificationProto.display_name()];
   }
 
-  return [[MPPCategory alloc] initWithIndex:clasificationProto.index()
-                                      score:clasificationProto.score()
+  return [[MPPCategory alloc] initWithIndex:classificationProto.index()
+                                      score:classificationProto.score()
                                categoryName:categoryName
                                 displayName:displayName];
 }
diff --git a/mediapipe/tasks/web/audio/README.md b/mediapipe/tasks/web/audio/README.md
index 834785709..ed2543c7a 100644
--- a/mediapipe/tasks/web/audio/README.md
+++ b/mediapipe/tasks/web/audio/README.md
@@ -13,7 +13,7 @@ const audio = await FilesetResolver.forAudioTasks(
 const audioClassifier = await AudioClassifier.createFromModelPath(audio,
     "https://storage.googleapis.com/mediapipe-tasks/audio_classifier/yamnet_audio_classifier_with_metadata.tflite"
 );
-const classifications = audioClassifier.classifiy(audioData);
+const classifications = audioClassifier.classify(audioData);
 ```
 
 ## Audio Embedding
diff --git a/mediapipe/tasks/web/text/README.md b/mediapipe/tasks/web/text/README.md
index 089894653..4a26f5b9d 100644
--- a/mediapipe/tasks/web/text/README.md
+++ b/mediapipe/tasks/web/text/README.md
@@ -28,7 +28,7 @@ const text = await FilesetResolver.forTextTasks(
 const textClassifier = await TextClassifier.createFromModelPath(text,
     "https://storage.googleapis.com/mediapipe-tasks/text_classifier/bert_text_classifier.tflite"
 );
-const classifications = textClassifier.classifiy(textData);
+const classifications = textClassifier.classify(textData);
 ```
 
 For more information, refer to the [Text Classification](https://developers.google.com/mediapipe/solutions/text/text_classifier/web_js) documentation.
diff --git a/mediapipe/tasks/web/vision/image_segmenter/image_segmenter.ts b/mediapipe/tasks/web/vision/image_segmenter/image_segmenter.ts
index 740047762..c32423e12 100644
--- a/mediapipe/tasks/web/vision/image_segmenter/image_segmenter.ts
+++ b/mediapipe/tasks/web/vision/image_segmenter/image_segmenter.ts
@@ -56,7 +56,7 @@ const DEFAULT_OUTPUT_CONFIDENCE_MASKS = true;
  * asynchronous processing is needed, all data needs to be copied before the
  * callback returns.
  */
-export type ImageSegmenterCallack = (result: ImageSegmenterResult) => void;
+export type ImageSegmenterCallback = (result: ImageSegmenterResult) => void;
 
 /** Performs image segmentation on images. */
 export class ImageSegmenter extends VisionTaskRunner {
@@ -208,7 +208,7 @@ export class ImageSegmenter extends VisionTaskRunner {
    * lifetime of the returned data is only guaranteed for the duration of the
    * callback.
    */
-  segment(image: ImageSource, callback: ImageSegmenterCallack): void;
+  segment(image: ImageSource, callback: ImageSegmenterCallback): void;
   /**
    * Performs image segmentation on the provided single image and invokes the
    * callback with the response. The method returns synchronously once the
@@ -224,12 +224,12 @@ export class ImageSegmenter extends VisionTaskRunner {
    */
   segment(
       image: ImageSource, imageProcessingOptions: ImageProcessingOptions,
-      callback: ImageSegmenterCallack): void;
+      callback: ImageSegmenterCallback): void;
   segment(
       image: ImageSource,
       imageProcessingOptionsOrCallback: ImageProcessingOptions|
-      ImageSegmenterCallack,
-      callback?: ImageSegmenterCallack): void {
+      ImageSegmenterCallback,
+      callback?: ImageSegmenterCallback): void {
     const imageProcessingOptions =
         typeof imageProcessingOptionsOrCallback !== 'function' ?
         imageProcessingOptionsOrCallback :
@@ -258,7 +258,7 @@ export class ImageSegmenter extends VisionTaskRunner {
    */
   segmentForVideo(
       videoFrame: ImageSource, timestamp: number,
-      callback: ImageSegmenterCallack): void;
+      callback: ImageSegmenterCallback): void;
   /**
    * Performs image segmentation on the provided video frame and invokes the
    * callback with the response. The method returns synchronously once the
@@ -275,12 +275,12 @@ export class ImageSegmenter extends VisionTaskRunner {
    */
   segmentForVideo(
       videoFrame: ImageSource, imageProcessingOptions: ImageProcessingOptions,
-      timestamp: number, callback: ImageSegmenterCallack): void;
+      timestamp: number, callback: ImageSegmenterCallback): void;
   segmentForVideo(
       videoFrame: ImageSource,
       timestampOrImageProcessingOptions: number|ImageProcessingOptions,
-      timestampOrCallback: number|ImageSegmenterCallack,
-      callback?: ImageSegmenterCallack): void {
+      timestampOrCallback: number|ImageSegmenterCallback,
+      callback?: ImageSegmenterCallback): void {
     const imageProcessingOptions =
         typeof timestampOrImageProcessingOptions !== 'number' ?
         timestampOrImageProcessingOptions :
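For context on the renamed `ImageSegmenterCallback` type, below is a minimal TypeScript sketch of how the callback-based `segment()` overload is typically consumed from the published `@mediapipe/tasks-vision` package. The wasm bundle URL, model file name, and element id are illustrative assumptions only:

```typescript
import {FilesetResolver, ImageSegmenter, ImageSegmenterResult} from '@mediapipe/tasks-vision';

// Hypothetical asset locations; substitute your own wasm bundle and model file.
const vision = await FilesetResolver.forVisionTasks(
    'https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision/wasm');
const imageSegmenter = await ImageSegmenter.createFromModelPath(
    vision, 'selfie_segmenter.tflite');

const image = document.getElementById('input-image') as HTMLImageElement;

// The arrow function satisfies ImageSegmenterCallback, i.e.
// (result: ImageSegmenterResult) => void. The masks in `result` are only
// valid for the duration of the callback, so copy them if needed later.
imageSegmenter.segment(image, (result: ImageSegmenterResult) => {
  console.log(`Received ${result.confidenceMasks?.length ?? 0} confidence masks`);
});
```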