diff --git a/mediapipe/model_maker/python/core/utils/loss_functions.py b/mediapipe/model_maker/python/core/utils/loss_functions.py index a60bd2ed4..504ba91ef 100644 --- a/mediapipe/model_maker/python/core/utils/loss_functions.py +++ b/mediapipe/model_maker/python/core/utils/loss_functions.py @@ -187,7 +187,7 @@ class PerceptualLoss(tf.keras.Model, metaclass=abc.ABCMeta): """Instantiates perceptual loss. Args: - feature_weight: The weight coeffcients of multiple model extracted + feature_weight: The weight coefficients of multiple model extracted features used for calculating the perceptual loss. loss_weight: The weight coefficients between `style_loss` and `content_loss`. diff --git a/mediapipe/model_maker/python/vision/face_stylizer/face_stylizer.py b/mediapipe/model_maker/python/vision/face_stylizer/face_stylizer.py index 85b567ca3..5758ac7b5 100644 --- a/mediapipe/model_maker/python/vision/face_stylizer/face_stylizer.py +++ b/mediapipe/model_maker/python/vision/face_stylizer/face_stylizer.py @@ -105,7 +105,7 @@ class FaceStylizer(object): self._train_model(train_data=train_data, preprocessor=self._preprocessor) def _create_model(self): - """Creates the componenets of face stylizer.""" + """Creates the components of face stylizer.""" self._encoder = model_util.load_keras_model( constants.FACE_STYLIZER_ENCODER_MODEL_FILES.get_path() ) @@ -138,7 +138,7 @@ class FaceStylizer(object): """ train_dataset = train_data.gen_tf_dataset(preprocess=preprocessor) - # TODO: Support processing mulitple input style images. The + # TODO: Support processing multiple input style images. The # input style images are expected to have similar style. # style_sample represents a tuple of (style_image, style_label). 
style_sample = next(iter(train_dataset)) diff --git a/mediapipe/tasks/ios/test/vision/image_classifier/MPPImageClassifierTests.m b/mediapipe/tasks/ios/test/vision/image_classifier/MPPImageClassifierTests.m index 332a217ca..59383dad6 100644 --- a/mediapipe/tasks/ios/test/vision/image_classifier/MPPImageClassifierTests.m +++ b/mediapipe/tasks/ios/test/vision/image_classifier/MPPImageClassifierTests.m @@ -668,10 +668,10 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation"; // Because of flow limiting, we cannot ensure that the callback will be // invoked `iterationCount` times. - // An normal expectation will fail if expectation.fullfill() is not called + // A normal expectation will fail if expectation.fulfill() is not called // `expectation.expectedFulfillmentCount` times. // If `expectation.isInverted = true`, the test will only succeed if - // expectation is not fullfilled for the specified `expectedFulfillmentCount`. + // expectation is not fulfilled for the specified `expectedFulfillmentCount`. // Since in our case we cannot predict how many times the expectation is // supposed to be fullfilled setting, // `expectation.expectedFulfillmentCount` = `iterationCount` + 1 and diff --git a/mediapipe/tasks/ios/test/vision/object_detector/MPPObjectDetectorTests.m b/mediapipe/tasks/ios/test/vision/object_detector/MPPObjectDetectorTests.m index 1b717ba48..2ef5a0957 100644 --- a/mediapipe/tasks/ios/test/vision/object_detector/MPPObjectDetectorTests.m +++ b/mediapipe/tasks/ios/test/vision/object_detector/MPPObjectDetectorTests.m @@ -673,15 +673,15 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation"; // Because of flow limiting, we cannot ensure that the callback will be // invoked `iterationCount` times. - // An normal expectation will fail if expectation.fullfill() is not called + // A normal expectation will fail if expectation.fulfill() is not called // `expectation.expectedFulfillmentCount` times.
// If `expectation.isInverted = true`, the test will only succeed if - // expectation is not fullfilled for the specified `expectedFulfillmentCount`. + // expectation is not fulfilled for the specified `expectedFulfillmentCount`. // Since in our case we cannot predict how many times the expectation is - // supposed to be fullfilled setting, + // supposed to be fulfilled, setting // `expectation.expectedFulfillmentCount` = `iterationCount` + 1 and // `expectation.isInverted = true` ensures that test succeeds if - // expectation is fullfilled <= `iterationCount` times. + // expectation is fulfilled <= `iterationCount` times. XCTestExpectation *expectation = [[XCTestExpectation alloc] initWithDescription:@"detectWithOutOfOrderTimestampsAndLiveStream"]; expectation.expectedFulfillmentCount = iterationCount + 1; diff --git a/mediapipe/tasks/java/com/google/mediapipe/tasks/vision/core/BaseVisionTaskApi.java b/mediapipe/tasks/java/com/google/mediapipe/tasks/vision/core/BaseVisionTaskApi.java index 5964cef2c..9ea057b0d 100644 --- a/mediapipe/tasks/java/com/google/mediapipe/tasks/vision/core/BaseVisionTaskApi.java +++ b/mediapipe/tasks/java/com/google/mediapipe/tasks/vision/core/BaseVisionTaskApi.java @@ -166,7 +166,7 @@ public class BaseVisionTaskApi implements AutoCloseable { // For 90° and 270° rotations, we need to swap width and height. // This is due to the internal behavior of ImageToTensorCalculator, which: // - first denormalizes the provided rect by multiplying the rect width or - // height by the image width or height, repectively. + // height by the image width or height, respectively. // - then rotates this by denormalized rect by the provided rotation, and // uses this for cropping, // - then finally rotates this back.
diff --git a/mediapipe/tasks/web/vision/face_stylizer/face_stylizer.ts b/mediapipe/tasks/web/vision/face_stylizer/face_stylizer.ts index 8169e6775..2d99dc54d 100644 --- a/mediapipe/tasks/web/vision/face_stylizer/face_stylizer.ts +++ b/mediapipe/tasks/web/vision/face_stylizer/face_stylizer.ts @@ -171,7 +171,7 @@ export class FaceStylizer extends VisionTaskRunner { /** * Performs face stylization on the provided single image and returns the * result. This method creates a copy of the resulting image and should not be - * used in high-throughput applictions. Only use this method when the + * used in high-throughput applications. Only use this method when the * FaceStylizer is created with the image running mode. * * @param image An image to process. @@ -182,7 +182,7 @@ export class FaceStylizer extends VisionTaskRunner { /** * Performs face stylization on the provided single image and returns the * result. This method creates a copy of the resulting image and should not be - * used in high-throughput applictions. Only use this method when the + * used in high-throughput applications. Only use this method when the * FaceStylizer is created with the image running mode. * * The 'imageProcessingOptions' parameter can be used to specify one or all @@ -275,7 +275,7 @@ export class FaceStylizer extends VisionTaskRunner { /** * Performs face stylization on the provided video frame. This method creates * a copy of the resulting image and should not be used in high-throughput - * applictions. Only use this method when the FaceStylizer is created with the + * applications. Only use this method when the FaceStylizer is created with the * video running mode. * * The input frame can be of any size. 
It's required to provide the video diff --git a/mediapipe/tasks/web/vision/image_segmenter/image_segmenter.ts b/mediapipe/tasks/web/vision/image_segmenter/image_segmenter.ts index 3dd2d03ef..6d295aaa8 100644 --- a/mediapipe/tasks/web/vision/image_segmenter/image_segmenter.ts +++ b/mediapipe/tasks/web/vision/image_segmenter/image_segmenter.ts @@ -231,7 +231,7 @@ export class ImageSegmenter extends VisionTaskRunner { /** * Performs image segmentation on the provided single image and returns the * segmentation result. This method creates a copy of the resulting masks and - * should not be used in high-throughput applictions. Only use this method + * should not be used in high-throughput applications. Only use this method * when the ImageSegmenter is created with running mode `image`. * * @param image An image to process. @@ -242,7 +242,7 @@ export class ImageSegmenter extends VisionTaskRunner { /** * Performs image segmentation on the provided single image and returns the * segmentation result. This method creates a copy of the resulting masks and - * should not be used in high-v applictions. Only use this method when + * should not be used in high-throughput applications. Only use this method when * the ImageSegmenter is created with running mode `image`. * * @param image An image to process. @@ -320,7 +320,7 @@ export class ImageSegmenter extends VisionTaskRunner { /** * Performs image segmentation on the provided video frame and returns the * segmentation result. This method creates a copy of the resulting masks and - * should not be used in high-v applictions. Only use this method when + * should not be used in high-throughput applications. Only use this method when * the ImageSegmenter is created with running mode `video`. * * @param videoFrame A video frame to process.