diff --git a/mediapipe/tasks/java/com/google/mediapipe/tasks/core/OutputHandler.java b/mediapipe/tasks/java/com/google/mediapipe/tasks/core/OutputHandler.java index 49c459ef1..c330b1a56 100644 --- a/mediapipe/tasks/java/com/google/mediapipe/tasks/core/OutputHandler.java +++ b/mediapipe/tasks/java/com/google/mediapipe/tasks/core/OutputHandler.java @@ -33,7 +33,7 @@ public class OutputHandler { /** - * Interface for the customizable MediaPipe task result listener that can reteive both task result + * Interface for the customizable MediaPipe task result listener that can retrieve both task result - * objects and the correpsonding input data. + * objects and the corresponding input data. */ public interface ResultListener { void run(OutputT result, InputT input); @@ -90,8 +90,8 @@ public class OutputHandler { } /** - * Sets whether the output handler should react to the timestamp bound changes that are reprsented - * as empty output {@link Packet}s. + * Sets whether the output handler should react to the timestamp bound changes that are + * represented as empty output {@link Packet}s. * * @param handleTimestampBoundChanges A boolean value. */ diff --git a/mediapipe/tasks/java/com/google/mediapipe/tasks/core/TaskInfo.java b/mediapipe/tasks/java/com/google/mediapipe/tasks/core/TaskInfo.java index 310f5739c..31af80f5c 100644 --- a/mediapipe/tasks/java/com/google/mediapipe/tasks/core/TaskInfo.java +++ b/mediapipe/tasks/java/com/google/mediapipe/tasks/core/TaskInfo.java @@ -24,7 +24,7 @@ import java.util.ArrayList; import java.util.List; /** - * {@link TaskInfo} contains all needed informaton to initialize a MediaPipe Task {@link + * {@link TaskInfo} contains all needed information to initialize a MediaPipe Task {@link * com.google.mediapipe.framework.Graph}. 
*/ @AutoValue diff --git a/mediapipe/tasks/java/com/google/mediapipe/tasks/vision/facelandmarker/FaceLandmarkerResult.java b/mediapipe/tasks/java/com/google/mediapipe/tasks/vision/facelandmarker/FaceLandmarkerResult.java index bafa40e19..7054856fc 100644 --- a/mediapipe/tasks/java/com/google/mediapipe/tasks/vision/facelandmarker/FaceLandmarkerResult.java +++ b/mediapipe/tasks/java/com/google/mediapipe/tasks/vision/facelandmarker/FaceLandmarkerResult.java @@ -108,7 +108,7 @@ public abstract class FaceLandmarkerResult implements TaskResult { public abstract Optional>> faceBlendshapes(); /** - * Optional facial transformation matrix list from cannonical face to the detected face landmarks. + * Optional facial transformation matrix list from canonical face to the detected face landmarks. - * The 4x4 facial transformation matrix is represetned as a flat column-major float array. + * The 4x4 facial transformation matrix is represented as a flat column-major float array. */ public abstract Optional> facialTransformationMatrixes(); diff --git a/mediapipe/tasks/java/com/google/mediapipe/tasks/vision/gesturerecognizer/GestureRecognizer.java b/mediapipe/tasks/java/com/google/mediapipe/tasks/vision/gesturerecognizer/GestureRecognizer.java index a933d2f65..5b2d7191f 100644 --- a/mediapipe/tasks/java/com/google/mediapipe/tasks/vision/gesturerecognizer/GestureRecognizer.java +++ b/mediapipe/tasks/java/com/google/mediapipe/tasks/vision/gesturerecognizer/GestureRecognizer.java @@ -403,10 +403,10 @@ public final class GestureRecognizer extends BaseVisionTaskApi { public abstract Builder setMinTrackingConfidence(Float value); /** - * Sets the optional {@link ClassifierOptions} controling the canned gestures classifier, such - * as score threshold, allow list and deny list of gestures. 
The categories for canned gesture - * classifiers are: ["None", "Closed_Fist", "Open_Palm", "Pointing_Up", "Thumb_Down", - * "Thumb_Up", "Victory", "ILoveYou"] + * Sets the optional {@link ClassifierOptions} controlling the canned gestures classifier, + * such as score threshold, allow list and deny list of gestures. The categories + * for canned gesture classifiers are: ["None", "Closed_Fist", "Open_Palm", + * "Pointing_Up", "Thumb_Down", "Thumb_Up", "Victory", "ILoveYou"] * *

TODO Note this option is subject to change, after scoring merging * calculator is implemented. @@ -415,8 +415,8 @@ public final class GestureRecognizer extends BaseVisionTaskApi { ClassifierOptions classifierOptions); /** - * Sets the optional {@link ClassifierOptions} controling the custom gestures classifier, such - * as score threshold, allow list and deny list of gestures. + * Sets the optional {@link ClassifierOptions} controlling the custom gestures classifier, + * such as score threshold, allow list and deny list of gestures. * *

TODO Note this option is subject to change, after scoring merging * calculator is implemented. diff --git a/mediapipe/tasks/java/com/google/mediapipe/tasks/vision/imagesegmenter/ImageSegmenter.java b/mediapipe/tasks/java/com/google/mediapipe/tasks/vision/imagesegmenter/ImageSegmenter.java index f1a08d425..b809ab963 100644 --- a/mediapipe/tasks/java/com/google/mediapipe/tasks/vision/imagesegmenter/ImageSegmenter.java +++ b/mediapipe/tasks/java/com/google/mediapipe/tasks/vision/imagesegmenter/ImageSegmenter.java @@ -302,7 +302,7 @@ public final class ImageSegmenter extends BaseVisionTaskApi { * @throws IllegalArgumentException if the {@link ImageProcessingOptions} specify a * region-of-interest. * @throws MediaPipeException if there is an internal error. Or if {@link ImageSegmenter} is not - * created wtih {@link ResultListener} set in {@link ImageSegmenterOptions}. + * created with {@link ResultListener} set in {@link ImageSegmenterOptions}. */ public void segmentWithResultListener(MPImage image) { segmentWithResultListener(image, ImageProcessingOptions.builder().build()); @@ -329,7 +329,7 @@ public final class ImageSegmenter extends BaseVisionTaskApi { * @throws IllegalArgumentException if the {@link ImageProcessingOptions} specify a * region-of-interest. * @throws MediaPipeException if there is an internal error. Or if {@link ImageSegmenter} is not - * created wtih {@link ResultListener} set in {@link ImageSegmenterOptions}. + * created with {@link ResultListener} set in {@link ImageSegmenterOptions}. */ public void segmentWithResultListener( MPImage image, ImageProcessingOptions imageProcessingOptions) { @@ -421,7 +421,7 @@ public final class ImageSegmenter extends BaseVisionTaskApi { * @param image a MediaPipe {@link MPImage} object for processing. * @param timestampMs the input timestamp (in milliseconds). * @throws MediaPipeException if there is an internal error. 
Or if {@link ImageSegmenter} is not - * created wtih {@link ResultListener} set in {@link ImageSegmenterOptions}. + * created with {@link ResultListener} set in {@link ImageSegmenterOptions}. */ public void segmentForVideoWithResultListener(MPImage image, long timestampMs) { segmentForVideoWithResultListener(image, ImageProcessingOptions.builder().build(), timestampMs); @@ -444,7 +444,7 @@ public final class ImageSegmenter extends BaseVisionTaskApi { * @param image a MediaPipe {@link MPImage} object for processing. * @param timestampMs the input timestamp (in milliseconds). * @throws MediaPipeException if there is an internal error. Or if {@link ImageSegmenter} is not - * created wtih {@link ResultListener} set in {@link ImageSegmenterOptions}. + * created with {@link ResultListener} set in {@link ImageSegmenterOptions}. */ public void segmentForVideoWithResultListener( MPImage image, ImageProcessingOptions imageProcessingOptions, long timestampMs) { diff --git a/mediapipe/tasks/java/com/google/mediapipe/tasks/vision/interactivesegmenter/InteractiveSegmenter.java b/mediapipe/tasks/java/com/google/mediapipe/tasks/vision/interactivesegmenter/InteractiveSegmenter.java index 8ee6951f8..657716b6b 100644 --- a/mediapipe/tasks/java/com/google/mediapipe/tasks/vision/interactivesegmenter/InteractiveSegmenter.java +++ b/mediapipe/tasks/java/com/google/mediapipe/tasks/vision/interactivesegmenter/InteractiveSegmenter.java @@ -327,7 +327,7 @@ public final class InteractiveSegmenter extends BaseVisionTaskApi { * @throws IllegalArgumentException if the {@link ImageProcessingOptions} specify a * region-of-interest. * @throws MediaPipeException if there is an internal error. Or if {@link InteractiveSegmenter} is - * not created wtih {@link ResultListener} set in {@link InteractiveSegmenterOptions}. + * not created with {@link ResultListener} set in {@link InteractiveSegmenterOptions}. 
*/ public void segmentWithResultListener(MPImage image, RegionOfInterest roi) { segmentWithResultListener(image, roi, ImageProcessingOptions.builder().build()); @@ -357,7 +357,7 @@ public final class InteractiveSegmenter extends BaseVisionTaskApi { * @throws IllegalArgumentException if the {@link ImageProcessingOptions} specify a * region-of-interest. * @throws MediaPipeException if there is an internal error. Or if {@link InteractiveSegmenter} is - * not created wtih {@link ResultListener} set in {@link InteractiveSegmenterOptions}. + * not created with {@link ResultListener} set in {@link InteractiveSegmenterOptions}. */ public void segmentWithResultListener( MPImage image, RegionOfInterest roi, ImageProcessingOptions imageProcessingOptions) { diff --git a/mediapipe/tasks/metadata/metadata_schema.fbs b/mediapipe/tasks/metadata/metadata_schema.fbs index 8fe7a08fa..8660ba38c 100644 --- a/mediapipe/tasks/metadata/metadata_schema.fbs +++ b/mediapipe/tasks/metadata/metadata_schema.fbs @@ -142,7 +142,7 @@ enum AssociatedFileType : byte { // TODO: introduce the ScaNN index file with links once the code // is released. - // Contains on-devide ScaNN index file with LevelDB format. + // Contains on-device ScaNN index file with LevelDB format. // Added in: 1.4.0 SCANN_INDEX_FILE = 6, } diff --git a/mediapipe/tasks/python/metadata/metadata.py b/mediapipe/tasks/python/metadata/metadata.py index 25d83cae8..6a107c8d8 100644 --- a/mediapipe/tasks/python/metadata/metadata.py +++ b/mediapipe/tasks/python/metadata/metadata.py @@ -121,7 +121,7 @@ class MetadataPopulator(object): Then, pack the metadata and label file into the model as follows. 
```python - # Populating a metadata file (or a metadta buffer) and associated files to + # Populating a metadata file (or a metadata buffer) and associated files to a model file: populator = MetadataPopulator.with_model_file(model_file) # For metadata buffer (bytearray read from the metadata file), use: @@ -332,7 +332,7 @@ class MetadataPopulator(object): Raises: IOError: File not found. ValueError: The metadata to be populated is empty. - ValueError: The metadata does not have the expected flatbuffer identifer. + ValueError: The metadata does not have the expected flatbuffer identifier. ValueError: Cannot get minimum metadata parser version. ValueError: The number of SubgraphMetadata is not 1. ValueError: The number of input/output tensors does not match the number diff --git a/mediapipe/tasks/python/metadata/metadata_writers/metadata_info.py b/mediapipe/tasks/python/metadata/metadata_writers/metadata_info.py index f201ab7e0..10b66ff18 100644 --- a/mediapipe/tasks/python/metadata/metadata_writers/metadata_info.py +++ b/mediapipe/tasks/python/metadata/metadata_writers/metadata_info.py @@ -559,7 +559,7 @@ class InputTextTensorMd(TensorMd): name: name of the tensor. description: description of what the tensor is. tokenizer_md: information of the tokenizer in the input text tensor, if - any. Only `RegexTokenizer` [1] is currenly supported. If the tokenizer + any. Only `RegexTokenizer` [1] is currently supported. If the tokenizer is `BertTokenizer` [2] or `SentencePieceTokenizer` [3], refer to `BertInputTensorsMd` class. 
[1]: diff --git a/mediapipe/tasks/python/test/metadata/metadata_test.py b/mediapipe/tasks/python/test/metadata/metadata_test.py index d892f1b61..c91bcce6e 100644 --- a/mediapipe/tasks/python/test/metadata/metadata_test.py +++ b/mediapipe/tasks/python/test/metadata/metadata_test.py @@ -388,7 +388,7 @@ class MetadataPopulatorTest(MetadataTest): populator = _metadata.MetadataPopulator.with_model_file(self._model_file) populator.load_metadata_file(self._metadata_file) populator.load_associated_files([self._file1]) - # Suppose to populate self._file2, because it is recorded in the metadta. + # Supposed to populate self._file2, because it is recorded in the metadata. with self.assertRaises(ValueError) as error: populator.populate() self.assertEqual(("File, '{0}', is recorded in the metadata, but has " diff --git a/mediapipe/tasks/python/vision/core/base_vision_task_api.py b/mediapipe/tasks/python/vision/core/base_vision_task_api.py index 0c8262d4b..768d392f1 100644 --- a/mediapipe/tasks/python/vision/core/base_vision_task_api.py +++ b/mediapipe/tasks/python/vision/core/base_vision_task_api.py @@ -144,7 +144,7 @@ class BaseVisionTaskApi(object): set. By default, it's set to True. Returns: - A normalized rect proto that repesents the image processing options. + A normalized rect proto that represents the image processing options. """ normalized_rect = _NormalizedRect( rotation=0, x_center=0.5, y_center=0.5, width=1, height=1) diff --git a/mediapipe/tasks/web/components/containers/matrix.d.ts b/mediapipe/tasks/web/components/containers/matrix.d.ts index fd4bda4c3..e0bad58c8 100644 --- a/mediapipe/tasks/web/components/containers/matrix.d.ts +++ b/mediapipe/tasks/web/components/containers/matrix.d.ts @@ -14,7 +14,7 @@ * limitations under the License. */ -/** A two-dimenionsal matrix. */ +/** A two-dimensional matrix. */ export declare interface Matrix { /** The number of rows. 
*/ rows: number; diff --git a/mediapipe/tasks/web/vision/core/types.d.ts b/mediapipe/tasks/web/vision/core/types.d.ts index c04366ac0..b48b5045d 100644 --- a/mediapipe/tasks/web/vision/core/types.d.ts +++ b/mediapipe/tasks/web/vision/core/types.d.ts @@ -19,7 +19,7 @@ import {NormalizedKeypoint} from '../../../../tasks/web/components/containers/ke /** * The segmentation tasks return the segmentation either as a WebGLTexture (when * the output is on GPU) or as a typed JavaScript arrays for CPU-based - * category or confidence masks. `Uint8ClampedArray`s are used to represend + * category or confidence masks. `Uint8ClampedArray`s are used to represent * CPU-based category masks and `Float32Array`s are used for CPU-based * confidence masks. */ diff --git a/mediapipe/tasks/web/vision/core/vision_task_options.d.ts b/mediapipe/tasks/web/vision/core/vision_task_options.d.ts index 72bc2efb1..a45efd6d3 100644 --- a/mediapipe/tasks/web/vision/core/vision_task_options.d.ts +++ b/mediapipe/tasks/web/vision/core/vision_task_options.d.ts @@ -27,7 +27,7 @@ export type RunningMode = 'IMAGE'|'VIDEO'; export declare interface VisionTaskOptions extends TaskRunnerOptions { /** * The canvas element to bind textures to. This has to be set for GPU - * processing. The task will initialize a WebGL context and throw an eror if + * processing. The task will initialize a WebGL context and throw an error if * this fails (e.g. if you have already initialized a different type of * context). */