This CL fixes multiple typos in the new task API solution

PiperOrigin-RevId: 521407588
MediaPipe Team authored 2023-04-03 03:02:30 -07:00, committed by Copybara-Service
parent b5bbed8ebb
commit 4a490cd27c
14 changed files with 26 additions and 26 deletions

@@ -33,7 +33,7 @@ public class OutputHandler<OutputT extends TaskResult, InputT> {
   /**
    * Interface for the customizable MediaPipe task result listener that can reteive both task result
-   * objects and the correpsonding input data.
+   * objects and the corresponding input data.
    */
   public interface ResultListener<OutputT extends TaskResult, InputT> {
     void run(OutputT result, InputT input);
@@ -90,8 +90,8 @@ public class OutputHandler<OutputT extends TaskResult, InputT> {
   }
   /**
-   * Sets whether the output handler should react to the timestamp bound changes that are reprsented
-   * as empty output {@link Packet}s.
+   * Sets whether the output handler should react to the timestamp bound changes that are
+   * represented as empty output {@link Packet}s.
    *
    * @param handleTimestampBoundChanges A boolean value.
    */
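
As context for the corrected Javadoc, here is a minimal sketch of implementing this ResultListener interface; the GestureRecognizerResult/MPImage type arguments and the logging body are illustrative assumptions, not part of this CL:

```java
import android.util.Log;
import com.google.mediapipe.framework.image.MPImage;
import com.google.mediapipe.tasks.core.OutputHandler;
import com.google.mediapipe.tasks.vision.gesturerecognizer.GestureRecognizerResult;

// The listener receives both the task result object and the corresponding
// input data, as the corrected Javadoc describes.
OutputHandler.ResultListener<GestureRecognizerResult, MPImage> listener =
    (result, input) ->
        Log.i(
            "MediaPipe",
            "Result at " + result.timestampMs() + " ms for a "
                + input.getWidth() + "x" + input.getHeight() + " input");
```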

@@ -24,7 +24,7 @@ import java.util.ArrayList;
 import java.util.List;
 /**
- * {@link TaskInfo} contains all needed informaton to initialize a MediaPipe Task {@link
+ * {@link TaskInfo} contains all needed information to initialize a MediaPipe Task {@link
  * com.google.mediapipe.framework.Graph}.
  */
 @AutoValue

@@ -108,7 +108,7 @@ public abstract class FaceLandmarkerResult implements TaskResult {
   public abstract Optional<List<List<Category>>> faceBlendshapes();
   /**
-   * Optional facial transformation matrix list from cannonical face to the detected face landmarks.
+   * Optional facial transformation matrix list from canonical face to the detected face landmarks.
    * The 4x4 facial transformation matrix is represetned as a flat column-major float array.
    */
   public abstract Optional<List<float[]>> facialTransformationMatrixes();
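
Since the Javadoc specifies a flat column-major layout, a short sketch of indexing such a matrix may help; `result` here is a hypothetical FaceLandmarkerResult, while the accessor signature comes from the diff above:

```java
// Element (row, col) of a column-major 4x4 matrix lives at index col * 4 + row,
// so the translation column occupies indices 12-14.
result.facialTransformationMatrixes().ifPresent(matrixes -> {
  float[] m = matrixes.get(0); // 16 floats, column-major
  float tx = m[12];
  float ty = m[13];
  float tz = m[14];
});
```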

@@ -403,10 +403,10 @@ public final class GestureRecognizer extends BaseVisionTaskApi {
     public abstract Builder setMinTrackingConfidence(Float value);
     /**
-     * Sets the optional {@link ClassifierOptions} controling the canned gestures classifier, such
-     * as score threshold, allow list and deny list of gestures. The categories for canned gesture
-     * classifiers are: ["None", "Closed_Fist", "Open_Palm", "Pointing_Up", "Thumb_Down",
-     * "Thumb_Up", "Victory", "ILoveYou"]
+     * Sets the optional {@link ClassifierOptions} controlling the canned gestures classifier,
+     * such as score threshold, allow list and deny list of gestures. The categories
+     * for canned gesture classifiers are: ["None", "Closed_Fist", "Open_Palm",
+     * "Pointing_Up", "Thumb_Down", "Thumb_Up", "Victory", "ILoveYou"]
      *
      * <p>TODO Note this option is subject to change, after scoring merging
      * calculator is implemented.
@@ -415,8 +415,8 @@ public final class GestureRecognizer extends BaseVisionTaskApi {
         ClassifierOptions classifierOptions);
     /**
-     * Sets the optional {@link ClassifierOptions} controling the custom gestures classifier, such
-     * as score threshold, allow list and deny list of gestures.
+     * Sets the optional {@link ClassifierOptions} controlling the custom gestures classifier,
+     * such as score threshold, allow list and deny list of gestures.
      *
      * <p>TODO Note this option is subject to change, after scoring merging
      * calculator is implemented.
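
A hedged sketch of configuring the canned gestures classifier described above; `setCannedGesturesClassifierOptions`, the ClassifierOptions builder methods, and the model path are assumptions not shown in this diff:

```java
import java.util.Arrays;

// Assumed builder names; only the ClassifierOptions parameter itself appears
// in this diff. The allowlist entries are taken from the canned-category list
// in the corrected Javadoc.
GestureRecognizerOptions options =
    GestureRecognizerOptions.builder()
        .setBaseOptions(
            BaseOptions.builder().setModelAssetPath("gesture_recognizer.task").build())
        .setCannedGesturesClassifierOptions(
            ClassifierOptions.builder()
                .setScoreThreshold(0.5f)
                .setCategoryAllowlist(Arrays.asList("Thumb_Up", "Victory"))
                .build())
        .build();
```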

@@ -302,7 +302,7 @@ public final class ImageSegmenter extends BaseVisionTaskApi {
    * @throws IllegalArgumentException if the {@link ImageProcessingOptions} specify a
    *     region-of-interest.
    * @throws MediaPipeException if there is an internal error. Or if {@link ImageSegmenter} is not
-   *     created wtih {@link ResultListener} set in {@link ImageSegmenterOptions}.
+   *     created with {@link ResultListener} set in {@link ImageSegmenterOptions}.
    */
   public void segmentWithResultListener(MPImage image) {
     segmentWithResultListener(image, ImageProcessingOptions.builder().build());
@@ -329,7 +329,7 @@ public final class ImageSegmenter extends BaseVisionTaskApi {
    * @throws IllegalArgumentException if the {@link ImageProcessingOptions} specify a
    *     region-of-interest.
    * @throws MediaPipeException if there is an internal error. Or if {@link ImageSegmenter} is not
-   *     created wtih {@link ResultListener} set in {@link ImageSegmenterOptions}.
+   *     created with {@link ResultListener} set in {@link ImageSegmenterOptions}.
    */
   public void segmentWithResultListener(
       MPImage image, ImageProcessingOptions imageProcessingOptions) {
@@ -421,7 +421,7 @@ public final class ImageSegmenter extends BaseVisionTaskApi {
    * @param image a MediaPipe {@link MPImage} object for processing.
    * @param timestampMs the input timestamp (in milliseconds).
    * @throws MediaPipeException if there is an internal error. Or if {@link ImageSegmenter} is not
-   *     created wtih {@link ResultListener} set in {@link ImageSegmenterOptions}.
+   *     created with {@link ResultListener} set in {@link ImageSegmenterOptions}.
    */
   public void segmentForVideoWithResultListener(MPImage image, long timestampMs) {
     segmentForVideoWithResultListener(image, ImageProcessingOptions.builder().build(), timestampMs);
@@ -444,7 +444,7 @@ public final class ImageSegmenter extends BaseVisionTaskApi {
    * @param image a MediaPipe {@link MPImage} object for processing.
    * @param timestampMs the input timestamp (in milliseconds).
    * @throws MediaPipeException if there is an internal error. Or if {@link ImageSegmenter} is not
-   *     created wtih {@link ResultListener} set in {@link ImageSegmenterOptions}.
+   *     created with {@link ResultListener} set in {@link ImageSegmenterOptions}.
   */
   public void segmentForVideoWithResultListener(
       MPImage image, ImageProcessingOptions imageProcessingOptions, long timestampMs) {
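
The `@throws` clauses fixed above hinge on the listener being supplied at creation time. A minimal sketch, assuming `setResultListener` as the option name and a placeholder model path:

```java
// Without a ResultListener in the options, the *WithResultListener methods
// documented above throw MediaPipeException.
ImageSegmenterOptions options =
    ImageSegmenterOptions.builder()
        .setBaseOptions(BaseOptions.builder().setModelAssetPath("segmenter.tflite").build())
        .setResultListener((result, inputImage) -> { /* consume masks here */ })
        .build();
ImageSegmenter segmenter = ImageSegmenter.createFromOptions(context, options);
segmenter.segmentWithResultListener(image); // result is delivered to the listener
```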

@@ -327,7 +327,7 @@ public final class InteractiveSegmenter extends BaseVisionTaskApi {
    * @throws IllegalArgumentException if the {@link ImageProcessingOptions} specify a
    *     region-of-interest.
    * @throws MediaPipeException if there is an internal error. Or if {@link InteractiveSegmenter} is
-   *     not created wtih {@link ResultListener} set in {@link InteractiveSegmenterOptions}.
+   *     not created with {@link ResultListener} set in {@link InteractiveSegmenterOptions}.
    */
   public void segmentWithResultListener(MPImage image, RegionOfInterest roi) {
     segmentWithResultListener(image, roi, ImageProcessingOptions.builder().build());
@@ -357,7 +357,7 @@ public final class InteractiveSegmenter extends BaseVisionTaskApi {
    * @throws IllegalArgumentException if the {@link ImageProcessingOptions} specify a
    *     region-of-interest.
    * @throws MediaPipeException if there is an internal error. Or if {@link InteractiveSegmenter} is
-   *     not created wtih {@link ResultListener} set in {@link InteractiveSegmenterOptions}.
+   *     not created with {@link ResultListener} set in {@link InteractiveSegmenterOptions}.
    */
   public void segmentWithResultListener(
       MPImage image, RegionOfInterest roi, ImageProcessingOptions imageProcessingOptions) {
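
The same creation-time requirement applies to the interactive task. A sketch of the call pattern documented above, assuming `RegionOfInterest.create` and `NormalizedKeypoint.create` factory methods (only the `segmentWithResultListener` signatures appear in this diff):

```java
// Point the segmenter at a normalized keypoint of interest; the factory names
// are assumptions, while the segmentWithResultListener call matches the diff.
RegionOfInterest roi = RegionOfInterest.create(NormalizedKeypoint.create(0.5f, 0.5f));
interactiveSegmenter.segmentWithResultListener(image, roi);
```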

@@ -142,7 +142,7 @@ enum AssociatedFileType : byte {
   // TODO: introduce the ScaNN index file with links once the code
   // is released.
-  // Contains on-devide ScaNN index file with LevelDB format.
+  // Contains on-device ScaNN index file with LevelDB format.
   // Added in: 1.4.0
   SCANN_INDEX_FILE = 6,
 }

@@ -121,7 +121,7 @@ class MetadataPopulator(object):
   Then, pack the metadata and label file into the model as follows.
   ```python
-  # Populating a metadata file (or a metadta buffer) and associated files to
+  # Populating a metadata file (or a metadata buffer) and associated files to
   a model file:
   populator = MetadataPopulator.with_model_file(model_file)
   # For metadata buffer (bytearray read from the metadata file), use:
@@ -332,7 +332,7 @@ class MetadataPopulator(object):
     Raises:
       IOError: File not found.
       ValueError: The metadata to be populated is empty.
-      ValueError: The metadata does not have the expected flatbuffer identifer.
+      ValueError: The metadata does not have the expected flatbuffer identifier.
       ValueError: Cannot get minimum metadata parser version.
       ValueError: The number of SubgraphMetadata is not 1.
       ValueError: The number of input/output tensors does not match the number

@@ -559,7 +559,7 @@ class InputTextTensorMd(TensorMd):
       name: name of the tensor.
       description: description of what the tensor is.
       tokenizer_md: information of the tokenizer in the input text tensor, if
-        any. Only `RegexTokenizer` [1] is currenly supported. If the tokenizer
+        any. Only `RegexTokenizer` [1] is currently supported. If the tokenizer
         is `BertTokenizer` [2] or `SentencePieceTokenizer` [3], refer to
         `BertInputTensorsMd` class.
       [1]:

@@ -388,7 +388,7 @@ class MetadataPopulatorTest(MetadataTest):
     populator = _metadata.MetadataPopulator.with_model_file(self._model_file)
     populator.load_metadata_file(self._metadata_file)
     populator.load_associated_files([self._file1])
-    # Suppose to populate self._file2, because it is recorded in the metadta.
+    # Suppose to populate self._file2, because it is recorded in the metadata.
     with self.assertRaises(ValueError) as error:
       populator.populate()
     self.assertEqual(("File, '{0}', is recorded in the metadata, but has "

@@ -144,7 +144,7 @@ class BaseVisionTaskApi(object):
         set. By default, it's set to True.
     Returns:
-      A normalized rect proto that repesents the image processing options.
+      A normalized rect proto that represents the image processing options.
     """
     normalized_rect = _NormalizedRect(
         rotation=0, x_center=0.5, y_center=0.5, width=1, height=1)

@@ -14,7 +14,7 @@
  * limitations under the License.
  */
-/** A two-dimenionsal matrix. */
+/** A two-dimensional matrix. */
 export declare interface Matrix {
   /** The number of rows. */
   rows: number;

@@ -19,7 +19,7 @@ import {NormalizedKeypoint} from '../../../../tasks/web/components/containers/ke
 /**
  * The segmentation tasks return the segmentation either as a WebGLTexture (when
  * the output is on GPU) or as a typed JavaScript arrays for CPU-based
- * category or confidence masks. `Uint8ClampedArray`s are used to represend
+ * category or confidence masks. `Uint8ClampedArray`s are used to represent
  * CPU-based category masks and `Float32Array`s are used for CPU-based
  * confidence masks.
  */

@@ -27,7 +27,7 @@ export type RunningMode = 'IMAGE'|'VIDEO';
 export declare interface VisionTaskOptions extends TaskRunnerOptions {
   /**
    * The canvas element to bind textures to. This has to be set for GPU
-   * processing. The task will initialize a WebGL context and throw an eror if
+   * processing. The task will initialize a WebGL context and throw an error if
    * this fails (e.g. if you have already initialized a different type of
    * context).
    */