This CL fixes the typos in the tasks files

PiperOrigin-RevId: 522240681
MediaPipe Team authored 2023-04-05 21:40:17 -07:00, committed by Copybara-Service
parent d5def9e24d
commit 7ae4d0175a
12 changed files with 30 additions and 30 deletions

View File

@@ -84,7 +84,7 @@ class Dataset(object):
       create randomness during model training.
     preprocess: A function taking three arguments in order, feature, label and
       boolean is_training.
-    drop_remainder: boolean, whether the finaly batch drops remainder.
+    drop_remainder: boolean, whether the finally batch drops remainder.
   Returns:
     A TF dataset ready to be consumed by Keras model.
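
For context, these docstring arguments describe a standard tf.data batching pipeline. A minimal sketch, assuming a preprocess(feature, label, is_training) callable; the helper name, defaults, and shuffle buffer size below are illustrative, not MediaPipe's actual implementation:

    import functools
    import tensorflow as tf

    def gen_tf_dataset(ds, batch_size=32, is_training=False,
                       preprocess=None, drop_remainder=False):
      # Bind is_training so map() only needs to supply (feature, label).
      if preprocess:
        ds = ds.map(functools.partial(preprocess, is_training=is_training),
                    num_parallel_calls=tf.data.AUTOTUNE)
      if is_training:
        ds = ds.shuffle(buffer_size=1000)  # creates randomness during training
      # drop_remainder=True discards the final partial batch.
      return ds.batch(batch_size, drop_remainder=drop_remainder)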

View File

@@ -32,7 +32,7 @@ class BaseHParams:
     epochs: Number of training iterations over the dataset.
     steps_per_epoch: An optional integer indicate the number of training steps
       per epoch. If not set, the training pipeline calculates the default steps
-      per epoch as the training dataset size devided by batch size.
+      per epoch as the training dataset size divided by batch size.
     shuffle: True if the dataset is shuffled before training.
     export_dir: The location of the model checkpoint files.
    distribution_strategy: A string specifying which Distribution Strategy to
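
The steps_per_epoch default described in that docstring amounts to a floor division; a minimal sketch (the function name is hypothetical):

    def get_default_steps_per_epoch(train_data_size: int, batch_size: int) -> int:
      # Default: one epoch walks the whole dataset once, so e.g. 1000 examples
      # at batch size 32 gives 31 steps (the remainder is dropped).
      return train_data_size // batch_size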

View File

@@ -21,7 +21,7 @@ package(
     default_visibility = ["//mediapipe:__subpackages__"],
 )
-# TODO: Remove the unncessary test data once the demo data are moved to an open-sourced
+# TODO: Remove the unnecessary test data once the demo data are moved to an open-sourced
 # directory.
 filegroup(
     name = "testdata",

View File

@@ -155,8 +155,8 @@ class Dataset(classification_dataset.ClassificationDataset):
       ObjectDetectorDataset object.
     """
     # Get TFRecord Files
-    tfrecord_file_patten = cache_prefix + '*.tfrecord'
-    matched_files = tf.io.gfile.glob(tfrecord_file_patten)
+    tfrecord_file_pattern = cache_prefix + '*.tfrecord'
+    matched_files = tf.io.gfile.glob(tfrecord_file_pattern)
     if not matched_files:
       raise ValueError('TFRecord files are empty.')
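
The renamed variable follows the usual tf.io.gfile.glob pattern for picking up cached shards; a usage sketch with a hypothetical cache prefix:

    import tensorflow as tf

    cache_prefix = '/tmp/object_detector_cache/train'  # hypothetical path
    matched_files = tf.io.gfile.glob(cache_prefix + '*.tfrecord')
    if not matched_files:
      raise ValueError('TFRecord files are empty.')
    dataset = tf.data.TFRecordDataset(matched_files)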

View File

@@ -345,7 +345,7 @@ def _coco_annotations_to_lists(
   Args:
     bbox_annotations: List of dicts with keys ['bbox', 'category_id']
     image_height: Height of image
-    image_width: Width of iamge
+    image_width: Width of image
   Returns:
     (data, num_annotations_skipped) tuple where data contains the keys:
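
image_height and image_width are needed because COCO stores boxes as [x_min, y_min, width, height] in absolute pixels, while detection pipelines usually want normalized corner coordinates. A minimal sketch of that conversion (illustrative, not the actual function body):

    def normalize_coco_bbox(bbox, image_height, image_width):
      # COCO bbox: [x_min, y_min, box_width, box_height] in pixels.
      x, y, w, h = bbox
      # Return normalized [y_min, x_min, y_max, x_max] corners in [0, 1].
      return (y / image_height, x / image_width,
              (y + h) / image_height, (x + w) / image_width)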

View File

@@ -111,7 +111,7 @@ TEST(MetadataVersionTest,
 TEST(MetadataVersionTest,
      GetMinimumMetadataParserVersionForModelMetadataVocabAssociatedFiles) {
   // Creates a metadata flatbuffer with the field,
-  // ModelMetadata.associated_fiels, populated with the vocabulary file type.
+  // ModelMetadata.associated_fields, populated with the vocabulary file type.
   FlatBufferBuilder builder(1024);
   AssociatedFileBuilder associated_file_builder(builder);
   associated_file_builder.add_type(tflite::AssociatedFileType_VOCABULARY);
@@ -159,8 +159,8 @@ TEST(MetadataVersionTest,
 TEST(MetadataVersionTest,
      GetMinimumMetadataParserVersionForInputMetadataVocabAssociatedFiles) {
   // Creates a metadata flatbuffer with the field,
-  // SubGraphMetadata.input_tensor_metadata.associated_fiels, populated with the
-  // vocabulary file type.
+  // SubGraphMetadata.input_tensor_metadata.associated_fields, populated with
+  // the vocabulary file type.
   FlatBufferBuilder builder(1024);
   AssociatedFileBuilder associated_file_builder(builder);
   associated_file_builder.add_type(tflite::AssociatedFileType_VOCABULARY);
@@ -184,7 +184,7 @@ TEST(MetadataVersionTest,
 TEST(MetadataVersionTest,
      GetMinimumMetadataParserVersionForOutputMetadataVocabAssociatedFiles) {
   // Creates a metadata flatbuffer with the field,
-  // SubGraphMetadata.output_tensor_metadata.associated_fiels, populated with
+  // SubGraphMetadata.output_tensor_metadata.associated_fields, populated with
   // the vocabulary file type.
   FlatBufferBuilder builder(1024);
   AssociatedFileBuilder associated_file_builder(builder);

View File

@@ -188,7 +188,7 @@ class BaseVisionTaskApi : public tasks::core::BaseTaskApi {
   // For 90° and 270° rotations, we need to swap width and height.
   // This is due to the internal behavior of ImageToTensorCalculator, which:
   // - first denormalizes the provided rect by multiplying the rect width or
-  //   height by the image width or height, repectively.
+  //   height by the image width or height, respectively.
   // - then rotates this by denormalized rect by the provided rotation, and
   //   uses this for cropping,
   // - then finally rotates this back.
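
The swap rule this comment describes reduces to checking whether the rotation is an odd multiple of 90°; a minimal sketch in Python, independent of the actual C++ implementation:

    def crop_size_after_rotation(width: int, height: int, rotation_degrees: int):
      # For 90° and 270° rotations the cropped region's width and height
      # trade places before denormalization, so swap them up front.
      if rotation_degrees % 180 == 90:
        return height, width
      return width, height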

View File

@@ -374,22 +374,22 @@ class SingleHandGestureRecognizerGraph : public core::ModelTaskGraph {
 // Inference for custom gesture classifier if it exists.
 if (has_custom_gesture_classifier) {
   ASSIGN_OR_RETURN(
-      auto gesture_clasification_list,
+      auto gesture_classification_list,
       GetGestureClassificationList(
           sub_task_model_resources.custom_gesture_classifier_model_resource,
           graph_options.custom_gesture_classifier_graph_options(),
           embedding_tensors, graph));
-  gesture_clasification_list >> combine_predictions.In(classifier_nums++);
+  gesture_classification_list >> combine_predictions.In(classifier_nums++);
 }
 // Inference for canned gesture classifier.
 ASSIGN_OR_RETURN(
-    auto gesture_clasification_list,
+    auto gesture_classification_list,
     GetGestureClassificationList(
         sub_task_model_resources.canned_gesture_classifier_model_resource,
         graph_options.canned_gesture_classifier_graph_options(),
         embedding_tensors, graph));
-gesture_clasification_list >> combine_predictions.In(classifier_nums++);
+gesture_classification_list >> combine_predictions.In(classifier_nums++);
 auto combined_classification_list =
     combine_predictions.Out(kPredictionTag).Cast<ClassificationList>();

View File

@@ -29,7 +29,7 @@ NS_ASSUME_NONNULL_BEGIN
  * @param classObject The specified class associated with the bundle containing the file to be
  * loaded.
  * @param name Name of the image file.
- * @param type Extenstion of the image file.
+ * @param type Extension of the image file.
  *
  * @return The `MPPImage` object contains the loaded image. This method returns
  * nil if it cannot load the image.
@@ -46,7 +46,7 @@ NS_ASSUME_NONNULL_BEGIN
  * @param classObject The specified class associated with the bundle containing the file to be
  * loaded.
  * @param name Name of the image file.
- * @param type Extenstion of the image file.
+ * @param type Extension of the image file.
  * @param orientation Orientation of the image.
  *
  * @return The `MPPImage` object contains the loaded image. This method returns

View File

@@ -94,7 +94,7 @@ public class TaskRunner implements AutoCloseable {
    *
    * <p>Note: This method is designed for processing batch data such as unrelated images and texts.
    * The call blocks the current thread until a failure status or a successful result is returned.
-   * An internal timestamp will be assigend per invocation. This method is thread-safe and allows
+   * An internal timestamp will be assigned per invocation. This method is thread-safe and allows
    * clients to call it from different threads.
    *
    * @param inputs a map contains (input stream {@link String}, data {@link Packet}) pairs.

View File

@@ -254,7 +254,7 @@ public final class FaceStylizer extends BaseVisionTaskApi {
    * @throws IllegalArgumentException if the {@link ImageProcessingOptions} specify a
    *     region-of-interest.
    * @throws MediaPipeException if there is an internal error. Or if {@link FaceStylizer} is not
-   *     created wtih {@link ResultListener} set in {@link FaceStylizerOptions}.
+   *     created with {@link ResultListener} set in {@link FaceStylizerOptions}.
    */
   public void stylizeWithResultListener(MPImage image) {
     stylizeWithResultListener(image, ImageProcessingOptions.builder().build());
@@ -283,7 +283,7 @@ public final class FaceStylizer extends BaseVisionTaskApi {
    * @throws IllegalArgumentException if the {@link ImageProcessingOptions} specify a
    *     region-of-interest.
    * @throws MediaPipeException if there is an internal error. Or if {@link FaceStylizer} is not
-   *     created wtih {@link ResultListener} set in {@link FaceStylizerOptions}.
+   *     created with {@link ResultListener} set in {@link FaceStylizerOptions}.
    */
   public void stylizeWithResultListener(
       MPImage image, ImageProcessingOptions imageProcessingOptions) {
@@ -384,7 +384,7 @@ public final class FaceStylizer extends BaseVisionTaskApi {
    * @param image a MediaPipe {@link MPImage} object for processing.
    * @param timestampMs the input timestamp (in milliseconds).
    * @throws MediaPipeException if there is an internal error. Or if {@link FaceStylizer} is not
-   *     created wtih {@link ResultListener} set in {@link FaceStylizerOptions}.
+   *     created with {@link ResultListener} set in {@link FaceStylizerOptions}.
    */
   public void stylizeForVideoWithResultListener(MPImage image, long timestampMs) {
     stylizeForVideoWithResultListener(image, ImageProcessingOptions.builder().build(), timestampMs);
@@ -411,7 +411,7 @@ public final class FaceStylizer extends BaseVisionTaskApi {
    * @param image a MediaPipe {@link MPImage} object for processing.
    * @param timestampMs the input timestamp (in milliseconds).
    * @throws MediaPipeException if there is an internal error. Or if {@link FaceStylizer} is not
-   *     created wtih {@link ResultListener} set in {@link FaceStylizerOptions}.
+   *     created with {@link ResultListener} set in {@link FaceStylizerOptions}.
    */
   public void stylizeForVideoWithResultListener(
       MPImage image, ImageProcessingOptions imageProcessingOptions, long timestampMs) {

View File

@@ -32,12 +32,12 @@ describe('convertFromClassificationResultProto()', () => {
     classifcations.setHeadIndex(1);
     classifcations.setHeadName('headName');
     const classificationList = new ClassificationList();
-    const clasification = new Classification();
-    clasification.setIndex(2);
-    clasification.setScore(0.3);
-    clasification.setDisplayName('displayName');
-    clasification.setLabel('categoryName');
-    classificationList.addClassification(clasification);
+    const classification = new Classification();
+    classification.setIndex(2);
+    classification.setScore(0.3);
+    classification.setDisplayName('displayName');
+    classification.setLabel('categoryName');
+    classificationList.addClassification(classification);
     classifcations.setClassificationList(classificationList);
     classificationResult.addClassifications(classifcations);
@@ -62,8 +62,8 @@ describe('convertFromClassificationResultProto()', () => {
     const classificationResult = new ClassificationResult();
     const classifcations = new Classifications();
     const classificationList = new ClassificationList();
-    const clasification = new Classification();
-    classificationList.addClassification(clasification);
+    const classification = new Classification();
+    classificationList.addClassification(classification);
     classifcations.setClassificationList(classificationList);
     classificationResult.addClassifications(classifcations);