This CL fixes the typos in the tasks files
PiperOrigin-RevId: 522240681
commit 7ae4d0175a
parent d5def9e24d
@@ -84,7 +84,7 @@ class Dataset(object):
         create randomness during model training.
       preprocess: A function taking three arguments in order, feature, label and
         boolean is_training.
-      drop_remainder: boolean, whether the finaly batch drops remainder.
+      drop_remainder: boolean, whether the finally batch drops remainder.

     Returns:
       A TF dataset ready to be consumed by Keras model.
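For readers unfamiliar with the flag this docstring documents, a minimal tf.data sketch of what `drop_remainder` does; the dataset and batch size below are illustrative, not taken from the change:

    import tensorflow as tf

    # 10 examples batched by 4: batches of 4, 4, and a remainder of 2.
    ds = tf.data.Dataset.range(10)

    kept = ds.batch(4, drop_remainder=False)    # 3 batches; last has 2 elements
    dropped = ds.batch(4, drop_remainder=True)  # 2 batches; the final partial
                                                # batch of 2 is discarded

    print([b.numpy().tolist() for b in kept])     # [[0,1,2,3], [4,5,6,7], [8,9]]
    print([b.numpy().tolist() for b in dropped])  # [[0,1,2,3], [4,5,6,7]]

Dropping the remainder keeps every batch the same static shape, which some accelerators and distribution strategies require.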
@@ -32,7 +32,7 @@ class BaseHParams:
     epochs: Number of training iterations over the dataset.
     steps_per_epoch: An optional integer indicate the number of training steps
       per epoch. If not set, the training pipeline calculates the default steps
-      per epoch as the training dataset size devided by batch size.
+      per epoch as the training dataset size divided by batch size.
     shuffle: True if the dataset is shuffled before training.
     export_dir: The location of the model checkpoint files.
     distribution_strategy: A string specifying which Distribution Strategy to
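The default described in that docstring is simple arithmetic; a sketch with made-up numbers (whether the pipeline floors or rounds the division is not shown in this hunk, so floor division here is an assumption):

    train_dataset_size = 1000  # illustrative
    batch_size = 32            # illustrative
    steps_per_epoch = train_dataset_size // batch_size  # 31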
@@ -21,7 +21,7 @@ package(
     default_visibility = ["//mediapipe:__subpackages__"],
 )

-# TODO: Remove the unncessary test data once the demo data are moved to an open-sourced
+# TODO: Remove the unnecessary test data once the demo data are moved to an open-sourced
 # directory.
 filegroup(
     name = "testdata",
@@ -155,8 +155,8 @@ class Dataset(classification_dataset.ClassificationDataset):
       ObjectDetectorDataset object.
     """
     # Get TFRecord Files
-    tfrecord_file_patten = cache_prefix + '*.tfrecord'
-    matched_files = tf.io.gfile.glob(tfrecord_file_patten)
+    tfrecord_file_pattern = cache_prefix + '*.tfrecord'
+    matched_files = tf.io.gfile.glob(tfrecord_file_pattern)
     if not matched_files:
       raise ValueError('TFRecord files are empty.')
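The renamed variable sits in the dataset's cache-lookup path; the same check runs standalone as below (the cache_prefix value is hypothetical):

    import tensorflow as tf

    cache_prefix = '/tmp/od_cache/coco_train'  # hypothetical cache location
    tfrecord_file_pattern = cache_prefix + '*.tfrecord'
    matched_files = tf.io.gfile.glob(tfrecord_file_pattern)
    if not matched_files:
      raise ValueError('TFRecord files are empty.')

tf.io.gfile.glob works on both local paths and remote filesystems, which is why it is used here instead of the standard-library glob.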
@@ -345,7 +345,7 @@ def _coco_annotations_to_lists(
   Args:
     bbox_annotations: List of dicts with keys ['bbox', 'category_id']
     image_height: Height of image
-    image_width: Width of iamge
+    image_width: Width of image

   Returns:
     (data, num_annotations_skipped) tuple where data contains the keys:
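For context, one COCO-style `bbox_annotations` entry of the shape the docstring assumes, with the pixel-to-normalized conversion that COCO tooling commonly applies; the normalization step is an assumption about what the helper does, not code from this change:

    # One COCO-style annotation: bbox is [x_min, y_min, width, height] in pixels.
    bbox_annotations = [{'bbox': [10.0, 20.0, 50.0, 80.0], 'category_id': 3}]
    image_height, image_width = 480, 640

    for ann in bbox_annotations:
      x, y, w, h = ann['bbox']
      # Normalized [ymin, xmin, ymax, xmax], a common layout for detection data.
      ymin, xmin = y / image_height, x / image_width
      ymax, xmax = (y + h) / image_height, (x + w) / image_width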
@@ -111,7 +111,7 @@ TEST(MetadataVersionTest,
 TEST(MetadataVersionTest,
      GetMinimumMetadataParserVersionForModelMetadataVocabAssociatedFiles) {
   // Creates a metadata flatbuffer with the field,
-  // ModelMetadata.associated_fiels, populated with the vocabulary file type.
+  // ModelMetadata.associated_fields, populated with the vocabulary file type.
   FlatBufferBuilder builder(1024);
   AssociatedFileBuilder associated_file_builder(builder);
   associated_file_builder.add_type(tflite::AssociatedFileType_VOCABULARY);
@@ -159,8 +159,8 @@ TEST(MetadataVersionTest,
 TEST(MetadataVersionTest,
      GetMinimumMetadataParserVersionForInputMetadataVocabAssociatedFiles) {
   // Creates a metadata flatbuffer with the field,
-  // SubGraphMetadata.input_tensor_metadata.associated_fiels, populated with the
-  // vocabulary file type.
+  // SubGraphMetadata.input_tensor_metadata.associated_fields, populated with
+  // the vocabulary file type.
   FlatBufferBuilder builder(1024);
   AssociatedFileBuilder associated_file_builder(builder);
   associated_file_builder.add_type(tflite::AssociatedFileType_VOCABULARY);
@@ -184,7 +184,7 @@ TEST(MetadataVersionTest,
 TEST(MetadataVersionTest,
      GetMinimumMetadataParserVersionForOutputMetadataVocabAssociatedFiles) {
   // Creates a metadata flatbuffer with the field,
-  // SubGraphMetadata.output_tensor_metadata.associated_fiels, populated with
+  // SubGraphMetadata.output_tensor_metadata.associated_fields, populated with
   // the vocabulary file type.
   FlatBufferBuilder builder(1024);
   AssociatedFileBuilder associated_file_builder(builder);
@@ -188,7 +188,7 @@ class BaseVisionTaskApi : public tasks::core::BaseTaskApi {
   // For 90° and 270° rotations, we need to swap width and height.
   // This is due to the internal behavior of ImageToTensorCalculator, which:
   // - first denormalizes the provided rect by multiplying the rect width or
-  //   height by the image width or height, repectively.
+  //   height by the image width or height, respectively.
   // - then rotates this by denormalized rect by the provided rotation, and
   //   uses this for cropping,
   // - then finally rotates this back.
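The swap this comment describes is easiest to see numerically; a small sketch of the pre-denormalization width/height swap for 90°/270° rotations (pure illustration, not the calculator's code):

    # For 90 and 270 degree rotations the crop ends up transposed, so the
    # rect's normalized width and height are swapped before denormalization.
    def rect_to_pixels(norm_w, norm_h, image_w, image_h, rotation_deg):
      if rotation_deg in (90, 270):
        norm_w, norm_h = norm_h, norm_w
      return norm_w * image_w, norm_h * image_h  # denormalize against the image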
@@ -374,22 +374,22 @@ class SingleHandGestureRecognizerGraph : public core::ModelTaskGraph {
     // Inference for custom gesture classifier if it exists.
     if (has_custom_gesture_classifier) {
       ASSIGN_OR_RETURN(
-          auto gesture_clasification_list,
+          auto gesture_classification_list,
           GetGestureClassificationList(
              sub_task_model_resources.custom_gesture_classifier_model_resource,
              graph_options.custom_gesture_classifier_graph_options(),
              embedding_tensors, graph));
-      gesture_clasification_list >> combine_predictions.In(classifier_nums++);
+      gesture_classification_list >> combine_predictions.In(classifier_nums++);
     }

     // Inference for canned gesture classifier.
     ASSIGN_OR_RETURN(
-        auto gesture_clasification_list,
+        auto gesture_classification_list,
         GetGestureClassificationList(
             sub_task_model_resources.canned_gesture_classifier_model_resource,
             graph_options.canned_gesture_classifier_graph_options(),
             embedding_tensors, graph));
-    gesture_clasification_list >> combine_predictions.In(classifier_nums++);
+    gesture_classification_list >> combine_predictions.In(classifier_nums++);

     auto combined_classification_list =
         combine_predictions.Out(kPredictionTag).Cast<ClassificationList>();
@@ -29,7 +29,7 @@ NS_ASSUME_NONNULL_BEGIN
  * @param classObject The specified class associated with the bundle containing the file to be
  * loaded.
  * @param name Name of the image file.
- * @param type Extenstion of the image file.
+ * @param type Extension of the image file.
  *
  * @return The `MPPImage` object contains the loaded image. This method returns
  * nil if it cannot load the image.
@@ -46,7 +46,7 @@ NS_ASSUME_NONNULL_BEGIN
  * @param classObject The specified class associated with the bundle containing the file to be
  * loaded.
  * @param name Name of the image file.
- * @param type Extenstion of the image file.
+ * @param type Extension of the image file.
  * @param orientation Orientation of the image.
  *
  * @return The `MPPImage` object contains the loaded image. This method returns
@@ -94,7 +94,7 @@ public class TaskRunner implements AutoCloseable {
  *
  * <p>Note: This method is designed for processing batch data such as unrelated images and texts.
  * The call blocks the current thread until a failure status or a successful result is returned.
- * An internal timestamp will be assigend per invocation. This method is thread-safe and allows
+ * An internal timestamp will be assigned per invocation. This method is thread-safe and allows
  * clients to call it from different threads.
  *
  * @param inputs a map contains (input stream {@link String}, data {@link Packet}) pairs.
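The contract this Javadoc fixes up — a blocking call, one automatically assigned internal timestamp per invocation, safe to call from any thread — can be sketched generically; everything below is a hypothetical stand-in, not the TaskRunner API:

    import itertools
    import threading

    class BatchRunner:  # hypothetical stand-in for the Javadoc's contract
      def __init__(self, process_fn):
        self._process_fn = process_fn
        self._counter = itertools.count()  # monotonically increasing timestamps
        self._lock = threading.Lock()

      def process(self, inputs):
        # Each invocation gets its own internal timestamp; the lock keeps the
        # call safe when clients invoke it from different threads.
        with self._lock:
          timestamp = next(self._counter)
          return self._process_fn(inputs, timestamp)  # blocks until done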
@@ -254,7 +254,7 @@ public final class FaceStylizer extends BaseVisionTaskApi {
    * @throws IllegalArgumentException if the {@link ImageProcessingOptions} specify a
    *     region-of-interest.
    * @throws MediaPipeException if there is an internal error. Or if {@link FaceStylizer} is not
-   *     created wtih {@link ResultListener} set in {@link FaceStylizerOptions}.
+   *     created with {@link ResultListener} set in {@link FaceStylizerOptions}.
    */
   public void stylizeWithResultListener(MPImage image) {
     stylizeWithResultListener(image, ImageProcessingOptions.builder().build());
@@ -283,7 +283,7 @@ public final class FaceStylizer extends BaseVisionTaskApi {
    * @throws IllegalArgumentException if the {@link ImageProcessingOptions} specify a
    *     region-of-interest.
    * @throws MediaPipeException if there is an internal error. Or if {@link FaceStylizer} is not
-   *     created wtih {@link ResultListener} set in {@link FaceStylizerOptions}.
+   *     created with {@link ResultListener} set in {@link FaceStylizerOptions}.
    */
   public void stylizeWithResultListener(
       MPImage image, ImageProcessingOptions imageProcessingOptions) {
@@ -384,7 +384,7 @@ public final class FaceStylizer extends BaseVisionTaskApi {
    * @param image a MediaPipe {@link MPImage} object for processing.
    * @param timestampMs the input timestamp (in milliseconds).
    * @throws MediaPipeException if there is an internal error. Or if {@link FaceStylizer} is not
-   *     created wtih {@link ResultListener} set in {@link FaceStylizerOptions}.
+   *     created with {@link ResultListener} set in {@link FaceStylizerOptions}.
    */
   public void stylizeForVideoWithResultListener(MPImage image, long timestampMs) {
     stylizeForVideoWithResultListener(image, ImageProcessingOptions.builder().build(), timestampMs);
@@ -411,7 +411,7 @@ public final class FaceStylizer extends BaseVisionTaskApi {
    * @param image a MediaPipe {@link MPImage} object for processing.
    * @param timestampMs the input timestamp (in milliseconds).
    * @throws MediaPipeException if there is an internal error. Or if {@link FaceStylizer} is not
-   *     created wtih {@link ResultListener} set in {@link FaceStylizerOptions}.
+   *     created with {@link ResultListener} set in {@link FaceStylizerOptions}.
    */
   public void stylizeForVideoWithResultListener(
       MPImage image, ImageProcessingOptions imageProcessingOptions, long timestampMs) {
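All four @throws fixes above describe the same contract: the *WithResultListener entry points require a ResultListener to have been set in FaceStylizerOptions when the task was created. A generic sketch of that options-time-callback pattern (names are hypothetical, not the MediaPipe Java API):

    class StylizerError(RuntimeError):
      """Stands in for MediaPipeException in this sketch."""

    class Stylizer:  # hypothetical; mirrors the options-time callback contract
      def __init__(self, result_listener=None):
        self._result_listener = result_listener  # set once, at creation

      def stylize_with_result_listener(self, image):
        if self._result_listener is None:
          raise StylizerError(
              'not created with a result listener set in the options')
        self._result_listener(self._stylize(image))

      def _stylize(self, image):
        return image  # placeholder for the actual model inference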
@@ -32,12 +32,12 @@ describe('convertFromClassificationResultProto()', () => {
     classifcations.setHeadIndex(1);
     classifcations.setHeadName('headName');
     const classificationList = new ClassificationList();
-    const clasification = new Classification();
-    clasification.setIndex(2);
-    clasification.setScore(0.3);
-    clasification.setDisplayName('displayName');
-    clasification.setLabel('categoryName');
-    classificationList.addClassification(clasification);
+    const classification = new Classification();
+    classification.setIndex(2);
+    classification.setScore(0.3);
+    classification.setDisplayName('displayName');
+    classification.setLabel('categoryName');
+    classificationList.addClassification(classification);
     classifcations.setClassificationList(classificationList);
     classificationResult.addClassifications(classifcations);
@@ -62,8 +62,8 @@ describe('convertFromClassificationResultProto()', () => {
     const classificationResult = new ClassificationResult();
     const classifcations = new Classifications();
     const classificationList = new ClassificationList();
-    const clasification = new Classification();
-    classificationList.addClassification(clasification);
+    const classification = new Classification();
+    classificationList.addClassification(classification);
     classifcations.setClassificationList(classificationList);
     classificationResult.addClassifications(classifcations);