diff --git a/mediapipe/tasks/cc/common.h b/mediapipe/tasks/cc/common.h index 1295177df..70892c5cd 100644 --- a/mediapipe/tasks/cc/common.h +++ b/mediapipe/tasks/cc/common.h @@ -30,7 +30,7 @@ constexpr absl::string_view kMediaPipeTasksPayload = "MediaPipeTasksStatus"; // // At runtime, such codes are meant to be attached (where applicable) to a // `absl::Status` in a key-value manner with `kMediaPipeTasksPayload` as key and -// stringifed error code as value (aka payload). This logic is encapsulated in +// stringified error code as value (aka payload). This logic is encapsulated in // the `CreateStatusWithPayload` helper below for convenience. // // The returned status includes: diff --git a/mediapipe/tasks/cc/metadata/metadata_populator.h b/mediapipe/tasks/cc/metadata/metadata_populator.h index 024ad785f..c0554f704 100644 --- a/mediapipe/tasks/cc/metadata/metadata_populator.h +++ b/mediapipe/tasks/cc/metadata/metadata_populator.h @@ -64,7 +64,7 @@ class ModelMetadataPopulator { // Loads associated files into the TFLite FlatBuffer model. The input is a map // of {filename, file contents}. // - // Warning: this method removes any previoulsy present associated files. + // Warning: this method removes any previously present associated files. // Calling this method multiple time removes any associated files from // previous calls, so this method should usually be called only once. void LoadAssociatedFiles( diff --git a/mediapipe/tasks/cc/metadata/python/metadata_version.cc b/mediapipe/tasks/cc/metadata/python/metadata_version.cc index 860a00e4f..e3072bc9e 100644 --- a/mediapipe/tasks/cc/metadata/python/metadata_version.cc +++ b/mediapipe/tasks/cc/metadata/python/metadata_version.cc @@ -31,8 +31,8 @@ PYBIND11_MODULE(_pywrap_metadata_version, m) { // Using pybind11 type conversions to convert between Python and native // C++ types. There are other options to provide access to native Python types - // in C++ and vice versa. 
See the pybind 11 instrcution [1] for more details. - // Type converstions is recommended by pybind11, though the main downside + // in C++ and vice versa. See the pybind 11 instruction [1] for more details. + // Type conversions are recommended by pybind11, though the main downside // is that a copy of the data must be made on every Python to C++ transition: // this is needed since the C++ and Python versions of the same type generally // won’t have the same memory layout. diff --git a/mediapipe/tasks/cc/metadata/tests/metadata_version_test.cc b/mediapipe/tasks/cc/metadata/tests/metadata_version_test.cc index 273c91685..3085e6585 100644 --- a/mediapipe/tasks/cc/metadata/tests/metadata_version_test.cc +++ b/mediapipe/tasks/cc/metadata/tests/metadata_version_test.cc @@ -79,7 +79,7 @@ TEST(MetadataVersionTest, auto metadata = metadata_builder.Finish(); FinishModelMetadataBuffer(builder, metadata); - // Gets the mimimum metadata parser version. + // Gets the minimum metadata parser version. std::string min_version; EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(), builder.GetSize(), &min_version), @@ -100,7 +100,7 @@ TEST(MetadataVersionTest, auto metadata = metadata_builder.Finish(); builder.Finish(metadata); - // Gets the mimimum metadata parser version and triggers error. + // Gets the minimum metadata parser version and triggers error. std::string min_version; EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(), builder.GetSize(), &min_version), @@ -121,7 +121,7 @@ TEST(MetadataVersionTest, metadata_builder.add_associated_files(associated_files); FinishModelMetadataBuffer(builder, metadata_builder.Finish()); - // Gets the mimimum metadata parser version. + // Gets the minimum metadata parser version. 
std::string min_version; EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(), builder.GetSize(), &min_version), @@ -147,7 +147,7 @@ TEST(MetadataVersionTest, metadata_builder.add_subgraph_metadata(subgraphs); FinishModelMetadataBuffer(builder, metadata_builder.Finish()); - // Gets the mimimum metadata parser version. + // Gets the minimum metadata parser version. std::string min_version; EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(), builder.GetSize(), &min_version), @@ -172,7 +172,7 @@ TEST(MetadataVersionTest, std::vector>{tensor_builder.Finish()}); CreateModelWithMetadata(tensors, builder); - // Gets the mimimum metadata parser version. + // Gets the minimum metadata parser version. std::string min_version; EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(), builder.GetSize(), &min_version), @@ -203,7 +203,7 @@ TEST(MetadataVersionTest, metadata_builder.add_subgraph_metadata(subgraphs); FinishModelMetadataBuffer(builder, metadata_builder.Finish()); - // Gets the mimimum metadata parser version. + // Gets the minimum metadata parser version. std::string min_version; EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(), builder.GetSize(), &min_version), @@ -234,7 +234,7 @@ TEST(MetadataVersionTest, metadata_builder.add_subgraph_metadata(subgraphs); FinishModelMetadataBuffer(builder, metadata_builder.Finish()); - // Gets the mimimum metadata parser version. + // Gets the minimum metadata parser version. std::string min_version; EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(), builder.GetSize(), &min_version), @@ -294,7 +294,7 @@ TEST(MetadataVersionTest, std::vector>{tensor_builder.Finish()}); CreateModelWithMetadata(tensors, builder); - // Gets the mimimum metadata parser version. + // Gets the minimum metadata parser version. 
std::string min_version; EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(), builder.GetSize(), &min_version), @@ -323,7 +323,7 @@ TEST(MetadataVersionTest, std::vector>{tensor_builder.Finish()}); CreateModelWithMetadata(tensors, builder); - // Gets the mimimum metadata parser version. + // Gets the minimum metadata parser version. std::string min_version; EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(), builder.GetSize(), &min_version), @@ -348,7 +348,7 @@ TEST(MetadataVersionTest, metadata_builder.add_subgraph_metadata(subgraphs); FinishModelMetadataBuffer(builder, metadata_builder.Finish()); - // Gets the mimimum metadata parser version. + // Gets the minimum metadata parser version. std::string min_version; EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(), builder.GetSize(), &min_version), @@ -373,7 +373,7 @@ TEST(MetadataVersionTest, metadata_builder.add_subgraph_metadata(subgraphs); FinishModelMetadataBuffer(builder, metadata_builder.Finish()); - // Gets the mimimum metadata parser version. + // Gets the minimum metadata parser version. std::string min_version; EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(), builder.GetSize(), &min_version), @@ -404,7 +404,7 @@ TEST(MetadataVersionTest, metadata_builder.add_subgraph_metadata(subgraphs); FinishModelMetadataBuffer(builder, metadata_builder.Finish()); - // Gets the mimimum metadata parser version. + // Gets the minimum metadata parser version. std::string min_version; EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(), builder.GetSize(), &min_version), @@ -431,7 +431,7 @@ TEST(MetadataVersionTest, std::vector>{tensor_builder.Finish()}); CreateModelWithMetadata(tensors, builder); - // Gets the mimimum metadata parser version. + // Gets the minimum metadata parser version. 
std::string min_version; EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(), builder.GetSize(), &min_version), @@ -453,7 +453,7 @@ TEST(MetadataVersionTest, metadata_builder.add_associated_files(associated_files); FinishModelMetadataBuffer(builder, metadata_builder.Finish()); - // Gets the mimimum metadata parser version. + // Gets the minimum metadata parser version. std::string min_version; EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(), builder.GetSize(), &min_version), @@ -476,7 +476,7 @@ TEST(MetadataVersionTest, metadata_builder.add_associated_files(associated_files); FinishModelMetadataBuffer(builder, metadata_builder.Finish()); - // Gets the mimimum metadata parser version. + // Gets the minimum metadata parser version. std::string min_version; EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(), builder.GetSize(), &min_version), @@ -504,7 +504,7 @@ TEST(MetadataVersionTest, GetMinimumMetadataParserVersionForOptions) { metadata_builder.add_subgraph_metadata(subgraphs); FinishModelMetadataBuffer(builder, metadata_builder.Finish()); - // Gets the mimimum metadata parser version. + // Gets the minimum metadata parser version. 
std::string min_version; EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(), builder.GetSize(), &min_version), diff --git a/mediapipe/tasks/cc/text/tokenizers/sentencepiece_tokenizer_test.cc b/mediapipe/tasks/cc/text/tokenizers/sentencepiece_tokenizer_test.cc index a42719446..88afabe1e 100644 --- a/mediapipe/tasks/cc/text/tokenizers/sentencepiece_tokenizer_test.cc +++ b/mediapipe/tasks/cc/text/tokenizers/sentencepiece_tokenizer_test.cc @@ -34,7 +34,7 @@ constexpr char kTestSPModelPath[] = std::unique_ptr CreateSentencePieceTokenizer( absl::string_view model_path) { - // We are using `LoadBinaryContent()` instead of loading the model direclty + // We are using `LoadBinaryContent()` instead of loading the model directly // via `SentencePieceTokenizer` so that the file can be located on Windows std::string buffer = LoadBinaryContent(kTestSPModelPath); return absl::make_unique(buffer.data(), diff --git a/mediapipe/tasks/cc/vision/gesture_recognizer/gesture_recognizer_graph.cc b/mediapipe/tasks/cc/vision/gesture_recognizer/gesture_recognizer_graph.cc index b6f6c88da..11b2d12c4 100644 --- a/mediapipe/tasks/cc/vision/gesture_recognizer/gesture_recognizer_graph.cc +++ b/mediapipe/tasks/cc/vision/gesture_recognizer/gesture_recognizer_graph.cc @@ -127,7 +127,7 @@ absl::Status SetSubTaskBaseOptions(const ModelAssetBundleResources& resources, ->mutable_acceleration() ->mutable_xnnpack(); LOG(WARNING) << "Hand Gesture Recognizer contains CPU only ops. 
Sets " - << "HandGestureRecognizerGraph acceleartion to Xnnpack."; + << "HandGestureRecognizerGraph acceleration to Xnnpack."; } hand_gesture_recognizer_graph_options->mutable_base_options() ->set_use_stream_mode(options->base_options().use_stream_mode()); diff --git a/mediapipe/tasks/cc/vision/hand_landmarker/hand_landmarker.h b/mediapipe/tasks/cc/vision/hand_landmarker/hand_landmarker.h index 6f96fc68e..7a43d20d7 100644 --- a/mediapipe/tasks/cc/vision/hand_landmarker/hand_landmarker.h +++ b/mediapipe/tasks/cc/vision/hand_landmarker/hand_landmarker.h @@ -101,7 +101,7 @@ class HandLandmarker : tasks::vision::core::BaseVisionTaskApi { // three running modes: // 1) Image mode for detecting hand landmarks on single image inputs. Users // provide mediapipe::Image to the `Detect` method, and will receive the - // deteced hand landmarks results as the return value. + // detected hand landmarks results as the return value. // 2) Video mode for detecting hand landmarks on the decoded frames of a // video. Users call `DetectForVideo` method, and will receive the detected // hand landmarks results as the return value. diff --git a/mediapipe/tasks/cc/vision/hand_landmarker/hand_landmarks_detector_graph.cc b/mediapipe/tasks/cc/vision/hand_landmarker/hand_landmarks_detector_graph.cc index 6d232d3f1..f7fa83a11 100644 --- a/mediapipe/tasks/cc/vision/hand_landmarker/hand_landmarks_detector_graph.cc +++ b/mediapipe/tasks/cc/vision/hand_landmarker/hand_landmarks_detector_graph.cc @@ -409,7 +409,7 @@ REGISTER_MEDIAPIPE_GRAPH( // - Accepts CPU input image and a vector of hand rect RoIs to detect the // multiple hands landmarks enclosed by the RoIs. Output vectors of // hand landmarks related results, where each element in the vectors -// corrresponds to the result of the same hand. +// corresponds to the result of the same hand. 
// // Inputs: // IMAGE - Image diff --git a/mediapipe/tasks/cc/vision/image_embedder/image_embedder_test.cc b/mediapipe/tasks/cc/vision/image_embedder/image_embedder_test.cc index dd602bef5..8fa036b7d 100644 --- a/mediapipe/tasks/cc/vision/image_embedder/image_embedder_test.cc +++ b/mediapipe/tasks/cc/vision/image_embedder/image_embedder_test.cc @@ -52,7 +52,7 @@ constexpr char kMobileNetV3Embedder[] = constexpr double kSimilarityTolerancy = 1e-6; // Utility function to check the sizes, head_index and head_names of a result -// procuded by kMobileNetV3Embedder. +// produced by kMobileNetV3Embedder. void CheckMobileNetV3Result(const ImageEmbedderResult& result, bool quantized) { EXPECT_EQ(result.embeddings.size(), 1); EXPECT_EQ(result.embeddings[0].head_index, 0); diff --git a/mediapipe/tasks/metadata/metadata_schema.fbs b/mediapipe/tasks/metadata/metadata_schema.fbs index 3253e1ea8..8fe7a08fa 100644 --- a/mediapipe/tasks/metadata/metadata_schema.fbs +++ b/mediapipe/tasks/metadata/metadata_schema.fbs @@ -233,7 +233,7 @@ table ImageProperties { // // : // Input image tensors: NA. -// Output image tensors: parses the values into a data stucture that represents +// Output image tensors: parses the values into a data structure that represents // bounding boxes. For example, in the generated wrapper for Android, it returns // the output as android.graphics.Rect objects. enum BoundingBoxType : byte { @@ -389,7 +389,7 @@ table NormalizationOptions{ // mean and std are normalization parameters. Tensor values are normalized // on a per-channel basis, by the formula // (x - mean) / std. - // If there is only one value in mean or std, we'll propogate the value to + // If there is only one value in mean or std, we'll propagate the value to // all channels. // // Quantized models share the same normalization parameters as their @@ -526,7 +526,7 @@ table Stats { // Max and min are not currently used in tflite.support codegen. 
They mainly // serve as references for users to better understand the model. They can also // be used to validate model pre/post processing results. - // If there is only one value in max or min, we'll propogate the value to + // If there is only one value in max or min, we'll propagate the value to // all channels. // Per-channel maximum value of the tensor. @@ -542,7 +542,7 @@ table Stats { // has four outputs: classes, scores, bounding boxes, and number of detections. // If the four outputs are bundled together using TensorGroup (for example, // named as "detection result"), the codegen tool will generate the class, -// `DetectionResult`, which contains the class, score, and bouding box. And the +// `DetectionResult`, which contains the class, score, and bounding box. And the // outputs of the model will be converted to a list of `DetectionResults` and // the number of detection. Note that the number of detection is a single // number, therefore is inappropriate for the list of `DetectionResult`. @@ -624,7 +624,7 @@ table SubGraphMetadata { // A description explains details about what the subgraph does. description:string; - // Metadata of all input tensors used in this subgraph. It matches extactly + // Metadata of all input tensors used in this subgraph. It matches exactly // the input tensors specified by `SubGraph.inputs` in the TFLite // schema.fbs file[2]. The number of `TensorMetadata` in the array should // equal to the number of indices in `SubGraph.inputs`. @@ -634,7 +634,7 @@ table SubGraphMetadata { // Determines how to process the inputs. input_tensor_metadata:[TensorMetadata]; - // Metadata of all output tensors used in this subgraph. It matches extactly + // Metadata of all output tensors used in this subgraph. It matches exactly // the output tensors specified by `SubGraph.outputs` in the TFLite // schema.fbs file[2]. The number of `TensorMetadata` in the array should // equal to the number of indices in `SubGraph.outputs`. 
@@ -724,7 +724,7 @@ table ModelMetadata { // number among the versions of all the fields populated and the smallest // compatible version indicated by the file identifier. // - // This field is automaticaly populated by the MetadataPopulator when + // This field is automatically populated by the MetadataPopulator when // the metadata is populated into a TFLite model. min_parser_version:string; }