From 47fa1a9578c19b0fcb9b4bb0eb44618255307103 Mon Sep 17 00:00:00 2001 From: MediaPipe Team Date: Mon, 20 Mar 2023 00:23:46 -0700 Subject: [PATCH] Internal change PiperOrigin-RevId: 517886450 --- mediapipe/tasks/cc/core/model_task_graph.cc | 2 +- mediapipe/tasks/cc/core/model_task_graph.h | 4 ++-- mediapipe/tasks/cc/core/task_runner.cc | 2 +- mediapipe/tasks/cc/core/task_runner.h | 4 ++-- .../tasks/cc/metadata/metadata_version.cc | 2 +- .../metadata/tests/metadata_version_test.cc | 2 +- .../custom_ops/utils/utf/utf.h | 2 +- .../cc/vision/face_detector/face_detector.h | 2 +- .../face_geometry/libs/geometry_pipeline.cc | 4 ++-- .../face_geometry/proto/face_geometry.proto | 2 +- .../vision/face_landmarker/face_landmarker.h | 2 +- .../face_landmarker/face_landmarker_graph.cc | 6 +++--- .../face_landmarks_detector_graph.cc | 2 +- .../cc/vision/face_stylizer/face_stylizer.h | 4 ++-- .../pose_detector/pose_detector_graph.cc | 4 ++-- .../ios/common/utils/sources/MPPCommonUtils.h | 2 +- .../tasks/ios/core/sources/MPPTaskInfo.h | 2 +- .../tasks/ios/core/sources/MPPTaskRunner.h | 4 ++-- .../tasks/python/core/pybind/task_runner.cc | 4 ++-- mediapipe/tasks/python/metadata/metadata.py | 20 +++++++++---------- .../metadata_writers/image_segmenter.py | 2 +- .../audio_embedder/audio_embedder_test.ts | 2 +- .../gesture_recognizer_options.d.ts | 2 +- 23 files changed, 41 insertions(+), 41 deletions(-) diff --git a/mediapipe/tasks/cc/core/model_task_graph.cc b/mediapipe/tasks/cc/core/model_task_graph.cc index 0cb556ec2..653c6b9ff 100644 --- a/mediapipe/tasks/cc/core/model_task_graph.cc +++ b/mediapipe/tasks/cc/core/model_task_graph.cc @@ -138,7 +138,7 @@ class InferenceSubgraph : public Subgraph { delegate.mutable_tflite()->CopyFrom(acceleration.tflite()); break; case Acceleration::DELEGATE_NOT_SET: - // Deafult inference calculator setting. + // Default inference calculator setting. break; } return delegate; diff --git a/mediapipe/tasks/cc/core/model_task_graph.h b/mediapipe/tasks/cc/core/model_task_graph.h index 3068b2c46..aa864c9fc 100644 --- a/mediapipe/tasks/cc/core/model_task_graph.h +++ b/mediapipe/tasks/cc/core/model_task_graph.h @@ -124,10 +124,10 @@ class ModelTaskGraph : public Subgraph { // Inserts a mediapipe task inference subgraph into the provided // GraphBuilder. The returned node provides the following interfaces to the // the rest of the graph: - // - a tensor vector (std::vector) input stream with tag + // - a tensor vector (std::vector<Tensor>) input stream with tag // "TENSORS", representing the input tensors to be consumed by the // inference engine. - // - a tensor vector (std::vector) output stream with tag + // - a tensor vector (std::vector<Tensor>) output stream with tag // "TENSORS", representing the output tensors generated by the inference // engine. // - a MetadataExtractor output side packet with tag "METADATA_EXTRACTOR".
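The task_runner.h comments patched in the next hunks spell out the synchronous contract: creating the runner without a PacketsCallback selects the blocking Process() path, untimed inputs get an internal per-invocation timestamp, and explicit timestamps must increase across calls. A minimal sketch of that flow follows — it assumes Create() can be called with just the config (op resolver and callback defaulted), that PacketMap is the std::map<std::string, Packet> alias from task_runner.h, and that the "image_in" stream name and int payload are illustrative placeholders, not part of this patch:

    #include <cstdint>
    #include <map>
    #include <memory>
    #include <string>
    #include <utility>

    #include "absl/status/status.h"
    #include "mediapipe/framework/calculator_framework.h"
    #include "mediapipe/tasks/cc/core/task_runner.h"

    absl::Status RunTwice(mediapipe::CalculatorGraphConfig config) {
      using ::mediapipe::tasks::core::TaskRunner;
      // No PacketsCallback is supplied, so this runner must be driven through
      // the synchronous Process() method rather than the asynchronous Send().
      auto runner_or = TaskRunner::Create(std::move(config));
      if (!runner_or.ok()) return runner_or.status();
      std::unique_ptr<TaskRunner> runner = std::move(runner_or).value();

      for (int64_t ts = 0; ts < 2; ++ts) {
        // Explicit timestamps must be strictly greater than those of the
        // previous invocation, per the contract documented in task_runner.h.
        std::map<std::string, mediapipe::Packet> inputs;
        inputs["image_in"] =
            mediapipe::MakePacket<int>(0).At(mediapipe::Timestamp(ts));
        auto result_or = runner->Process(std::move(inputs));
        if (!result_or.ok()) return result_or.status();
      }
      // Close() shuts the input streams and waits for the graph to finish,
      // mirroring the CloseAllInputStreams()/WaitUntilDone() calls patched below.
      return runner->Close();
    }

Had a callback been supplied at Create() time, the same inputs would instead go through Send() and results would arrive asynchronously via that callback.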
diff --git a/mediapipe/tasks/cc/core/task_runner.cc b/mediapipe/tasks/cc/core/task_runner.cc index 9a87551e7..fc933d547 100644 --- a/mediapipe/tasks/cc/core/task_runner.cc +++ b/mediapipe/tasks/cc/core/task_runner.cc @@ -301,7 +301,7 @@ absl::Status TaskRunner::Close() { } is_running_ = false; MP_RETURN_IF_ERROR( - AddPayload(graph_.CloseAllInputStreams(), "Fail to close intput streams", + AddPayload(graph_.CloseAllInputStreams(), "Fail to close input streams", MediaPipeTasksStatus::kRunnerFailsToCloseError)); MP_RETURN_IF_ERROR(AddPayload( graph_.WaitUntilDone(), "Fail to shutdown the MediaPipe graph.", diff --git a/mediapipe/tasks/cc/core/task_runner.h b/mediapipe/tasks/cc/core/task_runner.h index 0d049c782..8123a45aa 100644 --- a/mediapipe/tasks/cc/core/task_runner.h +++ b/mediapipe/tasks/cc/core/task_runner.h @@ -65,7 +65,7 @@ class TaskRunner { // Creates the task runner with a CalculatorGraphConfig proto. // If a tflite op resolver object is provided, the task runner will take // it as the global op resolver for all models running within this task. - // The op resolver's owernship will be transferred into the pipeleine runner. + // The op resolver's ownership will be transferred into the pipeleine runner. // When a user-defined PacketsCallback is provided, clients must use the // asynchronous method, Send(), to provide the input packets. If the packets // callback is absent, clients must use the synchronous method, Process(), to @@ -84,7 +84,7 @@ class TaskRunner { // frames from a video file and an audio file. The call blocks the current // thread until a failure status or a successful result is returned. // If the input packets have no timestamp, an internal timestamp will be - // assigend per invocation. Otherwise, when the timestamp is set in the + // assigned per invocation. Otherwise, when the timestamp is set in the // input packets, the caller must ensure that the input packet timestamps are // greater than the timestamps of the previous invocation. This method is // thread-unsafe and it is the caller's responsibility to synchronize access diff --git a/mediapipe/tasks/cc/metadata/metadata_version.cc b/mediapipe/tasks/cc/metadata/metadata_version.cc index 923d3aa56..7e2414dd5 100644 --- a/mediapipe/tasks/cc/metadata/metadata_version.cc +++ b/mediapipe/tasks/cc/metadata/metadata_version.cc @@ -213,7 +213,7 @@ void UpdateMinimumVersionForTable(const tflite::Content* table, Version* min_version) { if (table == nullptr) return; - // Checks the ContenProperties field. + // Checks the ContentProperties field. if (table->content_properties_type() == ContentProperties_AudioProperties) { UpdateMinimumVersion( GetMemberVersion(SchemaMembers::kContentPropertiesAudioProperties), diff --git a/mediapipe/tasks/cc/metadata/tests/metadata_version_test.cc b/mediapipe/tasks/cc/metadata/tests/metadata_version_test.cc index 3085e6585..32ff51482 100644 --- a/mediapipe/tasks/cc/metadata/tests/metadata_version_test.cc +++ b/mediapipe/tasks/cc/metadata/tests/metadata_version_test.cc @@ -265,7 +265,7 @@ TEST(MetadataVersionTest, metadata_builder.add_subgraph_metadata(subgraphs); FinishModelMetadataBuffer(builder, metadata_builder.Finish()); - // Gets the mimimum metadata parser version. + // Gets the minimum metadata parser version. 
std::string min_version; EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(), builder.GetSize(), &min_version), diff --git a/mediapipe/tasks/cc/text/language_detector/custom_ops/utils/utf/utf.h b/mediapipe/tasks/cc/text/language_detector/custom_ops/utils/utf/utf.h index f3b14772e..24d9b9dbe 100644 --- a/mediapipe/tasks/cc/text/language_detector/custom_ops/utils/utf/utf.h +++ b/mediapipe/tasks/cc/text/language_detector/custom_ops/utils/utf/utf.h @@ -56,7 +56,7 @@ extern "C" { int utf_runetochar(char* s, const Rune* r); // utf_charntorune copies (decodes) at most UTFmax bytes starting at `str` to -// one rune, pointed to by `rune`, accesss at most `length` bytes of `str`, and +// one rune, pointed to by `rune`, access at most `length` bytes of `str`, and // returns the number of bytes consumed. // If the UTF sequence is incomplete within n bytes, // utf_charntorune will set *r to Runeerror and return 0. If it is complete diff --git a/mediapipe/tasks/cc/vision/face_detector/face_detector.h b/mediapipe/tasks/cc/vision/face_detector/face_detector.h index 78715528f..ae485819d 100644 --- a/mediapipe/tasks/cc/vision/face_detector/face_detector.h +++ b/mediapipe/tasks/cc/vision/face_detector/face_detector.h @@ -74,7 +74,7 @@ class FaceDetector : core::BaseVisionTaskApi { // three running modes: // 1) Image mode for detecting faces on single image inputs. Users // provide mediapipe::Image to the `Detect` method, and will receive the - // deteced face detection results as the return value. + // detected face detection results as the return value. // 2) Video mode for detecting faces on the decoded frames of a // video. Users call `DetectForVideo` method, and will receive the detected // face detection results as the return value. diff --git a/mediapipe/tasks/cc/vision/face_geometry/libs/geometry_pipeline.cc b/mediapipe/tasks/cc/vision/face_geometry/libs/geometry_pipeline.cc index c7ac7c634..061f35f51 100644 --- a/mediapipe/tasks/cc/vision/face_geometry/libs/geometry_pipeline.cc +++ b/mediapipe/tasks/cc/vision/face_geometry/libs/geometry_pipeline.cc @@ -99,7 +99,7 @@ class ScreenToMetricSpaceConverter { // // (3) Use the canonical-to-runtime scale from (2) to unproject the screen // landmarks. The result is referenced as "intermediate landmarks" because - // they are the first estimation of the resuling metric landmarks, but are + // they are the first estimation of the resulting metric landmarks, but are // not quite there yet. // // (4) Estimate a canonical-to-runtime landmark set scale by running the @@ -347,7 +347,7 @@ class GeometryPipelineImpl : public GeometryPipeline { proto::Mesh3d* mutable_mesh = face_geometry.mutable_mesh(); // Copy the canonical face mesh as the face geometry mesh. mutable_mesh->CopyFrom(canonical_mesh_); - // Replace XYZ vertex mesh coodinates with the metric landmark positions. + // Replace XYZ vertex mesh coordinates with the metric landmark positions. for (int i = 0; i < canonical_mesh_num_vertices_; ++i) { uint32_t vertex_buffer_offset = canonical_mesh_vertex_size_ * i + canonical_mesh_vertex_position_offset_; diff --git a/mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry.proto b/mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry.proto index 45a02bbcf..149e10afd 100644 --- a/mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry.proto +++ b/mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry.proto @@ -28,7 +28,7 @@ message FaceGeometry { // the face landmark IDs.
// // XYZ coordinates exist in the right-handed Metric 3D space configured by an - // environment. UV coodinates are taken from the canonical face mesh model. + // environment. UV coordinates are taken from the canonical face mesh model. // // XY coordinates are guaranteed to match the screen positions of // the input face landmarks after (1) being multiplied by the face pose diff --git a/mediapipe/tasks/cc/vision/face_landmarker/face_landmarker.h b/mediapipe/tasks/cc/vision/face_landmarker/face_landmarker.h index 5a5c8404a..2c93fcba5 100644 --- a/mediapipe/tasks/cc/vision/face_landmarker/face_landmarker.h +++ b/mediapipe/tasks/cc/vision/face_landmarker/face_landmarker.h @@ -109,7 +109,7 @@ class FaceLandmarker : tasks::vision::core::BaseVisionTaskApi { // three running modes: // 1) Image mode for detecting face landmarks on single image inputs. Users // provide mediapipe::Image to the `Detect` method, and will receive the - // deteced face landmarks results as the return value. + // detected face landmarks results as the return value. // 2) Video mode for detecting face landmarks on the decoded frames of a // video. Users call `DetectForVideo` method, and will receive the detected // face landmarks results as the return value. diff --git a/mediapipe/tasks/cc/vision/face_landmarker/face_landmarker_graph.cc b/mediapipe/tasks/cc/vision/face_landmarker/face_landmarker_graph.cc index 78927f27b..99d99466a 100644 --- a/mediapipe/tasks/cc/vision/face_landmarker/face_landmarker_graph.cc +++ b/mediapipe/tasks/cc/vision/face_landmarker/face_landmarker_graph.cc @@ -160,7 +160,7 @@ absl::Status SetSubTaskBaseOptions(const ModelAssetBundleResources& resources, ->mutable_acceleration() ->mutable_xnnpack(); LOG(WARNING) << "Face blendshape model contains CPU only ops. Sets " - << "FaceBlendshapesGraph acceleartion to Xnnpack."; + << "FaceBlendshapesGraph acceleration to Xnnpack."; } return absl::OkStatus(); @@ -180,7 +180,7 @@ absl::Status SetSubTaskBaseOptions(const ModelAssetBundleResources& resources, // would be triggered to detect faces. // // FaceGeometryFromLandmarksGraph finds the transformation from canonical face -// to the detected faces. This transformation is useful for renderring face +// to the detected faces. This transformation is useful for rendering face // effects on the detected faces. This subgraph is added if users request a // FaceGeometry Tag. // @@ -324,7 +324,7 @@ class FaceLandmarkerGraph : public core::ModelTaskGraph { !sc->Service(::mediapipe::tasks::core::kModelResourcesCacheService) .IsAvailable())); if (output_geometry) { - // Set the face geometry metdata file for + // Set the face geometry metadata file for // FaceGeometryFromLandmarksGraph. ASSIGN_OR_RETURN(auto face_geometry_pipeline_metadata_file, model_asset_bundle_resources->GetFile( diff --git a/mediapipe/tasks/cc/vision/face_landmarker/face_landmarks_detector_graph.cc b/mediapipe/tasks/cc/vision/face_landmarker/face_landmarks_detector_graph.cc index a898f2fe9..df9cab5b5 100644 --- a/mediapipe/tasks/cc/vision/face_landmarker/face_landmarks_detector_graph.cc +++ b/mediapipe/tasks/cc/vision/face_landmarker/face_landmarks_detector_graph.cc @@ -462,7 +462,7 @@ REGISTER_MEDIAPIPE_GRAPH( // - Accepts an input image and a vector of face rect RoIs to detect the // multiple face landmarks enclosed by the RoIs. Output vectors of // face landmarks related results, where each element in the vectors -// corrresponds to the result of the same face. +// corresponds to the result of the same face. 
// // Inputs: // IMAGE - Image diff --git a/mediapipe/tasks/cc/vision/face_stylizer/face_stylizer.h b/mediapipe/tasks/cc/vision/face_stylizer/face_stylizer.h index 27e64c934..58501c47b 100644 --- a/mediapipe/tasks/cc/vision/face_stylizer/face_stylizer.h +++ b/mediapipe/tasks/cc/vision/face_stylizer/face_stylizer.h @@ -81,7 +81,7 @@ class FaceStylizer : tasks::vision::core::BaseVisionTaskApi { // running mode. // // The input image can be of any size with format RGB or RGBA. - // To ensure that the output image has reasonable quailty, the stylized output + // To ensure that the output image has reasonable quality, the stylized output // image size is the smaller of the model output size and the size of the // 'region_of_interest' specified in 'image_processing_options'. absl::StatusOr Stylize( @@ -106,7 +106,7 @@ class FaceStylizer : tasks::vision::core::BaseVisionTaskApi { // The image can be of any size with format RGB or RGBA. It's required to // provide the video frame's timestamp (in milliseconds). The input timestamps // must be monotonically increasing. - // To ensure that the output image has reasonable quailty, the stylized output + // To ensure that the output image has reasonable quality, the stylized output // image size is the smaller of the model output size and the size of the // 'region_of_interest' specified in 'image_processing_options'. absl::StatusOr StylizeForVideo( diff --git a/mediapipe/tasks/cc/vision/pose_detector/pose_detector_graph.cc b/mediapipe/tasks/cc/vision/pose_detector/pose_detector_graph.cc index 7c8958b3c..2a888ca83 100644 --- a/mediapipe/tasks/cc/vision/pose_detector/pose_detector_graph.cc +++ b/mediapipe/tasks/cc/vision/pose_detector/pose_detector_graph.cc @@ -73,7 +73,7 @@ struct PoseDetectionOuts { // detector with model metadata. void ConfigureSsdAnchorsCalculator( mediapipe::SsdAnchorsCalculatorOptions* options) { - // Dervied from + // Derived from // mediapipe/modules/pose_detection/pose_detection_gpu.pbtxt options->set_num_layers(5); options->set_min_scale(0.1484375); @@ -96,7 +96,7 @@ void ConfigureSsdAnchorsCalculator( void ConfigureTensorsToDetectionsCalculator( const PoseDetectorGraphOptions& tasks_options, mediapipe::TensorsToDetectionsCalculatorOptions* options) { - // Dervied from + // Derived from // mediapipe/modules/pose_detection/pose_detection_gpu.pbtxt options->set_num_classes(1); options->set_num_boxes(2254); diff --git a/mediapipe/tasks/ios/common/utils/sources/MPPCommonUtils.h b/mediapipe/tasks/ios/common/utils/sources/MPPCommonUtils.h index 69c28b916..36d90f223 100644 --- a/mediapipe/tasks/ios/common/utils/sources/MPPCommonUtils.h +++ b/mediapipe/tasks/ios/common/utils/sources/MPPCommonUtils.h @@ -70,7 +70,7 @@ extern NSString *const MPPTasksErrorDomain; * @param error Pointer to the memory location where errors if any should be saved. If `nil`, no * error will be saved. * - * @return Pointer to the allocated block of memory on successfull allocation. `nil` in case as + * @return Pointer to the allocated block of memory on successful allocation. `nil` in case as * error is encountered because of invalid `memSize`. If failure is due to any other reason, method * terminates program execution. 
*/ diff --git a/mediapipe/tasks/ios/core/sources/MPPTaskInfo.h b/mediapipe/tasks/ios/core/sources/MPPTaskInfo.h index b94e704d1..15f2ee0c1 100644 --- a/mediapipe/tasks/ios/core/sources/MPPTaskInfo.h +++ b/mediapipe/tasks/ios/core/sources/MPPTaskInfo.h @@ -22,7 +22,7 @@ NS_ASSUME_NONNULL_BEGIN /** - * Holds all needed informaton to initialize a MediaPipe Task. + * Holds all needed information to initialize a MediaPipe Task. */ @interface MPPTaskInfo : NSObject diff --git a/mediapipe/tasks/ios/core/sources/MPPTaskRunner.h b/mediapipe/tasks/ios/core/sources/MPPTaskRunner.h index 704fc453f..ed57d2df2 100644 --- a/mediapipe/tasks/ios/core/sources/MPPTaskRunner.h +++ b/mediapipe/tasks/ios/core/sources/MPPTaskRunner.h @@ -30,7 +30,7 @@ NS_ASSUME_NONNULL_BEGIN * additional functionality. For eg:, vision tasks must create an `MPPVisionTaskRunner` and provide * additional functionality. An instance of `MPPVisionTaskRunner` can in turn be used by the each * vision task for creation and execution of the task. Please see the documentation for the C++ Task - * Runner for more details on how the taks runner operates. + * Runner for more details on how the task runner operates. */ @interface MPPTaskRunner : NSObject @@ -66,7 +66,7 @@ NS_ASSUME_NONNULL_BEGIN * for processing either batch data such as unrelated images and texts or offline streaming data * such as the decoded frames from a video file or audio file. The call blocks the current * thread until a failure status or a successful result is returned. If the input packets have no - * timestamp, an internal timestamp will be assigend per invocation. Otherwise, when the timestamp + * timestamp, an internal timestamp will be assigned per invocation. Otherwise, when the timestamp * is set in the input packets, the caller must ensure that the input packet timestamps are greater * than the timestamps of the previous invocation. This method is thread-unsafe and it is the * caller's responsibility to synchronize access to this method across multiple threads and to diff --git a/mediapipe/tasks/python/core/pybind/task_runner.cc b/mediapipe/tasks/python/core/pybind/task_runner.cc index cb13787c3..aa48a1a9a 100644 --- a/mediapipe/tasks/python/core/pybind/task_runner.cc +++ b/mediapipe/tasks/python/core/pybind/task_runner.cc @@ -96,7 +96,7 @@ Args: Raises: RuntimeError: Any of the following: a) The graph config proto is invalid. - b) The underlying medipaipe graph fails to initilize and start. + b) The underlying medipaipe graph fails to initialize and start. )doc", py::arg("graph_config"), py::arg("packets_callback") = py::none()); @@ -120,7 +120,7 @@ This method is designed for processing either batch data such as unrelated images and texts or offline streaming data such as the decoded frames from a video file and an audio file. The call blocks the current thread until a failure status or a successful result is returned. -If the input packets have no timestamp, an internal timestamp will be assigend +If the input packets have no timestamp, an internal timestamp will be assigned per invocation. Otherwise, when the timestamp is set in the input packets, the caller must ensure that the input packet timestamps are greater than the timestamps of the previous invocation.
This method is thread-unsafe and it is diff --git a/mediapipe/tasks/python/metadata/metadata.py b/mediapipe/tasks/python/metadata/metadata.py index e294bfc8c..25d83cae8 100644 --- a/mediapipe/tasks/python/metadata/metadata.py +++ b/mediapipe/tasks/python/metadata/metadata.py @@ -112,10 +112,10 @@ class MetadataPopulator(object): mediapipe/tasks/metadata/metadata_schema.fbs Example usage: - Populate matadata and label file into an image classifier model. + Populate metadata and label file into an image classifier model. First, based on metadata_schema.fbs, generate the metadata for this image - classifer model using Flatbuffers API. Attach the label file onto the ouput + classifier model using Flatbuffers API. Attach the label file onto the output tensor (the tensor of probabilities) in the metadata. Then, pack the metadata and label file into the model as follows. @@ -173,7 +173,7 @@ class MetadataPopulator(object): Raises: IOError: File not found. - ValueError: the model does not have the expected flatbuffer identifer. + ValueError: the model does not have the expected flatbuffer identifier. """ _assert_model_file_identifier(model_file) self._model_file = model_file @@ -193,7 +193,7 @@ class MetadataPopulator(object): Raises: IOError: File not found. - ValueError: the model does not have the expected flatbuffer identifer. + ValueError: the model does not have the expected flatbuffer identifier. """ return cls(model_file) @@ -210,7 +210,7 @@ class MetadataPopulator(object): A MetadataPopulator(_MetadataPopulatorWithBuffer) object. Raises: - ValueError: the model does not have the expected flatbuffer identifer. + ValueError: the model does not have the expected flatbuffer identifier. """ return _MetadataPopulatorWithBuffer(model_buf) @@ -293,7 +293,7 @@ class MetadataPopulator(object): Raises: ValueError: The metadata to be populated is empty. - ValueError: The metadata does not have the expected flatbuffer identifer. + ValueError: The metadata does not have the expected flatbuffer identifier. ValueError: Cannot get minimum metadata parser version. ValueError: The number of SubgraphMetadata is not 1. ValueError: The number of input/output tensors does not match the number @@ -646,7 +646,7 @@ class MetadataPopulator(object): class _MetadataPopulatorWithBuffer(MetadataPopulator): - """Subclass of MetadtaPopulator that populates metadata to a model buffer. + """Subclass of MetadataPopulator that populates metadata to a model buffer. This class is used to populate metadata into a in-memory model buffer. As we use Zip API to concatenate associated files after tflite model file, the @@ -664,7 +664,7 @@ class _MetadataPopulatorWithBuffer(MetadataPopulator): Raises: ValueError: model_buf is empty. - ValueError: model_buf does not have the expected flatbuffer identifer. + ValueError: model_buf does not have the expected flatbuffer identifier. """ if not model_buf: raise ValueError("model_buf cannot be empty.") @@ -826,7 +826,7 @@ def convert_to_json( metadata_buffer: valid metadata buffer in bytes. custom_metadata_schema: A dict of custom metadata schema, in which key is custom metadata name [1], value is the filepath that defines custom - metadata schema. For intance, custom_metadata_schema = + metadata schema. For instance, custom_metadata_schema = {"SEGMENTER_METADATA": "metadata/vision_tasks_metadata_schema.fbs"}. 
[1]: https://github.com/google/mediapipe/blob/46b5c4012d2ef76c9d92bb0d88a6b107aee83814/mediapipe/tasks/metadata/metadata_schema.fbs#L612 @@ -834,7 +834,7 @@ def convert_to_json( Metadata in JSON format. Raises: - ValueError: error occured when parsing the metadata schema file. + ValueError: error occurred when parsing the metadata schema file. """ opt = _pywrap_flatbuffers.IDLOptions() opt.strict_json = True diff --git a/mediapipe/tasks/python/metadata/metadata_writers/image_segmenter.py b/mediapipe/tasks/python/metadata/metadata_writers/image_segmenter.py index 8e215437e..3268f3b1f 100644 --- a/mediapipe/tasks/python/metadata/metadata_writers/image_segmenter.py +++ b/mediapipe/tasks/python/metadata/metadata_writers/image_segmenter.py @@ -59,7 +59,7 @@ def convert_to_json(metadata_buffer: bytearray) -> str: Metadata in JSON format. Raises: - ValueError: error occured when parsing the metadata schema file. + ValueError: error occurred when parsing the metadata schema file. """ return metadata.convert_to_json( metadata_buffer, diff --git a/mediapipe/tasks/web/audio/audio_embedder/audio_embedder_test.ts b/mediapipe/tasks/web/audio/audio_embedder/audio_embedder_test.ts index a8a2b232b..9e37e5987 100644 --- a/mediapipe/tasks/web/audio/audio_embedder/audio_embedder_test.ts +++ b/mediapipe/tasks/web/audio/audio_embedder/audio_embedder_test.ts @@ -161,7 +161,7 @@ describe('AudioEmbedder', () => { {floatEmbedding: [0.1, 0.9], headIndex: 1, headName: 'headName'}); } - it('from embeddings strem', async () => { + it('from embeddings stream', async () => { audioEmbedder.fakeWasmModule._waitUntilIdle.and.callFake(() => { verifyListenersRegistered(audioEmbedder); // Pass the test data to our listener diff --git a/mediapipe/tasks/web/vision/gesture_recognizer/gesture_recognizer_options.d.ts b/mediapipe/tasks/web/vision/gesture_recognizer/gesture_recognizer_options.d.ts index dd8fc9548..9e9728af8 100644 --- a/mediapipe/tasks/web/vision/gesture_recognizer/gesture_recognizer_options.d.ts +++ b/mediapipe/tasks/web/vision/gesture_recognizer/gesture_recognizer_options.d.ts @@ -44,7 +44,7 @@ export declare interface GestureRecognizerOptions extends VisionTaskOptions { minTrackingConfidence?: number|undefined; /** - * Sets the optional `ClassifierOptions` controling the canned gestures + * Sets the optional `ClassifierOptions` controlling the canned gestures * classifier, such as score threshold, allow list and deny list of gestures. * The categories for canned gesture * classifiers are: ["None", "Closed_Fist", "Open_Palm", "Pointing_Up",
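The final hunk documents the `ClassifierOptions` that control the canned gestures classifier in the web API; the C++ gesture recognizer task exposes the same knobs. A hedged sketch of setting them there — the GestureRecognizerOptions and ClassifierOptions field names are assumed from the C++ headers rather than shown in this patch, the model path is a placeholder, and the allowlist entries come from the canned category list quoted above:

    #include <memory>
    #include <utility>

    #include "mediapipe/tasks/cc/vision/gesture_recognizer/gesture_recognizer.h"

    std::unique_ptr<mediapipe::tasks::vision::gesture_recognizer::GestureRecognizer>
    MakeRecognizer() {
      namespace gr = ::mediapipe::tasks::vision::gesture_recognizer;

      auto options = std::make_unique<gr::GestureRecognizerOptions>();
      options->base_options.model_asset_path = "/path/to/gesture_recognizer.task";
      // Keep only confident canned-gesture predictions, and restrict the canned
      // classifier to two of its built-in categories.
      options->canned_gestures_classifier_options.score_threshold = 0.6f;
      options->canned_gestures_classifier_options.category_allowlist = {
          "Open_Palm", "Pointing_Up"};

      auto recognizer_or = gr::GestureRecognizer::Create(std::move(options));
      if (!recognizer_or.ok()) return nullptr;  // Real code should surface the status.
      return std::move(recognizer_or).value();
    }

As with the score threshold and allow/deny lists described in the TypeScript doc comment, leaving these fields at their defaults keeps all canned categories enabled.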