diff --git a/mediapipe/calculators/tensor/audio_to_tensor_calculator.cc b/mediapipe/calculators/tensor/audio_to_tensor_calculator.cc
index c8d38a653..7230e178d 100644
--- a/mediapipe/calculators/tensor/audio_to_tensor_calculator.cc
+++ b/mediapipe/calculators/tensor/audio_to_tensor_calculator.cc
@@ -109,7 +109,7 @@ bool IsValidFftSize(int size) {
 // Non-streaming mode: when "stream_mode" is set to false in the calculator
 // options, the calculators treats the packets in the input audio stream as
 // a batch of unrelated audio buffers. In each Process() call, the input
-// buffer will be frist resampled, and framed as fixed-sized, possibly
+// buffer will be first resampled, and framed as fixed-sized, possibly
 // overlapping tensors. The last tensor produced by a Process() invocation
 // will be zero-padding if the remaining samples are insufficient. As the
 // calculator treats the input packets as unrelated, all samples will be
@@ -159,7 +159,7 @@ class AudioToTensorCalculator : public Node {
  public:
   static constexpr Input<Matrix> kAudioIn{"AUDIO"};
   // TODO: Removes this optional input stream when the "AUDIO" stream
-  // uses the new mediapipe audio data containers that carry audio metatdata,
+  // uses the new mediapipe audio data containers that carry audio metadata,
   // such as sample rate.
   static constexpr Input<double>::Optional kAudioSampleRateIn{"SAMPLE_RATE"};
   static constexpr Output<std::vector<Tensor>> kTensorsOut{"TENSORS"};
diff --git a/mediapipe/calculators/tensor/audio_to_tensor_calculator.proto b/mediapipe/calculators/tensor/audio_to_tensor_calculator.proto
index 948c82a36..a49825586 100644
--- a/mediapipe/calculators/tensor/audio_to_tensor_calculator.proto
+++ b/mediapipe/calculators/tensor/audio_to_tensor_calculator.proto
@@ -37,7 +37,7 @@ message AudioToTensorCalculatorOptions {
   // will be converted into tensors.
   optional double target_sample_rate = 4;
 
-  // Whether to treat the input audio stream as a continous stream or a batch
+  // Whether to treat the input audio stream as a continuous stream or a batch
   // of unrelated audio buffers.
   optional bool stream_mode = 5 [default = true];
 
diff --git a/mediapipe/calculators/tensor/image_to_tensor_calculator_test.cc b/mediapipe/calculators/tensor/image_to_tensor_calculator_test.cc
index 7017c1e3a..51150a1ca 100644
--- a/mediapipe/calculators/tensor/image_to_tensor_calculator_test.cc
+++ b/mediapipe/calculators/tensor/image_to_tensor_calculator_test.cc
@@ -206,7 +206,7 @@ mediapipe::ImageFormat::Format GetImageFormat(int image_channels) {
   } else if (image_channels == 1) {
     return ImageFormat::GRAY8;
   }
-  ABSL_CHECK(false) << "Unsupported input image channles: " << image_channels;
+  ABSL_CHECK(false) << "Unsupported input image channels: " << image_channels;
 }
 
 Packet MakeImageFramePacket(cv::Mat input) {
diff --git a/mediapipe/calculators/tensor/tensors_to_landmarks_calculator.cc b/mediapipe/calculators/tensor/tensors_to_landmarks_calculator.cc
index 5942f234d..77488443f 100644
--- a/mediapipe/calculators/tensor/tensors_to_landmarks_calculator.cc
+++ b/mediapipe/calculators/tensor/tensors_to_landmarks_calculator.cc
@@ -124,7 +124,7 @@ absl::Status TensorsToLandmarksCalculator::Open(CalculatorContext* cc) {
       kFlipVertically(cc).IsConnected())) {
     RET_CHECK(options_.has_input_image_height() &&
               options_.has_input_image_width())
-        << "Must provide input width/height for using flipping when outputing "
+        << "Must provide input width/height for using flipping when outputting "
            "landmarks in absolute coordinates.";
   }
   return absl::OkStatus();
 }
diff --git a/mediapipe/calculators/tensorflow/pack_media_sequence_calculator.cc b/mediapipe/calculators/tensorflow/pack_media_sequence_calculator.cc
index 4972b202d..95962c261 100644
--- a/mediapipe/calculators/tensorflow/pack_media_sequence_calculator.cc
+++ b/mediapipe/calculators/tensorflow/pack_media_sequence_calculator.cc
@@ -79,7 +79,7 @@ namespace mpms = mediapipe::mediasequence;
 // and label and label_id are optional but at least one of them should be set.
 // "IMAGE_${NAME}", "BBOX_${NAME}", and "KEYPOINTS_${NAME}" will also store
 // prefixed versions of each stream, which allows for multiple image streams to
-// be included. However, the default names are suppored by more tools.
+// be included. However, the default names are supported by more tools.
 //
 // Example config:
 // node {
diff --git a/mediapipe/calculators/tensorflow/tensor_to_matrix_calculator.cc b/mediapipe/calculators/tensorflow/tensor_to_matrix_calculator.cc
index dc3d97844..ed234b3fa 100644
--- a/mediapipe/calculators/tensorflow/tensor_to_matrix_calculator.cc
+++ b/mediapipe/calculators/tensorflow/tensor_to_matrix_calculator.cc
@@ -67,8 +67,8 @@ absl::Status FillTimeSeriesHeaderIfValid(const Packet& header_packet,
 // -- 1-D or 2-D Tensor
 // Output:
 // -- Matrix with the same values as the Tensor
-// If input tensor is 1 dimensional, the ouput Matrix is of (1xn) shape.
-// If input tensor is 2 dimensional (batched), the ouput Matrix is (mxn) shape.
+// If input tensor is 1 dimensional, the output Matrix is of (1xn) shape.
+// If input tensor is 2 dimensional (batched), the output Matrix is (mxn) shape.
 //
 // Example Config
 // node: {
diff --git a/mediapipe/calculators/tensorflow/tensorflow_inference_calculator.cc b/mediapipe/calculators/tensorflow/tensorflow_inference_calculator.cc
index 84c32fed6..39993ada0 100644
--- a/mediapipe/calculators/tensorflow/tensorflow_inference_calculator.cc
+++ b/mediapipe/calculators/tensorflow/tensorflow_inference_calculator.cc
@@ -111,8 +111,8 @@ class InferenceState {
 // input_side_packet.
 //
 // The input and output streams are TensorFlow tensors labeled by tags. The tags
-// for the streams are matched to feeds and fetchs in a TensorFlow session using
-// a named_signature.generic_signature in the ModelManifest. The
+// for the streams are matched to feeds and fetches in a TensorFlow session
+// using a named_signature.generic_signature in the ModelManifest. The
 // generic_signature is used as key-value pairs between the MediaPipe tag and
 // the TensorFlow tensor. The signature_name in the options proto determines
 // which named_signature is used. The keys in the generic_signature must be
@@ -128,7 +128,7 @@ class InferenceState {
 // addition. Once batch_size inputs have been provided, the batch will be run
 // and the output tensors sent out on the output streams with timestamps
 // corresponding to the input stream packets. Setting the batch_size to 1
-// completely disables batching, but is indepdent of add_batch_dim_to_tensors.
+// completely disables batching, but is independent of add_batch_dim_to_tensors.
 //
 // The TensorFlowInferenceCalculator also support feeding states recurrently for
 // RNNs and LSTMs. Simply set the recurrent_tag_pair options to define the
diff --git a/mediapipe/calculators/tensorflow/tensorflow_inference_calculator.proto b/mediapipe/calculators/tensorflow/tensorflow_inference_calculator.proto
index a243412c0..f09664592 100644
--- a/mediapipe/calculators/tensorflow/tensorflow_inference_calculator.proto
+++ b/mediapipe/calculators/tensorflow/tensorflow_inference_calculator.proto
@@ -42,7 +42,7 @@ message TensorFlowInferenceCalculatorOptions {
   // If the 0th dimension is the batch dimension, then the tensors are
   // concatenated on that dimension. If the 0th is a data dimension, then a 0th
   // dimension is added before concatenating. If added, the extra dimension is
-  // removed before outputing the tensor. Examples of each case: If you want
+  // removed before outputting the tensor. Examples of each case: If you want
   // to batch spectra of audio over time for an LSTM, a time-frequency
   // representation has a 0th dimension as the batch dimension. If you want to
   // batch frames of video that are [width, height, channels], the batch
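Note (not part of the patch): the two AudioToTensorCalculatorOptions fields touched above are normally set in a CalculatorGraphConfig node. A minimal sketch follows; the stream names bound to the AUDIO/SAMPLE_RATE/TENSORS tags and the option values are illustrative assumptions, and any other options the calculator may require are omitted.

node {
  calculator: "AudioToTensorCalculator"
  input_stream: "AUDIO:audio_in"
  input_stream: "SAMPLE_RATE:audio_sample_rate"
  output_stream: "TENSORS:audio_tensors"
  options {
    [mediapipe.AudioToTensorCalculatorOptions.ext] {
      # Illustrative values only: resample to 16 kHz and treat each input
      # packet as an unrelated batch of audio buffers.
      target_sample_rate: 16000.0
      stream_mode: false
    }
  }
}

With stream_mode left at its default of true, the calculator instead treats the input as one continuous stream, as described in the proto comment fixed above.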