Internal change

PiperOrigin-RevId: 516535124
MediaPipe Team 2023-03-14 08:42:31 -07:00 committed by Copybara-Service
parent bc641a22a8
commit 2659ea0392
10 changed files with 32 additions and 32 deletions


@ -30,7 +30,7 @@ constexpr absl::string_view kMediaPipeTasksPayload = "MediaPipeTasksStatus";
//
// At runtime, such codes are meant to be attached (where applicable) to a
// `absl::Status` in a key-value manner with `kMediaPipeTasksPayload` as key and
// stringifed error code as value (aka payload). This logic is encapsulated in
// stringified error code as value (aka payload). This logic is encapsulated in
// the `CreateStatusWithPayload` helper below for convenience.
//
// The returned status includes:
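
To make the payload mechanism described above concrete, here is a minimal sketch of attaching a stringified error code to an `absl::Status` under the `kMediaPipeTasksPayload` key. It is an illustration only: the real `CreateStatusWithPayload` helper lives in this header and may differ, and the plain `int error_code` parameter is a stand-in for a `MediaPipeTasksStatus` enum value.

#include "absl/status/status.h"
#include "absl/strings/cord.h"
#include "absl/strings/str_cat.h"
#include "absl/strings/string_view.h"

// Sketch only: attaches the stringified error code as a payload keyed by
// kMediaPipeTasksPayload, mirroring the contract described above.
inline absl::Status MakeStatusWithTasksPayload(absl::StatusCode canonical_code,
                                               absl::string_view message,
                                               int error_code) {
  absl::Status status(canonical_code, message);
  status.SetPayload(kMediaPipeTasksPayload,
                    absl::Cord(absl::StrCat(error_code)));
  return status;
}

A caller can later recover the stringified code with `status.GetPayload(kMediaPipeTasksPayload)`.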


@ -64,7 +64,7 @@ class ModelMetadataPopulator {
// Loads associated files into the TFLite FlatBuffer model. The input is a map
// of {filename, file contents}.
//
// Warning: this method removes any previoulsy present associated files.
// Warning: this method removes any previously present associated files.
// Calling this method multiple time removes any associated files from
// previous calls, so this method should usually be called only once.
void LoadAssociatedFiles(
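
As an illustration of the {filename, file contents} map described above, the sketch below loads two hypothetical associated files into an already-constructed populator. The exact container type expected by `LoadAssociatedFiles` is an assumption, since the full signature is cut off in this hunk, and `ModelMetadataPopulator` is assumed to be in scope.

#include <map>
#include <string>

// Sketch only: the file names and the std::map argument type are assumptions.
// A second call would replace these files rather than add to them, per the
// warning above.
void LoadLabelAndVocabFiles(ModelMetadataPopulator& populator,
                            const std::string& labels_contents,
                            const std::string& vocab_contents) {
  std::map<std::string, std::string> associated_files = {
      {"labels.txt", labels_contents},
      {"vocab.txt", vocab_contents},
  };
  populator.LoadAssociatedFiles(associated_files);
}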


@ -31,8 +31,8 @@ PYBIND11_MODULE(_pywrap_metadata_version, m) {
// Using pybind11 type conversions to convert between Python and native
// C++ types. There are other options to provide access to native Python types
// in C++ and vice versa. See the pybind 11 instrcution [1] for more details.
// Type converstions is recommended by pybind11, though the main downside
// in C++ and vice versa. See the pybind11 instructions [1] for more details.
// Type conversions are recommended by pybind11, though the main downside
// is that a copy of the data must be made on every Python to C++ transition:
// this is needed since the C++ and Python versions of the same type generally
// wont have the same memory layout.
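
The copy-on-transition behaviour discussed above can be seen in a tiny, self-contained module; the module and function names below are hypothetical and not part of MediaPipe.

#include <pybind11/pybind11.h>
#include <pybind11/stl.h>  // enables automatic std::vector <-> Python list conversion

#include <string>
#include <vector>

// Each call converts the returned std::vector<std::string> into a brand-new
// Python list of str, so the data is copied on every C++ -> Python transition.
std::vector<std::string> GetVersionParts() { return {"1", "2", "0"}; }

PYBIND11_MODULE(_example_conversions, m) {
  m.def("get_version_parts", &GetVersionParts);
}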


@ -79,7 +79,7 @@ TEST(MetadataVersionTest,
auto metadata = metadata_builder.Finish();
FinishModelMetadataBuffer(builder, metadata);
// Gets the mimimum metadata parser version.
// Gets the minimum metadata parser version.
std::string min_version;
EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(),
builder.GetSize(), &min_version),
@ -100,7 +100,7 @@ TEST(MetadataVersionTest,
auto metadata = metadata_builder.Finish();
builder.Finish(metadata);
// Gets the mimimum metadata parser version and triggers error.
// Gets the minimum metadata parser version and triggers error.
std::string min_version;
EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(),
builder.GetSize(), &min_version),
@ -121,7 +121,7 @@ TEST(MetadataVersionTest,
metadata_builder.add_associated_files(associated_files);
FinishModelMetadataBuffer(builder, metadata_builder.Finish());
// Gets the mimimum metadata parser version.
// Gets the minimum metadata parser version.
std::string min_version;
EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(),
builder.GetSize(), &min_version),
@ -147,7 +147,7 @@ TEST(MetadataVersionTest,
metadata_builder.add_subgraph_metadata(subgraphs);
FinishModelMetadataBuffer(builder, metadata_builder.Finish());
// Gets the mimimum metadata parser version.
// Gets the minimum metadata parser version.
std::string min_version;
EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(),
builder.GetSize(), &min_version),
@ -172,7 +172,7 @@ TEST(MetadataVersionTest,
std::vector<Offset<TensorMetadata>>{tensor_builder.Finish()});
CreateModelWithMetadata(tensors, builder);
// Gets the mimimum metadata parser version.
// Gets the minimum metadata parser version.
std::string min_version;
EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(),
builder.GetSize(), &min_version),
@ -203,7 +203,7 @@ TEST(MetadataVersionTest,
metadata_builder.add_subgraph_metadata(subgraphs);
FinishModelMetadataBuffer(builder, metadata_builder.Finish());
// Gets the mimimum metadata parser version.
// Gets the minimum metadata parser version.
std::string min_version;
EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(),
builder.GetSize(), &min_version),
@ -234,7 +234,7 @@ TEST(MetadataVersionTest,
metadata_builder.add_subgraph_metadata(subgraphs);
FinishModelMetadataBuffer(builder, metadata_builder.Finish());
// Gets the mimimum metadata parser version.
// Gets the minimum metadata parser version.
std::string min_version;
EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(),
builder.GetSize(), &min_version),
@ -294,7 +294,7 @@ TEST(MetadataVersionTest,
std::vector<Offset<TensorMetadata>>{tensor_builder.Finish()});
CreateModelWithMetadata(tensors, builder);
// Gets the mimimum metadata parser version.
// Gets the minimum metadata parser version.
std::string min_version;
EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(),
builder.GetSize(), &min_version),
@ -323,7 +323,7 @@ TEST(MetadataVersionTest,
std::vector<Offset<TensorMetadata>>{tensor_builder.Finish()});
CreateModelWithMetadata(tensors, builder);
// Gets the mimimum metadata parser version.
// Gets the minimum metadata parser version.
std::string min_version;
EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(),
builder.GetSize(), &min_version),
@ -348,7 +348,7 @@ TEST(MetadataVersionTest,
metadata_builder.add_subgraph_metadata(subgraphs);
FinishModelMetadataBuffer(builder, metadata_builder.Finish());
// Gets the mimimum metadata parser version.
// Gets the minimum metadata parser version.
std::string min_version;
EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(),
builder.GetSize(), &min_version),
@ -373,7 +373,7 @@ TEST(MetadataVersionTest,
metadata_builder.add_subgraph_metadata(subgraphs);
FinishModelMetadataBuffer(builder, metadata_builder.Finish());
// Gets the mimimum metadata parser version.
// Gets the minimum metadata parser version.
std::string min_version;
EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(),
builder.GetSize(), &min_version),
@ -404,7 +404,7 @@ TEST(MetadataVersionTest,
metadata_builder.add_subgraph_metadata(subgraphs);
FinishModelMetadataBuffer(builder, metadata_builder.Finish());
// Gets the mimimum metadata parser version.
// Gets the minimum metadata parser version.
std::string min_version;
EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(),
builder.GetSize(), &min_version),
@ -431,7 +431,7 @@ TEST(MetadataVersionTest,
std::vector<Offset<TensorMetadata>>{tensor_builder.Finish()});
CreateModelWithMetadata(tensors, builder);
// Gets the mimimum metadata parser version.
// Gets the minimum metadata parser version.
std::string min_version;
EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(),
builder.GetSize(), &min_version),
@ -453,7 +453,7 @@ TEST(MetadataVersionTest,
metadata_builder.add_associated_files(associated_files);
FinishModelMetadataBuffer(builder, metadata_builder.Finish());
// Gets the mimimum metadata parser version.
// Gets the minimum metadata parser version.
std::string min_version;
EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(),
builder.GetSize(), &min_version),
@ -476,7 +476,7 @@ TEST(MetadataVersionTest,
metadata_builder.add_associated_files(associated_files);
FinishModelMetadataBuffer(builder, metadata_builder.Finish());
// Gets the mimimum metadata parser version.
// Gets the minimum metadata parser version.
std::string min_version;
EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(),
builder.GetSize(), &min_version),
@ -504,7 +504,7 @@ TEST(MetadataVersionTest, GetMinimumMetadataParserVersionForOptions) {
metadata_builder.add_subgraph_metadata(subgraphs);
FinishModelMetadataBuffer(builder, metadata_builder.Finish());
// Gets the mimimum metadata parser version.
// Gets the minimum metadata parser version.
std::string min_version;
EXPECT_EQ(GetMinimumMetadataParserVersion(builder.GetBufferPointer(),
builder.GetSize(), &min_version),


@ -34,7 +34,7 @@ constexpr char kTestSPModelPath[] =
std::unique_ptr<SentencePieceTokenizer> CreateSentencePieceTokenizer(
absl::string_view model_path) {
// We are using `LoadBinaryContent()` instead of loading the model direclty
// We are using `LoadBinaryContent()` instead of loading the model directly
// via `SentencePieceTokenizer` so that the file can be located on Windows
std::string buffer = LoadBinaryContent(kTestSPModelPath);
return absl::make_unique<SentencePieceTokenizer>(buffer.data(),


@ -127,7 +127,7 @@ absl::Status SetSubTaskBaseOptions(const ModelAssetBundleResources& resources,
->mutable_acceleration()
->mutable_xnnpack();
LOG(WARNING) << "Hand Gesture Recognizer contains CPU only ops. Sets "
<< "HandGestureRecognizerGraph acceleartion to Xnnpack.";
<< "HandGestureRecognizerGraph acceleration to Xnnpack.";
}
hand_gesture_recognizer_graph_options->mutable_base_options()
->set_use_stream_mode(options->base_options().use_stream_mode());


@ -101,7 +101,7 @@ class HandLandmarker : tasks::vision::core::BaseVisionTaskApi {
// three running modes:
// 1) Image mode for detecting hand landmarks on single image inputs. Users
// provide mediapipe::Image to the `Detect` method, and will receive the
// deteced hand landmarks results as the return value.
// detected hand landmarks results as the return value.
// 2) Video mode for detecting hand landmarks on the decoded frames of a
// video. Users call `DetectForVideo` method, and will receive the detected
// hand landmarks results as the return value.
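
As a rough sketch of the image mode described above: the `Detect` call and `mediapipe::Image` input come from the comment, while the `Create` factory, option fields, and result handling below are assumptions for illustration, with the relevant HandLandmarker headers and namespaces assumed to be in scope.

#include <memory>
#include <utility>

#include "absl/status/status.h"

// Sketch only: option/field names are assumed; error handling is minimal.
absl::Status DetectOnSingleImage(const mediapipe::Image& image) {
  auto options = std::make_unique<HandLandmarkerOptions>();
  options->base_options.model_asset_path =
      "/path/to/hand_landmarker.task";  // hypothetical model path
  auto landmarker = HandLandmarker::Create(std::move(options));
  if (!landmarker.ok()) return landmarker.status();
  // Image mode: pass a mediapipe::Image and receive the detected hand
  // landmarks result as the return value.
  auto result = (*landmarker)->Detect(image);
  return result.status();
}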


@ -409,7 +409,7 @@ REGISTER_MEDIAPIPE_GRAPH(
// - Accepts CPU input image and a vector of hand rect RoIs to detect the
// multiple hands landmarks enclosed by the RoIs. Output vectors of
// hand landmarks related results, where each element in the vectors
// corrresponds to the result of the same hand.
// corresponds to the result of the same hand.
//
// Inputs:
// IMAGE - Image


@ -52,7 +52,7 @@ constexpr char kMobileNetV3Embedder[] =
constexpr double kSimilarityTolerancy = 1e-6;
// Utility function to check the sizes, head_index and head_names of a result
// procuded by kMobileNetV3Embedder.
// produced by kMobileNetV3Embedder.
void CheckMobileNetV3Result(const ImageEmbedderResult& result, bool quantized) {
EXPECT_EQ(result.embeddings.size(), 1);
EXPECT_EQ(result.embeddings[0].head_index, 0);


@ -233,7 +233,7 @@ table ImageProperties {
//
// <Codegen usage>:
// Input image tensors: NA.
// Output image tensors: parses the values into a data stucture that represents
// Output image tensors: parses the values into a data structure that represents
// bounding boxes. For example, in the generated wrapper for Android, it returns
// the output as android.graphics.Rect objects.
enum BoundingBoxType : byte {
@ -389,7 +389,7 @@ table NormalizationOptions{
// mean and std are normalization parameters. Tensor values are normalized
// on a per-channel basis, by the formula
// (x - mean) / std.
// If there is only one value in mean or std, we'll propogate the value to
// If there is only one value in mean or std, we'll propagate the value to
// all channels.
//
// Quantized models share the same normalization parameters as their
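
To read the (x - mean) / std rule and the single-value broadcast above concretely, here is a small self-contained sketch; the function and its interleaved-channel layout are illustrative assumptions, not part of the schema.

#include <cstddef>
#include <vector>

// Sketch of the rule above: per-channel (x - mean) / std, broadcasting a
// single mean/std entry to all channels. Assumes interleaved channel layout.
std::vector<float> NormalizePerChannel(const std::vector<float>& values,
                                       std::vector<float> mean,
                                       std::vector<float> std_dev,
                                       std::size_t num_channels) {
  if (mean.size() == 1) {
    const float m = mean[0];
    mean.assign(num_channels, m);
  }
  if (std_dev.size() == 1) {
    const float s = std_dev[0];
    std_dev.assign(num_channels, s);
  }
  std::vector<float> normalized(values.size());
  for (std::size_t i = 0; i < values.size(); ++i) {
    const std::size_t c = i % num_channels;  // channel index of this value
    normalized[i] = (values[i] - mean[c]) / std_dev[c];
  }
  return normalized;
}
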
@ -526,7 +526,7 @@ table Stats {
// Max and min are not currently used in tflite.support codegen. They mainly
// serve as references for users to better understand the model. They can also
// be used to validate model pre/post processing results.
// If there is only one value in max or min, we'll propogate the value to
// If there is only one value in max or min, we'll propagate the value to
// all channels.
// Per-channel maximum value of the tensor.
@ -542,7 +542,7 @@ table Stats {
// has four outputs: classes, scores, bounding boxes, and number of detections.
// If the four outputs are bundled together using TensorGroup (for example,
// named as "detection result"), the codegen tool will generate the class,
// `DetectionResult`, which contains the class, score, and bouding box. And the
// `DetectionResult`, which contains the class, score, and bounding box. And the
// outputs of the model will be converted to a list of `DetectionResults` and
// the number of detection. Note that the number of detection is a single
// number, therefore is inappropriate for the list of `DetectionResult`.
@ -624,7 +624,7 @@ table SubGraphMetadata {
// A description explains details about what the subgraph does.
description:string;
// Metadata of all input tensors used in this subgraph. It matches extactly
// Metadata of all input tensors used in this subgraph. It matches exactly
// the input tensors specified by `SubGraph.inputs` in the TFLite
// schema.fbs file[2]. The number of `TensorMetadata` in the array should
// equal to the number of indices in `SubGraph.inputs`.
@ -634,7 +634,7 @@ table SubGraphMetadata {
// Determines how to process the inputs.
input_tensor_metadata:[TensorMetadata];
// Metadata of all output tensors used in this subgraph. It matches extactly
// Metadata of all output tensors used in this subgraph. It matches exactly
// the output tensors specified by `SubGraph.outputs` in the TFLite
// schema.fbs file[2]. The number of `TensorMetadata` in the array should
// equal to the number of indices in `SubGraph.outputs`.
@ -724,7 +724,7 @@ table ModelMetadata {
// number among the versions of all the fields populated and the smallest
// compatible version indicated by the file identifier.
//
// This field is automaticaly populated by the MetadataPopulator when
// This field is automatically populated by the MetadataPopulator when
// the metadata is populated into a TFLite model.
min_parser_version:string;
}