This fixes multiple typos in the new tasks' internal files

PiperOrigin-RevId: 525788850
This commit is contained in:
MediaPipe Team 2023-04-20 10:40:58 -07:00 committed by Copybara-Service
parent d4c7ad0411
commit 983932b6dd
5 changed files with 20 additions and 20 deletions

View File

@@ -67,7 +67,7 @@ constexpr char kPortraitExpectedFaceLandmarksName[] =
"portrait_expected_face_landmarks.pbtxt";
constexpr char kPortraitExpectedBlendshapesName[] =
"portrait_expected_blendshapes.pbtxt";
constexpr char kPortaitExpectedFaceGeomertyName[] =
constexpr char kPortraitExpectedFaceGeometryName[] =
"portrait_expected_face_geometry.pbtxt";
constexpr float kLandmarksDiffMargin = 0.03;
@@ -100,7 +100,7 @@ struct FaceLandmarkerTestParams {
mediapipe::MatrixData MakePortraitExpectedFacialTransformationMatrix() {
auto face_geometry = GetExpectedProto<face_geometry::proto::FaceGeometry>(
kPortaitExpectedFaceGeomertyName);
kPortraitExpectedFaceGeometryName);
return face_geometry.pose_transform_matrix();
}

View File

@@ -21,20 +21,20 @@ using ClassificationProto = ::mediapipe::Classification;
@implementation MPPCategory (Helpers)
+ (MPPCategory *)categoryWithProto:(const ClassificationProto &)clasificationProto {
+ (MPPCategory *)categoryWithProto:(const ClassificationProto &)classificationProto {
NSString *categoryName;
NSString *displayName;
if (clasificationProto.has_label()) {
categoryName = [NSString stringWithCppString:clasificationProto.label()];
if (classificationProto.has_label()) {
categoryName = [NSString stringWithCppString:classificationProto.label()];
}
if (clasificationProto.has_display_name()) {
displayName = [NSString stringWithCppString:clasificationProto.display_name()];
if (classificationProto.has_display_name()) {
displayName = [NSString stringWithCppString:classificationProto.display_name()];
}
return [[MPPCategory alloc] initWithIndex:clasificationProto.index()
score:clasificationProto.score()
return [[MPPCategory alloc] initWithIndex:classificationProto.index()
score:classificationProto.score()
categoryName:categoryName
displayName:displayName];
}

View File

@@ -13,7 +13,7 @@ const audio = await FilesetResolver.forAudioTasks(
const audioClassifier = await AudioClassifier.createFromModelPath(audio,
"https://storage.googleapis.com/mediapipe-tasks/audio_classifier/yamnet_audio_classifier_with_metadata.tflite"
);
const classifications = audioClassifier.classifiy(audioData);
const classifications = audioClassifier.classify(audioData);
```
## Audio Embedding

View File

@@ -28,7 +28,7 @@ const text = await FilesetResolver.forTextTasks(
const textClassifier = await TextClassifier.createFromModelPath(text,
"https://storage.googleapis.com/mediapipe-tasks/text_classifier/bert_text_classifier.tflite"
);
const classifications = textClassifier.classifiy(textData);
const classifications = textClassifier.classify(textData);
```
For more information, refer to the [Text Classification](https://developers.google.com/mediapipe/solutions/text/text_classifier/web_js) documentation.

View File

@@ -56,7 +56,7 @@ const DEFAULT_OUTPUT_CONFIDENCE_MASKS = true;
* asynchronous processing is needed, all data needs to be copied before the
* callback returns.
*/
export type ImageSegmenterCallack = (result: ImageSegmenterResult) => void;
export type ImageSegmenterCallback = (result: ImageSegmenterResult) => void;
/** Performs image segmentation on images. */
export class ImageSegmenter extends VisionTaskRunner {
@@ -208,7 +208,7 @@ export class ImageSegmenter extends VisionTaskRunner {
* lifetime of the returned data is only guaranteed for the duration of the
* callback.
*/
segment(image: ImageSource, callback: ImageSegmenterCallack): void;
segment(image: ImageSource, callback: ImageSegmenterCallback): void;
/**
* Performs image segmentation on the provided single image and invokes the
* callback with the response. The method returns synchronously once the
@@ -224,12 +224,12 @@ export class ImageSegmenter extends VisionTaskRunner {
*/
segment(
image: ImageSource, imageProcessingOptions: ImageProcessingOptions,
callback: ImageSegmenterCallack): void;
callback: ImageSegmenterCallback): void;
segment(
image: ImageSource,
imageProcessingOptionsOrCallback: ImageProcessingOptions|
ImageSegmenterCallack,
callback?: ImageSegmenterCallack): void {
ImageSegmenterCallback,
callback?: ImageSegmenterCallback): void {
const imageProcessingOptions =
typeof imageProcessingOptionsOrCallback !== 'function' ?
imageProcessingOptionsOrCallback :
@@ -258,7 +258,7 @@ export class ImageSegmenter extends VisionTaskRunner {
*/
segmentForVideo(
videoFrame: ImageSource, timestamp: number,
callback: ImageSegmenterCallack): void;
callback: ImageSegmenterCallback): void;
/**
* Performs image segmentation on the provided video frame and invokes the
* callback with the response. The method returns synchronously once the
@@ -275,12 +275,12 @@ export class ImageSegmenter extends VisionTaskRunner {
*/
segmentForVideo(
videoFrame: ImageSource, imageProcessingOptions: ImageProcessingOptions,
timestamp: number, callback: ImageSegmenterCallack): void;
timestamp: number, callback: ImageSegmenterCallback): void;
segmentForVideo(
videoFrame: ImageSource,
timestampOrImageProcessingOptions: number|ImageProcessingOptions,
timestampOrCallback: number|ImageSegmenterCallack,
callback?: ImageSegmenterCallack): void {
timestampOrCallback: number|ImageSegmenterCallback,
callback?: ImageSegmenterCallback): void {
const imageProcessingOptions =
typeof timestampOrImageProcessingOptions !== 'number' ?
timestampOrImageProcessingOptions :