Fix multiple typos in the new tasks' internal files
PiperOrigin-RevId: 525788850
This commit is contained in: parent d4c7ad0411, commit 983932b6dd
@@ -67,7 +67,7 @@ constexpr char kPortraitExpectedFaceLandmarksName[] =
     "portrait_expected_face_landmarks.pbtxt";
 constexpr char kPortraitExpectedBlendshapesName[] =
     "portrait_expected_blendshapes.pbtxt";
-constexpr char kPortaitExpectedFaceGeomertyName[] =
+constexpr char kPortraitExpectedFaceGeometryName[] =
     "portrait_expected_face_geometry.pbtxt";
 
 constexpr float kLandmarksDiffMargin = 0.03;
@@ -100,7 +100,7 @@ struct FaceLandmarkerTestParams {
 
 mediapipe::MatrixData MakePortraitExpectedFacialTransformationMatrix() {
   auto face_geometry = GetExpectedProto<face_geometry::proto::FaceGeometry>(
-      kPortaitExpectedFaceGeomertyName);
+      kPortraitExpectedFaceGeometryName);
   return face_geometry.pose_transform_matrix();
 }
 
@@ -21,20 +21,20 @@ using ClassificationProto = ::mediapipe::Classification;
 
 @implementation MPPCategory (Helpers)
 
-+ (MPPCategory *)categoryWithProto:(const ClassificationProto &)clasificationProto {
++ (MPPCategory *)categoryWithProto:(const ClassificationProto &)classificationProto {
   NSString *categoryName;
   NSString *displayName;
 
-  if (clasificationProto.has_label()) {
-    categoryName = [NSString stringWithCppString:clasificationProto.label()];
+  if (classificationProto.has_label()) {
+    categoryName = [NSString stringWithCppString:classificationProto.label()];
   }
 
-  if (clasificationProto.has_display_name()) {
-    displayName = [NSString stringWithCppString:clasificationProto.display_name()];
+  if (classificationProto.has_display_name()) {
+    displayName = [NSString stringWithCppString:classificationProto.display_name()];
   }
 
-  return [[MPPCategory alloc] initWithIndex:clasificationProto.index()
-                                      score:clasificationProto.score()
+  return [[MPPCategory alloc] initWithIndex:classificationProto.index()
+                                      score:classificationProto.score()
                                categoryName:categoryName
                                 displayName:displayName];
 }
@@ -13,7 +13,7 @@ const audio = await FilesetResolver.forAudioTasks(
 const audioClassifier = await AudioClassifier.createFromModelPath(audio,
     "https://storage.googleapis.com/mediapipe-tasks/audio_classifier/yamnet_audio_classifier_with_metadata.tflite"
 );
-const classifications = audioClassifier.classifiy(audioData);
+const classifications = audioClassifier.classify(audioData);
 ```
 
 ## Audio Embedding
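For reference, a minimal TypeScript sketch of the corrected audio snippet. The `@mediapipe/tasks-audio` import and the wasm asset location are assumptions not taken from this diff; only the `FilesetResolver.forAudioTasks`, `AudioClassifier.createFromModelPath`, and renamed `classify` calls mirror the documentation above.

```
// Sketch only: the package name and wasm URL below are assumed, not part of this commit.
import {AudioClassifier, FilesetResolver} from '@mediapipe/tasks-audio';

async function classifyClip(audioData: Float32Array) {
  const audio = await FilesetResolver.forAudioTasks(
      'https://cdn.jsdelivr.net/npm/@mediapipe/tasks-audio/wasm');  // placeholder wasm path
  const audioClassifier = await AudioClassifier.createFromModelPath(audio,
      'https://storage.googleapis.com/mediapipe-tasks/audio_classifier/yamnet_audio_classifier_with_metadata.tflite');
  // Corrected method name: classify (previously misspelled "classifiy").
  return audioClassifier.classify(audioData);
}
```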
@@ -28,7 +28,7 @@ const text = await FilesetResolver.forTextTasks(
 const textClassifier = await TextClassifier.createFromModelPath(text,
     "https://storage.googleapis.com/mediapipe-tasks/text_classifier/bert_text_classifier.tflite"
 );
-const classifications = textClassifier.classifiy(textData);
+const classifications = textClassifier.classify(textData);
 ```
 
 For more information, refer to the [Text Classification](https://developers.google.com/mediapipe/solutions/text/text_classifier/web_js) documentation.
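The same pattern applies to the text snippet; again the `@mediapipe/tasks-text` import and wasm path are assumptions, while the classifier calls follow the documentation being fixed.

```
// Sketch only: the package name and wasm URL below are assumed.
import {FilesetResolver, TextClassifier} from '@mediapipe/tasks-text';

async function classifyText(textData: string) {
  const text = await FilesetResolver.forTextTasks(
      'https://cdn.jsdelivr.net/npm/@mediapipe/tasks-text/wasm');  // placeholder wasm path
  const textClassifier = await TextClassifier.createFromModelPath(text,
      'https://storage.googleapis.com/mediapipe-tasks/text_classifier/bert_text_classifier.tflite');
  // Corrected method name: classify.
  return textClassifier.classify(textData);
}
```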
@@ -56,7 +56,7 @@ const DEFAULT_OUTPUT_CONFIDENCE_MASKS = true;
  * asynchronous processing is needed, all data needs to be copied before the
  * callback returns.
  */
-export type ImageSegmenterCallack = (result: ImageSegmenterResult) => void;
+export type ImageSegmenterCallback = (result: ImageSegmenterResult) => void;
 
 /** Performs image segmentation on images. */
 export class ImageSegmenter extends VisionTaskRunner {
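A short sketch of how the renamed callback type is consumed. The `@mediapipe/tasks-vision` import path is an assumption; the copy-before-return rule restates the comment in the hunk above.

```
// Sketch only: the import path is assumed.
import {ImageSegmenterCallback, ImageSegmenterResult} from '@mediapipe/tasks-vision';

const logSegmentation: ImageSegmenterCallback = (result: ImageSegmenterResult) => {
  // The result's buffers are only guaranteed to be valid while this callback
  // runs, so copy anything that must outlive it before returning.
  console.log(result);
};
```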
@@ -208,7 +208,7 @@ export class ImageSegmenter extends VisionTaskRunner {
    * lifetime of the returned data is only guaranteed for the duration of the
    * callback.
    */
-  segment(image: ImageSource, callback: ImageSegmenterCallack): void;
+  segment(image: ImageSource, callback: ImageSegmenterCallback): void;
   /**
    * Performs image segmentation on the provided single image and invokes the
    * callback with the response. The method returns synchronously once the
@@ -224,12 +224,12 @@ export class ImageSegmenter extends VisionTaskRunner {
    */
   segment(
       image: ImageSource, imageProcessingOptions: ImageProcessingOptions,
-      callback: ImageSegmenterCallack): void;
+      callback: ImageSegmenterCallback): void;
   segment(
       image: ImageSource,
       imageProcessingOptionsOrCallback: ImageProcessingOptions|
-      ImageSegmenterCallack,
-      callback?: ImageSegmenterCallack): void {
+      ImageSegmenterCallback,
+      callback?: ImageSegmenterCallback): void {
     const imageProcessingOptions =
         typeof imageProcessingOptionsOrCallback !== 'function' ?
         imageProcessingOptionsOrCallback :
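The two public `segment` overloads above take either `(image, callback)` or `(image, imageProcessingOptions, callback)`. A hedged usage sketch, in which the segmenter instance, the input element, and the `rotationDegrees` option are illustrative assumptions:

```
// Sketch only: assumes an already-created ImageSegmenter and that
// rotationDegrees is a valid ImageProcessingOptions field.
import {ImageSegmenter, ImageSegmenterResult} from '@mediapipe/tasks-vision';

function runSegmentation(segmenter: ImageSegmenter, image: HTMLImageElement) {
  // (image, callback): default image processing options.
  segmenter.segment(image, (result: ImageSegmenterResult) => {
    // Consume the result synchronously; copy anything kept past this call.
  });
  // (image, imageProcessingOptions, callback): explicit options.
  segmenter.segment(image, {rotationDegrees: 90}, (result: ImageSegmenterResult) => {
    // Same callback contract as above.
  });
}
```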
@@ -258,7 +258,7 @@ export class ImageSegmenter extends VisionTaskRunner {
    */
   segmentForVideo(
       videoFrame: ImageSource, timestamp: number,
-      callback: ImageSegmenterCallack): void;
+      callback: ImageSegmenterCallback): void;
   /**
    * Performs image segmentation on the provided video frame and invokes the
    * callback with the response. The method returns synchronously once the
@@ -275,12 +275,12 @@ export class ImageSegmenter extends VisionTaskRunner {
    */
   segmentForVideo(
       videoFrame: ImageSource, imageProcessingOptions: ImageProcessingOptions,
-      timestamp: number, callback: ImageSegmenterCallack): void;
+      timestamp: number, callback: ImageSegmenterCallback): void;
   segmentForVideo(
       videoFrame: ImageSource,
       timestampOrImageProcessingOptions: number|ImageProcessingOptions,
-      timestampOrCallback: number|ImageSegmenterCallack,
-      callback?: ImageSegmenterCallack): void {
+      timestampOrCallback: number|ImageSegmenterCallback,
+      callback?: ImageSegmenterCallback): void {
     const imageProcessingOptions =
         typeof timestampOrImageProcessingOptions !== 'number' ?
         timestampOrImageProcessingOptions :
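For the video path, the matching call shapes are `(videoFrame, timestamp, callback)` and `(videoFrame, imageProcessingOptions, timestamp, callback)`. A sketch with placeholder inputs; in practice only one shape would be used per frame.

```
// Sketch only: placeholders throughout; the signatures follow the overloads above.
import {ImageSegmenter, ImageSegmenterResult} from '@mediapipe/tasks-vision';

function segmentFrame(segmenter: ImageSegmenter, video: HTMLVideoElement) {
  // (videoFrame, timestamp, callback)
  segmenter.segmentForVideo(video, performance.now(), (result: ImageSegmenterResult) => {
    // Per-frame result; valid only while this callback runs.
  });
}

function segmentRotatedFrame(segmenter: ImageSegmenter, video: HTMLVideoElement) {
  // (videoFrame, imageProcessingOptions, timestamp, callback)
  segmenter.segmentForVideo(
      video, {rotationDegrees: 180}, performance.now(), (result: ImageSegmenterResult) => {
        // Overload with explicit image processing options.
      });
}
```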