Fixes multiple typos in the tasks' internal files.
PiperOrigin-RevId: 568585517
This commit is contained in:
parent e5e75eac5e
commit 199b42278b
@@ -59,12 +59,12 @@ using absl::StatusCode;
     return NULL;
   }
 
-  void *allocedMemory = malloc(memSize);
-  if (!allocedMemory) {
+  void *allocatedMemory = malloc(memSize);
+  if (!allocatedMemory) {
     exit(-1);
   }
 
-  return allocedMemory;
+  return allocatedMemory;
 }
 
 + (BOOL)checkCppError:(const absl::Status &)status toError:(NSError *_Nullable *)error {

@@ -82,7 +82,7 @@ static NSString *const kExpectedErrorDomain = @"com.google.mediapipe.tasks";
   AssertEqualErrors(error, expectedError);
 }
 
-- (void)testInitWithImageSuceeds {
+- (void)testInitWithImageSucceeds {
   MPPImage *mppImage = [[MPPImage alloc] initWithUIImage:self.image error:nil];
   [self assertMPPImage:mppImage
          hasSourceType:MPPImageSourceTypeImage

@@ -739,7 +739,7 @@ public final class ImageSegmenter extends BaseVisionTaskApi {
   @AutoValue
   public abstract static class SegmentationOptions {
 
-    /** Builder fo {@link SegmentationOptions} */
+    /** Builder for {@link SegmentationOptions} */
     @AutoValue.Builder
     public abstract static class Builder {
 

@@ -289,7 +289,7 @@ public class GestureRecognizerTest {
     }
 
     @Test
-    public void recognize_successWithPreferAlowListThanDenyList() throws Exception {
+    public void recognize_successWithPreferAllowListThanDenyList() throws Exception {
       GestureRecognizerOptions options =
           GestureRecognizerOptions.builder()
               .setBaseOptions(

@@ -140,7 +140,7 @@ export class AudioClassifier extends AudioTaskRunner<AudioClassifierResult[]> {
    * @param sampleRate The sample rate in Hz of the provided audio data. If not
    *     set, defaults to the sample rate set via `setDefaultSampleRate()` or
    *     `48000` if no custom default was set.
-   * @return The classification result of the audio datas
+   * @return The classification result of the audio data
    */
  classify(audioData: Float32Array, sampleRate?: number):
      AudioClassifierResult[] {

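For context on the doc comment corrected above, here is a minimal usage sketch of the documented classify() call. It is not part of this commit: the @mediapipe/tasks-audio import, FilesetResolver.forAudioTasks(), AudioClassifier.createFromModelPath(), and the wasm/model paths are assumptions about the published Web API; only the classify(audioData, sampleRate) signature and the 48000 Hz default come from the doc comment itself.

import {AudioClassifier, FilesetResolver} from '@mediapipe/tasks-audio';

// Hedged sketch: classifies a clip of mono float samples.
async function classifyClip(samples: Float32Array): Promise<void> {
  // Assumed locations for the wasm assets and the .tflite model.
  const fileset = await FilesetResolver.forAudioTasks('/wasm');
  const classifier = await AudioClassifier.createFromModelPath(
      fileset, '/models/yamnet.tflite');

  // Passing a sample rate explicitly overrides the 48000 Hz default
  // described in the doc comment above.
  const results = classifier.classify(samples, 16000);
  for (const result of results) {
    console.log(result.classifications);
  }
  classifier.close();
}
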
@@ -342,7 +342,7 @@ export class GestureRecognizer extends VisionTaskRunner {
    * Converts raw data into a landmark, and adds it to our worldLandmarks
    * list.
    */
-  private adddJsWorldLandmarks(data: Uint8Array[]): void {
+  private addJsWorldLandmarks(data: Uint8Array[]): void {
     for (const binaryProto of data) {
       const handWorldLandmarksProto =
           LandmarkList.deserializeBinary(binaryProto);
@@ -396,7 +396,7 @@ export class GestureRecognizer extends VisionTaskRunner {
 
     this.graphRunner.attachProtoVectorListener(
         WORLD_LANDMARKS_STREAM, (binaryProto, timestamp) => {
-          this.adddJsWorldLandmarks(binaryProto);
+          this.addJsWorldLandmarks(binaryProto);
           this.setLatestOutputTimestamp(timestamp);
         });
     this.graphRunner.attachEmptyPacketListener(

@@ -277,7 +277,7 @@ export class HandLandmarker extends VisionTaskRunner {
    * Converts raw data into a world landmark, and adds it to our worldLandmarks
    * list.
    */
-  private adddJsWorldLandmarks(data: Uint8Array[]): void {
+  private addJsWorldLandmarks(data: Uint8Array[]): void {
     for (const binaryProto of data) {
       const handWorldLandmarksProto =
           LandmarkList.deserializeBinary(binaryProto);
@@ -322,7 +322,7 @@ export class HandLandmarker extends VisionTaskRunner {
 
     this.graphRunner.attachProtoVectorListener(
         WORLD_LANDMARKS_STREAM, (binaryProto, timestamp) => {
-          this.adddJsWorldLandmarks(binaryProto);
+          this.addJsWorldLandmarks(binaryProto);
           this.setLatestOutputTimestamp(timestamp);
         });
     this.graphRunner.attachEmptyPacketListener(

@@ -403,7 +403,7 @@ export class PoseLandmarker extends VisionTaskRunner {
    * Converts raw data into a world landmark, and adds it to our
    * worldLandmarks list.
    */
-  private adddJsWorldLandmarks(data: Uint8Array[]): void {
+  private addJsWorldLandmarks(data: Uint8Array[]): void {
     this.worldLandmarks = [];
     for (const binaryProto of data) {
       const poseWorldLandmarksProto =
@@ -452,7 +452,7 @@ export class PoseLandmarker extends VisionTaskRunner {
 
     this.graphRunner.attachProtoVectorListener(
         WORLD_LANDMARKS_STREAM, (binaryProto, timestamp) => {
-          this.adddJsWorldLandmarks(binaryProto);
+          this.addJsWorldLandmarks(binaryProto);
           this.setLatestOutputTimestamp(timestamp);
         });
     this.graphRunner.attachEmptyPacketListener(

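The three addJsWorldLandmarks helpers renamed above share one conversion pattern: deserialize each binary LandmarkList proto and append plain {x, y, z} landmarks to a worldLandmarks list. The sketch below is an illustrative reconstruction, not the tasks' actual code; the landmark_pb import path and the getLandmarkList()/getX()/getY()/getZ() accessors are assumptions about the generated protobuf-JS bindings, while LandmarkList.deserializeBinary() and the Uint8Array[] input appear in the hunks themselves.

// Assumed import path for the generated protobuf-JS bindings.
import {LandmarkList} from '../../../../framework/formats/landmark_pb';

interface WorldLandmark {
  x: number;
  y: number;
  z: number;
}

// Mirrors what the renamed addJsWorldLandmarks helpers do: each
// serialized LandmarkList (one per hand/pose) becomes one array of
// plain JS landmarks.
function toWorldLandmarks(data: Uint8Array[]): WorldLandmark[][] {
  const worldLandmarks: WorldLandmark[][] = [];
  for (const binaryProto of data) {
    const landmarkListProto = LandmarkList.deserializeBinary(binaryProto);
    worldLandmarks.push(landmarkListProto.getLandmarkList().map(
        // Accessor names assume standard protobuf-JS codegen.
        (landmark) => ({x: landmark.getX(), y: landmark.getY(), z: landmark.getZ()})));
  }
  return worldLandmarks;
}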