diff --git a/mediapipe/tasks/web/audio/audio_classifier/audio_classifier.ts b/mediapipe/tasks/web/audio/audio_classifier/audio_classifier.ts index 4e14ae437..907461d01 100644 --- a/mediapipe/tasks/web/audio/audio_classifier/audio_classifier.ts +++ b/mediapipe/tasks/web/audio/audio_classifier/audio_classifier.ts @@ -60,9 +60,8 @@ export class AudioClassifier extends AudioTaskRunner { static createFromOptions( wasmFileset: WasmFileset, audioClassifierOptions: AudioClassifierOptions): Promise { - return AudioTaskRunner.createInstance( - AudioClassifier, /* initializeCanvas= */ false, wasmFileset, - audioClassifierOptions); + return AudioTaskRunner.createAudioInstance( + AudioClassifier, wasmFileset, audioClassifierOptions); } /** @@ -75,9 +74,8 @@ export class AudioClassifier extends AudioTaskRunner { static createFromModelBuffer( wasmFileset: WasmFileset, modelAssetBuffer: Uint8Array): Promise { - return AudioTaskRunner.createInstance( - AudioClassifier, /* initializeCanvas= */ false, wasmFileset, - {baseOptions: {modelAssetBuffer}}); + return AudioTaskRunner.createAudioInstance( + AudioClassifier, wasmFileset, {baseOptions: {modelAssetBuffer}}); } /** @@ -91,7 +89,7 @@ export class AudioClassifier extends AudioTaskRunner { wasmFileset: WasmFileset, modelAssetPath: string): Promise { return AudioTaskRunner.createInstance( - AudioClassifier, /* initializeCanvas= */ false, wasmFileset, + AudioClassifier, /* canvas= */ null, wasmFileset, {baseOptions: {modelAssetPath}}); } diff --git a/mediapipe/tasks/web/audio/audio_embedder/audio_embedder.ts b/mediapipe/tasks/web/audio/audio_embedder/audio_embedder.ts index e6d659b9b..35531382a 100644 --- a/mediapipe/tasks/web/audio/audio_embedder/audio_embedder.ts +++ b/mediapipe/tasks/web/audio/audio_embedder/audio_embedder.ts @@ -60,9 +60,8 @@ export class AudioEmbedder extends AudioTaskRunner { static createFromOptions( wasmFileset: WasmFileset, audioEmbedderOptions: AudioEmbedderOptions): Promise { - return 
AudioTaskRunner.createInstance( - AudioEmbedder, /* initializeCanvas= */ false, wasmFileset, - audioEmbedderOptions); + return AudioTaskRunner.createAudioInstance( + AudioEmbedder, wasmFileset, audioEmbedderOptions); } /** @@ -75,9 +74,8 @@ export class AudioEmbedder extends AudioTaskRunner { static createFromModelBuffer( wasmFileset: WasmFileset, modelAssetBuffer: Uint8Array): Promise { - return AudioTaskRunner.createInstance( - AudioEmbedder, /* initializeCanvas= */ false, wasmFileset, - {baseOptions: {modelAssetBuffer}}); + return AudioTaskRunner.createAudioInstance( + AudioEmbedder, wasmFileset, {baseOptions: {modelAssetBuffer}}); } /** @@ -90,9 +88,8 @@ export class AudioEmbedder extends AudioTaskRunner { static createFromModelPath( wasmFileset: WasmFileset, modelAssetPath: string): Promise { - return AudioTaskRunner.createInstance( - AudioEmbedder, /* initializeCanvas= */ false, wasmFileset, - {baseOptions: {modelAssetPath}}); + return AudioTaskRunner.createAudioInstance( + AudioEmbedder, wasmFileset, {baseOptions: {modelAssetPath}}); } /** @hideconstructor */ diff --git a/mediapipe/tasks/web/audio/core/BUILD b/mediapipe/tasks/web/audio/core/BUILD index cea689838..9b4219313 100644 --- a/mediapipe/tasks/web/audio/core/BUILD +++ b/mediapipe/tasks/web/audio/core/BUILD @@ -7,5 +7,9 @@ package(default_visibility = ["//mediapipe/tasks:internal"]) mediapipe_ts_library( name = "audio_task_runner", srcs = ["audio_task_runner.ts"], - deps = ["//mediapipe/tasks/web/core:task_runner"], + deps = [ + "//mediapipe/tasks/web/core", + "//mediapipe/tasks/web/core:task_runner", + "//mediapipe/web/graph_runner:graph_runner_ts", + ], ) diff --git a/mediapipe/tasks/web/audio/core/audio_task_runner.ts b/mediapipe/tasks/web/audio/core/audio_task_runner.ts index 2c327f1ab..7462a31fa 100644 --- a/mediapipe/tasks/web/audio/core/audio_task_runner.ts +++ b/mediapipe/tasks/web/audio/core/audio_task_runner.ts @@ -15,11 +15,22 @@ */ import {TaskRunner} from 
'../../../../tasks/web/core/task_runner'; +import {TaskRunnerOptions} from '../../../../tasks/web/core/task_runner_options'; +import {WasmFileset} from '../../../../tasks/web/core/wasm_fileset'; +import {WasmMediaPipeConstructor} from '../../../../web/graph_runner/graph_runner'; + /** Base class for all MediaPipe Audio Tasks. */ export abstract class AudioTaskRunner extends TaskRunner { private defaultSampleRate = 48000; + protected static async createAudioInstance>( + type: WasmMediaPipeConstructor, fileset: WasmFileset, + options: TaskRunnerOptions): Promise { + return TaskRunner.createInstance( + type, /* canvas= */ null, fileset, options); + } + /** * Sets the sample rate for API calls that omit an explicit sample rate. * `48000` is used as a default if this method is not called. diff --git a/mediapipe/tasks/web/core/task_runner.ts b/mediapipe/tasks/web/core/task_runner.ts index 68208c970..8f69423fb 100644 --- a/mediapipe/tasks/web/core/task_runner.ts +++ b/mediapipe/tasks/web/core/task_runner.ts @@ -30,6 +30,7 @@ const NO_ASSETS = undefined; // tslint:disable-next-line:enforce-name-casing const CachedGraphRunnerType = SupportModelResourcesGraphService(GraphRunner); + /** * An implementation of the GraphRunner that exposes the resource graph * service. @@ -42,7 +43,8 @@ export class CachedGraphRunner extends CachedGraphRunnerType {} * @return A fully instantiated instance of `T`. */ export async function createTaskRunner( - type: WasmMediaPipeConstructor, initializeCanvas: boolean, + type: WasmMediaPipeConstructor, + canvas: HTMLCanvasElement|OffscreenCanvas|null|undefined, fileset: WasmFileset, options: TaskRunnerOptions): Promise { const fileLocator: FileLocator = { locateFile() { @@ -51,12 +53,6 @@ export async function createTaskRunner( } }; - // Initialize a canvas if requested. If OffscreenCanvas is available, we - // let the graph runner initialize it by passing `undefined`. - const canvas = initializeCanvas ? (typeof OffscreenCanvas === 'undefined' ? 
- document.createElement('canvas') : - undefined) : - null; const instance = await createMediaPipeLib( type, fileset.wasmLoaderPath, NO_ASSETS, canvas, fileLocator); await instance.setOptions(options); @@ -75,9 +71,10 @@ export abstract class TaskRunner { * @return A fully instantiated instance of `T`. */ protected static async createInstance( - type: WasmMediaPipeConstructor, initializeCanvas: boolean, + type: WasmMediaPipeConstructor, + canvas: HTMLCanvasElement|OffscreenCanvas|null|undefined, fileset: WasmFileset, options: TaskRunnerOptions): Promise { - return createTaskRunner(type, initializeCanvas, fileset, options); + return createTaskRunner(type, canvas, fileset, options); } /** @hideconstructor protected */ diff --git a/mediapipe/tasks/web/text/text_classifier/text_classifier.ts b/mediapipe/tasks/web/text/text_classifier/text_classifier.ts index 2495bf5a9..e9a2940d0 100644 --- a/mediapipe/tasks/web/text/text_classifier/text_classifier.ts +++ b/mediapipe/tasks/web/text/text_classifier/text_classifier.ts @@ -58,7 +58,7 @@ export class TextClassifier extends TaskRunner { wasmFileset: WasmFileset, textClassifierOptions: TextClassifierOptions): Promise { return TaskRunner.createInstance( - TextClassifier, /* initializeCanvas= */ false, wasmFileset, + TextClassifier, /* canvas= */ null, wasmFileset, textClassifierOptions); } @@ -73,7 +73,7 @@ export class TextClassifier extends TaskRunner { wasmFileset: WasmFileset, modelAssetBuffer: Uint8Array): Promise { return TaskRunner.createInstance( - TextClassifier, /* initializeCanvas= */ false, wasmFileset, + TextClassifier, /* canvas= */ null, wasmFileset, {baseOptions: {modelAssetBuffer}}); } @@ -88,7 +88,7 @@ export class TextClassifier extends TaskRunner { wasmFileset: WasmFileset, modelAssetPath: string): Promise { return TaskRunner.createInstance( - TextClassifier, /* initializeCanvas= */ false, wasmFileset, + TextClassifier, /* canvas= */ null, wasmFileset, {baseOptions: {modelAssetPath}}); } diff --git 
a/mediapipe/tasks/web/text/text_embedder/text_embedder.ts b/mediapipe/tasks/web/text/text_embedder/text_embedder.ts index 3b7f4f7e4..66aca4a67 100644 --- a/mediapipe/tasks/web/text/text_embedder/text_embedder.ts +++ b/mediapipe/tasks/web/text/text_embedder/text_embedder.ts @@ -62,8 +62,7 @@ export class TextEmbedder extends TaskRunner { wasmFileset: WasmFileset, textEmbedderOptions: TextEmbedderOptions): Promise { return TaskRunner.createInstance( - TextEmbedder, /* initializeCanvas= */ false, wasmFileset, - textEmbedderOptions); + TextEmbedder, /* canvas= */ null, wasmFileset, textEmbedderOptions); } /** @@ -77,7 +76,7 @@ export class TextEmbedder extends TaskRunner { wasmFileset: WasmFileset, modelAssetBuffer: Uint8Array): Promise { return TaskRunner.createInstance( - TextEmbedder, /* initializeCanvas= */ false, wasmFileset, + TextEmbedder, /* canvas= */ null, wasmFileset, {baseOptions: {modelAssetBuffer}}); } @@ -92,7 +91,7 @@ export class TextEmbedder extends TaskRunner { wasmFileset: WasmFileset, modelAssetPath: string): Promise { return TaskRunner.createInstance( - TextEmbedder, /* initializeCanvas= */ false, wasmFileset, + TextEmbedder, /* canvas= */ null, wasmFileset, {baseOptions: {modelAssetPath}}); } diff --git a/mediapipe/tasks/web/vision/core/vision_task_options.d.ts b/mediapipe/tasks/web/vision/core/vision_task_options.d.ts index 44b1660ff..72bc2efb1 100644 --- a/mediapipe/tasks/web/vision/core/vision_task_options.d.ts +++ b/mediapipe/tasks/web/vision/core/vision_task_options.d.ts @@ -25,6 +25,14 @@ export type RunningMode = 'IMAGE'|'VIDEO'; /** The options for configuring a MediaPipe vision task. */ export declare interface VisionTaskOptions extends TaskRunnerOptions { + /** + * The canvas element to bind textures to. This has to be set for GPU + * processing. The task will initialize a WebGL context and throw an error if + * this fails (e.g. if you have already initialized a different type of + * context). 
+ */ + canvas?: HTMLCanvasElement|OffscreenCanvas; + /** * The running mode of the task. Default to the image mode. * Vision tasks have two running modes: diff --git a/mediapipe/tasks/web/vision/core/vision_task_runner.test.ts b/mediapipe/tasks/web/vision/core/vision_task_runner.test.ts index 4eb51afdb..59e932972 100644 --- a/mediapipe/tasks/web/vision/core/vision_task_runner.test.ts +++ b/mediapipe/tasks/web/vision/core/vision_task_runner.test.ts @@ -36,6 +36,8 @@ const IMAGE = {} as unknown as HTMLImageElement; const TIMESTAMP = 42; class VisionTaskRunnerFake extends VisionTaskRunner { + override graphRunner!: VisionGraphRunner; + baseOptions = new BaseOptionsProto(); fakeGraphRunner: jasmine.SpyObj; expectedImageSource?: ImageSource; @@ -46,7 +48,7 @@ class VisionTaskRunnerFake extends VisionTaskRunner { jasmine.createSpyObj([ 'addProtoToStream', 'addGpuBufferAsImageToStream', 'setAutoRenderToScreen', 'registerModelResourcesGraphService', - 'finishProcessing' + 'finishProcessing', 'wasmModule' ]), IMAGE_STREAM, NORM_RECT_STREAM, roiAllowed); @@ -72,7 +74,7 @@ class VisionTaskRunnerFake extends VisionTaskRunner { expect(imageSource).toBe(this.expectedImageSource!); }); - // SetOptions with a modelAssetBuffer runs synchonously + // SetOptions with a modelAssetBuffer runs synchronously void this.setOptions({baseOptions: {modelAssetBuffer: new Uint8Array([])}}); } @@ -165,6 +167,24 @@ describe('VisionTaskRunner', () => { }).toThrowError(/Task is not initialized with video mode./); }); + it('validates that the canvas cannot be changed', async () => { + if (typeof OffscreenCanvas === 'undefined') { + console.log('Test is not supported under Node.'); + return; + } + + const visionTaskRunner = new VisionTaskRunnerFake(); + const canvas = new OffscreenCanvas(1, 1); + visionTaskRunner.graphRunner.wasmModule.canvas = canvas; + expect(() => { + visionTaskRunner.setOptions({canvas}); + }).not.toThrow(); + + expect(() => { + visionTaskRunner.setOptions({canvas: new 
OffscreenCanvas(2, 2)}); + }).toThrowError(/You must create a new task to reset the canvas./); + }); + it('sends packets to graph', async () => { const visionTaskRunner = new VisionTaskRunnerFake(); await visionTaskRunner.setOptions({runningMode: 'VIDEO'}); diff --git a/mediapipe/tasks/web/vision/core/vision_task_runner.ts b/mediapipe/tasks/web/vision/core/vision_task_runner.ts index f19b9f2df..9abbf0978 100644 --- a/mediapipe/tasks/web/vision/core/vision_task_runner.ts +++ b/mediapipe/tasks/web/vision/core/vision_task_runner.ts @@ -16,8 +16,9 @@ import {NormalizedRect} from '../../../../framework/formats/rect_pb'; import {TaskRunner} from '../../../../tasks/web/core/task_runner'; +import {WasmFileset} from '../../../../tasks/web/core/wasm_fileset'; import {ImageProcessingOptions} from '../../../../tasks/web/vision/core/image_processing_options'; -import {GraphRunner, ImageSource} from '../../../../web/graph_runner/graph_runner'; +import {GraphRunner, ImageSource, WasmMediaPipeConstructor} from '../../../../web/graph_runner/graph_runner'; import {SupportImage, WasmImage} from '../../../../web/graph_runner/graph_runner_image_lib'; import {SupportModelResourcesGraphService} from '../../../../web/graph_runner/register_model_resources_graph_service'; @@ -32,8 +33,33 @@ export class VisionGraphRunner extends GraphRunnerVisionType {} // The OSS JS API does not support the builder pattern. // tslint:disable:jspb-use-builder-pattern + +/** + * Creates a canvas for a MediaPipe vision task. Returns `undefined` if the + * GraphRunner should create its own canvas. + */ +function createCanvas(): HTMLCanvasElement|OffscreenCanvas|undefined { + // Returns an HTML canvas or `undefined` if OffscreenCanvas is available + // (since the graph runner can initialize its own OffscreenCanvas). + return typeof OffscreenCanvas === 'undefined' ? + document.createElement('canvas') : + undefined; +} + /** Base class for all MediaPipe Vision Tasks. 
*/ export abstract class VisionTaskRunner extends TaskRunner { + protected static async createVisionInstance( + type: WasmMediaPipeConstructor, fileset: WasmFileset, + options: VisionTaskOptions): Promise { + if (options.baseOptions?.delegate === 'GPU') { + if (!options.canvas) { + throw new Error('You must specify a canvas for GPU processing.'); + } + } + const canvas = options.canvas ?? createCanvas(); + return TaskRunner.createInstance(type, canvas, fileset, options); + } + /** * Constructor to initialize a `VisionTaskRunner`. * @@ -62,6 +88,13 @@ export abstract class VisionTaskRunner extends TaskRunner { !!options.runningMode && options.runningMode !== 'IMAGE'; this.baseOptions.setUseStreamMode(useStreamMode); } + + if ('canvas' in options) { + if (this.graphRunner.wasmModule.canvas !== options.canvas) { + throw new Error('You must create a new task to reset the canvas.'); + } + } + return super.applyOptions(options); } diff --git a/mediapipe/tasks/web/vision/face_stylizer/face_stylizer.ts b/mediapipe/tasks/web/vision/face_stylizer/face_stylizer.ts index 47a4ffdfd..262b20434 100644 --- a/mediapipe/tasks/web/vision/face_stylizer/face_stylizer.ts +++ b/mediapipe/tasks/web/vision/face_stylizer/face_stylizer.ts @@ -58,9 +58,8 @@ export class FaceStylizer extends VisionTaskRunner { static createFromOptions( wasmFileset: WasmFileset, faceStylizerOptions: FaceStylizerOptions): Promise { - return VisionTaskRunner.createInstance( - FaceStylizer, /* initializeCanvas= */ true, wasmFileset, - faceStylizerOptions); + return VisionTaskRunner.createVisionInstance( + FaceStylizer, wasmFileset, faceStylizerOptions); } /** @@ -73,9 +72,8 @@ export class FaceStylizer extends VisionTaskRunner { static createFromModelBuffer( wasmFileset: WasmFileset, modelAssetBuffer: Uint8Array): Promise { - return VisionTaskRunner.createInstance( - FaceStylizer, /* initializeCanvas= */ true, wasmFileset, - {baseOptions: {modelAssetBuffer}}); + return VisionTaskRunner.createVisionInstance( + 
FaceStylizer, wasmFileset, {baseOptions: {modelAssetBuffer}}); } /** @@ -88,9 +86,8 @@ export class FaceStylizer extends VisionTaskRunner { static createFromModelPath( wasmFileset: WasmFileset, modelAssetPath: string): Promise { - return VisionTaskRunner.createInstance( - FaceStylizer, /* initializeCanvas= */ true, wasmFileset, - {baseOptions: {modelAssetPath}}); + return VisionTaskRunner.createVisionInstance( + FaceStylizer, wasmFileset, {baseOptions: {modelAssetPath}}); } /** @hideconstructor */ diff --git a/mediapipe/tasks/web/vision/gesture_recognizer/gesture_recognizer.ts b/mediapipe/tasks/web/vision/gesture_recognizer/gesture_recognizer.ts index 3271191ca..4927d3632 100644 --- a/mediapipe/tasks/web/vision/gesture_recognizer/gesture_recognizer.ts +++ b/mediapipe/tasks/web/vision/gesture_recognizer/gesture_recognizer.ts @@ -85,9 +85,8 @@ export class GestureRecognizer extends VisionTaskRunner { wasmFileset: WasmFileset, gestureRecognizerOptions: GestureRecognizerOptions): Promise { - return VisionTaskRunner.createInstance( - GestureRecognizer, /* initializeCanvas= */ true, wasmFileset, - gestureRecognizerOptions); + return VisionTaskRunner.createVisionInstance( + GestureRecognizer, wasmFileset, gestureRecognizerOptions); } /** @@ -100,9 +99,8 @@ export class GestureRecognizer extends VisionTaskRunner { static createFromModelBuffer( wasmFileset: WasmFileset, modelAssetBuffer: Uint8Array): Promise { - return VisionTaskRunner.createInstance( - GestureRecognizer, /* initializeCanvas= */ true, wasmFileset, - {baseOptions: {modelAssetBuffer}}); + return VisionTaskRunner.createVisionInstance( + GestureRecognizer, wasmFileset, {baseOptions: {modelAssetBuffer}}); } /** @@ -115,9 +113,8 @@ export class GestureRecognizer extends VisionTaskRunner { static createFromModelPath( wasmFileset: WasmFileset, modelAssetPath: string): Promise { - return VisionTaskRunner.createInstance( - GestureRecognizer, /* initializeCanvas= */ true, wasmFileset, - {baseOptions: 
{modelAssetPath}}); + return VisionTaskRunner.createVisionInstance( + GestureRecognizer, wasmFileset, {baseOptions: {modelAssetPath}}); } /** @hideconstructor */ diff --git a/mediapipe/tasks/web/vision/hand_landmarker/hand_landmarker.ts b/mediapipe/tasks/web/vision/hand_landmarker/hand_landmarker.ts index 32684faff..1978bb061 100644 --- a/mediapipe/tasks/web/vision/hand_landmarker/hand_landmarker.ts +++ b/mediapipe/tasks/web/vision/hand_landmarker/hand_landmarker.ts @@ -75,9 +75,8 @@ export class HandLandmarker extends VisionTaskRunner { static createFromOptions( wasmFileset: WasmFileset, handLandmarkerOptions: HandLandmarkerOptions): Promise { - return VisionTaskRunner.createInstance( - HandLandmarker, /* initializeCanvas= */ true, wasmFileset, - handLandmarkerOptions); + return VisionTaskRunner.createVisionInstance( + HandLandmarker, wasmFileset, handLandmarkerOptions); } /** @@ -90,9 +89,8 @@ export class HandLandmarker extends VisionTaskRunner { static createFromModelBuffer( wasmFileset: WasmFileset, modelAssetBuffer: Uint8Array): Promise { - return VisionTaskRunner.createInstance( - HandLandmarker, /* initializeCanvas= */ true, wasmFileset, - {baseOptions: {modelAssetBuffer}}); + return VisionTaskRunner.createVisionInstance( + HandLandmarker, wasmFileset, {baseOptions: {modelAssetBuffer}}); } /** @@ -105,9 +103,8 @@ export class HandLandmarker extends VisionTaskRunner { static createFromModelPath( wasmFileset: WasmFileset, modelAssetPath: string): Promise { - return VisionTaskRunner.createInstance( - HandLandmarker, /* initializeCanvas= */ true, wasmFileset, - {baseOptions: {modelAssetPath}}); + return VisionTaskRunner.createVisionInstance( + HandLandmarker, wasmFileset, {baseOptions: {modelAssetPath}}); } /** @hideconstructor */ diff --git a/mediapipe/tasks/web/vision/image_classifier/image_classifier.ts b/mediapipe/tasks/web/vision/image_classifier/image_classifier.ts index 70ce93aff..5cd690816 100644 --- 
a/mediapipe/tasks/web/vision/image_classifier/image_classifier.ts +++ b/mediapipe/tasks/web/vision/image_classifier/image_classifier.ts @@ -60,9 +60,8 @@ export class ImageClassifier extends VisionTaskRunner { static createFromOptions( wasmFileset: WasmFileset, imageClassifierOptions: ImageClassifierOptions): Promise { - return VisionTaskRunner.createInstance( - ImageClassifier, /* initializeCanvas= */ true, wasmFileset, - imageClassifierOptions); + return VisionTaskRunner.createVisionInstance( + ImageClassifier, wasmFileset, imageClassifierOptions); } /** @@ -75,9 +74,8 @@ export class ImageClassifier extends VisionTaskRunner { static createFromModelBuffer( wasmFileset: WasmFileset, modelAssetBuffer: Uint8Array): Promise { - return VisionTaskRunner.createInstance( - ImageClassifier, /* initializeCanvas= */ true, wasmFileset, - {baseOptions: {modelAssetBuffer}}); + return VisionTaskRunner.createVisionInstance( + ImageClassifier, wasmFileset, {baseOptions: {modelAssetBuffer}}); } /** @@ -90,9 +88,8 @@ export class ImageClassifier extends VisionTaskRunner { static createFromModelPath( wasmFileset: WasmFileset, modelAssetPath: string): Promise { - return VisionTaskRunner.createInstance( - ImageClassifier, /* initializeCanvas= */ true, wasmFileset, - {baseOptions: {modelAssetPath}}); + return VisionTaskRunner.createVisionInstance( + ImageClassifier, wasmFileset, {baseOptions: {modelAssetPath}}); } /** @hideconstructor */ diff --git a/mediapipe/tasks/web/vision/image_embedder/image_embedder.ts b/mediapipe/tasks/web/vision/image_embedder/image_embedder.ts index 0c7d8866b..229647e47 100644 --- a/mediapipe/tasks/web/vision/image_embedder/image_embedder.ts +++ b/mediapipe/tasks/web/vision/image_embedder/image_embedder.ts @@ -63,9 +63,8 @@ export class ImageEmbedder extends VisionTaskRunner { static createFromOptions( wasmFileset: WasmFileset, imageEmbedderOptions: ImageEmbedderOptions): Promise { - return VisionTaskRunner.createInstance( - ImageEmbedder, /* 
initializeCanvas= */ true, wasmFileset, - imageEmbedderOptions); + return VisionTaskRunner.createVisionInstance( + ImageEmbedder, wasmFileset, imageEmbedderOptions); } /** @@ -78,9 +77,8 @@ export class ImageEmbedder extends VisionTaskRunner { static createFromModelBuffer( wasmFileset: WasmFileset, modelAssetBuffer: Uint8Array): Promise { - return VisionTaskRunner.createInstance( - ImageEmbedder, /* initializeCanvas= */ true, wasmFileset, - {baseOptions: {modelAssetBuffer}}); + return VisionTaskRunner.createVisionInstance( + ImageEmbedder, wasmFileset, {baseOptions: {modelAssetBuffer}}); } /** @@ -93,9 +91,8 @@ export class ImageEmbedder extends VisionTaskRunner { static createFromModelPath( wasmFileset: WasmFileset, modelAssetPath: string): Promise { - return VisionTaskRunner.createInstance( - ImageEmbedder, /* initializeCanvas= */ true, wasmFileset, - {baseOptions: {modelAssetPath}}); + return VisionTaskRunner.createVisionInstance( + ImageEmbedder, wasmFileset, {baseOptions: {modelAssetPath}}); } /** @hideconstructor */ diff --git a/mediapipe/tasks/web/vision/image_segmenter/image_segmenter.ts b/mediapipe/tasks/web/vision/image_segmenter/image_segmenter.ts index cb192b0ce..3690fd855 100644 --- a/mediapipe/tasks/web/vision/image_segmenter/image_segmenter.ts +++ b/mediapipe/tasks/web/vision/image_segmenter/image_segmenter.ts @@ -64,9 +64,8 @@ export class ImageSegmenter extends VisionTaskRunner { static createFromOptions( wasmFileset: WasmFileset, imageSegmenterOptions: ImageSegmenterOptions): Promise { - return VisionTaskRunner.createInstance( - ImageSegmenter, /* initializeCanvas= */ true, wasmFileset, - imageSegmenterOptions); + return VisionTaskRunner.createVisionInstance( + ImageSegmenter, wasmFileset, imageSegmenterOptions); } /** @@ -79,9 +78,8 @@ export class ImageSegmenter extends VisionTaskRunner { static createFromModelBuffer( wasmFileset: WasmFileset, modelAssetBuffer: Uint8Array): Promise { - return VisionTaskRunner.createInstance( - ImageSegmenter, /* 
initializeCanvas= */ true, wasmFileset, - {baseOptions: {modelAssetBuffer}}); + return VisionTaskRunner.createVisionInstance( + ImageSegmenter, wasmFileset, {baseOptions: {modelAssetBuffer}}); } /** @@ -94,9 +92,8 @@ export class ImageSegmenter extends VisionTaskRunner { static createFromModelPath( wasmFileset: WasmFileset, modelAssetPath: string): Promise { - return VisionTaskRunner.createInstance( - ImageSegmenter, /* initializeCanvas= */ true, wasmFileset, - {baseOptions: {modelAssetPath}}); + return VisionTaskRunner.createVisionInstance( + ImageSegmenter, wasmFileset, {baseOptions: {modelAssetPath}}); } /** @hideconstructor */ diff --git a/mediapipe/tasks/web/vision/interactive_segmenter/interactive_segmenter.ts b/mediapipe/tasks/web/vision/interactive_segmenter/interactive_segmenter.ts index 1499a4c0c..ddcc7e592 100644 --- a/mediapipe/tasks/web/vision/interactive_segmenter/interactive_segmenter.ts +++ b/mediapipe/tasks/web/vision/interactive_segmenter/interactive_segmenter.ts @@ -87,9 +87,8 @@ export class InteractiveSegmenter extends VisionTaskRunner { wasmFileset: WasmFileset, interactiveSegmenterOptions: InteractiveSegmenterOptions): Promise { - return VisionTaskRunner.createInstance( - InteractiveSegmenter, /* initializeCanvas= */ true, wasmFileset, - interactiveSegmenterOptions); + return VisionTaskRunner.createVisionInstance( + InteractiveSegmenter, wasmFileset, interactiveSegmenterOptions); } /** @@ -103,9 +102,8 @@ export class InteractiveSegmenter extends VisionTaskRunner { static createFromModelBuffer( wasmFileset: WasmFileset, modelAssetBuffer: Uint8Array): Promise { - return VisionTaskRunner.createInstance( - InteractiveSegmenter, /* initializeCanvas= */ true, wasmFileset, - {baseOptions: {modelAssetBuffer}}); + return VisionTaskRunner.createVisionInstance( + InteractiveSegmenter, wasmFileset, {baseOptions: {modelAssetBuffer}}); } /** @@ -119,9 +117,8 @@ export class InteractiveSegmenter extends VisionTaskRunner { static createFromModelPath( 
wasmFileset: WasmFileset, modelAssetPath: string): Promise { - return VisionTaskRunner.createInstance( - InteractiveSegmenter, /* initializeCanvas= */ true, wasmFileset, - {baseOptions: {modelAssetPath}}); + return VisionTaskRunner.createVisionInstance( + InteractiveSegmenter, wasmFileset, {baseOptions: {modelAssetPath}}); } /** @hideconstructor */ diff --git a/mediapipe/tasks/web/vision/object_detector/object_detector.ts b/mediapipe/tasks/web/vision/object_detector/object_detector.ts index a65b64e91..42b62c9e2 100644 --- a/mediapipe/tasks/web/vision/object_detector/object_detector.ts +++ b/mediapipe/tasks/web/vision/object_detector/object_detector.ts @@ -59,9 +59,8 @@ export class ObjectDetector extends VisionTaskRunner { static createFromOptions( wasmFileset: WasmFileset, objectDetectorOptions: ObjectDetectorOptions): Promise { - return VisionTaskRunner.createInstance( - ObjectDetector, /* initializeCanvas= */ true, wasmFileset, - objectDetectorOptions); + return VisionTaskRunner.createVisionInstance( + ObjectDetector, wasmFileset, objectDetectorOptions); } /** @@ -74,9 +73,8 @@ export class ObjectDetector extends VisionTaskRunner { static createFromModelBuffer( wasmFileset: WasmFileset, modelAssetBuffer: Uint8Array): Promise { - return VisionTaskRunner.createInstance( - ObjectDetector, /* initializeCanvas= */ true, wasmFileset, - {baseOptions: {modelAssetBuffer}}); + return VisionTaskRunner.createVisionInstance( + ObjectDetector, wasmFileset, {baseOptions: {modelAssetBuffer}}); } /** @@ -89,9 +87,8 @@ export class ObjectDetector extends VisionTaskRunner { static async createFromModelPath( wasmFileset: WasmFileset, modelAssetPath: string): Promise { - return VisionTaskRunner.createInstance( - ObjectDetector, /* initializeCanvas= */ true, wasmFileset, - {baseOptions: {modelAssetPath}}); + return VisionTaskRunner.createVisionInstance( + ObjectDetector, wasmFileset, {baseOptions: {modelAssetPath}}); } /** @hideconstructor */ diff --git 
a/mediapipe/web/graph_runner/graph_runner.ts b/mediapipe/web/graph_runner/graph_runner.ts index e2b1684a0..afa9c7ebb 100644 --- a/mediapipe/web/graph_runner/graph_runner.ts +++ b/mediapipe/web/graph_runner/graph_runner.ts @@ -352,10 +352,15 @@ export class GraphRunner { } else { this.wasmModule._bindTextureToStream(streamNamePtr); } - const gl: any = - this.wasmModule.canvas.getContext('webgl2') || - this.wasmModule.canvas.getContext('webgl'); - console.assert(gl); + const gl = + (this.wasmModule.canvas.getContext('webgl2') || + this.wasmModule.canvas.getContext('webgl')) as WebGL2RenderingContext | + WebGLRenderingContext | null; + if (!gl) { + throw new Error( + 'Failed to obtain WebGL context from the provided canvas. ' + + '`getContext()` should only be invoked with `webgl` or `webgl2`.'); + } gl.texImage2D( gl.TEXTURE_2D, 0, gl.RGBA, gl.RGBA, gl.UNSIGNED_BYTE, imageSource);