PiperOrigin-RevId: 563843599
This commit is contained in:
Sebastian Schmidt 2023-09-08 13:53:11 -07:00 committed by Copybara-Service
parent b89ca28fe1
commit 6df05b7d2a
23 changed files with 143 additions and 217 deletions

View File

@ -51,6 +51,7 @@ export class AudioClassifier extends AudioTaskRunner<AudioClassifierResult[]> {
/**
* Initializes the Wasm runtime and creates a new audio classifier from the
* provided options.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param audioClassifierOptions The options for the audio classifier. Note
@ -67,6 +68,7 @@ export class AudioClassifier extends AudioTaskRunner<AudioClassifierResult[]> {
/**
* Initializes the Wasm runtime and creates a new audio classifier based on
* the provided model asset buffer.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetBuffer A binary representation of the model.
@ -81,6 +83,7 @@ export class AudioClassifier extends AudioTaskRunner<AudioClassifierResult[]> {
/**
* Initializes the Wasm runtime and creates a new audio classifier based on
* the path to the model asset.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetPath The path to the model asset.
@ -116,6 +119,7 @@ export class AudioClassifier extends AudioTaskRunner<AudioClassifierResult[]> {
* You can reset an option back to its default value by explicitly setting it
* to `undefined`.
*
* @export
* @param options The options for the audio classifier.
*/
override setOptions(options: AudioClassifierOptions): Promise<void> {
@ -130,6 +134,7 @@ export class AudioClassifier extends AudioTaskRunner<AudioClassifierResult[]> {
* Performs audio classification on the provided audio clip and waits
* synchronously for the response.
*
* @export
* @param audioData An array of raw audio capture data, like from a call to
* `getChannelData()` on an AudioBuffer.
* @param sampleRate The sample rate in Hz of the provided audio data. If not

View File

@ -51,6 +51,7 @@ export class AudioEmbedder extends AudioTaskRunner<AudioEmbedderResult[]> {
/**
* Initializes the Wasm runtime and creates a new audio embedder from the
* provided options.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param audioEmbedderOptions The options for the audio embedder. Note that
@ -67,6 +68,7 @@ export class AudioEmbedder extends AudioTaskRunner<AudioEmbedderResult[]> {
/**
* Initializes the Wasm runtime and creates a new audio embedder based on the
* provided model asset buffer.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetBuffer A binary representation of the TFLite model.
@ -81,6 +83,7 @@ export class AudioEmbedder extends AudioTaskRunner<AudioEmbedderResult[]> {
/**
* Initializes the Wasm runtime and creates a new audio embedder based on the
* path to the model asset.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetPath The path to the TFLite model.
@ -115,6 +118,7 @@ export class AudioEmbedder extends AudioTaskRunner<AudioEmbedderResult[]> {
* You can reset an option back to its default value by explicitly setting it
* to `undefined`.
*
* @export
* @param options The options for the audio embedder.
*/
override setOptions(options: AudioEmbedderOptions): Promise<void> {
@ -129,6 +133,7 @@ export class AudioEmbedder extends AudioTaskRunner<AudioEmbedderResult[]> {
 * Performs embedding extraction on the provided audio clip and waits
* synchronously for the response.
*
* @export
* @param audioData An array of raw audio capture data, like from a call to
* `getChannelData()` on an AudioBuffer.
* @param sampleRate The sample rate in Hz of the provided audio data. If not

View File

@ -35,6 +35,7 @@ export abstract class AudioTaskRunner<T> extends TaskRunner {
* Sets the sample rate for API calls that omit an explicit sample rate.
* `48000` is used as a default if this method is not called.
*
* @export
* @param sampleRate A sample rate (e.g. `44100`).
*/
setDefaultSampleRate(sampleRate: number) {

View File

@ -1,27 +0,0 @@
/** @externs */
const AudioClassifier = {};
AudioClassifier.createFromModelBuffer = function() {};
AudioClassifier.createFromOptions = function() {};
AudioClassifier.createFromModelPath = function() {};
AudioClassifier.classify = function() {};
AudioClassifier.setDefaultSampleRate = function() {};
AudioClassifier.setOptions = function() {};
AudioClassifier.close = function() {};
const AudioEmbedder = {};
AudioEmbedder.createFromModelBuffer = function() {};
AudioEmbedder.createFromOptions = function() {};
AudioEmbedder.createFromModelPath = function() {};
AudioEmbedder.embded = function() {};
AudioEmbedder.setOptions = function() {};
AudioEmbedder.cosineSimilarity = function() {};
AudioEmbedder.setDefaultSampleRate = function() {};
AudioEmbedder.setOptions = function() {};
AudioEmbedder.close = function() {};
const FilesetResolver = {};
FilesetResolver.isSimdSupported = function() {};
FilesetResolver.forAudioTasks = function() {};
FilesetResolver.forTextTasks = function() {};
FilesetResolver.forVisionTasks = function() {};

View File

@ -76,6 +76,7 @@ export class FilesetResolver {
* you can use `isSimdSupported()` to decide whether to load the SIMD-based
* assets.
*
* @export
* @return Whether SIMD support was detected in the current environment.
*/
static isSimdSupported(): Promise<boolean> {
@ -85,6 +86,7 @@ export class FilesetResolver {
/**
* Creates a fileset for the MediaPipe Audio tasks.
*
* @export
* @param basePath An optional base path to specify the directory the Wasm
* files should be loaded from. If not specified, the Wasm files are
* loaded from the host's root directory.
@ -98,6 +100,7 @@ export class FilesetResolver {
/**
* Creates a fileset for the MediaPipe Text tasks.
*
* @export
* @param basePath An optional base path to specify the directory the Wasm
* files should be loaded from. If not specified, the Wasm files are
* loaded from the host's root directory.
@ -111,6 +114,7 @@ export class FilesetResolver {
/**
* Creates a fileset for the MediaPipe Vision tasks.
*
* @export
* @param basePath An optional base path to specify the directory the Wasm
* files should be loaded from. If not specified, the Wasm files are
* loaded from the host's root directory.

View File

@ -325,7 +325,10 @@ export abstract class TaskRunner {
true, FREE_MEMORY_STREAM, this.latestOutputTimestamp);
}
/** Closes and cleans up the resources held by this task. */
/**
* Closes and cleans up the resources held by this task.
* @export
*/
close(): void {
this.keepaliveNode = undefined;
this.graphRunner.closeGraph();

View File

@ -1,33 +0,0 @@
/** @externs */
const LanguageDetector = {};
LanguageDetector.createFromModelBuffer = function() {};
LanguageDetector.createFromOptions = function() {};
LanguageDetector.createFromModelPath = function() {};
LanguageDetector.detect = function() {};
LanguageDetector.setOptions = function() {};
LanguageDetector.close = function() {};
const TextClassifier = {};
TextClassifier.createFromModelBuffer = function() {};
TextClassifier.createFromOptions = function() {};
TextClassifier.createFromModelPath = function() {};
TextClassifier.classify = function() {};
TextClassifier.setOptions = function() {};
TextClassifier.close = function() {};
const TextEmbedder = {};
TextEmbedder.createFromModelBuffer = function() {};
TextEmbedder.createFromOptions = function() {};
TextEmbedder.createFromModelPath = function() {};
TextEmbedder.embded = function() {};
TextEmbedder.setOptions = function() {};
TextEmbedder.cosineSimilarity = function() {};
TextClassifier.setOptions = function() {};
TextClassifier.close = function() {};
const FilesetResolver = {};
FilesetResolver.isSimdSupported = function() {};
FilesetResolver.forTextTasks = function() {};
FilesetResolver.forTextTasks = function() {};
FilesetResolver.forVisionTasks = function() {};

View File

@ -48,6 +48,7 @@ export class LanguageDetector extends TaskRunner {
/**
* Initializes the Wasm runtime and creates a new language detector from the
* provided options.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param textClassifierOptions The options for the language detector. Note
@ -65,6 +66,7 @@ export class LanguageDetector extends TaskRunner {
/**
* Initializes the Wasm runtime and creates a new language detector based on
* the provided model asset buffer.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetBuffer A binary representation of the model.
@ -80,6 +82,7 @@ export class LanguageDetector extends TaskRunner {
/**
* Initializes the Wasm runtime and creates a new language detector based on
* the path to the model asset.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetPath The path to the model asset.
@ -107,6 +110,7 @@ export class LanguageDetector extends TaskRunner {
* You can reset an option back to its default value by explicitly setting it
* to `undefined`.
*
* @export
* @param options The options for the language detector.
*/
override setOptions(options: LanguageDetectorOptions): Promise<void> {
@ -126,6 +130,7 @@ export class LanguageDetector extends TaskRunner {
/**
* Predicts the language of the input text.
*
* @export
* @param text The text to process.
* @return The languages detected in the input text.
*/

View File

@ -48,6 +48,7 @@ export class TextClassifier extends TaskRunner {
/**
* Initializes the Wasm runtime and creates a new text classifier from the
* provided options.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param textClassifierOptions The options for the text classifier. Note that
@ -65,6 +66,7 @@ export class TextClassifier extends TaskRunner {
/**
* Initializes the Wasm runtime and creates a new text classifier based on the
* provided model asset buffer.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetBuffer A binary representation of the model.
@ -80,6 +82,7 @@ export class TextClassifier extends TaskRunner {
/**
* Initializes the Wasm runtime and creates a new text classifier based on the
* path to the model asset.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetPath The path to the model asset.
@ -107,6 +110,7 @@ export class TextClassifier extends TaskRunner {
* You can reset an option back to its default value by explicitly setting it
* to `undefined`.
*
* @export
* @param options The options for the text classifier.
*/
override setOptions(options: TextClassifierOptions): Promise<void> {
@ -127,6 +131,7 @@ export class TextClassifier extends TaskRunner {
* Performs Natural Language classification on the provided text and waits
* synchronously for the response.
*
* @export
* @param text The text to process.
* @return The classification result of the text
*/

View File

@ -52,6 +52,7 @@ export class TextEmbedder extends TaskRunner {
/**
* Initializes the Wasm runtime and creates a new text embedder from the
* provided options.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param textEmbedderOptions The options for the text embedder. Note that
@ -68,6 +69,7 @@ export class TextEmbedder extends TaskRunner {
/**
* Initializes the Wasm runtime and creates a new text embedder based on the
* provided model asset buffer.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetBuffer A binary representation of the TFLite model.
@ -83,6 +85,7 @@ export class TextEmbedder extends TaskRunner {
/**
* Initializes the Wasm runtime and creates a new text embedder based on the
* path to the model asset.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetPath The path to the TFLite model.
@ -110,6 +113,7 @@ export class TextEmbedder extends TaskRunner {
* You can reset an option back to its default value by explicitly setting it
* to `undefined`.
*
* @export
* @param options The options for the text embedder.
*/
override setOptions(options: TextEmbedderOptions): Promise<void> {
@ -130,6 +134,7 @@ export class TextEmbedder extends TaskRunner {
 * Performs embedding extraction on the provided text and waits synchronously
* for the response.
*
* @export
* @param text The text to process.
 * @return The embedding results of the text
*/
@ -146,6 +151,7 @@ export class TextEmbedder extends TaskRunner {
*
* [1]: https://en.wikipedia.org/wiki/Cosine_similarity
*
* @export
* @throws if the embeddings are of different types(float vs. quantized), have
* different sizes, or have an L2-norm of 0.
*/

View File

@ -89,6 +89,7 @@ export class DrawingUtils {
/**
* Restricts a number between two endpoints (order doesn't matter).
*
* @export
* @param x The number to clamp.
* @param x0 The first boundary.
* @param x1 The second boundary.
@ -104,6 +105,7 @@ export class DrawingUtils {
* Linearly interpolates a value between two points, clamping that value to
* the endpoints.
*
* @export
* @param x The number to interpolate.
* @param x0 The x coordinate of the start value.
* @param x1 The x coordinate of the end value.
@ -121,6 +123,7 @@ export class DrawingUtils {
/**
* Draws circles onto the provided landmarks.
*
* @export
* @param landmarks The landmarks to draw.
* @param style The style to visualize the landmarks.
*/
@ -156,6 +159,7 @@ export class DrawingUtils {
/**
* Draws lines between landmarks (given a connection graph).
*
* @export
* @param landmarks The landmarks to draw.
* @param connections The connections array that contains the start and the
* end indices for the connections to draw.
@ -191,6 +195,7 @@ export class DrawingUtils {
/**
* Draws a bounding box.
*
* @export
* @param boundingBox The bounding box to draw.
 * @param style The style to visualize the bounding box.
*/

View File

@ -92,8 +92,8 @@ export abstract class VisionTaskRunner extends TaskRunner {
* @param loadTfliteModel Whether to load the model specified in
* `options.baseOptions`.
*/
override applyOptions(options: VisionTaskOptions, loadTfliteModel = true):
Promise<void> {
protected override applyOptions(
options: VisionTaskOptions, loadTfliteModel = true): Promise<void> {
if ('runningMode' in options) {
const useStreamMode =
!!options.runningMode && options.runningMode !== 'IMAGE';
@ -297,7 +297,10 @@ export abstract class VisionTaskRunner extends TaskRunner {
return shouldCopyData ? mask.clone() : mask;
}
/** Closes and cleans up the resources held by this task. */
/**
* Closes and cleans up the resources held by this task.
* @export
*/
override close(): void {
this.shaderContext.close();
super.close();

View File

@ -1,145 +0,0 @@
/** @externs */
const DrawingUtils = {};
DrawingUtils.clamp = function() {};
DrawingUtils.lerp = function() {};
DrawingUtils.drawLandmarks = function() {};
DrawingUtils.drawConnectors = function() {};
DrawingUtils.drawBoundingBox = function() {};
const FaceDetector = {};
FaceDetector.createFromModelBuffer = function() {};
FaceDetector.createFromOptions = function() {};
FaceDetector.createFromModelPath = function() {};
FaceDetector.detect = function() {};
FaceDetector.detectForVideo = function() {};
FaceDetector.setOptions = function() {};
FaceDetector.close = function() {};
const FaceLandmarker = {};
FaceLandmarker.FACE_LANDMARKS_LIPS = {};
FaceLandmarker.FACE_LANDMARKS_LEFT_EYE = {};
FaceLandmarker.FACE_LANDMARKS_LEFT_EYEBROW = {};
FaceLandmarker.FACE_LANDMARKS_LEFT_IRIS = {};
FaceLandmarker.FACE_LANDMARKS_RIGHT_EYE = {};
FaceLandmarker.FACE_LANDMARKS_RIGHT_EYEBROW = {};
FaceLandmarker.FACE_LANDMARKS_RIGHT_IRIS = {};
FaceLandmarker.FACE_LANDMARKS_FACE_OVAL = {};
FaceLandmarker.FACE_LANDMARKS_CONTOURS = {};
FaceLandmarker.FACE_LANDMARKS_TESSELATION = {};
FaceLandmarker.createFromModelBuffer = function() {};
FaceLandmarker.createFromOptions = function() {};
FaceLandmarker.createFromModelPath = function() {};
FaceLandmarker.detect = function() {};
FaceLandmarker.detectForVideo = function() {};
FaceLandmarker.setOptions = function() {};
FaceLandmarker.close = function() {};
const FaceStylizer = {};
FaceStylizer.createFromModelBuffer = function() {};
FaceStylizer.createFromOptions = function() {};
FaceStylizer.createFromModelPath = function() {};
FaceStylizer.stylize = function() {};
FaceStylizer.setOptions = function() {};
FaceStylizer.close = function() {};
const FilesetResolver = {};
FilesetResolver.isSimdSupported = function() {};
FilesetResolver.forAudioTasks = function() {};
FilesetResolver.forTextTasks = function() {};
FilesetResolver.forVisionTasks = function() {};
const GestureRecognizer = {};
GestureRecognizer.createFromModelBuffer = function() {};
GestureRecognizer.createFromOptions = function() {};
GestureRecognizer.createFromModelPath = function() {};
GestureRecognizer.recognize = function() {};
GestureRecognizer.recognizeForVideo = function() {};
GestureRecognizer.setOptions = function() {};
GestureRecognizer.close = function() {};
const HandLandmarker = {};
HandLandmarker.HAND_CONNECTIONS = {};
HandLandmarker.createFromModelBuffer = function() {};
HandLandmarker.createFromOptions = function() {};
HandLandmarker.createFromModelPath = function() {};
HandLandmarker.detect = function() {};
HandLandmarker.detectForVideo = function() {};
HandLandmarker.setOptions = function() {};
HandLandmarker.close = function() {};
const ImageClassifier = {};
ImageClassifier.createFromModelBuffer = function() {};
ImageClassifier.createFromOptions = function() {};
ImageClassifier.createFromModelPath = function() {};
ImageClassifier.classify = function() {};
ImageClassifier.classifyForVideo = function() {};
ImageClassifier.setOptions = function() {};
ImageClassifier.close = function() {};
const ImageEmbedder = {};
ImageEmbedder.createFromModelBuffer = function() {};
ImageEmbedder.createFromOptions = function() {};
ImageEmbedder.createFromModelPath = function() {};
ImageEmbedder.embded = function() {};
ImageEmbedder.embedForVideo = function() {};
ImageEmbedder.setOptions = function() {};
ImageEmbedder.cosineSimilarity = function() {};
ImageEmbedder.close = function() {};
const ImageSegmenter = {};
ImageSegmenter.createFromModelBuffer = function() {};
ImageSegmenter.createFromOptions = function() {};
ImageSegmenter.createFromModelPath = function() {};
ImageSegmenter.segmment = function() {};
ImageSegmenter.segmentForVideo = function() {};
ImageSegmenter.setOptions = function() {};
ImageSegmenter.getLabels = function() {};
ImageSegmenter.close = function() {};
const InteractiveSegmenter = {};
InteractiveSegmenter.createFromModelBuffer = function() {};
InteractiveSegmenter.createFromOptions = function() {};
InteractiveSegmenter.createFromModelPath = function() {};
InteractiveSegmenter.segmment = function() {};
InteractiveSegmenter.setOptions = function() {};
InteractiveSegmenter.close = function() {};
const MPImage = {};
MPImage.hasImageData = function() {};
MPImage.hasImageBitmap = function() {};
MPImage.hasWebGLTexture = function() {};
MPImage.getAsImageData = function() {};
MPImage.getAsImageBitmap = function() {};
MPImage.getAsWebGLTexture = function() {};
MPImage.clone = function() {};
MPImage.close = function() {};
const MPMask = {};
MPMask.hasUint8Array = function() {};
MPMask.hasFloat32Array = function() {};
MPMask.hasWebGLTexture = function() {};
MPMask.getAsUint8Array = function() {};
MPMask.getAsFloat32Array = function() {};
MPMask.getAsWebGLTexture = function() {};
MPMask.clone = function() {};
MPMask.close = function() {};
const ObjectDetector = {};
ObjectDetector.createFromModelBuffer = function() {};
ObjectDetector.createFromOptions = function() {};
ObjectDetector.createFromModelPath = function() {};
ObjectDetector.detect = function() {};
ObjectDetector.detectForVideo = function() {};
ObjectDetector.setOptions = function() {};
ObjectDetector.close = function() {};
const PoseLandmarker = {};
PoseLandmarker.POSE_CONNECTIONS = {};
PoseLandmarker.createFromModelBuffer = function() {};
PoseLandmarker.createFromOptions = function() {};
PoseLandmarker.createFromModelPath = function() {};
PoseLandmarker.detect = function() {};
PoseLandmarker.detectForVideo = function() {};
PoseLandmarker.setOptions = function() {};
PoseLandmarker.close = function() {};

View File

@ -75,6 +75,7 @@ export class FaceLandmarker extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new `FaceLandmarker` from the
* provided options.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param faceLandmarkerOptions The options for the FaceLandmarker.
@ -91,6 +92,7 @@ export class FaceLandmarker extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new `FaceLandmarker` based on
* the provided model asset buffer.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetBuffer A binary representation of the model.
@ -105,6 +107,7 @@ export class FaceLandmarker extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new `FaceLandmarker` based on
* the path to the model asset.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetPath The path to the model asset.
@ -116,41 +119,65 @@ export class FaceLandmarker extends VisionTaskRunner {
FaceLandmarker, wasmFileset, {baseOptions: {modelAssetPath}});
}
/** Landmark connections to draw the connection between a face's lips. */
/**
* Landmark connections to draw the connection between a face's lips.
* @export
*/
static FACE_LANDMARKS_LIPS = FACE_LANDMARKS_LIPS;
/** Landmark connections to draw the connection between a face's left eye. */
/**
* Landmark connections to draw the connection between a face's left eye.
* @export
*/
static FACE_LANDMARKS_LEFT_EYE = FACE_LANDMARKS_LEFT_EYE;
/**
* Landmark connections to draw the connection between a face's left eyebrow.
* @export
*/
static FACE_LANDMARKS_LEFT_EYEBROW = FACE_LANDMARKS_LEFT_EYEBROW;
/** Landmark connections to draw the connection between a face's left iris. */
/**
* Landmark connections to draw the connection between a face's left iris.
* @export
*/
static FACE_LANDMARKS_LEFT_IRIS = FACE_LANDMARKS_LEFT_IRIS;
/** Landmark connections to draw the connection between a face's right eye. */
/**
* Landmark connections to draw the connection between a face's right eye.
* @export
*/
static FACE_LANDMARKS_RIGHT_EYE = FACE_LANDMARKS_RIGHT_EYE;
/**
* Landmark connections to draw the connection between a face's right
* eyebrow.
* @export
*/
static FACE_LANDMARKS_RIGHT_EYEBROW = FACE_LANDMARKS_RIGHT_EYEBROW;
/**
* Landmark connections to draw the connection between a face's right iris.
* @export
*/
static FACE_LANDMARKS_RIGHT_IRIS = FACE_LANDMARKS_RIGHT_IRIS;
/** Landmark connections to draw the face's oval. */
/**
* Landmark connections to draw the face's oval.
* @export
*/
static FACE_LANDMARKS_FACE_OVAL = FACE_LANDMARKS_FACE_OVAL;
/** Landmark connections to draw the face's contour. */
/**
* Landmark connections to draw the face's contour.
* @export
*/
static FACE_LANDMARKS_CONTOURS = FACE_LANDMARKS_CONTOURS;
/** Landmark connections to draw the face's tesselation. */
/**
* Landmark connections to draw the face's tesselation.
* @export
*/
static FACE_LANDMARKS_TESSELATION = FACE_LANDMARKS_TESSELATION;
/** @hideconstructor */
@ -188,6 +215,7 @@ export class FaceLandmarker extends VisionTaskRunner {
* You can reset an option back to its default value by explicitly setting it
* to `undefined`.
*
* @export
* @param options The options for the face landmarker.
*/
override setOptions(options: FaceLandmarkerOptions): Promise<void> {
@ -228,6 +256,7 @@ export class FaceLandmarker extends VisionTaskRunner {
* synchronously for the response. Only use this method when the
* FaceLandmarker is created with running mode `image`.
*
* @export
* @param image An image to process.
* @param imageProcessingOptions the `ImageProcessingOptions` specifying how
* to process the input image before running inference.
@ -245,6 +274,7 @@ export class FaceLandmarker extends VisionTaskRunner {
* synchronously for the response. Only use this method when the
* FaceLandmarker is created with running mode `video`.
*
* @export
* @param videoFrame A video frame to process.
* @param timestamp The timestamp of the current frame, in ms.
* @param imageProcessingOptions the `ImageProcessingOptions` specifying how

View File

@ -57,6 +57,7 @@ export class FaceStylizer extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new Face Stylizer from the
* provided options.
* @export
* @param wasmFileset A configuration object that provides the location of
* the Wasm binary and its loader.
* @param faceStylizerOptions The options for the Face Stylizer. Note
@ -73,6 +74,7 @@ export class FaceStylizer extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new Face Stylizer based on
* the provided model asset buffer.
* @export
* @param wasmFileset A configuration object that provides the location of
* the Wasm binary and its loader.
* @param modelAssetBuffer A binary representation of the model.
@ -87,6 +89,7 @@ export class FaceStylizer extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new Face Stylizer based on
* the path to the model asset.
* @export
* @param wasmFileset A configuration object that provides the location of
* the Wasm binary and its loader.
* @param modelAssetPath The path to the model asset.
@ -125,6 +128,7 @@ export class FaceStylizer extends VisionTaskRunner {
* options. You can reset an option back to its default value by
* explicitly setting it to `undefined`.
*
* @export
* @param options The options for the Face Stylizer.
*/
override setOptions(options: FaceStylizerOptions): Promise<void> {
@ -202,6 +206,7 @@ export class FaceStylizer extends VisionTaskRunner {
*/
stylize(image: ImageSource, imageProcessingOptions: ImageProcessingOptions):
MPImage|null;
/** @export */
stylize(
image: ImageSource,
imageProcessingOptionsOrCallback?: ImageProcessingOptions|

View File

@ -76,12 +76,14 @@ export class GestureRecognizer extends VisionTaskRunner {
/**
* An array containing the pairs of hand landmark indices to be rendered with
* connections.
* @export
*/
static HAND_CONNECTIONS = HAND_CONNECTIONS;
/**
* Initializes the Wasm runtime and creates a new gesture recognizer from the
* provided options.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param gestureRecognizerOptions The options for the gesture recognizer.
@ -99,6 +101,7 @@ export class GestureRecognizer extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new gesture recognizer based on
* the provided model asset buffer.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetBuffer A binary representation of the model.
@ -113,6 +116,7 @@ export class GestureRecognizer extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new gesture recognizer based on
* the path to the model asset.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetPath The path to the model asset.
@ -169,6 +173,7 @@ export class GestureRecognizer extends VisionTaskRunner {
* You can reset an option back to its default value by explicitly setting it
* to `undefined`.
*
* @export
* @param options The options for the gesture recognizer.
*/
override setOptions(options: GestureRecognizerOptions): Promise<void> {
@ -229,6 +234,7 @@ export class GestureRecognizer extends VisionTaskRunner {
* synchronously for the response. Only use this method when the
* GestureRecognizer is created with running mode `image`.
*
* @export
* @param image A single image to process.
* @param imageProcessingOptions the `ImageProcessingOptions` specifying how
* to process the input image before running inference.
@ -247,6 +253,7 @@ export class GestureRecognizer extends VisionTaskRunner {
* synchronously for the response. Only use this method when the
* GestureRecognizer is created with running mode `video`.
*
* @export
* @param videoFrame A video frame to process.
* @param timestamp The timestamp of the current frame, in ms.
* @param imageProcessingOptions the `ImageProcessingOptions` specifying how

View File

@ -68,12 +68,14 @@ export class HandLandmarker extends VisionTaskRunner {
/**
* An array containing the pairs of hand landmark indices to be rendered with
* connections.
* @export
*/
static HAND_CONNECTIONS = HAND_CONNECTIONS;
/**
* Initializes the Wasm runtime and creates a new `HandLandmarker` from the
* provided options.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param handLandmarkerOptions The options for the HandLandmarker.
@ -90,6 +92,7 @@ export class HandLandmarker extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new `HandLandmarker` based on
* the provided model asset buffer.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetBuffer A binary representation of the model.
@ -104,6 +107,7 @@ export class HandLandmarker extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new `HandLandmarker` based on
* the path to the model asset.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetPath The path to the model asset.
@ -150,6 +154,7 @@ export class HandLandmarker extends VisionTaskRunner {
* You can reset an option back to its default value by explicitly setting it
* to `undefined`.
*
* @export
* @param options The options for the hand landmarker.
*/
override setOptions(options: HandLandmarkerOptions): Promise<void> {
@ -181,6 +186,7 @@ export class HandLandmarker extends VisionTaskRunner {
* synchronously for the response. Only use this method when the
* HandLandmarker is created with running mode `image`.
*
* @export
* @param image An image to process.
* @param imageProcessingOptions the `ImageProcessingOptions` specifying how
* to process the input image before running inference.
@ -198,6 +204,7 @@ export class HandLandmarker extends VisionTaskRunner {
* synchronously for the response. Only use this method when the
* HandLandmarker is created with running mode `video`.
*
* @export
* @param videoFrame A video frame to process.
* @param timestamp The timestamp of the current frame, in ms.
* @param imageProcessingOptions the `ImageProcessingOptions` specifying how

View File

@ -51,6 +51,7 @@ export class ImageClassifier extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new image classifier from the
* provided options.
* @export
* @param wasmFileset A configuration object that provides the location
* Wasm binary and its loader.
* @param imageClassifierOptions The options for the image classifier. Note
@ -67,6 +68,7 @@ export class ImageClassifier extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new image classifier based on
* the provided model asset buffer.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetBuffer A binary representation of the model.
@ -81,6 +83,7 @@ export class ImageClassifier extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new image classifier based on
* the path to the model asset.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetPath The path to the model asset.
@ -117,6 +120,7 @@ export class ImageClassifier extends VisionTaskRunner {
* You can reset an option back to its default value by explicitly setting it
* to `undefined`.
*
* @export
* @param options The options for the image classifier.
*/
override setOptions(options: ImageClassifierOptions): Promise<void> {
@ -130,6 +134,7 @@ export class ImageClassifier extends VisionTaskRunner {
* synchronously for the response. Only use this method when the
* ImageClassifier is created with running mode `image`.
*
* @export
* @param image An image to process.
* @param imageProcessingOptions the `ImageProcessingOptions` specifying how
* to process the input image before running inference.
@ -147,6 +152,7 @@ export class ImageClassifier extends VisionTaskRunner {
* synchronously for the response. Only use this method when the
* ImageClassifier is created with running mode `video`.
*
* @export
* @param videoFrame A video frame to process.
* @param timestamp The timestamp of the current frame, in ms.
* @param imageProcessingOptions the `ImageProcessingOptions` specifying how

View File

@ -54,6 +54,7 @@ export class ImageEmbedder extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new image embedder from the
* provided options.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param imageEmbedderOptions The options for the image embedder. Note that
@ -70,6 +71,7 @@ export class ImageEmbedder extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new image embedder based on the
* provided model asset buffer.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetBuffer A binary representation of the TFLite model.
@ -84,6 +86,7 @@ export class ImageEmbedder extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new image embedder based on the
* path to the model asset.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetPath The path to the TFLite model.
@ -120,6 +123,7 @@ export class ImageEmbedder extends VisionTaskRunner {
* You can reset an option back to its default value by explicitly setting it
* to `undefined`.
*
* @export
* @param options The options for the image embedder.
*/
override setOptions(options: ImageEmbedderOptions): Promise<void> {
@ -133,6 +137,7 @@ export class ImageEmbedder extends VisionTaskRunner {
* synchronously for the response. Only use this method when the
* ImageEmbedder is created with running mode `image`.
*
* @export
* @param image The image to process.
* @param imageProcessingOptions the `ImageProcessingOptions` specifying how
* to process the input image before running inference.
@ -149,6 +154,7 @@ export class ImageEmbedder extends VisionTaskRunner {
* synchronously for the response. Only use this method when the
* ImageEmbedder is created with running mode `video`.
*
* @export
* @param imageFrame The image frame to process.
* @param timestamp The timestamp of the current frame, in ms.
* @param imageProcessingOptions the `ImageProcessingOptions` specifying how
@ -168,6 +174,7 @@ export class ImageEmbedder extends VisionTaskRunner {
*
* [1]: https://en.wikipedia.org/wiki/Cosine_similarity
*
* @export
* @throws if the embeddings are of different types(float vs. quantized), have
* different sizes, or have an L2-norm of 0.
*/

View File

@ -73,6 +73,7 @@ export class ImageSegmenter extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new image segmenter from the
* provided options.
* @export
* @param wasmFileset A configuration object that provides the location of
* the Wasm binary and its loader.
* @param imageSegmenterOptions The options for the Image Segmenter. Note
@ -89,6 +90,7 @@ export class ImageSegmenter extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new image segmenter based on
* the provided model asset buffer.
* @export
* @param wasmFileset A configuration object that provides the location of
* the Wasm binary and its loader.
* @param modelAssetBuffer A binary representation of the model.
@ -103,6 +105,7 @@ export class ImageSegmenter extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new image segmenter based on
* the path to the model asset.
* @export
* @param wasmFileset A configuration object that provides the location of
* the Wasm binary and its loader.
* @param modelAssetPath The path to the model asset.
@ -142,6 +145,7 @@ export class ImageSegmenter extends VisionTaskRunner {
* options. You can reset an option back to its default value by
* explicitly setting it to `undefined`.
*
* @export
* @param options The options for the image segmenter.
*/
override setOptions(options: ImageSegmenterOptions): Promise<void> {
@ -253,6 +257,7 @@ export class ImageSegmenter extends VisionTaskRunner {
*/
segment(image: ImageSource, imageProcessingOptions: ImageProcessingOptions):
ImageSegmenterResult;
/** @export */
segment(
image: ImageSource,
imageProcessingOptionsOrCallback?: ImageProcessingOptions|
@ -333,6 +338,7 @@ export class ImageSegmenter extends VisionTaskRunner {
segmentForVideo(
videoFrame: ImageSource, timestamp: number,
imageProcessingOptions: ImageProcessingOptions): ImageSegmenterResult;
/** @export */
segmentForVideo(
videoFrame: ImageSource, timestamp: number,
imageProcessingOptionsOrCallback?: ImageProcessingOptions|
@ -360,6 +366,7 @@ export class ImageSegmenter extends VisionTaskRunner {
* If there is no labelmap provided in the model file, empty label array is
* returned.
*
* @export
* @return The labels used by the current model.
*/
getLabels(): string[] {

View File

@ -97,6 +97,7 @@ export class InteractiveSegmenter extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new interactive segmenter from
* the provided options.
* @export
* @param wasmFileset A configuration object that provides the location of
* the Wasm binary and its loader.
* @param interactiveSegmenterOptions The options for the Interactive
@ -115,6 +116,7 @@ export class InteractiveSegmenter extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new interactive segmenter based
* on the provided model asset buffer.
* @export
* @param wasmFileset A configuration object that provides the location of
* the Wasm binary and its loader.
* @param modelAssetBuffer A binary representation of the model.
@ -130,6 +132,7 @@ export class InteractiveSegmenter extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new interactive segmenter based
* on the path to the model asset.
* @export
* @param wasmFileset A configuration object that provides the location of
* the Wasm binary and its loader.
* @param modelAssetPath The path to the model asset.
@ -171,6 +174,7 @@ export class InteractiveSegmenter extends VisionTaskRunner {
* options. You can reset an option back to its default value by
* explicitly setting it to `undefined`.
*
* @export
* @param options The options for the interactive segmenter.
* @return A Promise that resolves when the settings have been applied.
*/
@ -263,6 +267,7 @@ export class InteractiveSegmenter extends VisionTaskRunner {
image: ImageSource, roi: RegionOfInterest,
imageProcessingOptions: ImageProcessingOptions):
InteractiveSegmenterResult;
/** @export */
segment(
image: ImageSource, roi: RegionOfInterest,
imageProcessingOptionsOrCallback?: ImageProcessingOptions|

View File

@ -41,7 +41,9 @@ export {ImageSource}; // Used in the public API
// The OSS JS API does not support the builder pattern.
// tslint:disable:jspb-use-builder-pattern
/** Performs object detection on images. */
/**
* Performs object detection on images.
*/
export class ObjectDetector extends VisionTaskRunner {
private result: ObjectDetectorResult = {detections: []};
private readonly options = new ObjectDetectorOptionsProto();
@ -49,6 +51,7 @@ export class ObjectDetector extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new object detector from the
* provided options.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param objectDetectorOptions The options for the Object Detector. Note that
@ -65,6 +68,7 @@ export class ObjectDetector extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new object detector based on the
* provided model asset buffer.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetBuffer A binary representation of the model.
@ -79,6 +83,7 @@ export class ObjectDetector extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new object detector based on the
* path to the model asset.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetPath The path to the model asset.
@ -115,6 +120,7 @@ export class ObjectDetector extends VisionTaskRunner {
* You can reset an option back to its default value by explicitly setting it
* to `undefined`.
*
* @export
* @param options The options for the object detector.
*/
override setOptions(options: ObjectDetectorOptions): Promise<void> {
@ -159,6 +165,7 @@ export class ObjectDetector extends VisionTaskRunner {
* synchronously for the response. Only use this method when the
* ObjectDetector is created with running mode `image`.
*
* @export
* @param image An image to process.
* @param imageProcessingOptions the `ImageProcessingOptions` specifying how
* to process the input image before running inference.
@ -176,6 +183,7 @@ export class ObjectDetector extends VisionTaskRunner {
* synchronously for the response. Only use this method when the
* ObjectDetector is created with running mode `video`.
*
* @export
* @param videoFrame A video frame to process.
* @param timestamp The timestamp of the current frame, in ms.
* @param imageProcessingOptions the `ImageProcessingOptions` specifying how

View File

@ -76,6 +76,7 @@ export class PoseLandmarker extends VisionTaskRunner {
/**
* An array containing the pairs of pose landmark indices to be rendered with
* connections.
* @export
*/
static POSE_CONNECTIONS: Connection[] = [
{start: 0, end: 1}, {start: 1, end: 2}, {start: 2, end: 3},
@ -95,6 +96,7 @@ export class PoseLandmarker extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new `PoseLandmarker` from the
* provided options.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param poseLandmarkerOptions The options for the PoseLandmarker.
@ -111,6 +113,7 @@ export class PoseLandmarker extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new `PoseLandmarker` based on
* the provided model asset buffer.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetBuffer A binary representation of the model.
@ -125,6 +128,7 @@ export class PoseLandmarker extends VisionTaskRunner {
/**
* Initializes the Wasm runtime and creates a new `PoseLandmarker` based on
* the path to the model asset.
* @export
* @param wasmFileset A configuration object that provides the location of the
* Wasm binary and its loader.
* @param modelAssetPath The path to the model asset.
@ -171,6 +175,7 @@ export class PoseLandmarker extends VisionTaskRunner {
* You can reset an option back to its default value by explicitly setting it
* to `undefined`.
*
* @export
* @param options The options for the pose landmarker.
*/
override setOptions(options: PoseLandmarkerOptions): Promise<void> {
@ -257,6 +262,7 @@ export class PoseLandmarker extends VisionTaskRunner {
*/
detect(image: ImageSource, imageProcessingOptions: ImageProcessingOptions):
PoseLandmarkerResult;
/** @export */
detect(
image: ImageSource,
imageProcessingOptionsOrCallback?: ImageProcessingOptions|
@ -338,6 +344,7 @@ export class PoseLandmarker extends VisionTaskRunner {
detectForVideo(
videoFrame: ImageSource, timestamp: number,
imageProcessingOptions: ImageProcessingOptions): PoseLandmarkerResult;
/** @export */
detectForVideo(
videoFrame: ImageSource, timestamp: number,
imageProcessingOptionsOrCallback?: ImageProcessingOptions|