Create README.md files for NPM packages
PiperOrigin-RevId: 500750516
This commit is contained in:
parent
c6cf598774
commit
73f4636292
|
@ -65,6 +65,7 @@ pkg_npm(
|
||||||
"wasm/audio_wasm_nosimd_internal.js",
|
"wasm/audio_wasm_nosimd_internal.js",
|
||||||
"wasm/audio_wasm_nosimd_internal.wasm",
|
"wasm/audio_wasm_nosimd_internal.wasm",
|
||||||
":audio_bundle",
|
":audio_bundle",
|
||||||
|
"//mediapipe/tasks/web/audio:README.md",
|
||||||
],
|
],
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -108,6 +109,7 @@ pkg_npm(
|
||||||
"wasm/text_wasm_nosimd_internal.js",
|
"wasm/text_wasm_nosimd_internal.js",
|
||||||
"wasm/text_wasm_nosimd_internal.wasm",
|
"wasm/text_wasm_nosimd_internal.wasm",
|
||||||
":text_bundle",
|
":text_bundle",
|
||||||
|
"//mediapipe/tasks/web/text:README.md",
|
||||||
],
|
],
|
||||||
)
|
)
|
||||||
|
|
||||||
|
@ -151,5 +153,6 @@ pkg_npm(
|
||||||
"wasm/vision_wasm_nosimd_internal.js",
|
"wasm/vision_wasm_nosimd_internal.js",
|
||||||
"wasm/vision_wasm_nosimd_internal.wasm",
|
"wasm/vision_wasm_nosimd_internal.wasm",
|
||||||
":vision_bundle",
|
":vision_bundle",
|
||||||
|
"//mediapipe/tasks/web/vision:README.md",
|
||||||
],
|
],
|
||||||
)
|
)
|
||||||
|
|
|
@ -4,6 +4,8 @@ load("//mediapipe/framework/port:build_config.bzl", "mediapipe_ts_library")
|
||||||
|
|
||||||
package(default_visibility = ["//mediapipe/tasks:internal"])
|
package(default_visibility = ["//mediapipe/tasks:internal"])
|
||||||
|
|
||||||
|
exports_files(["README.md"])
|
||||||
|
|
||||||
mediapipe_ts_library(
|
mediapipe_ts_library(
|
||||||
name = "audio_lib",
|
name = "audio_lib",
|
||||||
srcs = ["index.ts"],
|
srcs = ["index.ts"],
|
||||||
|
|
31
mediapipe/tasks/web/audio/README.md
Normal file
31
mediapipe/tasks/web/audio/README.md
Normal file
|
@ -0,0 +1,31 @@
|
||||||
|
# MediaPipe Tasks Audio Package
|
||||||
|
|
||||||
|
This package contains the audio tasks for MediaPipe.
|
||||||
|
|
||||||
|
## Audio Classification
|
||||||
|
|
||||||
|
The MediaPipe Audio Classification task performs classification on audio data.
|
||||||
|
|
||||||
|
```
|
||||||
|
const audio = await FilesetResolver.forAudioTasks(
|
||||||
|
"https://cdn.jsdelivr.net/npm/@mediapipe/tasks-audio@latest/wasm"
|
||||||
|
);
|
||||||
|
const audioClassifier = await AudioClassifier.createFromModelPath(audio,
|
||||||
|
"https://storage.googleapis.com/mediapipe-tasks/audio_classifier/yamnet_audio_classifier_with_metadata.tflite"
|
||||||
|
);
|
||||||
|
const classifications = audioClassifier.classify(audioData);
|
||||||
|
```
|
||||||
|
|
||||||
|
## Audio Embedding
|
||||||
|
|
||||||
|
The MediaPipe Audio Embedding task extracts embeddings from audio data.
|
||||||
|
|
||||||
|
```
|
||||||
|
const audio = await FilesetResolver.forAudioTasks(
|
||||||
|
"https://cdn.jsdelivr.net/npm/@mediapipe/tasks-audio@latest/wasm"
|
||||||
|
);
|
||||||
|
const audioEmbedder = await AudioEmbedder.createFromModelPath(audio,
|
||||||
|
"model.tflite"
|
||||||
|
);
|
||||||
|
const embeddings = audioEmbedder.embed(audioData);
|
||||||
|
```
|
|
@ -4,6 +4,8 @@ load("//mediapipe/framework/port:build_config.bzl", "mediapipe_ts_library")
|
||||||
|
|
||||||
package(default_visibility = ["//mediapipe/tasks:internal"])
|
package(default_visibility = ["//mediapipe/tasks:internal"])
|
||||||
|
|
||||||
|
exports_files(["README.md"])
|
||||||
|
|
||||||
mediapipe_ts_library(
|
mediapipe_ts_library(
|
||||||
name = "text_lib",
|
name = "text_lib",
|
||||||
srcs = ["index.ts"],
|
srcs = ["index.ts"],
|
||||||
|
|
34
mediapipe/tasks/web/text/README.md
Normal file
34
mediapipe/tasks/web/text/README.md
Normal file
|
@ -0,0 +1,34 @@
|
||||||
|
# MediaPipe Tasks Text Package
|
||||||
|
|
||||||
|
This package contains the text tasks for MediaPipe.
|
||||||
|
|
||||||
|
## Text Classification
|
||||||
|
|
||||||
|
MediaPipe Text Classifier task lets you classify text into a set of defined
|
||||||
|
categories, such as positive or negative sentiment.
|
||||||
|
|
||||||
|
```
|
||||||
|
const text = await FilesetResolver.forTextTasks(
|
||||||
|
"https://cdn.jsdelivr.net/npm/@mediapipe/tasks-text@latest/wasm"
|
||||||
|
);
|
||||||
|
const textClassifier = await TextClassifier.createFromModelPath(text,
|
||||||
|
"https://storage.googleapis.com/mediapipe-tasks/text_classifier/bert_text_classifier.tflite"
|
||||||
|
);
|
||||||
|
const classifications = textClassifier.classify(textData);
|
||||||
|
```
|
||||||
|
|
||||||
|
For more information, refer to the [Text Classification](https://developers.google.com/mediapipe/solutions/text/text_classifier/web_js) documentation.
|
||||||
|
|
||||||
|
## Text Embedding
|
||||||
|
|
||||||
|
The MediaPipe Text Embedding task extracts embeddings from text data.
|
||||||
|
|
||||||
|
```
|
||||||
|
const text = await FilesetResolver.forTextTasks(
|
||||||
|
"https://cdn.jsdelivr.net/npm/@mediapipe/tasks-text@latest/wasm"
|
||||||
|
);
|
||||||
|
const textEmbedder = await TextEmbedder.createFromModelPath(text,
|
||||||
|
"https://storage.googleapis.com/mediapipe-tasks/text_embedder/mobilebert_embedding_with_metadata.tflite"
|
||||||
|
);
|
||||||
|
const embeddings = textEmbedder.embed(textData);
|
||||||
|
```
|
|
@ -4,6 +4,8 @@ load("//mediapipe/framework/port:build_config.bzl", "mediapipe_ts_library")
|
||||||
|
|
||||||
package(default_visibility = ["//mediapipe/tasks:internal"])
|
package(default_visibility = ["//mediapipe/tasks:internal"])
|
||||||
|
|
||||||
|
exports_files(["README.md"])
|
||||||
|
|
||||||
mediapipe_ts_library(
|
mediapipe_ts_library(
|
||||||
name = "vision_lib",
|
name = "vision_lib",
|
||||||
srcs = ["index.ts"],
|
srcs = ["index.ts"],
|
||||||
|
|
78
mediapipe/tasks/web/vision/README.md
Normal file
78
mediapipe/tasks/web/vision/README.md
Normal file
|
@ -0,0 +1,78 @@
|
||||||
|
# MediaPipe Tasks Vision Package
|
||||||
|
|
||||||
|
This package contains the vision tasks for MediaPipe.
|
||||||
|
|
||||||
|
## Object Detection
|
||||||
|
|
||||||
|
The MediaPipe Object Detector task lets you detect the presence and location of
|
||||||
|
multiple classes of objects within images or videos.
|
||||||
|
|
||||||
|
```
|
||||||
|
const vision = await FilesetResolver.forVisionTasks(
|
||||||
|
"https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@latest/wasm"
|
||||||
|
);
|
||||||
|
const objectDetector = await ObjectDetector.createFromModelPath(vision,
|
||||||
|
"https://storage.googleapis.com/mediapipe-tasks/object_detector/efficientdet_lite0_uint8.tflite"
|
||||||
|
);
|
||||||
|
const image = document.getElementById("image") as HTMLImageElement;
|
||||||
|
const detections = objectDetector.detect(image);
|
||||||
|
```
|
||||||
|
|
||||||
|
For more information, refer to the [Object Detector](https://developers.google.com/mediapipe/solutions/vision/object_detector/web_js) documentation.
|
||||||
|
|
||||||
|
## Image Classification
|
||||||
|
|
||||||
|
The MediaPipe Image Classifier task lets you perform classification on images.
|
||||||
|
You can use this task to identify what an image represents among a set of
|
||||||
|
categories defined at training time.
|
||||||
|
|
||||||
|
```
|
||||||
|
const vision = await FilesetResolver.forVisionTasks(
|
||||||
|
"https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@latest/wasm"
|
||||||
|
);
|
||||||
|
const imageClassifier = await ImageClassifier.createFromModelPath(vision,
|
||||||
|
"https://storage.googleapis.com/mediapipe-tasks/image_classifier/efficientnet_lite0_uint8.tflite"
|
||||||
|
);
|
||||||
|
const image = document.getElementById("image") as HTMLImageElement;
|
||||||
|
const classifications = imageClassifier.classify(image);
|
||||||
|
```
|
||||||
|
|
||||||
|
For more information, refer to the [Image Classification](https://developers.google.com/mediapipe/solutions/vision/image_classifier/web_js) documentation.
|
||||||
|
|
||||||
|
## Gesture Recognition
|
||||||
|
|
||||||
|
The MediaPipe Gesture Recognizer task lets you recognize hand gestures in real
|
||||||
|
time, and provides the recognized hand gesture results along with the landmarks
|
||||||
|
of the detected hands. You can use this task to recognize specific hand gestures
|
||||||
|
from a user, and invoke application features that correspond to those gestures.
|
||||||
|
|
||||||
|
```
|
||||||
|
const vision = await FilesetResolver.forVisionTasks(
|
||||||
|
"https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@latest/wasm"
|
||||||
|
);
|
||||||
|
const gestureRecognizer = await GestureRecognizer.createFromModelPath(vision,
|
||||||
|
"https://storage.googleapis.com/mediapipe-tasks/gesture_recognizer/gesture_recognizer.task"
|
||||||
|
);
|
||||||
|
const image = document.getElementById("image") as HTMLImageElement;
|
||||||
|
const recognitions = gestureRecognizer.recognize(image);
|
||||||
|
```
|
||||||
|
|
||||||
|
## Hand Landmark Detection
|
||||||
|
|
||||||
|
The MediaPipe Hand Landmarker task lets you detect the landmarks of the hands in
|
||||||
|
an image. You can use this Task to localize key points of the hands and render
|
||||||
|
visual effects over the hands.
|
||||||
|
|
||||||
|
```
|
||||||
|
const vision = await FilesetResolver.forVisionTasks(
|
||||||
|
"https://cdn.jsdelivr.net/npm/@mediapipe/tasks-vision@latest/wasm"
|
||||||
|
);
|
||||||
|
const handLandmarker = await HandLandmarker.createFromModelPath(vision,
|
||||||
|
"https://storage.googleapis.com/mediapipe-tasks/hand_landmarker/hand_landmarker.task"
|
||||||
|
);
|
||||||
|
const image = document.getElementById("image") as HTMLImageElement;
|
||||||
|
const landmarks = handLandmarker.detect(image);
|
||||||
|
```
|
||||||
|
|
||||||
|
For more information, refer to the [Hand Landmark Detection](https://developers.google.com/mediapipe/solutions/vision/hand_landmarker/web_js) documentation.
|
||||||
|
|
Loading…
Reference in New Issue
Block a user