Merge branch 'master' into move-headers-script
commit 40503acdec

 WORKSPACE | 19
@@ -176,6 +176,25 @@ http_archive(
     ],
 )

+# 2023-06-05
+# This version of Glog is required for Windows support, but currently causes
+# crashes on some Android devices.
+http_archive(
+    name = "com_github_glog_glog_windows",
+    strip_prefix = "glog-3a0d4d22c5ae0b9a2216988411cfa6bf860cc372",
+    sha256 = "170d08f80210b82d95563f4723a15095eff1aad1863000e8eeb569c96a98fefb",
+    urls = [
+        "https://github.com/google/glog/archive/3a0d4d22c5ae0b9a2216988411cfa6bf860cc372.zip",
+    ],
+    patches = [
+        "@//third_party:com_github_glog_glog.diff",
+        "@//third_party:com_github_glog_glog_windows_patch.diff",
+    ],
+    patch_args = [
+        "-p1",
+    ],
+)
+
 # easyexif
 http_archive(
     name = "easyexif",
@@ -304,6 +304,7 @@ class GlProcessor : public ImageToTensorConverter {
     glBindTexture(GL_TEXTURE_2D, 0);
     glActiveTexture(GL_TEXTURE0);
     glBindTexture(GL_TEXTURE_2D, 0);
+    glFlush();

     return absl::OkStatus();
   }
@@ -406,6 +406,7 @@ absl::Status TensorConverterCalculator::ProcessGPU(CalculatorContext* cc) {
     glActiveTexture(GL_TEXTURE1);
     glBindTexture(GL_TEXTURE_2D, 0);
 #endif  // MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_31
+    glFlush();
     src.Release();
     return absl::OkStatus();
   }));
@@ -93,6 +93,23 @@ py_test(
     deps = [":dataset"],
 )

+py_library(
+    name = "model_with_tokenizer",
+    srcs = ["model_with_tokenizer.py"],
+)
+
+py_test(
+    name = "model_with_tokenizer_test",
+    srcs = ["model_with_tokenizer_test.py"],
+    tags = ["requires-net:external"],
+    deps = [
+        ":bert_tokenizer",
+        ":model_spec",
+        ":model_with_tokenizer",
+        "//mediapipe/model_maker/python/core/utils:hub_loader",
+    ],
+)
+
 py_library(
     name = "bert_tokenizer",
     srcs = ["bert_tokenizer.py"],

@@ -145,10 +162,12 @@ py_library(
     name = "text_classifier",
     srcs = ["text_classifier.py"],
     deps = [
+        ":bert_tokenizer",
         ":dataset",
         ":hyperparameters",
         ":model_options",
         ":model_spec",
+        ":model_with_tokenizer",
         ":preprocessor",
         ":text_classifier_options",
         "//mediapipe/model_maker/python/core/data:dataset",

@@ -165,7 +184,7 @@ py_library(

 py_test(
     name = "text_classifier_test",
-    size = "large",
+    size = "enormous",
     srcs = ["text_classifier_test.py"],
     data = [
         "//mediapipe/model_maker/python/text/text_classifier/testdata",
@@ -56,6 +56,15 @@ class BertFullTokenizer(BertTokenizer):
     self._seq_len = seq_len

   def process(self, input_tensor: tf.Tensor) -> Mapping[str, Sequence[int]]:
+    """Processes one input_tensor example.
+
+    Args:
+      input_tensor: A tensor with shape (1, None) of a utf-8 encoded string.
+
+    Returns:
+      A dictionary of lists all with shape (1, self._seq_len) containing the
+      keys "input_word_ids", "input_type_ids", and "input_mask".
+    """
     tokens = self._tokenizer.tokenize(input_tensor.numpy()[0].decode("utf-8"))
     tokens = tokens[0 : (self._seq_len - 2)]  # account for [CLS] and [SEP]
     tokens.insert(0, "[CLS]")

@@ -96,7 +105,18 @@ class BertFastTokenizer(BertTokenizer):
     self._sep_id = vocab.index("[SEP]")
     self._pad_id = vocab.index("[PAD]")

-  def process(self, input_tensor: tf.Tensor) -> Mapping[str, Sequence[int]]:
+  def process_fn(self, input_tensor: tf.Tensor) -> Mapping[str, tf.Tensor]:
+    """Tensor implementation of the process function.
+
+    This implementation can be used within a model graph directly since it
+    takes in tensors and outputs tensors.
+
+    Args:
+      input_tensor: Input string tensor
+
+    Returns:
+      Dictionary of tf.Tensors.
+    """
     input_ids = self._tokenizer.tokenize(input_tensor).flat_values
     input_ids = input_ids[: (self._seq_len - 2)]
     input_ids = tf.concat(

@@ -112,7 +132,20 @@ class BertFastTokenizer(BertTokenizer):
     input_type_ids = tf.zeros(self._seq_len, dtype=tf.int32)
     input_mask = tf.cast(input_ids != self._pad_id, dtype=tf.int32)
     return {
-        "input_word_ids": input_ids.numpy().tolist(),
-        "input_type_ids": input_type_ids.numpy().tolist(),
-        "input_mask": input_mask.numpy().tolist(),
+        "input_word_ids": input_ids,
+        "input_type_ids": input_type_ids,
+        "input_mask": input_mask,
     }
+
+  def process(self, input_tensor: tf.Tensor) -> Mapping[str, Sequence[int]]:
+    """Processes one input_tensor example.
+
+    Args:
+      input_tensor: A tensor with shape (1, None) of a utf-8 encoded string.
+
+    Returns:
+      A dictionary of lists all with shape (1, self._seq_len) containing the
+      keys "input_word_ids", "input_type_ids", and "input_mask".
+    """
+    result = self.process_fn(input_tensor)
+    return {k: v.numpy().tolist() for k, v in result.items()}
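Note: a minimal usage sketch of the two tokenizer entry points above (not part of the diff; the vocabulary path is assumed, the keyword names mirror those used elsewhere in this change):

    import tensorflow as tf
    from mediapipe.model_maker.python.text.text_classifier import bert_tokenizer

    tokenizer = bert_tokenizer.BertFastTokenizer(
        vocab_file="/path/to/vocab.txt",  # assumed location of the BERT vocab
        do_lower_case=True,
        seq_len=128,
    )
    example = tf.constant(["some example text"])
    as_tensors = tokenizer.process_fn(example)  # dict of tf.Tensor, usable inside a model graph
    as_lists = tokenizer.process(example)       # dict of Python lists, for eager preprocessing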
@@ -0,0 +1,35 @@
+# Copyright 2023 The MediaPipe Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Text classifier export module library."""
+import tensorflow as tf
+
+
+class ModelWithTokenizer(tf.keras.Model):
+  """A model with the tokenizer included in graph for exporting to TFLite."""
+
+  def __init__(self, tokenizer, model):
+    super().__init__()
+    self._tokenizer = tokenizer
+    self._model = model
+
+  @tf.function(
+      input_signature=[
+          tf.TensorSpec(shape=[None], dtype=tf.string, name="input")
+      ]
+  )
+  def call(self, input_tensor):
+    x = self._tokenizer.process_fn(input_tensor)
+    x = {k: tf.expand_dims(v, axis=0) for k, v in x.items()}
+    x = self._model(x)
+    return x
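Note: the wrapper above tokenizes a raw string tensor via process_fn, adds a batch dimension, and forwards the result to the wrapped classifier. A minimal call-flow sketch (not part of the diff; the tokenizer and classifier objects are assumed to exist):

    wrapped = model_with_tokenizer.ModelWithTokenizer(tokenizer, classifier_model)
    # One raw UTF-8 string in, class probabilities out, shape (1, num_classes).
    probabilities = wrapped(tf.constant(["raw input text"]))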
@@ -0,0 +1,105 @@
+# Copyright 2022 The MediaPipe Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import tempfile
+from unittest import mock as unittest_mock
+
+import tensorflow as tf
+import tensorflow_hub
+
+from mediapipe.model_maker.python.core.utils import hub_loader
+from mediapipe.model_maker.python.text.text_classifier import bert_tokenizer
+from mediapipe.model_maker.python.text.text_classifier import model_spec
+from mediapipe.model_maker.python.text.text_classifier import model_with_tokenizer
+
+
+class BertTokenizerTest(tf.test.TestCase):
+  _SEQ_LEN = 128
+
+  def setUp(self):
+    super().setUp()
+    # Mock tempfile.gettempdir() to be unique for each test to avoid race
+    # condition when downloading model since these tests may run in parallel.
+    mock_gettempdir = unittest_mock.patch.object(
+        tempfile,
+        "gettempdir",
+        return_value=self.create_tempdir(),
+        autospec=True,
+    )
+    self.mock_gettempdir = mock_gettempdir.start()
+    self.addCleanup(mock_gettempdir.stop)
+    self._ms = model_spec.SupportedModels.MOBILEBERT_CLASSIFIER.value()
+    self._tokenizer = self._create_tokenizer()
+    self._model = self._create_model()
+
+  def _create_tokenizer(self):
+    vocab_file = os.path.join(
+        tensorflow_hub.resolve(self._ms.get_path()), "assets", "vocab.txt"
+    )
+    return bert_tokenizer.BertFastTokenizer(vocab_file, True, self._SEQ_LEN)
+
+  def _create_model(self):
+    encoder_inputs = dict(
+        input_word_ids=tf.keras.layers.Input(
+            shape=(self._SEQ_LEN,),
+            dtype=tf.int32,
+            name="input_word_ids",
+        ),
+        input_mask=tf.keras.layers.Input(
+            shape=(self._SEQ_LEN,),
+            dtype=tf.int32,
+            name="input_mask",
+        ),
+        input_type_ids=tf.keras.layers.Input(
+            shape=(self._SEQ_LEN,),
+            dtype=tf.int32,
+            name="input_type_ids",
+        ),
+    )
+    renamed_inputs = dict(
+        input_ids=encoder_inputs["input_word_ids"],
+        input_mask=encoder_inputs["input_mask"],
+        segment_ids=encoder_inputs["input_type_ids"],
+    )
+    encoder = hub_loader.HubKerasLayerV1V2(
+        self._ms.get_path(),
+        signature="tokens",
+        output_key="pooled_output",
+        trainable=True,
+    )
+    pooled_output = encoder(renamed_inputs)
+
+    output = tf.keras.layers.Dropout(rate=0.1)(pooled_output)
+    initializer = tf.keras.initializers.TruncatedNormal(stddev=0.02)
+    output = tf.keras.layers.Dense(
+        2,
+        kernel_initializer=initializer,
+        name="output",
+        activation="softmax",
+        dtype=tf.float32,
+    )(output)
+    return tf.keras.Model(inputs=encoder_inputs, outputs=output)
+
+  def test_model_with_tokenizer(self):
+    model = model_with_tokenizer.ModelWithTokenizer(
+        self._tokenizer, self._model
+    )
+    output = model(tf.constant(["Example input".encode("utf-8")]))
+    self.assertAllEqual(output.shape, (1, 2))
+    self.assertEqual(tf.reduce_sum(output), 1)
+
+
+if __name__ == "__main__":
+  tf.test.main()
@@ -368,6 +368,10 @@ class BertClassifierPreprocessor:
         tfrecord_cache_files=tfrecord_cache_files,
     )

+  @property
+  def tokenizer(self) -> bert_tokenizer.BertTokenizer:
+    return self._tokenizer
+

 TextClassifierPreprocessor = Union[
     BertClassifierPreprocessor, AverageWordEmbeddingClassifierPreprocessor
@@ -29,10 +29,12 @@ from mediapipe.model_maker.python.core.utils import loss_functions
 from mediapipe.model_maker.python.core.utils import metrics
 from mediapipe.model_maker.python.core.utils import model_util
 from mediapipe.model_maker.python.core.utils import quantization
+from mediapipe.model_maker.python.text.text_classifier import bert_tokenizer
 from mediapipe.model_maker.python.text.text_classifier import dataset as text_ds
 from mediapipe.model_maker.python.text.text_classifier import hyperparameters as hp
 from mediapipe.model_maker.python.text.text_classifier import model_options as mo
 from mediapipe.model_maker.python.text.text_classifier import model_spec as ms
+from mediapipe.model_maker.python.text.text_classifier import model_with_tokenizer
 from mediapipe.model_maker.python.text.text_classifier import preprocessor
 from mediapipe.model_maker.python.text.text_classifier import text_classifier_options
 from mediapipe.tasks.python.metadata.metadata_writers import metadata_writer

@@ -620,3 +622,56 @@ class _BertClassifier(TextClassifier):
         ids_name=self._model_spec.tflite_input_name["ids"],
         mask_name=self._model_spec.tflite_input_name["mask"],
         segment_name=self._model_spec.tflite_input_name["segment_ids"])
+
+  def export_model_with_tokenizer(
+      self,
+      model_name: str = "model_with_tokenizer.tflite",
+      quantization_config: Optional[quantization.QuantizationConfig] = None,
+  ):
+    """Converts and saves the model to a TFLite file with the tokenizer.
+
+    Note that unlike the export_model method, this export method will include
+    a FastBertTokenizer in the TFLite graph. The resulting TFLite will not have
+    metadata information to use with MediaPipe Tasks, but can be run directly
+    using TFLite Inference: https://www.tensorflow.org/lite/guide/inference
+
+    For more information on the tokenizer, see:
+    https://www.tensorflow.org/text/api_docs/python/text/FastBertTokenizer
+
+    Args:
+      model_name: File name to save TFLite model with tokenizer. The full export
+        path is {self._hparams.export_dir}/{model_name}.
+      quantization_config: The configuration for model quantization.
+    """
+    tf.io.gfile.makedirs(self._hparams.export_dir)
+    tflite_file = os.path.join(self._hparams.export_dir, model_name)
+    if (
+        self._hparams.tokenizer
+        != bert_tokenizer.SupportedBertTokenizers.FAST_BERT_TOKENIZER
+    ):
+      print(
+          f"WARNING: This model was trained with {self._hparams.tokenizer} "
+          "tokenizer, but the exported model with tokenizer will have a "
+          f"{bert_tokenizer.SupportedBertTokenizers.FAST_BERT_TOKENIZER} "
+          "tokenizer."
+      )
+      tokenizer = bert_tokenizer.BertFastTokenizer(
+          vocab_file=self._text_preprocessor.get_vocab_file(),
+          do_lower_case=self._model_spec.do_lower_case,
+          seq_len=self._model_options.seq_len,
+      )
+    else:
+      tokenizer = self._text_preprocessor.tokenizer
+
+    model = model_with_tokenizer.ModelWithTokenizer(tokenizer, self._model)
+    model(tf.constant(["Example input data".encode("utf-8")]))  # build model
+    saved_model_file = os.path.join(
+        self._hparams.export_dir, "saved_model_with_tokenizer"
+    )
+    model.save(saved_model_file)
+    tflite_model = model_util.convert_to_tflite_from_file(
+        saved_model_file,
+        quantization_config=quantization_config,
+        allow_custom_ops=True,
+    )
+    model_util.save_tflite(tflite_model, tflite_file)
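Note: a hedged sketch of how the new export is expected to be called from user code (not part of the diff; the create/export calls follow the existing Model Maker TextClassifier API, and the output path is the default model_name above):

    model = text_classifier.TextClassifier.create(train_data, validation_data, options)
    model.export_model()                 # existing export: TFLite plus MediaPipe Tasks metadata
    model.export_model_with_tokenizer()  # new export: FastBertTokenizer baked into the TFLite graph
    # writes {options.hparams.export_dir}/model_with_tokenizer.tflite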
@@ -149,6 +149,12 @@ class TextClassifierTest(tf.test.TestCase, parameterized.TestCase):
             output_metadata_file, self._BERT_CLASSIFIER_JSON_FILE, shallow=False
         )
     )
+    bert_classifier.export_model_with_tokenizer()
+    output_tflite_with_tokenizer_file = os.path.join(
+        options.hparams.export_dir, 'model_with_tokenizer.tflite'
+    )
+    self.assertTrue(os.path.exists(output_tflite_with_tokenizer_file))
+    self.assertGreater(os.path.getsize(output_tflite_with_tokenizer_file), 0)

   def test_label_mismatch(self):
     options = text_classifier.TextClassifierOptions(
@@ -59,12 +59,12 @@ using absl::StatusCode;
     return NULL;
   }

-  void *allocedMemory = malloc(memSize);
-  if (!allocedMemory) {
+  void *allocatedMemory = malloc(memSize);
+  if (!allocatedMemory) {
     exit(-1);
   }

-  return allocedMemory;
+  return allocatedMemory;
 }

 + (BOOL)checkCppError:(const absl::Status &)status toError:(NSError *_Nullable *)error {
@@ -82,7 +82,7 @@ static NSString *const kExpectedErrorDomain = @"com.google.mediapipe.tasks";
   AssertEqualErrors(error, expectedError);
 }

-- (void)testInitWithImageSuceeds {
+- (void)testInitWithImageSucceeds {
   MPPImage *mppImage = [[MPPImage alloc] initWithUIImage:self.image error:nil];
   [self assertMPPImage:mppImage
          hasSourceType:MPPImageSourceTypeImage
@@ -109,7 +109,7 @@ static const float kKeypointErrorThreshold = 1e-2;

   NSError *error;
   MPPImage *mppImage = [self imageWithFileInfo:kCatImage];
-  MPPFaceDetectorResult *faceDetectorResult = [faceDetector detectInImage:mppImage error:&error];
+  MPPFaceDetectorResult *faceDetectorResult = [faceDetector detectImage:mppImage error:&error];
   XCTAssertNil(error);
   XCTAssertNotNil(faceDetectorResult);
   XCTAssertEqual(faceDetectorResult.detections.count, 0);

@@ -125,9 +125,9 @@ static const float kKeypointErrorThreshold = 1e-2;

   MPPImage *image = [self imageWithFileInfo:kPortraitImage];
   for (int i = 0; i < 3; i++) {
-    MPPFaceDetectorResult *faceDetectorResult = [faceDetector detectInVideoFrame:image
+    MPPFaceDetectorResult *faceDetectorResult = [faceDetector detectVideoFrame:image
                                                         timestampInMilliseconds:i
                                                                           error:nil];
     [self assertFaceDetectorResult:faceDetectorResult
          containsExpectedKeypoints:kPortraitExpectedKeypoints];
   }

@@ -141,9 +141,9 @@ static const float kKeypointErrorThreshold = 1e-2;

   MPPImage *image = [self imageWithFileInfo:kPortraitRotatedImage];
   for (int i = 0; i < 3; i++) {
-    MPPFaceDetectorResult *faceDetectorResult = [faceDetector detectInVideoFrame:image
+    MPPFaceDetectorResult *faceDetectorResult = [faceDetector detectVideoFrame:image
                                                         timestampInMilliseconds:i
                                                                           error:nil];
     [self assertFaceDetectorResult:faceDetectorResult
          containsExpectedKeypoints:kPortraitRotatedExpectedKeypoints];
   }

@@ -181,7 +181,7 @@ static const float kKeypointErrorThreshold = 1e-2;
   };

   for (int i = 0; i < iterationCount; i++) {
-    XCTAssertTrue([faceDetector detectAsyncInImage:image timestampInMilliseconds:i error:nil]);
+    XCTAssertTrue([faceDetector detectAsyncImage:image timestampInMilliseconds:i error:nil]);
   }

   NSTimeInterval timeout = 0.5f;

@@ -205,10 +205,10 @@ static const float kKeypointErrorThreshold = 1e-2;
   };

   MPPImage *image = [self imageWithFileInfo:kPortraitImage];
-  XCTAssertTrue([faceDetector detectAsyncInImage:image timestampInMilliseconds:1 error:nil]);
+  XCTAssertTrue([faceDetector detectAsyncImage:image timestampInMilliseconds:1 error:nil]);

   NSError *error;
-  XCTAssertFalse([faceDetector detectAsyncInImage:image timestampInMilliseconds:0 error:&error]);
+  XCTAssertFalse([faceDetector detectAsyncImage:image timestampInMilliseconds:0 error:&error]);

   NSError *expectedError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -274,9 +274,9 @@ static const float kKeypointErrorThreshold = 1e-2;
   MPPImage *image = [self imageWithFileInfo:kPortraitImage];

   NSError *liveStreamApiCallError;
-  XCTAssertFalse([faceDetector detectAsyncInImage:image
+  XCTAssertFalse([faceDetector detectAsyncImage:image
                           timestampInMilliseconds:0
                                             error:&liveStreamApiCallError]);

   NSError *expectedLiveStreamApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -288,9 +288,9 @@ static const float kKeypointErrorThreshold = 1e-2;
   AssertEqualErrors(liveStreamApiCallError, expectedLiveStreamApiCallError);

   NSError *videoApiCallError;
-  XCTAssertFalse([faceDetector detectInVideoFrame:image
+  XCTAssertFalse([faceDetector detectVideoFrame:image
                           timestampInMilliseconds:0
                                             error:&videoApiCallError]);

   NSError *expectedVideoApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -312,9 +312,9 @@ static const float kKeypointErrorThreshold = 1e-2;
   MPPImage *image = [self imageWithFileInfo:kPortraitImage];

   NSError *liveStreamApiCallError;
-  XCTAssertFalse([faceDetector detectAsyncInImage:image
+  XCTAssertFalse([faceDetector detectAsyncImage:image
                           timestampInMilliseconds:0
                                             error:&liveStreamApiCallError]);

   NSError *expectedLiveStreamApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -326,7 +326,7 @@ static const float kKeypointErrorThreshold = 1e-2;
   AssertEqualErrors(liveStreamApiCallError, expectedLiveStreamApiCallError);

   NSError *imageApiCallError;
-  XCTAssertFalse([faceDetector detectInImage:image error:&imageApiCallError]);
+  XCTAssertFalse([faceDetector detectImage:image error:&imageApiCallError]);

   NSError *expectedImageApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -350,7 +350,7 @@ static const float kKeypointErrorThreshold = 1e-2;
   MPPImage *image = [self imageWithFileInfo:kPortraitImage];

   NSError *imageApiCallError;
-  XCTAssertFalse([faceDetector detectInImage:image error:&imageApiCallError]);
+  XCTAssertFalse([faceDetector detectImage:image error:&imageApiCallError]);

   NSError *expectedImageApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -362,9 +362,9 @@ static const float kKeypointErrorThreshold = 1e-2;
   AssertEqualErrors(imageApiCallError, expectedImageApiCallError);

   NSError *videoApiCallError;
-  XCTAssertFalse([faceDetector detectInVideoFrame:image
+  XCTAssertFalse([faceDetector detectVideoFrame:image
                           timestampInMilliseconds:0
                                             error:&videoApiCallError]);

   NSError *expectedVideoApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -407,7 +407,7 @@ static const float kKeypointErrorThreshold = 1e-2;

   MPPImage *image = [self imageWithFileInfo:kPortraitImage];
   for (int i = 0; i < iterationCount; i++) {
-    XCTAssertTrue([faceDetector detectAsyncInImage:image timestampInMilliseconds:i error:nil]);
+    XCTAssertTrue([faceDetector detectAsyncImage:image timestampInMilliseconds:i error:nil]);
   }

   NSTimeInterval timeout = 0.5f;

@@ -503,7 +503,7 @@ static const float kKeypointErrorThreshold = 1e-2;
          usingFaceDetector:(MPPFaceDetector *)faceDetector
  containsExpectedKeypoints:(NSArray<NSArray *> *)expectedKeypoints {
   NSError *error;
-  MPPFaceDetectorResult *faceDetectorResult = [faceDetector detectInImage:mppImage error:&error];
+  MPPFaceDetectorResult *faceDetectorResult = [faceDetector detectImage:mppImage error:&error];
   XCTAssertNil(error);
   XCTAssertNotNil(faceDetectorResult);
   [self assertFaceDetectorResult:faceDetectorResult containsExpectedKeypoints:expectedKeypoints];
@@ -137,8 +137,8 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;

   NSError *error;
   MPPImage *mppImage = [self imageWithFileInfo:kCatImage];
-  MPPFaceLandmarkerResult *faceLandmarkerResult = [faceLandmarker detectInImage:mppImage
+  MPPFaceLandmarkerResult *faceLandmarkerResult = [faceLandmarker detectImage:mppImage
                                                                           error:&error];
   XCTAssertNil(error);
   XCTAssertNotNil(faceLandmarkerResult);
   XCTAssertEqualObjects(faceLandmarkerResult.faceLandmarks, [NSArray array]);

@@ -158,9 +158,9 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
   NSArray<MPPNormalizedLandmark *> *expectedLandmarks =
       [MPPFaceLandmarkerTests expectedLandmarksFromFileInfo:kPortraitExpectedLandmarksName];
   for (int i = 0; i < 3; i++) {
-    MPPFaceLandmarkerResult *faceLandmarkerResult = [faceLandmarker detectInVideoFrame:image
+    MPPFaceLandmarkerResult *faceLandmarkerResult = [faceLandmarker detectVideoFrame:image
                                                             timestampInMilliseconds:i
                                                                               error:nil];
     [self assertFaceLandmarkerResult:faceLandmarkerResult
            containsExpectedLandmarks:expectedLandmarks
                  expectedBlendshapes:NULL

@@ -200,7 +200,7 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
   };

   for (int i = 0; i < iterationCount; i++) {
-    XCTAssertTrue([faceLandmarker detectAsyncInImage:image timestampInMilliseconds:i error:nil]);
+    XCTAssertTrue([faceLandmarker detectAsyncImage:image timestampInMilliseconds:i error:nil]);
   }

   NSTimeInterval timeout = 0.5f;

@@ -224,10 +224,10 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
   };

   MPPImage *image = [self imageWithFileInfo:kPortraitImage];
-  XCTAssertTrue([faceLandmarker detectAsyncInImage:image timestampInMilliseconds:1 error:nil]);
+  XCTAssertTrue([faceLandmarker detectAsyncImage:image timestampInMilliseconds:1 error:nil]);

   NSError *error;
-  XCTAssertFalse([faceLandmarker detectAsyncInImage:image timestampInMilliseconds:0 error:&error]);
+  XCTAssertFalse([faceLandmarker detectAsyncImage:image timestampInMilliseconds:0 error:&error]);

   NSError *expectedError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -292,9 +292,9 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
   MPPImage *image = [self imageWithFileInfo:kPortraitImage];

   NSError *liveStreamAPICallError;
-  XCTAssertFalse([faceLandmarker detectAsyncInImage:image
+  XCTAssertFalse([faceLandmarker detectAsyncImage:image
                             timestampInMilliseconds:0
                                               error:&liveStreamAPICallError]);

   NSError *expectedLiveStreamAPICallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -306,9 +306,9 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
   AssertEqualErrors(liveStreamAPICallError, expectedLiveStreamAPICallError);

   NSError *videoAPICallError;
-  XCTAssertFalse([faceLandmarker detectInVideoFrame:image
+  XCTAssertFalse([faceLandmarker detectVideoFrame:image
                             timestampInMilliseconds:0
                                               error:&videoAPICallError]);

   NSError *expectedVideoAPICallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -329,9 +329,9 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;

   MPPImage *image = [self imageWithFileInfo:kPortraitImage];
   NSError *liveStreamAPICallError;
-  XCTAssertFalse([faceLandmarker detectAsyncInImage:image
+  XCTAssertFalse([faceLandmarker detectAsyncImage:image
                             timestampInMilliseconds:0
                                               error:&liveStreamAPICallError]);

   NSError *expectedLiveStreamAPICallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -343,7 +343,7 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
   AssertEqualErrors(liveStreamAPICallError, expectedLiveStreamAPICallError);

   NSError *imageAPICallError;
-  XCTAssertFalse([faceLandmarker detectInImage:image error:&imageAPICallError]);
+  XCTAssertFalse([faceLandmarker detectImage:image error:&imageAPICallError]);

   NSError *expectedImageAPICallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -365,7 +365,7 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
   MPPImage *image = [self imageWithFileInfo:kPortraitImage];

   NSError *imageAPICallError;
-  XCTAssertFalse([faceLandmarker detectInImage:image error:&imageAPICallError]);
+  XCTAssertFalse([faceLandmarker detectImage:image error:&imageAPICallError]);

   NSError *expectedImageAPICallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -377,9 +377,9 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
   AssertEqualErrors(imageAPICallError, expectedImageAPICallError);

   NSError *videoAPICallError;
-  XCTAssertFalse([faceLandmarker detectInVideoFrame:image
+  XCTAssertFalse([faceLandmarker detectVideoFrame:image
                             timestampInMilliseconds:0
                                               error:&videoAPICallError]);

   NSError *expectedVideoAPICallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -484,7 +484,7 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
     }
   }

-  if (expectedTransformationMatrix == NULL) {
+  if (expectedTransformationMatrix == nullptr) {
     XCTAssertEqualObjects(faceLandmarkerResult.facialTransformationMatrixes, [NSArray array]);
   } else {
     MPPTransformMatrix *actualTransformationMatrix =

@@ -539,8 +539,8 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
   MPPImage *mppImage = [self imageWithFileInfo:fileInfo];

   NSError *error;
-  MPPFaceLandmarkerResult *faceLandmarkerResult = [faceLandmarker detectInImage:mppImage
+  MPPFaceLandmarkerResult *faceLandmarkerResult = [faceLandmarker detectImage:mppImage
                                                                           error:&error];
   XCTAssertNil(error);
   XCTAssertNotNil(faceLandmarkerResult);
@@ -25,7 +25,7 @@ TFL_DISABLED_SANITIZER_TAGS = [
 ]

 objc_library(
-    name = "MPPFaceLandmarkeResultHelpersTestLibary",
+    name = "MPPFaceLandmarkeResultHelpersTestLibrary",
     testonly = 1,
     srcs = ["sources/MPPFaceLandmarkerResult+HelpersTests.mm"],
     copts = [

@@ -50,6 +50,6 @@ ios_unit_test(
     runner = tflite_ios_lab_runner("IOS_LATEST"),
     tags = TFL_DEFAULT_TAGS + TFL_DISABLED_SANITIZER_TAGS,
     deps = [
-        ":MPPFaceLandmarkeResultHelpersTestLibary",
+        ":MPPFaceLandmarkeResultHelpersTestLibrary",
     ],
 )
@@ -208,10 +208,10 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   return image;
 }

-- (MPPHandLandmarkerResult *)detectInImageWithFileInfo:(ResourceFileInfo *)imageFileInfo
+- (MPPHandLandmarkerResult *)detectImageWithFileInfo:(ResourceFileInfo *)imageFileInfo
                                   usingHandLandmarker:(MPPHandLandmarker *)handLandmarker {
   MPPImage *mppImage = [self imageWithFileInfo:imageFileInfo];
-  MPPHandLandmarkerResult *handLandmarkerResult = [handLandmarker detectInImage:mppImage error:nil];
+  MPPHandLandmarkerResult *handLandmarkerResult = [handLandmarker detectImage:mppImage error:nil];
   XCTAssertNotNil(handLandmarkerResult);

   return handLandmarkerResult;

@@ -221,8 +221,8 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
                     usingHandLandmarker:(MPPHandLandmarker *)handLandmarker
 approximatelyEqualsHandLandmarkerResult:
     (MPPHandLandmarkerResult *)expectedHandLandmarkerResult {
-  MPPHandLandmarkerResult *handLandmarkerResult = [self detectInImageWithFileInfo:fileInfo
+  MPPHandLandmarkerResult *handLandmarkerResult = [self detectImageWithFileInfo:fileInfo
                                                            usingHandLandmarker:handLandmarker];
   [self assertHandLandmarkerResult:handLandmarkerResult
       isApproximatelyEqualToExpectedResult:expectedHandLandmarkerResult];
 }

@@ -249,8 +249,8 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPHandLandmarker *handLandmarker =
       [self createHandLandmarkerWithOptionsSucceeds:handLandmarkerOptions];

-  MPPHandLandmarkerResult *handLandmarkerResult = [self detectInImageWithFileInfo:kNoHandsImage
+  MPPHandLandmarkerResult *handLandmarkerResult = [self detectImageWithFileInfo:kNoHandsImage
                                                            usingHandLandmarker:handLandmarker];
   AssertHandLandmarkerResultIsEmpty(handLandmarkerResult);
 }

@@ -264,8 +264,8 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPHandLandmarker *handLandmarker =
       [self createHandLandmarkerWithOptionsSucceeds:handLandmarkerOptions];

-  MPPHandLandmarkerResult *handLandmarkerResult = [self detectInImageWithFileInfo:kTwoHandsImage
+  MPPHandLandmarkerResult *handLandmarkerResult = [self detectImageWithFileInfo:kTwoHandsImage
                                                            usingHandLandmarker:handLandmarker];

   XCTAssertTrue(handLandmarkerResult.handedness.count == numHands);
 }

@@ -280,7 +280,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPImage *mppImage = [self imageWithFileInfo:kPointingUpRotatedImage
                                    orientation:UIImageOrientationRight];

-  MPPHandLandmarkerResult *handLandmarkerResult = [handLandmarker detectInImage:mppImage error:nil];
+  MPPHandLandmarkerResult *handLandmarkerResult = [handLandmarker detectImage:mppImage error:nil];

   [self assertHandLandmarkerResult:handLandmarkerResult
       isApproximatelyEqualToExpectedResult:[MPPHandLandmarkerTests

@@ -339,9 +339,9 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPImage *image = [self imageWithFileInfo:kThumbUpImage];

   NSError *liveStreamApiCallError;
-  XCTAssertFalse([handLandmarker detectAsyncInImage:image
+  XCTAssertFalse([handLandmarker detectAsyncImage:image
                             timestampInMilliseconds:0
                                               error:&liveStreamApiCallError]);

   NSError *expectedLiveStreamApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -354,9 +354,9 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   AssertEqualErrors(liveStreamApiCallError, expectedLiveStreamApiCallError);

   NSError *videoApiCallError;
-  XCTAssertFalse([handLandmarker detectInVideoFrame:image
+  XCTAssertFalse([handLandmarker detectVideoFrame:image
                             timestampInMilliseconds:0
                                               error:&videoApiCallError]);

   NSError *expectedVideoApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -378,9 +378,9 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPImage *image = [self imageWithFileInfo:kThumbUpImage];

   NSError *liveStreamApiCallError;
-  XCTAssertFalse([handLandmarker detectAsyncInImage:image
+  XCTAssertFalse([handLandmarker detectAsyncImage:image
                             timestampInMilliseconds:0
                                               error:&liveStreamApiCallError]);

   NSError *expectedLiveStreamApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -393,7 +393,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   AssertEqualErrors(liveStreamApiCallError, expectedLiveStreamApiCallError);

   NSError *imageApiCallError;
-  XCTAssertFalse([handLandmarker detectInImage:image error:&imageApiCallError]);
+  XCTAssertFalse([handLandmarker detectImage:image error:&imageApiCallError]);

   NSError *expectedImageApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -416,7 +416,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPImage *image = [self imageWithFileInfo:kThumbUpImage];

   NSError *imageApiCallError;
-  XCTAssertFalse([handLandmarker detectInImage:image error:&imageApiCallError]);
+  XCTAssertFalse([handLandmarker detectImage:image error:&imageApiCallError]);

   NSError *expectedImageApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -428,9 +428,9 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   AssertEqualErrors(imageApiCallError, expectedImageApiCallError);

   NSError *videoApiCallError;
-  XCTAssertFalse([handLandmarker detectInVideoFrame:image
+  XCTAssertFalse([handLandmarker detectVideoFrame:image
                             timestampInMilliseconds:0
                                               error:&videoApiCallError]);

   NSError *expectedVideoApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -452,9 +452,9 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPImage *image = [self imageWithFileInfo:kThumbUpImage];

   for (int i = 0; i < 3; i++) {
-    MPPHandLandmarkerResult *handLandmarkerResult = [handLandmarker detectInVideoFrame:image
+    MPPHandLandmarkerResult *handLandmarkerResult = [handLandmarker detectVideoFrame:image
                                                             timestampInMilliseconds:i
                                                                               error:nil];
     [self assertHandLandmarkerResult:handLandmarkerResult
         isApproximatelyEqualToExpectedResult:[MPPHandLandmarkerTests thumbUpHandLandmarkerResult]];
   }

@@ -480,10 +480,10 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";

   MPPImage *image = [self imageWithFileInfo:kThumbUpImage];

-  XCTAssertTrue([handLandmarker detectAsyncInImage:image timestampInMilliseconds:1 error:nil]);
+  XCTAssertTrue([handLandmarker detectAsyncImage:image timestampInMilliseconds:1 error:nil]);

   NSError *error;
-  XCTAssertFalse([handLandmarker detectAsyncInImage:image timestampInMilliseconds:0 error:&error]);
+  XCTAssertFalse([handLandmarker detectAsyncImage:image timestampInMilliseconds:0 error:&error]);

   NSError *expectedError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -533,7 +533,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPImage *image = [self imageWithFileInfo:kThumbUpImage];

   for (int i = 0; i < iterationCount; i++) {
-    XCTAssertTrue([handLandmarker detectAsyncInImage:image timestampInMilliseconds:i error:nil]);
+    XCTAssertTrue([handLandmarker detectAsyncImage:image timestampInMilliseconds:i error:nil]);
   }

   NSTimeInterval timeout = 0.5f;
@@ -28,10 +28,10 @@ static const float scoreDifferenceTolerance = 0.02f;
 static NSString *const kLiveStreamTestsDictObjectDetectorKey = @"object_detector";
 static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";

 #define AssertEqualErrors(error, expectedError)               \
   XCTAssertNotNil(error);                                     \
   XCTAssertEqualObjects(error.domain, expectedError.domain);  \
   XCTAssertEqual(error.code, expectedError.code);             \
   XCTAssertEqualObjects(error.localizedDescription, expectedError.localizedDescription)

 #define AssertEqualCategories(category, expectedCategory, detectionIndex, categoryIndex) \

@@ -194,7 +194,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
                      usingObjectDetector:(MPPObjectDetector *)objectDetector
                               maxResults:(NSInteger)maxResults
               equalsObjectDetectorResult:(MPPObjectDetectorResult *)expectedObjectDetectorResult {
-  MPPObjectDetectorResult *ObjectDetectorResult = [objectDetector detectInImage:mppImage error:nil];
+  MPPObjectDetectorResult *ObjectDetectorResult = [objectDetector detectImage:mppImage error:nil];

   [self assertObjectDetectorResult:ObjectDetectorResult
            isEqualToExpectedResult:expectedObjectDetectorResult

@@ -495,9 +495,9 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPImage *image = [self imageWithFileInfo:kCatsAndDogsImage];

   NSError *liveStreamApiCallError;
-  XCTAssertFalse([objectDetector detectAsyncInImage:image
+  XCTAssertFalse([objectDetector detectAsyncImage:image
                             timestampInMilliseconds:0
                                               error:&liveStreamApiCallError]);

   NSError *expectedLiveStreamApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -510,9 +510,9 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   AssertEqualErrors(liveStreamApiCallError, expectedLiveStreamApiCallError);

   NSError *videoApiCallError;
-  XCTAssertFalse([objectDetector detectInVideoFrame:image
+  XCTAssertFalse([objectDetector detectVideoFrame:image
                             timestampInMilliseconds:0
                                               error:&videoApiCallError]);

   NSError *expectedVideoApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -533,9 +533,9 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPImage *image = [self imageWithFileInfo:kCatsAndDogsImage];

   NSError *liveStreamApiCallError;
-  XCTAssertFalse([objectDetector detectAsyncInImage:image
+  XCTAssertFalse([objectDetector detectAsyncImage:image
                             timestampInMilliseconds:0
                                               error:&liveStreamApiCallError]);

   NSError *expectedLiveStreamApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -548,7 +548,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   AssertEqualErrors(liveStreamApiCallError, expectedLiveStreamApiCallError);

   NSError *imageApiCallError;
-  XCTAssertFalse([objectDetector detectInImage:image error:&imageApiCallError]);
+  XCTAssertFalse([objectDetector detectImage:image error:&imageApiCallError]);

   NSError *expectedImageApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -571,7 +571,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPImage *image = [self imageWithFileInfo:kCatsAndDogsImage];

   NSError *imageApiCallError;
-  XCTAssertFalse([objectDetector detectInImage:image error:&imageApiCallError]);
+  XCTAssertFalse([objectDetector detectImage:image error:&imageApiCallError]);

   NSError *expectedImageApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -583,9 +583,9 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   AssertEqualErrors(imageApiCallError, expectedImageApiCallError);

   NSError *videoApiCallError;
-  XCTAssertFalse([objectDetector detectInVideoFrame:image
+  XCTAssertFalse([objectDetector detectVideoFrame:image
                             timestampInMilliseconds:0
                                               error:&videoApiCallError]);

   NSError *expectedVideoApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -610,9 +610,9 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPImage *image = [self imageWithFileInfo:kCatsAndDogsImage];

   for (int i = 0; i < 3; i++) {
-    MPPObjectDetectorResult *ObjectDetectorResult = [objectDetector detectInVideoFrame:image
+    MPPObjectDetectorResult *ObjectDetectorResult = [objectDetector detectVideoFrame:image
                                                             timestampInMilliseconds:i
                                                                               error:nil];

     [self assertObjectDetectorResult:ObjectDetectorResult
             isEqualToExpectedResult:

@@ -643,10 +643,10 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";

   MPPImage *image = [self imageWithFileInfo:kCatsAndDogsImage];

-  XCTAssertTrue([objectDetector detectAsyncInImage:image timestampInMilliseconds:1 error:nil]);
+  XCTAssertTrue([objectDetector detectAsyncImage:image timestampInMilliseconds:1 error:nil]);

   NSError *error;
-  XCTAssertFalse([objectDetector detectAsyncInImage:image timestampInMilliseconds:0 error:&error]);
+  XCTAssertFalse([objectDetector detectAsyncImage:image timestampInMilliseconds:0 error:&error]);

   NSError *expectedError =
       [NSError errorWithDomain:kExpectedErrorDomain

@@ -702,7 +702,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPImage *image = [self imageWithFileInfo:kCatsAndDogsImage];

   for (int i = 0; i < iterationCount; i++) {
-    XCTAssertTrue([objectDetector detectAsyncInImage:image timestampInMilliseconds:i error:nil]);
+    XCTAssertTrue([objectDetector detectAsyncImage:image timestampInMilliseconds:i error:nil]);
   }
NSTimeInterval timeout = 0.5f;
|
NSTimeInterval timeout = 0.5f;
|
||||||
|
|
|
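Note: a minimal sketch of how a caller would use the renamed image-mode API after this change; the module import, option setup, model path, and result fields shown are assumptions for illustration and are not part of this commit.

@import MediaPipeTasksVision;  // module name assumed

// Assumes a valid modelPath and an MPPImage named image already exist.
MPPObjectDetectorOptions *options = [[MPPObjectDetectorOptions alloc] init];
options.baseOptions.modelAssetPath = modelPath;

NSError *error = nil;
MPPObjectDetector *detector = [[MPPObjectDetector alloc] initWithOptions:options error:&error];

// Renamed in this commit from detectInImage:error: to detectImage:error:.
MPPObjectDetectorResult *result = [detector detectImage:image error:&error];
for (MPPDetection *detection in result.detections) {
  NSLog(@"category: %@", detection.categories.firstObject.categoryName);
}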
@@ -100,8 +100,8 @@ NS_SWIFT_NAME(FaceDetector)
  * system, i.e. in `[0,image_width) x [0,image_height)`, which are the dimensions of the underlying
  * image data.
  */
-- (nullable MPPFaceDetectorResult *)detectInImage:(MPPImage *)image
+- (nullable MPPFaceDetectorResult *)detectImage:(MPPImage *)image
                                             error:(NSError **)error NS_SWIFT_NAME(detect(image:));
 
 /**
  * Performs face detection on the provided video frame of type `MPImage` using the whole

@@ -127,9 +127,9 @@ NS_SWIFT_NAME(FaceDetector)
  * system, i.e. in `[0,image_width) x [0,image_height)`, which are the dimensions of the underlying
  * image data.
  */
-- (nullable MPPFaceDetectorResult *)detectInVideoFrame:(MPPImage *)image
+- (nullable MPPFaceDetectorResult *)detectVideoFrame:(MPPImage *)image
                                timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                                                  error:(NSError **)error
     NS_SWIFT_NAME(detect(videoFrame:timestampInMilliseconds:));
 
 /**

@@ -165,7 +165,7 @@ NS_SWIFT_NAME(FaceDetector)
  *
  * @return `true` if the image was sent to the task successfully, otherwise `false`.
  */
-- (BOOL)detectAsyncInImage:(MPPImage *)image
+- (BOOL)detectAsyncImage:(MPPImage *)image
    timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                      error:(NSError **)error
     NS_SWIFT_NAME(detectAsync(image:timestampInMilliseconds:));
@@ -130,13 +130,13 @@ static NSString *const kTaskName = @"faceDetector";
   return [self initWithOptions:options error:error];
 }
 
-- (nullable MPPFaceDetectorResult *)detectInImage:(MPPImage *)image error:(NSError **)error {
+- (nullable MPPFaceDetectorResult *)detectImage:(MPPImage *)image error:(NSError **)error {
   std::optional<PacketMap> outputPacketMap = [_visionTaskRunner processImage:image error:error];
 
   return [MPPFaceDetector faceDetectorResultWithOptionalOutputPacketMap:outputPacketMap];
 }
 
-- (nullable MPPFaceDetectorResult *)detectInVideoFrame:(MPPImage *)image
+- (nullable MPPFaceDetectorResult *)detectVideoFrame:(MPPImage *)image
                                timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                                                  error:(NSError **)error {
   std::optional<PacketMap> outputPacketMap =

@@ -147,7 +147,7 @@ static NSString *const kTaskName = @"faceDetector";
   return [MPPFaceDetector faceDetectorResultWithOptionalOutputPacketMap:outputPacketMap];
 }
 
-- (BOOL)detectAsyncInImage:(MPPImage *)image
+- (BOOL)detectAsyncImage:(MPPImage *)image
    timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                      error:(NSError **)error {
   return [_visionTaskRunner processLiveStreamImage:image
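Note: a hedged sketch of the live-stream surface after the rename; the running-mode and delegate configuration are assumed to happen elsewhere, and frameTimestampMs is an illustrative variable, not part of this diff.

// faceDetector is assumed to be an MPPFaceDetector already configured for
// live-stream use (running mode and result delegate set during init, omitted
// here); frameTimestampMs is an assumed, monotonically increasing timestamp.
NSError *error = nil;
BOOL sent = [faceDetector detectAsyncImage:image
                   timestampInMilliseconds:frameTimestampMs
                                     error:&error];
if (!sent) {
  NSLog(@"detectAsyncImage failed: %@", error.localizedDescription);
}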
@@ -71,8 +71,8 @@ NS_SWIFT_NAME(FaceLandmarker)
  * @return An `FaceLandmarkerResult` that contains a list of landmarks. `nil` if there is an error
  * in initializing the face landmaker.
  */
-- (nullable MPPFaceLandmarkerResult *)detectInImage:(MPPImage *)image
+- (nullable MPPFaceLandmarkerResult *)detectImage:(MPPImage *)image
                                               error:(NSError **)error NS_SWIFT_NAME(detect(image:));
 
 /**
  * Performs face landmark detection on the provided video frame of type `MPImage` using the whole

@@ -95,9 +95,9 @@ NS_SWIFT_NAME(FaceLandmarker)
  * @return An `FaceLandmarkerResult` that contains a list of landmarks. `nil` if there is an
  * error in initializing the face landmaker.
  */
-- (nullable MPPFaceLandmarkerResult *)detectInVideoFrame:(MPPImage *)image
+- (nullable MPPFaceLandmarkerResult *)detectVideoFrame:(MPPImage *)image
                                  timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                                                    error:(NSError **)error
     NS_SWIFT_NAME(detect(videoFrame:timestampInMilliseconds:));
 
 /**

@@ -132,7 +132,7 @@ NS_SWIFT_NAME(FaceLandmarker)
  *
  * @return `true` if the image was sent to the task successfully, otherwise `false`.
  */
-- (BOOL)detectAsyncInImage:(MPPImage *)image
+- (BOOL)detectAsyncImage:(MPPImage *)image
    timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                      error:(NSError **)error
     NS_SWIFT_NAME(detectAsync(image:timestampInMilliseconds:));
@@ -154,15 +154,15 @@ static NSString *const kTaskName = @"faceLandmarker";
   return [self initWithOptions:options error:error];
 }
 
-- (nullable MPPFaceLandmarkerResult *)detectInImage:(MPPImage *)image error:(NSError **)error {
+- (nullable MPPFaceLandmarkerResult *)detectImage:(MPPImage *)image error:(NSError **)error {
   std::optional<PacketMap> outputPacketMap = [_visionTaskRunner processImage:image error:error];
 
   return [MPPFaceLandmarker faceLandmarkerResultWithOptionalOutputPacketMap:outputPacketMap];
 }
 
-- (nullable MPPFaceLandmarkerResult *)detectInVideoFrame:(MPPImage *)image
+- (nullable MPPFaceLandmarkerResult *)detectVideoFrame:(MPPImage *)image
                                  timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                                                    error:(NSError **)error {
   std::optional<PacketMap> outputPacketMap =
       [_visionTaskRunner processVideoFrame:image
                    timestampInMilliseconds:timestampInMilliseconds

@@ -171,7 +171,7 @@ static NSString *const kTaskName = @"faceLandmarker";
   return [MPPFaceLandmarker faceLandmarkerResultWithOptionalOutputPacketMap:outputPacketMap];
 }
 
-- (BOOL)detectAsyncInImage:(MPPImage *)image
+- (BOOL)detectAsyncImage:(MPPImage *)image
    timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                      error:(NSError **)error {
   return [_visionTaskRunner processLiveStreamImage:image
@@ -68,7 +68,7 @@ NS_SWIFT_NAME(FaceStylizer)
  * @return A `FaceStylizerResult` that contains the stylized image of the most visible face. The
  * returned image is copied. The stylized output image size is the same as the model output
  * size. The `stylizedImage` of the `FaceStylizerResult` is `nil` if there is no face detected in
- * the imput image. `FaceStylizerResult` is `nil` if there is an error in initializing the face
+ * the input image. `FaceStylizerResult` is `nil` if there is an error in initializing the face
  * stylizer.
  */
 - (nullable MPPFaceStylizerResult *)stylizeImage:(MPPImage *)image

@@ -92,7 +92,7 @@ NS_SWIFT_NAME(FaceStylizer)
  *
  * @param image The `MPImage` on which face stylization is to be performed.
  * @param completionHandler A block to be invoked with the results of performing face stylization on
- * the imput image. The block takes two arguments, the optional `FaceStylizerResult` that contains
+ * the input image. The block takes two arguments, the optional `FaceStylizerResult` that contains
  * the zero-copied stylized image if face stylization was successful and an optional error populated
  * upon failure. The lifetime of the stylized image is only guaranteed for the duration of the
  * block.
@@ -146,8 +146,8 @@ NS_SWIFT_NAME(HandLandmarker)
  * @return An `HandLandmarkerResult` object that contains the hand hand landmarks detection
  * results.
  */
-- (nullable MPPHandLandmarkerResult *)detectInImage:(MPPImage *)image
+- (nullable MPPHandLandmarkerResult *)detectImage:(MPPImage *)image
                                               error:(NSError **)error NS_SWIFT_NAME(detect(image:));
 
 /**
  * Performs hand landmarks detection on the provided video frame of type `MPImage` using the whole

@@ -176,9 +176,9 @@ NS_SWIFT_NAME(HandLandmarker)
  * @return An `HandLandmarkerResult` object that contains the hand hand landmarks detection
  * results.
  */
-- (nullable MPPHandLandmarkerResult *)detectInVideoFrame:(MPPImage *)image
+- (nullable MPPHandLandmarkerResult *)detectVideoFrame:(MPPImage *)image
                                  timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                                                    error:(NSError **)error
     NS_SWIFT_NAME(detect(videoFrame:timestampInMilliseconds:));
 
 /**

@@ -216,7 +216,7 @@ NS_SWIFT_NAME(HandLandmarker)
  *
  * @return `YES` if the image was sent to the task successfully, otherwise `NO`.
  */
-- (BOOL)detectAsyncInImage:(MPPImage *)image
+- (BOOL)detectAsyncImage:(MPPImage *)image
    timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                      error:(NSError **)error
     NS_SWIFT_NAME(detectAsync(image:timestampInMilliseconds:));
@@ -140,13 +140,13 @@ static NSString *const kTaskName = @"handLandmarker";
   return [self initWithOptions:options error:error];
 }
 
-- (nullable MPPHandLandmarkerResult *)detectInImage:(MPPImage *)image error:(NSError **)error {
+- (nullable MPPHandLandmarkerResult *)detectImage:(MPPImage *)image error:(NSError **)error {
   std::optional<PacketMap> outputPacketMap = [_visionTaskRunner processImage:image error:error];
 
   return [MPPHandLandmarker handLandmarkerResultWithOptionalOutputPacketMap:outputPacketMap];
 }
 
-- (nullable MPPHandLandmarkerResult *)detectInVideoFrame:(MPPImage *)image
+- (nullable MPPHandLandmarkerResult *)detectVideoFrame:(MPPImage *)image
                                  timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                                                    error:(NSError **)error {
   std::optional<PacketMap> outputPacketMap =

@@ -157,7 +157,7 @@ static NSString *const kTaskName = @"handLandmarker";
   return [MPPHandLandmarker handLandmarkerResultWithOptionalOutputPacketMap:outputPacketMap];
 }
 
-- (BOOL)detectAsyncInImage:(MPPImage *)image
+- (BOOL)detectAsyncImage:(MPPImage *)image
    timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                      error:(NSError **)error {
   return [_visionTaskRunner processLiveStreamImage:image
@@ -112,8 +112,8 @@ NS_SWIFT_NAME(ObjectDetector)
  * system, i.e. in `[0,image_width) x [0,image_height)`, which are the dimensions of the underlying
  * image data.
  */
-- (nullable MPPObjectDetectorResult *)detectInImage:(MPPImage *)image
+- (nullable MPPObjectDetectorResult *)detectImage:(MPPImage *)image
                                               error:(NSError **)error NS_SWIFT_NAME(detect(image:));
 
 /**
  * Performs object detection on the provided video frame of type `MPImage` using the whole

@@ -138,9 +138,9 @@ NS_SWIFT_NAME(ObjectDetector)
  * system, i.e. in `[0,image_width) x [0,image_height)`, which are the dimensions of the underlying
  * image data.
  */
-- (nullable MPPObjectDetectorResult *)detectInVideoFrame:(MPPImage *)image
+- (nullable MPPObjectDetectorResult *)detectVideoFrame:(MPPImage *)image
                                  timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                                                    error:(NSError **)error
     NS_SWIFT_NAME(detect(videoFrame:timestampInMilliseconds:));
 
 /**

@@ -176,7 +176,7 @@ NS_SWIFT_NAME(ObjectDetector)
  *
  * @return `true` if the image was sent to the task successfully, otherwise `false`.
  */
-- (BOOL)detectAsyncInImage:(MPPImage *)image
+- (BOOL)detectAsyncImage:(MPPImage *)image
    timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                      error:(NSError **)error
     NS_SWIFT_NAME(detectAsync(image:timestampInMilliseconds:));
@@ -128,13 +128,13 @@ static NSString *const kTaskName = @"objectDetector";
   return [self initWithOptions:options error:error];
 }
 
-- (nullable MPPObjectDetectorResult *)detectInImage:(MPPImage *)image error:(NSError **)error {
+- (nullable MPPObjectDetectorResult *)detectImage:(MPPImage *)image error:(NSError **)error {
   std::optional<PacketMap> outputPacketMap = [_visionTaskRunner processImage:image error:error];
 
   return [MPPObjectDetector objectDetectorResultWithOptionalOutputPacketMap:outputPacketMap];
 }
 
-- (nullable MPPObjectDetectorResult *)detectInVideoFrame:(MPPImage *)image
+- (nullable MPPObjectDetectorResult *)detectVideoFrame:(MPPImage *)image
                                  timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                                                    error:(NSError **)error {
   std::optional<PacketMap> outputPacketMap =

@@ -145,7 +145,7 @@ static NSString *const kTaskName = @"objectDetector";
   return [MPPObjectDetector objectDetectorResultWithOptionalOutputPacketMap:outputPacketMap];
 }
 
-- (BOOL)detectAsyncInImage:(MPPImage *)image
+- (BOOL)detectAsyncImage:(MPPImage *)image
    timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                      error:(NSError **)error {
   return [_visionTaskRunner processLiveStreamImage:image
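Note: a hedged sketch of video-mode usage with the renamed detectVideoFrame: call, mirroring the test loop above; the video running-mode setup, frameCount, and the reuse of the test image as a stand-in frame source are assumptions, not code from this commit.

// objectDetector is assumed to be an MPPObjectDetector created in video running
// mode elsewhere; frameCount is an illustrative variable.
for (NSInteger i = 0; i < frameCount; i++) {
  MPPImage *frame = [self imageWithFileInfo:kCatsAndDogsImage];  // stand-in frame source
  NSError *error = nil;
  MPPObjectDetectorResult *result = [objectDetector detectVideoFrame:frame
                                             timestampInMilliseconds:i
                                                               error:&error];
  if (!result) {
    NSLog(@"frame %ld failed: %@", (long)i, error.localizedDescription);
    continue;
  }
  // result.detections holds the detections for this frame.
}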
@@ -25,7 +25,7 @@ import com.google.protobuf.ByteString;
 
 /**
  * MediaPipe Tasks options base class. Any MediaPipe task-specific options class should extend
- * {@link TaskOptions} and implement exactly one of converTo*Proto() methods.
+ * {@link TaskOptions} and implement exactly one of convertTo*Proto() methods.
  */
 public abstract class TaskOptions {
   /**
@@ -739,7 +739,7 @@ public final class ImageSegmenter extends BaseVisionTaskApi {
   @AutoValue
   public abstract static class SegmentationOptions {
 
-    /** Builder fo {@link SegmentationOptions} */
+    /** Builder for {@link SegmentationOptions} */
     @AutoValue.Builder
     public abstract static class Builder {
 
@@ -289,7 +289,7 @@ public class GestureRecognizerTest {
   }
 
   @Test
-  public void recognize_successWithPreferAlowListThanDenyList() throws Exception {
+  public void recognize_successWithPreferAllowListThanDenyList() throws Exception {
     GestureRecognizerOptions options =
         GestureRecognizerOptions.builder()
             .setBaseOptions(
@@ -736,7 +736,7 @@ class MetadataWriter(object):
         content is used to interpret the metadata content.
 
     Returns:
-      A tuple of (model_with_metadata_in_bytes, metdata_json_content)
+      A tuple of (model_with_metadata_in_bytes, metadata_json_content)
     """
     # Populates metadata and associated files into TFLite model buffer.
     populator = metadata.MetadataPopulator.with_model_buffer(self._model_buffer)

@@ -840,6 +840,6 @@ class MetadataWriterBase:
         content is used to interpret the metadata content.
 
     Returns:
-      A tuple of (model_with_metadata_in_bytes, metdata_json_content)
+      A tuple of (model_with_metadata_in_bytes, metadata_json_content)
     """
     return self.writer.populate()
@@ -140,7 +140,7 @@ export class AudioClassifier extends AudioTaskRunner<AudioClassifierResult[]> {
    * @param sampleRate The sample rate in Hz of the provided audio data. If not
    *     set, defaults to the sample rate set via `setDefaultSampleRate()` or
    *     `48000` if no custom default was set.
-   * @return The classification result of the audio datas
+   * @return The classification result of the audio data
    */
  classify(audioData: Float32Array, sampleRate?: number):
      AudioClassifierResult[] {
@@ -9,7 +9,7 @@
     "import": "./__NAME___bundle.mjs",
     "require": "./__NAME___bundle.cjs",
     "default": "./__NAME___bundle.mjs",
-    "types": "./__NAME___.d.ts"
+    "types": "./__TYPES__"
   },
   "author": "mediapipe@google.com",
   "license": "Apache-2.0",
@@ -342,7 +342,7 @@ export class GestureRecognizer extends VisionTaskRunner {
    * Converts raw data into a landmark, and adds it to our worldLandmarks
    * list.
    */
-  private adddJsWorldLandmarks(data: Uint8Array[]): void {
+  private addJsWorldLandmarks(data: Uint8Array[]): void {
     for (const binaryProto of data) {
       const handWorldLandmarksProto =
           LandmarkList.deserializeBinary(binaryProto);

@@ -396,7 +396,7 @@ export class GestureRecognizer extends VisionTaskRunner {
 
     this.graphRunner.attachProtoVectorListener(
         WORLD_LANDMARKS_STREAM, (binaryProto, timestamp) => {
-          this.adddJsWorldLandmarks(binaryProto);
+          this.addJsWorldLandmarks(binaryProto);
           this.setLatestOutputTimestamp(timestamp);
         });
     this.graphRunner.attachEmptyPacketListener(
@@ -277,7 +277,7 @@ export class HandLandmarker extends VisionTaskRunner {
    * Converts raw data into a world landmark, and adds it to our worldLandmarks
    * list.
    */
-  private adddJsWorldLandmarks(data: Uint8Array[]): void {
+  private addJsWorldLandmarks(data: Uint8Array[]): void {
     for (const binaryProto of data) {
       const handWorldLandmarksProto =
           LandmarkList.deserializeBinary(binaryProto);

@@ -322,7 +322,7 @@ export class HandLandmarker extends VisionTaskRunner {
 
     this.graphRunner.attachProtoVectorListener(
         WORLD_LANDMARKS_STREAM, (binaryProto, timestamp) => {
-          this.adddJsWorldLandmarks(binaryProto);
+          this.addJsWorldLandmarks(binaryProto);
           this.setLatestOutputTimestamp(timestamp);
         });
     this.graphRunner.attachEmptyPacketListener(
@@ -403,7 +403,7 @@ export class PoseLandmarker extends VisionTaskRunner {
    * Converts raw data into a world landmark, and adds it to our
    * worldLandmarks list.
    */
-  private adddJsWorldLandmarks(data: Uint8Array[]): void {
+  private addJsWorldLandmarks(data: Uint8Array[]): void {
     this.worldLandmarks = [];
     for (const binaryProto of data) {
       const poseWorldLandmarksProto =

@@ -452,7 +452,7 @@ export class PoseLandmarker extends VisionTaskRunner {
 
     this.graphRunner.attachProtoVectorListener(
         WORLD_LANDMARKS_STREAM, (binaryProto, timestamp) => {
-          this.adddJsWorldLandmarks(binaryProto);
+          this.addJsWorldLandmarks(binaryProto);
           this.setLatestOutputTimestamp(timestamp);
         });
     this.graphRunner.attachEmptyPacketListener(
third_party/BUILD (vendored, 2 lines changed)

@@ -45,7 +45,7 @@ cc_library(
             "@com_github_glog_glog//:glog",
         ],
         "//mediapipe:windows": [
-            "@com_github_glog_glog//:glog",
+            "@com_github_glog_glog_windows//:glog",
        ],
        "//conditions:default": [
            "@com_github_glog_glog//:glog",
third_party/com_github_glog_glog_windows_patch.diff (vendored, new file, 26 lines)

@@ -0,0 +1,26 @@
+diff --git a/bazel/glog.bzl b/bazel/glog.bzl
+index dacd934..d7b3d78 100644
+--- a/bazel/glog.bzl
++++ b/bazel/glog.bzl
+@@ -53,7 +53,6 @@ def glog_library(namespace = "google", with_gflags = 1, **kwargs):
+    )
+
+    common_copts = [
+-        "-std=c++14",
+        "-DGLOG_BAZEL_BUILD",
+        # Inject a C++ namespace.
+        "-DGOOGLE_NAMESPACE='%s'" % namespace,
+@@ -145,7 +144,13 @@ def glog_library(namespace = "google", with_gflags = 1, **kwargs):
+        ],
+    })
+
++    c14_opts = ["-std=c++14"]
++    c17_opts = ["-std=c++17"]
++
+    final_lib_copts = select({
++        "@bazel_tools//src/conditions:windows": c17_opts,
++        "//conditions:default": c14_opts,
++    }) + select({
+        "@bazel_tools//src/conditions:windows": common_copts + windows_only_copts,
+        "@bazel_tools//src/conditions:darwin": common_copts + linux_or_darwin_copts + darwin_only_copts,
+        "@bazel_tools//src/conditions:freebsd": common_copts + linux_or_darwin_copts + freebsd_only_copts,