Merge branch 'master' into move-headers-script
commit 40503acdec

WORKSPACE (19 changes)
@@ -176,6 +176,25 @@ http_archive(
     ],
 )
 
+# 2023-06-05
+# This version of Glog is required for Windows support, but currently causes
+# crashes on some Android devices.
+http_archive(
+    name = "com_github_glog_glog_windows",
+    strip_prefix = "glog-3a0d4d22c5ae0b9a2216988411cfa6bf860cc372",
+    sha256 = "170d08f80210b82d95563f4723a15095eff1aad1863000e8eeb569c96a98fefb",
+    urls = [
+        "https://github.com/google/glog/archive/3a0d4d22c5ae0b9a2216988411cfa6bf860cc372.zip",
+    ],
+    patches = [
+        "@//third_party:com_github_glog_glog.diff",
+        "@//third_party:com_github_glog_glog_windows_patch.diff",
+    ],
+    patch_args = [
+        "-p1",
+    ],
+)
+
 # easyexif
 http_archive(
     name = "easyexif",
@@ -304,6 +304,7 @@ class GlProcessor : public ImageToTensorConverter {
     glBindTexture(GL_TEXTURE_2D, 0);
     glActiveTexture(GL_TEXTURE0);
     glBindTexture(GL_TEXTURE_2D, 0);
+    glFlush();
 
     return absl::OkStatus();
   }
@@ -406,6 +406,7 @@ absl::Status TensorConverterCalculator::ProcessGPU(CalculatorContext* cc) {
     glActiveTexture(GL_TEXTURE1);
     glBindTexture(GL_TEXTURE_2D, 0);
 #endif  // MEDIAPIPE_OPENGL_ES_VERSION >= MEDIAPIPE_OPENGL_ES_31
+    glFlush();
     src.Release();
     return absl::OkStatus();
   }));
@@ -93,6 +93,23 @@ py_test(
     deps = [":dataset"],
 )
 
+py_library(
+    name = "model_with_tokenizer",
+    srcs = ["model_with_tokenizer.py"],
+)
+
+py_test(
+    name = "model_with_tokenizer_test",
+    srcs = ["model_with_tokenizer_test.py"],
+    tags = ["requires-net:external"],
+    deps = [
+        ":bert_tokenizer",
+        ":model_spec",
+        ":model_with_tokenizer",
+        "//mediapipe/model_maker/python/core/utils:hub_loader",
+    ],
+)
+
 py_library(
     name = "bert_tokenizer",
     srcs = ["bert_tokenizer.py"],
@@ -145,10 +162,12 @@ py_library(
     name = "text_classifier",
     srcs = ["text_classifier.py"],
     deps = [
+        ":bert_tokenizer",
         ":dataset",
         ":hyperparameters",
         ":model_options",
         ":model_spec",
+        ":model_with_tokenizer",
         ":preprocessor",
         ":text_classifier_options",
         "//mediapipe/model_maker/python/core/data:dataset",
@@ -165,7 +184,7 @@ py_library(
 
 py_test(
     name = "text_classifier_test",
-    size = "large",
+    size = "enormous",
     srcs = ["text_classifier_test.py"],
     data = [
         "//mediapipe/model_maker/python/text/text_classifier/testdata",
@@ -56,6 +56,15 @@ class BertFullTokenizer(BertTokenizer):
     self._seq_len = seq_len
 
   def process(self, input_tensor: tf.Tensor) -> Mapping[str, Sequence[int]]:
+    """Processes one input_tensor example.
+
+    Args:
+      input_tensor: A tensor with shape (1, None) of a utf-8 encoded string.
+
+    Returns:
+      A dictionary of lists all with shape (1, self._seq_len) containing the
+      keys "input_word_ids", "input_type_ids", and "input_mask".
+    """
     tokens = self._tokenizer.tokenize(input_tensor.numpy()[0].decode("utf-8"))
     tokens = tokens[0 : (self._seq_len - 2)]  # account for [CLS] and [SEP]
     tokens.insert(0, "[CLS]")
@@ -96,7 +105,18 @@ class BertFastTokenizer(BertTokenizer):
     self._sep_id = vocab.index("[SEP]")
     self._pad_id = vocab.index("[PAD]")
 
-  def process(self, input_tensor: tf.Tensor) -> Mapping[str, Sequence[int]]:
+  def process_fn(self, input_tensor: tf.Tensor) -> Mapping[str, tf.Tensor]:
+    """Tensor implementation of the process function.
+
+    This implementation can be used within a model graph directly since it
+    takes in tensors and outputs tensors.
+
+    Args:
+      input_tensor: Input string tensor
+
+    Returns:
+      Dictionary of tf.Tensors.
+    """
     input_ids = self._tokenizer.tokenize(input_tensor).flat_values
     input_ids = input_ids[: (self._seq_len - 2)]
     input_ids = tf.concat(
@@ -112,7 +132,20 @@ class BertFastTokenizer(BertTokenizer):
     input_type_ids = tf.zeros(self._seq_len, dtype=tf.int32)
     input_mask = tf.cast(input_ids != self._pad_id, dtype=tf.int32)
     return {
-        "input_word_ids": input_ids.numpy().tolist(),
-        "input_type_ids": input_type_ids.numpy().tolist(),
-        "input_mask": input_mask.numpy().tolist(),
+        "input_word_ids": input_ids,
+        "input_type_ids": input_type_ids,
+        "input_mask": input_mask,
     }
+
+  def process(self, input_tensor: tf.Tensor) -> Mapping[str, Sequence[int]]:
+    """Processes one input_tensor example.
+
+    Args:
+      input_tensor: A tensor with shape (1, None) of a utf-8 encoded string.
+
+    Returns:
+      A dictionary of lists all with shape (1, self._seq_len) containing the
+      keys "input_word_ids", "input_type_ids", and "input_mask".
+    """
+    result = self.process_fn(input_tensor)
+    return {k: v.numpy().tolist() for k, v in result.items()}
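The two tokenizer hunks above split BertFastTokenizer into a graph-friendly process_fn (tensors in, tensors out) and a thin process wrapper that converts the result to Python lists. A minimal illustrative sketch of the difference, assuming the BertFastTokenizer(vocab_file, do_lower_case, seq_len) constructor exercised elsewhere in this commit; the vocab path is a placeholder:

import tensorflow as tf

from mediapipe.model_maker.python.text.text_classifier import bert_tokenizer

# "vocab.txt" is a placeholder; in practice the file comes from the resolved
# TF-Hub model assets, as in the new test added below.
tokenizer = bert_tokenizer.BertFastTokenizer("vocab.txt", True, 128)
example = tf.constant(["hello world".encode("utf-8")])

# Graph-friendly: a dict of tf.Tensor values, usable inside a tf.function or Keras model.
tensors = tokenizer.process_fn(example)

# Eager convenience wrapper: same keys, values converted to Python lists.
lists = tokenizer.process(example)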
@@ -0,0 +1,35 @@
+# Copyright 2023 The MediaPipe Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+"""Text classifier export module library."""
+import tensorflow as tf
+
+
+class ModelWithTokenizer(tf.keras.Model):
+  """A model with the tokenizer included in graph for exporting to TFLite."""
+
+  def __init__(self, tokenizer, model):
+    super().__init__()
+    self._tokenizer = tokenizer
+    self._model = model
+
+  @tf.function(
+      input_signature=[
+          tf.TensorSpec(shape=[None], dtype=tf.string, name="input")
+      ]
+  )
+  def call(self, input_tensor):
+    x = self._tokenizer.process_fn(input_tensor)
+    x = {k: tf.expand_dims(v, axis=0) for k, v in x.items()}
+    x = self._model(x)
+    return x
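In short, the wrapper above lets the exported graph accept raw UTF-8 strings: process_fn tokenizes one example, tf.expand_dims turns the per-example tensors into a batch of one, and the wrapped classifier runs on that batch. A hypothetical call, with tokenizer and classifier standing in for the objects built elsewhere in this commit:

wrapped = ModelWithTokenizer(tokenizer, classifier)
scores = wrapped(tf.constant(["some raw text".encode("utf-8")]))  # shape (1, num_classes)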
@@ -0,0 +1,105 @@
+# Copyright 2022 The MediaPipe Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import tempfile
+from unittest import mock as unittest_mock
+
+import tensorflow as tf
+import tensorflow_hub
+
+from mediapipe.model_maker.python.core.utils import hub_loader
+from mediapipe.model_maker.python.text.text_classifier import bert_tokenizer
+from mediapipe.model_maker.python.text.text_classifier import model_spec
+from mediapipe.model_maker.python.text.text_classifier import model_with_tokenizer
+
+
+class BertTokenizerTest(tf.test.TestCase):
+  _SEQ_LEN = 128
+
+  def setUp(self):
+    super().setUp()
+    # Mock tempfile.gettempdir() to be unique for each test to avoid race
+    # condition when downloading model since these tests may run in parallel.
+    mock_gettempdir = unittest_mock.patch.object(
+        tempfile,
+        "gettempdir",
+        return_value=self.create_tempdir(),
+        autospec=True,
+    )
+    self.mock_gettempdir = mock_gettempdir.start()
+    self.addCleanup(mock_gettempdir.stop)
+    self._ms = model_spec.SupportedModels.MOBILEBERT_CLASSIFIER.value()
+    self._tokenizer = self._create_tokenizer()
+    self._model = self._create_model()
+
+  def _create_tokenizer(self):
+    vocab_file = os.path.join(
+        tensorflow_hub.resolve(self._ms.get_path()), "assets", "vocab.txt"
+    )
+    return bert_tokenizer.BertFastTokenizer(vocab_file, True, self._SEQ_LEN)
+
+  def _create_model(self):
+    encoder_inputs = dict(
+        input_word_ids=tf.keras.layers.Input(
+            shape=(self._SEQ_LEN,),
+            dtype=tf.int32,
+            name="input_word_ids",
+        ),
+        input_mask=tf.keras.layers.Input(
+            shape=(self._SEQ_LEN,),
+            dtype=tf.int32,
+            name="input_mask",
+        ),
+        input_type_ids=tf.keras.layers.Input(
+            shape=(self._SEQ_LEN,),
+            dtype=tf.int32,
+            name="input_type_ids",
+        ),
+    )
+    renamed_inputs = dict(
+        input_ids=encoder_inputs["input_word_ids"],
+        input_mask=encoder_inputs["input_mask"],
+        segment_ids=encoder_inputs["input_type_ids"],
+    )
+    encoder = hub_loader.HubKerasLayerV1V2(
+        self._ms.get_path(),
+        signature="tokens",
+        output_key="pooled_output",
+        trainable=True,
+    )
+    pooled_output = encoder(renamed_inputs)
+
+    output = tf.keras.layers.Dropout(rate=0.1)(pooled_output)
+    initializer = tf.keras.initializers.TruncatedNormal(stddev=0.02)
+    output = tf.keras.layers.Dense(
+        2,
+        kernel_initializer=initializer,
+        name="output",
+        activation="softmax",
+        dtype=tf.float32,
+    )(output)
+    return tf.keras.Model(inputs=encoder_inputs, outputs=output)
+
+  def test_model_with_tokenizer(self):
+    model = model_with_tokenizer.ModelWithTokenizer(
+        self._tokenizer, self._model
+    )
+    output = model(tf.constant(["Example input".encode("utf-8")]))
+    self.assertAllEqual(output.shape, (1, 2))
+    self.assertEqual(tf.reduce_sum(output), 1)
+
+
+if __name__ == "__main__":
+  tf.test.main()
@@ -368,6 +368,10 @@ class BertClassifierPreprocessor:
         tfrecord_cache_files=tfrecord_cache_files,
     )
 
+  @property
+  def tokenizer(self) -> bert_tokenizer.BertTokenizer:
+    return self._tokenizer
+
 
 TextClassifierPreprocessor = Union[
     BertClassifierPreprocessor, AverageWordEmbeddingClassifierPreprocessor
@@ -29,10 +29,12 @@ from mediapipe.model_maker.python.core.utils import loss_functions
 from mediapipe.model_maker.python.core.utils import metrics
 from mediapipe.model_maker.python.core.utils import model_util
 from mediapipe.model_maker.python.core.utils import quantization
+from mediapipe.model_maker.python.text.text_classifier import bert_tokenizer
 from mediapipe.model_maker.python.text.text_classifier import dataset as text_ds
 from mediapipe.model_maker.python.text.text_classifier import hyperparameters as hp
 from mediapipe.model_maker.python.text.text_classifier import model_options as mo
 from mediapipe.model_maker.python.text.text_classifier import model_spec as ms
+from mediapipe.model_maker.python.text.text_classifier import model_with_tokenizer
 from mediapipe.model_maker.python.text.text_classifier import preprocessor
 from mediapipe.model_maker.python.text.text_classifier import text_classifier_options
 from mediapipe.tasks.python.metadata.metadata_writers import metadata_writer
@@ -620,3 +622,56 @@ class _BertClassifier(TextClassifier):
         ids_name=self._model_spec.tflite_input_name["ids"],
         mask_name=self._model_spec.tflite_input_name["mask"],
         segment_name=self._model_spec.tflite_input_name["segment_ids"])
+
+  def export_model_with_tokenizer(
+      self,
+      model_name: str = "model_with_tokenizer.tflite",
+      quantization_config: Optional[quantization.QuantizationConfig] = None,
+  ):
+    """Converts and saves the model to a TFLite file with the tokenizer.
+
+    Note that unlike the export_model method, this export method will include
+    a FastBertTokenizer in the TFLite graph. The resulting TFLite will not have
+    metadata information to use with MediaPipe Tasks, but can be run directly
+    using TFLite Inference: https://www.tensorflow.org/lite/guide/inference
+
+    For more information on the tokenizer, see:
+    https://www.tensorflow.org/text/api_docs/python/text/FastBertTokenizer
+
+    Args:
+      model_name: File name to save TFLite model with tokenizer. The full export
+        path is {self._hparams.export_dir}/{model_name}.
+      quantization_config: The configuration for model quantization.
+    """
+    tf.io.gfile.makedirs(self._hparams.export_dir)
+    tflite_file = os.path.join(self._hparams.export_dir, model_name)
+    if (
+        self._hparams.tokenizer
+        != bert_tokenizer.SupportedBertTokenizers.FAST_BERT_TOKENIZER
+    ):
+      print(
+          f"WARNING: This model was trained with {self._hparams.tokenizer} "
+          "tokenizer, but the exported model with tokenizer will have a "
+          f"{bert_tokenizer.SupportedBertTokenizers.FAST_BERT_TOKENIZER} "
+          "tokenizer."
+      )
+      tokenizer = bert_tokenizer.BertFastTokenizer(
+          vocab_file=self._text_preprocessor.get_vocab_file(),
+          do_lower_case=self._model_spec.do_lower_case,
+          seq_len=self._model_options.seq_len,
+      )
+    else:
+      tokenizer = self._text_preprocessor.tokenizer
+
+    model = model_with_tokenizer.ModelWithTokenizer(tokenizer, self._model)
+    model(tf.constant(["Example input data".encode("utf-8")]))  # build model
+    saved_model_file = os.path.join(
+        self._hparams.export_dir, "saved_model_with_tokenizer"
+    )
+    model.save(saved_model_file)
+    tflite_model = model_util.convert_to_tflite_from_file(
+        saved_model_file,
+        quantization_config=quantization_config,
+        allow_custom_ops=True,
+    )
+    model_util.save_tflite(tflite_model, tflite_file)
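From a caller's perspective the new export sits alongside the existing one. A hypothetical end-to-end sequence, following the Model Maker text classifier API exercised by the test changes below (dataset and options construction omitted; this snippet is illustrative and not part of the commit):

# Illustrative only: train_data, validation_data and options are assumed to be
# built the same way as for the existing export_model() flow.
bert_classifier = text_classifier.TextClassifier.create(train_data, validation_data, options)
bert_classifier.export_model()                 # TFLite + MediaPipe Tasks metadata
bert_classifier.export_model_with_tokenizer()  # TFLite with a FastBertTokenizer in the graph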
@@ -149,6 +149,12 @@ class TextClassifierTest(tf.test.TestCase, parameterized.TestCase):
             output_metadata_file, self._BERT_CLASSIFIER_JSON_FILE, shallow=False
         )
     )
+    bert_classifier.export_model_with_tokenizer()
+    output_tflite_with_tokenizer_file = os.path.join(
+        options.hparams.export_dir, 'model_with_tokenizer.tflite'
+    )
+    self.assertTrue(os.path.exists(output_tflite_with_tokenizer_file))
+    self.assertGreater(os.path.getsize(output_tflite_with_tokenizer_file), 0)
 
   def test_label_mismatch(self):
     options = text_classifier.TextClassifierOptions(
@@ -59,12 +59,12 @@ using absl::StatusCode;
     return NULL;
   }
 
-  void *allocedMemory = malloc(memSize);
-  if (!allocedMemory) {
+  void *allocatedMemory = malloc(memSize);
+  if (!allocatedMemory) {
     exit(-1);
   }
 
-  return allocedMemory;
+  return allocatedMemory;
 }
 
 + (BOOL)checkCppError:(const absl::Status &)status toError:(NSError *_Nullable *)error {
@@ -82,7 +82,7 @@ static NSString *const kExpectedErrorDomain = @"com.google.mediapipe.tasks";
   AssertEqualErrors(error, expectedError);
 }
 
-- (void)testInitWithImageSuceeds {
+- (void)testInitWithImageSucceeds {
   MPPImage *mppImage = [[MPPImage alloc] initWithUIImage:self.image error:nil];
   [self assertMPPImage:mppImage
          hasSourceType:MPPImageSourceTypeImage
@@ -109,7 +109,7 @@ static const float kKeypointErrorThreshold = 1e-2;
 
   NSError *error;
   MPPImage *mppImage = [self imageWithFileInfo:kCatImage];
-  MPPFaceDetectorResult *faceDetectorResult = [faceDetector detectInImage:mppImage error:&error];
+  MPPFaceDetectorResult *faceDetectorResult = [faceDetector detectImage:mppImage error:&error];
   XCTAssertNil(error);
   XCTAssertNotNil(faceDetectorResult);
   XCTAssertEqual(faceDetectorResult.detections.count, 0);
@@ -125,7 +125,7 @@ static const float kKeypointErrorThreshold = 1e-2;
 
   MPPImage *image = [self imageWithFileInfo:kPortraitImage];
   for (int i = 0; i < 3; i++) {
-    MPPFaceDetectorResult *faceDetectorResult = [faceDetector detectInVideoFrame:image
+    MPPFaceDetectorResult *faceDetectorResult = [faceDetector detectVideoFrame:image
                                            timestampInMilliseconds:i
                                                              error:nil];
     [self assertFaceDetectorResult:faceDetectorResult
@@ -141,7 +141,7 @@ static const float kKeypointErrorThreshold = 1e-2;
 
   MPPImage *image = [self imageWithFileInfo:kPortraitRotatedImage];
   for (int i = 0; i < 3; i++) {
-    MPPFaceDetectorResult *faceDetectorResult = [faceDetector detectInVideoFrame:image
+    MPPFaceDetectorResult *faceDetectorResult = [faceDetector detectVideoFrame:image
                                            timestampInMilliseconds:i
                                                              error:nil];
     [self assertFaceDetectorResult:faceDetectorResult
@@ -181,7 +181,7 @@ static const float kKeypointErrorThreshold = 1e-2;
   };
 
   for (int i = 0; i < iterationCount; i++) {
-    XCTAssertTrue([faceDetector detectAsyncInImage:image timestampInMilliseconds:i error:nil]);
+    XCTAssertTrue([faceDetector detectAsyncImage:image timestampInMilliseconds:i error:nil]);
   }
 
   NSTimeInterval timeout = 0.5f;
@@ -205,10 +205,10 @@ static const float kKeypointErrorThreshold = 1e-2;
   };
 
   MPPImage *image = [self imageWithFileInfo:kPortraitImage];
-  XCTAssertTrue([faceDetector detectAsyncInImage:image timestampInMilliseconds:1 error:nil]);
+  XCTAssertTrue([faceDetector detectAsyncImage:image timestampInMilliseconds:1 error:nil]);
 
   NSError *error;
-  XCTAssertFalse([faceDetector detectAsyncInImage:image timestampInMilliseconds:0 error:&error]);
+  XCTAssertFalse([faceDetector detectAsyncImage:image timestampInMilliseconds:0 error:&error]);
 
   NSError *expectedError =
       [NSError errorWithDomain:kExpectedErrorDomain
@@ -274,7 +274,7 @@ static const float kKeypointErrorThreshold = 1e-2;
   MPPImage *image = [self imageWithFileInfo:kPortraitImage];
 
   NSError *liveStreamApiCallError;
-  XCTAssertFalse([faceDetector detectAsyncInImage:image
+  XCTAssertFalse([faceDetector detectAsyncImage:image
                           timestampInMilliseconds:0
                                             error:&liveStreamApiCallError]);
 
@@ -288,7 +288,7 @@ static const float kKeypointErrorThreshold = 1e-2;
   AssertEqualErrors(liveStreamApiCallError, expectedLiveStreamApiCallError);
 
   NSError *videoApiCallError;
-  XCTAssertFalse([faceDetector detectInVideoFrame:image
+  XCTAssertFalse([faceDetector detectVideoFrame:image
                          timestampInMilliseconds:0
                                            error:&videoApiCallError]);
 
@@ -312,7 +312,7 @@ static const float kKeypointErrorThreshold = 1e-2;
   MPPImage *image = [self imageWithFileInfo:kPortraitImage];
 
   NSError *liveStreamApiCallError;
-  XCTAssertFalse([faceDetector detectAsyncInImage:image
+  XCTAssertFalse([faceDetector detectAsyncImage:image
                           timestampInMilliseconds:0
                                             error:&liveStreamApiCallError]);
 
@@ -326,7 +326,7 @@ static const float kKeypointErrorThreshold = 1e-2;
   AssertEqualErrors(liveStreamApiCallError, expectedLiveStreamApiCallError);
 
   NSError *imageApiCallError;
-  XCTAssertFalse([faceDetector detectInImage:image error:&imageApiCallError]);
+  XCTAssertFalse([faceDetector detectImage:image error:&imageApiCallError]);
 
   NSError *expectedImageApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain
@@ -350,7 +350,7 @@ static const float kKeypointErrorThreshold = 1e-2;
   MPPImage *image = [self imageWithFileInfo:kPortraitImage];
 
   NSError *imageApiCallError;
-  XCTAssertFalse([faceDetector detectInImage:image error:&imageApiCallError]);
+  XCTAssertFalse([faceDetector detectImage:image error:&imageApiCallError]);
 
   NSError *expectedImageApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain
@@ -362,7 +362,7 @@ static const float kKeypointErrorThreshold = 1e-2;
   AssertEqualErrors(imageApiCallError, expectedImageApiCallError);
 
   NSError *videoApiCallError;
-  XCTAssertFalse([faceDetector detectInVideoFrame:image
+  XCTAssertFalse([faceDetector detectVideoFrame:image
                          timestampInMilliseconds:0
                                            error:&videoApiCallError]);
 
@@ -407,7 +407,7 @@ static const float kKeypointErrorThreshold = 1e-2;
 
   MPPImage *image = [self imageWithFileInfo:kPortraitImage];
   for (int i = 0; i < iterationCount; i++) {
-    XCTAssertTrue([faceDetector detectAsyncInImage:image timestampInMilliseconds:i error:nil]);
+    XCTAssertTrue([faceDetector detectAsyncImage:image timestampInMilliseconds:i error:nil]);
   }
 
   NSTimeInterval timeout = 0.5f;
@@ -503,7 +503,7 @@ static const float kKeypointErrorThreshold = 1e-2;
         usingFaceDetector:(MPPFaceDetector *)faceDetector
 containsExpectedKeypoints:(NSArray<NSArray *> *)expectedKeypoints {
   NSError *error;
-  MPPFaceDetectorResult *faceDetectorResult = [faceDetector detectInImage:mppImage error:&error];
+  MPPFaceDetectorResult *faceDetectorResult = [faceDetector detectImage:mppImage error:&error];
   XCTAssertNil(error);
   XCTAssertNotNil(faceDetectorResult);
   [self assertFaceDetectorResult:faceDetectorResult containsExpectedKeypoints:expectedKeypoints];
@@ -137,7 +137,7 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
 
   NSError *error;
   MPPImage *mppImage = [self imageWithFileInfo:kCatImage];
-  MPPFaceLandmarkerResult *faceLandmarkerResult = [faceLandmarker detectInImage:mppImage
+  MPPFaceLandmarkerResult *faceLandmarkerResult = [faceLandmarker detectImage:mppImage
                                                                           error:&error];
   XCTAssertNil(error);
   XCTAssertNotNil(faceLandmarkerResult);
@@ -158,7 +158,7 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
   NSArray<MPPNormalizedLandmark *> *expectedLandmarks =
       [MPPFaceLandmarkerTests expectedLandmarksFromFileInfo:kPortraitExpectedLandmarksName];
   for (int i = 0; i < 3; i++) {
-    MPPFaceLandmarkerResult *faceLandmarkerResult = [faceLandmarker detectInVideoFrame:image
+    MPPFaceLandmarkerResult *faceLandmarkerResult = [faceLandmarker detectVideoFrame:image
                                              timestampInMilliseconds:i
                                                                error:nil];
     [self assertFaceLandmarkerResult:faceLandmarkerResult
@@ -200,7 +200,7 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
   };
 
   for (int i = 0; i < iterationCount; i++) {
-    XCTAssertTrue([faceLandmarker detectAsyncInImage:image timestampInMilliseconds:i error:nil]);
+    XCTAssertTrue([faceLandmarker detectAsyncImage:image timestampInMilliseconds:i error:nil]);
   }
 
   NSTimeInterval timeout = 0.5f;
@@ -224,10 +224,10 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
   };
 
   MPPImage *image = [self imageWithFileInfo:kPortraitImage];
-  XCTAssertTrue([faceLandmarker detectAsyncInImage:image timestampInMilliseconds:1 error:nil]);
+  XCTAssertTrue([faceLandmarker detectAsyncImage:image timestampInMilliseconds:1 error:nil]);
 
   NSError *error;
-  XCTAssertFalse([faceLandmarker detectAsyncInImage:image timestampInMilliseconds:0 error:&error]);
+  XCTAssertFalse([faceLandmarker detectAsyncImage:image timestampInMilliseconds:0 error:&error]);
 
   NSError *expectedError =
       [NSError errorWithDomain:kExpectedErrorDomain
@@ -292,7 +292,7 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
   MPPImage *image = [self imageWithFileInfo:kPortraitImage];
 
   NSError *liveStreamAPICallError;
-  XCTAssertFalse([faceLandmarker detectAsyncInImage:image
+  XCTAssertFalse([faceLandmarker detectAsyncImage:image
                             timestampInMilliseconds:0
                                               error:&liveStreamAPICallError]);
 
@@ -306,7 +306,7 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
   AssertEqualErrors(liveStreamAPICallError, expectedLiveStreamAPICallError);
 
   NSError *videoAPICallError;
-  XCTAssertFalse([faceLandmarker detectInVideoFrame:image
+  XCTAssertFalse([faceLandmarker detectVideoFrame:image
                            timestampInMilliseconds:0
                                              error:&videoAPICallError]);
 
@@ -329,7 +329,7 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
 
   MPPImage *image = [self imageWithFileInfo:kPortraitImage];
   NSError *liveStreamAPICallError;
-  XCTAssertFalse([faceLandmarker detectAsyncInImage:image
+  XCTAssertFalse([faceLandmarker detectAsyncImage:image
                             timestampInMilliseconds:0
                                               error:&liveStreamAPICallError]);
 
@@ -343,7 +343,7 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
   AssertEqualErrors(liveStreamAPICallError, expectedLiveStreamAPICallError);
 
   NSError *imageAPICallError;
-  XCTAssertFalse([faceLandmarker detectInImage:image error:&imageAPICallError]);
+  XCTAssertFalse([faceLandmarker detectImage:image error:&imageAPICallError]);
 
   NSError *expectedImageAPICallError =
       [NSError errorWithDomain:kExpectedErrorDomain
@@ -365,7 +365,7 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
   MPPImage *image = [self imageWithFileInfo:kPortraitImage];
 
   NSError *imageAPICallError;
-  XCTAssertFalse([faceLandmarker detectInImage:image error:&imageAPICallError]);
+  XCTAssertFalse([faceLandmarker detectImage:image error:&imageAPICallError]);
 
   NSError *expectedImageAPICallError =
       [NSError errorWithDomain:kExpectedErrorDomain
@@ -377,7 +377,7 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
   AssertEqualErrors(imageAPICallError, expectedImageAPICallError);
 
   NSError *videoAPICallError;
-  XCTAssertFalse([faceLandmarker detectInVideoFrame:image
+  XCTAssertFalse([faceLandmarker detectVideoFrame:image
                            timestampInMilliseconds:0
                                              error:&videoAPICallError]);
 
@@ -484,7 +484,7 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
     }
   }
 
-  if (expectedTransformationMatrix == NULL) {
+  if (expectedTransformationMatrix == nullptr) {
     XCTAssertEqualObjects(faceLandmarkerResult.facialTransformationMatrixes, [NSArray array]);
   } else {
     MPPTransformMatrix *actualTransformationMatrix =
@@ -539,7 +539,7 @@ constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
   MPPImage *mppImage = [self imageWithFileInfo:fileInfo];
 
   NSError *error;
-  MPPFaceLandmarkerResult *faceLandmarkerResult = [faceLandmarker detectInImage:mppImage
+  MPPFaceLandmarkerResult *faceLandmarkerResult = [faceLandmarker detectImage:mppImage
                                                                           error:&error];
   XCTAssertNil(error);
   XCTAssertNotNil(faceLandmarkerResult);
@@ -25,7 +25,7 @@ TFL_DISABLED_SANITIZER_TAGS = [
 ]
 
 objc_library(
-    name = "MPPFaceLandmarkeResultHelpersTestLibary",
+    name = "MPPFaceLandmarkeResultHelpersTestLibrary",
     testonly = 1,
     srcs = ["sources/MPPFaceLandmarkerResult+HelpersTests.mm"],
     copts = [
@@ -50,6 +50,6 @@ ios_unit_test(
     runner = tflite_ios_lab_runner("IOS_LATEST"),
     tags = TFL_DEFAULT_TAGS + TFL_DISABLED_SANITIZER_TAGS,
     deps = [
-        ":MPPFaceLandmarkeResultHelpersTestLibary",
+        ":MPPFaceLandmarkeResultHelpersTestLibrary",
     ],
 )
@@ -208,10 +208,10 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   return image;
 }
 
-- (MPPHandLandmarkerResult *)detectInImageWithFileInfo:(ResourceFileInfo *)imageFileInfo
+- (MPPHandLandmarkerResult *)detectImageWithFileInfo:(ResourceFileInfo *)imageFileInfo
                                    usingHandLandmarker:(MPPHandLandmarker *)handLandmarker {
   MPPImage *mppImage = [self imageWithFileInfo:imageFileInfo];
-  MPPHandLandmarkerResult *handLandmarkerResult = [handLandmarker detectInImage:mppImage error:nil];
+  MPPHandLandmarkerResult *handLandmarkerResult = [handLandmarker detectImage:mppImage error:nil];
   XCTAssertNotNil(handLandmarkerResult);
 
   return handLandmarkerResult;
@@ -221,7 +221,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
                       usingHandLandmarker:(MPPHandLandmarker *)handLandmarker
   approximatelyEqualsHandLandmarkerResult:
       (MPPHandLandmarkerResult *)expectedHandLandmarkerResult {
-  MPPHandLandmarkerResult *handLandmarkerResult = [self detectInImageWithFileInfo:fileInfo
+  MPPHandLandmarkerResult *handLandmarkerResult = [self detectImageWithFileInfo:fileInfo
                                                            usingHandLandmarker:handLandmarker];
   [self assertHandLandmarkerResult:handLandmarkerResult
       isApproximatelyEqualToExpectedResult:expectedHandLandmarkerResult];
@@ -249,7 +249,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPHandLandmarker *handLandmarker =
       [self createHandLandmarkerWithOptionsSucceeds:handLandmarkerOptions];
 
-  MPPHandLandmarkerResult *handLandmarkerResult = [self detectInImageWithFileInfo:kNoHandsImage
+  MPPHandLandmarkerResult *handLandmarkerResult = [self detectImageWithFileInfo:kNoHandsImage
                                                            usingHandLandmarker:handLandmarker];
   AssertHandLandmarkerResultIsEmpty(handLandmarkerResult);
 }
@@ -264,7 +264,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPHandLandmarker *handLandmarker =
       [self createHandLandmarkerWithOptionsSucceeds:handLandmarkerOptions];
 
-  MPPHandLandmarkerResult *handLandmarkerResult = [self detectInImageWithFileInfo:kTwoHandsImage
+  MPPHandLandmarkerResult *handLandmarkerResult = [self detectImageWithFileInfo:kTwoHandsImage
                                                            usingHandLandmarker:handLandmarker];
 
   XCTAssertTrue(handLandmarkerResult.handedness.count == numHands);
@@ -280,7 +280,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPImage *mppImage = [self imageWithFileInfo:kPointingUpRotatedImage
                                    orientation:UIImageOrientationRight];
 
-  MPPHandLandmarkerResult *handLandmarkerResult = [handLandmarker detectInImage:mppImage error:nil];
+  MPPHandLandmarkerResult *handLandmarkerResult = [handLandmarker detectImage:mppImage error:nil];
 
   [self assertHandLandmarkerResult:handLandmarkerResult
       isApproximatelyEqualToExpectedResult:[MPPHandLandmarkerTests
@@ -339,7 +339,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPImage *image = [self imageWithFileInfo:kThumbUpImage];
 
   NSError *liveStreamApiCallError;
-  XCTAssertFalse([handLandmarker detectAsyncInImage:image
+  XCTAssertFalse([handLandmarker detectAsyncImage:image
                            timestampInMilliseconds:0
                                              error:&liveStreamApiCallError]);
 
@@ -354,7 +354,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   AssertEqualErrors(liveStreamApiCallError, expectedLiveStreamApiCallError);
 
   NSError *videoApiCallError;
-  XCTAssertFalse([handLandmarker detectInVideoFrame:image
+  XCTAssertFalse([handLandmarker detectVideoFrame:image
                           timestampInMilliseconds:0
                                             error:&videoApiCallError]);
 
@@ -378,7 +378,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPImage *image = [self imageWithFileInfo:kThumbUpImage];
 
   NSError *liveStreamApiCallError;
-  XCTAssertFalse([handLandmarker detectAsyncInImage:image
+  XCTAssertFalse([handLandmarker detectAsyncImage:image
                            timestampInMilliseconds:0
                                              error:&liveStreamApiCallError]);
 
@@ -393,7 +393,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   AssertEqualErrors(liveStreamApiCallError, expectedLiveStreamApiCallError);
 
   NSError *imageApiCallError;
-  XCTAssertFalse([handLandmarker detectInImage:image error:&imageApiCallError]);
+  XCTAssertFalse([handLandmarker detectImage:image error:&imageApiCallError]);
 
   NSError *expectedImageApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain
@@ -416,7 +416,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPImage *image = [self imageWithFileInfo:kThumbUpImage];
 
   NSError *imageApiCallError;
-  XCTAssertFalse([handLandmarker detectInImage:image error:&imageApiCallError]);
+  XCTAssertFalse([handLandmarker detectImage:image error:&imageApiCallError]);
 
   NSError *expectedImageApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain
@@ -428,7 +428,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   AssertEqualErrors(imageApiCallError, expectedImageApiCallError);
 
   NSError *videoApiCallError;
-  XCTAssertFalse([handLandmarker detectInVideoFrame:image
+  XCTAssertFalse([handLandmarker detectVideoFrame:image
                           timestampInMilliseconds:0
                                             error:&videoApiCallError]);
 
@@ -452,7 +452,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPImage *image = [self imageWithFileInfo:kThumbUpImage];
 
   for (int i = 0; i < 3; i++) {
-    MPPHandLandmarkerResult *handLandmarkerResult = [handLandmarker detectInVideoFrame:image
+    MPPHandLandmarkerResult *handLandmarkerResult = [handLandmarker detectVideoFrame:image
                                              timestampInMilliseconds:i
                                                                error:nil];
     [self assertHandLandmarkerResult:handLandmarkerResult
@@ -480,10 +480,10 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
 
   MPPImage *image = [self imageWithFileInfo:kThumbUpImage];
 
-  XCTAssertTrue([handLandmarker detectAsyncInImage:image timestampInMilliseconds:1 error:nil]);
+  XCTAssertTrue([handLandmarker detectAsyncImage:image timestampInMilliseconds:1 error:nil]);
 
   NSError *error;
-  XCTAssertFalse([handLandmarker detectAsyncInImage:image timestampInMilliseconds:0 error:&error]);
+  XCTAssertFalse([handLandmarker detectAsyncImage:image timestampInMilliseconds:0 error:&error]);
 
   NSError *expectedError =
       [NSError errorWithDomain:kExpectedErrorDomain
@@ -533,7 +533,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPImage *image = [self imageWithFileInfo:kThumbUpImage];
 
   for (int i = 0; i < iterationCount; i++) {
-    XCTAssertTrue([handLandmarker detectAsyncInImage:image timestampInMilliseconds:i error:nil]);
+    XCTAssertTrue([handLandmarker detectAsyncImage:image timestampInMilliseconds:i error:nil]);
   }
 
   NSTimeInterval timeout = 0.5f;
@@ -194,7 +194,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
            usingObjectDetector:(MPPObjectDetector *)objectDetector
                     maxResults:(NSInteger)maxResults
     equalsObjectDetectorResult:(MPPObjectDetectorResult *)expectedObjectDetectorResult {
-  MPPObjectDetectorResult *ObjectDetectorResult = [objectDetector detectInImage:mppImage error:nil];
+  MPPObjectDetectorResult *ObjectDetectorResult = [objectDetector detectImage:mppImage error:nil];
 
   [self assertObjectDetectorResult:ObjectDetectorResult
            isEqualToExpectedResult:expectedObjectDetectorResult
@@ -495,7 +495,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPImage *image = [self imageWithFileInfo:kCatsAndDogsImage];
 
   NSError *liveStreamApiCallError;
-  XCTAssertFalse([objectDetector detectAsyncInImage:image
+  XCTAssertFalse([objectDetector detectAsyncImage:image
                            timestampInMilliseconds:0
                                              error:&liveStreamApiCallError]);
 
@@ -510,7 +510,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   AssertEqualErrors(liveStreamApiCallError, expectedLiveStreamApiCallError);
 
   NSError *videoApiCallError;
-  XCTAssertFalse([objectDetector detectInVideoFrame:image
+  XCTAssertFalse([objectDetector detectVideoFrame:image
                           timestampInMilliseconds:0
                                             error:&videoApiCallError]);
 
@@ -533,7 +533,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPImage *image = [self imageWithFileInfo:kCatsAndDogsImage];
 
   NSError *liveStreamApiCallError;
-  XCTAssertFalse([objectDetector detectAsyncInImage:image
+  XCTAssertFalse([objectDetector detectAsyncImage:image
                            timestampInMilliseconds:0
                                              error:&liveStreamApiCallError]);
 
@@ -548,7 +548,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   AssertEqualErrors(liveStreamApiCallError, expectedLiveStreamApiCallError);
 
   NSError *imageApiCallError;
-  XCTAssertFalse([objectDetector detectInImage:image error:&imageApiCallError]);
+  XCTAssertFalse([objectDetector detectImage:image error:&imageApiCallError]);
 
   NSError *expectedImageApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain
@@ -571,7 +571,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPImage *image = [self imageWithFileInfo:kCatsAndDogsImage];
 
   NSError *imageApiCallError;
-  XCTAssertFalse([objectDetector detectInImage:image error:&imageApiCallError]);
+  XCTAssertFalse([objectDetector detectImage:image error:&imageApiCallError]);
 
   NSError *expectedImageApiCallError =
       [NSError errorWithDomain:kExpectedErrorDomain
@@ -583,7 +583,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   AssertEqualErrors(imageApiCallError, expectedImageApiCallError);
 
   NSError *videoApiCallError;
-  XCTAssertFalse([objectDetector detectInVideoFrame:image
+  XCTAssertFalse([objectDetector detectVideoFrame:image
                           timestampInMilliseconds:0
                                             error:&videoApiCallError]);
 
@@ -610,7 +610,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPImage *image = [self imageWithFileInfo:kCatsAndDogsImage];
 
   for (int i = 0; i < 3; i++) {
-    MPPObjectDetectorResult *ObjectDetectorResult = [objectDetector detectInVideoFrame:image
+    MPPObjectDetectorResult *ObjectDetectorResult = [objectDetector detectVideoFrame:image
                                              timestampInMilliseconds:i
                                                                error:nil];
 
@@ -643,10 +643,10 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
 
   MPPImage *image = [self imageWithFileInfo:kCatsAndDogsImage];
 
-  XCTAssertTrue([objectDetector detectAsyncInImage:image timestampInMilliseconds:1 error:nil]);
+  XCTAssertTrue([objectDetector detectAsyncImage:image timestampInMilliseconds:1 error:nil]);
 
   NSError *error;
-  XCTAssertFalse([objectDetector detectAsyncInImage:image timestampInMilliseconds:0 error:&error]);
+  XCTAssertFalse([objectDetector detectAsyncImage:image timestampInMilliseconds:0 error:&error]);
 
   NSError *expectedError =
       [NSError errorWithDomain:kExpectedErrorDomain
@@ -702,7 +702,7 @@ static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
   MPPImage *image = [self imageWithFileInfo:kCatsAndDogsImage];
 
   for (int i = 0; i < iterationCount; i++) {
-    XCTAssertTrue([objectDetector detectAsyncInImage:image timestampInMilliseconds:i error:nil]);
+    XCTAssertTrue([objectDetector detectAsyncImage:image timestampInMilliseconds:i error:nil]);
   }
 
   NSTimeInterval timeout = 0.5f;
@@ -100,7 +100,7 @@ NS_SWIFT_NAME(FaceDetector)
 * system, i.e. in `[0,image_width) x [0,image_height)`, which are the dimensions of the underlying
 * image data.
 */
-- (nullable MPPFaceDetectorResult *)detectInImage:(MPPImage *)image
+- (nullable MPPFaceDetectorResult *)detectImage:(MPPImage *)image
                                             error:(NSError **)error NS_SWIFT_NAME(detect(image:));
 
 /**
@@ -127,7 +127,7 @@ NS_SWIFT_NAME(FaceDetector)
 * system, i.e. in `[0,image_width) x [0,image_height)`, which are the dimensions of the underlying
 * image data.
 */
-- (nullable MPPFaceDetectorResult *)detectInVideoFrame:(MPPImage *)image
+- (nullable MPPFaceDetectorResult *)detectVideoFrame:(MPPImage *)image
                                timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                                                  error:(NSError **)error
     NS_SWIFT_NAME(detect(videoFrame:timestampInMilliseconds:));
@@ -165,7 +165,7 @@ NS_SWIFT_NAME(FaceDetector)
 *
 * @return `true` if the image was sent to the task successfully, otherwise `false`.
 */
-- (BOOL)detectAsyncInImage:(MPPImage *)image
+- (BOOL)detectAsyncImage:(MPPImage *)image
    timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                      error:(NSError **)error
     NS_SWIFT_NAME(detectAsync(image:timestampInMilliseconds:));
@@ -130,13 +130,13 @@ static NSString *const kTaskName = @"faceDetector";
   return [self initWithOptions:options error:error];
 }
 
-- (nullable MPPFaceDetectorResult *)detectInImage:(MPPImage *)image error:(NSError **)error {
+- (nullable MPPFaceDetectorResult *)detectImage:(MPPImage *)image error:(NSError **)error {
   std::optional<PacketMap> outputPacketMap = [_visionTaskRunner processImage:image error:error];
 
   return [MPPFaceDetector faceDetectorResultWithOptionalOutputPacketMap:outputPacketMap];
 }
 
-- (nullable MPPFaceDetectorResult *)detectInVideoFrame:(MPPImage *)image
+- (nullable MPPFaceDetectorResult *)detectVideoFrame:(MPPImage *)image
                                timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                                                  error:(NSError **)error {
   std::optional<PacketMap> outputPacketMap =
@@ -147,7 +147,7 @@ static NSString *const kTaskName = @"faceDetector";
   return [MPPFaceDetector faceDetectorResultWithOptionalOutputPacketMap:outputPacketMap];
 }
 
-- (BOOL)detectAsyncInImage:(MPPImage *)image
+- (BOOL)detectAsyncImage:(MPPImage *)image
    timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                      error:(NSError **)error {
   return [_visionTaskRunner processLiveStreamImage:image
@@ -71,7 +71,7 @@ NS_SWIFT_NAME(FaceLandmarker)
 * @return An `FaceLandmarkerResult` that contains a list of landmarks. `nil` if there is an error
 * in initializing the face landmaker.
 */
-- (nullable MPPFaceLandmarkerResult *)detectInImage:(MPPImage *)image
+- (nullable MPPFaceLandmarkerResult *)detectImage:(MPPImage *)image
                                               error:(NSError **)error NS_SWIFT_NAME(detect(image:));
 
 /**
@@ -95,7 +95,7 @@ NS_SWIFT_NAME(FaceLandmarker)
 * @return An `FaceLandmarkerResult` that contains a list of landmarks. `nil` if there is an
 * error in initializing the face landmaker.
 */
-- (nullable MPPFaceLandmarkerResult *)detectInVideoFrame:(MPPImage *)image
+- (nullable MPPFaceLandmarkerResult *)detectVideoFrame:(MPPImage *)image
                                  timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                                                    error:(NSError **)error
     NS_SWIFT_NAME(detect(videoFrame:timestampInMilliseconds:));
@@ -132,7 +132,7 @@ NS_SWIFT_NAME(FaceLandmarker)
 *
 * @return `true` if the image was sent to the task successfully, otherwise `false`.
 */
-- (BOOL)detectAsyncInImage:(MPPImage *)image
+- (BOOL)detectAsyncImage:(MPPImage *)image
    timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                      error:(NSError **)error
     NS_SWIFT_NAME(detectAsync(image:timestampInMilliseconds:));
@@ -154,13 +154,13 @@ static NSString *const kTaskName = @"faceLandmarker";
   return [self initWithOptions:options error:error];
 }
 
-- (nullable MPPFaceLandmarkerResult *)detectInImage:(MPPImage *)image error:(NSError **)error {
+- (nullable MPPFaceLandmarkerResult *)detectImage:(MPPImage *)image error:(NSError **)error {
   std::optional<PacketMap> outputPacketMap = [_visionTaskRunner processImage:image error:error];
 
   return [MPPFaceLandmarker faceLandmarkerResultWithOptionalOutputPacketMap:outputPacketMap];
 }
 
-- (nullable MPPFaceLandmarkerResult *)detectInVideoFrame:(MPPImage *)image
+- (nullable MPPFaceLandmarkerResult *)detectVideoFrame:(MPPImage *)image
                                  timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                                                    error:(NSError **)error {
   std::optional<PacketMap> outputPacketMap =
@@ -171,7 +171,7 @@ static NSString *const kTaskName = @"faceLandmarker";
   return [MPPFaceLandmarker faceLandmarkerResultWithOptionalOutputPacketMap:outputPacketMap];
 }
 
-- (BOOL)detectAsyncInImage:(MPPImage *)image
+- (BOOL)detectAsyncImage:(MPPImage *)image
    timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                      error:(NSError **)error {
   return [_visionTaskRunner processLiveStreamImage:image
@@ -68,7 +68,7 @@ NS_SWIFT_NAME(FaceStylizer)
 * @return A `FaceStylizerResult` that contains the stylized image of the most visible face. The
 * returned image is copied. The stylized output image size is the same as the model output
 * size. The `stylizedImage` of the `FaceStylizerResult` is `nil` if there is no face detected in
-* the imput image. `FaceStylizerResult` is `nil` if there is an error in initializing the face
+* the input image. `FaceStylizerResult` is `nil` if there is an error in initializing the face
 * stylizer.
 */
 - (nullable MPPFaceStylizerResult *)stylizeImage:(MPPImage *)image
@@ -92,7 +92,7 @@ NS_SWIFT_NAME(FaceStylizer)
 *
 * @param image The `MPImage` on which face stylization is to be performed.
 * @param completionHandler A block to be invoked with the results of performing face stylization on
-* the imput image. The block takes two arguments, the optional `FaceStylizerResult` that contains
+* the input image. The block takes two arguments, the optional `FaceStylizerResult` that contains
 * the zero-copied stylized image if face stylization was successful and an optional error populated
 * upon failure. The lifetime of the stylized image is only guaranteed for the duration of the
 * block.
@@ -146,7 +146,7 @@ NS_SWIFT_NAME(HandLandmarker)
 * @return An `HandLandmarkerResult` object that contains the hand hand landmarks detection
 * results.
 */
-- (nullable MPPHandLandmarkerResult *)detectInImage:(MPPImage *)image
+- (nullable MPPHandLandmarkerResult *)detectImage:(MPPImage *)image
                                               error:(NSError **)error NS_SWIFT_NAME(detect(image:));
 
 /**
@@ -176,7 +176,7 @@ NS_SWIFT_NAME(HandLandmarker)
 * @return An `HandLandmarkerResult` object that contains the hand hand landmarks detection
 * results.
 */
-- (nullable MPPHandLandmarkerResult *)detectInVideoFrame:(MPPImage *)image
+- (nullable MPPHandLandmarkerResult *)detectVideoFrame:(MPPImage *)image
                                  timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                                                    error:(NSError **)error
     NS_SWIFT_NAME(detect(videoFrame:timestampInMilliseconds:));
@@ -216,7 +216,7 @@ NS_SWIFT_NAME(HandLandmarker)
 *
 * @return `YES` if the image was sent to the task successfully, otherwise `NO`.
 */
-- (BOOL)detectAsyncInImage:(MPPImage *)image
+- (BOOL)detectAsyncImage:(MPPImage *)image
    timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                      error:(NSError **)error
     NS_SWIFT_NAME(detectAsync(image:timestampInMilliseconds:));
@@ -140,13 +140,13 @@ static NSString *const kTaskName = @"handLandmarker";
   return [self initWithOptions:options error:error];
 }
 
-- (nullable MPPHandLandmarkerResult *)detectInImage:(MPPImage *)image error:(NSError **)error {
+- (nullable MPPHandLandmarkerResult *)detectImage:(MPPImage *)image error:(NSError **)error {
   std::optional<PacketMap> outputPacketMap = [_visionTaskRunner processImage:image error:error];
 
   return [MPPHandLandmarker handLandmarkerResultWithOptionalOutputPacketMap:outputPacketMap];
 }
 
-- (nullable MPPHandLandmarkerResult *)detectInVideoFrame:(MPPImage *)image
+- (nullable MPPHandLandmarkerResult *)detectVideoFrame:(MPPImage *)image
                                  timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                                                    error:(NSError **)error {
   std::optional<PacketMap> outputPacketMap =
@@ -157,7 +157,7 @@ static NSString *const kTaskName = @"handLandmarker";
   return [MPPHandLandmarker handLandmarkerResultWithOptionalOutputPacketMap:outputPacketMap];
 }
 
-- (BOOL)detectAsyncInImage:(MPPImage *)image
+- (BOOL)detectAsyncImage:(MPPImage *)image
    timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                      error:(NSError **)error {
   return [_visionTaskRunner processLiveStreamImage:image
@@ -112,7 +112,7 @@ NS_SWIFT_NAME(ObjectDetector)
 * system, i.e. in `[0,image_width) x [0,image_height)`, which are the dimensions of the underlying
 * image data.
 */
-- (nullable MPPObjectDetectorResult *)detectInImage:(MPPImage *)image
+- (nullable MPPObjectDetectorResult *)detectImage:(MPPImage *)image
                                               error:(NSError **)error NS_SWIFT_NAME(detect(image:));
 
 /**
@@ -138,7 +138,7 @@ NS_SWIFT_NAME(ObjectDetector)
 * system, i.e. in `[0,image_width) x [0,image_height)`, which are the dimensions of the underlying
 * image data.
 */
-- (nullable MPPObjectDetectorResult *)detectInVideoFrame:(MPPImage *)image
+- (nullable MPPObjectDetectorResult *)detectVideoFrame:(MPPImage *)image
                                  timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                                                    error:(NSError **)error
     NS_SWIFT_NAME(detect(videoFrame:timestampInMilliseconds:));
@@ -176,7 +176,7 @@ NS_SWIFT_NAME(ObjectDetector)
 *
 * @return `true` if the image was sent to the task successfully, otherwise `false`.
 */
-- (BOOL)detectAsyncInImage:(MPPImage *)image
+- (BOOL)detectAsyncImage:(MPPImage *)image
    timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                      error:(NSError **)error
     NS_SWIFT_NAME(detectAsync(image:timestampInMilliseconds:));
@@ -128,13 +128,13 @@ static NSString *const kTaskName = @"objectDetector";
   return [self initWithOptions:options error:error];
 }
 
-- (nullable MPPObjectDetectorResult *)detectInImage:(MPPImage *)image error:(NSError **)error {
+- (nullable MPPObjectDetectorResult *)detectImage:(MPPImage *)image error:(NSError **)error {
   std::optional<PacketMap> outputPacketMap = [_visionTaskRunner processImage:image error:error];
 
   return [MPPObjectDetector objectDetectorResultWithOptionalOutputPacketMap:outputPacketMap];
 }
 
-- (nullable MPPObjectDetectorResult *)detectInVideoFrame:(MPPImage *)image
+- (nullable MPPObjectDetectorResult *)detectVideoFrame:(MPPImage *)image
                                  timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                                                    error:(NSError **)error {
   std::optional<PacketMap> outputPacketMap =
@@ -145,7 +145,7 @@ static NSString *const kTaskName = @"objectDetector";
   return [MPPObjectDetector objectDetectorResultWithOptionalOutputPacketMap:outputPacketMap];
 }
 
-- (BOOL)detectAsyncInImage:(MPPImage *)image
+- (BOOL)detectAsyncImage:(MPPImage *)image
    timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                      error:(NSError **)error {
   return [_visionTaskRunner processLiveStreamImage:image
@@ -25,7 +25,7 @@ import com.google.protobuf.ByteString;
 
 /**
  * MediaPipe Tasks options base class. Any MediaPipe task-specific options class should extend
- * {@link TaskOptions} and implement exactly one of converTo*Proto() methods.
+ * {@link TaskOptions} and implement exactly one of convertTo*Proto() methods.
  */
 public abstract class TaskOptions {
   /**
@@ -739,7 +739,7 @@ public final class ImageSegmenter extends BaseVisionTaskApi {
   @AutoValue
   public abstract static class SegmentationOptions {
 
-    /** Builder fo {@link SegmentationOptions} */
+    /** Builder for {@link SegmentationOptions} */
     @AutoValue.Builder
     public abstract static class Builder {
 
@@ -289,7 +289,7 @@ public class GestureRecognizerTest {
   }
 
   @Test
-  public void recognize_successWithPreferAlowListThanDenyList() throws Exception {
+  public void recognize_successWithPreferAllowListThanDenyList() throws Exception {
     GestureRecognizerOptions options =
         GestureRecognizerOptions.builder()
             .setBaseOptions(
@@ -736,7 +736,7 @@ class MetadataWriter(object):
         content is used to interpret the metadata content.
 
     Returns:
-      A tuple of (model_with_metadata_in_bytes, metdata_json_content)
+      A tuple of (model_with_metadata_in_bytes, metadata_json_content)
     """
     # Populates metadata and associated files into TFLite model buffer.
     populator = metadata.MetadataPopulator.with_model_buffer(self._model_buffer)
@@ -840,6 +840,6 @@ class MetadataWriterBase:
         content is used to interpret the metadata content.
 
     Returns:
-      A tuple of (model_with_metadata_in_bytes, metdata_json_content)
+      A tuple of (model_with_metadata_in_bytes, metadata_json_content)
     """
     return self.writer.populate()
@@ -140,7 +140,7 @@ export class AudioClassifier extends AudioTaskRunner<AudioClassifierResult[]> {
    * @param sampleRate The sample rate in Hz of the provided audio data. If not
    *     set, defaults to the sample rate set via `setDefaultSampleRate()` or
    *     `48000` if no custom default was set.
-   * @return The classification result of the audio datas
+   * @return The classification result of the audio data
    */
   classify(audioData: Float32Array, sampleRate?: number):
       AudioClassifierResult[] {
@@ -9,7 +9,7 @@
     "import": "./__NAME___bundle.mjs",
     "require": "./__NAME___bundle.cjs",
     "default": "./__NAME___bundle.mjs",
-    "types": "./__NAME___.d.ts"
+    "types": "./__TYPES__"
   },
   "author": "mediapipe@google.com",
   "license": "Apache-2.0",
@@ -342,7 +342,7 @@ export class GestureRecognizer extends VisionTaskRunner {
    * Converts raw data into a landmark, and adds it to our worldLandmarks
    * list.
    */
-  private adddJsWorldLandmarks(data: Uint8Array[]): void {
+  private addJsWorldLandmarks(data: Uint8Array[]): void {
     for (const binaryProto of data) {
       const handWorldLandmarksProto =
           LandmarkList.deserializeBinary(binaryProto);
@@ -396,7 +396,7 @@ export class GestureRecognizer extends VisionTaskRunner {
 
     this.graphRunner.attachProtoVectorListener(
         WORLD_LANDMARKS_STREAM, (binaryProto, timestamp) => {
-          this.adddJsWorldLandmarks(binaryProto);
+          this.addJsWorldLandmarks(binaryProto);
           this.setLatestOutputTimestamp(timestamp);
         });
     this.graphRunner.attachEmptyPacketListener(
@@ -277,7 +277,7 @@ export class HandLandmarker extends VisionTaskRunner {
    * Converts raw data into a world landmark, and adds it to our worldLandmarks
    * list.
    */
-  private adddJsWorldLandmarks(data: Uint8Array[]): void {
+  private addJsWorldLandmarks(data: Uint8Array[]): void {
     for (const binaryProto of data) {
       const handWorldLandmarksProto =
          LandmarkList.deserializeBinary(binaryProto);
@@ -322,7 +322,7 @@ export class HandLandmarker extends VisionTaskRunner {
 
     this.graphRunner.attachProtoVectorListener(
        WORLD_LANDMARKS_STREAM, (binaryProto, timestamp) => {
-          this.adddJsWorldLandmarks(binaryProto);
+          this.addJsWorldLandmarks(binaryProto);
          this.setLatestOutputTimestamp(timestamp);
        });
    this.graphRunner.attachEmptyPacketListener(
@@ -403,7 +403,7 @@ export class PoseLandmarker extends VisionTaskRunner {
    * Converts raw data into a world landmark, and adds it to our
    * worldLandmarks list.
    */
-  private adddJsWorldLandmarks(data: Uint8Array[]): void {
+  private addJsWorldLandmarks(data: Uint8Array[]): void {
     this.worldLandmarks = [];
     for (const binaryProto of data) {
       const poseWorldLandmarksProto =
@@ -452,7 +452,7 @@ export class PoseLandmarker extends VisionTaskRunner {
 
     this.graphRunner.attachProtoVectorListener(
        WORLD_LANDMARKS_STREAM, (binaryProto, timestamp) => {
-          this.adddJsWorldLandmarks(binaryProto);
+          this.addJsWorldLandmarks(binaryProto);
          this.setLatestOutputTimestamp(timestamp);
        });
    this.graphRunner.attachEmptyPacketListener(
third_party/BUILD (2 changes)
@@ -45,7 +45,7 @@ cc_library(
             "@com_github_glog_glog//:glog",
         ],
         "//mediapipe:windows": [
-            "@com_github_glog_glog//:glog",
+            "@com_github_glog_glog_windows//:glog",
         ],
         "//conditions:default": [
             "@com_github_glog_glog//:glog",
third_party/com_github_glog_glog_windows_patch.diff (new file, 26 lines)
@@ -0,0 +1,26 @@
+diff --git a/bazel/glog.bzl b/bazel/glog.bzl
+index dacd934..d7b3d78 100644
+--- a/bazel/glog.bzl
++++ b/bazel/glog.bzl
+@@ -53,7 +53,6 @@ def glog_library(namespace = "google", with_gflags = 1, **kwargs):
+     )
+ 
+     common_copts = [
+-        "-std=c++14",
+         "-DGLOG_BAZEL_BUILD",
+         # Inject a C++ namespace.
+         "-DGOOGLE_NAMESPACE='%s'" % namespace,
+@@ -145,7 +144,13 @@ def glog_library(namespace = "google", with_gflags = 1, **kwargs):
+         ],
+     })
+ 
++    c14_opts = ["-std=c++14"]
++    c17_opts = ["-std=c++17"]
++
+     final_lib_copts = select({
++        "@bazel_tools//src/conditions:windows": c17_opts,
++        "//conditions:default": c14_opts,
++    }) + select({
+         "@bazel_tools//src/conditions:windows": common_copts + windows_only_copts,
+         "@bazel_tools//src/conditions:darwin": common_copts + linux_or_darwin_copts + darwin_only_copts,
+         "@bazel_tools//src/conditions:freebsd": common_copts + linux_or_darwin_copts + freebsd_only_copts,