Merge branch 'master' into ios-gesture-recognizer-tests
This commit is contained in:
commit e1d8854388
@@ -1,17 +1,16 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe
 title: Home
 nav_order: 1
 ---

 ----

-**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+**Attention:** *We have moved to
 [https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
 as the primary developer documentation site for MediaPipe as of April 3, 2023.*

-*This notice and web page will be removed on June 1, 2023.*
-
 ![MediaPipe](https://developers.google.com/static/mediapipe/images/home/hero_01_1920.png)

 **Attention**: MediaPipe Solutions Preview is an early release. [Learn
@@ -266,10 +266,10 @@ http_archive(

 http_archive(
     name = "com_googlesource_code_re2",
-    sha256 = "e06b718c129f4019d6e7aa8b7631bee38d3d450dd980246bfaf493eb7db67868",
+    sha256 = "ef516fb84824a597c4d5d0d6d330daedb18363b5a99eda87d027e6bdd9cba299",
-    strip_prefix = "re2-fe4a310131c37f9a7e7f7816fa6ce2a8b27d65a8",
+    strip_prefix = "re2-03da4fc0857c285e3a26782f6bc8931c4c950df4",
     urls = [
-        "https://github.com/google/re2/archive/fe4a310131c37f9a7e7f7816fa6ce2a8b27d65a8.tar.gz",
+        "https://github.com/google/re2/archive/03da4fc0857c285e3a26782f6bc8931c4c950df4.tar.gz",
     ],
 )

@@ -1,17 +1,16 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe
 title: Home
 nav_order: 1
 ---

 ----

-**Attention:** *Thanks for your interest in MediaPipe! We have moved to
+**Attention:** *We have moved to
 [https://developers.google.com/mediapipe](https://developers.google.com/mediapipe)
 as the primary developer documentation site for MediaPipe as of April 3, 2023.*

-*This notice and web page will be removed on June 1, 2023.*
-
 ![MediaPipe](https://developers.google.com/static/mediapipe/images/home/hero_01_1920.png)

 **Attention**: MediaPipe Solutions Preview is an early release. [Learn
@@ -20,9 +20,9 @@ nav_order: 1
 ---

 **Attention:** *Thank you for your interest in MediaPipe Solutions.
-As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
+As of May 10, 2023, this solution was upgraded to a new MediaPipe
 Solution. For more information, see the
-[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
+[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/face_detector)
 site.*

 ----
@@ -20,9 +20,9 @@ nav_order: 2
 ---

 **Attention:** *Thank you for your interest in MediaPipe Solutions.
-As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
+As of May 10, 2023, this solution was upgraded to a new MediaPipe
 Solution. For more information, see the
-[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
+[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/face_landmarker)
 site.*

 ----
@@ -20,9 +20,9 @@ nav_order: 3
 ---

 **Attention:** *Thank you for your interest in MediaPipe Solutions.
-As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
+As of May 10, 2023, this solution was upgraded to a new MediaPipe
 Solution. For more information, see the
-[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
+[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/face_landmarker)
 site.*

 ----
@@ -22,9 +22,9 @@ nav_order: 5
 ---

 **Attention:** *Thank you for your interest in MediaPipe Solutions.
-As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
+As of May 10, 2023, this solution was upgraded to a new MediaPipe
 Solution. For more information, see the
-[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/pose_landmarker/)
+[MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/pose_landmarker)
 site.*

 ----
@@ -21,7 +21,7 @@ nav_order: 1
 ---

 **Attention:** *Thank you for your interest in MediaPipe Solutions.
-As of March 1, 2023, this solution is planned to be upgraded to a new MediaPipe
+As of May 10, 2023, this solution was upgraded to a new MediaPipe
 Solution. For more information, see the
 [MediaPipe Solutions](https://developers.google.com/mediapipe/solutions/vision/pose_landmarker/)
 site.*
@@ -1,5 +1,6 @@
 ---
-layout: default
+layout: forward
+target: https://developers.google.com/mediapipe/solutions/guide#legacy
 title: MediaPipe Legacy Solutions
 nav_order: 3
 has_children: true
@@ -13,8 +14,7 @@ has_toc: false
 {:toc}
 ---

-**Attention:** *Thank you for your interest in MediaPipe Solutions. We have
-ended support for
+**Attention:** *We have ended support for
 [these MediaPipe Legacy Solutions](https://developers.google.com/mediapipe/solutions/guide#legacy)
 as of March 1, 2023. All other
 [MediaPipe Legacy Solutions will be upgraded](https://developers.google.com/mediapipe/solutions/guide#legacy)
@@ -25,14 +25,6 @@ be provided on an as-is basis. We encourage you to check out the new MediaPipe
 Solutions at:
 [https://developers.google.com/mediapipe/solutions](https://developers.google.com/mediapipe/solutions)*

-*This notice and web page will be removed on June 1, 2023.*
-
-----
-
-<br><br><br><br><br><br><br><br><br><br>
-<br><br><br><br><br><br><br><br><br><br>
-<br><br><br><br><br><br><br><br><br><br>
-
 ----

 MediaPipe offers open source cross-platform, customizable ML solutions for live
@@ -1240,6 +1240,7 @@ cc_library(
         "//mediapipe/framework/formats:classification_cc_proto",
         "//mediapipe/framework/formats:detection_cc_proto",
         "//mediapipe/framework/formats:landmark_cc_proto",
+        "//mediapipe/framework/formats:rect_cc_proto",
         "//mediapipe/framework/port:ret_check",
         "//mediapipe/framework/port:status",
     ],
@@ -17,6 +17,7 @@
 #include "mediapipe/framework/formats/classification.pb.h"
 #include "mediapipe/framework/formats/detection.pb.h"
 #include "mediapipe/framework/formats/landmark.pb.h"
+#include "mediapipe/framework/formats/rect.pb.h"

 namespace mediapipe {
 namespace api2 {
@@ -37,5 +38,12 @@ using GetDetectionVectorItemCalculator =
     GetVectorItemCalculator<mediapipe::Detection>;
 REGISTER_CALCULATOR(GetDetectionVectorItemCalculator);

+using GetNormalizedRectVectorItemCalculator =
+    GetVectorItemCalculator<NormalizedRect>;
+REGISTER_CALCULATOR(GetNormalizedRectVectorItemCalculator);
+
+using GetRectVectorItemCalculator = GetVectorItemCalculator<Rect>;
+REGISTER_CALCULATOR(GetRectVectorItemCalculator);
+
 }  // namespace api2
 }  // namespace mediapipe
@@ -290,17 +290,17 @@ void LandmarksToNormalizedLandmarks(const LandmarkList& landmarks,
     // Scale Z the same way as X (using image width).
     norm_landmark->set_z(landmark.z() / image_width);

-    if (landmark.has_presence()) {
-      norm_landmark->set_presence(landmark.presence());
-    } else {
-      norm_landmark->clear_presence();
-    }
-
     if (landmark.has_visibility()) {
       norm_landmark->set_visibility(landmark.visibility());
     } else {
       norm_landmark->clear_visibility();
     }

+    if (landmark.has_presence()) {
+      norm_landmark->set_presence(landmark.presence());
+    } else {
+      norm_landmark->clear_presence();
+    }
   }
 }

@@ -165,7 +165,7 @@ template <class V, class... U>
 struct IsCompatibleType<V, OneOf<U...>>
     : std::integral_constant<bool, (std::is_same_v<V, U> || ...)> {};

-};  // namespace internal
+}  // namespace internal

 template <typename T>
 inline Packet<T> PacketBase::As() const {
@@ -259,19 +259,19 @@ struct First {

 template <class T>
 struct AddStatus {
-  using type = StatusOr<T>;
+  using type = absl::StatusOr<T>;
 };
 template <class T>
-struct AddStatus<StatusOr<T>> {
-  using type = StatusOr<T>;
+struct AddStatus<absl::StatusOr<T>> {
+  using type = absl::StatusOr<T>;
 };
 template <>
-struct AddStatus<Status> {
-  using type = Status;
+struct AddStatus<absl::Status> {
+  using type = absl::Status;
 };
 template <>
 struct AddStatus<void> {
-  using type = Status;
+  using type = absl::Status;
 };

 template <class R, class F, class... A>
@@ -282,7 +282,7 @@ struct CallAndAddStatusImpl {
 };
 template <class F, class... A>
 struct CallAndAddStatusImpl<void, F, A...> {
-  Status operator()(const F& f, A&&... a) {
+  absl::Status operator()(const F& f, A&&... a) {
     f(std::forward<A>(a)...);
     return {};
   }

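
Note: the two hunks above only qualify the existing Status and StatusOr spellings as absl::Status and absl::StatusOr; the trait's behavior is unchanged. As a self-contained illustration (not the MediaPipe header itself) of what such an AddStatus trait does, the sketch below normalizes a return type: plain values become absl::StatusOr<T>, void becomes absl::Status, and already-wrapped types pass through. The static_asserts stand in for usage.

    #include <type_traits>

    #include "absl/status/status.h"
    #include "absl/status/statusor.h"

    // Normalize a callable's return type into a status-carrying type.
    template <class T>
    struct AddStatus {
      using type = absl::StatusOr<T>;
    };
    template <class T>
    struct AddStatus<absl::StatusOr<T>> {
      using type = absl::StatusOr<T>;
    };
    template <>
    struct AddStatus<absl::Status> {
      using type = absl::Status;
    };
    template <>
    struct AddStatus<void> {
      using type = absl::Status;
    };

    static_assert(std::is_same_v<AddStatus<int>::type, absl::StatusOr<int>>);
    static_assert(std::is_same_v<AddStatus<void>::type, absl::Status>);
    static_assert(std::is_same_v<AddStatus<absl::Status>::type, absl::Status>);
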
@@ -88,10 +88,13 @@ class SafeIntStrongIntValidator {

     // If the argument is floating point, we can do a simple check to make
     // sure the value is in range. It is undefined behavior to convert to int
-    // from a float that is out of range.
+    // from a float that is out of range. Since large integers will loose some
+    // precision when being converted to floating point, the integer max and min
+    // are explicitly converted back to floating point for this comparison, in
+    // order to satisfy compiler warnings.
     if (std::is_floating_point<U>::value) {
-      if (arg < std::numeric_limits<T>::min() ||
-          arg > std::numeric_limits<T>::max()) {
+      if (arg < static_cast<U>(std::numeric_limits<T>::min()) ||
+          arg > static_cast<U>(std::numeric_limits<T>::max())) {
         ErrorType::Error("SafeInt: init from out of bounds float", arg, "=");
       }
     } else {
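
For context on the casts added above, here is a standalone sketch of the same idea (not MediaPipe code; FitsInInteger is a made-up helper): the integer limits are converted back to the floating-point type explicitly, so the comparison does not rely on an implicit int-to-float conversion, which compilers warn about and which silently rounds for wide integer types such as int64_t.

    #include <cstdint>
    #include <iostream>
    #include <limits>
    #include <type_traits>

    // Returns true when a floating-point argument is within T's representable
    // range; the limits are cast to U so the rounding is explicit.
    template <typename T, typename U>
    bool FitsInInteger(U arg) {
      static_assert(std::is_integral<T>::value, "T must be an integer type");
      if (std::is_floating_point<U>::value) {
        return arg >= static_cast<U>(std::numeric_limits<T>::min()) &&
               arg <= static_cast<U>(std::numeric_limits<T>::max());
      }
      return true;  // Integral arguments are handled elsewhere in the real code.
    }

    int main() {
      std::cout << FitsInInteger<int64_t>(1e18) << "\n";  // 1: in range
      std::cout << FitsInInteger<int64_t>(1e19) << "\n";  // 0: out of range
    }
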
@@ -94,7 +94,7 @@ const Packet& OutputStreamShard::Header() const {
 // binary. This function can be defined in the .cc file because only two
 // versions are ever instantiated, and all call sites are within this .cc file.
 template <typename T>
-Status OutputStreamShard::AddPacketInternal(T&& packet) {
+absl::Status OutputStreamShard::AddPacketInternal(T&& packet) {
   if (IsClosed()) {
     return mediapipe::FailedPreconditionErrorBuilder(MEDIAPIPE_LOC)
            << "Packet sent to closed stream \"" << Name() << "\".";
@@ -113,7 +113,7 @@ Status OutputStreamShard::AddPacketInternal(T&& packet) {
            << timestamp.DebugString();
   }

-  Status result = output_stream_spec_->packet_type->Validate(packet);
+  absl::Status result = output_stream_spec_->packet_type->Validate(packet);
   if (!result.ok()) {
     return StatusBuilder(result, MEDIAPIPE_LOC).SetPrepend() << absl::StrCat(
         "Packet type mismatch on calculator outputting to stream \"",
@@ -132,14 +132,14 @@ Status OutputStreamShard::AddPacketInternal(T&& packet) {
 }

 void OutputStreamShard::AddPacket(const Packet& packet) {
-  Status status = AddPacketInternal(packet);
+  absl::Status status = AddPacketInternal(packet);
   if (!status.ok()) {
     output_stream_spec_->TriggerErrorCallback(status);
   }
 }

 void OutputStreamShard::AddPacket(Packet&& packet) {
-  Status status = AddPacketInternal(std::move(packet));
+  absl::Status status = AddPacketInternal(std::move(packet));
   if (!status.ok()) {
     output_stream_spec_->TriggerErrorCallback(status);
   }
@@ -59,8 +59,8 @@ absl::Status CombinedStatus(absl::string_view general_comment,
       }
     }
   }
-  if (error_code == StatusCode::kOk) return OkStatus();
-  Status combined;
+  if (error_code == absl::StatusCode::kOk) return absl::OkStatus();
+  absl::Status combined;
   combined = absl::Status(
       error_code,
       absl::StrCat(general_comment, "\n", absl::StrJoin(errors, "\n")));
@@ -241,9 +241,9 @@ class StaticMap {
 #define DEFINE_MEDIAPIPE_TYPE_MAP(MapName, KeyType) \
   class MapName : public type_map_internal::StaticMap<MapName, KeyType> {};
 // Defines a map from unique typeid number to MediaPipeTypeData.
-DEFINE_MEDIAPIPE_TYPE_MAP(PacketTypeIdToMediaPipeTypeData, size_t);
+DEFINE_MEDIAPIPE_TYPE_MAP(PacketTypeIdToMediaPipeTypeData, size_t)
 // Defines a map from unique type string to MediaPipeTypeData.
-DEFINE_MEDIAPIPE_TYPE_MAP(PacketTypeStringToMediaPipeTypeData, std::string);
+DEFINE_MEDIAPIPE_TYPE_MAP(PacketTypeStringToMediaPipeTypeData, std::string)

 // MEDIAPIPE_REGISTER_TYPE can be used to register a type.
 // Convention:
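
A small sketch of why dropping the call-site semicolons matters (the motivation is an assumption on my part; the macro below is illustrative, not the MediaPipe one): the macro body already ends in ';', so a semicolon after the invocation expands into an extra empty declaration, which warnings such as -Wextra-semi flag. The same reasoning applies to the '};  // namespace internal' fix in the earlier packet.h hunk.

    // Illustrative macro: the expansion already supplies the trailing ';'.
    #define DEFINE_EXAMPLE_TYPE_MAP(MapName) \
      class MapName {};

    DEFINE_EXAMPLE_TYPE_MAP(FirstExampleMap)  // expands to: class FirstExampleMap {};
    // DEFINE_EXAMPLE_TYPE_MAP(SecondExampleMap);  // would expand to
    //   'class SecondExampleMap {};;' where the extra ';' is an empty
    //   declaration that -Wextra-semi reports.
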
@@ -211,6 +211,14 @@ GlTexture GlCalculatorHelper::CreateDestinationTexture(int width, int height,
   return MapGpuBuffer(gpu_buffer, gpu_buffer.GetWriteView<GlTextureView>(0));
 }

+GlTexture GlCalculatorHelper::CreateDestinationTexture(
+    const ImageFrame& image_frame) {
+  // TODO: ensure buffer pool is used when creating textures out of
+  // ImageFrame.
+  GpuBuffer gpu_buffer = GpuBufferCopyingImageFrame(image_frame);
+  return MapGpuBuffer(gpu_buffer, gpu_buffer.GetWriteView<GlTextureView>(0));
+}
+
 GlTexture GlCalculatorHelper::CreateSourceTexture(
     const mediapipe::Image& image) {
   return CreateSourceTexture(image.GetGpuBuffer());
@@ -135,6 +135,12 @@ class GlCalculatorHelper {
   // This is deprecated because: 1) it encourages the use of GlTexture as a
   // long-lived object; 2) it requires copying the ImageFrame's contents,
   // which may not always be necessary.
+  //
+  // WARNING: do NOT use as a destination texture which will be sent to
+  // downstream calculators as it may lead to synchronization issues. The result
+  // is meant to be a short-lived object, local to a single calculator and
+  // single GL thread. Use `CreateDestinationTexture` instead, if you need a
+  // destination texture.
   ABSL_DEPRECATED("Use `GpuBufferWithImageFrame`.")
   GlTexture CreateSourceTexture(const ImageFrame& image_frame);

@@ -156,6 +162,14 @@ class GlCalculatorHelper {
                                      int output_width, int output_height,
                                      GpuBufferFormat format = GpuBufferFormat::kBGRA32);

+  // Creates a destination texture copying and uploading passed image frame.
+  //
+  // WARNING: mind that this functions creates a new texture every time and
+  // doesn't use MediaPipe's gpu buffer pool.
+  // TODO: ensure buffer pool is used when creating textures out of
+  // ImageFrame.
+  GlTexture CreateDestinationTexture(const ImageFrame& image_frame);
+
   // The OpenGL name of the output framebuffer.
   GLuint framebuffer() const;

@@ -196,7 +210,7 @@ class GlCalculatorHelper {
 // This class should be the main way to interface with GL memory within a single
 // calculator. This is the preferred way to utilize the memory pool inside of
 // the helper, because GlTexture manages efficiently releasing memory back into
-// the pool. A GPU backed Image can be extracted from the unerlying
+// the pool. A GPU backed Image can be extracted from the underlying
 // memory.
 class GlTexture {
  public:
@@ -65,7 +65,7 @@ static void SetThreadName(const char* name) {
 #elif __APPLE__
   pthread_setname_np(name);
 #endif
-  ANNOTATE_THREAD_NAME(name);
+  ABSL_ANNOTATE_THREAD_NAME(name);
 }

 GlContext::DedicatedThread::DedicatedThread() {
@@ -91,9 +91,9 @@ class GlTextureBuffer
   // TODO: turn into a single call?
   GLuint name() const { return name_; }
   GLenum target() const { return target_; }
-  int width() const { return width_; }
-  int height() const { return height_; }
-  GpuBufferFormat format() const { return format_; }
+  int width() const override { return width_; }
+  int height() const override { return height_; }
+  GpuBufferFormat format() const override { return format_; }

   GlTextureView GetReadView(internal::types<GlTextureView>,
                             int plane) const override;
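
What marking these accessors 'override' buys, in a standalone sketch (the real base is MediaPipe's GPU buffer storage interface; 'Storage' below is a stand-in): the compiler now verifies that each accessor really overrides a virtual declared in the base and rejects it if the signature drifts.

    // Stand-in for the virtual interface the buffer implements.
    class Storage {
     public:
      virtual ~Storage() = default;
      virtual int width() const = 0;
      virtual int height() const = 0;
    };

    class Buffer : public Storage {
     public:
      int width() const override { return width_; }
      int height() const override { return height_; }
      // int height() override { return height_; }  // would not compile: the
      // non-const signature overrides nothing in Storage.
     private:
      int width_ = 0;
      int height_ = 0;
    };
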
@@ -71,11 +71,10 @@ absl::Status ImageFrameToGpuBufferCalculator::Process(CalculatorContext* cc) {
 #else
   const auto& input = cc->Inputs().Index(0).Get<ImageFrame>();
   helper_.RunInGlContext([this, &input, &cc]() {
-    auto src = helper_.CreateSourceTexture(input);
-    auto output = src.GetFrame<GpuBuffer>();
-    glFlush();
+    GlTexture dst = helper_.CreateDestinationTexture(input);
+    std::unique_ptr<GpuBuffer> output = dst.GetFrame<GpuBuffer>();
     cc->Outputs().Index(0).Add(output.release(), cc->InputTimestamp());
-    src.Release();
+    dst.Release();
   });
 #endif  // MEDIAPIPE_GPU_BUFFER_USE_CV_PIXEL_BUFFER
   return absl::OkStatus();
@@ -15,9 +15,12 @@

 import dataclasses
 import tempfile
-
 from typing import Optional

+import tensorflow as tf
+
+from official.common import distribute_utils
+

 @dataclasses.dataclass
 class BaseHParams:
@@ -43,10 +46,10 @@ class BaseHParams:
       documentation for more details:
       https://www.tensorflow.org/api_docs/python/tf/distribute/Strategy.
     num_gpus: How many GPUs to use at each worker with the
-      DistributionStrategies API. The default is -1, which means utilize all
-      available GPUs.
-    tpu: The Cloud TPU to use for training. This should be either the name used
-      when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.
+      DistributionStrategies API. The default is 0.
+    tpu: The TPU resource to be used for training. This should be either the
+      name used when creating the Cloud TPU, a grpc://ip.address.of.tpu:8470
+      url, or an empty string if using a local TPU.
   """

   # Parameters for train configuration
@@ -63,5 +66,16 @@ class BaseHParams:

   # Parameters for hardware acceleration
   distribution_strategy: str = 'off'
-  num_gpus: int = -1  # default value of -1 means use all available GPUs
+  num_gpus: int = 0
   tpu: str = ''
+  _strategy: tf.distribute.Strategy = dataclasses.field(init=False)
+
+  def __post_init__(self):
+    self._strategy = distribute_utils.get_distribution_strategy(
+        distribution_strategy=self.distribution_strategy,
+        num_gpus=self.num_gpus,
+        tpu_address=self.tpu,
+    )
+
+  def get_strategy(self):
+    return self._strategy
@@ -85,8 +85,10 @@ class ModelSpecTest(tf.test.TestCase):
             steps_per_epoch=None,
             shuffle=False,
             distribution_strategy='off',
-            num_gpus=-1,
-            tpu=''))
+            num_gpus=0,
+            tpu='',
+        ),
+    )

   def test_custom_bert_spec(self):
     custom_bert_classifier_options = (
@@ -311,9 +311,11 @@ class _BertClassifier(TextClassifier):
                label_names: Sequence[str]):
     super().__init__(model_spec, hparams, label_names)
     self._model_options = model_options
+    with self._hparams.get_strategy().scope():
       self._loss_function = tf.keras.losses.SparseCategoricalCrossentropy()
       self._metric_function = tf.keras.metrics.SparseCategoricalAccuracy(
-        "test_accuracy", dtype=tf.float32)
+          "test_accuracy", dtype=tf.float32
+      )
     self._text_preprocessor: preprocessor.BertClassifierPreprocessor = None

   @classmethod
@@ -350,6 +352,7 @@ class _BertClassifier(TextClassifier):
     """
     (processed_train_data, processed_validation_data) = (
         self._load_and_run_preprocessor(train_data, validation_data))
+    with self._hparams.get_strategy().scope():
       self._create_model()
       self._create_optimizer(processed_train_data)
     self._train_model(processed_train_data, processed_validation_data)
@@ -53,6 +53,7 @@ CALCULATORS_AND_GRAPHS = [
    "//mediapipe/tasks/cc/text/text_classifier:text_classifier_graph",
    "//mediapipe/tasks/cc/text/text_embedder:text_embedder_graph",
    "//mediapipe/tasks/cc/vision/face_detector:face_detector_graph",
+    "//mediapipe/tasks/cc/vision/face_landmarker:face_landmarker_graph",
    "//mediapipe/tasks/cc/vision/image_classifier:image_classifier_graph",
    "//mediapipe/tasks/cc/vision/object_detector:object_detector_graph",
 ]
@@ -80,6 +81,9 @@ strip_api_include_path_prefix(
        "//mediapipe/tasks/ios/vision/face_detector:sources/MPPFaceDetector.h",
        "//mediapipe/tasks/ios/vision/face_detector:sources/MPPFaceDetectorOptions.h",
        "//mediapipe/tasks/ios/vision/face_detector:sources/MPPFaceDetectorResult.h",
+        "//mediapipe/tasks/ios/vision/face_landmarker:sources/MPPFaceLandmarker.h",
+        "//mediapipe/tasks/ios/vision/face_landmarker:sources/MPPFaceLandmarkerOptions.h",
+        "//mediapipe/tasks/ios/vision/face_landmarker:sources/MPPFaceLandmarkerResult.h",
        "//mediapipe/tasks/ios/vision/image_classifier:sources/MPPImageClassifier.h",
        "//mediapipe/tasks/ios/vision/image_classifier:sources/MPPImageClassifierOptions.h",
        "//mediapipe/tasks/ios/vision/image_classifier:sources/MPPImageClassifierResult.h",
@@ -164,6 +168,9 @@ apple_static_xcframework(
        ":MPPFaceDetector.h",
        ":MPPFaceDetectorOptions.h",
        ":MPPFaceDetectorResult.h",
+        ":MPPFaceLandmarker.h",
+        ":MPPFaceLandmarkerOptions.h",
+        ":MPPFaceLandmarkerResult.h",
        ":MPPImageClassifier.h",
        ":MPPImageClassifierOptions.h",
        ":MPPImageClassifierResult.h",
@@ -173,6 +180,7 @@ apple_static_xcframework(
    ],
    deps = [
        "//mediapipe/tasks/ios/vision/face_detector:MPPFaceDetector",
+        "//mediapipe/tasks/ios/vision/face_landmarker:MPPFaceLandmarker",
        "//mediapipe/tasks/ios/vision/image_classifier:MPPImageClassifier",
        "//mediapipe/tasks/ios/vision/object_detector:MPPObjectDetector",
    ],
@@ -4,9 +4,9 @@ Pod::Spec.new do |s|
   s.authors = 'Google Inc.'
   s.license = { :type => 'Apache',:file => "LICENSE" }
   s.homepage = 'https://github.com/google/mediapipe'
-  s.source = { :http => '${MPP_DOWNLOAD_URL}' }
+  s.source = { :http => '${MPP_COMMON_DOWNLOAD_URL}' }
   s.summary = 'MediaPipe Task Library - Text'
-  s.description = 'The Natural Language APIs of the MediaPipe Task Library'
+  s.description = 'The common libraries of the MediaPipe Task Library'

   s.ios.deployment_target = '11.0'

@@ -4,7 +4,7 @@ Pod::Spec.new do |s|
   s.authors = 'Google Inc.'
   s.license = { :type => 'Apache',:file => "LICENSE" }
   s.homepage = 'https://github.com/google/mediapipe'
-  s.source = { :http => '${MPP_DOWNLOAD_URL}' }
+  s.source = { :http => '${MPP_TEXT_DOWNLOAD_URL}' }
   s.summary = 'MediaPipe Task Library - Text'
   s.description = 'The Natural Language APIs of the MediaPipe Task Library'

@@ -4,7 +4,7 @@ Pod::Spec.new do |s|
   s.authors = 'Google Inc.'
   s.license = { :type => 'Apache',:file => "LICENSE" }
   s.homepage = 'https://github.com/google/mediapipe'
-  s.source = { :http => '${MPP_DOWNLOAD_URL}' }
+  s.source = { :http => '${MPP_VISION_DOWNLOAD_URL}' }
   s.summary = 'MediaPipe Task Library - Vision'
   s.description = 'The Vision APIs of the MediaPipe Task Library'

mediapipe/tasks/ios/test/vision/face_landmarker/BUILD (new file, 71 lines)
@@ -0,0 +1,71 @@
+load("@build_bazel_rules_apple//apple:ios.bzl", "ios_unit_test")
+load(
+    "//mediapipe/framework/tool:ios.bzl",
+    "MPP_TASK_MINIMUM_OS_VERSION",
+)
+load(
+    "@org_tensorflow//tensorflow/lite:special_rules.bzl",
+    "tflite_ios_lab_runner",
+)
+
+package(default_visibility = ["//mediapipe/tasks:internal"])
+
+licenses(["notice"])
+
+# Default tags for filtering iOS targets. Targets are restricted to Apple platforms.
+TFL_DEFAULT_TAGS = [
+    "apple",
+]
+
+# Following sanitizer tests are not supported by iOS test targets.
+TFL_DISABLED_SANITIZER_TAGS = [
+    "noasan",
+    "nomsan",
+    "notsan",
+]
+
+objc_library(
+    name = "MPPFaceLandmarkerObjcTestLibrary",
+    testonly = 1,
+    srcs = ["MPPFaceLandmarkerTests.mm"],
+    copts = [
+        "-ObjC++",
+        "-std=c++17",
+        "-x objective-c++",
+    ],
+    data = [
+        "//mediapipe/tasks/testdata/vision:test_images",
+        "//mediapipe/tasks/testdata/vision:test_models",
+        "//mediapipe/tasks/testdata/vision:test_protos",
+    ],
+    deps = [
+        "//mediapipe/framework/formats:classification_cc_proto",
+        "//mediapipe/framework/formats:landmark_cc_proto",
+        "//mediapipe/framework/formats:matrix_data_cc_proto",
+        "//mediapipe/tasks/cc/vision/face_geometry/proto:face_geometry_cc_proto",
+        "//mediapipe/tasks/ios/common:MPPCommon",
+        "//mediapipe/tasks/ios/components/containers/utils:MPPClassificationResultHelpers",
+        "//mediapipe/tasks/ios/components/containers/utils:MPPDetectionHelpers",
+        "//mediapipe/tasks/ios/components/containers/utils:MPPLandmarkHelpers",
+        "//mediapipe/tasks/ios/test/vision/utils:MPPImageTestUtils",
+        "//mediapipe/tasks/ios/test/vision/utils:parse_proto_utils",
+        "//mediapipe/tasks/ios/vision/face_landmarker:MPPFaceLandmarker",
+        "//mediapipe/tasks/ios/vision/face_landmarker:MPPFaceLandmarkerResult",
+        "//third_party/apple_frameworks:UIKit",
+    ] + select({
+        "//third_party:opencv_ios_sim_arm64_source_build": ["@ios_opencv_source//:opencv_xcframework"],
+        "//third_party:opencv_ios_arm64_source_build": ["@ios_opencv_source//:opencv_xcframework"],
+        "//third_party:opencv_ios_x86_64_source_build": ["@ios_opencv_source//:opencv_xcframework"],
+        "//conditions:default": ["@ios_opencv//:OpencvFramework"],
+    }),
+)
+
+ios_unit_test(
+    name = "MPPFaceLandmarkerObjcTest",
+    minimum_os_version = MPP_TASK_MINIMUM_OS_VERSION,
+    runner = tflite_ios_lab_runner("IOS_LATEST"),
+    tags = TFL_DEFAULT_TAGS + TFL_DISABLED_SANITIZER_TAGS,
+    deps = [
+        ":MPPFaceLandmarkerObjcTestLibrary",
+    ],
+)
@@ -0,0 +1,553 @@
+// Copyright 2023 The MediaPipe Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#import <Foundation/Foundation.h>
+#import <UIKit/UIKit.h>
+#import <XCTest/XCTest.h>
+
+#include "mediapipe/framework/formats/classification.pb.h"
+#include "mediapipe/framework/formats/landmark.pb.h"
+#include "mediapipe/framework/formats/matrix_data.pb.h"
+#include "mediapipe/tasks/cc/vision/face_geometry/proto/face_geometry.pb.h"
+#import "mediapipe/tasks/ios/common/sources/MPPCommon.h"
+#import "mediapipe/tasks/ios/components/containers/utils/sources/MPPClassificationResult+Helpers.h"
+#import "mediapipe/tasks/ios/components/containers/utils/sources/MPPDetection+Helpers.h"
+#import "mediapipe/tasks/ios/components/containers/utils/sources/MPPLandmark+Helpers.h"
+#import "mediapipe/tasks/ios/test/vision/utils/sources/MPPImage+TestUtils.h"
+#include "mediapipe/tasks/ios/test/vision/utils/sources/parse_proto_utils.h"
+#import "mediapipe/tasks/ios/vision/face_landmarker/sources/MPPFaceLandmarker.h"
+#import "mediapipe/tasks/ios/vision/face_landmarker/sources/MPPFaceLandmarkerResult.h"
+
+using NormalizedLandmarkListProto = ::mediapipe::NormalizedLandmarkList;
+using ClassificationListProto = ::mediapipe::ClassificationList;
+using FaceGeometryProto = ::mediapipe::tasks::vision::face_geometry::proto::FaceGeometry;
+using ::mediapipe::tasks::ios::test::vision::utils::get_proto_from_pbtxt;
+
+static NSString *const kPbFileExtension = @"pbtxt";
+
+typedef NSDictionary<NSString *, NSString *> ResourceFileInfo;
+
+static ResourceFileInfo *const kPortraitImage =
+    @{@"name" : @"portrait", @"type" : @"jpg", @"orientation" : @(UIImageOrientationUp)};
+static ResourceFileInfo *const kPortraitRotatedImage =
+    @{@"name" : @"portrait_rotated", @"type" : @"jpg", @"orientation" : @(UIImageOrientationRight)};
+static ResourceFileInfo *const kCatImage = @{@"name" : @"cat", @"type" : @"jpg"};
+static ResourceFileInfo *const kPortraitExpectedLandmarksName =
+    @{@"name" : @"portrait_expected_face_landmarks", @"type" : kPbFileExtension};
+static ResourceFileInfo *const kPortraitExpectedBlendshapesName =
+    @{@"name" : @"portrait_expected_blendshapes", @"type" : kPbFileExtension};
+static ResourceFileInfo *const kPortraitExpectedGeometryName =
+    @{@"name" : @"portrait_expected_face_geometry", @"type" : kPbFileExtension};
+static NSString *const kFaceLandmarkerModelName = @"face_landmarker_v2";
+static NSString *const kFaceLandmarkerWithBlendshapesModelName =
+    @"face_landmarker_v2_with_blendshapes";
+static NSString *const kExpectedErrorDomain = @"com.google.mediapipe.tasks";
+static NSString *const kLiveStreamTestsDictFaceLandmarkerKey = @"face_landmarker";
+static NSString *const kLiveStreamTestsDictExpectationKey = @"expectation";
+
+constexpr float kLandmarkErrorThreshold = 0.03f;
+constexpr float kBlendshapesErrorThreshold = 0.1f;
+constexpr float kFacialTransformationMatrixErrorThreshold = 0.2f;
+
+#define AssertEqualErrors(error, expectedError)                \
+  XCTAssertNotNil(error);                                      \
+  XCTAssertEqualObjects(error.domain, expectedError.domain);   \
+  XCTAssertEqual(error.code, expectedError.code);              \
+  XCTAssertEqualObjects(error.localizedDescription, expectedError.localizedDescription)
+
+@interface MPPFaceLandmarkerTests : XCTestCase <MPPFaceLandmarkerLiveStreamDelegate> {
+  NSDictionary *_liveStreamSucceedsTestDict;
+  NSDictionary *_outOfOrderTimestampTestDict;
+}
+@end
+
+@implementation MPPFaceLandmarkerTests
+
+#pragma mark General Tests
+
+- (void)testCreateFaceLandmarkerWithMissingModelPathFails {
+  NSString *modelPath = [MPPFaceLandmarkerTests filePathWithName:@"" extension:@""];
+
+  NSError *error = nil;
+  MPPFaceLandmarker *faceLandmarker = [[MPPFaceLandmarker alloc] initWithModelPath:modelPath
+                                                                              error:&error];
+  XCTAssertNil(faceLandmarker);
+
+  NSError *expectedError = [NSError
+      errorWithDomain:kExpectedErrorDomain
+                 code:MPPTasksErrorCodeInvalidArgumentError
+             userInfo:@{
+               NSLocalizedDescriptionKey :
+                   @"INVALID_ARGUMENT: ExternalFile must specify at least one of 'file_content', "
+                   @"'file_name', 'file_pointer_meta' or 'file_descriptor_meta'."
+             }];
+  AssertEqualErrors(error, expectedError);
+}
+
+#pragma mark Image Mode Tests
+
+- (void)testDetectWithImageModeAndPotraitSucceeds {
+  NSString *modelPath = [MPPFaceLandmarkerTests filePathWithName:kFaceLandmarkerModelName
+                                                        extension:@"task"];
+  MPPFaceLandmarker *faceLandmarker = [[MPPFaceLandmarker alloc] initWithModelPath:modelPath
+                                                                              error:nil];
+  NSArray<MPPNormalizedLandmark *> *expectedLandmarks =
+      [MPPFaceLandmarkerTests expectedLandmarksFromFileInfo:kPortraitExpectedLandmarksName];
+  [self assertResultsOfDetectInImageWithFileInfo:kPortraitImage
+                             usingFaceLandmarker:faceLandmarker
+                       containsExpectedLandmarks:expectedLandmarks
+                             expectedBlendshapes:NULL
+                    expectedTransformationMatrix:NULL];
+}
+
+- (void)testDetectWithImageModeAndPotraitAndFacialTransformationMatrixesSucceeds {
+  MPPFaceLandmarkerOptions *options =
+      [self faceLandmarkerOptionsWithModelName:kFaceLandmarkerModelName];
+  options.outputFacialTransformationMatrixes = YES;
+  MPPFaceLandmarker *faceLandmarker = [[MPPFaceLandmarker alloc] initWithOptions:options error:nil];
+
+  NSArray<MPPNormalizedLandmark *> *expectedLandmarks =
+      [MPPFaceLandmarkerTests expectedLandmarksFromFileInfo:kPortraitExpectedLandmarksName];
+  MPPTransformMatrix *expectedTransformationMatrix = [MPPFaceLandmarkerTests
+      expectedTransformationMatrixFromFileInfo:kPortraitExpectedGeometryName];
+  [self assertResultsOfDetectInImageWithFileInfo:kPortraitImage
+                             usingFaceLandmarker:faceLandmarker
+                       containsExpectedLandmarks:expectedLandmarks
+                             expectedBlendshapes:NULL
+                    expectedTransformationMatrix:expectedTransformationMatrix];
+}
+
+- (void)testDetectWithImageModeAndNoFaceSucceeds {
+  NSString *modelPath = [MPPFaceLandmarkerTests filePathWithName:kFaceLandmarkerModelName
+                                                        extension:@"task"];
+  MPPFaceLandmarker *faceLandmarker = [[MPPFaceLandmarker alloc] initWithModelPath:modelPath
+                                                                              error:nil];
+  XCTAssertNotNil(faceLandmarker);
+
+  NSError *error;
+  MPPImage *mppImage = [self imageWithFileInfo:kCatImage];
+  MPPFaceLandmarkerResult *faceLandmarkerResult = [faceLandmarker detectInImage:mppImage
+                                                                          error:&error];
+  XCTAssertNil(error);
+  XCTAssertNotNil(faceLandmarkerResult);
+  XCTAssertEqualObjects(faceLandmarkerResult.faceLandmarks, [NSArray array]);
+  XCTAssertEqualObjects(faceLandmarkerResult.faceBlendshapes, [NSArray array]);
+  XCTAssertEqualObjects(faceLandmarkerResult.facialTransformationMatrixes, [NSArray array]);
+}
+
+#pragma mark Video Mode Tests
+
+- (void)testDetectWithVideoModeAndPotraitSucceeds {
+  MPPFaceLandmarkerOptions *options =
+      [self faceLandmarkerOptionsWithModelName:kFaceLandmarkerModelName];
+  options.runningMode = MPPRunningModeVideo;
+  MPPFaceLandmarker *faceLandmarker = [[MPPFaceLandmarker alloc] initWithOptions:options error:nil];
+
+  MPPImage *image = [self imageWithFileInfo:kPortraitImage];
+  NSArray<MPPNormalizedLandmark *> *expectedLandmarks =
+      [MPPFaceLandmarkerTests expectedLandmarksFromFileInfo:kPortraitExpectedLandmarksName];
+  for (int i = 0; i < 3; i++) {
+    MPPFaceLandmarkerResult *faceLandmarkerResult = [faceLandmarker detectInVideoFrame:image
+                                                                timestampInMilliseconds:i
+                                                                                  error:nil];
+    [self assertFaceLandmarkerResult:faceLandmarkerResult
+           containsExpectedLandmarks:expectedLandmarks
+                 expectedBlendshapes:NULL
+        expectedTransformationMatrix:NULL];
+  }
+}
+
+#pragma mark Live Stream Mode Tests
+
+- (void)testDetectWithLiveStreamModeAndPotraitSucceeds {
+  NSInteger iterationCount = 100;
+
+  // Because of flow limiting, the callback might be invoked fewer than `iterationCount` times. An
+  // normal expectation will fail if expectation.fullfill() is not called
+  // `expectation.expectedFulfillmentCount` times. If `expectation.isInverted = true`, the test will
+  // only succeed if expectation is not fullfilled for the specified `expectedFulfillmentCount`.
+  // Since it is not possible to predict how many times the expectation is supposed to be
+  // fullfilled, `expectation.expectedFulfillmentCount` = `iterationCount` + 1 and
+  // `expectation.isInverted = true` ensures that test succeeds if expectation is fullfilled <=
+  // `iterationCount` times.
+  XCTestExpectation *expectation = [[XCTestExpectation alloc]
+      initWithDescription:@"detectWithOutOfOrderTimestampsAndLiveStream"];
+  expectation.expectedFulfillmentCount = iterationCount + 1;
+  expectation.inverted = YES;
+
+  MPPFaceLandmarkerOptions *options =
+      [self faceLandmarkerOptionsWithModelName:kFaceLandmarkerModelName];
+  options.runningMode = MPPRunningModeLiveStream;
+  options.faceLandmarkerLiveStreamDelegate = self;
+
+  MPPFaceLandmarker *faceLandmarker = [[MPPFaceLandmarker alloc] initWithOptions:options error:nil];
+  MPPImage *image = [self imageWithFileInfo:kPortraitImage];
+
+  _liveStreamSucceedsTestDict = @{
+    kLiveStreamTestsDictFaceLandmarkerKey : faceLandmarker,
+    kLiveStreamTestsDictExpectationKey : expectation
+  };
+
+  for (int i = 0; i < iterationCount; i++) {
+    XCTAssertTrue([faceLandmarker detectAsyncInImage:image timestampInMilliseconds:i error:nil]);
+  }
+
+  NSTimeInterval timeout = 0.5f;
+  [self waitForExpectations:@[ expectation ] timeout:timeout];
+}
+
+- (void)testDetectWithOutOfOrderTimestampsAndLiveStreamModeFails {
+  MPPFaceLandmarkerOptions *options =
+      [self faceLandmarkerOptionsWithModelName:kFaceLandmarkerModelName];
+  options.runningMode = MPPRunningModeLiveStream;
+  options.faceLandmarkerLiveStreamDelegate = self;
+
+  XCTestExpectation *expectation = [[XCTestExpectation alloc]
+      initWithDescription:@"detectWithOutOfOrderTimestampsAndLiveStream"];
+  expectation.expectedFulfillmentCount = 1;
+
+  MPPFaceLandmarker *faceLandmarker = [[MPPFaceLandmarker alloc] initWithOptions:options error:nil];
+  _liveStreamSucceedsTestDict = @{
+    kLiveStreamTestsDictFaceLandmarkerKey : faceLandmarker,
+    kLiveStreamTestsDictExpectationKey : expectation
+  };
+
+  MPPImage *image = [self imageWithFileInfo:kPortraitImage];
+  XCTAssertTrue([faceLandmarker detectAsyncInImage:image timestampInMilliseconds:1 error:nil]);
+
+  NSError *error;
+  XCTAssertFalse([faceLandmarker detectAsyncInImage:image timestampInMilliseconds:0 error:&error]);
+
+  NSError *expectedError =
+      [NSError errorWithDomain:kExpectedErrorDomain
+                          code:MPPTasksErrorCodeInvalidArgumentError
+                      userInfo:@{
+                        NSLocalizedDescriptionKey :
+                            @"INVALID_ARGUMENT: Input timestamp must be monotonically increasing."
+                      }];
+  AssertEqualErrors(error, expectedError);
+
+  NSTimeInterval timeout = 0.5f;
+  [self waitForExpectations:@[ expectation ] timeout:timeout];
+}
+
+#pragma mark Running Mode Tests
+
+- (void)testCreateFaceLandmarkerFailsWithDelegateInNonLiveStreamMode {
+  MPPRunningMode runningModesToTest[] = {MPPRunningModeImage, MPPRunningModeVideo};
+  for (int i = 0; i < sizeof(runningModesToTest) / sizeof(runningModesToTest[0]); i++) {
+    MPPFaceLandmarkerOptions *options =
+        [self faceLandmarkerOptionsWithModelName:kFaceLandmarkerModelName];
+
+    options.runningMode = runningModesToTest[i];
+    options.faceLandmarkerLiveStreamDelegate = self;
+
+    [self
+        assertCreateFaceLandmarkerWithOptions:options
+                       failsWithExpectedError:
+                           [NSError errorWithDomain:kExpectedErrorDomain
+                                               code:MPPTasksErrorCodeInvalidArgumentError
+                                           userInfo:@{
+                                             NSLocalizedDescriptionKey :
+                                                 @"The vision task is in image or video mode. The "
+                                                 @"delegate must not be set in the task's options."
+                                           }]];
+  }
+}
+
+- (void)testCreateFaceLandmarkerFailsWithMissingDelegateInLiveStreamMode {
+  MPPFaceLandmarkerOptions *options =
+      [self faceLandmarkerOptionsWithModelName:kFaceLandmarkerModelName];
+  options.runningMode = MPPRunningModeLiveStream;
+
+  [self assertCreateFaceLandmarkerWithOptions:options
+                       failsWithExpectedError:
+                           [NSError errorWithDomain:kExpectedErrorDomain
+                                               code:MPPTasksErrorCodeInvalidArgumentError
+                                           userInfo:@{
+                                             NSLocalizedDescriptionKey :
+                                                 @"The vision task is in live stream mode. An "
+                                                 @"object must be set as the delegate of the task "
+                                                 @"in its options to ensure asynchronous delivery "
+                                                 @"of results."
+                                           }]];
+}
+
+- (void)testDetectFailsWithCallingWrongAPIInImageMode {
+  MPPFaceLandmarkerOptions *options =
+      [self faceLandmarkerOptionsWithModelName:kFaceLandmarkerModelName];
+  MPPFaceLandmarker *faceLandmarker = [[MPPFaceLandmarker alloc] initWithOptions:options error:nil];
+
+  MPPImage *image = [self imageWithFileInfo:kPortraitImage];
+
+  NSError *liveStreamAPICallError;
+  XCTAssertFalse([faceLandmarker detectAsyncInImage:image
+                            timestampInMilliseconds:0
+                                              error:&liveStreamAPICallError]);
+
+  NSError *expectedLiveStreamAPICallError =
+      [NSError errorWithDomain:kExpectedErrorDomain
+                          code:MPPTasksErrorCodeInvalidArgumentError
+                      userInfo:@{
+                        NSLocalizedDescriptionKey : @"The vision task is not initialized with live "
+                                                    @"stream mode. Current Running Mode: Image"
+                      }];
+  AssertEqualErrors(liveStreamAPICallError, expectedLiveStreamAPICallError);
+
+  NSError *videoAPICallError;
+  XCTAssertFalse([faceLandmarker detectInVideoFrame:image
+                            timestampInMilliseconds:0
+                                              error:&videoAPICallError]);
+
+  NSError *expectedVideoAPICallError =
+      [NSError errorWithDomain:kExpectedErrorDomain
+                          code:MPPTasksErrorCodeInvalidArgumentError
+                      userInfo:@{
+                        NSLocalizedDescriptionKey : @"The vision task is not initialized with "
+                                                    @"video mode. Current Running Mode: Image"
+                      }];
+  AssertEqualErrors(videoAPICallError, expectedVideoAPICallError);
+}
+
+- (void)testDetectFailsWithCallingWrongAPIInVideoMode {
+  MPPFaceLandmarkerOptions *options =
+      [self faceLandmarkerOptionsWithModelName:kFaceLandmarkerModelName];
+  options.runningMode = MPPRunningModeVideo;
+
+  MPPFaceLandmarker *faceLandmarker = [[MPPFaceLandmarker alloc] initWithOptions:options error:nil];
+
+  MPPImage *image = [self imageWithFileInfo:kPortraitImage];
+  NSError *liveStreamAPICallError;
+  XCTAssertFalse([faceLandmarker detectAsyncInImage:image
+                            timestampInMilliseconds:0
+                                              error:&liveStreamAPICallError]);
+
+  NSError *expectedLiveStreamAPICallError =
+      [NSError errorWithDomain:kExpectedErrorDomain
+                          code:MPPTasksErrorCodeInvalidArgumentError
+                      userInfo:@{
+                        NSLocalizedDescriptionKey : @"The vision task is not initialized with live "
+                                                    @"stream mode. Current Running Mode: Video"
+                      }];
+  AssertEqualErrors(liveStreamAPICallError, expectedLiveStreamAPICallError);
+
+  NSError *imageAPICallError;
+  XCTAssertFalse([faceLandmarker detectInImage:image error:&imageAPICallError]);
+
+  NSError *expectedImageAPICallError =
+      [NSError errorWithDomain:kExpectedErrorDomain
+                          code:MPPTasksErrorCodeInvalidArgumentError
+                      userInfo:@{
+                        NSLocalizedDescriptionKey : @"The vision task is not initialized with "
+                                                    @"image mode. Current Running Mode: Video"
+                      }];
+  AssertEqualErrors(imageAPICallError, expectedImageAPICallError);
+}
+
+- (void)testDetectFailsWithCallingWrongAPIInLiveStreamMode {
+  MPPFaceLandmarkerOptions *options =
+      [self faceLandmarkerOptionsWithModelName:kFaceLandmarkerModelName];
+  options.runningMode = MPPRunningModeLiveStream;
+  options.faceLandmarkerLiveStreamDelegate = self;
+  MPPFaceLandmarker *faceLandmarker = [[MPPFaceLandmarker alloc] initWithOptions:options error:nil];
+
+  MPPImage *image = [self imageWithFileInfo:kPortraitImage];
+
+  NSError *imageAPICallError;
+  XCTAssertFalse([faceLandmarker detectInImage:image error:&imageAPICallError]);
+
+  NSError *expectedImageAPICallError =
+      [NSError errorWithDomain:kExpectedErrorDomain
+                          code:MPPTasksErrorCodeInvalidArgumentError
+                      userInfo:@{
+                        NSLocalizedDescriptionKey : @"The vision task is not initialized with "
+                                                    @"image mode. Current Running Mode: Live Stream"
+                      }];
+  AssertEqualErrors(imageAPICallError, expectedImageAPICallError);
+
+  NSError *videoAPICallError;
+  XCTAssertFalse([faceLandmarker detectInVideoFrame:image
+                            timestampInMilliseconds:0
+                                              error:&videoAPICallError]);
+
+  NSError *expectedVideoAPICallError =
+      [NSError errorWithDomain:kExpectedErrorDomain
+                          code:MPPTasksErrorCodeInvalidArgumentError
+                      userInfo:@{
+                        NSLocalizedDescriptionKey : @"The vision task is not initialized with "
+                                                    @"video mode. Current Running Mode: Live Stream"
+                      }];
+  AssertEqualErrors(videoAPICallError, expectedVideoAPICallError);
+}
+
+#pragma mark MPPFaceLandmarkerLiveStreamDelegate Methods
+- (void)faceLandmarker:(MPPFaceLandmarker *)faceLandmarker
+    didFinishDetectionWithResult:(MPPFaceLandmarkerResult *)faceLandmarkerResult
+         timestampInMilliseconds:(NSInteger)timestampInMilliseconds
+                           error:(NSError *)error {
+  NSArray<MPPNormalizedLandmark *> *expectedLandmarks =
+      [MPPFaceLandmarkerTests expectedLandmarksFromFileInfo:kPortraitExpectedLandmarksName];
+  [self assertFaceLandmarkerResult:faceLandmarkerResult
+         containsExpectedLandmarks:expectedLandmarks
+               expectedBlendshapes:NULL
+      expectedTransformationMatrix:NULL];
+
+  if (faceLandmarker == _outOfOrderTimestampTestDict[kLiveStreamTestsDictFaceLandmarkerKey]) {
+    [_outOfOrderTimestampTestDict[kLiveStreamTestsDictExpectationKey] fulfill];
+  } else if (faceLandmarker == _liveStreamSucceedsTestDict[kLiveStreamTestsDictFaceLandmarkerKey]) {
+    [_liveStreamSucceedsTestDict[kLiveStreamTestsDictExpectationKey] fulfill];
+  }
+}
+
++ (NSString *)filePathWithName:(NSString *)fileName extension:(NSString *)extension {
+  NSString *filePath =
+      [[NSBundle bundleForClass:[MPPFaceLandmarkerTests class]] pathForResource:fileName
+                                                                          ofType:extension];
+  return filePath;
+}
+
++ (NSArray<MPPNormalizedLandmark *> *)expectedLandmarksFromFileInfo:(NSDictionary *)fileInfo {
|
||||||
|
NSString *filePath = [self filePathWithName:fileInfo[@"name"] extension:fileInfo[@"type"]];
|
||||||
|
NormalizedLandmarkListProto proto;
|
||||||
|
if (!get_proto_from_pbtxt([filePath UTF8String], proto).ok()) {
|
||||||
|
return nil;
|
||||||
|
}
|
||||||
|
NSMutableArray<MPPNormalizedLandmark *> *landmarks =
|
||||||
|
[NSMutableArray arrayWithCapacity:(NSUInteger)proto.landmark_size()];
|
||||||
|
for (const auto &landmarkProto : proto.landmark()) {
|
||||||
|
[landmarks addObject:[MPPNormalizedLandmark normalizedLandmarkWithProto:landmarkProto]];
|
||||||
|
}
|
||||||
|
return landmarks;
|
||||||
|
}
|
||||||
|
|
||||||
|
+ (MPPClassifications *)expectedBlendshapesFromFileInfo:(NSDictionary *)fileInfo {
|
||||||
|
NSString *filePath = [self filePathWithName:fileInfo[@"name"] extension:fileInfo[@"type"]];
|
||||||
|
ClassificationListProto proto;
|
||||||
|
if (!get_proto_from_pbtxt([filePath UTF8String], proto).ok()) {
|
||||||
|
return nil;
|
||||||
|
}
|
||||||
|
return [MPPClassifications classificationsWithClassificationListProto:proto
|
||||||
|
headIndex:0
|
||||||
|
headName:[NSString string]];
|
||||||
|
}
|
||||||
|
|
||||||
|
+ (MPPTransformMatrix *)expectedTransformationMatrixFromFileInfo:(NSDictionary *)fileInfo {
|
||||||
|
NSString *filePath = [self filePathWithName:fileInfo[@"name"] extension:fileInfo[@"type"]];
|
||||||
|
FaceGeometryProto proto;
|
||||||
|
if (!get_proto_from_pbtxt([filePath UTF8String], proto).ok()) {
|
||||||
|
return nil;
|
||||||
|
}
|
||||||
|
return [[MPPTransformMatrix alloc] initWithData:proto.pose_transform_matrix().packed_data().data()
|
||||||
|
rows:proto.pose_transform_matrix().rows()
|
||||||
|
columns:proto.pose_transform_matrix().cols()];
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)assertFaceLandmarkerResult:(MPPFaceLandmarkerResult *)faceLandmarkerResult
|
||||||
|
containsExpectedLandmarks:(NSArray<MPPNormalizedLandmark *> *)expectedLandmarks
|
||||||
|
expectedBlendshapes:(nullable MPPClassifications *)expectedBlendshapes
|
||||||
|
expectedTransformationMatrix:(nullable MPPTransformMatrix *)expectedTransformationMatrix {
|
||||||
|
NSArray<MPPNormalizedLandmark *> *landmarks = faceLandmarkerResult.faceLandmarks[0];
|
||||||
|
XCTAssertEqual(landmarks.count, expectedLandmarks.count);
|
||||||
|
for (int i = 0; i < landmarks.count; ++i) {
|
||||||
|
XCTAssertEqualWithAccuracy(landmarks[i].x, expectedLandmarks[i].x, kLandmarkErrorThreshold,
|
||||||
|
@"index i = %d", i);
|
||||||
|
XCTAssertEqualWithAccuracy(landmarks[i].y, expectedLandmarks[i].y, kLandmarkErrorThreshold,
|
||||||
|
@"index i = %d", i);
|
||||||
|
}
|
||||||
|
|
||||||
|
if (expectedBlendshapes == NULL) {
|
||||||
|
XCTAssertEqualObjects(faceLandmarkerResult.faceBlendshapes, [NSArray array]);
|
||||||
|
} else {
|
||||||
|
MPPClassifications *blendshapes = faceLandmarkerResult.faceBlendshapes[0];
|
||||||
|
NSArray<MPPCategory *> *actualCategories = blendshapes.categories;
|
||||||
|
NSArray<MPPCategory *> *expectedCategories = expectedBlendshapes.categories;
|
||||||
|
XCTAssertEqual(actualCategories.count, expectedCategories.count);
|
||||||
|
for (int i = 0; i < actualCategories.count; ++i) {
|
||||||
|
XCTAssertEqual(actualCategories[i].index, expectedCategories[i].index, @"index i = %d", i);
|
||||||
|
XCTAssertEqualWithAccuracy(actualCategories[i].score, expectedCategories[i].score,
|
||||||
|
kBlendshapesErrorThreshold, @"index i = %d", i);
|
||||||
|
XCTAssertEqualObjects(actualCategories[i].categoryName, expectedCategories[i].categoryName,
|
||||||
|
@"index i = %d", i);
|
||||||
|
XCTAssertEqualObjects(actualCategories[i].displayName, expectedCategories[i].displayName,
|
||||||
|
@"index i = %d", i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
if (expectedTransformationMatrix == NULL) {
|
||||||
|
XCTAssertEqualObjects(faceLandmarkerResult.facialTransformationMatrixes, [NSArray array]);
|
||||||
|
} else {
|
||||||
|
MPPTransformMatrix *actualTransformationMatrix =
|
||||||
|
faceLandmarkerResult.facialTransformationMatrixes[0];
|
||||||
|
XCTAssertEqual(actualTransformationMatrix.rows, expectedTransformationMatrix.rows);
|
||||||
|
XCTAssertEqual(actualTransformationMatrix.columns, expectedTransformationMatrix.columns);
|
||||||
|
for (int i = 0; i < actualTransformationMatrix.rows * actualTransformationMatrix.columns; ++i) {
|
||||||
|
XCTAssertEqualWithAccuracy(actualTransformationMatrix.data[i],
|
||||||
|
expectedTransformationMatrix.data[i],
|
||||||
|
kFacialTransformationMatrixErrorThreshold, @"index i = %d", i);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#pragma mark Face Landmarker Initializers
|
||||||
|
|
||||||
|
- (MPPFaceLandmarkerOptions *)faceLandmarkerOptionsWithModelName:(NSString *)modelName {
|
||||||
|
NSString *modelPath = [MPPFaceLandmarkerTests filePathWithName:modelName extension:@"task"];
|
||||||
|
MPPFaceLandmarkerOptions *faceLandmarkerOptions = [[MPPFaceLandmarkerOptions alloc] init];
|
||||||
|
faceLandmarkerOptions.baseOptions.modelAssetPath = modelPath;
|
||||||
|
return faceLandmarkerOptions;
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)assertCreateFaceLandmarkerWithOptions:(MPPFaceLandmarkerOptions *)faceLandmarkerOptions
|
||||||
|
failsWithExpectedError:(NSError *)expectedError {
|
||||||
|
NSError *error = nil;
|
||||||
|
MPPFaceLandmarker *faceLandmarker =
|
||||||
|
[[MPPFaceLandmarker alloc] initWithOptions:faceLandmarkerOptions error:&error];
|
||||||
|
XCTAssertNil(faceLandmarker);
|
||||||
|
AssertEqualErrors(error, expectedError);
|
||||||
|
}
|
||||||
|
|
||||||
|
#pragma mark Assert Detection Results
|
||||||
|
|
||||||
|
- (MPPImage *)imageWithFileInfo:(ResourceFileInfo *)fileInfo {
|
||||||
|
UIImageOrientation orientation = (UIImageOrientation)[fileInfo[@"orientation"] intValue];
|
||||||
|
MPPImage *image = [MPPImage imageFromBundleWithClass:[MPPFaceLandmarkerTests class]
|
||||||
|
fileName:fileInfo[@"name"]
|
||||||
|
ofType:fileInfo[@"type"]
|
||||||
|
orientation:orientation];
|
||||||
|
XCTAssertNotNil(image);
|
||||||
|
return image;
|
||||||
|
}
|
||||||
|
|
||||||
|
- (void)assertResultsOfDetectInImageWithFileInfo:(ResourceFileInfo *)fileInfo
|
||||||
|
usingFaceLandmarker:(MPPFaceLandmarker *)faceLandmarker
|
||||||
|
containsExpectedLandmarks:
|
||||||
|
(NSArray<MPPNormalizedLandmark *> *)expectedLandmarks
|
||||||
|
expectedBlendshapes:(nullable MPPClassifications *)expectedBlendshapes
|
||||||
|
expectedTransformationMatrix:
|
||||||
|
(nullable MPPTransformMatrix *)expectedTransformationMatrix {
|
||||||
|
MPPImage *mppImage = [self imageWithFileInfo:fileInfo];
|
||||||
|
|
||||||
|
NSError *error;
|
||||||
|
MPPFaceLandmarkerResult *faceLandmarkerResult = [faceLandmarker detectInImage:mppImage
|
||||||
|
error:&error];
|
||||||
|
XCTAssertNil(error);
|
||||||
|
XCTAssertNotNil(faceLandmarkerResult);
|
||||||
|
|
||||||
|
[self assertFaceLandmarkerResult:faceLandmarkerResult
|
||||||
|
containsExpectedLandmarks:expectedLandmarks
|
||||||
|
expectedBlendshapes:expectedBlendshapes
|
||||||
|
expectedTransformationMatrix:expectedTransformationMatrix];
|
||||||
|
}
|
||||||
|
|
||||||
|
@end
|
|
@@ -37,7 +37,32 @@ objc_library(
     srcs = ["sources/MPPFaceLandmarkerOptions.m"],
     hdrs = ["sources/MPPFaceLandmarkerOptions.h"],
     deps = [
+        ":MPPFaceLandmarkerResult",
         "//mediapipe/tasks/ios/core:MPPTaskOptions",
         "//mediapipe/tasks/ios/vision/core:MPPRunningMode",
     ],
 )
+
+objc_library(
+    name = "MPPFaceLandmarker",
+    srcs = ["sources/MPPFaceLandmarker.m"],
+    hdrs = ["sources/MPPFaceLandmarker.h"],
+    copts = [
+        "-ObjC++",
+        "-std=c++17",
+        "-x objective-c++",
+    ],
+    deps = [
+        ":MPPFaceLandmarkerOptions",
+        ":MPPFaceLandmarkerResult",
+        "//mediapipe/tasks/cc/vision/face_landmarker:face_landmarker_graph",
+        "//mediapipe/tasks/ios/common/utils:MPPCommonUtils",
+        "//mediapipe/tasks/ios/common/utils:NSStringHelpers",
+        "//mediapipe/tasks/ios/core:MPPTaskInfo",
+        "//mediapipe/tasks/ios/vision/core:MPPImage",
+        "//mediapipe/tasks/ios/vision/core:MPPVisionPacketCreator",
+        "//mediapipe/tasks/ios/vision/core:MPPVisionTaskRunner",
+        "//mediapipe/tasks/ios/vision/face_landmarker/utils:MPPFaceLandmarkerOptionsHelpers",
+        "//mediapipe/tasks/ios/vision/face_landmarker/utils:MPPFaceLandmarkerResultHelpers",
+    ],
+)
@@ -0,0 +1,156 @@
// Copyright 2023 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#import <Foundation/Foundation.h>

#import "mediapipe/tasks/ios/vision/core/sources/MPPImage.h"
#import "mediapipe/tasks/ios/vision/face_landmarker/sources/MPPFaceLandmarkerOptions.h"
#import "mediapipe/tasks/ios/vision/face_landmarker/sources/MPPFaceLandmarkerResult.h"

NS_ASSUME_NONNULL_BEGIN

/**
 * @brief Class that performs face landmark detection on images.
 *
 * The API expects a TFLite model with mandatory TFLite Model Metadata.
 */
NS_SWIFT_NAME(FaceLandmarker)
@interface MPPFaceLandmarker : NSObject

/**
 * Creates a new instance of `MPPFaceLandmarker` from an absolute path to a TensorFlow Lite model
 * file stored locally on the device and the default `MPPFaceLandmarkerOptions`.
 *
 * @param modelPath An absolute path to a TensorFlow Lite model file stored locally on the device.
 * @param error An optional error parameter populated when there is an error in initializing the
 * face landmarker.
 *
 * @return A new instance of `MPPFaceLandmarker` with the given model path. `nil` if there is an
 * error in initializing the face landmarker.
 */
- (nullable instancetype)initWithModelPath:(NSString *)modelPath error:(NSError **)error;

/**
 * Creates a new instance of `MPPFaceLandmarker` from the given `MPPFaceLandmarkerOptions`.
 *
 * @param options The options of type `MPPFaceLandmarkerOptions` to use for configuring the
 * `MPPFaceLandmarker`.
 * @param error An optional error parameter populated when there is an error in initializing the
 * face landmarker.
 *
 * @return A new instance of `MPPFaceLandmarker` with the given options. `nil` if there is an error
 * in initializing the face landmarker.
 */
- (nullable instancetype)initWithOptions:(MPPFaceLandmarkerOptions *)options
                                   error:(NSError **)error NS_DESIGNATED_INITIALIZER;

/**
 * Performs face landmark detection on the provided `MPPImage` using the whole image as region of
 * interest. Rotation will be applied according to the `orientation` property of the provided
 * `MPPImage`. Only use this method when the `MPPFaceLandmarker` is created with
 * `MPPRunningModeImage`.
 *
 * This method supports RGBA images. If your `MPPImage` has a source type of
 * `MPPImageSourceTypePixelBuffer` or `MPPImageSourceTypeSampleBuffer`, the underlying pixel buffer
 * must have one of the following pixel format types:
 * 1. kCVPixelFormatType_32BGRA
 * 2. kCVPixelFormatType_32RGBA
 *
 * If your `MPPImage` has a source type of `MPPImageSourceTypeImage`, ensure that the color space
 * is RGB with an Alpha channel.
 *
 * @param image The `MPPImage` on which face landmark detection is to be performed.
 * @param error An optional error parameter populated when there is an error in performing face
 * landmark detection on the input image.
 *
 * @return An `MPPFaceLandmarkerResult` that contains a list of landmarks.
 */
- (nullable MPPFaceLandmarkerResult *)detectInImage:(MPPImage *)image
                                              error:(NSError **)error NS_SWIFT_NAME(detect(image:));

/**
 * Performs face landmark detection on the provided video frame of type `MPPImage` using the whole
 * image as region of interest. Rotation will be applied according to the `orientation` property of
 * the provided `MPPImage`. Only use this method when the `MPPFaceLandmarker` is created with
 * `MPPRunningModeVideo`.
 *
 * This method supports RGBA images. If your `MPPImage` has a source type of
 * `MPPImageSourceTypePixelBuffer` or `MPPImageSourceTypeSampleBuffer`, the underlying pixel buffer
 * must have one of the following pixel format types:
 * 1. kCVPixelFormatType_32BGRA
 * 2. kCVPixelFormatType_32RGBA
 *
 * If your `MPPImage` has a source type of `MPPImageSourceTypeImage`, ensure that the color space
 * is RGB with an Alpha channel.
 *
 * @param image The `MPPImage` on which face landmark detection is to be performed.
 * @param timestampInMilliseconds The video frame's timestamp (in milliseconds). The input
 * timestamps must be monotonically increasing.
 * @param error An optional error parameter populated when there is an error in performing face
 * landmark detection on the input image.
 *
 * @return An `MPPFaceLandmarkerResult` that contains a list of landmarks.
 */
- (nullable MPPFaceLandmarkerResult *)detectInVideoFrame:(MPPImage *)image
                                 timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                                                   error:(NSError **)error
    NS_SWIFT_NAME(detect(videoFrame:timestampInMilliseconds:));

/**
 * Sends live stream image data of type `MPPImage` to perform face landmark detection using the
 * whole image as region of interest. Rotation will be applied according to the `orientation`
 * property of the provided `MPPImage`. Only use this method when the `MPPFaceLandmarker` is
 * created with `MPPRunningModeLiveStream`.
 *
 * The object which needs to be continuously notified of the available results of face landmark
 * detection must conform to the `MPPFaceLandmarkerLiveStreamDelegate` protocol and implement the
 * `faceLandmarker:didFinishDetectionWithResult:timestampInMilliseconds:error:` delegate method.
 *
 * It's required to provide a timestamp (in milliseconds) to indicate when the input image is sent
 * to the face landmarker. The input timestamps must be monotonically increasing.
 *
 * This method supports RGBA images. If your `MPPImage` has a source type of
 * `MPPImageSourceTypePixelBuffer` or `MPPImageSourceTypeSampleBuffer`, the underlying pixel buffer
 * must have one of the following pixel format types:
 * 1. kCVPixelFormatType_32BGRA
 * 2. kCVPixelFormatType_32RGBA
 *
 * If the input `MPPImage` has a source type of `MPPImageSourceTypeImage`, ensure that the color
 * space is RGB with an Alpha channel.
 *
 * If this method is used for processing live camera frames using `AVFoundation`, ensure that you
 * request `AVCaptureVideoDataOutput` to output frames in `kCMPixelFormat_32RGBA` using its
 * `videoSettings` property.
 *
 * @param image A live stream image data of type `MPPImage` on which face landmark detection is to
 * be performed.
 * @param timestampInMilliseconds The timestamp (in milliseconds) which indicates when the input
 * image is sent to the face landmarker. The input timestamps must be monotonically increasing.
 * @param error An optional error parameter populated when there is an error when sending the input
 * image to the graph.
 *
 * @return `YES` if the image was sent to the task successfully, otherwise `NO`.
 */
- (BOOL)detectAsyncInImage:(MPPImage *)image
    timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                      error:(NSError **)error
    NS_SWIFT_NAME(detectAsync(image:timestampInMilliseconds:));

- (instancetype)init NS_UNAVAILABLE;

+ (instancetype)new NS_UNAVAILABLE;

@end

NS_ASSUME_NONNULL_END
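For orientation, a minimal image-mode usage sketch of the API declared above; the bundled model file name and the `MPPImage` convenience initializer used here are assumptions for illustration, not part of this change.

static void DetectFaceLandmarksInStillImage(UIImage *portrait) {
  NSError *error = nil;
  // Assumed bundled model file name; any FaceLandmarker .task bundle works.
  NSString *modelPath = [[NSBundle mainBundle] pathForResource:@"face_landmarker" ofType:@"task"];
  MPPFaceLandmarker *faceLandmarker = [[MPPFaceLandmarker alloc] initWithModelPath:modelPath
                                                                              error:&error];
  // `initWithUIImage:error:` is the assumed MPPImage initializer for wrapping a UIImage.
  MPPImage *image = [[MPPImage alloc] initWithUIImage:portrait error:&error];
  MPPFaceLandmarkerResult *result = [faceLandmarker detectInImage:image error:&error];
  if (result.faceLandmarks.count > 0) {
    // Normalized landmarks of the first detected face.
    NSArray<MPPNormalizedLandmark *> *landmarks = result.faceLandmarks[0];
    NSLog(@"Detected %lu landmarks for the first face", (unsigned long)landmarks.count);
  }
}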
@@ -0,0 +1,280 @@
// Copyright 2023 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#import "mediapipe/tasks/ios/vision/face_landmarker/sources/MPPFaceLandmarker.h"
#import <Foundation/Foundation.h>

#import "mediapipe/tasks/ios/common/utils/sources/MPPCommonUtils.h"
#import "mediapipe/tasks/ios/common/utils/sources/NSString+Helpers.h"
#import "mediapipe/tasks/ios/core/sources/MPPTaskInfo.h"
#import "mediapipe/tasks/ios/vision/core/sources/MPPVisionPacketCreator.h"
#import "mediapipe/tasks/ios/vision/core/sources/MPPVisionTaskRunner.h"
#import "mediapipe/tasks/ios/vision/face_landmarker/utils/sources/MPPFaceLandmarkerOptions+Helpers.h"
#import "mediapipe/tasks/ios/vision/face_landmarker/utils/sources/MPPFaceLandmarkerResult+Helpers.h"

using ::mediapipe::NormalizedRect;
using ::mediapipe::Packet;
using ::mediapipe::Timestamp;
using ::mediapipe::tasks::core::PacketMap;
using ::mediapipe::tasks::core::PacketsCallback;

static constexpr int kMicrosecondsPerMillisecond = 1000;

// Constants for the underlying MP Tasks Graph. See
// https://github.com/google/mediapipe/tree/master/mediapipe/tasks/cc/vision/face_landmarker/face_landmarker_graph.cc
static NSString *const kLandmarksOutStreamName = @"landmarks_out";
static NSString *const kLandmarksOutTag = @"NORM_LANDMARKS";
static NSString *const kBlendshapesOutStreamName = @"blendshapes_out";
static NSString *const kBlendshapesOutTag = @"BLENDSHAPES";
static NSString *const kFaceGeometryOutStreamName = @"face_geometry_out";
static NSString *const kFaceGeometryOutTag = @"FACE_GEOMETRY";
static NSString *const kNormRectStreamName = @"norm_rect_in";
static NSString *const kNormRectTag = @"NORM_RECT";
static NSString *const kImageInStreamName = @"image_in";
static NSString *const kImageOutStreamName = @"image_out";
static NSString *const kImageTag = @"IMAGE";
static NSString *const kTaskGraphName =
    @"mediapipe.tasks.vision.face_landmarker.FaceLandmarkerGraph";
static NSString *const kTaskName = @"faceLandmarker";

#define InputPacketMap(imagePacket, normalizedRectPacket) \
  {                                                       \
    {kImageInStreamName.cppString, imagePacket}, {        \
      kNormRectStreamName.cppString, normalizedRectPacket \
    }                                                     \
  }

@interface MPPFaceLandmarker () {
  /** iOS Vision Task Runner */
  MPPVisionTaskRunner *_visionTaskRunner;
  /**
   * The callback queue for the live stream delegate. This is only set if the user provides a live
   * stream delegate.
   */
  dispatch_queue_t _callbackQueue;
  /** The user-provided live stream delegate if set. */
  __weak id<MPPFaceLandmarkerLiveStreamDelegate> _faceLandmarkerLiveStreamDelegate;
}
@end

@implementation MPPFaceLandmarker

- (instancetype)initWithOptions:(MPPFaceLandmarkerOptions *)options error:(NSError **)error {
  self = [super init];
  if (self) {
    NSArray<NSString *> *inputStreams = @[
      [NSString stringWithFormat:@"%@:%@", kImageTag, kImageInStreamName],
      [NSString stringWithFormat:@"%@:%@", kNormRectTag, kNormRectStreamName]
    ];

    NSMutableArray<NSString *> *outputStreams = [NSMutableArray
        arrayWithObjects:[NSString
                             stringWithFormat:@"%@:%@", kLandmarksOutTag, kLandmarksOutStreamName],
                         [NSString stringWithFormat:@"%@:%@", kImageTag, kImageOutStreamName], nil];
    if (options.outputFaceBlendshapes) {
      [outputStreams addObject:[NSString stringWithFormat:@"%@:%@", kBlendshapesOutTag,
                                                           kBlendshapesOutStreamName]];
    }
    if (options.outputFacialTransformationMatrixes) {
      [outputStreams addObject:[NSString stringWithFormat:@"%@:%@", kFaceGeometryOutTag,
                                                           kFaceGeometryOutStreamName]];
    }

    MPPTaskInfo *taskInfo =
        [[MPPTaskInfo alloc] initWithTaskGraphName:kTaskGraphName
                                      inputStreams:inputStreams
                                     outputStreams:outputStreams
                                       taskOptions:options
                                enableFlowLimiting:options.runningMode == MPPRunningModeLiveStream
                                             error:error];

    if (!taskInfo) {
      return nil;
    }

    PacketsCallback packetsCallback = nullptr;

    if (options.faceLandmarkerLiveStreamDelegate) {
      _faceLandmarkerLiveStreamDelegate = options.faceLandmarkerLiveStreamDelegate;

      // Create a private serial dispatch queue in which the delegate method will be called
      // asynchronously. This is to ensure that if the client performs a long running operation in
      // the delegate method, the queue on which the C++ callbacks is invoked is not blocked and is
      // freed up to continue with its operations.
      _callbackQueue = dispatch_queue_create(
          [MPPVisionTaskRunner uniqueDispatchQueueNameWithSuffix:kTaskName], NULL);

      // Capturing `self` as weak in order to avoid `self` being kept in memory
      // and cause a retain cycle, after self is set to `nil`.
      MPPFaceLandmarker *__weak weakSelf = self;
      packetsCallback = [weakSelf](absl::StatusOr<PacketMap> liveStreamResult) {
        [weakSelf processLiveStreamResult:liveStreamResult];
      };
    }

    _visionTaskRunner =
        [[MPPVisionTaskRunner alloc] initWithCalculatorGraphConfig:[taskInfo generateGraphConfig]
                                                       runningMode:options.runningMode
                                                   packetsCallback:std::move(packetsCallback)
                                                             error:error];

    if (!_visionTaskRunner) {
      return nil;
    }
  }

  return self;
}

- (instancetype)initWithModelPath:(NSString *)modelPath error:(NSError **)error {
  MPPFaceLandmarkerOptions *options = [[MPPFaceLandmarkerOptions alloc] init];
  options.baseOptions.modelAssetPath = modelPath;
  return [self initWithOptions:options error:error];
}

- (std::optional<PacketMap>)inputPacketMapWithMPPImage:(MPPImage *)image
                               timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                                                 error:(NSError **)error {
  std::optional<NormalizedRect> rect =
      [_visionTaskRunner normalizedRectWithImageOrientation:image.orientation
                                                   imageSize:CGSizeMake(image.width, image.height)
                                                       error:error];
  if (!rect.has_value()) {
    return std::nullopt;
  }

  Packet imagePacket = [MPPVisionPacketCreator createPacketWithMPPImage:image
                                                timestampInMilliseconds:timestampInMilliseconds
                                                                  error:error];
  if (imagePacket.IsEmpty()) {
    return std::nullopt;
  }

  Packet normalizedRectPacket =
      [MPPVisionPacketCreator createPacketWithNormalizedRect:*rect
                                     timestampInMilliseconds:timestampInMilliseconds];

  PacketMap inputPacketMap = InputPacketMap(imagePacket, normalizedRectPacket);
  return inputPacketMap;
}

- (nullable MPPFaceLandmarkerResult *)detectInImage:(MPPImage *)image error:(NSError **)error {
  std::optional<NormalizedRect> rect =
      [_visionTaskRunner normalizedRectWithImageOrientation:image.orientation
                                                   imageSize:CGSizeMake(image.width, image.height)
                                                       error:error];
  if (!rect.has_value()) {
    return nil;
  }

  Packet imagePacket = [MPPVisionPacketCreator createPacketWithMPPImage:image error:error];
  if (imagePacket.IsEmpty()) {
    return nil;
  }

  Packet normalizedRectPacket = [MPPVisionPacketCreator createPacketWithNormalizedRect:*rect];

  PacketMap inputPacketMap = InputPacketMap(imagePacket, normalizedRectPacket);

  std::optional<PacketMap> outputPacketMap = [_visionTaskRunner processImagePacketMap:inputPacketMap
                                                                                 error:error];
  if (!outputPacketMap.has_value()) {
    return nil;
  }

  return [MPPFaceLandmarkerResult
      faceLandmarkerResultWithLandmarksPacket:outputPacketMap
                                                  .value()[kLandmarksOutStreamName.cppString]
                            blendshapesPacket:outputPacketMap
                                                  .value()[kBlendshapesOutStreamName.cppString]
                 transformationMatrixesPacket:outputPacketMap
                                                  .value()[kFaceGeometryOutStreamName.cppString]];
}

- (nullable MPPFaceLandmarkerResult *)detectInVideoFrame:(MPPImage *)image
                                 timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                                                   error:(NSError **)error {
  std::optional<PacketMap> inputPacketMap = [self inputPacketMapWithMPPImage:image
                                                     timestampInMilliseconds:timestampInMilliseconds
                                                                        error:error];
  if (!inputPacketMap.has_value()) {
    return nil;
  }

  std::optional<PacketMap> outputPacketMap =
      [_visionTaskRunner processVideoFramePacketMap:*inputPacketMap error:error];
  if (!outputPacketMap.has_value()) {
    return nil;
  }

  return [MPPFaceLandmarkerResult
      faceLandmarkerResultWithLandmarksPacket:outputPacketMap
                                                  .value()[kLandmarksOutStreamName.cppString]
                            blendshapesPacket:outputPacketMap
                                                  .value()[kBlendshapesOutStreamName.cppString]
                 transformationMatrixesPacket:outputPacketMap
                                                  .value()[kFaceGeometryOutStreamName.cppString]];
}

- (BOOL)detectAsyncInImage:(MPPImage *)image
    timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                      error:(NSError **)error {
  std::optional<PacketMap> inputPacketMap = [self inputPacketMapWithMPPImage:image
                                                     timestampInMilliseconds:timestampInMilliseconds
                                                                        error:error];
  if (!inputPacketMap.has_value()) {
    return NO;
  }

  return [_visionTaskRunner processLiveStreamPacketMap:*inputPacketMap error:error];
}

- (void)processLiveStreamResult:(absl::StatusOr<PacketMap>)liveStreamResult {
  NSError *callbackError;
  if (![MPPCommonUtils checkCppError:liveStreamResult.status() toError:&callbackError]) {
    dispatch_async(_callbackQueue, ^{
      [_faceLandmarkerLiveStreamDelegate faceLandmarker:self
                           didFinishDetectionWithResult:nil
                                timestampInMilliseconds:Timestamp::Unset().Value()
                                                  error:callbackError];
    });
    return;
  }

  PacketMap &outputPacketMap = *liveStreamResult;
  if (outputPacketMap[kImageOutStreamName.cppString].IsEmpty()) {
    // The graph did not return a result. We therefore do not raise the user callback. This mirrors
    // returning `nil` in the other methods and is acceptable for the live stream delegate since
    // it is expected that we drop frames and don't return results for every input.
    return;
  }

  MPPFaceLandmarkerResult *result = [MPPFaceLandmarkerResult
      faceLandmarkerResultWithLandmarksPacket:outputPacketMap[kLandmarksOutStreamName.cppString]
                            blendshapesPacket:outputPacketMap[kBlendshapesOutStreamName.cppString]
                 transformationMatrixesPacket:outputPacketMap[kFaceGeometryOutStreamName
                                                                  .cppString]];

  NSInteger timeStampInMilliseconds =
      outputPacketMap[kImageOutStreamName.cppString].Timestamp().Value() /
      kMicrosecondsPerMillisecond;
  dispatch_async(_callbackQueue, ^{
    [_faceLandmarkerLiveStreamDelegate faceLandmarker:self
                         didFinishDetectionWithResult:result
                              timestampInMilliseconds:timeStampInMilliseconds
                                                error:callbackError];
  });
}

@end
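A short sketch of how the video-mode path implemented above is expected to be driven; the frame source, helper name, and the 33 ms timestamp step are assumptions, the key point is the monotonically increasing `timestampInMilliseconds` required by `detectInVideoFrame:`.

// Hypothetical helper: the caller supplies decoded frames and the model path.
static void RunFaceLandmarkerOnVideoFrames(NSArray<MPPImage *> *frames, NSString *modelPath) {
  MPPFaceLandmarkerOptions *options = [[MPPFaceLandmarkerOptions alloc] init];
  options.baseOptions.modelAssetPath = modelPath;
  options.runningMode = MPPRunningModeVideo;

  NSError *error = nil;
  MPPFaceLandmarker *faceLandmarker = [[MPPFaceLandmarker alloc] initWithOptions:options
                                                                            error:&error];
  if (!faceLandmarker) {
    return;
  }

  for (NSUInteger i = 0; i < frames.count; i++) {
    // Timestamps must increase monotonically; a ~30 fps stream is assumed here.
    NSInteger timestampInMilliseconds = (NSInteger)i * 33;
    MPPFaceLandmarkerResult *result = [faceLandmarker detectInVideoFrame:frames[i]
                                                 timestampInMilliseconds:timestampInMilliseconds
                                                                   error:&error];
    if (result) {
      NSLog(@"Frame %lu: %lu face(s)", (unsigned long)i,
            (unsigned long)result.faceLandmarks.count);
    }
  }
}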
@@ -16,9 +16,45 @@
 #import "mediapipe/tasks/ios/core/sources/MPPTaskOptions.h"
 #import "mediapipe/tasks/ios/vision/core/sources/MPPRunningMode.h"
+#import "mediapipe/tasks/ios/vision/face_landmarker/sources/MPPFaceLandmarkerResult.h"
 
 NS_ASSUME_NONNULL_BEGIN
+
+@class MPPFaceLandmarker;
+
+/**
+ * This protocol defines an interface for the delegates of `MPPFaceLandmarker` to receive
+ * results of performing asynchronous face landmark detection on images (i.e., when `runningMode` =
+ * `MPPRunningModeLiveStream`).
+ *
+ * The delegate of `MPPFaceLandmarker` must adopt the `MPPFaceLandmarkerLiveStreamDelegate`
+ * protocol. The methods in this protocol are optional.
+ */
+NS_SWIFT_NAME(FaceLandmarkerLiveStreamDelegate)
+@protocol MPPFaceLandmarkerLiveStreamDelegate <NSObject>
+
+/**
+ * This method notifies a delegate that the results of asynchronous face landmark detection of
+ * an image submitted to the `MPPFaceLandmarker` are available.
+ *
+ * This method is called on a private serial dispatch queue created by the `MPPFaceLandmarker`
+ * for performing the asynchronous delegate calls.
+ *
+ * @param faceLandmarker The face landmarker which performed the face landmark detections.
+ * This is useful to test equality when there are multiple instances of `MPPFaceLandmarker`.
+ * @param result The `MPPFaceLandmarkerResult` object that contains a list of landmarks.
+ * @param timestampInMilliseconds The timestamp (in milliseconds) which indicates when the input
+ * image was sent to the face landmarker.
+ * @param error An optional error parameter populated when there is an error in performing face
+ * landmark detection on the input live stream image data.
+ */
+- (void)faceLandmarker:(MPPFaceLandmarker *)faceLandmarker
+    didFinishDetectionWithResult:(nullable MPPFaceLandmarkerResult *)result
+         timestampInMilliseconds:(NSInteger)timestampInMilliseconds
+                           error:(nullable NSError *)error
+    NS_SWIFT_NAME(faceLandmarker(_:didFinishDetection:timestampInMilliseconds:error:));
+@end
+
 /** Options for setting up a `MPPFaceLandmarker`. */
 NS_SWIFT_NAME(FaceLandmarkerOptions)
 @interface MPPFaceLandmarkerOptions : MPPTaskOptions <NSCopying>

@@ -34,6 +70,15 @@ NS_SWIFT_NAME(FaceLandmarkerOptions)
  */
 @property(nonatomic) MPPRunningMode runningMode;
 
+/**
+ * An object that conforms to the `MPPFaceLandmarkerLiveStreamDelegate` protocol. This object must
+ * implement `faceLandmarker:didFinishDetectionWithResult:timestampInMilliseconds:error:` to
+ * receive the results of performing asynchronous face landmark detection on images (i.e., when
+ * `runningMode` = `MPPRunningModeLiveStream`).
+ */
+@property(nonatomic, weak, nullable) id<MPPFaceLandmarkerLiveStreamDelegate>
+    faceLandmarkerLiveStreamDelegate;
+
 /** The maximum number of faces that can be detected by the FaceLandmarker. Defaults to 1. */
 @property(nonatomic) NSInteger numFaces;

@@ -59,6 +104,13 @@ NS_SWIFT_NAME(FaceLandmarkerOptions)
  */
 @property(nonatomic) BOOL outputFaceBlendshapes;
 
+/**
+ * Whether FaceLandmarker outputs the facial transformation matrix. The facial transformation
+ * matrix is used to transform the face landmarks in the canonical face to the detected face, so
+ * that users can apply face effects on the detected landmarks.
+ */
+@property(nonatomic) BOOL outputFacialTransformationMatrixes;
+
 @end
 
 NS_ASSUME_NONNULL_END
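A hedged sketch of wiring up the live stream delegate declared above; the class name and the camera plumbing are assumptions for illustration, only the pieces that touch these options are shown.

// Hypothetical delegate-conforming class.
@interface CameraFaceTracker : NSObject <MPPFaceLandmarkerLiveStreamDelegate>
@property(nonatomic, strong) MPPFaceLandmarker *faceLandmarker;
@end

@implementation CameraFaceTracker

- (void)setUpWithModelPath:(NSString *)modelPath {
  MPPFaceLandmarkerOptions *options = [[MPPFaceLandmarkerOptions alloc] init];
  options.baseOptions.modelAssetPath = modelPath;
  options.runningMode = MPPRunningModeLiveStream;
  options.faceLandmarkerLiveStreamDelegate = self;  // Required in live stream mode.
  self.faceLandmarker = [[MPPFaceLandmarker alloc] initWithOptions:options error:nil];
}

- (void)processCameraFrame:(MPPImage *)image atTimestamp:(NSInteger)timestampInMilliseconds {
  // The frame is only submitted here; results arrive asynchronously via the delegate callback.
  [self.faceLandmarker detectAsyncInImage:image
                  timestampInMilliseconds:timestampInMilliseconds
                                    error:nil];
}

- (void)faceLandmarker:(MPPFaceLandmarker *)faceLandmarker
    didFinishDetectionWithResult:(nullable MPPFaceLandmarkerResult *)result
         timestampInMilliseconds:(NSInteger)timestampInMilliseconds
                           error:(nullable NSError *)error {
  // Called on the private serial queue owned by the face landmarker.
}

@end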
@@ -24,6 +24,8 @@
     _minFacePresenceConfidence = 0.5f;
     _minTrackingConfidence = 0.5f;
     _outputFaceBlendshapes = NO;
+    _outputFacialTransformationMatrixes = NO;
   }
   return self;
 }

@@ -36,6 +38,9 @@
   faceLandmarkerOptions.minFacePresenceConfidence = self.minFacePresenceConfidence;
   faceLandmarkerOptions.minTrackingConfidence = self.minTrackingConfidence;
   faceLandmarkerOptions.outputFaceBlendshapes = self.outputFaceBlendshapes;
+  faceLandmarkerOptions.outputFacialTransformationMatrixes =
+      self.outputFacialTransformationMatrixes;
+  faceLandmarkerOptions.faceLandmarkerLiveStreamDelegate = self.faceLandmarkerLiveStreamDelegate;
 
   return faceLandmarkerOptions;
 }
@@ -55,7 +55,6 @@ objc_library(
         "//mediapipe/tasks/ios/common/utils:MPPCommonUtils",
         "//mediapipe/tasks/ios/common/utils:NSStringHelpers",
         "//mediapipe/tasks/ios/core:MPPTaskInfo",
-        "//mediapipe/tasks/ios/core:MPPTaskOptions",
         "//mediapipe/tasks/ios/vision/core:MPPImage",
         "//mediapipe/tasks/ios/vision/core:MPPVisionPacketCreator",
         "//mediapipe/tasks/ios/vision/core:MPPVisionTaskRunner",
@@ -14,7 +14,6 @@
 
 #import <Foundation/Foundation.h>
 
-#import "mediapipe/tasks/ios/core/sources/MPPTaskOptions.h"
 #import "mediapipe/tasks/ios/vision/core/sources/MPPImage.h"
 #import "mediapipe/tasks/ios/vision/image_classifier/sources/MPPImageClassifierOptions.h"
 #import "mediapipe/tasks/ios/vision/image_classifier/sources/MPPImageClassifierResult.h"
@@ -55,6 +55,7 @@ static const int kMicroSecondsPerMilliSecond = 1000;
 @interface MPPImageClassifier () {
   /** iOS Vision Task Runner */
   MPPVisionTaskRunner *_visionTaskRunner;
+  dispatch_queue_t _callbackQueue;
 }
 @property(nonatomic, weak) id<MPPImageClassifierLiveStreamDelegate>
     imageClassifierLiveStreamDelegate;

@@ -62,6 +63,44 @@ static const int kMicroSecondsPerMilliSecond = 1000;
 
 @implementation MPPImageClassifier
 
+- (void)processLiveStreamResult:(absl::StatusOr<PacketMap>)liveStreamResult {
+  if (![self.imageClassifierLiveStreamDelegate
+          respondsToSelector:@selector
+          (imageClassifier:didFinishClassificationWithResult:timestampInMilliseconds:error:)]) {
+    return;
+  }
+
+  NSError *callbackError = nil;
+  if (![MPPCommonUtils checkCppError:liveStreamResult.status() toError:&callbackError]) {
+    dispatch_async(_callbackQueue, ^{
+      [self.imageClassifierLiveStreamDelegate imageClassifier:self
+                            didFinishClassificationWithResult:nil
+                                      timestampInMilliseconds:Timestamp::Unset().Value()
+                                                        error:callbackError];
+    });
+    return;
+  }
+
+  PacketMap &outputPacketMap = liveStreamResult.value();
+  if (outputPacketMap[kImageOutStreamName.cppString].IsEmpty()) {
+    return;
+  }
+
+  MPPImageClassifierResult *result = [MPPImageClassifierResult
+      imageClassifierResultWithClassificationsPacket:outputPacketMap[kClassificationsStreamName
+                                                                         .cppString]];
+
+  NSInteger timeStampInMilliseconds =
+      outputPacketMap[kImageOutStreamName.cppString].Timestamp().Value() /
+      kMicroSecondsPerMilliSecond;
+  dispatch_async(_callbackQueue, ^{
+    [self.imageClassifierLiveStreamDelegate imageClassifier:self
+                          didFinishClassificationWithResult:result
+                                    timestampInMilliseconds:timeStampInMilliseconds
+                                                      error:callbackError];
+  });
+}
+
 - (instancetype)initWithOptions:(MPPImageClassifierOptions *)options error:(NSError **)error {
   self = [super init];
   if (self) {

@@ -88,56 +127,19 @@ static const int kMicroSecondsPerMilliSecond = 1000;
     if (options.imageClassifierLiveStreamDelegate) {
       _imageClassifierLiveStreamDelegate = options.imageClassifierLiveStreamDelegate;
-      // Capturing `self` as weak in order to avoid `self` being kept in memory
-      // and cause a retain cycle, after self is set to `nil`.
-      MPPImageClassifier *__weak weakSelf = self;
 
       // Create a private serial dispatch queue in which the deleagte method will be called
       // asynchronously. This is to ensure that if the client performs a long running operation in
       // the delegate method, the queue on which the C++ callbacks is invoked is not blocked and is
       // freed up to continue with its operations.
-      const char *queueName = [MPPVisionTaskRunner uniqueDispatchQueueNameWithSuffix:kTaskName];
-      dispatch_queue_t callbackQueue = dispatch_queue_create(queueName, NULL);
-      packetsCallback = [=](absl::StatusOr<PacketMap> status_or_packets) {
-        if (!weakSelf) {
-          return;
-        }
-        if (![weakSelf.imageClassifierLiveStreamDelegate
-                respondsToSelector:@selector
-                (imageClassifier:
-                    didFinishClassificationWithResult:timestampInMilliseconds:error:)]) {
-          return;
-        }
-
-        NSError *callbackError = nil;
-        if (![MPPCommonUtils checkCppError:status_or_packets.status() toError:&callbackError]) {
-          dispatch_async(callbackQueue, ^{
-            [weakSelf.imageClassifierLiveStreamDelegate imageClassifier:weakSelf
-                                      didFinishClassificationWithResult:nil
-                                                timestampInMilliseconds:Timestamp::Unset().Value()
-                                                                   error:callbackError];
-          });
-          return;
-        }
-
-        PacketMap &outputPacketMap = status_or_packets.value();
-        if (outputPacketMap[kImageOutStreamName.cppString].IsEmpty()) {
-          return;
-        }
-
-        MPPImageClassifierResult *result =
-            [MPPImageClassifierResult imageClassifierResultWithClassificationsPacket:
-                                          outputPacketMap[kClassificationsStreamName.cppString]];
-
-        NSInteger timeStampInMilliseconds =
-            outputPacketMap[kImageOutStreamName.cppString].Timestamp().Value() /
-            kMicroSecondsPerMilliSecond;
-        dispatch_async(callbackQueue, ^{
-          [weakSelf.imageClassifierLiveStreamDelegate imageClassifier:weakSelf
-                                    didFinishClassificationWithResult:result
-                                              timestampInMilliseconds:timeStampInMilliseconds
-                                                                 error:callbackError];
-        });
+      _callbackQueue = dispatch_queue_create(
+          [MPPVisionTaskRunner uniqueDispatchQueueNameWithSuffix:kTaskName], NULL);
+
+      // Capturing `self` as weak in order to avoid `self` being kept in memory
+      // and cause a retain cycle, after self is set to `nil`.
+      MPPImageClassifier *__weak weakSelf = self;
+      packetsCallback = [=](absl::StatusOr<PacketMap> liveStreamResult) {
+        [weakSelf processLiveStreamResult:liveStreamResult];
       };
     }
@ -50,12 +50,51 @@ static NSString *const kTaskName = @"objectDetector";
|
||||||
@interface MPPObjectDetector () {
|
@interface MPPObjectDetector () {
|
||||||
/** iOS Vision Task Runner */
|
/** iOS Vision Task Runner */
|
||||||
MPPVisionTaskRunner *_visionTaskRunner;
|
MPPVisionTaskRunner *_visionTaskRunner;
|
||||||
|
dispatch_queue_t _callbackQueue;
|
||||||
}
|
}
|
||||||
@property(nonatomic, weak) id<MPPObjectDetectorLiveStreamDelegate> objectDetectorLiveStreamDelegate;
|
@property(nonatomic, weak) id<MPPObjectDetectorLiveStreamDelegate> objectDetectorLiveStreamDelegate;
|
||||||
@end
|
@end
|
||||||
|
|
||||||
@implementation MPPObjectDetector
|
@implementation MPPObjectDetector
|
||||||
|
|
||||||
|
- (void)processLiveStreamResult:(absl::StatusOr<PacketMap>)liveStreamResult {
|
||||||
|
if (![self.objectDetectorLiveStreamDelegate
|
||||||
|
respondsToSelector:@selector(objectDetector:
|
||||||
|
didFinishDetectionWithResult:timestampInMilliseconds:error:)]) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
NSError *callbackError = nil;
|
||||||
|
if (![MPPCommonUtils checkCppError:liveStreamResult.status() toError:&callbackError]) {
|
||||||
|
dispatch_async(_callbackQueue, ^{
|
||||||
|
[self.objectDetectorLiveStreamDelegate objectDetector:self
|
||||||
|
didFinishDetectionWithResult:nil
|
||||||
|
timestampInMilliseconds:Timestamp::Unset().Value()
|
||||||
|
error:callbackError];
|
||||||
|
});
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
PacketMap &outputPacketMap = liveStreamResult.value();
|
||||||
|
if (outputPacketMap[kImageOutStreamName.cppString].IsEmpty()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
MPPObjectDetectorResult *result = [MPPObjectDetectorResult
|
||||||
|
objectDetectorResultWithDetectionsPacket:
|
||||||
|
outputPacketMap[kDetectionsStreamName.cppString]];
|
||||||
|
|
||||||
|
NSInteger timeStampInMilliseconds =
|
||||||
|
outputPacketMap[kImageOutStreamName.cppString].Timestamp().Value() /
|
||||||
|
kMicroSecondsPerMilliSecond;
|
||||||
|
dispatch_async(_callbackQueue, ^{
|
||||||
|
[self.objectDetectorLiveStreamDelegate objectDetector:self
|
||||||
|
didFinishDetectionWithResult:result
|
||||||
|
timestampInMilliseconds:timeStampInMilliseconds
|
||||||
|
error:callbackError];
|
||||||
|
});
|
||||||
|
}
|
||||||
|
|
||||||
- (instancetype)initWithOptions:(MPPObjectDetectorOptions *)options error:(NSError **)error {
|
- (instancetype)initWithOptions:(MPPObjectDetectorOptions *)options error:(NSError **)error {
|
||||||
self = [super init];
|
self = [super init];
|
||||||
if (self) {
|
if (self) {
|
||||||
|
@ -82,55 +121,18 @@ static NSString *const kTaskName = @"objectDetector";
|
||||||
if (options.objectDetectorLiveStreamDelegate) {
|
if (options.objectDetectorLiveStreamDelegate) {
|
||||||
_objectDetectorLiveStreamDelegate = options.objectDetectorLiveStreamDelegate;
|
_objectDetectorLiveStreamDelegate = options.objectDetectorLiveStreamDelegate;
|
||||||
|
|
||||||
// Capturing `self` as weak in order to avoid `self` being kept in memory
|
|
||||||
// and cause a retain cycle, after self is set to `nil`.
|
|
||||||
MPPObjectDetector *__weak weakSelf = self;
|
|
||||||
|
|
||||||
// Create a private serial dispatch queue in which the delegate method will be called
|
// Create a private serial dispatch queue in which the delegate method will be called
|
||||||
// asynchronously. This is to ensure that if the client performs a long running operation in
|
// asynchronously. This is to ensure that if the client performs a long running operation in
|
||||||
// the delegate method, the queue on which the C++ callbacks is invoked is not blocked and is
|
// the delegate method, the queue on which the C++ callbacks is invoked is not blocked and is
|
||||||
// freed up to continue with its operations.
|
// freed up to continue with its operations.
|
||||||
dispatch_queue_t callbackQueue = dispatch_queue_create(
|
_callbackQueue = dispatch_queue_create(
|
||||||
[MPPVisionTaskRunner uniqueDispatchQueueNameWithSuffix:kTaskName], NULL);
|
[MPPVisionTaskRunner uniqueDispatchQueueNameWithSuffix:kTaskName], NULL);
|
||||||
packetsCallback = [=](absl::StatusOr<PacketMap> statusOrPackets) {
|
|
||||||
if (!weakSelf) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
if (![weakSelf.objectDetectorLiveStreamDelegate
|
|
||||||
respondsToSelector:@selector
|
|
||||||
(objectDetector:didFinishDetectionWithResult:timestampInMilliseconds:error:)]) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
NSError *callbackError = nil;
|
// Capturing `self` as weak in order to avoid `self` being kept in memory
|
||||||
if (![MPPCommonUtils checkCppError:statusOrPackets.status() toError:&callbackError]) {
|
// and cause a retain cycle, after self is set to `nil`.
|
||||||
dispatch_async(callbackQueue, ^{
|
MPPObjectDetector *__weak weakSelf = self;
|
||||||
[weakSelf.objectDetectorLiveStreamDelegate objectDetector:weakSelf
|
packetsCallback = [=](absl::StatusOr<PacketMap> liveStreamResult) {
|
||||||
didFinishDetectionWithResult:nil
|
[weakSelf processLiveStreamResult:liveStreamResult];
|
||||||
timestampInMilliseconds:Timestamp::Unset().Value()
|
|
||||||
error:callbackError];
|
|
||||||
});
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
PacketMap &outputPacketMap = statusOrPackets.value();
|
|
||||||
if (outputPacketMap[kImageOutStreamName.cppString].IsEmpty()) {
|
|
||||||
return;
|
|
||||||
}
|
|
||||||
|
|
||||||
MPPObjectDetectorResult *result = [MPPObjectDetectorResult
|
|
||||||
objectDetectorResultWithDetectionsPacket:statusOrPackets
|
|
||||||
.value()[kDetectionsStreamName.cppString]];
|
|
||||||
|
|
||||||
NSInteger timeStampInMilliseconds =
|
|
||||||
outputPacketMap[kImageOutStreamName.cppString].Timestamp().Value() /
|
|
||||||
kMicroSecondsPerMilliSecond;
|
|
||||||
dispatch_async(callbackQueue, ^{
|
|
||||||
[weakSelf.objectDetectorLiveStreamDelegate objectDetector:weakSelf
|
|
||||||
didFinishDetectionWithResult:result
|
|
||||||
timestampInMilliseconds:timeStampInMilliseconds
|
|
||||||
error:callbackError];
|
|
||||||
});
|
|
||||||
};
|
};
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
@ -2096,6 +2096,12 @@ bool RegionFlowComputation::GainCorrectFrame(const cv::Mat& reference_frame,
|
||||||
void RegionFlowComputation::WideBaselineMatchFeatures(
|
void RegionFlowComputation::WideBaselineMatchFeatures(
|
||||||
FrameTrackingData* from_data_ptr, FrameTrackingData* to_data_ptr,
|
FrameTrackingData* from_data_ptr, FrameTrackingData* to_data_ptr,
|
||||||
TrackedFeatureList* results) {
|
TrackedFeatureList* results) {
|
||||||
|
#if (defined(__ANDROID__) || defined(__APPLE__) || defined(__EMSCRIPTEN__)) && \
|
||||||
|
!defined(CV_WRAPPER_3X)
|
||||||
|
LOG(FATAL) << "Supported on only with OpenCV 3.0. "
|
||||||
|
<< "Use bazel build flag : --define CV_WRAPPER=3X";
|
||||||
|
#else // (defined(__ANDROID__) || defined(__APPLE__) ||
|
||||||
|
// defined(__EMSCRIPTEN__)) && !defined(CV_WRAPPER_3X)
|
||||||
results->clear();
|
results->clear();
|
||||||
|
|
||||||
const auto& frame1 = from_data_ptr->frame;
|
const auto& frame1 = from_data_ptr->frame;
|
||||||
|
@ -2168,6 +2174,8 @@ void RegionFlowComputation::WideBaselineMatchFeatures(
|
||||||
results->push_back(tracked_feature);
|
results->push_back(tracked_feature);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
#endif // (defined(__ANDROID__) || defined(__APPLE__) ||
|
||||||
|
// defined(__EMSCRIPTEN__)) && !defined(CV_WRAPPER_3X)
|
||||||
}
|
}
|
||||||
|
|
||||||
void RegionFlowComputation::RemoveAbsentFeatures(
|
void RegionFlowComputation::RemoveAbsentFeatures(
|
||||||
|
|
48
third_party/wasm_files.bzl
vendored
48
third_party/wasm_files.bzl
vendored
|
@@ -12,72 +12,72 @@ def wasm_files():

     http_file(
         name = "com_google_mediapipe_wasm_audio_wasm_internal_js",
-        sha256 = "b07bf0eda990b19c48f2b51c358bb281f40a7c3002f50f16986fe96f68103ac1",
-        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/audio_wasm_internal.js?generation=1683564589395847"],
+        sha256 = "0d66a26fa5ca638c54ec3e5bffb50aec74ee0880b108d4b5f7d316e9ae36cc9a",
+        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/audio_wasm_internal.js?generation=1685638894464709"],
     )

     http_file(
         name = "com_google_mediapipe_wasm_audio_wasm_internal_wasm",
-        sha256 = "725fa5d13fdce79beaccb287b24d79d32e7bfb40f2cc51ef7f26a8dd8dec993c",
-        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/audio_wasm_internal.wasm?generation=1683564592604733"],
+        sha256 = "014963d19ef6b1f25720379c3df07a6e08b24894ada4938d45b1256e97739318",
+        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/audio_wasm_internal.wasm?generation=1685638897160853"],
     )

     http_file(
         name = "com_google_mediapipe_wasm_audio_wasm_nosimd_internal_js",
-        sha256 = "f2917690317ae381782c8f4fb17d40f7d0b8e340fb490a604a37959ecee637c8",
-        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/audio_wasm_nosimd_internal.js?generation=1683564595399509"],
+        sha256 = "f03d4826c251783bfc1fb8b82b2d08c00b2e3cb2efcc606305eb210f09fc686b",
+        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/audio_wasm_nosimd_internal.js?generation=1685638899477366"],
     )

     http_file(
         name = "com_google_mediapipe_wasm_audio_wasm_nosimd_internal_wasm",
-        sha256 = "908f6ce2420b5e88770b61b20b200cb3cd62a1c727cf0a134aa645351eaa1350",
-        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/audio_wasm_nosimd_internal.wasm?generation=1683564598602444"],
+        sha256 = "36972cf62138bcb5fde37a1fecce334a86b0261eefc1f1daa17b4b8acdc784b4",
+        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/audio_wasm_nosimd_internal.wasm?generation=1685638901926088"],
     )

     http_file(
         name = "com_google_mediapipe_wasm_text_wasm_internal_js",
-        sha256 = "64c4a3927e732b99473b072228130b922427b2aba16d64863579928df16a7946",
-        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/text_wasm_internal.js?generation=1683564601486513"],
+        sha256 = "5745360da942f3bcb585547e8720cb11f19793e68851b119b8f9ea22b120fd06",
+        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/text_wasm_internal.js?generation=1685638904214551"],
     )

     http_file(
         name = "com_google_mediapipe_wasm_text_wasm_internal_wasm",
-        sha256 = "c6e3027f4a7b1fd11d5ebbd8254f168636658e7a5d19a293e900f19497e48d5e",
-        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/text_wasm_internal.wasm?generation=1683564604687320"],
+        sha256 = "b6d8b03fa7fc3e969febfcb63e3db2de900f1f54b82bf2205f02d865fc4790b2",
+        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/text_wasm_internal.wasm?generation=1685638906864568"],
     )

     http_file(
         name = "com_google_mediapipe_wasm_text_wasm_nosimd_internal_js",
-        sha256 = "af59aacaddc076ca9e4ea139d4f440b5b114576e72ab69e50f0f501e0e0c07e5",
-        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/text_wasm_nosimd_internal.js?generation=1683564606939116"],
+        sha256 = "837ca361044441e6202858b4a9d94b3296c8440099b40e6dafb1efcce76a8f63",
+        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/text_wasm_nosimd_internal.js?generation=1685638909139832"],
     )

     http_file(
         name = "com_google_mediapipe_wasm_text_wasm_nosimd_internal_wasm",
-        sha256 = "d313497c003b6e00670664463fbbd9f5a2388946fe3d132c2794dd87cb91beb0",
-        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/text_wasm_nosimd_internal.wasm?generation=1683564609634068"],
+        sha256 = "507f4089f4a2cf8fe7fb61f48e180f3f86d5e8057fc60ef24c77aae724eb66ba",
+        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/text_wasm_nosimd_internal.wasm?generation=1685638911843312"],
     )

     http_file(
         name = "com_google_mediapipe_wasm_vision_wasm_internal_js",
-        sha256 = "efebf9d676d8828c31e7c0d9718c5c80de8a3de084e97aa3ea5472a5346c518e",
-        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/vision_wasm_internal.js?generation=1683564611613681"],
+        sha256 = "82de7a40fdb14833b5ceaeb1ebf219421dbb06ba5e525204737dec196161420d",
+        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/vision_wasm_internal.js?generation=1685638914190745"],
     )

     http_file(
         name = "com_google_mediapipe_wasm_vision_wasm_internal_wasm",
-        sha256 = "0431b3bacfcb26d91d800450216b305b9378f4e063d78c2e85a944aba432e0dd",
-        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/vision_wasm_internal.wasm?generation=1683564614567083"],
+        sha256 = "d06ac49f4c156cf0c24ef62387b13e48b67476e7f04a423889c59ee835c460f2",
+        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/vision_wasm_internal.wasm?generation=1685638917012370"],
     )

     http_file(
         name = "com_google_mediapipe_wasm_vision_wasm_nosimd_internal_js",
-        sha256 = "e51fa49f60493d7122d26e6fcb45d4031a3247a05d83b3f62e5155653a89d8f8",
-        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/vision_wasm_nosimd_internal.js?generation=1683564616879456"],
+        sha256 = "fff428ef91d8cc936f9c3ec81750f5e7ee3c20bc0c76677eb5d8d4d010d2fac0",
+        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/vision_wasm_nosimd_internal.js?generation=1685638919406810"],
     )

     http_file(
         name = "com_google_mediapipe_wasm_vision_wasm_nosimd_internal_wasm",
-        sha256 = "737830aab48e77ff5e6c1826f15801cfb2d68dbb622b3b39c3d7528334b73f94",
-        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/vision_wasm_nosimd_internal.wasm?generation=1683564619390853"],
+        sha256 = "f87c51b8744b0ba564ce725fc3659dba5ef90b4615ac34135ca91c6508434fe9",
+        urls = ["https://storage.googleapis.com/mediapipe-assets/wasm/vision_wasm_nosimd_internal.wasm?generation=1685638922016130"],
     )