Rename HandGestureRecognizer to GestureRecognizer and update the namespace to follow Tasks C++ conventions.

PiperOrigin-RevId: 478700907
Authored by MediaPipe Team on 2022-10-03 23:43:13 -07:00, committed by Copybara-Service
parent f7fa3dc9be
commit 2cb9ebb5e3
16 changed files with 114 additions and 113 deletions
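At the graph-construction level, the visible effect of the rename is the registered type name of the subgraph. Below is a minimal sketch of the before/after, assuming a mediapipe::api2::builder::Graph named graph; both registered names appear verbatim in the diff that follows:

    #include "mediapipe/framework/api2/builder.h"

    void AddGestureRecognizerNode(mediapipe::api2::builder::Graph& graph) {
      // Before this change the subgraph was registered as:
      //   "mediapipe.tasks.vision.SingleHandGestureRecognizerSubgraph"
      // After this change it lives under the gesture_recognizer namespace:
      auto& recognizer = graph.AddNode(
          "mediapipe.tasks.vision.gesture_recognizer."
          "SingleHandGestureRecognizerGraph");
      (void)recognizer;  // stream wiring omitted in this sketch
    }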

View File

@@ -41,8 +41,8 @@ cc_test(
)
cc_library(
name = "hand_gesture_recognizer_subgraph",
srcs = ["hand_gesture_recognizer_subgraph.cc"],
name = "hand_gesture_recognizer_graph",
srcs = ["hand_gesture_recognizer_graph.cc"],
deps = [
"//mediapipe/calculators/core:concatenate_vector_calculator",
"//mediapipe/calculators/tensor:tensor_converter_calculator",
@@ -62,10 +62,10 @@ cc_library(
"//mediapipe/tasks/cc/core:model_task_graph",
"//mediapipe/tasks/cc/core:utils",
"//mediapipe/tasks/cc/core/proto:inference_subgraph_cc_proto",
"//mediapipe/tasks/cc/vision/hand_gesture_recognizer/calculators:handedness_to_matrix_calculator",
"//mediapipe/tasks/cc/vision/hand_gesture_recognizer/calculators:landmarks_to_matrix_calculator",
"//mediapipe/tasks/cc/vision/hand_gesture_recognizer/proto:hand_gesture_recognizer_subgraph_options_cc_proto",
"//mediapipe/tasks/cc/vision/hand_gesture_recognizer/proto:landmarks_to_matrix_calculator_cc_proto",
"//mediapipe/tasks/cc/vision/gesture_recognizer/calculators:handedness_to_matrix_calculator",
"//mediapipe/tasks/cc/vision/gesture_recognizer/calculators:landmarks_to_matrix_calculator",
"//mediapipe/tasks/cc/vision/gesture_recognizer/calculators:landmarks_to_matrix_calculator_cc_proto",
"//mediapipe/tasks/cc/vision/gesture_recognizer/proto:hand_gesture_recognizer_graph_options_cc_proto",
"//mediapipe/tasks/cc/vision/hand_landmarker:hand_landmarker_subgraph",
"//mediapipe/tasks/cc/vision/utils:image_tensor_specs",
"//mediapipe/tasks/metadata:metadata_schema_cc",

View File

@@ -12,11 +12,23 @@
# See the License for the specific language governing permissions and
# limitations under the License.
load("//mediapipe/framework/port:build_config.bzl", "mediapipe_proto_library")
package(default_visibility = [
"//mediapipe/app/xeno:__subpackages__",
"//mediapipe/tasks:internal",
])
mediapipe_proto_library(
name = "landmarks_to_matrix_calculator_proto",
srcs = ["landmarks_to_matrix_calculator.proto"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
"//mediapipe/tasks/cc/core/proto:base_options_proto",
],
)
cc_library(
name = "handedness_to_matrix_calculator",
srcs = ["handedness_to_matrix_calculator.cc"],
@@ -25,7 +37,7 @@ cc_library(
"//mediapipe/framework/formats:classification_cc_proto",
"//mediapipe/framework/formats:matrix",
"//mediapipe/framework/port:ret_check",
"//mediapipe/tasks/cc/vision/hand_gesture_recognizer:handedness_util",
"//mediapipe/tasks/cc/vision/gesture_recognizer:handedness_util",
"@com_google_absl//absl/memory",
"@com_google_absl//absl/status",
"@com_google_absl//absl/status:statusor",
@@ -53,11 +65,11 @@ cc_library(
name = "landmarks_to_matrix_calculator",
srcs = ["landmarks_to_matrix_calculator.cc"],
deps = [
":landmarks_to_matrix_calculator_cc_proto",
"//mediapipe/framework:calculator_framework",
"//mediapipe/framework/formats:landmark_cc_proto",
"//mediapipe/framework/formats:matrix",
"//mediapipe/framework/port:ret_check",
"//mediapipe/tasks/cc/vision/hand_gesture_recognizer/proto:landmarks_to_matrix_calculator_cc_proto",
"@com_google_absl//absl/status",
"@com_google_absl//absl/status:statusor",
"@com_google_absl//absl/strings",

View File

@@ -26,14 +26,16 @@ limitations under the License.
#include "mediapipe/framework/formats/classification.pb.h"
#include "mediapipe/framework/formats/matrix.h"
#include "mediapipe/framework/port/ret_check.h"
#include "mediapipe/tasks/cc/vision/hand_gesture_recognizer/handedness_util.h"
#include "mediapipe/tasks/cc/vision/gesture_recognizer/handedness_util.h"
// TODO Update to use API2
namespace mediapipe {
namespace tasks {
namespace vision {
namespace api2 {
namespace {
using ::mediapipe::tasks::vision::gesture_recognizer::GetLeftHandScore;
constexpr char kHandednessTag[] = "HANDEDNESS";
constexpr char kHandednessMatrixTag[] = "HANDEDNESS_MATRIX";
@@ -71,6 +73,8 @@ class HandednessToMatrixCalculator : public CalculatorBase {
return absl::OkStatus();
}
// TODO remove this after change to API2, because Setting offset
// to 0 is the default in API2
absl::Status Open(CalculatorContext* cc) override {
cc->SetOffset(TimestampDiff(0));
return absl::OkStatus();
@@ -95,6 +99,5 @@ absl::Status HandednessToMatrixCalculator::Process(CalculatorContext* cc) {
return absl::OkStatus();
}
} // namespace vision
} // namespace tasks
} // namespace api2
} // namespace mediapipe
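The hunks above show this calculator's stream tags but no usage example; a hedged sketch in the repository's own doc-comment style, with tag names taken from this diff and stream names illustrative:

    // node {
    //   calculator: "HandednessToMatrixCalculator"
    //   input_stream: "HANDEDNESS:handedness"
    //   output_stream: "HANDEDNESS_MATRIX:handedness_matrix"
    // }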

View File

@@ -28,8 +28,6 @@ limitations under the License.
#include "mediapipe/framework/port/status_matchers.h"
namespace mediapipe {
namespace tasks {
namespace vision {
namespace {
@@ -95,6 +93,4 @@ INSTANTIATE_TEST_CASE_P(
} // namespace
} // namespace vision
} // namespace tasks
} // namespace mediapipe

View File

@@ -27,13 +27,11 @@ limitations under the License.
#include "mediapipe/framework/formats/landmark.pb.h"
#include "mediapipe/framework/formats/matrix.h"
#include "mediapipe/framework/port/ret_check.h"
#include "mediapipe/tasks/cc/vision/hand_gesture_recognizer/proto/landmarks_to_matrix_calculator.pb.h"
#include "mediapipe/tasks/cc/vision/gesture_recognizer/calculators/landmarks_to_matrix_calculator.pb.h"
// TODO Update to use API2
namespace mediapipe {
namespace tasks {
namespace vision {
using proto::LandmarksToMatrixCalculatorOptions;
namespace api2 {
namespace {
@@ -175,7 +173,7 @@ absl::Status ProcessLandmarks(LandmarkListT landmarks, CalculatorContext* cc) {
// input_stream: "IMAGE_SIZE:image_size"
// output_stream: "LANDMARKS_MATRIX:landmarks_matrix"
// options {
// [mediapipe.tasks.vision.proto.LandmarksToMatrixCalculatorOptions.ext] {
// [mediapipe.LandmarksToMatrixCalculatorOptions.ext] {
// object_normalization: true
// object_normalization_origin_offset: 0
// }
@@ -221,6 +219,5 @@ absl::Status LandmarksToMatrixCalculator::Process(CalculatorContext* cc) {
return absl::OkStatus();
}
} // namespace vision
} // namespace tasks
} // namespace api2
} // namespace mediapipe

View File

@@ -15,7 +15,7 @@ limitations under the License.
syntax = "proto2";
package mediapipe.tasks.vision.proto;
package mediapipe;
import "mediapipe/framework/calculator.proto";

View File

@@ -28,8 +28,6 @@ limitations under the License.
#include "mediapipe/framework/port/status_matchers.h"
namespace mediapipe {
namespace tasks {
namespace vision {
namespace {
@@ -72,8 +70,7 @@ TEST_P(Landmarks2dToMatrixCalculatorTest, OutputsCorrectResult) {
input_stream: "IMAGE_SIZE:image_size"
output_stream: "LANDMARKS_MATRIX:landmarks_matrix"
options {
[mediapipe.tasks.vision.proto.LandmarksToMatrixCalculatorOptions
.ext] {
[mediapipe.LandmarksToMatrixCalculatorOptions.ext] {
object_normalization: $0
object_normalization_origin_offset: $1
}
@@ -145,8 +142,7 @@ TEST_P(LandmarksWorld3dToMatrixCalculatorTest, OutputsCorrectResult) {
input_stream: "IMAGE_SIZE:image_size"
output_stream: "LANDMARKS_MATRIX:landmarks_matrix"
options {
[mediapipe.tasks.vision.proto.LandmarksToMatrixCalculatorOptions
.ext] {
[mediapipe.LandmarksToMatrixCalculatorOptions.ext] {
object_normalization: $0
object_normalization_origin_offset: $1
}
@@ -202,6 +198,4 @@ INSTANTIATE_TEST_CASE_P(
} // namespace
} // namespace vision
} // namespace tasks
} // namespace mediapipe

View File

@@ -34,14 +34,15 @@ limitations under the License.
#include "mediapipe/tasks/cc/core/model_task_graph.h"
#include "mediapipe/tasks/cc/core/proto/inference_subgraph.pb.h"
#include "mediapipe/tasks/cc/core/utils.h"
#include "mediapipe/tasks/cc/vision/hand_gesture_recognizer/proto/hand_gesture_recognizer_subgraph_options.pb.h"
#include "mediapipe/tasks/cc/vision/hand_gesture_recognizer/proto/landmarks_to_matrix_calculator.pb.h"
#include "mediapipe/tasks/cc/vision/gesture_recognizer/calculators/landmarks_to_matrix_calculator.pb.h"
#include "mediapipe/tasks/cc/vision/gesture_recognizer/proto/hand_gesture_recognizer_graph_options.pb.h"
#include "mediapipe/tasks/cc/vision/utils/image_tensor_specs.h"
#include "mediapipe/tasks/metadata/metadata_schema_generated.h"
namespace mediapipe {
namespace tasks {
namespace vision {
namespace gesture_recognizer {
namespace {
@@ -50,9 +51,8 @@ using ::mediapipe::api2::Output;
using ::mediapipe::api2::builder::Graph;
using ::mediapipe::api2::builder::Source;
using ::mediapipe::tasks::components::containers::proto::ClassificationResult;
using ::mediapipe::tasks::vision::hand_gesture_recognizer::proto::
HandGestureRecognizerSubgraphOptions;
using ::mediapipe::tasks::vision::proto::LandmarksToMatrixCalculatorOptions;
using ::mediapipe::tasks::vision::gesture_recognizer::proto::
HandGestureRecognizerGraphOptions;
constexpr char kHandednessTag[] = "HANDEDNESS";
constexpr char kLandmarksTag[] = "LANDMARKS";
@@ -70,18 +70,6 @@ constexpr char kIndexTag[] = "INDEX";
constexpr char kIterableTag[] = "ITERABLE";
constexpr char kBatchEndTag[] = "BATCH_END";
absl::Status SanityCheckOptions(
const HandGestureRecognizerSubgraphOptions& options) {
if (options.min_tracking_confidence() < 0 ||
options.min_tracking_confidence() > 1) {
return CreateStatusWithPayload(absl::StatusCode::kInvalidArgument,
"Invalid `min_tracking_confidence` option: "
"value must be in the range [0.0, 1.0]",
MediaPipeTasksStatus::kInvalidArgumentError);
}
return absl::OkStatus();
}
Source<std::vector<Tensor>> ConvertMatrixToTensor(Source<Matrix> matrix,
Graph& graph) {
auto& node = graph.AddNode("TensorConverterCalculator");
@@ -91,9 +79,10 @@ Source<std::vector<Tensor>> ConvertMatrixToTensor(Source<Matrix> matrix,
} // namespace
// A "mediapipe.tasks.vision.SingleHandGestureRecognizerSubgraph" performs
// single hand gesture recognition. This graph is used as a building block for
// mediapipe.tasks.vision.HandGestureRecognizerGraph.
// A
// "mediapipe.tasks.vision.gesture_recognizer.SingleHandGestureRecognizerGraph"
// performs single hand gesture recognition. This graph is used as a building
// block for mediapipe.tasks.vision.GestureRecognizerGraph.
//
// Inputs:
// HANDEDNESS - ClassificationList
@@ -113,14 +102,15 @@ Source<std::vector<Tensor>> ConvertMatrixToTensor(Source<Matrix> matrix,
//
// Example:
// node {
// calculator: "mediapipe.tasks.vision.SingleHandGestureRecognizerSubgraph"
// calculator:
// "mediapipe.tasks.vision.gesture_recognizer.SingleHandGestureRecognizerGraph"
// input_stream: "HANDEDNESS:handedness"
// input_stream: "LANDMARKS:landmarks"
// input_stream: "WORLD_LANDMARKS:world_landmarks"
// input_stream: "IMAGE_SIZE:image_size"
// output_stream: "HAND_GESTURES:hand_gestures"
// options {
// [mediapipe.tasks.vision.hand_gesture_recognizer.proto.HandGestureRecognizerSubgraphOptions.ext]
// [mediapipe.tasks.vision.gesture_recognizer.proto.HandGestureRecognizerGraphOptions.ext]
// {
// base_options {
// model_asset {
@@ -130,19 +120,19 @@ Source<std::vector<Tensor>> ConvertMatrixToTensor(Source<Matrix> matrix,
// }
// }
// }
class SingleHandGestureRecognizerSubgraph : public core::ModelTaskGraph {
class SingleHandGestureRecognizerGraph : public core::ModelTaskGraph {
public:
absl::StatusOr<CalculatorGraphConfig> GetConfig(
SubgraphContext* sc) override {
ASSIGN_OR_RETURN(
const auto* model_resources,
CreateModelResources<HandGestureRecognizerSubgraphOptions>(sc));
CreateModelResources<HandGestureRecognizerGraphOptions>(sc));
Graph graph;
ASSIGN_OR_RETURN(
auto hand_gestures,
BuildHandGestureRecognizerGraph(
sc->Options<HandGestureRecognizerSubgraphOptions>(),
*model_resources, graph[Input<ClassificationList>(kHandednessTag)],
BuildGestureRecognizerGraph(
sc->Options<HandGestureRecognizerGraphOptions>(), *model_resources,
graph[Input<ClassificationList>(kHandednessTag)],
graph[Input<NormalizedLandmarkList>(kLandmarksTag)],
graph[Input<LandmarkList>(kWorldLandmarksTag)],
graph[Input<std::pair<int, int>>(kImageSizeTag)], graph));
@@ -151,15 +141,13 @@ class SingleHandGestureRecognizerSubgraph : public core::ModelTaskGraph {
}
private:
absl::StatusOr<Source<ClassificationResult>> BuildHandGestureRecognizerGraph(
const HandGestureRecognizerSubgraphOptions& graph_options,
absl::StatusOr<Source<ClassificationResult>> BuildGestureRecognizerGraph(
const HandGestureRecognizerGraphOptions& graph_options,
const core::ModelResources& model_resources,
Source<ClassificationList> handedness,
Source<NormalizedLandmarkList> hand_landmarks,
Source<LandmarkList> hand_world_landmarks,
Source<std::pair<int, int>> image_size, Graph& graph) {
MP_RETURN_IF_ERROR(SanityCheckOptions(graph_options));
// Converts the ClassificationList to a matrix.
auto& handedness_to_matrix = graph.AddNode("HandednessToMatrixCalculator");
handedness >> handedness_to_matrix.In(kHandednessTag);
@@ -235,12 +223,15 @@
}
};
// clang-format off
REGISTER_MEDIAPIPE_GRAPH(
::mediapipe::tasks::vision::SingleHandGestureRecognizerSubgraph);
::mediapipe::tasks::vision::gesture_recognizer::SingleHandGestureRecognizerGraph); // NOLINT
// clang-format on
// A "mediapipe.tasks.vision.HandGestureRecognizerSubgraph" performs multi
// hand gesture recognition. This graph is used as a building block for
// mediapipe.tasks.vision.HandGestureRecognizerGraph.
// A
// "mediapipe.tasks.vision.gesture_recognizer.MultipleHandGestureRecognizerGraph"
// performs multi hand gesture recognition. This graph is used as a building
// block for mediapipe.tasks.vision.gesture_recognizer.GestureRecognizerGraph.
//
// Inputs:
// HANDEDNESS - std::vector<ClassificationList>
@@ -263,7 +254,8 @@ REGISTER_MEDIAPIPE_GRAPH(
//
// Example:
// node {
// calculator: "mediapipe.tasks.vision.HandGestureRecognizerSubgraph"
// calculator:
// "mediapipe.tasks.vision.gesture_recognizer.MultipleHandGestureRecognizerGraph"
// input_stream: "HANDEDNESS:handedness"
// input_stream: "LANDMARKS:landmarks"
// input_stream: "WORLD_LANDMARKS:world_landmarks"
@@ -271,7 +263,7 @@ REGISTER_MEDIAPIPE_GRAPH(
// input_stream: "HAND_TRACKING_IDS:hand_tracking_ids"
// output_stream: "HAND_GESTURES:hand_gestures"
// options {
// [mediapipe.tasks.vision.hand_gesture_recognizer.proto.HandGestureRecognizerSubgraph.ext]
// [mediapipe.tasks.vision.gesture_recognizer.proto.MultipleHandGestureRecognizerGraph.ext]
// {
// base_options {
// model_asset {
@@ -281,15 +273,15 @@ REGISTER_MEDIAPIPE_GRAPH(
// }
// }
// }
class HandGestureRecognizerSubgraph : public core::ModelTaskGraph {
class MultipleHandGestureRecognizerGraph : public core::ModelTaskGraph {
public:
absl::StatusOr<CalculatorGraphConfig> GetConfig(
SubgraphContext* sc) override {
Graph graph;
ASSIGN_OR_RETURN(
auto multi_hand_gestures,
BuildMultiHandGestureRecognizerSubraph(
sc->Options<HandGestureRecognizerSubgraphOptions>(),
BuildMultiGestureRecognizerSubraph(
sc->Options<HandGestureRecognizerGraphOptions>(),
graph[Input<std::vector<ClassificationList>>(kHandednessTag)],
graph[Input<std::vector<NormalizedLandmarkList>>(kLandmarksTag)],
graph[Input<std::vector<LandmarkList>>(kWorldLandmarksTag)],
@@ -302,8 +294,8 @@ class HandGestureRecognizerSubgraph : public core::ModelTaskGraph {
private:
absl::StatusOr<Source<std::vector<ClassificationResult>>>
BuildMultiHandGestureRecognizerSubraph(
const HandGestureRecognizerSubgraphOptions& graph_options,
BuildMultiGestureRecognizerSubraph(
const HandGestureRecognizerGraphOptions& graph_options,
Source<std::vector<ClassificationList>> multi_handedness,
Source<std::vector<NormalizedLandmarkList>> multi_hand_landmarks,
Source<std::vector<LandmarkList>> multi_hand_world_landmarks,
@@ -341,17 +333,18 @@ class HandGestureRecognizerSubgraph : public core::ModelTaskGraph {
hand_tracking_id >> get_world_landmarks_at_index.In(kIndexTag);
auto hand_world_landmarks = get_world_landmarks_at_index.Out(kItemTag);
auto& hand_gesture_recognizer_subgraph = graph.AddNode(
"mediapipe.tasks.vision.SingleHandGestureRecognizerSubgraph");
hand_gesture_recognizer_subgraph
.GetOptions<HandGestureRecognizerSubgraphOptions>()
auto& hand_gesture_recognizer_graph = graph.AddNode(
"mediapipe.tasks.vision.gesture_recognizer."
"SingleHandGestureRecognizerGraph");
hand_gesture_recognizer_graph
.GetOptions<HandGestureRecognizerGraphOptions>()
.CopyFrom(graph_options);
handedness >> hand_gesture_recognizer_subgraph.In(kHandednessTag);
hand_landmarks >> hand_gesture_recognizer_subgraph.In(kLandmarksTag);
handedness >> hand_gesture_recognizer_graph.In(kHandednessTag);
hand_landmarks >> hand_gesture_recognizer_graph.In(kLandmarksTag);
hand_world_landmarks >>
hand_gesture_recognizer_subgraph.In(kWorldLandmarksTag);
image_size_clone >> hand_gesture_recognizer_subgraph.In(kImageSizeTag);
auto hand_gestures = hand_gesture_recognizer_subgraph.Out(kHandGesturesTag);
hand_gesture_recognizer_graph.In(kWorldLandmarksTag);
image_size_clone >> hand_gesture_recognizer_graph.In(kImageSizeTag);
auto hand_gestures = hand_gesture_recognizer_graph.Out(kHandGesturesTag);
auto& end_loop_classification_results =
graph.AddNode("mediapipe.tasks.EndLoopClassificationResultCalculator");
@@ -364,9 +357,12 @@ class HandGestureRecognizerSubgraph : public core::ModelTaskGraph {
}
};
// clang-format off
REGISTER_MEDIAPIPE_GRAPH(
::mediapipe::tasks::vision::HandGestureRecognizerSubgraph);
::mediapipe::tasks::vision::gesture_recognizer::MultipleHandGestureRecognizerGraph); // NOLINT
// clang-format on
} // namespace gesture_recognizer
} // namespace vision
} // namespace tasks
} // namespace mediapipe

View File

@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "mediapipe/tasks/cc/vision/hand_gesture_recognizer/handedness_util.h"
#include "mediapipe/tasks/cc/vision/gesture_recognizer/handedness_util.h"
#include <algorithm>
@@ -25,6 +25,7 @@ limitations under the License.
namespace mediapipe {
namespace tasks {
namespace vision {
namespace gesture_recognizer {
namespace {} // namespace
@@ -58,6 +59,7 @@ absl::StatusOr<float> GetLeftHandScore(
}
}
} // namespace gesture_recognizer
} // namespace vision
} // namespace tasks
} // namespace mediapipe

View File

@@ -13,8 +13,8 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#ifndef MEDIAPIPE_TASKS_CC_VISION_HAND_GESTURE_RECOGNIZER_HADNDEDNESS_UTILS_H_
#define MEDIAPIPE_TASKS_CC_VISION_HAND_GESTURE_RECOGNIZER_HADNDEDNESS_UTILS_H_
#ifndef MEDIAPIPE_TASKS_CC_VISION_GESTURE_RECOGNIZER_HADNDEDNESS_UTILS_H_
#define MEDIAPIPE_TASKS_CC_VISION_GESTURE_RECOGNIZER_HADNDEDNESS_UTILS_H_
#include "absl/status/statusor.h"
#include "mediapipe/framework/formats/classification.pb.h"
@@ -22,6 +22,7 @@ limitations under the License.
namespace mediapipe {
namespace tasks {
namespace vision {
namespace gesture_recognizer {
bool IsLeftHand(const mediapipe::Classification& c);
@@ -30,8 +31,9 @@ bool IsRightHand(const mediapipe::Classification& c);
absl::StatusOr<float> GetLeftHandScore(
const mediapipe::ClassificationList& classification_list);
} // namespace gesture_recognizer
} // namespace vision
} // namespace tasks
} // namespace mediapipe
#endif // MEDIAPIPE_TASKS_CC_VISION_HAND_GESTURE_RECOGNIZER_HADNDEDNESS_UTILS_H_
#endif // MEDIAPIPE_TASKS_CC_VISION_GESTURE_RECOGNIZER_HADNDEDNESS_UTILS_H_
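A hedged usage sketch of the relocated helpers, based only on the signatures declared above; the label and score values are illustrative:

    #include "absl/status/statusor.h"
    #include "mediapipe/framework/formats/classification.pb.h"
    #include "mediapipe/tasks/cc/vision/gesture_recognizer/handedness_util.h"

    float LeftHandScoreOrZero() {
      mediapipe::ClassificationList list;
      auto* c = list.add_classification();
      c->set_label("Left");  // illustrative handedness label
      c->set_score(0.9f);
      absl::StatusOr<float> score =
          mediapipe::tasks::vision::gesture_recognizer::GetLeftHandScore(list);
      return score.ok() ? *score : 0.0f;  // treat an error status as "no left hand"
    }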

View File

@@ -13,7 +13,7 @@ See the License for the specific language governing permissions and
limitations under the License.
==============================================================================*/
#include "mediapipe/tasks/cc/vision/hand_gesture_recognizer/handedness_util.h"
#include "mediapipe/tasks/cc/vision/gesture_recognizer/handedness_util.h"
#include "mediapipe/framework/formats/classification.pb.h"
#include "mediapipe/framework/port/gmock.h"
@@ -23,6 +23,7 @@ limitations under the License.
namespace mediapipe {
namespace tasks {
namespace vision {
namespace gesture_recognizer {
namespace {
TEST(GetLeftHandScore, SingleLeftHandClassification) {
@@ -72,6 +73,7 @@ TEST(GetLeftHandScore, LeftAndRightLowerCaseHandClassification) {
}
} // namespace
} // namespace gesture_recognizer
} // namespace vision
} // namespace tasks
} // namespace mediapipe

View File

@@ -21,8 +21,8 @@ package(default_visibility = [
licenses(["notice"])
mediapipe_proto_library(
name = "hand_gesture_recognizer_subgraph_options_proto",
srcs = ["hand_gesture_recognizer_subgraph_options.proto"],
name = "hand_gesture_recognizer_graph_options_proto",
srcs = ["hand_gesture_recognizer_graph_options.proto"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
@@ -30,12 +30,3 @@ mediapipe_proto_library(
"//mediapipe/tasks/cc/core/proto:base_options_proto",
],
)
mediapipe_proto_library(
name = "landmarks_to_matrix_calculator_proto",
srcs = ["landmarks_to_matrix_calculator.proto"],
deps = [
"//mediapipe/framework:calculator_options_proto",
"//mediapipe/framework:calculator_proto",
],
)

View File

@@ -15,15 +15,15 @@ limitations under the License.
// TODO Refactor naming and class structure of hand related Tasks.
syntax = "proto2";
package mediapipe.tasks.vision.hand_gesture_recognizer.proto;
package mediapipe.tasks.vision.gesture_recognizer.proto;
import "mediapipe/framework/calculator.proto";
import "mediapipe/tasks/cc/components/processors/proto/classifier_options.proto";
import "mediapipe/tasks/cc/core/proto/base_options.proto";
message HandGestureRecognizerSubgraphOptions {
message HandGestureRecognizerGraphOptions {
extend mediapipe.CalculatorOptions {
optional HandGestureRecognizerSubgraphOptions ext = 463370452;
optional HandGestureRecognizerGraphOptions ext = 463370452;
}
// Base options for configuring hand gesture recognition subgraph, such as
// specifying the TfLite model file with metadata, accelerator options, etc.

View File

@@ -46,6 +46,7 @@ limitations under the License.
namespace mediapipe {
namespace tasks {
namespace vision {
namespace hand_detector {
namespace {
@@ -139,9 +140,9 @@ void ConfigureRectTransformationCalculator(
} // namespace
// A "mediapipe.tasks.vision.HandDetectorGraph" performs hand detection. The
// Hand Detection Graph is based on palm detection model, and scale the detected
// palm bounding box to enclose the detected whole hand.
// A "mediapipe.tasks.vision.hand_detector.HandDetectorGraph" performs hand
// detection. The Hand Detection Graph is based on palm detection model, and
// scale the detected palm bounding box to enclose the detected whole hand.
// Accepts CPU input images and outputs Landmark on CPU.
//
// Inputs:
@@ -161,14 +162,15 @@ void ConfigureRectTransformationCalculator(
//
// Example:
// node {
// calculator: "mediapipe.tasks.vision.HandDetectorGraph"
// calculator: "mediapipe.tasks.vision.hand_detector.HandDetectorGraph"
// input_stream: "IMAGE:image"
// output_stream: "PALM_DETECTIONS:palm_detections"
// output_stream: "HAND_RECTS:hand_rects_from_palm_detections"
// output_stream: "PALM_RECTS:palm_rects"
// output_stream: "IMAGE:image_out"
// options {
// [mediapipe.tasks.hand_detector.proto.HandDetectorGraphOptions.ext] {
// [mediapipe.tasks.vision.hand_detector.proto.HandDetectorGraphOptions.ext]
// {
// base_options {
// model_asset {
// file_name: "palm_detection.tflite"
@@ -334,8 +336,10 @@ class HandDetectorGraph : public core::ModelTaskGraph {
}
};
REGISTER_MEDIAPIPE_GRAPH(::mediapipe::tasks::vision::HandDetectorGraph);
REGISTER_MEDIAPIPE_GRAPH(
::mediapipe::tasks::vision::hand_detector::HandDetectorGraph);
} // namespace hand_detector
} // namespace vision
} // namespace tasks
} // namespace mediapipe

View File

@@ -47,6 +47,7 @@ limitations under the License.
namespace mediapipe {
namespace tasks {
namespace vision {
namespace hand_detector {
namespace {
using ::file::Defaults;
@@ -105,7 +106,7 @@ absl::StatusOr<std::unique_ptr<TaskRunner>> CreateTaskRunner(
Graph graph;
auto& hand_detection =
graph.AddNode("mediapipe.tasks.vision.HandDetectorGraph");
graph.AddNode("mediapipe.tasks.vision.hand_detector.HandDetectorGraph");
auto options = std::make_unique<HandDetectorGraphOptions>();
options->mutable_base_options()->mutable_model_asset()->set_file_name(
@@ -201,6 +202,7 @@
});
} // namespace
} // namespace hand_detector
} // namespace vision
} // namespace tasks
} // namespace mediapipe

View File

@@ -216,7 +216,7 @@ class HandLandmarkerGraph : public core::ModelTaskGraph {
DisallowIf(image_in, has_enough_hands, graph);
auto& hand_detector =
graph.AddNode("mediapipe.tasks.vision.HandDetectorGraph");
graph.AddNode("mediapipe.tasks.vision.hand_detector.HandDetectorGraph");
hand_detector.GetOptions<HandDetectorGraphOptions>().CopyFrom(
tasks_options.hand_detector_graph_options());
image_for_hand_detector >> hand_detector.In("IMAGE");