Add CPU invoke to Perfetto trace

PiperOrigin-RevId: 485464221
MediaPipe Team authored 2022-11-01 17:48:46 -07:00; committed by Copybara-Service
parent ecd93ff080, commit aaf98ea43c
7 changed files with 18 additions and 7 deletions

mediapipe/calculators/tensor/BUILD

@@ -530,6 +530,7 @@ cc_library(
     }),
     visibility = ["//visibility:public"],
     deps = [
+        "//mediapipe/framework:calculator_context",
         "//mediapipe/framework/formats:tensor",
         "@com_google_absl//absl/status:statusor",
     ],
@@ -550,6 +551,7 @@ cc_library(
     visibility = ["//visibility:public"],
     deps = [
         ":inference_runner",
+        "//mediapipe/framework:mediapipe_profiling",
         "//mediapipe/framework/api2:packet",
         "//mediapipe/framework/formats:tensor",
         "//mediapipe/framework/port:ret_check",

mediapipe/calculators/tensor/inference_calculator_cpu.cc

@@ -72,7 +72,7 @@ absl::Status InferenceCalculatorCpuImpl::Process(CalculatorContext* cc) {
   RET_CHECK(!input_tensors.empty());
   ASSIGN_OR_RETURN(std::vector<Tensor> output_tensors,
-                   inference_runner_->Run(input_tensors));
+                   inference_runner_->Run(cc, input_tensors));
   kOutTensors(cc).Send(std::move(output_tensors));
   return absl::OkStatus();
 }

mediapipe/calculators/tensor/inference_calculator_xnnpack.cc

@@ -70,7 +70,7 @@ absl::Status InferenceCalculatorXnnpackImpl::Process(CalculatorContext* cc) {
   RET_CHECK(!input_tensors.empty());
   ASSIGN_OR_RETURN(std::vector<Tensor> output_tensors,
-                   inference_runner_->Run(input_tensors));
+                   inference_runner_->Run(cc, input_tensors));
   kOutTensors(cc).Send(std::move(output_tensors));
   return absl::OkStatus();
 }
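Both call sites change identically: the CalculatorContext* that Process() receives is forwarded into InferenceRunner::Run() so the runner can attribute trace events to the calling node. A minimal sketch of the pattern, assuming the in-tree headers shown in this commit; the helper RunWithContext is invented for illustration and is not part of this commit:

#include <vector>

#include "absl/status/statusor.h"
#include "mediapipe/calculators/tensor/inference_runner.h"
#include "mediapipe/framework/calculator_context.h"
#include "mediapipe/framework/formats/tensor.h"

namespace mediapipe {

// Hypothetical helper: `cc` is borrowed only for the duration of the call.
// A CalculatorContext is valid only while Process() is on the stack, so
// runners must use it immediately and never retain it.
absl::StatusOr<std::vector<Tensor>> RunWithContext(
    CalculatorContext* cc, InferenceRunner& runner,
    const std::vector<Tensor>& input_tensors) {
  return runner.Run(cc, input_tensors);  // profiler events attach to cc's node
}

}  // namespace mediapipe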

mediapipe/calculators/tensor/inference_interpreter_delegate_runner.cc

@@ -20,12 +20,15 @@
 #include "absl/status/status.h"
 #include "absl/status/statusor.h"
 #include "mediapipe/framework/formats/tensor.h"
+#include "mediapipe/framework/mediapipe_profiling.h"
 #include "mediapipe/framework/port/ret_check.h"
 #include "tensorflow/lite/c/c_api_types.h"
 #include "tensorflow/lite/interpreter.h"
 #include "tensorflow/lite/interpreter_builder.h"
 #include "tensorflow/lite/string_util.h"
 
+#define PERFETTO_TRACK_EVENT_NAMESPACE mediapipe
+
 namespace mediapipe {
 namespace {
@@ -79,7 +82,7 @@ class InferenceInterpreterDelegateRunner : public InferenceRunner {
         delegate_(std::move(delegate)) {}
 
   absl::StatusOr<std::vector<Tensor>> Run(
-      const std::vector<Tensor>& input_tensors) override;
+      CalculatorContext* cc, const std::vector<Tensor>& input_tensors) override;
 
  private:
   api2::Packet<TfLiteModelPtr> model_;
@@ -88,7 +91,7 @@ class InferenceInterpreterDelegateRunner : public InferenceRunner {
 };
 
 absl::StatusOr<std::vector<Tensor>> InferenceInterpreterDelegateRunner::Run(
-    const std::vector<Tensor>& input_tensors) {
+    CalculatorContext* cc, const std::vector<Tensor>& input_tensors) {
   // Read CPU input into tensors.
   RET_CHECK_EQ(interpreter_->inputs().size(), input_tensors.size());
   for (int i = 0; i < input_tensors.size(); ++i) {
@@ -131,8 +134,10 @@ absl::StatusOr<std::vector<Tensor>> InferenceInterpreterDelegateRunner::Run(
   }
 
   // Run inference.
-  RET_CHECK_EQ(interpreter_->Invoke(), kTfLiteOk);
+  {
+    MEDIAPIPE_PROFILING(CPU_TASK_INVOKE, cc);
+    RET_CHECK_EQ(interpreter_->Invoke(), kTfLiteOk);
+  }
 
   // Output result tensors (CPU).
   const auto& tensor_indexes = interpreter_->outputs();
   std::vector<Tensor> output_tensors;
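The extra braces around Invoke() are what scope the trace event: MEDIAPIPE_PROFILING introduces an object whose lifetime spans the enclosing block, so the CPU_TASK_INVOKE event covers exactly the TFLite invocation and nothing else. A standalone analogy of that RAII pattern (this is a sketch of the idea, not MediaPipe's actual macro, which logs into the graph profiler rather than stderr):

#include <chrono>
#include <cstdio>

// The event begins at construction and ends when the enclosing block closes.
class ScopedTraceEvent {
 public:
  explicit ScopedTraceEvent(const char* name)
      : name_(name), start_(std::chrono::steady_clock::now()) {}
  ~ScopedTraceEvent() {
    const auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                        std::chrono::steady_clock::now() - start_)
                        .count();
    std::fprintf(stderr, "%s: %lld us\n", name_, static_cast<long long>(us));
  }

 private:
  const char* name_;
  std::chrono::steady_clock::time_point start_;
};

int main() {
  {
    ScopedTraceEvent trace("CPU_TASK_INVOKE");  // spans only this block
    // interpreter->Invoke() would run here
  }  // destructor fires: the event closes exactly when inference ends
  return 0;
}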

mediapipe/calculators/tensor/inference_runner.h

@@ -2,6 +2,7 @@
 #define MEDIAPIPE_CALCULATORS_TENSOR_INFERENCE_RUNNER_H_
 
 #include "absl/status/statusor.h"
+#include "mediapipe/framework/calculator_context.h"
 #include "mediapipe/framework/formats/tensor.h"
 
 namespace mediapipe {
@@ -11,7 +12,7 @@ class InferenceRunner {
  public:
   virtual ~InferenceRunner() = default;
   virtual absl::StatusOr<std::vector<Tensor>> Run(
-      const std::vector<Tensor>& inputs) = 0;
+      CalculatorContext* cc, const std::vector<Tensor>& inputs) = 0;
 };
 
 }  // namespace mediapipe
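This is the source-breaking part of the commit: every InferenceRunner implementation, in-tree or downstream, must add the CalculatorContext* parameter. A hypothetical out-of-tree runner updated for the new contract might look like the following; EchoInferenceRunner is invented for illustration, and only the Run signature comes from the diff above:

#include <vector>

#include "absl/status/status.h"
#include "absl/status/statusor.h"
#include "mediapipe/calculators/tensor/inference_runner.h"
#include "mediapipe/framework/calculator_context.h"
#include "mediapipe/framework/formats/tensor.h"

namespace mediapipe {

class EchoInferenceRunner : public InferenceRunner {
 public:
  absl::StatusOr<std::vector<Tensor>> Run(
      CalculatorContext* cc, const std::vector<Tensor>& inputs) override {
    // `cc` exists so runners can emit profiler events against the calling
    // node; a runner that does no tracing can simply ignore it.
    (void)cc;
    return absl::UnimplementedError("sketch only: no model to run");
  }
};

}  // namespace mediapipe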

mediapipe/framework/calculator_profile.proto

@@ -135,6 +135,7 @@ message GraphTrace {
     PACKET_QUEUED = 15;
     GPU_TASK_INVOKE = 16;
     TPU_TASK_INVOKE = 17;
+    CPU_TASK_INVOKE = 18;
   }
 
   // //depot/mediapipe/framework/mediapipe_profiling.h:profiler_census_tags,
   // //depot/mediapipe/framework/profiler/trace_buffer.h:event_type_list,
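The new enum value only shows up if the graph tracer is on. A hedged sketch of enabling it from C++, using the profiler_config fields documented for MediaPipe tracing (field names assumed from those docs; the graph body is elided):

#include "mediapipe/framework/calculator_framework.h"
#include "mediapipe/framework/port/parse_text_proto.h"

// Sketch: with the tracer enabled, CPU_TASK_INVOKE events are recorded
// around each CPU Invoke() and appear in the exported trace.
mediapipe::CalculatorGraphConfig MakeTracedGraphConfig() {
  return mediapipe::ParseTextProtoOrDie<mediapipe::CalculatorGraphConfig>(R"pb(
    profiler_config {
      trace_enabled: true
      enable_profiler: true
      trace_log_interval_count: 200
    }
    # node definitions (e.g. an InferenceCalculator) omitted
  )pb");
}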

mediapipe/framework/profiler/trace_buffer.h

@@ -111,6 +111,8 @@ struct TraceEvent {
   static constexpr EventType PACKET_QUEUED = GraphTrace::PACKET_QUEUED;
   static constexpr EventType GPU_TASK_INVOKE = GraphTrace::GPU_TASK_INVOKE;
   static constexpr EventType TPU_TASK_INVOKE = GraphTrace::TPU_TASK_INVOKE;
+  static constexpr EventType CPU_TASK_INVOKE = GraphTrace::CPU_TASK_INVOKE;
+
   // //depot/mediapipe/framework/mediapipe_profiling.h:profiler_census_tags,
   // //depot/mediapipe/framework/calculator_profile.proto:event_type,
   // )