Add CPU invoke to Perfetto trace
PiperOrigin-RevId: 485464221
parent ecd93ff080
commit aaf98ea43c
--- a/mediapipe/calculators/tensor/BUILD
+++ b/mediapipe/calculators/tensor/BUILD
@@ -530,6 +530,7 @@ cc_library(
     }),
     visibility = ["//visibility:public"],
     deps = [
+        "//mediapipe/framework:calculator_context",
         "//mediapipe/framework/formats:tensor",
         "@com_google_absl//absl/status:statusor",
     ],
@@ -550,6 +551,7 @@ cc_library(
     visibility = ["//visibility:public"],
     deps = [
         ":inference_runner",
+        "//mediapipe/framework:mediapipe_profiling",
         "//mediapipe/framework/api2:packet",
         "//mediapipe/framework/formats:tensor",
         "//mediapipe/framework/port:ret_check",
--- a/mediapipe/calculators/tensor/inference_calculator_cpu.cc
+++ b/mediapipe/calculators/tensor/inference_calculator_cpu.cc
@@ -72,7 +72,7 @@ absl::Status InferenceCalculatorCpuImpl::Process(CalculatorContext* cc) {
   RET_CHECK(!input_tensors.empty());

   ASSIGN_OR_RETURN(std::vector<Tensor> output_tensors,
-                   inference_runner_->Run(input_tensors));
+                   inference_runner_->Run(cc, input_tensors));
   kOutTensors(cc).Send(std::move(output_tensors));
   return absl::OkStatus();
 }
--- a/mediapipe/calculators/tensor/inference_calculator_xnnpack.cc
+++ b/mediapipe/calculators/tensor/inference_calculator_xnnpack.cc
@@ -70,7 +70,7 @@ absl::Status InferenceCalculatorXnnpackImpl::Process(CalculatorContext* cc) {
   RET_CHECK(!input_tensors.empty());

   ASSIGN_OR_RETURN(std::vector<Tensor> output_tensors,
-                   inference_runner_->Run(input_tensors));
+                   inference_runner_->Run(cc, input_tensors));
   kOutTensors(cc).Send(std::move(output_tensors));
   return absl::OkStatus();
 }
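Both Process() methods change the same way: the calculator now forwards its
CalculatorContext into the runner, which is what lets the runner attribute
trace events to the calling node. A minimal sketch of the new calling
convention (RunInference is a hypothetical helper, not part of this change;
the includes follow the deps added in the BUILD hunks above):

    #include <vector>

    #include "absl/status/status.h"
    #include "mediapipe/calculators/tensor/inference_runner.h"
    #include "mediapipe/framework/calculator_context.h"
    #include "mediapipe/framework/formats/tensor.h"
    #include "mediapipe/framework/port/status_macros.h"

    namespace mediapipe {

    // Hypothetical helper: forwards the per-node context along with the
    // tensors, mirroring the call-site change in both calculators above.
    absl::Status RunInference(CalculatorContext* cc, InferenceRunner& runner,
                              const std::vector<Tensor>& inputs,
                              std::vector<Tensor>& outputs) {
      ASSIGN_OR_RETURN(outputs, runner.Run(cc, inputs));
      return absl::OkStatus();
    }

    }  // namespace mediapipe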
--- a/mediapipe/calculators/tensor/inference_interpreter_delegate_runner.cc
+++ b/mediapipe/calculators/tensor/inference_interpreter_delegate_runner.cc
@@ -20,12 +20,15 @@
 #include "absl/status/status.h"
 #include "absl/status/statusor.h"
 #include "mediapipe/framework/formats/tensor.h"
+#include "mediapipe/framework/mediapipe_profiling.h"
 #include "mediapipe/framework/port/ret_check.h"
 #include "tensorflow/lite/c/c_api_types.h"
 #include "tensorflow/lite/interpreter.h"
 #include "tensorflow/lite/interpreter_builder.h"
 #include "tensorflow/lite/string_util.h"

+#define PERFETTO_TRACK_EVENT_NAMESPACE mediapipe
+
 namespace mediapipe {

 namespace {
@@ -79,7 +82,7 @@ class InferenceInterpreterDelegateRunner : public InferenceRunner {
         delegate_(std::move(delegate)) {}

   absl::StatusOr<std::vector<Tensor>> Run(
-      const std::vector<Tensor>& input_tensors) override;
+      CalculatorContext* cc, const std::vector<Tensor>& input_tensors) override;

  private:
   api2::Packet<TfLiteModelPtr> model_;
@@ -88,7 +91,7 @@ class InferenceInterpreterDelegateRunner : public InferenceRunner {
 };

 absl::StatusOr<std::vector<Tensor>> InferenceInterpreterDelegateRunner::Run(
-    const std::vector<Tensor>& input_tensors) {
+    CalculatorContext* cc, const std::vector<Tensor>& input_tensors) {
   // Read CPU input into tensors.
   RET_CHECK_EQ(interpreter_->inputs().size(), input_tensors.size());
   for (int i = 0; i < input_tensors.size(); ++i) {
@@ -131,8 +134,10 @@ absl::StatusOr<std::vector<Tensor>> InferenceInterpreterDelegateRunner::Run(
   }

   // Run inference.
-  RET_CHECK_EQ(interpreter_->Invoke(), kTfLiteOk);
+  {
+    MEDIAPIPE_PROFILING(CPU_TASK_INVOKE, cc);
+    RET_CHECK_EQ(interpreter_->Invoke(), kTfLiteOk);
+  }
   // Output result tensors (CPU).
   const auto& tensor_indexes = interpreter_->outputs();
   std::vector<Tensor> output_tensors;
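The braces around Invoke() are load-bearing: MEDIAPIPE_PROFILING opens a
scoped trace event, so the extra block confines the new CPU_TASK_INVOKE slice
to the interpreter call alone rather than the whole of Run(). A sketch of the
pattern (TracedInvoke and DoExpensiveWork are hypothetical stand-ins):

    #include "mediapipe/framework/calculator_context.h"
    #include "mediapipe/framework/mediapipe_profiling.h"

    // Hypothetical stand-in for interpreter_->Invoke().
    void DoExpensiveWork();

    void TracedInvoke(mediapipe::CalculatorContext* cc) {
      // Code here runs before the trace slice opens.
      {
        MEDIAPIPE_PROFILING(CPU_TASK_INVOKE, cc);  // Slice opens here...
        DoExpensiveWork();
      }  // ...and closes with the scope, so only this block is measured.
    }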
--- a/mediapipe/calculators/tensor/inference_runner.h
+++ b/mediapipe/calculators/tensor/inference_runner.h
@@ -2,6 +2,7 @@
 #define MEDIAPIPE_CALCULATORS_TENSOR_INFERENCE_RUNNER_H_

 #include "absl/status/statusor.h"
+#include "mediapipe/framework/calculator_context.h"
 #include "mediapipe/framework/formats/tensor.h"

 namespace mediapipe {
@@ -11,7 +12,7 @@ class InferenceRunner {
  public:
   virtual ~InferenceRunner() = default;
   virtual absl::StatusOr<std::vector<Tensor>> Run(
-      const std::vector<Tensor>& inputs) = 0;
+      CalculatorContext* cc, const std::vector<Tensor>& inputs) = 0;
 };

 }  // namespace mediapipe
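Every InferenceRunner implementation has to adopt the new signature. A sketch
of a conforming runner (EchoRunner is hypothetical: it copies inputs to
outputs instead of running a model, and assumes plain CPU float tensors so the
byte copy through the Tensor CPU views is valid):

    #include <cstring>
    #include <utility>
    #include <vector>

    #include "absl/status/statusor.h"
    #include "mediapipe/calculators/tensor/inference_runner.h"
    #include "mediapipe/framework/calculator_context.h"
    #include "mediapipe/framework/formats/tensor.h"
    #include "mediapipe/framework/mediapipe_profiling.h"

    namespace mediapipe {

    class EchoRunner : public InferenceRunner {
     public:
      absl::StatusOr<std::vector<Tensor>> Run(
          CalculatorContext* cc, const std::vector<Tensor>& inputs) override {
        std::vector<Tensor> outputs;
        outputs.reserve(inputs.size());
        // The context makes node-attributed tracing possible inside runners.
        MEDIAPIPE_PROFILING(CPU_TASK_INVOKE, cc);
        for (const Tensor& in : inputs) {
          Tensor out(in.element_type(), in.shape());
          // Byte-for-byte copy between the tensors' CPU storage.
          std::memcpy(out.GetCpuWriteView().buffer<float>(),
                      in.GetCpuReadView().buffer<float>(), in.bytes());
          outputs.push_back(std::move(out));
        }
        return outputs;
      }
    };

    }  // namespace mediapipe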
--- a/mediapipe/framework/calculator_profile.proto
+++ b/mediapipe/framework/calculator_profile.proto
@@ -135,6 +135,7 @@ message GraphTrace {
     PACKET_QUEUED = 15;
     GPU_TASK_INVOKE = 16;
     TPU_TASK_INVOKE = 17;
+    CPU_TASK_INVOKE = 18;
   }
   // //depot/mediapipe/framework/mediapipe_profiling.h:profiler_census_tags,
   // //depot/mediapipe/framework/profiler/trace_buffer.h:event_type_list,
--- a/mediapipe/framework/profiler/trace_buffer.h
+++ b/mediapipe/framework/profiler/trace_buffer.h
@@ -111,6 +111,8 @@ struct TraceEvent {
   static constexpr EventType PACKET_QUEUED = GraphTrace::PACKET_QUEUED;
   static constexpr EventType GPU_TASK_INVOKE = GraphTrace::GPU_TASK_INVOKE;
   static constexpr EventType TPU_TASK_INVOKE = GraphTrace::TPU_TASK_INVOKE;
+  static constexpr EventType CPU_TASK_INVOKE = GraphTrace::CPU_TASK_INVOKE;
+
   // //depot/mediapipe/framework/mediapipe_profiling.h:profiler_census_tags,
   // //depot/mediapipe/framework/calculator_profile.proto:event_type,
   // )
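The new event is only recorded when tracing is enabled on the graph. A sketch
of the wiring (field names are from ProfilerConfig in calculator_profile.proto;
ParseTextProtoOrDie is mediapipe's usual text-proto helper, and the
trace_log_path value is illustrative):

    #include "mediapipe/framework/calculator_framework.h"
    #include "mediapipe/framework/port/parse_text_proto.h"

    // Returns a graph config with tracing turned on, so CPU_TASK_INVOKE
    // slices appear in the recorded trace. Graph nodes are omitted.
    mediapipe::CalculatorGraphConfig MakeTracedConfig() {
      return mediapipe::ParseTextProtoOrDie<mediapipe::CalculatorGraphConfig>(
          R"pb(
            profiler_config {
              trace_enabled: true
              enable_profiler: true
              trace_log_path: "/tmp/mediapipe_trace_"  # illustrative path
            }
          )pb");
    }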