mediapipe/mediapipe/calculators/tflite/tflite_inference_calculator.proto

// Copyright 2019 The MediaPipe Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto2";

package mediapipe;

import "mediapipe/framework/calculator.proto";

// Full Example:
//
// node {
//   calculator: "TfLiteInferenceCalculator"
//   input_stream: "TENSOR_IN:image_tensors"
//   output_stream: "TENSOR_OUT:result_tensors"
//   options {
//     [mediapipe.TfLiteInferenceCalculatorOptions.ext] {
//       model_path: "model.tflite"
//       delegate { gpu {} }
//     }
//   }
// }
//
message TfLiteInferenceCalculatorOptions {
  extend mediapipe.CalculatorOptions {
    optional TfLiteInferenceCalculatorOptions ext = 233867213;
  }

  message Delegate {
    // Default inference provided by tflite.
    message TfLite {}

    // Delegate to run GPU inference depending on the device.
    // (Can use OpenGL, OpenCL, or Metal, depending on the device.)
    message Gpu {
      // Experimental, Android/Linux only. Use the TFLite GPU delegate API2
      // for NN inference.
      // example:
      //   delegate: { gpu { use_advanced_gpu_api: true } }
      optional bool use_advanced_gpu_api = 1 [default = false];
    }

    // Android only.
    message Nnapi {}

    message Xnnpack {
      // Number of threads for the XNNPACK delegate. (By default, the
      // calculator tries to choose the optimal number of threads depending
      // on the device.)
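      // example (illustrative thread count; the default of -1 lets the
      // calculator pick):
      //   delegate: { xnnpack { num_threads: 4 } }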
      optional int32 num_threads = 1 [default = -1];
    }

    oneof delegate {
      TfLite tflite = 1;
      Gpu gpu = 2;
      Nnapi nnapi = 3;
      Xnnpack xnnpack = 4;
    }
  }

  // Path to the TF Lite model (ex: /path/to/modelname.tflite).
  // On mobile, this is generally just modelname.tflite.
  optional string model_path = 1;

  // Whether the TF Lite GPU or CPU backend should be used. Effective only
  // when input tensors are on CPU. For input tensors on GPU, the GPU backend
  // is always used.
  // DEPRECATED: configure "delegate" instead.
  optional bool use_gpu = 2 [deprecated = true, default = false];

  // Android only. When true, an NNAPI delegate will be used for inference.
  // If NNAPI is not available, then the default CPU delegate will be used
  // automatically.
  // DEPRECATED: configure "delegate" instead.
  optional bool use_nnapi = 3 [deprecated = true, default = false];

  // The number of threads available to the interpreter. Effective only when
  // input tensors are on CPU and 'use_gpu' is false.
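  // example (illustrative value):
  //   cpu_num_thread: 4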
  optional int32 cpu_num_thread = 4 [default = -1];

  // TfLite delegate to run inference.
  // NOTE: If no delegate is specified, the calculator is free to choose one.
  // NOTE: When "delegate" is specified, use_gpu/use_nnapi are ignored
  // (the delegate takes precedence over the deprecated use_* options).
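  // example (illustrative): selecting NNAPI this way replaces the deprecated
  // "use_nnapi: true":
  //   delegate: { nnapi {} }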
  optional Delegate delegate = 5;
}