enable NNAPI in CPU path

With the NNAPI delegate added, Android devices with floating-point
NNAPI device(s) can run facedetectioncpu and objectdetectioncpu
accelerated. On devices without accelerators, the TFLite interpreter
will simply fall back to the CPU.
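For context, the pattern this commit applies looks roughly like the sketch below. It is a minimal, illustrative example against the TFLite C++ API of that era; the BuildNnapiInterpreter helper and its scaffolding are hypothetical and not part of the commit.

#include <memory>

#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"

// Hypothetical helper: build an interpreter and try to delegate it to NNAPI.
std::unique_ptr<tflite::Interpreter> BuildNnapiInterpreter(
    const tflite::FlatBufferModel& model) {
  tflite::ops::builtin::BuiltinOpResolver resolver;
  std::unique_ptr<tflite::Interpreter> interpreter;
  tflite::InterpreterBuilder(model, resolver)(&interpreter);

  // Let fp32 ops run at fp16 precision; many floating-point NNAPI
  // accelerators only support float16.
  interpreter->SetAllowFp16PrecisionForFp32(true);

  // Hand the graph to NNAPI. If no accelerator is present, or delegation
  // fails, execution simply stays on the stock CPU kernels.
  if (interpreter->ModifyGraphWithDelegate(tflite::NnApiDelegate()) !=
      kTfLiteOk) {
    // Fall back: the undelegated interpreter still runs on CPU.
  }

  interpreter->AllocateTensors();
  return interpreter;
}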
Koan-Sin Tan 2019-11-08 15:39:23 +08:00
parent fce372d153
commit aea83912f9


@@ -305,6 +305,10 @@ REGISTER_CALCULATOR(TfLiteInferenceCalculator);
 #endif
   } else {
     // Read CPU input into tensors.
+    interpreter_->SetAllowFp16PrecisionForFp32(1);
+    delegate_ = tflite::NnApiDelegate();
+    interpreter_->ModifyGraphWithDelegate(delegate_);
     const auto& input_tensors =
         cc->Inputs().Tag("TENSORS").Get<std::vector<TfLiteTensor>>();
     RET_CHECK_GT(input_tensors.size(), 0);
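Roughly, the three added calls do the following: SetAllowFp16PrecisionForFp32(1) permits fp32 ops to execute at fp16 precision, which is what lets floating-point NNAPI devices (typically fp16 hardware) accept these models; tflite::NnApiDelegate() returns the NNAPI delegate; and ModifyGraphWithDelegate() moves the supported portion of the graph onto it, leaving anything the delegate rejects on the stock CPU kernels.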