From aea83912f9464c5fea0e855cb60ccf398d7fad60 Mon Sep 17 00:00:00 2001
From: Koan-Sin Tan
Date: Fri, 8 Nov 2019 15:39:23 +0800
Subject: [PATCH] enable NNAPI in CPU path

With the NNAPI delegate added, Android devices with floating-point
NNAPI device(s) can run facedetectioncpu and objectdetectioncpu with
acceleration. On devices without accelerators, the TFLite interpreter
will simply fall back to the CPU.
---
 mediapipe/calculators/tflite/tflite_inference_calculator.cc | 4 ++++
 1 file changed, 4 insertions(+)

diff --git a/mediapipe/calculators/tflite/tflite_inference_calculator.cc b/mediapipe/calculators/tflite/tflite_inference_calculator.cc
index ebd632df9..e21087e9b 100644
--- a/mediapipe/calculators/tflite/tflite_inference_calculator.cc
+++ b/mediapipe/calculators/tflite/tflite_inference_calculator.cc
@@ -305,6 +305,10 @@ REGISTER_CALCULATOR(TfLiteInferenceCalculator);
 #endif
   } else {
     // Read CPU input into tensors.
+    interpreter_->SetAllowFp16PrecisionForFp32(1);
+    delegate_ = tflite::NnApiDelegate();
+    interpreter_->ModifyGraphWithDelegate(delegate_);
+
     const auto& input_tensors =
         cc->Inputs().Tag("TENSORS").Get<std::vector<TfLiteTensor>>();
     RET_CHECK_GT(input_tensors.size(), 0);
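
Note (not part of the patch): below is a minimal standalone sketch of the same NNAPI setup against the TFLite C++ API, assuming TFLite around r2.0, where tflite::NnApiDelegate() (declared in tensorflow/lite/delegates/nnapi/nnapi_delegate.h) returns a process-wide TfLiteDelegate*. BuildNnapiInterpreter and the error handling are illustrative only, not MediaPipe code.

#include <memory>

#include "tensorflow/lite/delegates/nnapi/nnapi_delegate.h"
#include "tensorflow/lite/interpreter.h"
#include "tensorflow/lite/kernels/register.h"
#include "tensorflow/lite/model.h"

// Build an interpreter and route supported ops through NNAPI.
// On devices without an NNAPI accelerator, execution is expected to
// fall back to the regular CPU kernels.
std::unique_ptr<tflite::Interpreter> BuildNnapiInterpreter(
    const tflite::FlatBufferModel& model) {
  tflite::ops::builtin::BuiltinOpResolver resolver;
  std::unique_ptr<tflite::Interpreter> interpreter;
  if (tflite::InterpreterBuilder(model, resolver)(&interpreter) != kTfLiteOk) {
    return nullptr;
  }
  // Let fp32 models run at fp16 precision on accelerators that prefer it.
  interpreter->SetAllowFp16PrecisionForFp32(true);
  // Apply the delegate before tensors are allocated.
  if (interpreter->ModifyGraphWithDelegate(tflite::NnApiDelegate()) !=
      kTfLiteOk) {
    return nullptr;
  }
  if (interpreter->AllocateTensors() != kTfLiteOk) {
    return nullptr;
  }
  return interpreter;
}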