Fix -Wunused-variable warnings

Remove unused variables. Add the __unused attribute to a variable that is only referenced in an assert. Move a variable inside the preprocessor conditional when it is only used there.

PiperOrigin-RevId: 481699164
MediaPipe Team 2022-10-17 11:46:04 -07:00 committed by Copybara-Service
parent 660f1812c2
commit f6c2f86086
6 changed files with 3 additions and 8 deletions
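
As a preface to the per-file hunks, here is a minimal, hypothetical C++ sketch of two of the patterns applied below: deleting a declaration that nothing reads, and declaring a variable inside the preprocessor conditional that is its only user. The identifiers and the feature macro are illustrative stand-ins (e.g. for MEDIAPIPE_TFLITE_GL_INFERENCE), not taken from the MediaPipe sources; the assert-only case is sketched after the last hunk.

#include <cstdio>

#define EXAMPLE_GPU_PATH 1         // hypothetical feature flag
int ChannelCount() { return 4; }   // hypothetical helper

void InitExample() {
  // Pattern 1: a local that nothing reads (cf. `MTLPixelFormat pixel_format;`
  // in the first hunk below) is simply deleted.

  // Pattern 2: a value read only under the feature flag warns in builds that
  // disable the flag if it is declared unconditionally, so it is declared
  // inside the guarded block instead (cf. `single_channel` in the tflite hunk).
#if EXAMPLE_GPU_PATH
  const bool single_channel = (ChannelCount() == 1);
  std::printf("single_channel=%d\n", single_channel);
#endif
}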

@@ -262,7 +262,6 @@ class SubRectExtractorMetal {
   RET_CHECK(pipeline_state != nil);
   std::string output_type_def;
-  MTLPixelFormat pixel_format;
   switch (output_format) {
     case OutputFormat::kF16C4:
       output_type_def = R"(

@@ -224,9 +224,6 @@ absl::Status InferenceCalculatorMetalImpl::InitInterpreter(
 void InferenceCalculatorMetalImpl::AddDelegate(
     CalculatorContext* cc, tflite::InterpreterBuilder* interpreter_builder) {
-  const auto& calculator_opts =
-      cc->Options<mediapipe::InferenceCalculatorOptions>();
   // Configure and create the delegate.
   TFLGpuDelegateOptions options;
   // `enable_quantization` enables the run of sparse models i.e. the models with

@@ -296,7 +296,6 @@ absl::Status TensorConverterCalculator::ProcessGPU(CalculatorContext* cc) {
   output_tensors->emplace_back(Tensor::ElementType::kFloat32,
                                Tensor::Shape{1, height, width, channels});
 #if MEDIAPIPE_METAL_ENABLED
-  id<MTLDevice> device = gpu_helper_.mtlDevice;
   id<MTLCommandBuffer> command_buffer = [gpu_helper_ commandBuffer];
   command_buffer.label = @"TensorConverterCalculatorConvert";
   id<MTLComputeCommandEncoder> compute_encoder =

@@ -532,7 +532,6 @@ absl::Status TensorsToDetectionsCalculator::ProcessGPU(
                                        detection_classes.data(),
                                        output_detections));
 #elif MEDIAPIPE_METAL_ENABLED
-  id<MTLDevice> device = gpu_helper_.mtlDevice;
   if (!anchors_init_) {
     if (input_tensors.size() == kNumInputTensorsWithAnchors) {
       RET_CHECK_EQ(input_tensors.size(), kNumInputTensorsWithAnchors);

@@ -499,7 +499,6 @@ absl::Status TfLiteConverterCalculator::InitGpu(CalculatorContext* cc) {
   gpu_data_out_ = absl::make_unique<GPUData>();
   gpu_data_out_->elements = input.height() * input.width() * max_num_channels_;
   const bool include_alpha = (max_num_channels_ == 4);
-  const bool single_channel = (max_num_channels_ == 1);
   if (!(format == mediapipe::ImageFormat::GRAY8 ||
         format == mediapipe::ImageFormat::SRGB ||
         format == mediapipe::ImageFormat::SRGBA))
@@ -509,6 +508,7 @@ absl::Status TfLiteConverterCalculator::InitGpu(CalculatorContext* cc) {
 #endif  // MEDIAPIPE_TFLITE_GPU_SUPPORTED
 #if MEDIAPIPE_TFLITE_GL_INFERENCE
+  const bool single_channel = (max_num_channels_ == 1);
   MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext(
       [this, &include_alpha, &input, &single_channel]() -> absl::Status {
         // Device memory.

@@ -111,7 +111,8 @@ typedef CVOpenGLESTextureCacheRef CVTextureCacheType;
 - (CVMetalTextureCacheRef)mtlTextureCache {
   @synchronized(self) {
     if (!_mtlTextureCache) {
-      CVReturn err = CVMetalTextureCacheCreate(NULL, NULL, self.mtlDevice, NULL, &_mtlTextureCache);
+      CVReturn __unused err =
+          CVMetalTextureCacheCreate(NULL, NULL, self.mtlDevice, NULL, &_mtlTextureCache);
       NSAssert(err == kCVReturnSuccess, @"Error at CVMetalTextureCacheCreate %d", err);
       // TODO: register and flush metal caches too.
     }
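
The last hunk annotates `err` rather than deleting it because the NSAssert still reads it in debug builds; the variable only looks unused when assertions are compiled out. A rough C++ analogue of that situation, using the standard assert/NDEBUG mechanism and a hypothetical helper in place of CVMetalTextureCacheCreate:

#include <cassert>

int CreateCache() { return 0; }  // hypothetical stand-in for the real call

void EnsureCache() {
  // With -DNDEBUG the assert expands to nothing, so an unannotated `err`
  // would trigger -Wunused-variable in release builds; with assertions on it
  // is still read, so it cannot simply be deleted. [[maybe_unused]] plays the
  // same role here as Darwin's __unused macro does in the hunk above.
  [[maybe_unused]] const int err = CreateCache();
  assert(err == 0 && "CreateCache failed");
}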