Fix -Wunused-variable warnings
Remove unused variables, add the __unused attribute to a variable that is only read in an assert, and move a variable inside the preprocessor guard when it is only used under that guard.

PiperOrigin-RevId: 481699164
parent 660f1812c2
commit f6c2f86086
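As a rough illustration of the assert-only case (a minimal sketch with a hypothetical DoWork helper, not code from this change): when assertions are compiled out, a variable that is only read by the assertion becomes unused and triggers -Wunused-variable; annotating it keeps the check in debug builds without the release-build warning.

#include <cassert>

// Hypothetical helper; stands in for a call such as CVMetalTextureCacheCreate
// whose result is only checked by an assertion.
int DoWork() { return 0; }

void Example() {
  // With NDEBUG defined, assert() expands to nothing, so `status` is never
  // read and -Wunused-variable fires. [[maybe_unused]] (standard C++17), or
  // the __unused macro used in this commit (on Apple/Clang toolchains it
  // typically expands to __attribute__((unused))), silences the warning.
  [[maybe_unused]] const int status = DoWork();
  assert(status == 0);
}

int main() { Example(); }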
@@ -262,7 +262,6 @@ class SubRectExtractorMetal {
   RET_CHECK(pipeline_state != nil);
 
   std::string output_type_def;
-  MTLPixelFormat pixel_format;
   switch (output_format) {
     case OutputFormat::kF16C4:
       output_type_def = R"(
@@ -224,9 +224,6 @@ absl::Status InferenceCalculatorMetalImpl::InitInterpreter(
 
 void InferenceCalculatorMetalImpl::AddDelegate(
     CalculatorContext* cc, tflite::InterpreterBuilder* interpreter_builder) {
-  const auto& calculator_opts =
-      cc->Options<mediapipe::InferenceCalculatorOptions>();
-
   // Configure and create the delegate.
   TFLGpuDelegateOptions options;
   // `enable_quantization` enables the run of sparse models i.e. the models with
@@ -296,7 +296,6 @@ absl::Status TensorConverterCalculator::ProcessGPU(CalculatorContext* cc) {
     output_tensors->emplace_back(Tensor::ElementType::kFloat32,
                                  Tensor::Shape{1, height, width, channels});
 #if MEDIAPIPE_METAL_ENABLED
-    id<MTLDevice> device = gpu_helper_.mtlDevice;
     id<MTLCommandBuffer> command_buffer = [gpu_helper_ commandBuffer];
     command_buffer.label = @"TensorConverterCalculatorConvert";
     id<MTLComputeCommandEncoder> compute_encoder =
@@ -532,7 +532,6 @@ absl::Status TensorsToDetectionsCalculator::ProcessGPU(
                                            detection_classes.data(),
                                            output_detections));
 #elif MEDIAPIPE_METAL_ENABLED
-  id<MTLDevice> device = gpu_helper_.mtlDevice;
   if (!anchors_init_) {
     if (input_tensors.size() == kNumInputTensorsWithAnchors) {
       RET_CHECK_EQ(input_tensors.size(), kNumInputTensorsWithAnchors);
@@ -499,7 +499,6 @@ absl::Status TfLiteConverterCalculator::InitGpu(CalculatorContext* cc) {
   gpu_data_out_ = absl::make_unique<GPUData>();
   gpu_data_out_->elements = input.height() * input.width() * max_num_channels_;
   const bool include_alpha = (max_num_channels_ == 4);
-  const bool single_channel = (max_num_channels_ == 1);
   if (!(format == mediapipe::ImageFormat::GRAY8 ||
         format == mediapipe::ImageFormat::SRGB ||
         format == mediapipe::ImageFormat::SRGBA))
@@ -509,6 +508,7 @@ absl::Status TfLiteConverterCalculator::InitGpu(CalculatorContext* cc) {
 #endif  // MEDIAPIPE_TFLITE_GPU_SUPPORTED
 
 #if MEDIAPIPE_TFLITE_GL_INFERENCE
+  const bool single_channel = (max_num_channels_ == 1);
   MP_RETURN_IF_ERROR(gpu_helper_.RunInGlContext(
       [this, &include_alpha, &input, &single_channel]() -> absl::Status {
         // Device memory.
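The two hunks above move single_channel into the #if MEDIAPIPE_TFLITE_GL_INFERENCE block. A minimal sketch of that pattern, using a hypothetical FEATURE_GL_PATH macro and Configure function (not MediaPipe code): a variable declared outside a preprocessor conditional but read only inside it is unused, and warns, in every build where the conditional compiles out; declaring it inside the guard limits it to the configuration that actually reads it.

#include <cstdio>

// Hypothetical feature macro standing in for MEDIAPIPE_TFLITE_GL_INFERENCE.
#define FEATURE_GL_PATH 1

void Configure(int max_num_channels) {
  const bool include_alpha = (max_num_channels == 4);
#if FEATURE_GL_PATH
  // Declared inside the guard: in builds where FEATURE_GL_PATH is 0 the
  // variable does not exist at all, so -Wunused-variable cannot fire.
  // Declaring it above the #if, as before this commit, would leave it
  // unused in those builds.
  const bool single_channel = (max_num_channels == 1);
  std::printf("include_alpha=%d single_channel=%d\n", include_alpha,
              single_channel);
#else
  std::printf("include_alpha=%d\n", include_alpha);
#endif
}

int main() { Configure(4); }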
@@ -111,7 +111,8 @@ typedef CVOpenGLESTextureCacheRef CVTextureCacheType;
 - (CVMetalTextureCacheRef)mtlTextureCache {
   @synchronized(self) {
     if (!_mtlTextureCache) {
-      CVReturn err = CVMetalTextureCacheCreate(NULL, NULL, self.mtlDevice, NULL, &_mtlTextureCache);
+      CVReturn __unused err =
+          CVMetalTextureCacheCreate(NULL, NULL, self.mtlDevice, NULL, &_mtlTextureCache);
       NSAssert(err == kCVReturnSuccess, @"Error at CVMetalTextureCacheCreate %d", err);
       // TODO: register and flush metal caches too.
     }