
[FFmpeg-devel,V2,2/7] libavfilter/vf_sr: refine code to remove keyword 'else'

Message ID: 1556158441-15075-1-git-send-email-yejun.guo@intel.com
State: New

Commit Message

Guo, Yejun April 25, 2019, 2:14 a.m. UTC
Remove 'else' since the 'if' branch always ends with a 'return',
so the code stays cleaner for later maintenance.

Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
---
 libavfilter/vf_sr.c | 143 ++++++++++++++++++++++++++--------------------------
 1 file changed, 71 insertions(+), 72 deletions(-)
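
For context, here is a minimal illustration of the pattern the commit message describes (a sketch with invented function names, not code from vf_sr.c): once the 'if' branch always returns, the 'else' only adds a level of nesting.

    #include <stdio.h>

    static void do_real_work(void) { puts("working"); }

    /* Before: the 'else' adds a nesting level even though the 'if'
     * branch always leaves the function. */
    static int setup_with_else(int result)
    {
        if (result != 0) {
            fprintf(stderr, "setup failed\n");
            return result;
        } else {
            do_real_work();
            return 0;
        }
    }

    /* After: with the early return in place, the 'else' is redundant
     * and the main path stays at the function's top level. */
    static int setup_without_else(int result)
    {
        if (result != 0) {
            fprintf(stderr, "setup failed\n");
            return result;
        }
        do_real_work();
        return 0;
    }

The patch applies exactly this transformation to config_props(), which is why the bulk of the diff is a one-level re-indentation of otherwise unchanged code.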

Comments

Pedro Arthur April 29, 2019, 5:35 p.m. UTC | #1
On Wed, Apr 24, 2019 at 23:14, Guo, Yejun <yejun.guo@intel.com> wrote:
>
> Remove 'else' since the 'if' branch always ends with a 'return',
> so the code stays cleaner for later maintenance.
>
> Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
> ---
>  libavfilter/vf_sr.c | 143 ++++++++++++++++++++++++++--------------------------
>  1 file changed, 71 insertions(+), 72 deletions(-)
>
> [...]

LGTM.

Carl Eugen Hoyos April 29, 2019, 5:57 p.m. UTC | #2
2019-04-25 4:14 GMT+02:00, Guo, Yejun <yejun.guo@intel.com>:

> +    }
> +    else{

Please try to fix this.

Carl Eugen
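
The comment does not spell out the exact objection, but the quoted hunk leaves an 'else' whose body is a single nested 'if', and the brace placement differs from the '} else {' form more commonly used in FFmpeg code. Assuming that is what is meant, one possible follow-up (a sketch against the patched code, not a tested change) collapses the two into an 'else if' and removes another indentation level:

        sr_context->sws_slice_h = inlink->h;
    } else if (inlink->format != AV_PIX_FMT_GRAY8) {
        sws_src_h = sr_context->input.height;
        /* ... rest of the non-GRAY8 branch unchanged ... */
    }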

Patch

diff --git a/libavfilter/vf_sr.c b/libavfilter/vf_sr.c
index 6423d2e..9bb0fc5 100644
--- a/libavfilter/vf_sr.c
+++ b/libavfilter/vf_sr.c
@@ -127,88 +127,87 @@  static int config_props(AVFilterLink *inlink)
         av_log(context, AV_LOG_ERROR, "could not set input and output for the model\n");
         return AVERROR(EIO);
     }
-    else{
-        if (sr_context->input.height != sr_context->output.height || sr_context->input.width != sr_context->output.width){
-            sr_context->input.width = inlink->w;
-            sr_context->input.height = inlink->h;
-            result = (sr_context->model->set_input_output)(sr_context->model->model, &sr_context->input, &sr_context->output);
-            if (result != DNN_SUCCESS){
-                av_log(context, AV_LOG_ERROR, "could not set input and output for the model\n");
-                return AVERROR(EIO);
-            }
-            sr_context->scale_factor = 0;
+
+    if (sr_context->input.height != sr_context->output.height || sr_context->input.width != sr_context->output.width){
+        sr_context->input.width = inlink->w;
+        sr_context->input.height = inlink->h;
+        result = (sr_context->model->set_input_output)(sr_context->model->model, &sr_context->input, &sr_context->output);
+        if (result != DNN_SUCCESS){
+            av_log(context, AV_LOG_ERROR, "could not set input and output for the model\n");
+            return AVERROR(EIO);
         }
-        outlink->h = sr_context->output.height;
-        outlink->w = sr_context->output.width;
-        sr_context->sws_contexts[1] = sws_getContext(sr_context->input.width, sr_context->input.height, AV_PIX_FMT_GRAY8,
-                                                     sr_context->input.width, sr_context->input.height, AV_PIX_FMT_GRAYF32,
-                                                     0, NULL, NULL, NULL);
-        sr_context->sws_input_linesize = sr_context->input.width << 2;
-        sr_context->sws_contexts[2] = sws_getContext(sr_context->output.width, sr_context->output.height, AV_PIX_FMT_GRAYF32,
-                                                     sr_context->output.width, sr_context->output.height, AV_PIX_FMT_GRAY8,
-                                                     0, NULL, NULL, NULL);
-        sr_context->sws_output_linesize = sr_context->output.width << 2;
-        if (!sr_context->sws_contexts[1] || !sr_context->sws_contexts[2]){
-            av_log(context, AV_LOG_ERROR, "could not create SwsContext for conversions\n");
+        sr_context->scale_factor = 0;
+    }
+    outlink->h = sr_context->output.height;
+    outlink->w = sr_context->output.width;
+    sr_context->sws_contexts[1] = sws_getContext(sr_context->input.width, sr_context->input.height, AV_PIX_FMT_GRAY8,
+                                                 sr_context->input.width, sr_context->input.height, AV_PIX_FMT_GRAYF32,
+                                                 0, NULL, NULL, NULL);
+    sr_context->sws_input_linesize = sr_context->input.width << 2;
+    sr_context->sws_contexts[2] = sws_getContext(sr_context->output.width, sr_context->output.height, AV_PIX_FMT_GRAYF32,
+                                                 sr_context->output.width, sr_context->output.height, AV_PIX_FMT_GRAY8,
+                                                 0, NULL, NULL, NULL);
+    sr_context->sws_output_linesize = sr_context->output.width << 2;
+    if (!sr_context->sws_contexts[1] || !sr_context->sws_contexts[2]){
+        av_log(context, AV_LOG_ERROR, "could not create SwsContext for conversions\n");
+        return AVERROR(ENOMEM);
+    }
+    if (sr_context->scale_factor){
+        sr_context->sws_contexts[0] = sws_getContext(inlink->w, inlink->h, inlink->format,
+                                                     outlink->w, outlink->h, outlink->format,
+                                                     SWS_BICUBIC, NULL, NULL, NULL);
+        if (!sr_context->sws_contexts[0]){
+            av_log(context, AV_LOG_ERROR, "could not create SwsContext for scaling\n");
             return AVERROR(ENOMEM);
         }
-        if (sr_context->scale_factor){
-            sr_context->sws_contexts[0] = sws_getContext(inlink->w, inlink->h, inlink->format,
-                                                         outlink->w, outlink->h, outlink->format,
+        sr_context->sws_slice_h = inlink->h;
+    }
+    else{
+        if (inlink->format != AV_PIX_FMT_GRAY8){
+            sws_src_h = sr_context->input.height;
+            sws_src_w = sr_context->input.width;
+            sws_dst_h = sr_context->output.height;
+            sws_dst_w = sr_context->output.width;
+
+            switch (inlink->format){
+            case AV_PIX_FMT_YUV420P:
+                sws_src_h = AV_CEIL_RSHIFT(sws_src_h, 1);
+                sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 1);
+                sws_dst_h = AV_CEIL_RSHIFT(sws_dst_h, 1);
+                sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 1);
+                break;
+            case AV_PIX_FMT_YUV422P:
+                sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 1);
+                sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 1);
+                break;
+            case AV_PIX_FMT_YUV444P:
+                break;
+            case AV_PIX_FMT_YUV410P:
+                sws_src_h = AV_CEIL_RSHIFT(sws_src_h, 2);
+                sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 2);
+                sws_dst_h = AV_CEIL_RSHIFT(sws_dst_h, 2);
+                sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 2);
+                break;
+            case AV_PIX_FMT_YUV411P:
+                sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 2);
+                sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 2);
+                break;
+            default:
+                av_log(context, AV_LOG_ERROR, "could not create SwsContext for scaling for given input pixel format");
+                return AVERROR(EIO);
+            }
+            sr_context->sws_contexts[0] = sws_getContext(sws_src_w, sws_src_h, AV_PIX_FMT_GRAY8,
+                                                         sws_dst_w, sws_dst_h, AV_PIX_FMT_GRAY8,
                                                          SWS_BICUBIC, NULL, NULL, NULL);
             if (!sr_context->sws_contexts[0]){
                 av_log(context, AV_LOG_ERROR, "could not create SwsContext for scaling\n");
                 return AVERROR(ENOMEM);
             }
-            sr_context->sws_slice_h = inlink->h;
+            sr_context->sws_slice_h = sws_src_h;
         }
-        else{
-            if (inlink->format != AV_PIX_FMT_GRAY8){
-                sws_src_h = sr_context->input.height;
-                sws_src_w = sr_context->input.width;
-                sws_dst_h = sr_context->output.height;
-                sws_dst_w = sr_context->output.width;
-
-                switch (inlink->format){
-                case AV_PIX_FMT_YUV420P:
-                    sws_src_h = AV_CEIL_RSHIFT(sws_src_h, 1);
-                    sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 1);
-                    sws_dst_h = AV_CEIL_RSHIFT(sws_dst_h, 1);
-                    sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 1);
-                    break;
-                case AV_PIX_FMT_YUV422P:
-                    sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 1);
-                    sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 1);
-                    break;
-                case AV_PIX_FMT_YUV444P:
-                    break;
-                case AV_PIX_FMT_YUV410P:
-                    sws_src_h = AV_CEIL_RSHIFT(sws_src_h, 2);
-                    sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 2);
-                    sws_dst_h = AV_CEIL_RSHIFT(sws_dst_h, 2);
-                    sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 2);
-                    break;
-                case AV_PIX_FMT_YUV411P:
-                    sws_src_w = AV_CEIL_RSHIFT(sws_src_w, 2);
-                    sws_dst_w = AV_CEIL_RSHIFT(sws_dst_w, 2);
-                    break;
-                default:
-                    av_log(context, AV_LOG_ERROR, "could not create SwsContext for scaling for given input pixel format");
-                    return AVERROR(EIO);
-                }
-                sr_context->sws_contexts[0] = sws_getContext(sws_src_w, sws_src_h, AV_PIX_FMT_GRAY8,
-                                                             sws_dst_w, sws_dst_h, AV_PIX_FMT_GRAY8,
-                                                             SWS_BICUBIC, NULL, NULL, NULL);
-                if (!sr_context->sws_contexts[0]){
-                    av_log(context, AV_LOG_ERROR, "could not create SwsContext for scaling\n");
-                    return AVERROR(ENOMEM);
-                }
-                sr_context->sws_slice_h = sws_src_h;
-            }
-        }
-
-        return 0;
     }
+
+    return 0;
 }
 
 static int filter_frame(AVFilterLink *inlink, AVFrame *in)