
[FFmpeg-devel,2/7] libavfilter: Code style fixes for pointers in DNN module and sr filter.

Message ID CAAeE=qqFxmyY4nE6rgDQyW-sQLewTLB15Bjz=dFx1H2t3uf4Vw@mail.gmail.com
State New

Commit Message

Sergey Lavrushkin Aug. 6, 2018, 9:11 p.m. UTC
Updated patch.
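
For reference, the whole change is mechanical: the '*' in pointer declarations is attached to
the variable or function name rather than to the type, matching the prevailing libavfilter
style. A minimal before/after illustration (the identifiers below are made up; only the star
placement matters):

    /* before */
    DNNModel* model;
    static float* get_buffer(const char* name);

    /* after */
    DNNModel *model;
    static float *get_buffer(const char *name);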

2018-08-06 17:55 GMT+03:00 Pedro Arthur <bygrandao@gmail.com>:

> 2018-08-02 15:52 GMT-03:00 Sergey Lavrushkin <dualfal@gmail.com>:
> > ---
> >  libavfilter/dnn_backend_native.c |  84 +++++++++++++++---------------
> >  libavfilter/dnn_backend_native.h |   8 +--
> >  libavfilter/dnn_backend_tf.c     | 108 +++++++++++++++++++--------------------
> >  libavfilter/dnn_backend_tf.h     |   8 +--
> >  libavfilter/dnn_espcn.h          |   6 +--
> >  libavfilter/dnn_interface.c      |   4 +-
> >  libavfilter/dnn_interface.h      |  16 +++---
> >  libavfilter/dnn_srcnn.h          |   6 +--
> >  libavfilter/vf_sr.c              |  60 +++++++++++-----------
> >  9 files changed, 150 insertions(+), 150 deletions(-)
> >
> > diff --git a/libavfilter/dnn_backend_native.c b/libavfilter/dnn_backend_native.c
> > index 3e6b86280d..baefea7fcb 100644
> > --- a/libavfilter/dnn_backend_native.c
> > +++ b/libavfilter/dnn_backend_native.c
> > @@ -34,15 +34,15 @@ typedef enum {RELU, TANH, SIGMOID} ActivationFunc;
> >
> >  typedef struct Layer{
> >      LayerType type;
> > -    float* output;
> > -    void* params;
> > +    float *output;
> > +    void *params;
> >  } Layer;
> >
> >  typedef struct ConvolutionalParams{
> >      int32_t input_num, output_num, kernel_size;
> >      ActivationFunc activation;
> > -    float* kernel;
> > -    float* biases;
> > +    float *kernel;
> > +    float *biases;
> >  } ConvolutionalParams;
> >
> >  typedef struct InputParams{
> > @@ -55,16 +55,16 @@ typedef struct DepthToSpaceParams{
> >
> >  // Represents simple feed-forward convolutional network.
> >  typedef struct ConvolutionalNetwork{
> > -    Layer* layers;
> > +    Layer *layers;
> >      int32_t layers_num;
> >  } ConvolutionalNetwork;
> >
> > -static DNNReturnType set_input_output_native(void* model, DNNData* input, DNNData* output)
> > +static DNNReturnType set_input_output_native(void *model, DNNData *input, DNNData *output)
> >  {
> > -    ConvolutionalNetwork* network = (ConvolutionalNetwork*)model;
> > -    InputParams* input_params;
> > -    ConvolutionalParams* conv_params;
> > -    DepthToSpaceParams* depth_to_space_params;
> > +    ConvolutionalNetwork *network = (ConvolutionalNetwork *)model;
> > +    InputParams *input_params;
> > +    ConvolutionalParams *conv_params;
> > +    DepthToSpaceParams *depth_to_space_params;
> >      int cur_width, cur_height, cur_channels;
> >      int32_t layer;
> >
> > @@ -72,7 +72,7 @@ static DNNReturnType set_input_output_native(void* model, DNNData* input, DNNDat
> >          return DNN_ERROR;
> >      }
> >      else{
> > -        input_params = (InputParams*)network->layers[0].params;
> > +        input_params = (InputParams *)network->layers[0].params;
> >          input_params->width = cur_width = input->width;
> >          input_params->height = cur_height = input->height;
> >          input_params->channels = cur_channels = input->channels;
> > @@ -88,14 +88,14 @@ static DNNReturnType set_input_output_native(void* model, DNNData* input, DNNDat
> >      for (layer = 1; layer < network->layers_num; ++layer){
> >          switch (network->layers[layer].type){
> >          case CONV:
> > -            conv_params = (ConvolutionalParams*)network->layers[layer].params;
> > +            conv_params = (ConvolutionalParams *)network->layers[layer].params;
> >              if (conv_params->input_num != cur_channels){
> >                  return DNN_ERROR;
> >              }
> >              cur_channels = conv_params->output_num;
> >              break;
> >          case DEPTH_TO_SPACE:
> > -            depth_to_space_params = (DepthToSpaceParams*)network->layers[layer].params;
> > +            depth_to_space_params = (DepthToSpaceParams *)network->layers[layer].params;
> >              if (cur_channels % (depth_to_space_params->block_size * depth_to_space_params->block_size) != 0){
> >                  return DNN_ERROR;
> >              }
> > @@ -127,16 +127,16 @@ static DNNReturnType set_input_output_native(void* model, DNNData* input, DNNDat
> >  // layers_num,layer_type,layer_parameterss,layer_type,layer_parameters...
> >  // For CONV layer: activation_function, input_num, output_num, kernel_size, kernel, biases
> >  // For DEPTH_TO_SPACE layer: block_size
> > -DNNModel* ff_dnn_load_model_native(const char* model_filename)
> > +DNNModel *ff_dnn_load_model_native(const char *model_filename)
> >  {
> > -    DNNModel* model = NULL;
> > -    ConvolutionalNetwork* network = NULL;
> > -    AVIOContext* model_file_context;
> > +    DNNModel *model = NULL;
> > +    ConvolutionalNetwork *network = NULL;
> > +    AVIOContext *model_file_context;
> >      int file_size, dnn_size, kernel_size, i;
> >      int32_t layer;
> >      LayerType layer_type;
> > -    ConvolutionalParams* conv_params;
> > -    DepthToSpaceParams* depth_to_space_params;
> > +    ConvolutionalParams *conv_params;
> > +    DepthToSpaceParams *depth_to_space_params;
> >
> >      model = av_malloc(sizeof(DNNModel));
> >      if (!model){
> > @@ -155,7 +155,7 @@ DNNModel* ff_dnn_load_model_native(const char* model_filename)
> >          av_freep(&model);
> >          return NULL;
> >      }
> > -    model->model = (void*)network;
> > +    model->model = (void *)network;
> >
> >      network->layers_num = 1 + (int32_t)avio_rl32(model_file_context);
> >      dnn_size = 4;
> > @@ -251,10 +251,10 @@ DNNModel* ff_dnn_load_model_native(const char* model_filename)
> >      return model;
> >  }
> >
> > -static int set_up_conv_layer(Layer* layer, const float* kernel, const float* biases, ActivationFunc activation,
> > +static int set_up_conv_layer(Layer *layer, const float *kernel, const float *biases, ActivationFunc activation,
> >                               int32_t input_num, int32_t output_num, int32_t size)
> >  {
> > -    ConvolutionalParams* conv_params;
> > +    ConvolutionalParams *conv_params;
> >      int kernel_size;
> >
> >      conv_params = av_malloc(sizeof(ConvolutionalParams));
> > @@ -282,11 +282,11 @@ static int set_up_conv_layer(Layer* layer, const float* kernel, const float* bia
> >      return DNN_SUCCESS;
> >  }
> >
> > -DNNModel* ff_dnn_load_default_model_native(DNNDefaultModel model_type)
> > +DNNModel *ff_dnn_load_default_model_native(DNNDefaultModel model_type)
> >  {
> > -    DNNModel* model = NULL;
> > -    ConvolutionalNetwork* network = NULL;
> > -    DepthToSpaceParams* depth_to_space_params;
> > +    DNNModel *model = NULL;
> > +    ConvolutionalNetwork *network = NULL;
> > +    DepthToSpaceParams *depth_to_space_params;
> >      int32_t layer;
> >
> >      model = av_malloc(sizeof(DNNModel));
> > @@ -299,7 +299,7 @@ DNNModel* ff_dnn_load_default_model_native(DNNDefaultModel model_type)
> >          av_freep(&model);
> >          return NULL;
> >      }
> > -    model->model = (void*)network;
> > +    model->model = (void *)network;
> >
> >      switch (model_type){
> >      case DNN_SRCNN:
> > @@ -365,7 +365,7 @@ DNNModel* ff_dnn_load_default_model_native(DNNDefaultModel model_type)
> >
> >  #define CLAMP_TO_EDGE(x, w) ((x) < 0 ? 0 : ((x) >= (w) ? (w - 1) : (x)))
> >
> > -static void convolve(const float* input, float* output, const ConvolutionalParams* conv_params, int width, int height)
> > +static void convolve(const float *input, float *output, const ConvolutionalParams *conv_params, int width, int height)
> >  {
> >      int y, x, n_filter, ch, kernel_y, kernel_x;
> >      int radius = conv_params->kernel_size >> 1;
> > @@ -403,7 +403,7 @@ static void convolve(const float* input, float* output, const ConvolutionalParam
> >      }
> >  }
> >
> > -static void depth_to_space(const float* input, float* output, int block_size, int width, int height, int channels)
> > +static void depth_to_space(const float *input, float *output, int block_size, int width, int height, int channels)
> >  {
> >      int y, x, by, bx, ch;
> >      int new_channels = channels / (block_size * block_size);
> > @@ -426,20 +426,20 @@ static void depth_to_space(const float* input, float* output, int block_size, in
> >      }
> >  }
> >
> > -DNNReturnType ff_dnn_execute_model_native(const DNNModel* model)
> > +DNNReturnType ff_dnn_execute_model_native(const DNNModel *model)
> >  {
> > -    ConvolutionalNetwork* network = (ConvolutionalNetwork*)model->model;
> > +    ConvolutionalNetwork *network = (ConvolutionalNetwork *)model->model;
> >      int cur_width, cur_height, cur_channels;
> >      int32_t layer;
> > -    InputParams* input_params;
> > -    ConvolutionalParams* conv_params;
> > -    DepthToSpaceParams* depth_to_space_params;
> > +    InputParams *input_params;
> > +    ConvolutionalParams *conv_params;
> > +    DepthToSpaceParams *depth_to_space_params;
> >
> >      if (network->layers_num <= 0 || network->layers[0].type != INPUT || !network->layers[0].output){
> >          return DNN_ERROR;
> >      }
> >      else{
> > -        input_params = (InputParams*)network->layers[0].params;
> > +        input_params = (InputParams *)network->layers[0].params;
> >          cur_width = input_params->width;
> >          cur_height = input_params->height;
> >          cur_channels = input_params->channels;
> > @@ -451,12 +451,12 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel* model)
> >          }
> >          switch (network->layers[layer].type){
> >          case CONV:
> > -            conv_params = (ConvolutionalParams*)network->layers[layer].params;
> > +            conv_params = (ConvolutionalParams *)network->layers[layer].params;
> >              convolve(network->layers[layer - 1].output, network->layers[layer].output, conv_params, cur_width, cur_height);
> >              cur_channels = conv_params->output_num;
> >              break;
> >          case DEPTH_TO_SPACE:
> > -            depth_to_space_params = (DepthToSpaceParams*)network->layers[layer].params;
> > +            depth_to_space_params = (DepthToSpaceParams *)network->layers[layer].params;
> >              depth_to_space(network->layers[layer - 1].output, network->layers[layer].output,
> >                             depth_to_space_params->block_size, cur_width, cur_height, cur_channels);
> >              cur_height *= depth_to_space_params->block_size;
> > @@ -471,19 +471,19 @@ DNNReturnType ff_dnn_execute_model_native(const DNNModel* model)
> >      return DNN_SUCCESS;
> >  }
> >
> > -void ff_dnn_free_model_native(DNNModel** model)
> > +void ff_dnn_free_model_native(DNNModel **model)
> >  {
> > -    ConvolutionalNetwork* network;
> > -    ConvolutionalParams* conv_params;
> > +    ConvolutionalNetwork *network;
> > +    ConvolutionalParams *conv_params;
> >      int32_t layer;
> >
> >      if (*model)
> >      {
> > -        network = (ConvolutionalNetwork*)(*model)->model;
> > +        network = (ConvolutionalNetwork *)(*model)->model;
> >          for (layer = 0; layer < network->layers_num; ++layer){
> >              av_freep(&network->layers[layer].output);
> >              if (network->layers[layer].type == CONV){
> > -                conv_params = (ConvolutionalParams*)network->layers[layer].params;
> > +                conv_params = (ConvolutionalParams *)network->layers[layer].params;
> >                  av_freep(&conv_params->kernel);
> >                  av_freep(&conv_params->biases);
> >              }
> > diff --git a/libavfilter/dnn_backend_native.h b/libavfilter/dnn_backend_native.h
> > index 599c1302e2..adbb7088b4 100644
> > --- a/libavfilter/dnn_backend_native.h
> > +++ b/libavfilter/dnn_backend_native.h
> > @@ -29,12 +29,12 @@
> >
> >  #include "dnn_interface.h"
> >
> > -DNNModel* ff_dnn_load_model_native(const char* model_filename);
> > +DNNModel *ff_dnn_load_model_native(const char *model_filename);
> >
> > -DNNModel* ff_dnn_load_default_model_native(DNNDefaultModel model_type);
> > +DNNModel *ff_dnn_load_default_model_native(DNNDefaultModel model_type);
> >
> > -DNNReturnType ff_dnn_execute_model_native(const DNNModel* model);
> > +DNNReturnType ff_dnn_execute_model_native(const DNNModel *model);
> >
> > -void ff_dnn_free_model_native(DNNModel** model);
> > +void ff_dnn_free_model_native(DNNModel **model);
> >
> >  #endif
> > diff --git a/libavfilter/dnn_backend_tf.c b/libavfilter/dnn_backend_tf.c
> > index 21516471c3..6307c794a5 100644
> > --- a/libavfilter/dnn_backend_tf.c
> > +++ b/libavfilter/dnn_backend_tf.c
> > @@ -31,24 +31,24 @@
> >  #include <tensorflow/c/c_api.h>
> >
> >  typedef struct TFModel{
> > -    TF_Graph* graph;
> > -    TF_Session* session;
> > -    TF_Status* status;
> > +    TF_Graph *graph;
> > +    TF_Session *session;
> > +    TF_Status *status;
> >      TF_Output input, output;
> > -    TF_Tensor* input_tensor;
> > -    DNNData* output_data;
> > +    TF_Tensor *input_tensor;
> > +    DNNData *output_data;
> >  } TFModel;
> >
> > -static void free_buffer(void* data, size_t length)
> > +static void free_buffer(void *data, size_t length)
> >  {
> >      av_freep(&data);
> >  }
> >
> > -static TF_Buffer* read_graph(const char* model_filename)
> > +static TF_Buffer *read_graph(const char *model_filename)
> >  {
> > -    TF_Buffer* graph_buf;
> > -    unsigned char* graph_data = NULL;
> > -    AVIOContext* model_file_context;
> > +    TF_Buffer *graph_buf;
> > +    unsigned char *graph_data = NULL;
> > +    AVIOContext *model_file_context;
> >      long size, bytes_read;
> >
> >      if (avio_open(&model_file_context, model_filename, AVIO_FLAG_READ) < 0){
> > @@ -70,20 +70,20 @@ static TF_Buffer* read_graph(const char* model_filename)
> >      }
> >
> >      graph_buf = TF_NewBuffer();
> > -    graph_buf->data = (void*)graph_data;
> > +    graph_buf->data = (void *)graph_data;
> >      graph_buf->length = size;
> >      graph_buf->data_deallocator = free_buffer;
> >
> >      return graph_buf;
> >  }
> >
> > -static DNNReturnType set_input_output_tf(void* model, DNNData* input, DNNData* output)
> > +static DNNReturnType set_input_output_tf(void *model, DNNData *input, DNNData *output)
> >  {
> > -    TFModel* tf_model = (TFModel*)model;
> > +    TFModel *tf_model = (TFModel *)model;
> >      int64_t input_dims[] = {1, input->height, input->width, input->channels};
> > -    TF_SessionOptions* sess_opts;
> > -    const TF_Operation* init_op = TF_GraphOperationByName(tf_model->graph, "init");
> > -    TF_Tensor* output_tensor;
> > +    TF_SessionOptions *sess_opts;
> > +    const TF_Operation *init_op = TF_GraphOperationByName(tf_model->graph, "init");
> > +    TF_Tensor *output_tensor;
> >
> >      // Input operation should be named 'x'
> >      tf_model->input.oper = TF_GraphOperationByName(tf_model->graph, "x");
> > @@ -99,7 +99,7 @@ static DNNReturnType set_input_output_tf(void* model, DNNData* input, DNNData* o
> >      if (!tf_model->input_tensor){
> >          return DNN_ERROR;
> >      }
> > -    input->data = (float*)TF_TensorData(tf_model->input_tensor);
> > +    input->data = (float *)TF_TensorData(tf_model->input_tensor);
> >
> >      // Output operation should be named 'y'
> >      tf_model->output.oper = TF_GraphOperationByName(tf_model->graph, "y");
> > @@ -156,12 +156,12 @@ static DNNReturnType set_input_output_tf(void* model, DNNData* input, DNNData* o
> >      return DNN_SUCCESS;
> >  }
> >
> > -DNNModel* ff_dnn_load_model_tf(const char* model_filename)
> > +DNNModel *ff_dnn_load_model_tf(const char *model_filename)
> >  {
> > -    DNNModel* model = NULL;
> > -    TFModel* tf_model = NULL;
> > -    TF_Buffer* graph_def;
> > -    TF_ImportGraphDefOptions* graph_opts;
> > +    DNNModel *model = NULL;
> > +    TFModel *tf_model = NULL;
> > +    TF_Buffer *graph_def;
> > +    TF_ImportGraphDefOptions *graph_opts;
> >
> >      model = av_malloc(sizeof(DNNModel));
> >      if (!model){
> > @@ -197,25 +197,25 @@ DNNModel* ff_dnn_load_model_tf(const char* model_filename)
> >          return NULL;
> >      }
> >
> > -    model->model = (void*)tf_model;
> > +    model->model = (void *)tf_model;
> >      model->set_input_output = &set_input_output_tf;
> >
> >      return model;
> >  }
> >
> > -static TF_Operation* add_pad_op(TFModel* tf_model, TF_Operation* input_op, int32_t pad)
> > +static TF_Operation *add_pad_op(TFModel *tf_model, TF_Operation *input_op, int32_t pad)
> >  {
> > -    TF_OperationDescription* op_desc;
> > -    TF_Operation* op;
> > -    TF_Tensor* tensor;
> > +    TF_OperationDescription *op_desc;
> > +    TF_Operation *op;
> > +    TF_Tensor *tensor;
> >      TF_Output input;
> > -    int32_t* pads;
> > +    int32_t *pads;
> >      int64_t pads_shape[] = {4, 2};
> >
> >      op_desc = TF_NewOperation(tf_model->graph, "Const", "pads");
> >      TF_SetAttrType(op_desc, "dtype", TF_INT32);
> >      tensor = TF_AllocateTensor(TF_INT32, pads_shape, 2, 4 * 2 * sizeof(int32_t));
> > -    pads = (int32_t*)TF_TensorData(tensor);
> > +    pads = (int32_t *)TF_TensorData(tensor);
> >      pads[0] = 0;   pads[1] = 0;
> >      pads[2] = pad; pads[3] = pad;
> >      pads[4] = pad; pads[5] = pad;
> > @@ -246,11 +246,11 @@ static TF_Operation* add_pad_op(TFModel* tf_model, TF_Operation* input_op, int32
> >      return op;
> >  }
> >
> > -static TF_Operation* add_const_op(TFModel* tf_model, const float* values, const int64_t* dims, int dims_len, const char* name)
> > +static TF_Operation *add_const_op(TFModel *tf_model, const float *values, const int64_t *dims, int dims_len, const char *name)
> >  {
> >      int dim;
> > -    TF_OperationDescription* op_desc;
> > -    TF_Tensor* tensor;
> > +    TF_OperationDescription *op_desc;
> > +    TF_Tensor *tensor;
> >      size_t len;
> >
> >      op_desc = TF_NewOperation(tf_model->graph, "Const", name);
> > @@ -269,25 +269,25 @@ static TF_Operation* add_const_op(TFModel* tf_model, const float* values, const
> >      return TF_FinishOperation(op_desc, tf_model->status);
> >  }
> >
> > -static TF_Operation* add_conv_layers(TFModel* tf_model, const float** consts, const int64_t** consts_dims,
> > -                                     const int* consts_dims_len, const char** activations,
> > -                                     TF_Operation* input_op, int layers_num)
> > +static TF_Operation* add_conv_layers(TFModel *tf_model, const float **consts, const int64_t **consts_dims,
> > +                                     const int *consts_dims_len, const char **activations,
> > +                                     TF_Operation *input_op, int layers_num)
> >  {
> >      int i;
> > -    TF_OperationDescription* op_desc;
> > -    TF_Operation* op;
> > -    TF_Operation* transpose_op;
> > +    TF_OperationDescription *op_desc;
> > +    TF_Operation *op;
> > +    TF_Operation *transpose_op;
> >      TF_Output input;
> >      int64_t strides[] = {1, 1, 1, 1};
> > -    int32_t* transpose_perm;
> > -    TF_Tensor* tensor;
> > +    int32_t *transpose_perm;
> > +    TF_Tensor *tensor;
> >      int64_t transpose_perm_shape[] = {4};
> >      char name_buffer[256];
> >
> >      op_desc = TF_NewOperation(tf_model->graph, "Const", "transpose_perm");
> >      TF_SetAttrType(op_desc, "dtype", TF_INT32);
> >      tensor = TF_AllocateTensor(TF_INT32, transpose_perm_shape, 1, 4 * sizeof(int32_t));
> > -    transpose_perm = (int32_t*)TF_TensorData(tensor);
> > +    transpose_perm = (int32_t *)TF_TensorData(tensor);
> >      transpose_perm[0] = 1;
> >      transpose_perm[1] = 2;
> >      transpose_perm[2] = 3;
> > @@ -368,13 +368,13 @@ static TF_Operation* add_conv_layers(TFModel* tf_model, const float** consts, co
> >      return input_op;
> >  }
> >
> > -DNNModel* ff_dnn_load_default_model_tf(DNNDefaultModel model_type)
> > +DNNModel *ff_dnn_load_default_model_tf(DNNDefaultModel model_type)
> >  {
> > -    DNNModel* model = NULL;
> > -    TFModel* tf_model = NULL;
> > -    TF_OperationDescription* op_desc;
> > -    TF_Operation* op;
> > -    TF_Operation* const_ops_buffer[6];
> > +    DNNModel *model = NULL;
> > +    TFModel *tf_model = NULL;
> > +    TF_OperationDescription *op_desc;
> > +    TF_Operation *op;
> > +    TF_Operation *const_ops_buffer[6];
> >      TF_Output input;
> >      int64_t input_shape[] = {1, -1, -1, 1};
> >
> > @@ -460,16 +460,16 @@ DNNModel* ff_dnn_load_default_model_tf(DNNDefaultModel model_type)
> >          CLEANUP_ON_ERROR(tf_model, model);
> >      }
> >
> > -    model->model = (void*)tf_model;
> > +    model->model = (void *)tf_model;
> >      model->set_input_output = &set_input_output_tf;
> >
> >      return model;
> >  }
> >
> > -DNNReturnType ff_dnn_execute_model_tf(const DNNModel* model)
> > +DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model)
> >  {
> > -    TFModel* tf_model = (TFModel*)model->model;
> > -    TF_Tensor* output_tensor;
> > +    TFModel *tf_model = (TFModel *)model->model;
> > +    TF_Tensor *output_tensor;
> >
> >      TF_SessionRun(tf_model->session, NULL,
> >                    &tf_model->input, &tf_model->input_tensor, 1,
> > @@ -489,12 +489,12 @@ DNNReturnType ff_dnn_execute_model_tf(const DNNModel* model)
> >      }
> >  }
> >
> > -void ff_dnn_free_model_tf(DNNModel** model)
> > +void ff_dnn_free_model_tf(DNNModel **model)
> >  {
> > -    TFModel* tf_model;
> > +    TFModel *tf_model;
> >
> >      if (*model){
> > -        tf_model = (TFModel*)(*model)->model;
> > +        tf_model = (TFModel *)(*model)->model;
> >          if (tf_model->graph){
> >              TF_DeleteGraph(tf_model->graph);
> >          }
> > diff --git a/libavfilter/dnn_backend_tf.h b/libavfilter/dnn_backend_tf.h
> > index 08e4a568b3..357a82d948 100644
> > --- a/libavfilter/dnn_backend_tf.h
> > +++ b/libavfilter/dnn_backend_tf.h
> > @@ -29,12 +29,12 @@
> >
> >  #include "dnn_interface.h"
> >
> > -DNNModel* ff_dnn_load_model_tf(const char* model_filename);
> > +DNNModel *ff_dnn_load_model_tf(const char *model_filename);
> >
> > -DNNModel* ff_dnn_load_default_model_tf(DNNDefaultModel model_type);
> > +DNNModel *ff_dnn_load_default_model_tf(DNNDefaultModel model_type);
> >
> > -DNNReturnType ff_dnn_execute_model_tf(const DNNModel* model);
> > +DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model);
> >
> > -void ff_dnn_free_model_tf(DNNModel** model);
> > +void ff_dnn_free_model_tf(DNNModel **model);
> >
> >  #endif
> > diff --git a/libavfilter/dnn_espcn.h b/libavfilter/dnn_espcn.h
> > index 315ecf031d..a0dd61cd0d 100644
> > --- a/libavfilter/dnn_espcn.h
> > +++ b/libavfilter/dnn_espcn.h
> > @@ -5398,7 +5398,7 @@ static const long int espcn_conv3_bias_dims[] = {
> >      4
> >  };
> >
> > -static const float* espcn_consts[] = {
> > +static const float *espcn_consts[] = {
> >      espcn_conv1_kernel,
> >      espcn_conv1_bias,
> >      espcn_conv2_kernel,
> > @@ -5407,7 +5407,7 @@ static const float* espcn_consts[] = {
> >      espcn_conv3_bias
> >  };
> >
> > -static const long int* espcn_consts_dims[] = {
> > +static const long int *espcn_consts_dims[] = {
> >      espcn_conv1_kernel_dims,
> >      espcn_conv1_bias_dims,
> >      espcn_conv2_kernel_dims,
> > @@ -5429,7 +5429,7 @@ static const char espcn_tanh[] = "Tanh";
> >
> >  static const char espcn_sigmoid[] = "Sigmoid";
> >
> > -static const char* espcn_activations[] = {
> > +static const char *espcn_activations[] = {
> >      espcn_tanh,
> >      espcn_tanh,
> >      espcn_sigmoid
> > diff --git a/libavfilter/dnn_interface.c b/libavfilter/dnn_interface.c
> > index 87c90526be..ca7d6d1ea5 100644
> > --- a/libavfilter/dnn_interface.c
> > +++ b/libavfilter/dnn_interface.c
> > @@ -28,9 +28,9 @@
> >  #include "dnn_backend_tf.h"
> >  #include "libavutil/mem.h"
> >
> > -DNNModule* ff_get_dnn_module(DNNBackendType backend_type)
> > +DNNModule *ff_get_dnn_module(DNNBackendType backend_type)
> >  {
> > -    DNNModule* dnn_module;
> > +    DNNModule *dnn_module;
> >
> >      dnn_module = av_malloc(sizeof(DNNModule));
> >      if(!dnn_module){
> > diff --git a/libavfilter/dnn_interface.h b/libavfilter/dnn_interface.h
> > index 6b820d1d5b..a69717ae62 100644
> > --- a/libavfilter/dnn_interface.h
> > +++ b/libavfilter/dnn_interface.h
> > @@ -33,31 +33,31 @@ typedef enum {DNN_NATIVE, DNN_TF} DNNBackendType;
> >  typedef enum {DNN_SRCNN, DNN_ESPCN} DNNDefaultModel;
> >
> >  typedef struct DNNData{
> > -    float* data;
> > +    float *data;
> >      int width, height, channels;
> >  } DNNData;
> >
> >  typedef struct DNNModel{
> >      // Stores model that can be different for different backends.
> > -    void* model;
> > +    void *model;
> >      // Sets model input and output, while allocating additional memory for intermediate calculations.
> >      // Should be called at least once before model execution.
> > -    DNNReturnType (*set_input_output)(void* model, DNNData* input, DNNData* output);
> > +    DNNReturnType (*set_input_output)(void *model, DNNData *input, DNNData *output);
> >  } DNNModel;
> >
> >  // Stores pointers to functions for loading, executing, freeing DNN models for one of the backends.
> >  typedef struct DNNModule{
> >      // Loads model and parameters from given file. Returns NULL if it is not possible.
> > -    DNNModel* (*load_model)(const char* model_filename);
> > +    DNNModel *(*load_model)(const char *model_filename);
> >      // Loads one of the default models
> > -    DNNModel* (*load_default_model)(DNNDefaultModel model_type);
> > +    DNNModel *(*load_default_model)(DNNDefaultModel model_type);
> >      // Executes model with specified input and output. Returns DNN_ERROR otherwise.
> > -    DNNReturnType (*execute_model)(const DNNModel* model);
> > +    DNNReturnType (*execute_model)(const DNNModel *model);
> >      // Frees memory allocated for model.
> > -    void (*free_model)(DNNModel** model);
> > +    void (*free_model)(DNNModel **model);
> >  } DNNModule;
> >
> >  // Initializes DNNModule depending on chosen backend.
> > -DNNModule* ff_get_dnn_module(DNNBackendType backend_type);
> > +DNNModule *ff_get_dnn_module(DNNBackendType backend_type);
> >
> >  #endif
> > diff --git a/libavfilter/dnn_srcnn.h b/libavfilter/dnn_srcnn.h
> > index 7ec11654b3..26143654b8 100644
> > --- a/libavfilter/dnn_srcnn.h
> > +++ b/libavfilter/dnn_srcnn.h
> > @@ -2110,7 +2110,7 @@ static const long int srcnn_conv3_bias_dims[] = {
> >      1
> >  };
> >
> > -static const float* srcnn_consts[] = {
> > +static const float *srcnn_consts[] = {
> >      srcnn_conv1_kernel,
> >      srcnn_conv1_bias,
> >      srcnn_conv2_kernel,
> > @@ -2119,7 +2119,7 @@ static const float* srcnn_consts[] = {
> >      srcnn_conv3_bias
> >  };
> >
> > -static const long int* srcnn_consts_dims[] = {
> > +static const long int *srcnn_consts_dims[] = {
> >      srcnn_conv1_kernel_dims,
> >      srcnn_conv1_bias_dims,
> >      srcnn_conv2_kernel_dims,
> > @@ -2139,7 +2139,7 @@ static const int srcnn_consts_dims_len[] = {
> >
> >  static const char srcnn_relu[] = "Relu";
> >
> > -static const char* srcnn_activations[] = {
> > +static const char *srcnn_activations[] = {
> >      srcnn_relu,
> >      srcnn_relu,
> >      srcnn_relu
> > diff --git a/libavfilter/vf_sr.c b/libavfilter/vf_sr.c
> > index f3ca9a09a8..944a0e28e7 100644
> > --- a/libavfilter/vf_sr.c
> > +++ b/libavfilter/vf_sr.c
> > @@ -39,13 +39,13 @@ typedef struct SRContext {
> >      const AVClass *class;
> >
> >      SRModel model_type;
> > -    char* model_filename;
> > +    char *model_filename;
> >      DNNBackendType backend_type;
> > -    DNNModule* dnn_module;
> > -    DNNModel* model;
> > +    DNNModule *dnn_module;
> > +    DNNModel *model;
> >      DNNData input, output;
> >      int scale_factor;
> > -    struct SwsContext* sws_context;
> > +    struct SwsContext *sws_context;
> >      int sws_slice_h;
> >  } SRContext;
> >
> > @@ -67,9 +67,9 @@ static const AVOption sr_options[] = {
> >
> >  AVFILTER_DEFINE_CLASS(sr);
> >
> > -static av_cold int init(AVFilterContext* context)
> > +static av_cold int init(AVFilterContext *context)
> >  {
> > -    SRContext* sr_context = context->priv;
> > +    SRContext *sr_context = context->priv;
> >
> >      sr_context->dnn_module = ff_get_dnn_module(sr_context->backend_type);
> >      if (!sr_context->dnn_module){
> > @@ -98,12 +98,12 @@ static av_cold int init(AVFilterContext* context)
> >      return 0;
> >  }
> >
> > -static int query_formats(AVFilterContext* context)
> > +static int query_formats(AVFilterContext *context)
> >  {
> >      const enum AVPixelFormat pixel_formats[] = {AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
> >                                                  AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_GRAY8,
> >                                                  AV_PIX_FMT_NONE};
> > -    AVFilterFormats* formats_list;
> > +    AVFilterFormats *formats_list;
> >
> >      formats_list = ff_make_format_list(pixel_formats);
> >      if (!formats_list){
> > @@ -113,11 +113,11 @@ static int query_formats(AVFilterContext* context)
> >      return ff_set_common_formats(context, formats_list);
> >  }
> >
> > -static int config_props(AVFilterLink* inlink)
> > +static int config_props(AVFilterLink *inlink)
> >  {
> > -    AVFilterContext* context = inlink->dst;
> > -    SRContext* sr_context = context->priv;
> > -    AVFilterLink* outlink = context->outputs[0];
> > +    AVFilterContext *context = inlink->dst;
> > +    SRContext *sr_context = context->priv;
> > +    AVFilterLink *outlink = context->outputs[0];
> >      DNNReturnType result;
> >      int sws_src_h, sws_src_w, sws_dst_h, sws_dst_w;
> >
> > @@ -202,18 +202,18 @@ static int config_props(AVFilterLink* inlink)
> >  }
> >
> >  typedef struct ThreadData{
> > -    uint8_t* data;
> > +    uint8_t *data;
> >      int data_linesize, height, width;
> >  } ThreadData;
> >
> > -static int uint8_to_float(AVFilterContext* context, void* arg, int jobnr, int nb_jobs)
> > +static int uint8_to_float(AVFilterContext *context, void *arg, int jobnr, int nb_jobs)
> >  {
> > -    SRContext* sr_context = context->priv;
> > -    const ThreadData* td = arg;
> > +    SRContext *sr_context = context->priv;
> > +    const ThreadData *td = arg;
> >      const int slice_start = (td->height *  jobnr     ) / nb_jobs;
> >      const int slice_end   = (td->height * (jobnr + 1)) / nb_jobs;
> > -    const uint8_t* src = td->data + slice_start * td->data_linesize;
> > -    float* dst = sr_context->input.data + slice_start * td->width;
> > +    const uint8_t *src = td->data + slice_start * td->data_linesize;
> > +    float *dst = sr_context->input.data + slice_start * td->width;
> >      int y, x;
> >
> >      for (y = slice_start; y < slice_end; ++y){
> > @@ -227,14 +227,14 @@ static int uint8_to_float(AVFilterContext* context, void* arg, int jobnr, int nb
> >      return 0;
> >  }
> >
> > -static int float_to_uint8(AVFilterContext* context, void* arg, int jobnr, int nb_jobs)
> > +static int float_to_uint8(AVFilterContext *context, void *arg, int jobnr, int nb_jobs)
> >  {
> > -    SRContext* sr_context = context->priv;
> > -    const ThreadData* td = arg;
> > +    SRContext *sr_context = context->priv;
> > +    const ThreadData *td = arg;
> >      const int slice_start = (td->height *  jobnr     ) / nb_jobs;
> >      const int slice_end   = (td->height * (jobnr + 1)) / nb_jobs;
> > -    const float* src = sr_context->output.data + slice_start * td->width;
> > -    uint8_t* dst = td->data + slice_start * td->data_linesize;
> > +    const float *src = sr_context->output.data + slice_start * td->width;
> > +    uint8_t *dst = td->data + slice_start * td->data_linesize;
> >      int y, x;
> >
> >      for (y = slice_start; y < slice_end; ++y){
> > @@ -248,12 +248,12 @@ static int float_to_uint8(AVFilterContext* context, void* arg, int jobnr, int nb
> >      return 0;
> >  }
> >
> > -static int filter_frame(AVFilterLink* inlink, AVFrame* in)
> > +static int filter_frame(AVFilterLink *inlink, AVFrame *in)
> >  {
> > -    AVFilterContext* context = inlink->dst;
> > -    SRContext* sr_context = context->priv;
> > -    AVFilterLink* outlink = context->outputs[0];
> > -    AVFrame* out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
> > +    AVFilterContext *context = inlink->dst;
> > +    SRContext *sr_context = context->priv;
> > +    AVFilterLink *outlink = context->outputs[0];
> > +    AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
> >      ThreadData td;
> >      int nb_threads;
> >      DNNReturnType dnn_result;
> > @@ -307,9 +307,9 @@ static int filter_frame(AVFilterLink* inlink, AVFrame* in)
> >      return ff_filter_frame(outlink, out);
> >  }
> >
> > -static av_cold void uninit(AVFilterContext* context)
> > +static av_cold void uninit(AVFilterContext *context)
> >  {
> > -    SRContext* sr_context = context->priv;
> > +    SRContext *sr_context = context->priv;
> >
> >      if (sr_context->dnn_module){
> >          (sr_context->dnn_module->free_model)(&sr_context->model);
> > --
> > 2.14.1
> >
> > _______________________________________________
> > ffmpeg-devel mailing list
> > ffmpeg-devel@ffmpeg.org
> > http://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>
> LGTM.
> I intend to push it by tomorrow.
>
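
For context, the DNNModule/DNNModel interface touched in dnn_interface.h above is driven the
same way vf_sr.c does. A minimal usage sketch, assuming only the declarations quoted above
(the helper name run_srcnn and the 320x240 geometry are made up for illustration; error
handling is abbreviated):

    /* Hypothetical driver for the DNN module API declared in dnn_interface.h. */
    static int run_srcnn(void)
    {
        DNNModule *dnn_module = ff_get_dnn_module(DNN_NATIVE);
        DNNModel *model;
        DNNData input, output;

        if (!dnn_module)
            return AVERROR(ENOMEM);
        model = (dnn_module->load_default_model)(DNN_SRCNN);
        if (!model)
            return AVERROR(EIO);

        /* set_input_output allocates intermediate buffers and exposes
           input.data / output.data for the requested input geometry. */
        input.width    = 320;
        input.height   = 240;
        input.channels = 1;
        if ((model->set_input_output)(model->model, &input, &output) != DNN_SUCCESS)
            return AVERROR(EIO);

        /* ... fill input.data with float samples ... */
        if ((dnn_module->execute_model)(model) != DNN_SUCCESS)
            return AVERROR(EIO);
        /* ... read the result from output.data ... */

        (dnn_module->free_model)(&model);
        av_freep(&dnn_module);
        return 0;
    }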

Comments

Pedro Arthur Aug. 7, 2018, 4:31 p.m. UTC | #1
2018-08-06 18:11 GMT-03:00 Sergey Lavrushkin <dualfal@gmail.com>:
> Updated patch.
>
> 2018-08-06 17:55 GMT+03:00 Pedro Arthur <bygrandao@gmail.com>:
>
>> 2018-08-02 15:52 GMT-03:00 Sergey Lavrushkin <dualfal@gmail.com>:
>> > ---
>> >  libavfilter/dnn_backend_native.c |  84 +++++++++++++++---------------
>> >  libavfilter/dnn_backend_native.h |   8 +--
>> >  libavfilter/dnn_backend_tf.c     | 108 +++++++++++++++++++-----------
>> ---------
>> >  libavfilter/dnn_backend_tf.h     |   8 +--
>> >  libavfilter/dnn_espcn.h          |   6 +--
>> >  libavfilter/dnn_interface.c      |   4 +-
>> >  libavfilter/dnn_interface.h      |  16 +++---
>> >  libavfilter/dnn_srcnn.h          |   6 +--
>> >  libavfilter/vf_sr.c              |  60 +++++++++++-----------
>> >  9 files changed, 150 insertions(+), 150 deletions(-)
>> >
>> > diff --git a/libavfilter/dnn_backend_native.c b/libavfilter/dnn_backend_
>> native.c
>> > index 3e6b86280d..baefea7fcb 100644
>> > --- a/libavfilter/dnn_backend_native.c
>> > +++ b/libavfilter/dnn_backend_native.c
>> > @@ -34,15 +34,15 @@ typedef enum {RELU, TANH, SIGMOID} ActivationFunc;
>> >
>> >  typedef struct Layer{
>> >      LayerType type;
>> > -    float* output;
>> > -    void* params;
>> > +    float *output;
>> > +    void *params;
>> >  } Layer;
>> >
>> >  typedef struct ConvolutionalParams{
>> >      int32_t input_num, output_num, kernel_size;
>> >      ActivationFunc activation;
>> > -    float* kernel;
>> > -    float* biases;
>> > +    float *kernel;
>> > +    float *biases;
>> >  } ConvolutionalParams;
>> >
>> >  typedef struct InputParams{
>> > @@ -55,16 +55,16 @@ typedef struct DepthToSpaceParams{
>> >
>> >  // Represents simple feed-forward convolutional network.
>> >  typedef struct ConvolutionalNetwork{
>> > -    Layer* layers;
>> > +    Layer *layers;
>> >      int32_t layers_num;
>> >  } ConvolutionalNetwork;
>> >
>> > -static DNNReturnType set_input_output_native(void* model, DNNData*
>> input, DNNData* output)
>> > +static DNNReturnType set_input_output_native(void *model, DNNData
>> *input, DNNData *output)
>> >  {
>> > -    ConvolutionalNetwork* network = (ConvolutionalNetwork*)model;
>> > -    InputParams* input_params;
>> > -    ConvolutionalParams* conv_params;
>> > -    DepthToSpaceParams* depth_to_space_params;
>> > +    ConvolutionalNetwork *network = (ConvolutionalNetwork *)model;
>> > +    InputParams *input_params;
>> > +    ConvolutionalParams *conv_params;
>> > +    DepthToSpaceParams *depth_to_space_params;
>> >      int cur_width, cur_height, cur_channels;
>> >      int32_t layer;
>> >
>> > @@ -72,7 +72,7 @@ static DNNReturnType set_input_output_native(void*
>> model, DNNData* input, DNNDat
>> >          return DNN_ERROR;
>> >      }
>> >      else{
>> > -        input_params = (InputParams*)network->layers[0].params;
>> > +        input_params = (InputParams *)network->layers[0].params;
>> >          input_params->width = cur_width = input->width;
>> >          input_params->height = cur_height = input->height;
>> >          input_params->channels = cur_channels = input->channels;
>> > @@ -88,14 +88,14 @@ static DNNReturnType set_input_output_native(void*
>> model, DNNData* input, DNNDat
>> >      for (layer = 1; layer < network->layers_num; ++layer){
>> >          switch (network->layers[layer].type){
>> >          case CONV:
>> > -            conv_params = (ConvolutionalParams*)network-
>> >layers[layer].params;
>> > +            conv_params = (ConvolutionalParams *)network->layers[layer].
>> params;
>> >              if (conv_params->input_num != cur_channels){
>> >                  return DNN_ERROR;
>> >              }
>> >              cur_channels = conv_params->output_num;
>> >              break;
>> >          case DEPTH_TO_SPACE:
>> > -            depth_to_space_params = (DepthToSpaceParams*)network->
>> layers[layer].params;
>> > +            depth_to_space_params = (DepthToSpaceParams
>> *)network->layers[layer].params;
>> >              if (cur_channels % (depth_to_space_params->block_size *
>> depth_to_space_params->block_size) != 0){
>> >                  return DNN_ERROR;
>> >              }
>> > @@ -127,16 +127,16 @@ static DNNReturnType set_input_output_native(void*
>> model, DNNData* input, DNNDat
>> >  // layers_num,layer_type,layer_parameterss,layer_type,layer_
>> parameters...
>> >  // For CONV layer: activation_function, input_num, output_num,
>> kernel_size, kernel, biases
>> >  // For DEPTH_TO_SPACE layer: block_size
>> > -DNNModel* ff_dnn_load_model_native(const char* model_filename)
>> > +DNNModel *ff_dnn_load_model_native(const char *model_filename)
>> >  {
>> > -    DNNModel* model = NULL;
>> > -    ConvolutionalNetwork* network = NULL;
>> > -    AVIOContext* model_file_context;
>> > +    DNNModel *model = NULL;
>> > +    ConvolutionalNetwork *network = NULL;
>> > +    AVIOContext *model_file_context;
>> >      int file_size, dnn_size, kernel_size, i;
>> >      int32_t layer;
>> >      LayerType layer_type;
>> > -    ConvolutionalParams* conv_params;
>> > -    DepthToSpaceParams* depth_to_space_params;
>> > +    ConvolutionalParams *conv_params;
>> > +    DepthToSpaceParams *depth_to_space_params;
>> >
>> >      model = av_malloc(sizeof(DNNModel));
>> >      if (!model){
>> > @@ -155,7 +155,7 @@ DNNModel* ff_dnn_load_model_native(const char*
>> model_filename)
>> >          av_freep(&model);
>> >          return NULL;
>> >      }
>> > -    model->model = (void*)network;
>> > +    model->model = (void *)network;
>> >
>> >      network->layers_num = 1 + (int32_t)avio_rl32(model_file_context);
>> >      dnn_size = 4;
>> > @@ -251,10 +251,10 @@ DNNModel* ff_dnn_load_model_native(const char*
>> model_filename)
>> >      return model;
>> >  }
>> >
>> > -static int set_up_conv_layer(Layer* layer, const float* kernel, const
>> float* biases, ActivationFunc activation,
>> > +static int set_up_conv_layer(Layer *layer, const float *kernel, const
>> float *biases, ActivationFunc activation,
>> >                               int32_t input_num, int32_t output_num,
>> int32_t size)
>> >  {
>> > -    ConvolutionalParams* conv_params;
>> > +    ConvolutionalParams *conv_params;
>> >      int kernel_size;
>> >
>> >      conv_params = av_malloc(sizeof(ConvolutionalParams));
>> > @@ -282,11 +282,11 @@ static int set_up_conv_layer(Layer* layer, const
>> float* kernel, const float* bia
>> >      return DNN_SUCCESS;
>> >  }
>> >
>> > -DNNModel* ff_dnn_load_default_model_native(DNNDefaultModel model_type)
>> > +DNNModel *ff_dnn_load_default_model_native(DNNDefaultModel model_type)
>> >  {
>> > -    DNNModel* model = NULL;
>> > -    ConvolutionalNetwork* network = NULL;
>> > -    DepthToSpaceParams* depth_to_space_params;
>> > +    DNNModel *model = NULL;
>> > +    ConvolutionalNetwork *network = NULL;
>> > +    DepthToSpaceParams *depth_to_space_params;
>> >      int32_t layer;
>> >
>> >      model = av_malloc(sizeof(DNNModel));
>> > @@ -299,7 +299,7 @@ DNNModel* ff_dnn_load_default_model_native(DNNDefaultModel
>> model_type)
>> >          av_freep(&model);
>> >          return NULL;
>> >      }
>> > -    model->model = (void*)network;
>> > +    model->model = (void *)network;
>> >
>> >      switch (model_type){
>> >      case DNN_SRCNN:
>> > @@ -365,7 +365,7 @@ DNNModel* ff_dnn_load_default_model_native(DNNDefaultModel
>> model_type)
>> >
>> >  #define CLAMP_TO_EDGE(x, w) ((x) < 0 ? 0 : ((x) >= (w) ? (w - 1) : (x)))
>> >
>> > -static void convolve(const float* input, float* output, const
>> ConvolutionalParams* conv_params, int width, int height)
>> > +static void convolve(const float *input, float *output, const
>> ConvolutionalParams *conv_params, int width, int height)
>> >  {
>> >      int y, x, n_filter, ch, kernel_y, kernel_x;
>> >      int radius = conv_params->kernel_size >> 1;
>> > @@ -403,7 +403,7 @@ static void convolve(const float* input, float*
>> output, const ConvolutionalParam
>> >      }
>> >  }
>> >
>> > -static void depth_to_space(const float* input, float* output, int
>> block_size, int width, int height, int channels)
>> > +static void depth_to_space(const float *input, float *output, int
>> block_size, int width, int height, int channels)
>> >  {
>> >      int y, x, by, bx, ch;
>> >      int new_channels = channels / (block_size * block_size);
>> > @@ -426,20 +426,20 @@ static void depth_to_space(const float* input,
>> float* output, int block_size, in
>> >      }
>> >  }
>> >
>> > -DNNReturnType ff_dnn_execute_model_native(const DNNModel* model)
>> > +DNNReturnType ff_dnn_execute_model_native(const DNNModel *model)
>> >  {
>> > -    ConvolutionalNetwork* network = (ConvolutionalNetwork*)model->
>> model;
>> > +    ConvolutionalNetwork *network = (ConvolutionalNetwork
>> *)model->model;
>> >      int cur_width, cur_height, cur_channels;
>> >      int32_t layer;
>> > -    InputParams* input_params;
>> > -    ConvolutionalParams* conv_params;
>> > -    DepthToSpaceParams* depth_to_space_params;
>> > +    InputParams *input_params;
>> > +    ConvolutionalParams *conv_params;
>> > +    DepthToSpaceParams *depth_to_space_params;
>> >
>> >      if (network->layers_num <= 0 || network->layers[0].type != INPUT ||
>> !network->layers[0].output){
>> >          return DNN_ERROR;
>> >      }
>> >      else{
>> > -        input_params = (InputParams*)network->layers[0].params;
>> > +        input_params = (InputParams *)network->layers[0].params;
>> >          cur_width = input_params->width;
>> >          cur_height = input_params->height;
>> >          cur_channels = input_params->channels;
>> > @@ -451,12 +451,12 @@ DNNReturnType ff_dnn_execute_model_native(const
>> DNNModel* model)
>> >          }
>> >          switch (network->layers[layer].type){
>> >          case CONV:
>> > -            conv_params = (ConvolutionalParams*)network-
>> >layers[layer].params;
>> > +            conv_params = (ConvolutionalParams *)network->layers[layer].
>> params;
>> >              convolve(network->layers[layer - 1].output,
>> network->layers[layer].output, conv_params, cur_width, cur_height);
>> >              cur_channels = conv_params->output_num;
>> >              break;
>> >          case DEPTH_TO_SPACE:
>> > -            depth_to_space_params = (DepthToSpaceParams*)network->
>> layers[layer].params;
>> > +            depth_to_space_params = (DepthToSpaceParams
>> *)network->layers[layer].params;
>> >              depth_to_space(network->layers[layer - 1].output,
>> network->layers[layer].output,
>> >                             depth_to_space_params->block_size,
>> cur_width, cur_height, cur_channels);
>> >              cur_height *= depth_to_space_params->block_size;
>> > @@ -471,19 +471,19 @@ DNNReturnType ff_dnn_execute_model_native(const
>> DNNModel* model)
>> >      return DNN_SUCCESS;
>> >  }
>> >
>> > -void ff_dnn_free_model_native(DNNModel** model)
>> > +void ff_dnn_free_model_native(DNNModel **model)
>> >  {
>> > -    ConvolutionalNetwork* network;
>> > -    ConvolutionalParams* conv_params;
>> > +    ConvolutionalNetwork *network;
>> > +    ConvolutionalParams *conv_params;
>> >      int32_t layer;
>> >
>> >      if (*model)
>> >      {
>> > -        network = (ConvolutionalNetwork*)(*model)->model;
>> > +        network = (ConvolutionalNetwork *)(*model)->model;
>> >          for (layer = 0; layer < network->layers_num; ++layer){
>> >              av_freep(&network->layers[layer].output);
>> >              if (network->layers[layer].type == CONV){
>> > -                conv_params = (ConvolutionalParams*)network-
>> >layers[layer].params;
>> > +                conv_params = (ConvolutionalParams
>> *)network->layers[layer].params;
>> >                  av_freep(&conv_params->kernel);
>> >                  av_freep(&conv_params->biases);
>> >              }
>> > diff --git a/libavfilter/dnn_backend_native.h b/libavfilter/dnn_backend_
>> native.h
>> > index 599c1302e2..adbb7088b4 100644
>> > --- a/libavfilter/dnn_backend_native.h
>> > +++ b/libavfilter/dnn_backend_native.h
>> > @@ -29,12 +29,12 @@
>> >
>> >  #include "dnn_interface.h"
>> >
>> > -DNNModel* ff_dnn_load_model_native(const char* model_filename);
>> > +DNNModel *ff_dnn_load_model_native(const char *model_filename);
>> >
>> > -DNNModel* ff_dnn_load_default_model_native(DNNDefaultModel model_type);
>> > +DNNModel *ff_dnn_load_default_model_native(DNNDefaultModel model_type);
>> >
>> > -DNNReturnType ff_dnn_execute_model_native(const DNNModel* model);
>> > +DNNReturnType ff_dnn_execute_model_native(const DNNModel *model);
>> >
>> > -void ff_dnn_free_model_native(DNNModel** model);
>> > +void ff_dnn_free_model_native(DNNModel **model);
>> >
>> >  #endif
>> > diff --git a/libavfilter/dnn_backend_tf.c b/libavfilter/dnn_backend_tf.c
>> > index 21516471c3..6307c794a5 100644
>> > --- a/libavfilter/dnn_backend_tf.c
>> > +++ b/libavfilter/dnn_backend_tf.c
>> > @@ -31,24 +31,24 @@
>> >  #include <tensorflow/c/c_api.h>
>> >
>> >  typedef struct TFModel{
>> > -    TF_Graph* graph;
>> > -    TF_Session* session;
>> > -    TF_Status* status;
>> > +    TF_Graph *graph;
>> > +    TF_Session *session;
>> > +    TF_Status *status;
>> >      TF_Output input, output;
>> > -    TF_Tensor* input_tensor;
>> > -    DNNData* output_data;
>> > +    TF_Tensor *input_tensor;
>> > +    DNNData *output_data;
>> >  } TFModel;
>> >
>> > -static void free_buffer(void* data, size_t length)
>> > +static void free_buffer(void *data, size_t length)
>> >  {
>> >      av_freep(&data);
>> >  }
>> >
>> > -static TF_Buffer* read_graph(const char* model_filename)
>> > +static TF_Buffer *read_graph(const char *model_filename)
>> >  {
>> > -    TF_Buffer* graph_buf;
>> > -    unsigned char* graph_data = NULL;
>> > -    AVIOContext* model_file_context;
>> > +    TF_Buffer *graph_buf;
>> > +    unsigned char *graph_data = NULL;
>> > +    AVIOContext *model_file_context;
>> >      long size, bytes_read;
>> >
>> >      if (avio_open(&model_file_context, model_filename, AVIO_FLAG_READ)
>> < 0){
>> > @@ -70,20 +70,20 @@ static TF_Buffer* read_graph(const char*
>> model_filename)
>> >      }
>> >
>> >      graph_buf = TF_NewBuffer();
>> > -    graph_buf->data = (void*)graph_data;
>> > +    graph_buf->data = (void *)graph_data;
>> >      graph_buf->length = size;
>> >      graph_buf->data_deallocator = free_buffer;
>> >
>> >      return graph_buf;
>> >  }
>> >
>> > -static DNNReturnType set_input_output_tf(void* model, DNNData* input,
>> DNNData* output)
>> > +static DNNReturnType set_input_output_tf(void *model, DNNData *input,
>> DNNData *output)
>> >  {
>> > -    TFModel* tf_model = (TFModel*)model;
>> > +    TFModel *tf_model = (TFModel *)model;
>> >      int64_t input_dims[] = {1, input->height, input->width,
>> input->channels};
>> > -    TF_SessionOptions* sess_opts;
>> > -    const TF_Operation* init_op = TF_GraphOperationByName(tf_model->graph,
>> "init");
>> > -    TF_Tensor* output_tensor;
>> > +    TF_SessionOptions *sess_opts;
>> > +    const TF_Operation *init_op = TF_GraphOperationByName(tf_model->graph,
>> "init");
>> > +    TF_Tensor *output_tensor;
>> >
>> >      // Input operation should be named 'x'
>> >      tf_model->input.oper = TF_GraphOperationByName(tf_model->graph,
>> "x");
>> > @@ -99,7 +99,7 @@ static DNNReturnType set_input_output_tf(void* model,
>> DNNData* input, DNNData* o
>> >      if (!tf_model->input_tensor){
>> >          return DNN_ERROR;
>> >      }
>> > -    input->data = (float*)TF_TensorData(tf_model->input_tensor);
>> > +    input->data = (float *)TF_TensorData(tf_model->input_tensor);
>> >
>> >      // Output operation should be named 'y'
>> >      tf_model->output.oper = TF_GraphOperationByName(tf_model->graph,
>> "y");
>> > @@ -156,12 +156,12 @@ static DNNReturnType set_input_output_tf(void*
>> model, DNNData* input, DNNData* o
>> >      return DNN_SUCCESS;
>> >  }
>> >
>> > -DNNModel* ff_dnn_load_model_tf(const char* model_filename)
>> > +DNNModel *ff_dnn_load_model_tf(const char *model_filename)
>> >  {
>> > -    DNNModel* model = NULL;
>> > -    TFModel* tf_model = NULL;
>> > -    TF_Buffer* graph_def;
>> > -    TF_ImportGraphDefOptions* graph_opts;
>> > +    DNNModel *model = NULL;
>> > +    TFModel *tf_model = NULL;
>> > +    TF_Buffer *graph_def;
>> > +    TF_ImportGraphDefOptions *graph_opts;
>> >
>> >      model = av_malloc(sizeof(DNNModel));
>> >      if (!model){
>> > @@ -197,25 +197,25 @@ DNNModel* ff_dnn_load_model_tf(const char*
>> model_filename)
>> >          return NULL;
>> >      }
>> >
>> > -    model->model = (void*)tf_model;
>> > +    model->model = (void *)tf_model;
>> >      model->set_input_output = &set_input_output_tf;
>> >
>> >      return model;
>> >  }
>> >
>> > -static TF_Operation* add_pad_op(TFModel* tf_model, TF_Operation*
>> input_op, int32_t pad)
>> > +static TF_Operation *add_pad_op(TFModel *tf_model, TF_Operation
>> *input_op, int32_t pad)
>> >  {
>> > -    TF_OperationDescription* op_desc;
>> > -    TF_Operation* op;
>> > -    TF_Tensor* tensor;
>> > +    TF_OperationDescription *op_desc;
>> > +    TF_Operation *op;
>> > +    TF_Tensor *tensor;
>> >      TF_Output input;
>> > -    int32_t* pads;
>> > +    int32_t *pads;
>> >      int64_t pads_shape[] = {4, 2};
>> >
>> >      op_desc = TF_NewOperation(tf_model->graph, "Const", "pads");
>> >      TF_SetAttrType(op_desc, "dtype", TF_INT32);
>> >      tensor = TF_AllocateTensor(TF_INT32, pads_shape, 2, 4 * 2 *
>> sizeof(int32_t));
>> > -    pads = (int32_t*)TF_TensorData(tensor);
>> > +    pads = (int32_t *)TF_TensorData(tensor);
>> >      pads[0] = 0;   pads[1] = 0;
>> >      pads[2] = pad; pads[3] = pad;
>> >      pads[4] = pad; pads[5] = pad;
>> > @@ -246,11 +246,11 @@ static TF_Operation* add_pad_op(TFModel* tf_model,
>> TF_Operation* input_op, int32
>> >      return op;
>> >  }
>> >
>> > -static TF_Operation* add_const_op(TFModel* tf_model, const float*
>> values, const int64_t* dims, int dims_len, const char* name)
>> > +static TF_Operation *add_const_op(TFModel *tf_model, const float
>> *values, const int64_t *dims, int dims_len, const char *name)
>> >  {
>> >      int dim;
>> > -    TF_OperationDescription* op_desc;
>> > -    TF_Tensor* tensor;
>> > +    TF_OperationDescription *op_desc;
>> > +    TF_Tensor *tensor;
>> >      size_t len;
>> >
>> >      op_desc = TF_NewOperation(tf_model->graph, "Const", name);
>> > @@ -269,25 +269,25 @@ static TF_Operation* add_const_op(TFModel*
>> tf_model, const float* values, const
>> >      return TF_FinishOperation(op_desc, tf_model->status);
>> >  }
>> >
>> > -static TF_Operation* add_conv_layers(TFModel* tf_model, const float**
>> consts, const int64_t** consts_dims,
>> > -                                     const int* consts_dims_len, const
>> char** activations,
>> > -                                     TF_Operation* input_op, int
>> layers_num)
>> > +static TF_Operation* add_conv_layers(TFModel *tf_model, const float
>> **consts, const int64_t **consts_dims,
>> > +                                     const int *consts_dims_len, const
>> char **activations,
>> > +                                     TF_Operation *input_op, int
>> layers_num)
>> >  {
>> >      int i;
>> > -    TF_OperationDescription* op_desc;
>> > -    TF_Operation* op;
>> > -    TF_Operation* transpose_op;
>> > +    TF_OperationDescription *op_desc;
>> > +    TF_Operation *op;
>> > +    TF_Operation *transpose_op;
>> >      TF_Output input;
>> >      int64_t strides[] = {1, 1, 1, 1};
>> > -    int32_t* transpose_perm;
>> > -    TF_Tensor* tensor;
>> > +    int32_t *transpose_perm;
>> > +    TF_Tensor *tensor;
>> >      int64_t transpose_perm_shape[] = {4};
>> >      char name_buffer[256];
>> >
>> >      op_desc = TF_NewOperation(tf_model->graph, "Const",
>> "transpose_perm");
>> >      TF_SetAttrType(op_desc, "dtype", TF_INT32);
>> >      tensor = TF_AllocateTensor(TF_INT32, transpose_perm_shape, 1, 4 *
>> sizeof(int32_t));
>> > -    transpose_perm = (int32_t*)TF_TensorData(tensor);
>> > +    transpose_perm = (int32_t *)TF_TensorData(tensor);
>> >      transpose_perm[0] = 1;
>> >      transpose_perm[1] = 2;
>> >      transpose_perm[2] = 3;
>> > @@ -368,13 +368,13 @@ static TF_Operation* add_conv_layers(TFModel*
>> tf_model, const float** consts, co
>> >      return input_op;
>> >  }
>> >
>> > -DNNModel* ff_dnn_load_default_model_tf(DNNDefaultModel model_type)
>> > +DNNModel *ff_dnn_load_default_model_tf(DNNDefaultModel model_type)
>> >  {
>> > -    DNNModel* model = NULL;
>> > -    TFModel* tf_model = NULL;
>> > -    TF_OperationDescription* op_desc;
>> > -    TF_Operation* op;
>> > -    TF_Operation* const_ops_buffer[6];
>> > +    DNNModel *model = NULL;
>> > +    TFModel *tf_model = NULL;
>> > +    TF_OperationDescription *op_desc;
>> > +    TF_Operation *op;
>> > +    TF_Operation *const_ops_buffer[6];
>> >      TF_Output input;
>> >      int64_t input_shape[] = {1, -1, -1, 1};
>> >
>> > @@ -460,16 +460,16 @@ DNNModel* ff_dnn_load_default_model_tf(DNNDefaultModel
>> model_type)
>> >          CLEANUP_ON_ERROR(tf_model, model);
>> >      }
>> >
>> > -    model->model = (void*)tf_model;
>> > +    model->model = (void *)tf_model;
>> >      model->set_input_output = &set_input_output_tf;
>> >
>> >      return model;
>> >  }
>> >
>> > -DNNReturnType ff_dnn_execute_model_tf(const DNNModel* model)
>> > +DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model)
>> >  {
>> > -    TFModel* tf_model = (TFModel*)model->model;
>> > -    TF_Tensor* output_tensor;
>> > +    TFModel *tf_model = (TFModel *)model->model;
>> > +    TF_Tensor *output_tensor;
>> >
>> >      TF_SessionRun(tf_model->session, NULL,
>> >                    &tf_model->input, &tf_model->input_tensor, 1,
>> > @@ -489,12 +489,12 @@ DNNReturnType ff_dnn_execute_model_tf(const
>> DNNModel* model)
>> >      }
>> >  }
>> >
>> > -void ff_dnn_free_model_tf(DNNModel** model)
>> > +void ff_dnn_free_model_tf(DNNModel **model)
>> >  {
>> > -    TFModel* tf_model;
>> > +    TFModel *tf_model;
>> >
>> >      if (*model){
>> > -        tf_model = (TFModel*)(*model)->model;
>> > +        tf_model = (TFModel *)(*model)->model;
>> >          if (tf_model->graph){
>> >              TF_DeleteGraph(tf_model->graph);
>> >          }
>> > diff --git a/libavfilter/dnn_backend_tf.h b/libavfilter/dnn_backend_tf.h
>> > index 08e4a568b3..357a82d948 100644
>> > --- a/libavfilter/dnn_backend_tf.h
>> > +++ b/libavfilter/dnn_backend_tf.h
>> > @@ -29,12 +29,12 @@
>> >
>> >  #include "dnn_interface.h"
>> >
>> > -DNNModel* ff_dnn_load_model_tf(const char* model_filename);
>> > +DNNModel *ff_dnn_load_model_tf(const char *model_filename);
>> >
>> > -DNNModel* ff_dnn_load_default_model_tf(DNNDefaultModel model_type);
>> > +DNNModel *ff_dnn_load_default_model_tf(DNNDefaultModel model_type);
>> >
>> > -DNNReturnType ff_dnn_execute_model_tf(const DNNModel* model);
>> > +DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model);
>> >
>> > -void ff_dnn_free_model_tf(DNNModel** model);
>> > +void ff_dnn_free_model_tf(DNNModel **model);
>> >
>> >  #endif
>> > diff --git a/libavfilter/dnn_espcn.h b/libavfilter/dnn_espcn.h
>> > index 315ecf031d..a0dd61cd0d 100644
>> > --- a/libavfilter/dnn_espcn.h
>> > +++ b/libavfilter/dnn_espcn.h
>> > @@ -5398,7 +5398,7 @@ static const long int espcn_conv3_bias_dims[] = {
>> >      4
>> >  };
>> >
>> > -static const float* espcn_consts[] = {
>> > +static const float *espcn_consts[] = {
>> >      espcn_conv1_kernel,
>> >      espcn_conv1_bias,
>> >      espcn_conv2_kernel,
>> > @@ -5407,7 +5407,7 @@ static const float* espcn_consts[] = {
>> >      espcn_conv3_bias
>> >  };
>> >
>> > -static const long int* espcn_consts_dims[] = {
>> > +static const long int *espcn_consts_dims[] = {
>> >      espcn_conv1_kernel_dims,
>> >      espcn_conv1_bias_dims,
>> >      espcn_conv2_kernel_dims,
>> > @@ -5429,7 +5429,7 @@ static const char espcn_tanh[] = "Tanh";
>> >
>> >  static const char espcn_sigmoid[] = "Sigmoid";
>> >
>> > -static const char* espcn_activations[] = {
>> > +static const char *espcn_activations[] = {
>> >      espcn_tanh,
>> >      espcn_tanh,
>> >      espcn_sigmoid
>> > diff --git a/libavfilter/dnn_interface.c b/libavfilter/dnn_interface.c
>> > index 87c90526be..ca7d6d1ea5 100644
>> > --- a/libavfilter/dnn_interface.c
>> > +++ b/libavfilter/dnn_interface.c
>> > @@ -28,9 +28,9 @@
>> >  #include "dnn_backend_tf.h"
>> >  #include "libavutil/mem.h"
>> >
>> > -DNNModule* ff_get_dnn_module(DNNBackendType backend_type)
>> > +DNNModule *ff_get_dnn_module(DNNBackendType backend_type)
>> >  {
>> > -    DNNModule* dnn_module;
>> > +    DNNModule *dnn_module;
>> >
>> >      dnn_module = av_malloc(sizeof(DNNModule));
>> >      if(!dnn_module){
>> > diff --git a/libavfilter/dnn_interface.h b/libavfilter/dnn_interface.h
>> > index 6b820d1d5b..a69717ae62 100644
>> > --- a/libavfilter/dnn_interface.h
>> > +++ b/libavfilter/dnn_interface.h
>> > @@ -33,31 +33,31 @@ typedef enum {DNN_NATIVE, DNN_TF} DNNBackendType;
>> >  typedef enum {DNN_SRCNN, DNN_ESPCN} DNNDefaultModel;
>> >
>> >  typedef struct DNNData{
>> > -    float* data;
>> > +    float *data;
>> >      int width, height, channels;
>> >  } DNNData;
>> >
>> >  typedef struct DNNModel{
>> >      // Stores model that can be different for different backends.
>> > -    void* model;
>> > +    void *model;
>> >      // Sets model input and output, while allocating additional memory for intermediate calculations.
>> >      // Should be called at least once before model execution.
>> > -    DNNReturnType (*set_input_output)(void* model, DNNData* input, DNNData* output);
>> > +    DNNReturnType (*set_input_output)(void *model, DNNData *input, DNNData *output);
>> >  } DNNModel;
>> >
>> >  // Stores pointers to functions for loading, executing, freeing DNN models for one of the backends.
>> >  typedef struct DNNModule{
>> >      // Loads model and parameters from given file. Returns NULL if it is not possible.
>> > -    DNNModel* (*load_model)(const char* model_filename);
>> > +    DNNModel *(*load_model)(const char *model_filename);
>> >      // Loads one of the default models
>> > -    DNNModel* (*load_default_model)(DNNDefaultModel model_type);
>> > +    DNNModel *(*load_default_model)(DNNDefaultModel model_type);
>> >      // Executes model with specified input and output. Returns DNN_ERROR otherwise.
>> > -    DNNReturnType (*execute_model)(const DNNModel* model);
>> > +    DNNReturnType (*execute_model)(const DNNModel *model);
>> >      // Frees memory allocated for model.
>> > -    void (*free_model)(DNNModel** model);
>> > +    void (*free_model)(DNNModel **model);
>> >  } DNNModule;
>> >
>> >  // Initializes DNNModule depending on chosen backend.
>> > -DNNModule* ff_get_dnn_module(DNNBackendType backend_type);
>> > +DNNModule *ff_get_dnn_module(DNNBackendType backend_type);
>> >
>> >  #endif
>> > diff --git a/libavfilter/dnn_srcnn.h b/libavfilter/dnn_srcnn.h
>> > index 7ec11654b3..26143654b8 100644
>> > --- a/libavfilter/dnn_srcnn.h
>> > +++ b/libavfilter/dnn_srcnn.h
>> > @@ -2110,7 +2110,7 @@ static const long int srcnn_conv3_bias_dims[] = {
>> >      1
>> >  };
>> >
>> > -static const float* srcnn_consts[] = {
>> > +static const float *srcnn_consts[] = {
>> >      srcnn_conv1_kernel,
>> >      srcnn_conv1_bias,
>> >      srcnn_conv2_kernel,
>> > @@ -2119,7 +2119,7 @@ static const float* srcnn_consts[] = {
>> >      srcnn_conv3_bias
>> >  };
>> >
>> > -static const long int* srcnn_consts_dims[] = {
>> > +static const long int *srcnn_consts_dims[] = {
>> >      srcnn_conv1_kernel_dims,
>> >      srcnn_conv1_bias_dims,
>> >      srcnn_conv2_kernel_dims,
>> > @@ -2139,7 +2139,7 @@ static const int srcnn_consts_dims_len[] = {
>> >
>> >  static const char srcnn_relu[] = "Relu";
>> >
>> > -static const char* srcnn_activations[] = {
>> > +static const char *srcnn_activations[] = {
>> >      srcnn_relu,
>> >      srcnn_relu,
>> >      srcnn_relu
>> > diff --git a/libavfilter/vf_sr.c b/libavfilter/vf_sr.c
>> > index f3ca9a09a8..944a0e28e7 100644
>> > --- a/libavfilter/vf_sr.c
>> > +++ b/libavfilter/vf_sr.c
>> > @@ -39,13 +39,13 @@ typedef struct SRContext {
>> >      const AVClass *class;
>> >
>> >      SRModel model_type;
>> > -    char* model_filename;
>> > +    char *model_filename;
>> >      DNNBackendType backend_type;
>> > -    DNNModule* dnn_module;
>> > -    DNNModel* model;
>> > +    DNNModule *dnn_module;
>> > +    DNNModel *model;
>> >      DNNData input, output;
>> >      int scale_factor;
>> > -    struct SwsContext* sws_context;
>> > +    struct SwsContext *sws_context;
>> >      int sws_slice_h;
>> >  } SRContext;
>> >
>> > @@ -67,9 +67,9 @@ static const AVOption sr_options[] = {
>> >
>> >  AVFILTER_DEFINE_CLASS(sr);
>> >
>> > -static av_cold int init(AVFilterContext* context)
>> > +static av_cold int init(AVFilterContext *context)
>> >  {
>> > -    SRContext* sr_context = context->priv;
>> > +    SRContext *sr_context = context->priv;
>> >
>> >      sr_context->dnn_module = ff_get_dnn_module(sr_context->backend_type);
>> >      if (!sr_context->dnn_module){
>> > @@ -98,12 +98,12 @@ static av_cold int init(AVFilterContext* context)
>> >      return 0;
>> >  }
>> >
>> > -static int query_formats(AVFilterContext* context)
>> > +static int query_formats(AVFilterContext *context)
>> >  {
>> >      const enum AVPixelFormat pixel_formats[] = {AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
>> >                                                  AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_GRAY8,
>> >                                                  AV_PIX_FMT_NONE};
>> > -    AVFilterFormats* formats_list;
>> > +    AVFilterFormats *formats_list;
>> >
>> >      formats_list = ff_make_format_list(pixel_formats);
>> >      if (!formats_list){
>> > @@ -113,11 +113,11 @@ static int query_formats(AVFilterContext* context)
>> >      return ff_set_common_formats(context, formats_list);
>> >  }
>> >
>> > -static int config_props(AVFilterLink* inlink)
>> > +static int config_props(AVFilterLink *inlink)
>> >  {
>> > -    AVFilterContext* context = inlink->dst;
>> > -    SRContext* sr_context = context->priv;
>> > -    AVFilterLink* outlink = context->outputs[0];
>> > +    AVFilterContext *context = inlink->dst;
>> > +    SRContext *sr_context = context->priv;
>> > +    AVFilterLink *outlink = context->outputs[0];
>> >      DNNReturnType result;
>> >      int sws_src_h, sws_src_w, sws_dst_h, sws_dst_w;
>> >
>> > @@ -202,18 +202,18 @@ static int config_props(AVFilterLink* inlink)
>> >  }
>> >
>> >  typedef struct ThreadData{
>> > -    uint8_t* data;
>> > +    uint8_t *data;
>> >      int data_linesize, height, width;
>> >  } ThreadData;
>> >
>> > -static int uint8_to_float(AVFilterContext* context, void* arg, int jobnr, int nb_jobs)
>> > +static int uint8_to_float(AVFilterContext *context, void *arg, int jobnr, int nb_jobs)
>> >  {
>> > -    SRContext* sr_context = context->priv;
>> > -    const ThreadData* td = arg;
>> > +    SRContext *sr_context = context->priv;
>> > +    const ThreadData *td = arg;
>> >      const int slice_start = (td->height *  jobnr     ) / nb_jobs;
>> >      const int slice_end   = (td->height * (jobnr + 1)) / nb_jobs;
>> > -    const uint8_t* src = td->data + slice_start * td->data_linesize;
>> > -    float* dst = sr_context->input.data + slice_start * td->width;
>> > +    const uint8_t *src = td->data + slice_start * td->data_linesize;
>> > +    float *dst = sr_context->input.data + slice_start * td->width;
>> >      int y, x;
>> >
>> >      for (y = slice_start; y < slice_end; ++y){
>> > @@ -227,14 +227,14 @@ static int uint8_to_float(AVFilterContext* context, void* arg, int jobnr, int nb
>> >      return 0;
>> >  }
>> >
>> > -static int float_to_uint8(AVFilterContext* context, void* arg, int jobnr, int nb_jobs)
>> > +static int float_to_uint8(AVFilterContext *context, void *arg, int jobnr, int nb_jobs)
>> >  {
>> > -    SRContext* sr_context = context->priv;
>> > -    const ThreadData* td = arg;
>> > +    SRContext *sr_context = context->priv;
>> > +    const ThreadData *td = arg;
>> >      const int slice_start = (td->height *  jobnr     ) / nb_jobs;
>> >      const int slice_end   = (td->height * (jobnr + 1)) / nb_jobs;
>> > -    const float* src = sr_context->output.data + slice_start * td->width;
>> > -    uint8_t* dst = td->data + slice_start * td->data_linesize;
>> > +    const float *src = sr_context->output.data + slice_start * td->width;
>> > +    uint8_t *dst = td->data + slice_start * td->data_linesize;
>> >      int y, x;
>> >
>> >      for (y = slice_start; y < slice_end; ++y){
>> > @@ -248,12 +248,12 @@ static int float_to_uint8(AVFilterContext* context, void* arg, int jobnr, int nb
>> >      return 0;
>> >  }
>> >
>> > -static int filter_frame(AVFilterLink* inlink, AVFrame* in)
>> > +static int filter_frame(AVFilterLink *inlink, AVFrame *in)
>> >  {
>> > -    AVFilterContext* context = inlink->dst;
>> > -    SRContext* sr_context = context->priv;
>> > -    AVFilterLink* outlink = context->outputs[0];
>> > -    AVFrame* out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
>> > +    AVFilterContext *context = inlink->dst;
>> > +    SRContext *sr_context = context->priv;
>> > +    AVFilterLink *outlink = context->outputs[0];
>> > +    AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
>> >      ThreadData td;
>> >      int nb_threads;
>> >      DNNReturnType dnn_result;
>> > @@ -307,9 +307,9 @@ static int filter_frame(AVFilterLink* inlink, AVFrame* in)
>> >      return ff_filter_frame(outlink, out);
>> >  }
>> >
>> > -static av_cold void uninit(AVFilterContext* context)
>> > +static av_cold void uninit(AVFilterContext *context)
>> >  {
>> > -    SRContext* sr_context = context->priv;
>> > +    SRContext *sr_context = context->priv;
>> >
>> >      if (sr_context->dnn_module){
>> >          (sr_context->dnn_module->free_model)(&sr_context->model);
>> > --
>> > 2.14.1
>> >
>>
>> LGTM.
>> I intend to push it by tomorrow.
>>
>


Pushed.
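
The change is purely mechanical: the asterisk moves from the type to the declarator, which is the prevailing FFmpeg style. The practical argument for that binding is that in C the asterisk applies per declarator, not per declaration, so attaching it to the type is misleading as soon as more than one name is declared. A minimal sketch of the rule (hypothetical declarations, not taken from the patch):

    /* asterisk written with the type: only 'kernel' is a pointer here,
     * 'biases' is a plain float, which the spacing obscures */
    float* kernel, biases;

    /* asterisk written with each declarator, as this patch does throughout:
     * both names are float pointers and the binding is explicit */
    float *kernel, *biases;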

Patch

From 2b844e65cb66b2135cc89d5105f00fad48017780 Mon Sep 17 00:00:00 2001
From: Sergey Lavrushkin <dualfal@gmail.com>
Date: Fri, 27 Jul 2018 19:34:02 +0300
Subject: [PATCH 2/9] libavfilter: Code style fixes for pointers in DNN module
 and sr filter.

---
 libavfilter/dnn_backend_native.c |  84 +++++++++++++++---------------
 libavfilter/dnn_backend_native.h |   8 +--
 libavfilter/dnn_backend_tf.c     | 108 +++++++++++++++++++--------------------
 libavfilter/dnn_backend_tf.h     |   8 +--
 libavfilter/dnn_espcn.h          |   6 +--
 libavfilter/dnn_interface.c      |   4 +-
 libavfilter/dnn_interface.h      |  16 +++---
 libavfilter/dnn_srcnn.h          |   6 +--
 libavfilter/vf_sr.c              |  60 +++++++++++-----------
 9 files changed, 150 insertions(+), 150 deletions(-)

diff --git a/libavfilter/dnn_backend_native.c b/libavfilter/dnn_backend_native.c
index 3e6b86280d..baefea7fcb 100644
--- a/libavfilter/dnn_backend_native.c
+++ b/libavfilter/dnn_backend_native.c
@@ -34,15 +34,15 @@  typedef enum {RELU, TANH, SIGMOID} ActivationFunc;
 
 typedef struct Layer{
     LayerType type;
-    float* output;
-    void* params;
+    float *output;
+    void *params;
 } Layer;
 
 typedef struct ConvolutionalParams{
     int32_t input_num, output_num, kernel_size;
     ActivationFunc activation;
-    float* kernel;
-    float* biases;
+    float *kernel;
+    float *biases;
 } ConvolutionalParams;
 
 typedef struct InputParams{
@@ -55,16 +55,16 @@  typedef struct DepthToSpaceParams{
 
 // Represents simple feed-forward convolutional network.
 typedef struct ConvolutionalNetwork{
-    Layer* layers;
+    Layer *layers;
     int32_t layers_num;
 } ConvolutionalNetwork;
 
-static DNNReturnType set_input_output_native(void* model, DNNData* input, DNNData* output)
+static DNNReturnType set_input_output_native(void *model, DNNData *input, DNNData *output)
 {
-    ConvolutionalNetwork* network = (ConvolutionalNetwork*)model;
-    InputParams* input_params;
-    ConvolutionalParams* conv_params;
-    DepthToSpaceParams* depth_to_space_params;
+    ConvolutionalNetwork *network = (ConvolutionalNetwork *)model;
+    InputParams *input_params;
+    ConvolutionalParams *conv_params;
+    DepthToSpaceParams *depth_to_space_params;
     int cur_width, cur_height, cur_channels;
     int32_t layer;
 
@@ -72,7 +72,7 @@  static DNNReturnType set_input_output_native(void* model, DNNData* input, DNNDat
         return DNN_ERROR;
     }
     else{
-        input_params = (InputParams*)network->layers[0].params;
+        input_params = (InputParams *)network->layers[0].params;
         input_params->width = cur_width = input->width;
         input_params->height = cur_height = input->height;
         input_params->channels = cur_channels = input->channels;
@@ -88,14 +88,14 @@  static DNNReturnType set_input_output_native(void* model, DNNData* input, DNNDat
     for (layer = 1; layer < network->layers_num; ++layer){
         switch (network->layers[layer].type){
         case CONV:
-            conv_params = (ConvolutionalParams*)network->layers[layer].params;
+            conv_params = (ConvolutionalParams *)network->layers[layer].params;
             if (conv_params->input_num != cur_channels){
                 return DNN_ERROR;
             }
             cur_channels = conv_params->output_num;
             break;
         case DEPTH_TO_SPACE:
-            depth_to_space_params = (DepthToSpaceParams*)network->layers[layer].params;
+            depth_to_space_params = (DepthToSpaceParams *)network->layers[layer].params;
             if (cur_channels % (depth_to_space_params->block_size * depth_to_space_params->block_size) != 0){
                 return DNN_ERROR;
             }
@@ -127,16 +127,16 @@  static DNNReturnType set_input_output_native(void* model, DNNData* input, DNNDat
 // layers_num,layer_type,layer_parameterss,layer_type,layer_parameters...
 // For CONV layer: activation_function, input_num, output_num, kernel_size, kernel, biases
 // For DEPTH_TO_SPACE layer: block_size
-DNNModel* ff_dnn_load_model_native(const char* model_filename)
+DNNModel *ff_dnn_load_model_native(const char *model_filename)
 {
-    DNNModel* model = NULL;
-    ConvolutionalNetwork* network = NULL;
-    AVIOContext* model_file_context;
+    DNNModel *model = NULL;
+    ConvolutionalNetwork *network = NULL;
+    AVIOContext *model_file_context;
     int file_size, dnn_size, kernel_size, i;
     int32_t layer;
     LayerType layer_type;
-    ConvolutionalParams* conv_params;
-    DepthToSpaceParams* depth_to_space_params;
+    ConvolutionalParams *conv_params;
+    DepthToSpaceParams *depth_to_space_params;
 
     model = av_malloc(sizeof(DNNModel));
     if (!model){
@@ -155,7 +155,7 @@  DNNModel* ff_dnn_load_model_native(const char* model_filename)
         av_freep(&model);
         return NULL;
     }
-    model->model = (void*)network;
+    model->model = (void *)network;
 
     network->layers_num = 1 + (int32_t)avio_rl32(model_file_context);
     dnn_size = 4;
@@ -251,10 +251,10 @@  DNNModel* ff_dnn_load_model_native(const char* model_filename)
     return model;
 }
 
-static int set_up_conv_layer(Layer* layer, const float* kernel, const float* biases, ActivationFunc activation,
+static int set_up_conv_layer(Layer *layer, const float *kernel, const float *biases, ActivationFunc activation,
                              int32_t input_num, int32_t output_num, int32_t size)
 {
-    ConvolutionalParams* conv_params;
+    ConvolutionalParams *conv_params;
     int kernel_size;
 
     conv_params = av_malloc(sizeof(ConvolutionalParams));
@@ -282,11 +282,11 @@  static int set_up_conv_layer(Layer* layer, const float* kernel, const float* bia
     return DNN_SUCCESS;
 }
 
-DNNModel* ff_dnn_load_default_model_native(DNNDefaultModel model_type)
+DNNModel *ff_dnn_load_default_model_native(DNNDefaultModel model_type)
 {
-    DNNModel* model = NULL;
-    ConvolutionalNetwork* network = NULL;
-    DepthToSpaceParams* depth_to_space_params;
+    DNNModel *model = NULL;
+    ConvolutionalNetwork *network = NULL;
+    DepthToSpaceParams *depth_to_space_params;
     int32_t layer;
 
     model = av_malloc(sizeof(DNNModel));
@@ -299,7 +299,7 @@  DNNModel* ff_dnn_load_default_model_native(DNNDefaultModel model_type)
         av_freep(&model);
         return NULL;
     }
-    model->model = (void*)network;
+    model->model = (void *)network;
 
     switch (model_type){
     case DNN_SRCNN:
@@ -365,7 +365,7 @@  DNNModel* ff_dnn_load_default_model_native(DNNDefaultModel model_type)
 
 #define CLAMP_TO_EDGE(x, w) ((x) < 0 ? 0 : ((x) >= (w) ? (w - 1) : (x)))
 
-static void convolve(const float* input, float* output, const ConvolutionalParams* conv_params, int width, int height)
+static void convolve(const float *input, float *output, const ConvolutionalParams *conv_params, int width, int height)
 {
     int y, x, n_filter, ch, kernel_y, kernel_x;
     int radius = conv_params->kernel_size >> 1;
@@ -403,7 +403,7 @@  static void convolve(const float* input, float* output, const ConvolutionalParam
     }
 }
 
-static void depth_to_space(const float* input, float* output, int block_size, int width, int height, int channels)
+static void depth_to_space(const float *input, float *output, int block_size, int width, int height, int channels)
 {
     int y, x, by, bx, ch;
     int new_channels = channels / (block_size * block_size);
@@ -426,20 +426,20 @@  static void depth_to_space(const float* input, float* output, int block_size, in
     }
 }
 
-DNNReturnType ff_dnn_execute_model_native(const DNNModel* model)
+DNNReturnType ff_dnn_execute_model_native(const DNNModel *model)
 {
-    ConvolutionalNetwork* network = (ConvolutionalNetwork*)model->model;
+    ConvolutionalNetwork *network = (ConvolutionalNetwork *)model->model;
     int cur_width, cur_height, cur_channels;
     int32_t layer;
-    InputParams* input_params;
-    ConvolutionalParams* conv_params;
-    DepthToSpaceParams* depth_to_space_params;
+    InputParams *input_params;
+    ConvolutionalParams *conv_params;
+    DepthToSpaceParams *depth_to_space_params;
 
     if (network->layers_num <= 0 || network->layers[0].type != INPUT || !network->layers[0].output){
         return DNN_ERROR;
     }
     else{
-        input_params = (InputParams*)network->layers[0].params;
+        input_params = (InputParams *)network->layers[0].params;
         cur_width = input_params->width;
         cur_height = input_params->height;
         cur_channels = input_params->channels;
@@ -451,12 +451,12 @@  DNNReturnType ff_dnn_execute_model_native(const DNNModel* model)
         }
         switch (network->layers[layer].type){
         case CONV:
-            conv_params = (ConvolutionalParams*)network->layers[layer].params;
+            conv_params = (ConvolutionalParams *)network->layers[layer].params;
             convolve(network->layers[layer - 1].output, network->layers[layer].output, conv_params, cur_width, cur_height);
             cur_channels = conv_params->output_num;
             break;
         case DEPTH_TO_SPACE:
-            depth_to_space_params = (DepthToSpaceParams*)network->layers[layer].params;
+            depth_to_space_params = (DepthToSpaceParams *)network->layers[layer].params;
             depth_to_space(network->layers[layer - 1].output, network->layers[layer].output,
                            depth_to_space_params->block_size, cur_width, cur_height, cur_channels);
             cur_height *= depth_to_space_params->block_size;
@@ -471,19 +471,19 @@  DNNReturnType ff_dnn_execute_model_native(const DNNModel* model)
     return DNN_SUCCESS;
 }
 
-void ff_dnn_free_model_native(DNNModel** model)
+void ff_dnn_free_model_native(DNNModel **model)
 {
-    ConvolutionalNetwork* network;
-    ConvolutionalParams* conv_params;
+    ConvolutionalNetwork *network;
+    ConvolutionalParams *conv_params;
     int32_t layer;
 
     if (*model)
     {
-        network = (ConvolutionalNetwork*)(*model)->model;
+        network = (ConvolutionalNetwork *)(*model)->model;
         for (layer = 0; layer < network->layers_num; ++layer){
             av_freep(&network->layers[layer].output);
             if (network->layers[layer].type == CONV){
-                conv_params = (ConvolutionalParams*)network->layers[layer].params;
+                conv_params = (ConvolutionalParams *)network->layers[layer].params;
                 av_freep(&conv_params->kernel);
                 av_freep(&conv_params->biases);
             }
diff --git a/libavfilter/dnn_backend_native.h b/libavfilter/dnn_backend_native.h
index 599c1302e2..adbb7088b4 100644
--- a/libavfilter/dnn_backend_native.h
+++ b/libavfilter/dnn_backend_native.h
@@ -29,12 +29,12 @@ 
 
 #include "dnn_interface.h"
 
-DNNModel* ff_dnn_load_model_native(const char* model_filename);
+DNNModel *ff_dnn_load_model_native(const char *model_filename);
 
-DNNModel* ff_dnn_load_default_model_native(DNNDefaultModel model_type);
+DNNModel *ff_dnn_load_default_model_native(DNNDefaultModel model_type);
 
-DNNReturnType ff_dnn_execute_model_native(const DNNModel* model);
+DNNReturnType ff_dnn_execute_model_native(const DNNModel *model);
 
-void ff_dnn_free_model_native(DNNModel** model);
+void ff_dnn_free_model_native(DNNModel **model);
 
 #endif
diff --git a/libavfilter/dnn_backend_tf.c b/libavfilter/dnn_backend_tf.c
index 51608c73d9..6528a2a390 100644
--- a/libavfilter/dnn_backend_tf.c
+++ b/libavfilter/dnn_backend_tf.c
@@ -31,24 +31,24 @@ 
 #include <tensorflow/c/c_api.h>
 
 typedef struct TFModel{
-    TF_Graph* graph;
-    TF_Session* session;
-    TF_Status* status;
+    TF_Graph *graph;
+    TF_Session *session;
+    TF_Status *status;
     TF_Output input, output;
-    TF_Tensor* input_tensor;
-    DNNData* output_data;
+    TF_Tensor *input_tensor;
+    DNNData *output_data;
 } TFModel;
 
-static void free_buffer(void* data, size_t length)
+static void free_buffer(void *data, size_t length)
 {
     av_freep(&data);
 }
 
-static TF_Buffer* read_graph(const char* model_filename)
+static TF_Buffer *read_graph(const char *model_filename)
 {
-    TF_Buffer* graph_buf;
-    unsigned char* graph_data = NULL;
-    AVIOContext* model_file_context;
+    TF_Buffer *graph_buf;
+    unsigned char *graph_data = NULL;
+    AVIOContext *model_file_context;
     long size, bytes_read;
 
     if (avio_open(&model_file_context, model_filename, AVIO_FLAG_READ) < 0){
@@ -70,20 +70,20 @@  static TF_Buffer* read_graph(const char* model_filename)
     }
 
     graph_buf = TF_NewBuffer();
-    graph_buf->data = (void*)graph_data;
+    graph_buf->data = (void *)graph_data;
     graph_buf->length = size;
     graph_buf->data_deallocator = free_buffer;
 
     return graph_buf;
 }
 
-static DNNReturnType set_input_output_tf(void* model, DNNData* input, DNNData* output)
+static DNNReturnType set_input_output_tf(void *model, DNNData *input, DNNData *output)
 {
-    TFModel* tf_model = (TFModel*)model;
+    TFModel *tf_model = (TFModel *)model;
     int64_t input_dims[] = {1, input->height, input->width, input->channels};
-    TF_SessionOptions* sess_opts;
-    const TF_Operation* init_op = TF_GraphOperationByName(tf_model->graph, "init");
-    TF_Tensor* output_tensor;
+    TF_SessionOptions *sess_opts;
+    const TF_Operation *init_op = TF_GraphOperationByName(tf_model->graph, "init");
+    TF_Tensor *output_tensor;
 
     // Input operation should be named 'x'
     tf_model->input.oper = TF_GraphOperationByName(tf_model->graph, "x");
@@ -99,7 +99,7 @@  static DNNReturnType set_input_output_tf(void* model, DNNData* input, DNNData* o
     if (!tf_model->input_tensor){
         return DNN_ERROR;
     }
-    input->data = (float*)TF_TensorData(tf_model->input_tensor);
+    input->data = (float *)TF_TensorData(tf_model->input_tensor);
 
     // Output operation should be named 'y'
     tf_model->output.oper = TF_GraphOperationByName(tf_model->graph, "y");
@@ -156,12 +156,12 @@  static DNNReturnType set_input_output_tf(void* model, DNNData* input, DNNData* o
     return DNN_SUCCESS;
 }
 
-DNNModel* ff_dnn_load_model_tf(const char* model_filename)
+DNNModel *ff_dnn_load_model_tf(const char *model_filename)
 {
-    DNNModel* model = NULL;
-    TFModel* tf_model = NULL;
-    TF_Buffer* graph_def;
-    TF_ImportGraphDefOptions* graph_opts;
+    DNNModel *model = NULL;
+    TFModel *tf_model = NULL;
+    TF_Buffer *graph_def;
+    TF_ImportGraphDefOptions *graph_opts;
 
     model = av_malloc(sizeof(DNNModel));
     if (!model){
@@ -197,25 +197,25 @@  DNNModel* ff_dnn_load_model_tf(const char* model_filename)
         return NULL;
     }
 
-    model->model = (void*)tf_model;
+    model->model = (void *)tf_model;
     model->set_input_output = &set_input_output_tf;
 
     return model;
 }
 
-static TF_Operation* add_pad_op(TFModel* tf_model, TF_Operation* input_op, int32_t pad)
+static TF_Operation *add_pad_op(TFModel *tf_model, TF_Operation *input_op, int32_t pad)
 {
-    TF_OperationDescription* op_desc;
-    TF_Operation* op;
-    TF_Tensor* tensor;
+    TF_OperationDescription *op_desc;
+    TF_Operation *op;
+    TF_Tensor *tensor;
     TF_Output input;
-    int32_t* pads;
+    int32_t *pads;
     int64_t pads_shape[] = {4, 2};
 
     op_desc = TF_NewOperation(tf_model->graph, "Const", "pads");
     TF_SetAttrType(op_desc, "dtype", TF_INT32);
     tensor = TF_AllocateTensor(TF_INT32, pads_shape, 2, 4 * 2 * sizeof(int32_t));
-    pads = (int32_t*)TF_TensorData(tensor);
+    pads = (int32_t *)TF_TensorData(tensor);
     pads[0] = 0;   pads[1] = 0;
     pads[2] = pad; pads[3] = pad;
     pads[4] = pad; pads[5] = pad;
@@ -246,11 +246,11 @@  static TF_Operation* add_pad_op(TFModel* tf_model, TF_Operation* input_op, int32
     return op;
 }
 
-static TF_Operation* add_const_op(TFModel* tf_model, const float* values, const int64_t* dims, int dims_len, const char* name)
+static TF_Operation *add_const_op(TFModel *tf_model, const float *values, const int64_t *dims, int dims_len, const char *name)
 {
     int dim;
-    TF_OperationDescription* op_desc;
-    TF_Tensor* tensor;
+    TF_OperationDescription *op_desc;
+    TF_Tensor *tensor;
     size_t len;
 
     op_desc = TF_NewOperation(tf_model->graph, "Const", name);
@@ -269,18 +269,18 @@  static TF_Operation* add_const_op(TFModel* tf_model, const float* values, const
     return TF_FinishOperation(op_desc, tf_model->status);
 }
 
-static TF_Operation* add_conv_layers(TFModel* tf_model, const float** consts, const int64_t** consts_dims,
-                                     const int* consts_dims_len, const char** activations,
-                                     TF_Operation* input_op, int layers_num)
+static TF_Operation* add_conv_layers(TFModel *tf_model, const float **consts, const int64_t **consts_dims,
+                                     const int *consts_dims_len, const char **activations,
+                                     TF_Operation *input_op, int layers_num)
 {
     int i;
-    TF_OperationDescription* op_desc;
-    TF_Operation* op;
-    TF_Operation* transpose_op;
+    TF_OperationDescription *op_desc;
+    TF_Operation *op;
+    TF_Operation *transpose_op;
     TF_Output input;
     int64_t strides[] = {1, 1, 1, 1};
-    int32_t* transpose_perm;
-    TF_Tensor* tensor;
+    int32_t *transpose_perm;
+    TF_Tensor *tensor;
     int64_t transpose_perm_shape[] = {4};
     #define NAME_BUFF_SIZE 256
     char name_buffer[NAME_BUFF_SIZE];
@@ -288,7 +288,7 @@  static TF_Operation* add_conv_layers(TFModel* tf_model, const float** consts, co
     op_desc = TF_NewOperation(tf_model->graph, "Const", "transpose_perm");
     TF_SetAttrType(op_desc, "dtype", TF_INT32);
     tensor = TF_AllocateTensor(TF_INT32, transpose_perm_shape, 1, 4 * sizeof(int32_t));
-    transpose_perm = (int32_t*)TF_TensorData(tensor);
+    transpose_perm = (int32_t *)TF_TensorData(tensor);
     transpose_perm[0] = 1;
     transpose_perm[1] = 2;
     transpose_perm[2] = 3;
@@ -369,13 +369,13 @@  static TF_Operation* add_conv_layers(TFModel* tf_model, const float** consts, co
     return input_op;
 }
 
-DNNModel* ff_dnn_load_default_model_tf(DNNDefaultModel model_type)
+DNNModel *ff_dnn_load_default_model_tf(DNNDefaultModel model_type)
 {
-    DNNModel* model = NULL;
-    TFModel* tf_model = NULL;
-    TF_OperationDescription* op_desc;
-    TF_Operation* op;
-    TF_Operation* const_ops_buffer[6];
+    DNNModel *model = NULL;
+    TFModel *tf_model = NULL;
+    TF_OperationDescription *op_desc;
+    TF_Operation *op;
+    TF_Operation *const_ops_buffer[6];
     TF_Output input;
     int64_t input_shape[] = {1, -1, -1, 1};
 
@@ -461,16 +461,16 @@  DNNModel* ff_dnn_load_default_model_tf(DNNDefaultModel model_type)
         CLEANUP_ON_ERROR(tf_model, model);
     }
 
-    model->model = (void*)tf_model;
+    model->model = (void *)tf_model;
     model->set_input_output = &set_input_output_tf;
 
     return model;
 }
 
-DNNReturnType ff_dnn_execute_model_tf(const DNNModel* model)
+DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model)
 {
-    TFModel* tf_model = (TFModel*)model->model;
-    TF_Tensor* output_tensor;
+    TFModel *tf_model = (TFModel *)model->model;
+    TF_Tensor *output_tensor;
 
     TF_SessionRun(tf_model->session, NULL,
                   &tf_model->input, &tf_model->input_tensor, 1,
@@ -490,12 +490,12 @@  DNNReturnType ff_dnn_execute_model_tf(const DNNModel* model)
     }
 }
 
-void ff_dnn_free_model_tf(DNNModel** model)
+void ff_dnn_free_model_tf(DNNModel **model)
 {
-    TFModel* tf_model;
+    TFModel *tf_model;
 
     if (*model){
-        tf_model = (TFModel*)(*model)->model;
+        tf_model = (TFModel *)(*model)->model;
         if (tf_model->graph){
             TF_DeleteGraph(tf_model->graph);
         }
diff --git a/libavfilter/dnn_backend_tf.h b/libavfilter/dnn_backend_tf.h
index 08e4a568b3..357a82d948 100644
--- a/libavfilter/dnn_backend_tf.h
+++ b/libavfilter/dnn_backend_tf.h
@@ -29,12 +29,12 @@ 
 
 #include "dnn_interface.h"
 
-DNNModel* ff_dnn_load_model_tf(const char* model_filename);
+DNNModel *ff_dnn_load_model_tf(const char *model_filename);
 
-DNNModel* ff_dnn_load_default_model_tf(DNNDefaultModel model_type);
+DNNModel *ff_dnn_load_default_model_tf(DNNDefaultModel model_type);
 
-DNNReturnType ff_dnn_execute_model_tf(const DNNModel* model);
+DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model);
 
-void ff_dnn_free_model_tf(DNNModel** model);
+void ff_dnn_free_model_tf(DNNModel **model);
 
 #endif
diff --git a/libavfilter/dnn_espcn.h b/libavfilter/dnn_espcn.h
index 315ecf031d..a0dd61cd0d 100644
--- a/libavfilter/dnn_espcn.h
+++ b/libavfilter/dnn_espcn.h
@@ -5398,7 +5398,7 @@  static const long int espcn_conv3_bias_dims[] = {
     4
 };
 
-static const float* espcn_consts[] = {
+static const float *espcn_consts[] = {
     espcn_conv1_kernel,
     espcn_conv1_bias,
     espcn_conv2_kernel,
@@ -5407,7 +5407,7 @@  static const float* espcn_consts[] = {
     espcn_conv3_bias
 };
 
-static const long int* espcn_consts_dims[] = {
+static const long int *espcn_consts_dims[] = {
     espcn_conv1_kernel_dims,
     espcn_conv1_bias_dims,
     espcn_conv2_kernel_dims,
@@ -5429,7 +5429,7 @@  static const char espcn_tanh[] = "Tanh";
 
 static const char espcn_sigmoid[] = "Sigmoid";
 
-static const char* espcn_activations[] = {
+static const char *espcn_activations[] = {
     espcn_tanh,
     espcn_tanh,
     espcn_sigmoid
diff --git a/libavfilter/dnn_interface.c b/libavfilter/dnn_interface.c
index 87c90526be..ca7d6d1ea5 100644
--- a/libavfilter/dnn_interface.c
+++ b/libavfilter/dnn_interface.c
@@ -28,9 +28,9 @@ 
 #include "dnn_backend_tf.h"
 #include "libavutil/mem.h"
 
-DNNModule* ff_get_dnn_module(DNNBackendType backend_type)
+DNNModule *ff_get_dnn_module(DNNBackendType backend_type)
 {
-    DNNModule* dnn_module;
+    DNNModule *dnn_module;
 
     dnn_module = av_malloc(sizeof(DNNModule));
     if(!dnn_module){
diff --git a/libavfilter/dnn_interface.h b/libavfilter/dnn_interface.h
index 6b820d1d5b..a69717ae62 100644
--- a/libavfilter/dnn_interface.h
+++ b/libavfilter/dnn_interface.h
@@ -33,31 +33,31 @@  typedef enum {DNN_NATIVE, DNN_TF} DNNBackendType;
 typedef enum {DNN_SRCNN, DNN_ESPCN} DNNDefaultModel;
 
 typedef struct DNNData{
-    float* data;
+    float *data;
     int width, height, channels;
 } DNNData;
 
 typedef struct DNNModel{
     // Stores model that can be different for different backends.
-    void* model;
+    void *model;
     // Sets model input and output, while allocating additional memory for intermediate calculations.
     // Should be called at least once before model execution.
-    DNNReturnType (*set_input_output)(void* model, DNNData* input, DNNData* output);
+    DNNReturnType (*set_input_output)(void *model, DNNData *input, DNNData *output);
 } DNNModel;
 
 // Stores pointers to functions for loading, executing, freeing DNN models for one of the backends.
 typedef struct DNNModule{
     // Loads model and parameters from given file. Returns NULL if it is not possible.
-    DNNModel* (*load_model)(const char* model_filename);
+    DNNModel *(*load_model)(const char *model_filename);
     // Loads one of the default models
-    DNNModel* (*load_default_model)(DNNDefaultModel model_type);
+    DNNModel *(*load_default_model)(DNNDefaultModel model_type);
     // Executes model with specified input and output. Returns DNN_ERROR otherwise.
-    DNNReturnType (*execute_model)(const DNNModel* model);
+    DNNReturnType (*execute_model)(const DNNModel *model);
     // Frees memory allocated for model.
-    void (*free_model)(DNNModel** model);
+    void (*free_model)(DNNModel **model);
 } DNNModule;
 
 // Initializes DNNModule depending on chosen backend.
-DNNModule* ff_get_dnn_module(DNNBackendType backend_type);
+DNNModule *ff_get_dnn_module(DNNBackendType backend_type);
 
 #endif
diff --git a/libavfilter/dnn_srcnn.h b/libavfilter/dnn_srcnn.h
index 7ec11654b3..26143654b8 100644
--- a/libavfilter/dnn_srcnn.h
+++ b/libavfilter/dnn_srcnn.h
@@ -2110,7 +2110,7 @@  static const long int srcnn_conv3_bias_dims[] = {
     1
 };
 
-static const float* srcnn_consts[] = {
+static const float *srcnn_consts[] = {
     srcnn_conv1_kernel,
     srcnn_conv1_bias,
     srcnn_conv2_kernel,
@@ -2119,7 +2119,7 @@  static const float* srcnn_consts[] = {
     srcnn_conv3_bias
 };
 
-static const long int* srcnn_consts_dims[] = {
+static const long int *srcnn_consts_dims[] = {
     srcnn_conv1_kernel_dims,
     srcnn_conv1_bias_dims,
     srcnn_conv2_kernel_dims,
@@ -2139,7 +2139,7 @@  static const int srcnn_consts_dims_len[] = {
 
 static const char srcnn_relu[] = "Relu";
 
-static const char* srcnn_activations[] = {
+static const char *srcnn_activations[] = {
     srcnn_relu,
     srcnn_relu,
     srcnn_relu
diff --git a/libavfilter/vf_sr.c b/libavfilter/vf_sr.c
index f3ca9a09a8..944a0e28e7 100644
--- a/libavfilter/vf_sr.c
+++ b/libavfilter/vf_sr.c
@@ -39,13 +39,13 @@  typedef struct SRContext {
     const AVClass *class;
 
     SRModel model_type;
-    char* model_filename;
+    char *model_filename;
     DNNBackendType backend_type;
-    DNNModule* dnn_module;
-    DNNModel* model;
+    DNNModule *dnn_module;
+    DNNModel *model;
     DNNData input, output;
     int scale_factor;
-    struct SwsContext* sws_context;
+    struct SwsContext *sws_context;
     int sws_slice_h;
 } SRContext;
 
@@ -67,9 +67,9 @@  static const AVOption sr_options[] = {
 
 AVFILTER_DEFINE_CLASS(sr);
 
-static av_cold int init(AVFilterContext* context)
+static av_cold int init(AVFilterContext *context)
 {
-    SRContext* sr_context = context->priv;
+    SRContext *sr_context = context->priv;
 
     sr_context->dnn_module = ff_get_dnn_module(sr_context->backend_type);
     if (!sr_context->dnn_module){
@@ -98,12 +98,12 @@  static av_cold int init(AVFilterContext* context)
     return 0;
 }
 
-static int query_formats(AVFilterContext* context)
+static int query_formats(AVFilterContext *context)
 {
     const enum AVPixelFormat pixel_formats[] = {AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
                                                 AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV411P, AV_PIX_FMT_GRAY8,
                                                 AV_PIX_FMT_NONE};
-    AVFilterFormats* formats_list;
+    AVFilterFormats *formats_list;
 
     formats_list = ff_make_format_list(pixel_formats);
     if (!formats_list){
@@ -113,11 +113,11 @@  static int query_formats(AVFilterContext* context)
     return ff_set_common_formats(context, formats_list);
 }
 
-static int config_props(AVFilterLink* inlink)
+static int config_props(AVFilterLink *inlink)
 {
-    AVFilterContext* context = inlink->dst;
-    SRContext* sr_context = context->priv;
-    AVFilterLink* outlink = context->outputs[0];
+    AVFilterContext *context = inlink->dst;
+    SRContext *sr_context = context->priv;
+    AVFilterLink *outlink = context->outputs[0];
     DNNReturnType result;
     int sws_src_h, sws_src_w, sws_dst_h, sws_dst_w;
 
@@ -202,18 +202,18 @@  static int config_props(AVFilterLink* inlink)
 }
 
 typedef struct ThreadData{
-    uint8_t* data;
+    uint8_t *data;
     int data_linesize, height, width;
 } ThreadData;
 
-static int uint8_to_float(AVFilterContext* context, void* arg, int jobnr, int nb_jobs)
+static int uint8_to_float(AVFilterContext *context, void *arg, int jobnr, int nb_jobs)
 {
-    SRContext* sr_context = context->priv;
-    const ThreadData* td = arg;
+    SRContext *sr_context = context->priv;
+    const ThreadData *td = arg;
     const int slice_start = (td->height *  jobnr     ) / nb_jobs;
     const int slice_end   = (td->height * (jobnr + 1)) / nb_jobs;
-    const uint8_t* src = td->data + slice_start * td->data_linesize;
-    float* dst = sr_context->input.data + slice_start * td->width;
+    const uint8_t *src = td->data + slice_start * td->data_linesize;
+    float *dst = sr_context->input.data + slice_start * td->width;
     int y, x;
 
     for (y = slice_start; y < slice_end; ++y){
@@ -227,14 +227,14 @@  static int uint8_to_float(AVFilterContext* context, void* arg, int jobnr, int nb
     return 0;
 }
 
-static int float_to_uint8(AVFilterContext* context, void* arg, int jobnr, int nb_jobs)
+static int float_to_uint8(AVFilterContext *context, void *arg, int jobnr, int nb_jobs)
 {
-    SRContext* sr_context = context->priv;
-    const ThreadData* td = arg;
+    SRContext *sr_context = context->priv;
+    const ThreadData *td = arg;
     const int slice_start = (td->height *  jobnr     ) / nb_jobs;
     const int slice_end   = (td->height * (jobnr + 1)) / nb_jobs;
-    const float* src = sr_context->output.data + slice_start * td->width;
-    uint8_t* dst = td->data + slice_start * td->data_linesize;
+    const float *src = sr_context->output.data + slice_start * td->width;
+    uint8_t *dst = td->data + slice_start * td->data_linesize;
     int y, x;
 
     for (y = slice_start; y < slice_end; ++y){
@@ -248,12 +248,12 @@  static int float_to_uint8(AVFilterContext* context, void* arg, int jobnr, int nb
     return 0;
 }
 
-static int filter_frame(AVFilterLink* inlink, AVFrame* in)
+static int filter_frame(AVFilterLink *inlink, AVFrame *in)
 {
-    AVFilterContext* context = inlink->dst;
-    SRContext* sr_context = context->priv;
-    AVFilterLink* outlink = context->outputs[0];
-    AVFrame* out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
+    AVFilterContext *context = inlink->dst;
+    SRContext *sr_context = context->priv;
+    AVFilterLink *outlink = context->outputs[0];
+    AVFrame *out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
     ThreadData td;
     int nb_threads;
     DNNReturnType dnn_result;
@@ -307,9 +307,9 @@  static int filter_frame(AVFilterLink* inlink, AVFrame* in)
     return ff_filter_frame(outlink, out);
 }
 
-static av_cold void uninit(AVFilterContext* context)
+static av_cold void uninit(AVFilterContext *context)
 {
-    SRContext* sr_context = context->priv;
+    SRContext *sr_context = context->priv;
 
     if (sr_context->dnn_module){
         (sr_context->dnn_module->free_model)(&sr_context->model);
-- 
2.14.1
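
For reference, the interface this patch reformats is driven from vf_sr.c in a fixed sequence. The following is a condensed sketch of that flow pieced together from the code above; error-path cleanup is trimmed, the dimensions are placeholders, and the final av_freep() of the module is an assumption rather than something shown in this patch:

    #include "dnn_interface.h"
    #include "libavutil/mem.h"

    static int run_default_model_once(void)
    {
        DNNModule *dnn_module = ff_get_dnn_module(DNN_NATIVE); /* or DNN_TF */
        DNNModel *model;
        DNNData input, output;

        if (!dnn_module)
            return -1;
        model = (dnn_module->load_default_model)(DNN_SRCNN);
        /* or: model = (dnn_module->load_model)(model_filename); */
        if (!model)
            return -1;

        input.width    = 640;   /* placeholder dimensions */
        input.height   = 480;
        input.channels = 1;     /* the default models take a single plane */
        if ((model->set_input_output)(model->model, &input, &output) != DNN_SUCCESS)
            return -1;

        /* per frame: fill input.data (see uint8_to_float in vf_sr.c),
         * run the network, then read the result back from output.data
         * (see float_to_uint8) */
        if ((dnn_module->execute_model)(model) != DNN_SUCCESS)
            return -1;

        (dnn_module->free_model)(&model);
        av_freep(&dnn_module);  /* the module itself comes from av_malloc() */
        return 0;
    }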