
[FFmpeg-devel,1/4] libavfilter/dnn: separate conv2d layer from dnn_backend_native.c to a new file

Message ID 1567663228-26315-1-git-send-email-yejun.guo@intel.com
State Accepted
Commit 5f058dd693c4bebcd6a293da4630441f3540902f

Commit Message

Guo, Yejun Sept. 5, 2019, 6 a.m. UTC
The idea is to keep each layer in its own source file so that
the source files stay simple and easy to maintain.

Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
---
 libavfilter/dnn/Makefile                          |   1 +
 libavfilter/dnn/dnn_backend_native.c              |  80 +----------------
 libavfilter/dnn/dnn_backend_native.h              |  13 ---
 libavfilter/dnn/dnn_backend_native_layer_conv2d.c | 101 ++++++++++++++++++++++
 libavfilter/dnn/dnn_backend_native_layer_conv2d.h |  39 +++++++++
 libavfilter/dnn/dnn_backend_tf.c                  |   1 +
 6 files changed, 143 insertions(+), 92 deletions(-)
 create mode 100644 libavfilter/dnn/dnn_backend_native_layer_conv2d.c
 create mode 100644 libavfilter/dnn/dnn_backend_native_layer_conv2d.h
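
For illustration, a minimal sketch of how the relocated layer is driven through the new public header once convolve() is exported. The standalone main(), the operand/buffer setup and the 1x1 kernel values below are assumptions made for the example only; they are not part of the patch.

    /* sketch: exercise convolve() via dnn_backend_native_layer_conv2d.h */
    #include <stdint.h>
    #include "libavutil/mem.h"
    #include "dnn_backend_native_layer_conv2d.h"

    int main(void)
    {
        float input_data[4] = {1.0f, 2.0f, 3.0f, 4.0f};   /* 1x2x2x1 input */
        float kernel[1]     = {2.0f};                      /* one 1x1 filter */
        float bias[1]       = {0.0f};
        int32_t input_indexes[1] = {0};
        int ret;

        DnnOperand operands[2] = {{0}};
        operands[0].dims[0] = 1;   /* number   */
        operands[0].dims[1] = 2;   /* height   */
        operands[0].dims[2] = 2;   /* width    */
        operands[0].dims[3] = 1;   /* channels */
        operands[0].data    = input_data;

        ConvolutionalParams conv_params = {
            .input_num = 1, .output_num = 1, .kernel_size = 1,
            .activation = NONE, .padding_method = VALID, .dilation = 1,
            .kernel = kernel, .biases = bias,
        };

        /* convolve() (re)allocates operands[1].data itself and returns 0 on
         * success, -1 if the output buffer could not be allocated. */
        ret = convolve(operands, input_indexes, 1, &conv_params);
        /* on success, operands[1].data now holds {2, 4, 6, 8} */
        av_freep(&operands[1].data);
        return ret ? 1 : 0;
    }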

Comments

Pedro Arthur Sept. 19, 2019, 2:38 p.m. UTC | #1
On Thu, Sep 5, 2019 at 3:05 AM Guo, Yejun <yejun.guo@intel.com> wrote:
>
> The idea is to keep each layer in its own source file so that
> the source files stay simple and easy to maintain.
>
> Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
> ---
>  libavfilter/dnn/Makefile                          |   1 +
>  libavfilter/dnn/dnn_backend_native.c              |  80 +----------------
>  libavfilter/dnn/dnn_backend_native.h              |  13 ---
>  libavfilter/dnn/dnn_backend_native_layer_conv2d.c | 101 ++++++++++++++++++++++
>  libavfilter/dnn/dnn_backend_native_layer_conv2d.h |  39 +++++++++
>  libavfilter/dnn/dnn_backend_tf.c                  |   1 +
>  6 files changed, 143 insertions(+), 92 deletions(-)
>  create mode 100644 libavfilter/dnn/dnn_backend_native_layer_conv2d.c
>  create mode 100644 libavfilter/dnn/dnn_backend_native_layer_conv2d.h
>
> diff --git a/libavfilter/dnn/Makefile b/libavfilter/dnn/Makefile
> index 83938e5..40b848b 100644
> --- a/libavfilter/dnn/Makefile
> +++ b/libavfilter/dnn/Makefile
> @@ -1,6 +1,7 @@
>  OBJS-$(CONFIG_DNN)                           += dnn/dnn_interface.o
>  OBJS-$(CONFIG_DNN)                           += dnn/dnn_backend_native.o
>  OBJS-$(CONFIG_DNN)                           += dnn/dnn_backend_native_layer_pad.o
> +OBJS-$(CONFIG_DNN)                           += dnn/dnn_backend_native_layer_conv2d.o
>
>  DNN-OBJS-$(CONFIG_LIBTENSORFLOW)             += dnn/dnn_backend_tf.o
>
> diff --git a/libavfilter/dnn/dnn_backend_native.c b/libavfilter/dnn/dnn_backend_native.c
> index f56cd81..5dabd15 100644
> --- a/libavfilter/dnn/dnn_backend_native.c
> +++ b/libavfilter/dnn/dnn_backend_native.c
> @@ -26,6 +26,7 @@
>  #include "dnn_backend_native.h"
>  #include "libavutil/avassert.h"
>  #include "dnn_backend_native_layer_pad.h"
> +#include "dnn_backend_native_layer_conv2d.h"
>
>  static DNNReturnType set_input_output_native(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output)
>  {
> @@ -281,85 +282,6 @@ DNNModel *ff_dnn_load_model_native(const char *model_filename)
>      return model;
>  }
>
> -#define CLAMP_TO_EDGE(x, w) ((x) < 0 ? 0 : ((x) >= (w) ? (w - 1) : (x)))
> -
> -static int convolve(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const ConvolutionalParams *conv_params)
> -{
> -    float *output;
> -    int32_t input_operand_index = input_operand_indexes[0];
> -    int number = operands[input_operand_index].dims[0];
> -    int height = operands[input_operand_index].dims[1];
> -    int width = operands[input_operand_index].dims[2];
> -    int channel = operands[input_operand_index].dims[3];
> -    const float *input = operands[input_operand_index].data;
> -
> -    int radius = conv_params->kernel_size >> 1;
> -    int src_linesize = width * conv_params->input_num;
> -    int filter_linesize = conv_params->kernel_size * conv_params->input_num;
> -    int filter_size = conv_params->kernel_size * filter_linesize;
> -    int pad_size = (conv_params->padding_method == VALID) ? (conv_params->kernel_size - 1) / 2 * conv_params->dilation : 0;
> -
> -    DnnOperand *output_operand = &operands[output_operand_index];
> -    output_operand->dims[0] = number;
> -    output_operand->dims[1] = height - pad_size * 2;
> -    output_operand->dims[2] = width - pad_size * 2;
> -    output_operand->dims[3] = conv_params->output_num;
> -    output_operand->length = calculate_operand_data_length(output_operand);
> -    output_operand->data = av_realloc(output_operand->data, output_operand->length);
> -    if (!output_operand->data)
> -        return -1;
> -    output = output_operand->data;
> -
> -    av_assert0(channel == conv_params->input_num);
> -
> -    for (int y = pad_size; y < height - pad_size; ++y) {
> -        for (int x = pad_size; x < width - pad_size; ++x) {
> -            for (int n_filter = 0; n_filter < conv_params->output_num; ++n_filter) {
> -                output[n_filter] = conv_params->biases[n_filter];
> -
> -                for (int ch = 0; ch < conv_params->input_num; ++ch) {
> -                    for (int kernel_y = 0; kernel_y < conv_params->kernel_size; ++kernel_y) {
> -                        for (int kernel_x = 0; kernel_x < conv_params->kernel_size; ++kernel_x) {
> -                            float input_pel;
> -                            if (conv_params->padding_method == SAME_CLAMP_TO_EDGE) {
> -                                int y_pos = CLAMP_TO_EDGE(y + (kernel_y - radius) * conv_params->dilation, height);
> -                                int x_pos = CLAMP_TO_EDGE(x + (kernel_x - radius) * conv_params->dilation, width);
> -                                input_pel = input[y_pos * src_linesize + x_pos * conv_params->input_num + ch];
> -                            } else {
> -                                int y_pos = y + (kernel_y - radius) * conv_params->dilation;
> -                                int x_pos = x + (kernel_x - radius) * conv_params->dilation;
> -                                input_pel = (x_pos < 0 || x_pos >= width || y_pos < 0 || y_pos >= height) ? 0.0 :
> -                                                   input[y_pos * src_linesize + x_pos * conv_params->input_num + ch];
> -                            }
> -
> -
> -                            output[n_filter] += input_pel * conv_params->kernel[n_filter * filter_size + kernel_y * filter_linesize +
> -                                                                                kernel_x * conv_params->input_num + ch];
> -                        }
> -                    }
> -                }
> -                switch (conv_params->activation){
> -                case RELU:
> -                    output[n_filter] = FFMAX(output[n_filter], 0.0);
> -                    break;
> -                case TANH:
> -                    output[n_filter] = 2.0f  / (1.0f + exp(-2.0f * output[n_filter])) - 1.0f;
> -                    break;
> -                case SIGMOID:
> -                    output[n_filter] = 1.0f / (1.0f + exp(-output[n_filter]));
> -                    break;
> -                case NONE:
> -                    break;
> -                case LEAKY_RELU:
> -                    output[n_filter] = FFMAX(output[n_filter], 0.0) + 0.2 * FFMIN(output[n_filter], 0.0);
> -                }
> -            }
> -            output += conv_params->output_num;
> -        }
> -    }
> -    return 0;
> -}
> -
>  static int depth_to_space(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, int block_size)
>  {
>      float *output;
> diff --git a/libavfilter/dnn/dnn_backend_native.h b/libavfilter/dnn/dnn_backend_native.h
> index 08e7d15..aa52222 100644
> --- a/libavfilter/dnn/dnn_backend_native.h
> +++ b/libavfilter/dnn/dnn_backend_native.h
> @@ -32,10 +32,6 @@
>
>  typedef enum {INPUT, CONV, DEPTH_TO_SPACE, MIRROR_PAD} DNNLayerType;
>
> -typedef enum {RELU, TANH, SIGMOID, NONE, LEAKY_RELU} DNNActivationFunc;
> -
> -typedef enum {VALID, SAME, SAME_CLAMP_TO_EDGE} DNNConvPaddingParam;
> -
>  typedef enum {DOT_INPUT = 1, DOT_OUTPUT = 2, DOT_INTERMEDIATE = DOT_INPUT | DOT_INPUT} DNNOperandType;
>
>  typedef struct Layer{
> @@ -90,15 +86,6 @@ typedef struct DnnOperand{
>      int32_t usedNumbersLeft;
>  }DnnOperand;
>
> -typedef struct ConvolutionalParams{
> -    int32_t input_num, output_num, kernel_size;
> -    DNNActivationFunc activation;
> -    DNNConvPaddingParam padding_method;
> -    int32_t dilation;
> -    float *kernel;
> -    float *biases;
> -} ConvolutionalParams;
> -
>  typedef struct InputParams{
>      int height, width, channels;
>  } InputParams;
> diff --git a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
> new file mode 100644
> index 0000000..b13b431
> --- /dev/null
> +++ b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
> @@ -0,0 +1,101 @@
> +/*
> + * Copyright (c) 2018 Sergey Lavrushkin
> + *
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#include "libavutil/avassert.h"
> +#include "dnn_backend_native_layer_conv2d.h"
> +
> +#define CLAMP_TO_EDGE(x, w) ((x) < 0 ? 0 : ((x) >= (w) ? (w - 1) : (x)))
> +
> +int convolve(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const ConvolutionalParams *conv_params)
> +{
> +    float *output;
> +    int32_t input_operand_index = input_operand_indexes[0];
> +    int number = operands[input_operand_index].dims[0];
> +    int height = operands[input_operand_index].dims[1];
> +    int width = operands[input_operand_index].dims[2];
> +    int channel = operands[input_operand_index].dims[3];
> +    const float *input = operands[input_operand_index].data;
> +
> +    int radius = conv_params->kernel_size >> 1;
> +    int src_linesize = width * conv_params->input_num;
> +    int filter_linesize = conv_params->kernel_size * conv_params->input_num;
> +    int filter_size = conv_params->kernel_size * filter_linesize;
> +    int pad_size = (conv_params->padding_method == VALID) ? (conv_params->kernel_size - 1) / 2 * conv_params->dilation : 0;
> +
> +    DnnOperand *output_operand = &operands[output_operand_index];
> +    output_operand->dims[0] = number;
> +    output_operand->dims[1] = height - pad_size * 2;
> +    output_operand->dims[2] = width - pad_size * 2;
> +    output_operand->dims[3] = conv_params->output_num;
> +    output_operand->length = calculate_operand_data_length(output_operand);
> +    output_operand->data = av_realloc(output_operand->data, output_operand->length);
> +    if (!output_operand->data)
> +        return -1;
> +    output = output_operand->data;
> +
> +    av_assert0(channel == conv_params->input_num);
> +
> +    for (int y = pad_size; y < height - pad_size; ++y) {
> +        for (int x = pad_size; x < width - pad_size; ++x) {
> +            for (int n_filter = 0; n_filter < conv_params->output_num; ++n_filter) {
> +                output[n_filter] = conv_params->biases[n_filter];
> +
> +                for (int ch = 0; ch < conv_params->input_num; ++ch) {
> +                    for (int kernel_y = 0; kernel_y < conv_params->kernel_size; ++kernel_y) {
> +                        for (int kernel_x = 0; kernel_x < conv_params->kernel_size; ++kernel_x) {
> +                            float input_pel;
> +                            if (conv_params->padding_method == SAME_CLAMP_TO_EDGE) {
> +                                int y_pos = CLAMP_TO_EDGE(y + (kernel_y - radius) * conv_params->dilation, height);
> +                                int x_pos = CLAMP_TO_EDGE(x + (kernel_x - radius) * conv_params->dilation, width);
> +                                input_pel = input[y_pos * src_linesize + x_pos * conv_params->input_num + ch];
> +                            } else {
> +                                int y_pos = y + (kernel_y - radius) * conv_params->dilation;
> +                                int x_pos = x + (kernel_x - radius) * conv_params->dilation;
> +                                input_pel = (x_pos < 0 || x_pos >= width || y_pos < 0 || y_pos >= height) ? 0.0 :
> +                                                   input[y_pos * src_linesize + x_pos * conv_params->input_num + ch];
> +                            }
> +
> +
> +                            output[n_filter] += input_pel * conv_params->kernel[n_filter * filter_size + kernel_y * filter_linesize +
> +                                                                                kernel_x * conv_params->input_num + ch];
> +                        }
> +                    }
> +                }
> +                switch (conv_params->activation){
> +                case RELU:
> +                    output[n_filter] = FFMAX(output[n_filter], 0.0);
> +                    break;
> +                case TANH:
> +                    output[n_filter] = 2.0f  / (1.0f + exp(-2.0f * output[n_filter])) - 1.0f;
> +                    break;
> +                case SIGMOID:
> +                    output[n_filter] = 1.0f / (1.0f + exp(-output[n_filter]));
> +                    break;
> +                case NONE:
> +                    break;
> +                case LEAKY_RELU:
> +                    output[n_filter] = FFMAX(output[n_filter], 0.0) + 0.2 * FFMIN(output[n_filter], 0.0);
> +                }
> +            }
> +            output += conv_params->output_num;
> +        }
> +    }
> +    return 0;
> +}
> diff --git a/libavfilter/dnn/dnn_backend_native_layer_conv2d.h b/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
> new file mode 100644
> index 0000000..7ddfff3
> --- /dev/null
> +++ b/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
> @@ -0,0 +1,39 @@
> +/*
> + * Copyright (c) 2018 Sergey Lavrushkin
> + *
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#ifndef AVFILTER_DNN_DNN_BACKEND_NATIVE_LAYER_CONV2D_H
> +#define AVFILTER_DNN_DNN_BACKEND_NATIVE_LAYER_CONV2D_H
> +
> +#include "dnn_backend_native.h"
> +
> +typedef enum {RELU, TANH, SIGMOID, NONE, LEAKY_RELU} DNNActivationFunc;
> +typedef enum {VALID, SAME, SAME_CLAMP_TO_EDGE} DNNConvPaddingParam;
> +
> +typedef struct ConvolutionalParams{
> +    int32_t input_num, output_num, kernel_size;
> +    DNNActivationFunc activation;
> +    DNNConvPaddingParam padding_method;
> +    int32_t dilation;
> +    float *kernel;
> +    float *biases;
> +} ConvolutionalParams;
> +
> +int convolve(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const ConvolutionalParams *conv_params);
> +#endif
> diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
> index 626fba9..46dfa00 100644
> --- a/libavfilter/dnn/dnn_backend_tf.c
> +++ b/libavfilter/dnn/dnn_backend_tf.c
> @@ -25,6 +25,7 @@
>
>  #include "dnn_backend_tf.h"
>  #include "dnn_backend_native.h"
> +#include "dnn_backend_native_layer_conv2d.h"
>  #include "libavformat/avio.h"
>  #include "libavutil/avassert.h"
>  #include "dnn_backend_native_layer_pad.h"
> --
> 2.7.4
>

LGTM

Pushed, thanks!

> _______________________________________________
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>
> To unsubscribe, visit link above, or email
> ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".

Patch

diff --git a/libavfilter/dnn/Makefile b/libavfilter/dnn/Makefile
index 83938e5..40b848b 100644
--- a/libavfilter/dnn/Makefile
+++ b/libavfilter/dnn/Makefile
@@ -1,6 +1,7 @@ 
 OBJS-$(CONFIG_DNN)                           += dnn/dnn_interface.o
 OBJS-$(CONFIG_DNN)                           += dnn/dnn_backend_native.o
 OBJS-$(CONFIG_DNN)                           += dnn/dnn_backend_native_layer_pad.o
+OBJS-$(CONFIG_DNN)                           += dnn/dnn_backend_native_layer_conv2d.o
 
 DNN-OBJS-$(CONFIG_LIBTENSORFLOW)             += dnn/dnn_backend_tf.o
 
diff --git a/libavfilter/dnn/dnn_backend_native.c b/libavfilter/dnn/dnn_backend_native.c
index f56cd81..5dabd15 100644
--- a/libavfilter/dnn/dnn_backend_native.c
+++ b/libavfilter/dnn/dnn_backend_native.c
@@ -26,6 +26,7 @@ 
 #include "dnn_backend_native.h"
 #include "libavutil/avassert.h"
 #include "dnn_backend_native_layer_pad.h"
+#include "dnn_backend_native_layer_conv2d.h"
 
 static DNNReturnType set_input_output_native(void *model, DNNInputData *input, const char *input_name, const char **output_names, uint32_t nb_output)
 {
@@ -281,85 +282,6 @@  DNNModel *ff_dnn_load_model_native(const char *model_filename)
     return model;
 }
 
-#define CLAMP_TO_EDGE(x, w) ((x) < 0 ? 0 : ((x) >= (w) ? (w - 1) : (x)))
-
-static int convolve(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const ConvolutionalParams *conv_params)
-{
-    float *output;
-    int32_t input_operand_index = input_operand_indexes[0];
-    int number = operands[input_operand_index].dims[0];
-    int height = operands[input_operand_index].dims[1];
-    int width = operands[input_operand_index].dims[2];
-    int channel = operands[input_operand_index].dims[3];
-    const float *input = operands[input_operand_index].data;
-
-    int radius = conv_params->kernel_size >> 1;
-    int src_linesize = width * conv_params->input_num;
-    int filter_linesize = conv_params->kernel_size * conv_params->input_num;
-    int filter_size = conv_params->kernel_size * filter_linesize;
-    int pad_size = (conv_params->padding_method == VALID) ? (conv_params->kernel_size - 1) / 2 * conv_params->dilation : 0;
-
-    DnnOperand *output_operand = &operands[output_operand_index];
-    output_operand->dims[0] = number;
-    output_operand->dims[1] = height - pad_size * 2;
-    output_operand->dims[2] = width - pad_size * 2;
-    output_operand->dims[3] = conv_params->output_num;
-    output_operand->length = calculate_operand_data_length(output_operand);
-    output_operand->data = av_realloc(output_operand->data, output_operand->length);
-    if (!output_operand->data)
-        return -1;
-    output = output_operand->data;
-
-    av_assert0(channel == conv_params->input_num);
-
-    for (int y = pad_size; y < height - pad_size; ++y) {
-        for (int x = pad_size; x < width - pad_size; ++x) {
-            for (int n_filter = 0; n_filter < conv_params->output_num; ++n_filter) {
-                output[n_filter] = conv_params->biases[n_filter];
-
-                for (int ch = 0; ch < conv_params->input_num; ++ch) {
-                    for (int kernel_y = 0; kernel_y < conv_params->kernel_size; ++kernel_y) {
-                        for (int kernel_x = 0; kernel_x < conv_params->kernel_size; ++kernel_x) {
-                            float input_pel;
-                            if (conv_params->padding_method == SAME_CLAMP_TO_EDGE) {
-                                int y_pos = CLAMP_TO_EDGE(y + (kernel_y - radius) * conv_params->dilation, height);
-                                int x_pos = CLAMP_TO_EDGE(x + (kernel_x - radius) * conv_params->dilation, width);
-                                input_pel = input[y_pos * src_linesize + x_pos * conv_params->input_num + ch];
-                            } else {
-                                int y_pos = y + (kernel_y - radius) * conv_params->dilation;
-                                int x_pos = x + (kernel_x - radius) * conv_params->dilation;
-                                input_pel = (x_pos < 0 || x_pos >= width || y_pos < 0 || y_pos >= height) ? 0.0 :
-                                                   input[y_pos * src_linesize + x_pos * conv_params->input_num + ch];
-                            }
-
-
-                            output[n_filter] += input_pel * conv_params->kernel[n_filter * filter_size + kernel_y * filter_linesize +
-                                                                                kernel_x * conv_params->input_num + ch];
-                        }
-                    }
-                }
-                switch (conv_params->activation){
-                case RELU:
-                    output[n_filter] = FFMAX(output[n_filter], 0.0);
-                    break;
-                case TANH:
-                    output[n_filter] = 2.0f  / (1.0f + exp(-2.0f * output[n_filter])) - 1.0f;
-                    break;
-                case SIGMOID:
-                    output[n_filter] = 1.0f / (1.0f + exp(-output[n_filter]));
-                    break;
-                case NONE:
-                    break;
-                case LEAKY_RELU:
-                    output[n_filter] = FFMAX(output[n_filter], 0.0) + 0.2 * FFMIN(output[n_filter], 0.0);
-                }
-            }
-            output += conv_params->output_num;
-        }
-    }
-    return 0;
-}
-
 static int depth_to_space(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, int block_size)
 {
     float *output;
diff --git a/libavfilter/dnn/dnn_backend_native.h b/libavfilter/dnn/dnn_backend_native.h
index 08e7d15..aa52222 100644
--- a/libavfilter/dnn/dnn_backend_native.h
+++ b/libavfilter/dnn/dnn_backend_native.h
@@ -32,10 +32,6 @@ 
 
 typedef enum {INPUT, CONV, DEPTH_TO_SPACE, MIRROR_PAD} DNNLayerType;
 
-typedef enum {RELU, TANH, SIGMOID, NONE, LEAKY_RELU} DNNActivationFunc;
-
-typedef enum {VALID, SAME, SAME_CLAMP_TO_EDGE} DNNConvPaddingParam;
-
 typedef enum {DOT_INPUT = 1, DOT_OUTPUT = 2, DOT_INTERMEDIATE = DOT_INPUT | DOT_INPUT} DNNOperandType;
 
 typedef struct Layer{
@@ -90,15 +86,6 @@  typedef struct DnnOperand{
     int32_t usedNumbersLeft;
 }DnnOperand;
 
-typedef struct ConvolutionalParams{
-    int32_t input_num, output_num, kernel_size;
-    DNNActivationFunc activation;
-    DNNConvPaddingParam padding_method;
-    int32_t dilation;
-    float *kernel;
-    float *biases;
-} ConvolutionalParams;
-
 typedef struct InputParams{
     int height, width, channels;
 } InputParams;
diff --git a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
new file mode 100644
index 0000000..b13b431
--- /dev/null
+++ b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
@@ -0,0 +1,101 @@ 
+/*
+ * Copyright (c) 2018 Sergey Lavrushkin
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "dnn_backend_native_layer_conv2d.h"
+
+#define CLAMP_TO_EDGE(x, w) ((x) < 0 ? 0 : ((x) >= (w) ? (w - 1) : (x)))
+
+int convolve(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const ConvolutionalParams *conv_params)
+{
+    float *output;
+    int32_t input_operand_index = input_operand_indexes[0];
+    int number = operands[input_operand_index].dims[0];
+    int height = operands[input_operand_index].dims[1];
+    int width = operands[input_operand_index].dims[2];
+    int channel = operands[input_operand_index].dims[3];
+    const float *input = operands[input_operand_index].data;
+
+    int radius = conv_params->kernel_size >> 1;
+    int src_linesize = width * conv_params->input_num;
+    int filter_linesize = conv_params->kernel_size * conv_params->input_num;
+    int filter_size = conv_params->kernel_size * filter_linesize;
+    int pad_size = (conv_params->padding_method == VALID) ? (conv_params->kernel_size - 1) / 2 * conv_params->dilation : 0;
+
+    DnnOperand *output_operand = &operands[output_operand_index];
+    output_operand->dims[0] = number;
+    output_operand->dims[1] = height - pad_size * 2;
+    output_operand->dims[2] = width - pad_size * 2;
+    output_operand->dims[3] = conv_params->output_num;
+    output_operand->length = calculate_operand_data_length(output_operand);
+    output_operand->data = av_realloc(output_operand->data, output_operand->length);
+    if (!output_operand->data)
+        return -1;
+    output = output_operand->data;
+
+    av_assert0(channel == conv_params->input_num);
+
+    for (int y = pad_size; y < height - pad_size; ++y) {
+        for (int x = pad_size; x < width - pad_size; ++x) {
+            for (int n_filter = 0; n_filter < conv_params->output_num; ++n_filter) {
+                output[n_filter] = conv_params->biases[n_filter];
+
+                for (int ch = 0; ch < conv_params->input_num; ++ch) {
+                    for (int kernel_y = 0; kernel_y < conv_params->kernel_size; ++kernel_y) {
+                        for (int kernel_x = 0; kernel_x < conv_params->kernel_size; ++kernel_x) {
+                            float input_pel;
+                            if (conv_params->padding_method == SAME_CLAMP_TO_EDGE) {
+                                int y_pos = CLAMP_TO_EDGE(y + (kernel_y - radius) * conv_params->dilation, height);
+                                int x_pos = CLAMP_TO_EDGE(x + (kernel_x - radius) * conv_params->dilation, width);
+                                input_pel = input[y_pos * src_linesize + x_pos * conv_params->input_num + ch];
+                            } else {
+                                int y_pos = y + (kernel_y - radius) * conv_params->dilation;
+                                int x_pos = x + (kernel_x - radius) * conv_params->dilation;
+                                input_pel = (x_pos < 0 || x_pos >= width || y_pos < 0 || y_pos >= height) ? 0.0 :
+                                                   input[y_pos * src_linesize + x_pos * conv_params->input_num + ch];
+                            }
+
+
+                            output[n_filter] += input_pel * conv_params->kernel[n_filter * filter_size + kernel_y * filter_linesize +
+                                                                                kernel_x * conv_params->input_num + ch];
+                        }
+                    }
+                }
+                switch (conv_params->activation){
+                case RELU:
+                    output[n_filter] = FFMAX(output[n_filter], 0.0);
+                    break;
+                case TANH:
+                    output[n_filter] = 2.0f  / (1.0f + exp(-2.0f * output[n_filter])) - 1.0f;
+                    break;
+                case SIGMOID:
+                    output[n_filter] = 1.0f / (1.0f + exp(-output[n_filter]));
+                    break;
+                case NONE:
+                    break;
+                case LEAKY_RELU:
+                    output[n_filter] = FFMAX(output[n_filter], 0.0) + 0.2 * FFMIN(output[n_filter], 0.0);
+                }
+            }
+            output += conv_params->output_num;
+        }
+    }
+    return 0;
+}
diff --git a/libavfilter/dnn/dnn_backend_native_layer_conv2d.h b/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
new file mode 100644
index 0000000..7ddfff3
--- /dev/null
+++ b/libavfilter/dnn/dnn_backend_native_layer_conv2d.h
@@ -0,0 +1,39 @@ 
+/*
+ * Copyright (c) 2018 Sergey Lavrushkin
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVFILTER_DNN_DNN_BACKEND_NATIVE_LAYER_CONV2D_H
+#define AVFILTER_DNN_DNN_BACKEND_NATIVE_LAYER_CONV2D_H
+
+#include "dnn_backend_native.h"
+
+typedef enum {RELU, TANH, SIGMOID, NONE, LEAKY_RELU} DNNActivationFunc;
+typedef enum {VALID, SAME, SAME_CLAMP_TO_EDGE} DNNConvPaddingParam;
+
+typedef struct ConvolutionalParams{
+    int32_t input_num, output_num, kernel_size;
+    DNNActivationFunc activation;
+    DNNConvPaddingParam padding_method;
+    int32_t dilation;
+    float *kernel;
+    float *biases;
+} ConvolutionalParams;
+
+int convolve(DnnOperand *operands, const int32_t *input_operand_indexes, int32_t output_operand_index, const ConvolutionalParams *conv_params);
+#endif
diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index 626fba9..46dfa00 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -25,6 +25,7 @@ 
 
 #include "dnn_backend_tf.h"
 #include "dnn_backend_native.h"
+#include "dnn_backend_native_layer_conv2d.h"
 #include "libavformat/avio.h"
 #include "libavutil/avassert.h"
 #include "dnn_backend_native_layer_pad.h"