[FFmpeg-devel,2/3] dnn: remove type cast which is not necessary

Message ID 20210126012458.10918-1-yejun.guo@intel.com
State Accepted
Commit 06c01f1763bd2a9266659be6097d2fc07e014163
Series [FFmpeg-devel,1/3] tests/dnn: enable unit test dense

Checks

Context                 Check    Description
andriy/x86_make         success  Make finished
andriy/x86_make_fate    success  Make fate finished
andriy/PPC64_make       success  Make finished
andriy/PPC64_make_fate  success  Make fate finished

Commit Message

Guo, Yejun Jan. 26, 2021, 1:24 a.m. UTC
Signed-off-by: Guo, Yejun <yejun.guo@intel.com>
---
 libavfilter/dnn/dnn_backend_native.c             | 10 +++++-----
 .../dnn/dnn_backend_native_layer_avgpool.c       |  2 +-
 .../dnn/dnn_backend_native_layer_conv2d.c        |  4 ++--
 libavfilter/dnn/dnn_backend_native_layer_dense.c |  2 +-
 .../dnn/dnn_backend_native_layer_depth2space.c   |  2 +-
 .../dnn/dnn_backend_native_layer_mathbinary.c    |  2 +-
 .../dnn/dnn_backend_native_layer_mathunary.c     |  2 +-
 .../dnn/dnn_backend_native_layer_maximum.c       |  2 +-
 libavfilter/dnn/dnn_backend_native_layer_pad.c   |  2 +-
 libavfilter/dnn/dnn_backend_openvino.c           | 16 ++++++++--------
 libavfilter/dnn/dnn_backend_tf.c                 | 16 ++++++++--------
 11 files changed, 30 insertions(+), 30 deletions(-)
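
For context: the patch relies on the C rule that a pointer to void converts
implicitly to and from any object pointer type (C99 6.3.2.3), so none of the
removed casts change behavior; in C++ the void*-to-T* direction would require
an explicit cast, which is why this pattern often lingers in C code. Below is
a minimal standalone sketch, not part of the patch, using a hypothetical
AvgPoolParams type modeled on the ones in the diff, showing both directions
the patch simplifies:

    #include <stdio.h>

    typedef struct AvgPoolParams {
        int strides;
    } AvgPoolParams;

    /* In C, a void pointer converts implicitly to and from any object
     * pointer type, so no cast is needed in either direction. */
    static void layer(const void *parameters)
    {
        /* was: const AvgPoolParams *p = (const AvgPoolParams *)parameters; */
        const AvgPoolParams *p = parameters;  /* implicit void* -> T* */
        printf("strides: %d\n", p->strides);
    }

    int main(void)
    {
        AvgPoolParams params = { .strides = 2 };
        void *opaque = &params;               /* implicit T* -> void*, no (void *) cast */
        layer(opaque);
        return 0;
    }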

Patch

diff --git a/libavfilter/dnn/dnn_backend_native.c b/libavfilter/dnn/dnn_backend_native.c
index 1f89ee4110..87f3568cc2 100644
--- a/libavfilter/dnn/dnn_backend_native.c
+++ b/libavfilter/dnn/dnn_backend_native.c
@@ -50,7 +50,7 @@  static DNNReturnType execute_model_native(const DNNModel *model, const char *inp
 
 static DNNReturnType get_input_native(void *model, DNNData *input, const char *input_name)
 {
-    NativeModel *native_model = (NativeModel *)model;
+    NativeModel *native_model = model;
     NativeContext *ctx = &native_model->ctx;
 
     for (int i = 0; i < native_model->operands_num; ++i) {
@@ -78,7 +78,7 @@  static DNNReturnType get_output_native(void *model, const char *input_name, int
                                        const char *output_name, int *output_width, int *output_height)
 {
     DNNReturnType ret;
-    NativeModel *native_model = (NativeModel *)model;
+    NativeModel *native_model = model;
     NativeContext *ctx = &native_model->ctx;
     AVFrame *in_frame = av_frame_alloc();
     AVFrame *out_frame = NULL;
@@ -269,7 +269,7 @@  static DNNReturnType execute_model_native(const DNNModel *model, const char *inp
                                           const char **output_names, uint32_t nb_output, AVFrame *out_frame,
                                           int do_ioproc)
 {
-    NativeModel *native_model = (NativeModel *)model->model;
+    NativeModel *native_model = model->model;
     NativeContext *ctx = &native_model->ctx;
     int32_t layer;
     DNNData input, output;
@@ -382,7 +382,7 @@  static DNNReturnType execute_model_native(const DNNModel *model, const char *inp
 DNNReturnType ff_dnn_execute_model_native(const DNNModel *model, const char *input_name, AVFrame *in_frame,
                                           const char **output_names, uint32_t nb_output, AVFrame *out_frame)
 {
-    NativeModel *native_model = (NativeModel *)model->model;
+    NativeModel *native_model = model->model;
     NativeContext *ctx = &native_model->ctx;
 
     if (!in_frame) {
@@ -428,7 +428,7 @@  void ff_dnn_free_model_native(DNNModel **model)
     if (*model)
     {
         if ((*model)->model) {
-            native_model = (NativeModel *)(*model)->model;
+            native_model = (*model)->model;
             if (native_model->layers) {
                 for (layer = 0; layer < native_model->layers_num; ++layer){
                     if (native_model->layers[layer].type == DLT_CONV2D){
diff --git a/libavfilter/dnn/dnn_backend_native_layer_avgpool.c b/libavfilter/dnn/dnn_backend_native_layer_avgpool.c
index 8164bb45a6..dcfb8c816f 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_avgpool.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_avgpool.c
@@ -66,7 +66,7 @@  int ff_dnn_execute_layer_avg_pool(DnnOperand *operands, const int32_t *input_ope
     int width = operands[input_operand_index].dims[2];
     int channel = operands[input_operand_index].dims[3];
     const float *input = operands[input_operand_index].data;
-    const AvgPoolParams *avgpool_params = (const AvgPoolParams *)parameters;
+    const AvgPoolParams *avgpool_params = parameters;
 
     int kernel_strides = avgpool_params->strides;
     int src_linesize = width * channel;
diff --git a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
index 210db7c77e..b5c2c394ef 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
@@ -116,7 +116,7 @@  static void * dnn_execute_layer_conv2d_thread(void *threadarg)
     int width = operands[input_operand_index].dims[2];
     int channel = operands[input_operand_index].dims[3];
     const float *input = operands[input_operand_index].data;
-    const ConvolutionalParams *conv_params = (const ConvolutionalParams *)(thread_common_param->parameters);
+    const ConvolutionalParams *conv_params = thread_common_param->parameters;
 
     int radius = conv_params->kernel_size >> 1;
     int src_linesize = width * conv_params->input_num;
@@ -192,7 +192,7 @@  int ff_dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_opera
 #endif
     ThreadParam **thread_param = av_malloc_array(thread_num, sizeof(*thread_param));
     ThreadCommonParam thread_common_param;
-    const ConvolutionalParams *conv_params = (const ConvolutionalParams *)(parameters);
+    const ConvolutionalParams *conv_params = parameters;
     int height = operands[input_operand_indexes[0]].dims[1];
     int width = operands[input_operand_indexes[0]].dims[2];
     int pad_size = (conv_params->padding_method == VALID) ? (conv_params->kernel_size - 1) / 2 * conv_params->dilation : 0;
diff --git a/libavfilter/dnn/dnn_backend_native_layer_dense.c b/libavfilter/dnn/dnn_backend_native_layer_dense.c
index 8629b52cfb..117590d7bb 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_dense.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_dense.c
@@ -92,7 +92,7 @@  int ff_dnn_execute_layer_dense(DnnOperand *operands, const int32_t *input_operan
     int width = operands[input_operand_index].dims[2];
     int channel = operands[input_operand_index].dims[3];
     const float *input = operands[input_operand_index].data;
-    const DenseParams *dense_params = (const DenseParams *)parameters;
+    const DenseParams *dense_params = parameters;
 
     int src_linesize = width * channel;
     DnnOperand *output_operand = &operands[output_operand_index];
diff --git a/libavfilter/dnn/dnn_backend_native_layer_depth2space.c b/libavfilter/dnn/dnn_backend_native_layer_depth2space.c
index 26942eb3ab..2fde6d1ba4 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_depth2space.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_depth2space.c
@@ -53,7 +53,7 @@  int ff_dnn_execute_layer_depth2space(DnnOperand *operands, const int32_t *input_
                                      int32_t output_operand_index, const void *parameters, NativeContext *ctx)
 {
     float *output;
-    const DepthToSpaceParams *params = (const DepthToSpaceParams *)parameters;
+    const DepthToSpaceParams *params = parameters;
     int block_size = params->block_size;
     int32_t input_operand_index = input_operand_indexes[0];
     int number = operands[input_operand_index].dims[0];
diff --git a/libavfilter/dnn/dnn_backend_native_layer_mathbinary.c b/libavfilter/dnn/dnn_backend_native_layer_mathbinary.c
index 2a23bfaa77..c116188bab 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_mathbinary.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_mathbinary.c
@@ -152,7 +152,7 @@  int ff_dnn_execute_layer_math_binary(DnnOperand *operands, const int32_t *input_
 {
     const DnnOperand *input = &operands[input_operand_indexes[0]];
     DnnOperand *output = &operands[output_operand_index];
-    const DnnLayerMathBinaryParams *params = (const DnnLayerMathBinaryParams *)parameters;
+    const DnnLayerMathBinaryParams *params = parameters;
 
     for (int i = 0; i < 4; ++i)
         output->dims[i] = input->dims[i];
diff --git a/libavfilter/dnn/dnn_backend_native_layer_mathunary.c b/libavfilter/dnn/dnn_backend_native_layer_mathunary.c
index 77e36c6ed3..1bb05d02a7 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_mathunary.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_mathunary.c
@@ -57,7 +57,7 @@  int ff_dnn_execute_layer_math_unary(DnnOperand *operands, const int32_t *input_o
 {
     const DnnOperand *input = &operands[input_operand_indexes[0]];
     DnnOperand *output = &operands[output_operand_index];
-    const DnnLayerMathUnaryParams *params = (const DnnLayerMathUnaryParams *)parameters;
+    const DnnLayerMathUnaryParams *params = parameters;
     int dims_count;
     const float *src;
     float *dst;
diff --git a/libavfilter/dnn/dnn_backend_native_layer_maximum.c b/libavfilter/dnn/dnn_backend_native_layer_maximum.c
index baae889755..65af553765 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_maximum.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_maximum.c
@@ -54,7 +54,7 @@  int ff_dnn_execute_layer_maximum(DnnOperand *operands, const int32_t *input_oper
 {
     const DnnOperand *input = &operands[input_operand_indexes[0]];
     DnnOperand *output = &operands[output_operand_index];
-    const DnnLayerMaximumParams *params = (const DnnLayerMaximumParams *)parameters;
+    const DnnLayerMaximumParams *params = parameters;
     int dims_count;
     const float *src;
     float *dst;
diff --git a/libavfilter/dnn/dnn_backend_native_layer_pad.c b/libavfilter/dnn/dnn_backend_native_layer_pad.c
index 8d5d47883a..a60451a8de 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_pad.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_pad.c
@@ -81,7 +81,7 @@  int ff_dnn_execute_layer_pad(DnnOperand *operands, const int32_t *input_operand_
     int32_t before_paddings;
     int32_t after_paddings;
     float* output;
-    const LayerPadParams *params = (const LayerPadParams *)parameters;
+    const LayerPadParams *params = parameters;
 
     // suppose format is <N, H, W, C>
     int32_t input_operand_index = input_operand_indexes[0];
diff --git a/libavfilter/dnn/dnn_backend_openvino.c b/libavfilter/dnn/dnn_backend_openvino.c
index 573344e628..9329891c3f 100644
--- a/libavfilter/dnn/dnn_backend_openvino.c
+++ b/libavfilter/dnn/dnn_backend_openvino.c
@@ -394,7 +394,7 @@  static DNNReturnType execute_model_ov(RequestItem *request)
 
 static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input_name)
 {
-    OVModel *ov_model = (OVModel *)model;
+    OVModel *ov_model = model;
     OVContext *ctx = &ov_model->ctx;
     char *model_input_name = NULL;
     char *all_input_names = NULL;
@@ -446,7 +446,7 @@  static DNNReturnType get_output_ov(void *model, const char *input_name, int inpu
                                    const char *output_name, int *output_width, int *output_height)
 {
     DNNReturnType ret;
-    OVModel *ov_model = (OVModel *)model;
+    OVModel *ov_model = model;
     OVContext *ctx = &ov_model->ctx;
     TaskItem task;
     RequestItem request;
@@ -527,7 +527,7 @@  DNNModel *ff_dnn_load_model_ov(const char *model_filename, const char *options,
         av_freep(&model);
         return NULL;
     }
-    model->model = (void *)ov_model;
+    model->model = ov_model;
     ov_model->model = model;
     ov_model->ctx.class = &dnn_openvino_class;
     ctx = &ov_model->ctx;
@@ -569,7 +569,7 @@  err:
 DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, const char *input_name, AVFrame *in_frame,
                                       const char **output_names, uint32_t nb_output, AVFrame *out_frame)
 {
-    OVModel *ov_model = (OVModel *)model->model;
+    OVModel *ov_model = model->model;
     OVContext *ctx = &ov_model->ctx;
     TaskItem task;
     RequestItem request;
@@ -623,7 +623,7 @@  DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, const char *input_n
 DNNReturnType ff_dnn_execute_model_async_ov(const DNNModel *model, const char *input_name, AVFrame *in_frame,
                                             const char **output_names, uint32_t nb_output, AVFrame *out_frame)
 {
-    OVModel *ov_model = (OVModel *)model->model;
+    OVModel *ov_model = model->model;
     OVContext *ctx = &ov_model->ctx;
     RequestItem *request;
     TaskItem *task;
@@ -677,7 +677,7 @@  DNNReturnType ff_dnn_execute_model_async_ov(const DNNModel *model, const char *i
 
 DNNAsyncStatusType ff_dnn_get_async_result_ov(const DNNModel *model, AVFrame **in, AVFrame **out)
 {
-    OVModel *ov_model = (OVModel *)model->model;
+    OVModel *ov_model = model->model;
     TaskItem *task = ff_queue_peek_front(ov_model->task_queue);
 
     if (!task) {
@@ -698,7 +698,7 @@  DNNAsyncStatusType ff_dnn_get_async_result_ov(const DNNModel *model, AVFrame **i
 
 DNNReturnType ff_dnn_flush_ov(const DNNModel *model)
 {
-    OVModel *ov_model = (OVModel *)model->model;
+    OVModel *ov_model = model->model;
     OVContext *ctx = &ov_model->ctx;
     RequestItem *request;
     IEStatusCode status;
@@ -741,7 +741,7 @@  DNNReturnType ff_dnn_flush_ov(const DNNModel *model)
 void ff_dnn_free_model_ov(DNNModel **model)
 {
     if (*model){
-        OVModel *ov_model = (OVModel *)(*model)->model;
+        OVModel *ov_model = (*model)->model;
         while (ff_safe_queue_size(ov_model->request_queue) != 0) {
             RequestItem *item = ff_safe_queue_pop_front(ov_model->request_queue);
             if (item && item->infer_request) {
diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index f77f3dd817..71a2a308b5 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -97,7 +97,7 @@  static TF_Buffer *read_graph(const char *model_filename)
     }
 
     graph_buf = TF_NewBuffer();
-    graph_buf->data = (void *)graph_data;
+    graph_buf->data = graph_data;
     graph_buf->length = size;
     graph_buf->data_deallocator = free_buffer;
 
@@ -128,7 +128,7 @@  static TF_Tensor *allocate_input_tensor(const DNNData *input)
 
 static DNNReturnType get_input_tf(void *model, DNNData *input, const char *input_name)
 {
-    TFModel *tf_model = (TFModel *)model;
+    TFModel *tf_model = model;
     TFContext *ctx = &tf_model->ctx;
     TF_Status *status;
     int64_t dims[4];
@@ -165,7 +165,7 @@  static DNNReturnType get_output_tf(void *model, const char *input_name, int inpu
                                    const char *output_name, int *output_width, int *output_height)
 {
     DNNReturnType ret;
-    TFModel *tf_model = (TFModel *)model;
+    TFModel *tf_model = model;
     TFContext *ctx = &tf_model->ctx;
     AVFrame *in_frame = av_frame_alloc();
     AVFrame *out_frame = NULL;
@@ -586,7 +586,7 @@  static DNNReturnType load_native_model(TFModel *tf_model, const char *model_file
         return DNN_ERROR;
     }
 
-    native_model = (NativeModel *)model->model;
+    native_model = model->model;
     tf_model->graph = TF_NewGraph();
     tf_model->status = TF_NewStatus();
 
@@ -700,7 +700,7 @@  DNNModel *ff_dnn_load_model_tf(const char *model_filename, const char *options,
         }
     }
 
-    model->model = (void *)tf_model;
+    model->model = tf_model;
     model->get_input = &get_input_tf;
     model->get_output = &get_output_tf;
     model->options = options;
@@ -714,7 +714,7 @@  static DNNReturnType execute_model_tf(const DNNModel *model, const char *input_n
                                       int do_ioproc)
 {
     TF_Output *tf_outputs;
-    TFModel *tf_model = (TFModel *)model->model;
+    TFModel *tf_model = model->model;
     TFContext *ctx = &tf_model->ctx;
     DNNData input, output;
     TF_Tensor **output_tensors;
@@ -822,7 +822,7 @@  static DNNReturnType execute_model_tf(const DNNModel *model, const char *input_n
 DNNReturnType ff_dnn_execute_model_tf(const DNNModel *model, const char *input_name, AVFrame *in_frame,
                                       const char **output_names, uint32_t nb_output, AVFrame *out_frame)
 {
-    TFModel *tf_model = (TFModel *)model->model;
+    TFModel *tf_model = model->model;
     TFContext *ctx = &tf_model->ctx;
 
     if (!in_frame) {
@@ -843,7 +843,7 @@  void ff_dnn_free_model_tf(DNNModel **model)
     TFModel *tf_model;
 
     if (*model){
-        tf_model = (TFModel *)(*model)->model;
+        tf_model = (*model)->model;
         if (tf_model->graph){
             TF_DeleteGraph(tf_model->graph);
         }