Message ID | 20230920022609.3393635-3-wenbin.chen@intel.com
---|---
State | New
Series | [FFmpeg-devel,1/3] libavfilter/dnn: add layout option to openvino backend
Context | Check | Description |
---|---|---|
andriy/make_x86 | success | Make finished |
andriy/make_fate_x86 | success | Make fate finished |
> On Sep 20, 2023, at 10:26, wenbin.chen-at-intel.com@ffmpeg.org wrote:
>
> From: Wenbin Chen <wenbin.chen@intel.com>
>
> Signed-off-by: Wenbin Chen <wenbin.chen@intel.com>
> ---
>  libavfilter/dnn/dnn_backend_tf.c | 3 ++-
>  1 file changed, 2 insertions(+), 1 deletion(-)
>
> diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
> index b521de7fbe..e1e8cef0d2 100644
> --- a/libavfilter/dnn/dnn_backend_tf.c
> +++ b/libavfilter/dnn/dnn_backend_tf.c
> @@ -629,6 +629,7 @@ static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request) {
>      TFContext *ctx = &tf_model->ctx;
>      int ret = 0;
>
> +    memset(&input, 0, sizeof(input));

Can be simplified with DNNData input = { 0 };

>      lltask = ff_queue_pop_front(tf_model->lltask_queue);
>      av_assert0(lltask);
>      task = lltask->task;
> @@ -724,7 +725,7 @@ static void infer_completion_callback(void *args) {
>      TFModel *tf_model = task->model;
>      TFContext *ctx = &tf_model->ctx;
>
> -    outputs = av_malloc_array(task->nb_output, sizeof(*outputs));
> +    outputs = av_calloc(task->nb_output, sizeof(*outputs));
>      if (!outputs) {
>          av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *outputs\n");
>          goto err;
> --
> 2.34.1
> > On Sep 20, 2023, at 10:26, wenbin.chen-at-intel.com@ffmpeg.org wrote:
> >
> > From: Wenbin Chen <wenbin.chen@intel.com>
> >
> > Signed-off-by: Wenbin Chen <wenbin.chen@intel.com>
> > ---
> >  libavfilter/dnn/dnn_backend_tf.c | 3 ++-
> >  1 file changed, 2 insertions(+), 1 deletion(-)
> >
> > diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
> > index b521de7fbe..e1e8cef0d2 100644
> > --- a/libavfilter/dnn/dnn_backend_tf.c
> > +++ b/libavfilter/dnn/dnn_backend_tf.c
> > @@ -629,6 +629,7 @@ static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request) {
> >      TFContext *ctx = &tf_model->ctx;
> >      int ret = 0;
> >
> > +    memset(&input, 0, sizeof(input));
>
> Can be simplified with DNNData input = { 0 };

Thanks for your advice. I will update it in patch v2.

> >      lltask = ff_queue_pop_front(tf_model->lltask_queue);
> >      av_assert0(lltask);
> >      task = lltask->task;
> > @@ -724,7 +725,7 @@ static void infer_completion_callback(void *args) {
> >      TFModel *tf_model = task->model;
> >      TFContext *ctx = &tf_model->ctx;
> >
> > -    outputs = av_malloc_array(task->nb_output, sizeof(*outputs));
> > +    outputs = av_calloc(task->nb_output, sizeof(*outputs));
> >      if (!outputs) {
> >          av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *outputs\n");
> >          goto err;
> > --
> > 2.34.1
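For readers following the review: the suggested simplification works because brace-initializing a struct in C zeroes every member that is not explicitly listed, so the separate memset becomes redundant. A minimal, self-contained sketch of the two equivalent forms; the DNNData fields shown here are illustrative placeholders, not FFmpeg's actual definition:

```c
#include <string.h>

/* Illustrative stand-in for FFmpeg's DNNData; the real struct has more fields. */
typedef struct DNNData {
    void *data;
    int   dims[4];
    int   dt;
} DNNData;

void zero_init_examples(void)
{
    /* Form used in the original patch: declare, then clear explicitly. */
    DNNData a;
    memset(&a, 0, sizeof(a));

    /* Suggested simplification: brace initialization zeroes every member
     * at the point of declaration, no memset needed. */
    DNNData b = { 0 };

    (void)a;
    (void)b;
}
```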
diff --git a/libavfilter/dnn/dnn_backend_tf.c b/libavfilter/dnn/dnn_backend_tf.c
index b521de7fbe..e1e8cef0d2 100644
--- a/libavfilter/dnn/dnn_backend_tf.c
+++ b/libavfilter/dnn/dnn_backend_tf.c
@@ -629,6 +629,7 @@ static int fill_model_input_tf(TFModel *tf_model, TFRequestItem *request) {
     TFContext *ctx = &tf_model->ctx;
     int ret = 0;
 
+    memset(&input, 0, sizeof(input));
     lltask = ff_queue_pop_front(tf_model->lltask_queue);
     av_assert0(lltask);
     task = lltask->task;
@@ -724,7 +725,7 @@ static void infer_completion_callback(void *args) {
     TFModel *tf_model = task->model;
     TFContext *ctx = &tf_model->ctx;
 
-    outputs = av_malloc_array(task->nb_output, sizeof(*outputs));
+    outputs = av_calloc(task->nb_output, sizeof(*outputs));
     if (!outputs) {
         av_log(ctx, AV_LOG_ERROR, "Failed to allocate memory for *outputs\n");
         goto err;
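For context on the second hunk: av_malloc_array() and av_calloc() both allocate nb_output elements with overflow checking, but only av_calloc() returns zero-filled memory. The thread does not spell out the motivation, but the usual reason for such a change is that cleanup code can then treat never-assigned entries as NULL. A standalone sketch of that pattern, using plain calloc()/malloc()/free() as stand-ins for the FFmpeg wrappers and a hypothetical Output type in place of the TF output descriptors:

```c
#include <stdlib.h>

/* Hypothetical element type standing in for the TF output descriptors. */
typedef struct Output {
    void  *tensor;
    size_t size;
} Output;

int collect_outputs(size_t nb_output)
{
    /* calloc() (like av_calloc()) returns zeroed memory, so every
     * outputs[i].tensor starts out as NULL.  malloc()/av_malloc_array()
     * would leave the array contents indeterminate. */
    Output *outputs = calloc(nb_output, sizeof(*outputs));
    if (!outputs)
        return -1;

    for (size_t i = 0; i < nb_output; i++) {
        outputs[i].tensor = malloc(16);   /* placeholder for a real tensor */
        if (!outputs[i].tensor)
            goto err;                     /* remaining entries are still NULL */
        outputs[i].size = 16;
    }

    for (size_t i = 0; i < nb_output; i++)
        free(outputs[i].tensor);
    free(outputs);
    return 0;

err:
    /* Safe even though not every element was assigned: free(NULL) is a
     * no-op, which is exactly what the zeroed allocation guarantees. */
    for (size_t i = 0; i < nb_output; i++)
        free(outputs[i].tensor);
    free(outputs);
    return -1;
}
```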