Message ID | 20210310215446.1396386-7-andreas.rheinhardt@gmail.com |
---|---|
State | Accepted |
Series | [FFmpeg-devel,1/8] avcodec/cbs: Remove redundant checks for CodedBitstreamContext.codec |
> -----Original Message-----
> From: ffmpeg-devel <ffmpeg-devel-bounces@ffmpeg.org> On Behalf Of Andreas Rheinhardt
> Sent: March 11, 2021 5:55
> To: ffmpeg-devel@ffmpeg.org
> Cc: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
> Subject: [FFmpeg-devel] [PATCH 16/23] dnn/dnn_backend_native_layer_conv2d: Avoid allocation when single-threaded
>
> Also fixes a memleak in single-threaded mode when an error happens in
> preparing the output data buffer; and also removes an unchecked allocation.
>
> Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
> ---
>  .../dnn/dnn_backend_native_layer_conv2d.c | 18 +++++++++---------
>  1 file changed, 9 insertions(+), 9 deletions(-)
>
> diff --git a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
> index 2e5aacdc2b..cdf65974b6 100644
> --- a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
> +++ b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
> @@ -187,12 +187,14 @@ static void * dnn_execute_layer_conv2d_thread(void *threadarg)
>  int ff_dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_operand_indexes,
>                                  int32_t output_operand_index, const void *parameters, NativeContext *ctx)
>  {
> +#if HAVE_PTHREAD_CANCEL
>      int thread_num = (ctx->options.conv2d_threads <= 0 || ctx->options.conv2d_threads > av_cpu_count())
>          ? (av_cpu_count() + 1) : (ctx->options.conv2d_threads);
> -#if HAVE_PTHREAD_CANCEL
>      int thread_stride;
> -#endif
>      ThreadParam **thread_param = av_malloc_array(thread_num, sizeof(*thread_param));
> +#else
> +    ThreadParam thread_param = { 0 };
> +#endif
>      ThreadCommonParam thread_common_param;
>      const ConvolutionalParams *conv_params = parameters;
>      int height = operands[input_operand_indexes[0]].dims[1];
> @@ -244,15 +246,13 @@ int ff_dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_opera
>      for (int i = 0; i < thread_num; i++){
>          av_freep(&thread_param[i]);
>      }
> +    av_free(thread_param);

The habit is to use av_freep(&thread_param), others LGTM.

>  #else
> -    thread_param[0] = av_malloc(sizeof(*thread_param[0]));
> -    thread_param[0]->thread_common_param = &thread_common_param;
> -    thread_param[0]->thread_start = pad_size;
> -    thread_param[0]->thread_end = height - pad_size;
> -    dnn_execute_layer_conv2d_thread((void *)thread_param[0]);
> -    av_freep(&thread_param[0]);
> +    thread_param.thread_common_param = &thread_common_param;
> +    thread_param.thread_start = pad_size;
> +    thread_param.thread_end = height - pad_size;
> +    dnn_execute_layer_conv2d_thread(&thread_param);
>  #endif
>
> -    av_freep(&thread_param);
>      return DNN_SUCCESS;
>  }
> --
> 2.27.0
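For context on the review comment: av_free() releases the buffer but leaves the caller's pointer dangling, whereas av_freep() takes the address of the pointer, frees what it points to and resets it to NULL, which is why FFmpeg reviews usually ask for it. A minimal sketch using only the libavutil memory API (the helper name is made up for illustration):

```c
#include <libavutil/mem.h>

/* Hypothetical helper, only to show the av_free() / av_freep() difference. */
static void free_example(void)
{
    int *buf = av_malloc(16 * sizeof(*buf));
    if (!buf)
        return;
    av_free(buf);    /* memory is released, but 'buf' still holds the old address */
    buf = NULL;      /* the caller has to reset it manually */

    int *buf2 = av_malloc(16 * sizeof(*buf2));
    if (!buf2)
        return;
    av_freep(&buf2); /* releases the memory and sets 'buf2' to NULL in one step */
}
```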
```diff
diff --git a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
index 2e5aacdc2b..cdf65974b6 100644
--- a/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
+++ b/libavfilter/dnn/dnn_backend_native_layer_conv2d.c
@@ -187,12 +187,14 @@ static void * dnn_execute_layer_conv2d_thread(void *threadarg)
 int ff_dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_operand_indexes,
                                 int32_t output_operand_index, const void *parameters, NativeContext *ctx)
 {
+#if HAVE_PTHREAD_CANCEL
     int thread_num = (ctx->options.conv2d_threads <= 0 || ctx->options.conv2d_threads > av_cpu_count())
         ? (av_cpu_count() + 1) : (ctx->options.conv2d_threads);
-#if HAVE_PTHREAD_CANCEL
     int thread_stride;
-#endif
     ThreadParam **thread_param = av_malloc_array(thread_num, sizeof(*thread_param));
+#else
+    ThreadParam thread_param = { 0 };
+#endif
     ThreadCommonParam thread_common_param;
     const ConvolutionalParams *conv_params = parameters;
     int height = operands[input_operand_indexes[0]].dims[1];
@@ -244,15 +246,13 @@ int ff_dnn_execute_layer_conv2d(DnnOperand *operands, const int32_t *input_opera
     for (int i = 0; i < thread_num; i++){
         av_freep(&thread_param[i]);
     }
+    av_free(thread_param);
 #else
-    thread_param[0] = av_malloc(sizeof(*thread_param[0]));
-    thread_param[0]->thread_common_param = &thread_common_param;
-    thread_param[0]->thread_start = pad_size;
-    thread_param[0]->thread_end = height - pad_size;
-    dnn_execute_layer_conv2d_thread((void *)thread_param[0]);
-    av_freep(&thread_param[0]);
+    thread_param.thread_common_param = &thread_common_param;
+    thread_param.thread_start = pad_size;
+    thread_param.thread_end = height - pad_size;
+    dnn_execute_layer_conv2d_thread(&thread_param);
 #endif

-    av_freep(&thread_param);
     return DNN_SUCCESS;
 }
```
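The core of the patch: when HAVE_PTHREAD_CANCEL is not set, the single-threaded path now uses a stack-allocated ThreadParam instead of heap-allocating a one-element array, so there is no allocation to check and nothing to leak on an early error return. Below is a minimal standalone sketch of the same pattern; the WorkItem/run_rows names are hypothetical, and a runtime flag stands in for the patch's compile-time #if:

```c
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for ThreadParam, only to illustrate the pattern. */
typedef struct WorkItem {
    int start;
    int end;
} WorkItem;

static void process_rows(WorkItem *w)
{
    printf("processing rows %d..%d\n", w->start, w->end);
}

/* Hypothetical driver: heap array only when worker threads are actually used. */
static int run_rows(int height, int nb_threads)
{
    if (nb_threads > 1) {
        /* Multi-threaded path: a per-thread array is genuinely needed,
         * and the allocation must be checked. */
        WorkItem *items = malloc(nb_threads * sizeof(*items));
        if (!items)
            return -1;
        for (int i = 0; i < nb_threads; i++) {
            items[i].start = i * height / nb_threads;
            items[i].end   = (i + 1) * height / nb_threads;
            process_rows(&items[i]);   /* would be dispatched to a thread */
        }
        free(items);
    } else {
        /* Single-threaded path: one stack object suffices; no allocation
         * means no failure to check and nothing to leak on early return. */
        WorkItem item = { 0, height };
        process_rows(&item);
    }
    return 0;
}

int main(void)
{
    return run_rows(16, 1);
}
```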
Also fixes a memleak in single-threaded mode when an error happens in
preparing the output data buffer; and also removes an unchecked allocation.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@gmail.com>
---
 .../dnn/dnn_backend_native_layer_conv2d.c | 18 +++++++++---------
 1 file changed, 9 insertions(+), 9 deletions(-)
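On the "unchecked allocation" point: before the patch, the single-threaded branch called av_malloc() and dereferenced the result without testing it, and an error while preparing the output buffer could leak it. A hedged sketch of the usual checked-allocation-with-cleanup pattern (the helper is hypothetical, not code from the patch):

```c
#include <errno.h>
#include <stddef.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>

/* Hypothetical helper: every allocation is checked, and every exit path,
 * including errors, releases the buffer through one cleanup point. */
static int fill_buffer(size_t count, int limit)
{
    int ret = 0;
    int *data = av_malloc_array(count, sizeof(*data));
    if (!data)
        return AVERROR(ENOMEM);      /* checked allocation */

    for (size_t i = 0; i < count; i++) {
        if ((int)i > limit) {
            ret = AVERROR(EINVAL);   /* error while filling the buffer ... */
            goto end;                /* ... the buffer is still freed below */
        }
        data[i] = (int)i;
    }

end:
    av_freep(&data);                 /* free and reset the pointer */
    return ret;
}
```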