@@ -10283,11 +10283,8 @@ and the second line is the name of label id 1, etc.
The label id is considered as name if the label file is not provided.
@item backend_configs
-Set the configs to be passed into backend
-
-@item async
-use DNN async execution if set (default: set),
-roll back to sync execution if the backend does not support async.
+Set the configs to be passed into the backend. To use async execution, set @option{async} (default: set).
+The filter rolls back to sync execution if the backend does not support async.
@end table
@@ -10339,15 +10336,12 @@ Set the input name of the dnn network.
Set the output name of the dnn network.
@item backend_configs
-Set the configs to be passed into backend
+Set the configs to be passed into the backend. To use async execution, set @option{async} (default: set).
+The filter rolls back to sync execution if the backend does not support async.
For tensorflow backend, you can set its configs with @option{sess_config} options,
please use tools/python/tf_sess_config.py to get the configs of TensorFlow backend for your system.
-@item async
-use DNN async execution if set (default: set),
-roll back to sync execution if the backend does not support async.
-
@end table
@subsection Examples
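[Editor's note, not part of the patch: the documentation change above can be exercised with a filtergraph along these lines. This is only a sketch; the model file and the tensor names "x"/"y" are placeholders, and backend_configs accepts other backend-specific keys as well.]

    ffmpeg -i input.jpg \
      -vf dnn_processing=dnn_backend=tensorflow:model=model.pb:input=x:output=y:backend_configs=async=1 \
      output.jpg

With async set (the default) the backend runs inference asynchronously when it can; on a build without pthread support the TensorFlow backend now downgrades this to sync by itself, as the dnn_backend_tf.c hunk below shows.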
@@ -884,6 +884,13 @@ DNNModel *ff_dnn_load_model_tf(const char *model_filename, DNNFunctionType func_
ctx->options.nireq = av_cpu_count() / 2 + 1;
}
+#if !HAVE_PTHREAD_CANCEL
+ if (ctx->options.async) {
+ ctx->options.async = 0;
+ av_log(filter_ctx, AV_LOG_WARNING, "pthread is not supported, roll back to sync.\n");
+ }
+#endif
+
tf_model->request_queue = ff_safe_queue_create();
if (!tf_model->request_queue) {
goto err;
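[Editor's note, not part of the patch: the hunk above moves the pthread-cancel fallback into the TensorFlow backend, where ctx->options.async is filled in from the backend_configs string. For readers unfamiliar with that plumbing, here is a small self-contained sketch of how a key=value config string lands in a backend option struct through the AVOption API. The struct, class, and option names are invented for illustration, and the '=' / '&' separators are an assumption; check the backend's own parsing for the separators it actually accepts.]

    /* Sketch: parse a backend_configs-style string into an options struct. */
    #include <limits.h>
    #include <stddef.h>
    #include <stdio.h>
    #include <libavutil/opt.h>

    typedef struct SketchBackendOptions {
        const AVClass *av_class;   /* first member must be the AVClass pointer */
        int async;
        int nireq;
    } SketchBackendOptions;

    #define OFFSET(x) offsetof(SketchBackendOptions, x)
    static const AVOption sketch_backend_options[] = {
        { "async", "use async execution",          OFFSET(async), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1,       0 },
        { "nireq", "number of inference requests", OFFSET(nireq), AV_OPT_TYPE_INT,  { .i64 = 0 }, 0, INT_MAX, 0 },
        { NULL }
    };

    static const AVClass sketch_backend_class = {
        .class_name = "sketch_backend",
        .item_name  = av_default_item_name,
        .option     = sketch_backend_options,
        .version    = LIBAVUTIL_VERSION_INT,
    };

    int main(void)
    {
        SketchBackendOptions opts = { .av_class = &sketch_backend_class };

        av_opt_set_defaults(&opts);
        /* '=' separates key and value, '&' separates pairs in this sketch. */
        if (av_opt_set_from_string(&opts, "async=0&nireq=4", NULL, "=", "&") < 0)
            return 1;

        printf("async=%d nireq=%d\n", opts.async, opts.nireq);
        return 0;
    }

This is why "async" no longer needs a filter-level AVOption: once it is a backend config key, the backend that owns the option struct can also decide when to ignore or downgrade it.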
@@ -84,13 +84,6 @@ int ff_dnn_init(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *fil
return AVERROR(EINVAL);
}
-#if !HAVE_PTHREAD_CANCEL
- if (ctx->async) {
- ctx->async = 0;
- av_log(filter_ctx, AV_LOG_WARNING, "pthread is not supported, roll back to sync.\n");
- }
-#endif
-
return 0;
}
@@ -46,7 +46,7 @@ typedef struct DnnContext {
{ "output", "output name of the model", OFFSET(model_outputnames_string), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },\
{ "backend_configs", "backend configs", OFFSET(backend_options), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },\
{ "options", "backend configs (deprecated, use backend_configs)", OFFSET(backend_options), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS | AV_OPT_FLAG_DEPRECATED},\
- { "async", "use DNN async inference", OFFSET(async), AV_OPT_TYPE_BOOL, { .i64 = 1}, 0, 1, FLAGS},
+ { "async", "use DNN async inference (ignored, use backend_configs='async=1')", OFFSET(async), AV_OPT_TYPE_BOOL, { .i64 = 1}, 0, 1, FLAGS},
int ff_dnn_init(DnnContext *ctx, DNNFunctionType func_type, AVFilterContext *filter_ctx);
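[Editor's note, not part of the patch: since the filter-level async AVOption is kept but documented as ignored, a user who previously forced synchronous execution per filter would migrate roughly as follows. The filter arguments are illustrative placeholders.]

    before:  dnn_processing=dnn_backend=tensorflow:model=model.pb:input=x:output=y:async=0
    after:   dnn_processing=dnn_backend=tensorflow:model=model.pb:input=x:output=y:backend_configs=async=0

The old option still parses but has no effect; the backend now decides between async and sync on its own, falling back to sync where pthread cancellation is unavailable.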
Remove async flag from filter's perspective after the unification of
async and sync modes in the DNN backend.

Signed-off-by: Shubhanshu Saxena <shubhanshu.e01@gmail.com>
---
 doc/filters.texi                 | 14 ++++----------
 libavfilter/dnn/dnn_backend_tf.c |  7 +++++++
 libavfilter/dnn_filter_common.c  |  7 -------
 libavfilter/dnn_filter_common.h  |  2 +-
 4 files changed, 12 insertions(+), 18 deletions(-)