
[FFmpeg-devel,5/5] avcodec/vaapi_av1: Use ProgressFrames

Message ID AS8P250MB0744B942017200AB7A73DBF08F0D2@AS8P250MB0744.EURP250.PROD.OUTLOOK.COM
State New
Headers show
Series [FFmpeg-devel,1/5] avcodec/progressframe: Explain how unnamed union can simplify accesses

Checks

Context Check Description
yinshiyou/make_loongarch64 success Make finished
yinshiyou/make_fate_loongarch64 success Make fate finished
andriy/make_x86 success Make finished
andriy/make_fate_x86 success Make fate finished

Commit Message

Andreas Rheinhardt April 19, 2024, 4:07 p.m. UTC
The rationale here is exactly the same as for using them in the AV1
decoder itself: it avoids av_frame_ref() when putting the same frame
into multiple reference slots. (If av_frame_ref() failed, the context
would be left in an inconsistent state that claimed an AVFrame was
valid, i.e. not blank, when in fact it was not.)
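
For illustration only (this sketch is not part of the patch and uses
made-up toy types rather than libavcodec's AVFrame/ProgressFrame), the
following self-contained program contrasts the two patterns: a per-slot
copy that can fail, as with av_frame_ref(), may leave the reference
table half-updated, whereas sharing a frame by bumping a refcount,
which is what the ProgressFrame approach amounts to here, cannot fail:

    #include <stdio.h>

    /* Toy stand-ins; names and layout are illustrative only. */
    typedef struct ToyFrame { int refcount; } ToyFrame;
    typedef struct ToySlot  { ToyFrame *f; int valid; } ToySlot;

    /* Old pattern: each reference slot takes its own copy via a call
     * that can fail (like av_frame_ref()), so updating the table can
     * stop halfway through. */
    static int slot_set_copy(ToySlot *dst, ToyFrame *src, int simulate_enomem)
    {
        dst->f = NULL;          /* the slot was already unreffed here */
        if (simulate_enomem)
            return -1;          /* caller bails out; the table is half-updated */
        src->refcount++;
        dst->f     = src;
        dst->valid = 1;
        return 0;
    }

    /* New pattern: sharing the frame is only a refcount increment and
     * cannot fail; "f != NULL" replaces the separate valid flag. */
    static void slot_set_shared(ToySlot *dst, ToyFrame *src)
    {
        src->refcount++;
        dst->f = src;
    }

    int main(void)
    {
        ToyFrame cur = { .refcount = 1 };
        ToySlot  old_tab[8] = { 0 };
        ToySlot  new_tab[8] = { 0 };

        /* The failable variant can leave the table half-updated on error:
         * slots 0..3 are updated, 4..7 are left untouched. */
        for (int i = 0; i < 8; i++)
            if (slot_set_copy(&old_tab[i], &cur, i == 4) < 0)
                break;

        /* The refcount-only variant updates every slot it is asked to. */
        for (int i = 0; i < 8; i++)
            slot_set_shared(&new_tab[i], &cur);

        printf("toy refcount on the shared frame: %d\n", cur.refcount);
        return 0;
    }

In the real code the role of the old valid flag is taken over by
checking whether ref_tab[i].f is NULL, as the patch below does.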

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
---
 libavcodec/vaapi_av1.c | 61 +++++++++++-------------------------------
 1 file changed, 15 insertions(+), 46 deletions(-)

Patch

diff --git a/libavcodec/vaapi_av1.c b/libavcodec/vaapi_av1.c
index 1f563483b9..20651a0ac2 100644
--- a/libavcodec/vaapi_av1.c
+++ b/libavcodec/vaapi_av1.c
@@ -18,17 +18,11 @@ 
  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
  */
 
-#include "libavutil/frame.h"
 #include "hwaccel_internal.h"
 #include "vaapi_decode.h"
 #include "internal.h"
 #include "av1dec.h"
-#include "thread.h"
-
-typedef struct VAAPIAV1FrameRef {
-    AVFrame *frame;
-    int valid;
-} VAAPIAV1FrameRef;
+#include "progressframe.h"
 
 typedef struct VAAPIAV1DecContext {
     VAAPIDecodeContext base;
@@ -40,8 +34,8 @@  typedef struct VAAPIAV1DecContext {
      * applying film grain here. And current_display_picture will be
      * used to apply film grain and push to downstream.
     */
-    VAAPIAV1FrameRef ref_tab[AV1_NUM_REF_FRAMES];
-    AVFrame *tmp_frame;
+    ProgressFrame ref_tab[AV1_NUM_REF_FRAMES];
+    ProgressFrame tmp_frame;
 } VAAPIAV1DecContext;
 
 static VASurfaceID vaapi_av1_surface_id(AV1Frame *vf)
@@ -70,32 +64,14 @@  static int8_t vaapi_av1_get_bit_depth_idx(AVCodecContext *avctx)
     return bit_depth == 8 ? 0 : bit_depth == 10 ? 1 : 2;
 }
 
-static int vaapi_av1_decode_init(AVCodecContext *avctx)
-{
-    VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;
-
-    ctx->tmp_frame = av_frame_alloc();
-    if (!ctx->tmp_frame)
-        return AVERROR(ENOMEM);
-
-    for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++) {
-        ctx->ref_tab[i].frame = av_frame_alloc();
-        if (!ctx->ref_tab[i].frame)
-            return AVERROR(ENOMEM);
-        ctx->ref_tab[i].valid = 0;
-    }
-
-    return ff_vaapi_decode_init(avctx);
-}
-
 static int vaapi_av1_decode_uninit(AVCodecContext *avctx)
 {
     VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;
 
-    av_frame_free(&ctx->tmp_frame);
+    ff_progress_frame_unref(&ctx->tmp_frame);
 
     for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++)
-        av_frame_free(&ctx->ref_tab[i].frame);
+        ff_progress_frame_unref(&ctx->ref_tab[i]);
 
     return ff_vaapi_decode_uninit(avctx);
 }
@@ -125,12 +101,12 @@  static int vaapi_av1_start_frame(AVCodecContext *avctx,
         goto fail;
 
     if (apply_grain) {
-        if (ctx->tmp_frame->buf[0])
-            av_frame_unref(ctx->tmp_frame);
-        err = ff_thread_get_buffer(avctx, ctx->tmp_frame, AV_GET_BUFFER_FLAG_REF);
+        ff_progress_frame_unref(&ctx->tmp_frame);
+        err = ff_progress_frame_get_buffer(avctx, &ctx->tmp_frame,
+                                           AV_GET_BUFFER_FLAG_REF);
         if (err < 0)
             goto fail;
-        pic->output_surface = ff_vaapi_get_surface_id(ctx->tmp_frame);
+        pic->output_surface = ff_vaapi_get_surface_id(ctx->tmp_frame.f);
     } else {
         pic->output_surface = ff_vaapi_get_surface_id(s->cur_frame.f);
     }
@@ -265,8 +241,8 @@  static int vaapi_av1_start_frame(AVCodecContext *avctx,
         if (pic_param.pic_info_fields.bits.frame_type == AV1_FRAME_KEY && frame_header->show_frame)
             pic_param.ref_frame_map[i] = VA_INVALID_ID;
         else
-            pic_param.ref_frame_map[i] = ctx->ref_tab[i].valid ?
-                                         ff_vaapi_get_surface_id(ctx->ref_tab[i].frame) :
+            pic_param.ref_frame_map[i] = ctx->ref_tab[i].f ?
+                                         ff_vaapi_get_surface_id(ctx->ref_tab[i].f) :
                                          vaapi_av1_surface_id(&s->ref[i]);
     }
     for (int i = 0; i < AV1_REFS_PER_FRAME; i++) {
@@ -370,17 +346,10 @@  static int vaapi_av1_end_frame(AVCodecContext *avctx)
 
     for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
         if (header->refresh_frame_flags & (1 << i)) {
-            if (ctx->ref_tab[i].frame->buf[0])
-                av_frame_unref(ctx->ref_tab[i].frame);
+            ff_progress_frame_unref(&ctx->ref_tab[i]);
 
-            if (apply_grain) {
-                ret = av_frame_ref(ctx->ref_tab[i].frame, ctx->tmp_frame);
-                if (ret < 0)
-                    return ret;
-                ctx->ref_tab[i].valid = 1;
-            } else {
-                ctx->ref_tab[i].valid = 0;
-            }
+            if (apply_grain)
+                ff_progress_frame_ref(&ctx->ref_tab[i], &ctx->tmp_frame);
         }
     }
 
@@ -431,7 +400,7 @@  const FFHWAccel ff_av1_vaapi_hwaccel = {
     .end_frame            = vaapi_av1_end_frame,
     .decode_slice         = vaapi_av1_decode_slice,
     .frame_priv_data_size = sizeof(VAAPIDecodePicture),
-    .init                 = vaapi_av1_decode_init,
+    .init                 = ff_vaapi_decode_init,
     .uninit               = vaapi_av1_decode_uninit,
     .frame_params         = ff_vaapi_common_frame_params,
     .priv_data_size       = sizeof(VAAPIAV1DecContext),