[FFmpeg-devel,v2,2/9] avcodec/vaapi_encode: introduce a base layer for vaapi encode

Message ID 20240123081932.1574-2-tong1.wu@intel.com
State New
Series [FFmpeg-devel,v2,1/9] avcodec/vaapi_encode: move pic->input_surface initialization to encode_alloc

Checks

Context Check Description
yinshiyou/make_fate_loongarch64 success Make fate finished
yinshiyou/make_loongarch64 warning New warnings during build
andriy/make_fate_x86 success Make fate finished
andriy/make_x86 warning New warnings during build

Commit Message

Wu, Tong1 Jan. 23, 2024, 8:19 a.m. UTC
From: Tong Wu <tong1.wu@intel.com>

Since VAAPI and the future D3D12VA implementation may share the same DPB
logic and some other functions, a base layer encode context is introduced
as the VAAPI context's base, and the related functions, such as
receive_packet, init and close, are extracted to a common file.

Signed-off-by: Tong Wu <tong1.wu@intel.com>
---
 libavcodec/Makefile             |   2 +-
 libavcodec/hw_base_encode.c     | 637 ++++++++++++++++++++++
 libavcodec/hw_base_encode.h     | 277 ++++++++++
 libavcodec/vaapi_encode.c       | 924 +++++++-------------------------
 libavcodec/vaapi_encode.h       | 229 +-------
 libavcodec/vaapi_encode_av1.c   |  73 +--
 libavcodec/vaapi_encode_h264.c  | 201 +++----
 libavcodec/vaapi_encode_h265.c  | 163 +++---
 libavcodec/vaapi_encode_mjpeg.c |  22 +-
 libavcodec/vaapi_encode_mpeg2.c |  53 +-
 libavcodec/vaapi_encode_vp8.c   |  28 +-
 libavcodec/vaapi_encode_vp9.c   |  70 +--
 12 files changed, 1427 insertions(+), 1252 deletions(-)
 create mode 100644 libavcodec/hw_base_encode.c
 create mode 100644 libavcodec/hw_base_encode.h
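
For context, below is a minimal sketch (not part of the patch) of how a
hardware backend is expected to plug into the new base layer. The
MyHWEncode* names are hypothetical; the hook signatures, the HWEncodeType
table and the embedded base context follow hw_base_encode.h as added by
this patch.

    #include "hw_base_encode.h"

    typedef struct MyHWEncodeContext {
        HWBaseEncodeContext base; // must be first: avctx->priv_data is cast to both types
        // backend-specific state (surfaces, buffer pools, ...)
    } MyHWEncodeContext;

    // Backend hooks driven by the shared DPB/GOP code.
    static HWBaseEncodePicture *my_hw_alloc(AVCodecContext *avctx, AVFrame *frame)
    {
        // Allocate a backend picture embedding HWBaseEncodePicture as its first member.
        return NULL;
    }

    static int my_hw_issue(AVCodecContext *avctx, HWBaseEncodePicture *pic)
    {
        // Submit the picture to the hardware encoder.
        return 0;
    }

    static int my_hw_output(AVCodecContext *avctx, HWBaseEncodePicture *pic, AVPacket *pkt)
    {
        // Copy or map the coded data into pkt.
        return 0;
    }

    static int my_hw_free(AVCodecContext *avctx, HWBaseEncodePicture *pic)
    {
        // Release backend resources for this picture.
        return 0;
    }

    static const HWEncodeType my_hw_encode_type = {
        .alloc  = my_hw_alloc,
        .issue  = my_hw_issue,
        .output = my_hw_output,
        .free   = my_hw_free,
    };

    static int my_hw_encode_init(AVCodecContext *avctx)
    {
        MyHWEncodeContext *priv = avctx->priv_data;
        int err = ff_hw_base_encode_init(avctx);
        if (err < 0)
            return err;
        // The shared picture management then calls these hooks from
        // ff_hw_base_encode_receive_packet(), which the codec's
        // receive_packet callback is expected to call or point to.
        priv->base.hw = &my_hw_encode_type;
        return 0;
    }
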

Patch

diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index bb42095165..f9a5c9d616 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -163,7 +163,7 @@  OBJS-$(CONFIG_STARTCODE)               += startcode.o
 OBJS-$(CONFIG_TEXTUREDSP)              += texturedsp.o
 OBJS-$(CONFIG_TEXTUREDSPENC)           += texturedspenc.o
 OBJS-$(CONFIG_TPELDSP)                 += tpeldsp.o
-OBJS-$(CONFIG_VAAPI_ENCODE)            += vaapi_encode.o
+OBJS-$(CONFIG_VAAPI_ENCODE)            += vaapi_encode.o hw_base_encode.o
 OBJS-$(CONFIG_AV1_AMF_ENCODER)         += amfenc_av1.o
 OBJS-$(CONFIG_VC1DSP)                  += vc1dsp.o
 OBJS-$(CONFIG_VIDEODSP)                += videodsp.o
diff --git a/libavcodec/hw_base_encode.c b/libavcodec/hw_base_encode.c
new file mode 100644
index 0000000000..62adda2fc3
--- /dev/null
+++ b/libavcodec/hw_base_encode.c
@@ -0,0 +1,637 @@ 
+/*
+ * Copyright (c) 2024 Intel Corporation
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/common.h"
+#include "libavutil/internal.h"
+#include "libavutil/log.h"
+#include "libavutil/pixdesc.h"
+
+#include "encode.h"
+#include "avcodec.h"
+#include "hw_base_encode.h"
+
+static void hw_base_encode_add_ref(AVCodecContext *avctx,
+                                   HWBaseEncodePicture *pic,
+                                   HWBaseEncodePicture *target,
+                                   int is_ref, int in_dpb, int prev)
+{
+    int refs = 0;
+
+    if (is_ref) {
+        av_assert0(pic != target);
+        av_assert0(pic->nb_refs[0] < MAX_PICTURE_REFERENCES &&
+                   pic->nb_refs[1] < MAX_PICTURE_REFERENCES);
+        if (target->display_order < pic->display_order)
+            pic->refs[0][pic->nb_refs[0]++] = target;
+        else
+            pic->refs[1][pic->nb_refs[1]++] = target;
+        ++refs;
+    }
+
+    if (in_dpb) {
+        av_assert0(pic->nb_dpb_pics < MAX_DPB_SIZE);
+        pic->dpb[pic->nb_dpb_pics++] = target;
+        ++refs;
+    }
+
+    if (prev) {
+        av_assert0(!pic->prev);
+        pic->prev = target;
+        ++refs;
+    }
+
+    target->ref_count[0] += refs;
+    target->ref_count[1] += refs;
+}
+
+static void hw_base_encode_remove_refs(AVCodecContext *avctx,
+                                       HWBaseEncodePicture *pic,
+                                       int level)
+{
+    int i;
+
+    if (pic->ref_removed[level])
+        return;
+
+    for (i = 0; i < pic->nb_refs[0]; i++) {
+        av_assert0(pic->refs[0][i]);
+        --pic->refs[0][i]->ref_count[level];
+        av_assert0(pic->refs[0][i]->ref_count[level] >= 0);
+    }
+
+    for (i = 0; i < pic->nb_refs[1]; i++) {
+        av_assert0(pic->refs[1][i]);
+        --pic->refs[1][i]->ref_count[level];
+        av_assert0(pic->refs[1][i]->ref_count[level] >= 0);
+    }
+
+    for (i = 0; i < pic->nb_dpb_pics; i++) {
+        av_assert0(pic->dpb[i]);
+        --pic->dpb[i]->ref_count[level];
+        av_assert0(pic->dpb[i]->ref_count[level] >= 0);
+    }
+
+    av_assert0(pic->prev || pic->type == PICTURE_TYPE_IDR);
+    if (pic->prev) {
+        --pic->prev->ref_count[level];
+        av_assert0(pic->prev->ref_count[level] >= 0);
+    }
+
+    pic->ref_removed[level] = 1;
+}
+
+static void hw_base_encode_set_b_pictures(AVCodecContext *avctx,
+                                          HWBaseEncodePicture *start,
+                                          HWBaseEncodePicture *end,
+                                          HWBaseEncodePicture *prev,
+                                          int current_depth,
+                                          HWBaseEncodePicture **last)
+{
+    HWBaseEncodeContext *ctx = avctx->priv_data;
+    HWBaseEncodePicture *pic, *next, *ref;
+    int i, len;
+
+    av_assert0(start && end && start != end && start->next != end);
+
+    // If we are at the maximum depth then encode all pictures as
+    // non-referenced B-pictures.  Also do this if there is exactly one
+    // picture left, since there will be nothing to reference it.
+    if (current_depth == ctx->max_b_depth || start->next->next == end) {
+        for (pic = start->next; pic; pic = pic->next) {
+            if (pic == end)
+                break;
+            pic->type    = PICTURE_TYPE_B;
+            pic->b_depth = current_depth;
+
+            hw_base_encode_add_ref(avctx, pic, start, 1, 1, 0);
+            hw_base_encode_add_ref(avctx, pic, end,   1, 1, 0);
+            hw_base_encode_add_ref(avctx, pic, prev,  0, 0, 1);
+
+            for (ref = end->refs[1][0]; ref; ref = ref->refs[1][0])
+                hw_base_encode_add_ref(avctx, pic, ref, 0, 1, 0);
+        }
+        *last = prev;
+
+    } else {
+        // Split the current list at the midpoint with a referenced
+        // B-picture, then descend into each side separately.
+        len = 0;
+        for (pic = start->next; pic != end; pic = pic->next)
+            ++len;
+        for (pic = start->next, i = 1; 2 * i < len; pic = pic->next, i++);
+
+        pic->type    = PICTURE_TYPE_B;
+        pic->b_depth = current_depth;
+
+        pic->is_reference = 1;
+
+        hw_base_encode_add_ref(avctx, pic, pic,   0, 1, 0);
+        hw_base_encode_add_ref(avctx, pic, start, 1, 1, 0);
+        hw_base_encode_add_ref(avctx, pic, end,   1, 1, 0);
+        hw_base_encode_add_ref(avctx, pic, prev,  0, 0, 1);
+
+        for (ref = end->refs[1][0]; ref; ref = ref->refs[1][0])
+            hw_base_encode_add_ref(avctx, pic, ref, 0, 1, 0);
+
+        if (i > 1)
+            hw_base_encode_set_b_pictures(avctx, start, pic, pic,
+                                          current_depth + 1, &next);
+        else
+            next = pic;
+
+        hw_base_encode_set_b_pictures(avctx, pic, end, next,
+                                      current_depth + 1, last);
+    }
+}
+
+static void hw_base_encode_add_next_prev(AVCodecContext *avctx,
+                                         HWBaseEncodePicture *pic)
+{
+    HWBaseEncodeContext *ctx = avctx->priv_data;
+    int i;
+
+    if (!pic)
+        return;
+
+    if (pic->type == PICTURE_TYPE_IDR) {
+        for (i = 0; i < ctx->nb_next_prev; i++) {
+            --ctx->next_prev[i]->ref_count[0];
+            ctx->next_prev[i] = NULL;
+        }
+        ctx->next_prev[0] = pic;
+        ++pic->ref_count[0];
+        ctx->nb_next_prev = 1;
+
+        return;
+    }
+
+    if (ctx->nb_next_prev < MAX_PICTURE_REFERENCES) {
+        ctx->next_prev[ctx->nb_next_prev++] = pic;
+        ++pic->ref_count[0];
+    } else {
+        --ctx->next_prev[0]->ref_count[0];
+        for (i = 0; i < MAX_PICTURE_REFERENCES - 1; i++)
+            ctx->next_prev[i] = ctx->next_prev[i + 1];
+        ctx->next_prev[i] = pic;
+        ++pic->ref_count[0];
+    }
+}
+
+static int hw_base_encode_pick_next(AVCodecContext *avctx,
+                                    HWBaseEncodePicture **pic_out)
+{
+    HWBaseEncodeContext *ctx = avctx->priv_data;
+    HWBaseEncodePicture *pic = NULL, *prev = NULL, *next, *start;
+    int i, b_counter, closed_gop_end;
+
+    // If there are any B-frames already queued, the next one to encode
+    // is the earliest not-yet-issued frame for which all references are
+    // available.
+    for (pic = ctx->pic_start; pic; pic = pic->next) {
+        if (pic->encode_issued)
+            continue;
+        if (pic->type != PICTURE_TYPE_B)
+            continue;
+        for (i = 0; i < pic->nb_refs[0]; i++) {
+            if (!pic->refs[0][i]->encode_issued)
+                break;
+        }
+        if (i != pic->nb_refs[0])
+            continue;
+
+        for (i = 0; i < pic->nb_refs[1]; i++) {
+            if (!pic->refs[1][i]->encode_issued)
+                break;
+        }
+        if (i == pic->nb_refs[1])
+            break;
+    }
+
+    if (pic) {
+        av_log(avctx, AV_LOG_DEBUG, "Pick B-picture at depth %d to "
+               "encode next.\n", pic->b_depth);
+        *pic_out = pic;
+        return 0;
+    }
+
+    // Find the B-per-Pth available picture to become the next picture
+    // on the top layer.
+    start = NULL;
+    b_counter = 0;
+    closed_gop_end = ctx->closed_gop ||
+                     ctx->idr_counter == ctx->gop_per_idr;
+    for (pic = ctx->pic_start; pic; pic = next) {
+        next = pic->next;
+        if (pic->encode_issued) {
+            start = pic;
+            continue;
+        }
+        // If the next available picture is force-IDR, encode it to start
+        // a new GOP immediately.
+        if (pic->force_idr)
+            break;
+        if (b_counter == ctx->b_per_p)
+            break;
+        // If this picture ends a closed GOP or starts a new GOP then it
+        // needs to be in the top layer.
+        if (ctx->gop_counter + b_counter + closed_gop_end >= ctx->gop_size)
+            break;
+        // If the picture after this one is force-IDR, we need to encode
+        // this one in the top layer.
+        if (next && next->force_idr)
+            break;
+        ++b_counter;
+    }
+
+    // At the end of the stream the last picture must be in the top layer.
+    if (!pic && ctx->end_of_stream) {
+        --b_counter;
+        pic = ctx->pic_end;
+        if (pic->encode_complete)
+            return AVERROR_EOF;
+        else if (pic->encode_issued)
+            return AVERROR(EAGAIN);
+    }
+
+    if (!pic) {
+        av_log(avctx, AV_LOG_DEBUG, "Pick nothing to encode next - "
+               "need more input for reference pictures.\n");
+        return AVERROR(EAGAIN);
+    }
+    if (ctx->input_order <= ctx->decode_delay && !ctx->end_of_stream) {
+        av_log(avctx, AV_LOG_DEBUG, "Pick nothing to encode next - "
+               "need more input for timestamps.\n");
+        return AVERROR(EAGAIN);
+    }
+
+    if (pic->force_idr) {
+        av_log(avctx, AV_LOG_DEBUG, "Pick forced IDR-picture to "
+               "encode next.\n");
+        pic->type = PICTURE_TYPE_IDR;
+        ctx->idr_counter = 1;
+        ctx->gop_counter = 1;
+
+    } else if (ctx->gop_counter + b_counter >= ctx->gop_size) {
+        if (ctx->idr_counter == ctx->gop_per_idr) {
+            av_log(avctx, AV_LOG_DEBUG, "Pick new-GOP IDR-picture to "
+                   "encode next.\n");
+            pic->type = PICTURE_TYPE_IDR;
+            ctx->idr_counter = 1;
+        } else {
+            av_log(avctx, AV_LOG_DEBUG, "Pick new-GOP I-picture to "
+                   "encode next.\n");
+            pic->type = PICTURE_TYPE_I;
+            ++ctx->idr_counter;
+        }
+        ctx->gop_counter = 1;
+
+    } else {
+        if (ctx->gop_counter + b_counter + closed_gop_end == ctx->gop_size) {
+            av_log(avctx, AV_LOG_DEBUG, "Pick group-end P-picture to "
+                   "encode next.\n");
+        } else {
+            av_log(avctx, AV_LOG_DEBUG, "Pick normal P-picture to "
+                   "encode next.\n");
+        }
+        pic->type = PICTURE_TYPE_P;
+        av_assert0(start);
+        ctx->gop_counter += 1 + b_counter;
+    }
+    pic->is_reference = 1;
+    *pic_out = pic;
+
+    hw_base_encode_add_ref(avctx, pic, pic, 0, 1, 0);
+    if (pic->type != PICTURE_TYPE_IDR) {
+        // TODO: apply both previous and forward multi reference for all vaapi encoders.
+        // And L0/L1 reference frame number can be set dynamically through query
+        // VAConfigAttribEncMaxRefFrames attribute.
+        if (avctx->codec_id == AV_CODEC_ID_AV1) {
+            for (i = 0; i < ctx->nb_next_prev; i++)
+                hw_base_encode_add_ref(avctx, pic, ctx->next_prev[i],
+                                       pic->type == PICTURE_TYPE_P,
+                                       b_counter > 0, 0);
+        } else
+            hw_base_encode_add_ref(avctx, pic, start,
+                                   pic->type == PICTURE_TYPE_P,
+                                   b_counter > 0, 0);
+
+        hw_base_encode_add_ref(avctx, pic, ctx->next_prev[ctx->nb_next_prev - 1], 0, 0, 1);
+    }
+
+    if (b_counter > 0) {
+        hw_base_encode_set_b_pictures(avctx, start, pic, pic, 1,
+                                      &prev);
+    } else {
+        prev = pic;
+    }
+    hw_base_encode_add_next_prev(avctx, prev);
+
+    return 0;
+}
+
+static int hw_base_encode_clear_old(AVCodecContext *avctx)
+{
+    HWBaseEncodeContext *ctx = avctx->priv_data;
+    HWBaseEncodePicture *pic, *prev, *next;
+
+    av_assert0(ctx->pic_start);
+
+    // Remove direct references once each picture is complete.
+    for (pic = ctx->pic_start; pic; pic = pic->next) {
+        if (pic->encode_complete && pic->next)
+            hw_base_encode_remove_refs(avctx, pic, 0);
+    }
+
+    // Remove indirect references once a picture has no direct references.
+    for (pic = ctx->pic_start; pic; pic = pic->next) {
+        if (pic->encode_complete && pic->ref_count[0] == 0)
+            hw_base_encode_remove_refs(avctx, pic, 1);
+    }
+
+    // Clear out all complete pictures with no remaining references.
+    prev = NULL;
+    for (pic = ctx->pic_start; pic; pic = next) {
+        next = pic->next;
+        if (pic->encode_complete && pic->ref_count[1] == 0) {
+            av_assert0(pic->ref_removed[0] && pic->ref_removed[1]);
+            if (prev)
+                prev->next = next;
+            else
+                ctx->pic_start = next;
+            ctx->hw->free(avctx, pic);
+        } else {
+            prev = pic;
+        }
+    }
+
+    return 0;
+}
+
+static int hw_base_encode_check_frame(AVCodecContext *avctx,
+                                      const AVFrame *frame)
+{
+    HWBaseEncodeContext *ctx = avctx->priv_data;
+
+    if ((frame->crop_top  || frame->crop_bottom ||
+         frame->crop_left || frame->crop_right) && !ctx->crop_warned) {
+        av_log(avctx, AV_LOG_WARNING, "Cropping information on input "
+               "frames ignored due to lack of API support.\n");
+        ctx->crop_warned = 1;
+    }
+
+    if (!ctx->roi_allowed) {
+        AVFrameSideData *sd =
+            av_frame_get_side_data(frame, AV_FRAME_DATA_REGIONS_OF_INTEREST);
+
+        if (sd && !ctx->roi_warned) {
+            av_log(avctx, AV_LOG_WARNING, "ROI side data on input "
+                   "frames ignored due to lack of driver support.\n");
+            ctx->roi_warned = 1;
+        }
+    }
+
+    return 0;
+}
+
+static int hw_base_encode_send_frame(AVCodecContext *avctx, AVFrame *frame)
+{
+    HWBaseEncodeContext *ctx = avctx->priv_data;
+    HWBaseEncodePicture *pic;
+    int err;
+
+    if (frame) {
+        av_log(avctx, AV_LOG_DEBUG, "Input frame: %ux%u (%"PRId64").\n",
+               frame->width, frame->height, frame->pts);
+
+        err = hw_base_encode_check_frame(avctx, frame);
+        if (err < 0)
+            return err;
+
+        pic = ctx->hw->alloc(avctx, frame);
+        if (!pic)
+            return AVERROR(ENOMEM);
+
+        pic->input_image = av_frame_alloc();
+        if (!pic->input_image) {
+            err = AVERROR(ENOMEM);
+            goto fail;
+        }
+
+        if (ctx->input_order == 0 || frame->pict_type == AV_PICTURE_TYPE_I)
+            pic->force_idr = 1;
+
+        pic->pts = frame->pts;
+        pic->duration = frame->duration;
+
+        if (avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) {
+            err = av_buffer_replace(&pic->opaque_ref, frame->opaque_ref);
+            if (err < 0)
+                goto fail;
+
+            pic->opaque = frame->opaque;
+        }
+
+        av_frame_move_ref(pic->input_image, frame);
+
+        if (ctx->input_order == 0)
+            ctx->first_pts = pic->pts;
+        if (ctx->input_order == ctx->decode_delay)
+            ctx->dts_pts_diff = pic->pts - ctx->first_pts;
+        if (ctx->output_delay > 0)
+            ctx->ts_ring[ctx->input_order %
+                        (3 * ctx->output_delay + ctx->async_depth)] = pic->pts;
+
+        pic->display_order = ctx->input_order;
+        ++ctx->input_order;
+
+        if (ctx->pic_start) {
+            ctx->pic_end->next = pic;
+            ctx->pic_end       = pic;
+        } else {
+            ctx->pic_start     = pic;
+            ctx->pic_end       = pic;
+        }
+
+    } else {
+        ctx->end_of_stream = 1;
+
+        // Fix timestamps if we hit end-of-stream before the initial decode
+        // delay has elapsed.
+        if (ctx->input_order < ctx->decode_delay)
+            ctx->dts_pts_diff = ctx->pic_end->pts - ctx->first_pts;
+    }
+
+    return 0;
+
+fail:
+    ctx->hw->free(avctx, pic);
+    return err;
+}
+
+int ff_hw_base_encode_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
+{
+    HWBaseEncodeContext *ctx = avctx->priv_data;
+    HWBaseEncodePicture *pic = NULL;
+    AVFrame *frame = ctx->frame;
+    int err;
+
+start:
+    /** If there is no B frame before the repeat P frame, send the repeat P frame out. */
+    if (ctx->tail_pkt->size) {
+        for (HWBaseEncodePicture *tmp = ctx->pic_start; tmp; tmp = tmp->next) {
+            if (tmp->type == PICTURE_TYPE_B && tmp->pts < ctx->tail_pkt->pts)
+                break;
+            else if (!tmp->next) {
+                av_packet_move_ref(pkt, ctx->tail_pkt);
+                goto end;
+            }
+        }
+    }
+
+    err = ff_encode_get_frame(avctx, frame);
+    if (err < 0 && err != AVERROR_EOF)
+        return err;
+
+    if (err == AVERROR_EOF)
+        frame = NULL;
+
+    if (!(ctx->hw && ctx->hw->alloc && ctx->hw->issue &&
+          ctx->hw->output && ctx->hw->free)) {
+        err = AVERROR(EINVAL);
+        return err;
+    }
+
+    err = hw_base_encode_send_frame(avctx, frame);
+    if (err < 0)
+        return err;
+
+    if (!ctx->pic_start) {
+        if (ctx->end_of_stream)
+            return AVERROR_EOF;
+        else
+            return AVERROR(EAGAIN);
+    }
+
+    if (ctx->async_encode) {
+        if (av_fifo_can_write(ctx->encode_fifo)) {
+            err = hw_base_encode_pick_next(avctx, &pic);
+            if (!err) {
+                av_assert0(pic);
+                pic->encode_order = ctx->encode_order +
+                    av_fifo_can_read(ctx->encode_fifo);
+                err = ctx->hw->issue(avctx, pic);
+                if (err < 0) {
+                    av_log(avctx, AV_LOG_ERROR, "Encode failed: %d.\n", err);
+                    return err;
+                }
+                av_fifo_write(ctx->encode_fifo, &pic, 1);
+            }
+        }
+
+        if (!av_fifo_can_read(ctx->encode_fifo))
+            return err;
+
+        // More frames can be buffered
+        if (av_fifo_can_write(ctx->encode_fifo) && !ctx->end_of_stream)
+            return AVERROR(EAGAIN);
+
+        av_fifo_read(ctx->encode_fifo, &pic, 1);
+        ctx->encode_order = pic->encode_order + 1;
+    } else {
+        err = hw_base_encode_pick_next(avctx, &pic);
+        if (err < 0)
+            return err;
+        av_assert0(pic);
+
+        pic->encode_order = ctx->encode_order++;
+
+        err = ctx->hw->issue(avctx, pic);
+        if (err < 0) {
+            av_log(avctx, AV_LOG_ERROR, "Encode failed: %d.\n", err);
+            return err;
+        }
+    }
+
+    err = ctx->hw->output(avctx, pic, pkt);
+    if (err < 0) {
+        av_log(avctx, AV_LOG_ERROR, "Output failed: %d.\n", err);
+        return err;
+    }
+
+    ctx->output_order = pic->encode_order;
+    hw_base_encode_clear_old(avctx);
+
+    /** loop to get an available pkt in encoder flushing. */
+    if (ctx->end_of_stream && !pkt->size)
+        goto start;
+
+end:
+    if (pkt->size)
+        av_log(avctx, AV_LOG_DEBUG, "Output packet: pts %"PRId64", dts %"PRId64", "
+               "size %d bytes.\n", pkt->pts, pkt->dts, pkt->size);
+
+    return 0;
+}
+
+int ff_hw_base_encode_init(AVCodecContext *avctx)
+{
+    HWBaseEncodeContext *ctx = avctx->priv_data;
+
+    ctx->frame = av_frame_alloc();
+    if (!ctx->frame)
+        return AVERROR(ENOMEM);
+
+    if (!avctx->hw_frames_ctx) {
+        av_log(avctx, AV_LOG_ERROR, "A hardware frames reference is "
+               "required to associate the encoding device.\n");
+        return AVERROR(EINVAL);
+    }
+
+    ctx->input_frames_ref = av_buffer_ref(avctx->hw_frames_ctx);
+    if (!ctx->input_frames_ref)
+        return AVERROR(ENOMEM);
+
+    ctx->input_frames = (AVHWFramesContext *)ctx->input_frames_ref->data;
+
+    ctx->device_ref = av_buffer_ref(ctx->input_frames->device_ref);
+    if (!ctx->device_ref)
+        return AVERROR(ENOMEM);
+
+    ctx->device = (AVHWDeviceContext *)ctx->device_ref->data;
+
+    ctx->tail_pkt = av_packet_alloc();
+    if (!ctx->tail_pkt)
+        return AVERROR(ENOMEM);
+
+    return 0;
+}
+
+int ff_hw_base_encode_close(AVCodecContext *avctx)
+{
+    HWBaseEncodeContext *ctx = avctx->priv_data;
+
+    av_packet_free(&ctx->tail_pkt);
+    av_buffer_unref(&ctx->device_ref);
+    av_buffer_unref(&ctx->input_frames_ref);
+    av_frame_free(&ctx->frame);
+
+    return 0;
+}
diff --git a/libavcodec/hw_base_encode.h b/libavcodec/hw_base_encode.h
new file mode 100644
index 0000000000..be4c6b034e
--- /dev/null
+++ b/libavcodec/hw_base_encode.h
@@ -0,0 +1,277 @@ 
+/*
+ * Copyright (c) 2024 Intel Corporation
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_HW_BASE_ENCODE_H
+#define AVCODEC_HW_BASE_ENCODE_H
+
+#include "libavutil/hwcontext.h"
+#include "libavutil/fifo.h"
+
+#include "avcodec.h"
+#include "hwconfig.h"
+
+enum {
+    MAX_DPB_SIZE           = 16,
+    MAX_PICTURE_REFERENCES = 2,
+    MAX_REORDER_DELAY      = 16,
+    MAX_ASYNC_DEPTH        = 64,
+    MAX_REFERENCE_LIST_NUM = 2,
+};
+
+enum {
+    PICTURE_TYPE_IDR = 0,
+    PICTURE_TYPE_I   = 1,
+    PICTURE_TYPE_P   = 2,
+    PICTURE_TYPE_B   = 3,
+};
+
+enum {
+    // Codec supports controlling the subdivision of pictures into slices.
+    FLAG_SLICE_CONTROL         = 1 << 0,
+    // Codec only supports constant quality (no rate control).
+    FLAG_CONSTANT_QUALITY_ONLY = 1 << 1,
+    // Codec is intra-only.
+    FLAG_INTRA_ONLY            = 1 << 2,
+    // Codec supports B-pictures.
+    FLAG_B_PICTURES            = 1 << 3,
+    // Codec supports referencing B-pictures.
+    FLAG_B_PICTURE_REFERENCES  = 1 << 4,
+    // Codec supports non-IDR key pictures (that is, key pictures do
+    // not necessarily empty the DPB).
+    FLAG_NON_IDR_KEY_PICTURES  = 1 << 5,
+    // Codec outputs packets without timestamp delay, which means the
+    // output packet has the same PTS and DTS.
+    FLAG_TIMESTAMP_NO_DELAY    = 1 << 6,
+};
+
+enum {
+    RC_MODE_AUTO,
+    RC_MODE_CQP,
+    RC_MODE_CBR,
+    RC_MODE_VBR,
+    RC_MODE_ICQ,
+    RC_MODE_QVBR,
+    RC_MODE_AVBR,
+    RC_MODE_MAX = RC_MODE_AVBR,
+};
+
+typedef struct HWBaseEncodePicture {
+    struct HWBaseEncodePicture *next;
+
+    int64_t         display_order;
+    int64_t         encode_order;
+    int64_t         pts;
+    int64_t         duration;
+    int             force_idr;
+
+    void           *opaque;
+    AVBufferRef    *opaque_ref;
+
+    int             type;
+    int             b_depth;
+    int             encode_issued;
+    int             encode_complete;
+
+    AVFrame        *input_image;
+    AVFrame        *recon_image;
+
+    void           *priv_data;
+
+    // Whether this picture is a reference picture.
+    int             is_reference;
+
+    // The contents of the DPB after this picture has been decoded.
+    // This will contain the picture itself if it is a reference picture,
+    // but not if it isn't.
+    int                     nb_dpb_pics;
+    struct HWBaseEncodePicture *dpb[MAX_DPB_SIZE];
+    // The reference pictures used in decoding this picture. If they are
+    // used by later pictures they will also appear in the DPB. ref[0][] for
+    // previous reference frames. ref[1][] for future reference frames.
+    int                     nb_refs[MAX_REFERENCE_LIST_NUM];
+    struct HWBaseEncodePicture *refs[MAX_REFERENCE_LIST_NUM][MAX_PICTURE_REFERENCES];
+    // The previous reference picture in encode order.  Must be in at least
+    // one of the reference list and DPB list.
+    struct HWBaseEncodePicture *prev;
+    // Reference count for other pictures referring to this one through
+    // the above pointers, directly from incomplete pictures and indirectly
+    // through completed pictures.
+    int             ref_count[2];
+    int             ref_removed[2];
+} HWBaseEncodePicture;
+
+typedef struct HWEncodeType {
+    HWBaseEncodePicture * (*alloc)(AVCodecContext *avctx, AVFrame *frame);
+
+    int (*issue)(AVCodecContext *avctx, HWBaseEncodePicture *base_pic);
+
+    int (*output)(AVCodecContext *avctx, HWBaseEncodePicture *base_pic, AVPacket *pkt);
+
+    int (*free)(AVCodecContext *avctx, HWBaseEncodePicture *base_pic);
+}  HWEncodeType;
+
+typedef struct HWBaseEncodeContext {
+    const AVClass *class;
+
+    // Hardware-specific hooks.
+    const struct HWEncodeType *hw;
+
+    // Global options.
+
+    // Number of I frames between IDR frames.
+    int             idr_interval;
+
+    // Desired B frame reference depth.
+    int             desired_b_depth;
+
+    // Explicitly set RC mode (otherwise attempt to pick from
+    // available modes).
+    int             explicit_rc_mode;
+
+    // Explicitly-set QP, for use with the "qp" options.
+    // (Forces CQP mode when set, overriding everything else.)
+    int             explicit_qp;
+
+    // The required size of surfaces.  This is probably the input
+    // size (AVCodecContext.width|height) aligned up to whatever
+    // block size is required by the codec.
+    int             surface_width;
+    int             surface_height;
+
+    // The block size for slice calculations.
+    int             slice_block_width;
+    int             slice_block_height;
+
+    // RC quality level - meaning depends on codec and RC mode.
+    // In CQP mode this sets the fixed quantiser value.
+    int             rc_quality;
+
+    AVBufferRef    *device_ref;
+    AVHWDeviceContext *device;
+
+    // The hardware frame context containing the input frames.
+    AVBufferRef    *input_frames_ref;
+    AVHWFramesContext *input_frames;
+
+    // The hardware frame context containing the reconstructed frames.
+    AVBufferRef    *recon_frames_ref;
+    AVHWFramesContext *recon_frames;
+
+    // Current encoding window, in display (input) order.
+    HWBaseEncodePicture *pic_start, *pic_end;
+    // The next picture to use as the previous reference picture in
+    // encoding order. Order from small to large in encoding order.
+    HWBaseEncodePicture *next_prev[MAX_PICTURE_REFERENCES];
+    int                  nb_next_prev;
+
+    // Next input order index (display order).
+    int64_t         input_order;
+    // Number of frames that output is behind input.
+    int64_t         output_delay;
+    // Next encode order index.
+    int64_t         encode_order;
+    // Number of frames decode output will need to be delayed.
+    int64_t         decode_delay;
+    // Next output order index (in encode order).
+    int64_t         output_order;
+
+    // Timestamp handling.
+    int64_t         first_pts;
+    int64_t         dts_pts_diff;
+    int64_t         ts_ring[MAX_REORDER_DELAY * 3 +
+                            MAX_ASYNC_DEPTH];
+
+    // Frame type decision.
+    int gop_size;
+    int closed_gop;
+    int gop_per_idr;
+    int p_per_i;
+    int max_b_depth;
+    int b_per_p;
+    int force_idr;
+    int idr_counter;
+    int gop_counter;
+    int end_of_stream;
+    int p_to_gpb;
+
+    // Whether the driver supports ROI at all.
+    int             roi_allowed;
+
+    // The encoder does not support cropping information, so warn about
+    // it the first time we encounter any nonzero crop fields.
+    int             crop_warned;
+    // If the driver does not support ROI then warn the first time we
+    // encounter a frame with ROI side data.
+    int             roi_warned;
+
+    AVFrame         *frame;
+
+    // Whether the HW supports sync buffer function.
+    // If supported, encode_fifo/async_depth will be used together.
+    // Used for output buffer synchronization.
+    int             async_encode;
+
+    // Store buffered pic
+    AVFifo          *encode_fifo;
+    // Max number of frame buffered in encoder.
+    int             async_depth;
+
+    /** Tail data of a pic, now only used for av1 repeat frame header. */
+    AVPacket        *tail_pkt;
+} HWBaseEncodeContext;
+
+int ff_hw_base_encode_receive_packet(AVCodecContext *avctx, AVPacket *pkt);
+
+int ff_hw_base_encode_init(AVCodecContext *avctx);
+
+int ff_hw_base_encode_close(AVCodecContext *avctx);
+
+#define HW_BASE_ENCODE_COMMON_OPTIONS \
+    { "idr_interval", \
+      "Distance (in I-frames) between IDR frames", \
+      OFFSET(common.base.idr_interval), AV_OPT_TYPE_INT, \
+      { .i64 = 0 }, 0, INT_MAX, FLAGS }, \
+    { "b_depth", \
+      "Maximum B-frame reference depth", \
+      OFFSET(common.base.desired_b_depth), AV_OPT_TYPE_INT, \
+      { .i64 = 1 }, 1, INT_MAX, FLAGS }, \
+    { "async_depth", "Maximum processing parallelism. " \
+      "Increase this to improve single channel performance.", \
+      OFFSET(common.base.async_depth), AV_OPT_TYPE_INT, \
+      { .i64 = 2 }, 1, MAX_ASYNC_DEPTH, FLAGS }
+
+#define HW_BASE_ENCODE_RC_MODE(name, desc) \
+    { #name, desc, 0, AV_OPT_TYPE_CONST, { .i64 = RC_MODE_ ## name }, \
+      0, 0, FLAGS, "rc_mode" }
+#define HW_BASE_ENCODE_RC_OPTIONS \
+    { "rc_mode",\
+      "Set rate control mode", \
+      OFFSET(common.base.explicit_rc_mode), AV_OPT_TYPE_INT, \
+      { .i64 = RC_MODE_AUTO }, RC_MODE_AUTO, RC_MODE_MAX, FLAGS, "rc_mode" }, \
+    { "auto", "Choose mode automatically based on other parameters", \
+      0, AV_OPT_TYPE_CONST, { .i64 = RC_MODE_AUTO }, 0, 0, FLAGS, "rc_mode" }, \
+    HW_BASE_ENCODE_RC_MODE(CQP,  "Constant-quality"), \
+    HW_BASE_ENCODE_RC_MODE(CBR,  "Constant-bitrate"), \
+    HW_BASE_ENCODE_RC_MODE(VBR,  "Variable-bitrate"), \
+    HW_BASE_ENCODE_RC_MODE(ICQ,  "Intelligent constant-quality"), \
+    HW_BASE_ENCODE_RC_MODE(QVBR, "Quality-defined variable-bitrate"), \
+    HW_BASE_ENCODE_RC_MODE(AVBR, "Average variable-bitrate")
+
+#endif /* AVCODEC_HW_BASE_ENCODE_H */
diff --git a/libavcodec/vaapi_encode.c b/libavcodec/vaapi_encode.c
index 38d855fbd4..e2f968c36d 100644
--- a/libavcodec/vaapi_encode.c
+++ b/libavcodec/vaapi_encode.c
@@ -139,22 +139,24 @@  static int vaapi_encode_make_misc_param_buffer(AVCodecContext *avctx,
 static int vaapi_encode_wait(AVCodecContext *avctx,
                              VAAPIEncodePicture *pic)
 {
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
     VAAPIEncodeContext *ctx = avctx->priv_data;
+    HWBaseEncodePicture *base_pic = (HWBaseEncodePicture*)pic;
     VAStatus vas;
 
-    av_assert0(pic->encode_issued);
+    av_assert0(base_pic->encode_issued);
 
-    if (pic->encode_complete) {
+    if (base_pic->encode_complete) {
         // Already waited for this picture.
         return 0;
     }
 
     av_log(avctx, AV_LOG_DEBUG, "Sync to pic %"PRId64"/%"PRId64" "
-           "(input surface %#x).\n", pic->display_order,
-           pic->encode_order, pic->input_surface);
+           "(input surface %#x).\n", base_pic->display_order,
+           base_pic->encode_order, pic->input_surface);
 
 #if VA_CHECK_VERSION(1, 9, 0)
-    if (ctx->has_sync_buffer_func) {
+    if (base_ctx->async_encode) {
         vas = vaSyncBuffer(ctx->hwctx->display,
                            pic->output_buffer,
                            VA_TIMEOUT_INFINITE);
@@ -175,9 +177,9 @@  static int vaapi_encode_wait(AVCodecContext *avctx,
     }
 
     // Input is definitely finished with now.
-    av_frame_free(&pic->input_image);
+    av_frame_free(&base_pic->input_image);
 
-    pic->encode_complete = 1;
+    base_pic->encode_complete = 1;
     return 0;
 }
 
@@ -264,9 +266,11 @@  static int vaapi_encode_make_tile_slice(AVCodecContext *avctx,
 }
 
 static int vaapi_encode_issue(AVCodecContext *avctx,
-                              VAAPIEncodePicture *pic)
+                              HWBaseEncodePicture *base_pic)
 {
-    VAAPIEncodeContext *ctx = avctx->priv_data;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
+    VAAPIEncodeContext       *ctx = avctx->priv_data;
+    VAAPIEncodePicture *pic = (VAAPIEncodePicture*)base_pic;
     VAAPIEncodeSlice *slice;
     VAStatus vas;
     int err, i;
@@ -275,52 +279,52 @@  static int vaapi_encode_issue(AVCodecContext *avctx,
     av_unused AVFrameSideData *sd;
 
     av_log(avctx, AV_LOG_DEBUG, "Issuing encode for pic %"PRId64"/%"PRId64" "
-           "as type %s.\n", pic->display_order, pic->encode_order,
-           picture_type_name[pic->type]);
-    if (pic->nb_refs[0] == 0 && pic->nb_refs[1] == 0) {
+           "as type %s.\n", base_pic->display_order, base_pic->encode_order,
+           picture_type_name[base_pic->type]);
+    if (base_pic->nb_refs[0] == 0 && base_pic->nb_refs[1] == 0) {
         av_log(avctx, AV_LOG_DEBUG, "No reference pictures.\n");
     } else {
         av_log(avctx, AV_LOG_DEBUG, "L0 refers to");
-        for (i = 0; i < pic->nb_refs[0]; i++) {
+        for (i = 0; i < base_pic->nb_refs[0]; i++) {
             av_log(avctx, AV_LOG_DEBUG, " %"PRId64"/%"PRId64,
-                   pic->refs[0][i]->display_order, pic->refs[0][i]->encode_order);
+                   base_pic->refs[0][i]->display_order, base_pic->refs[0][i]->encode_order);
         }
         av_log(avctx, AV_LOG_DEBUG, ".\n");
 
-        if (pic->nb_refs[1]) {
+        if (base_pic->nb_refs[1]) {
             av_log(avctx, AV_LOG_DEBUG, "L1 refers to");
-            for (i = 0; i < pic->nb_refs[1]; i++) {
+            for (i = 0; i < base_pic->nb_refs[1]; i++) {
                 av_log(avctx, AV_LOG_DEBUG, " %"PRId64"/%"PRId64,
-                       pic->refs[1][i]->display_order, pic->refs[1][i]->encode_order);
+                       base_pic->refs[1][i]->display_order, base_pic->refs[1][i]->encode_order);
             }
             av_log(avctx, AV_LOG_DEBUG, ".\n");
         }
     }
 
-    av_assert0(!pic->encode_issued);
-    for (i = 0; i < pic->nb_refs[0]; i++) {
-        av_assert0(pic->refs[0][i]);
-        av_assert0(pic->refs[0][i]->encode_issued);
+    av_assert0(!base_pic->encode_issued);
+    for (i = 0; i < base_pic->nb_refs[0]; i++) {
+        av_assert0(base_pic->refs[0][i]);
+        av_assert0(base_pic->refs[0][i]->encode_issued);
     }
-    for (i = 0; i < pic->nb_refs[1]; i++) {
-        av_assert0(pic->refs[1][i]);
-        av_assert0(pic->refs[1][i]->encode_issued);
+    for (i = 0; i < base_pic->nb_refs[1]; i++) {
+        av_assert0(base_pic->refs[1][i]);
+        av_assert0(base_pic->refs[1][i]->encode_issued);
     }
 
     av_log(avctx, AV_LOG_DEBUG, "Input surface is %#x.\n", pic->input_surface);
 
-    pic->recon_image = av_frame_alloc();
-    if (!pic->recon_image) {
+    base_pic->recon_image = av_frame_alloc();
+    if (!base_pic->recon_image) {
         err = AVERROR(ENOMEM);
         goto fail;
     }
 
-    err = av_hwframe_get_buffer(ctx->recon_frames_ref, pic->recon_image, 0);
+    err = av_hwframe_get_buffer(base_ctx->recon_frames_ref, base_pic->recon_image, 0);
     if (err < 0) {
         err = AVERROR(ENOMEM);
         goto fail;
     }
-    pic->recon_surface = (VASurfaceID)(uintptr_t)pic->recon_image->data[3];
+    pic->recon_surface = (VASurfaceID)(uintptr_t)base_pic->recon_image->data[3];
     av_log(avctx, AV_LOG_DEBUG, "Recon surface is %#x.\n", pic->recon_surface);
 
     pic->output_buffer_ref = ff_refstruct_pool_get(ctx->output_buffer_pool);
@@ -344,7 +348,7 @@  static int vaapi_encode_issue(AVCodecContext *avctx,
 
     pic->nb_param_buffers = 0;
 
-    if (pic->type == PICTURE_TYPE_IDR && ctx->codec->init_sequence_params) {
+    if (base_pic->type == PICTURE_TYPE_IDR && ctx->codec->init_sequence_params) {
         err = vaapi_encode_make_param_buffer(avctx, pic,
                                              VAEncSequenceParameterBufferType,
                                              ctx->codec_sequence_params,
@@ -353,7 +357,7 @@  static int vaapi_encode_issue(AVCodecContext *avctx,
             goto fail;
     }
 
-    if (pic->type == PICTURE_TYPE_IDR) {
+    if (base_pic->type == PICTURE_TYPE_IDR) {
         for (i = 0; i < ctx->nb_global_params; i++) {
             err = vaapi_encode_make_misc_param_buffer(avctx, pic,
                                                       ctx->global_params_type[i],
@@ -390,7 +394,7 @@  static int vaapi_encode_issue(AVCodecContext *avctx,
     }
 #endif
 
-    if (pic->type == PICTURE_TYPE_IDR) {
+    if (base_pic->type == PICTURE_TYPE_IDR) {
         if (ctx->va_packed_headers & VA_ENC_PACKED_HEADER_SEQUENCE &&
             ctx->codec->write_sequence_header) {
             bit_len = 8 * sizeof(data);
@@ -530,9 +534,9 @@  static int vaapi_encode_issue(AVCodecContext *avctx,
     }
 
 #if VA_CHECK_VERSION(1, 0, 0)
-    sd = av_frame_get_side_data(pic->input_image,
+    sd = av_frame_get_side_data(base_pic->input_image,
                                 AV_FRAME_DATA_REGIONS_OF_INTEREST);
-    if (sd && ctx->roi_allowed) {
+    if (sd && base_ctx->roi_allowed) {
         const AVRegionOfInterest *roi;
         uint32_t roi_size;
         VAEncMiscParameterBufferROI param_roi;
@@ -543,11 +547,11 @@  static int vaapi_encode_issue(AVCodecContext *avctx,
         av_assert0(roi_size && sd->size % roi_size == 0);
         nb_roi = sd->size / roi_size;
         if (nb_roi > ctx->roi_max_regions) {
-            if (!ctx->roi_warned) {
+            if (!base_ctx->roi_warned) {
                 av_log(avctx, AV_LOG_WARNING, "More ROIs set than "
                        "supported by driver (%d > %d).\n",
                        nb_roi, ctx->roi_max_regions);
-                ctx->roi_warned = 1;
+                base_ctx->roi_warned = 1;
             }
             nb_roi = ctx->roi_max_regions;
         }
@@ -640,7 +644,7 @@  static int vaapi_encode_issue(AVCodecContext *avctx,
         }
     }
 
-    pic->encode_issued = 1;
+    base_pic->encode_issued = 1;
 
     return 0;
 
@@ -658,16 +662,17 @@  fail_at_end:
     av_freep(&pic->param_buffers);
     av_freep(&pic->slices);
     av_freep(&pic->roi);
-    av_frame_free(&pic->recon_image);
+    av_frame_free(&base_pic->recon_image);
     ff_refstruct_unref(&pic->output_buffer_ref);
     pic->output_buffer = VA_INVALID_ID;
     return err;
 }
 
 static int vaapi_encode_set_output_property(AVCodecContext *avctx,
-                                            VAAPIEncodePicture *pic,
+                                            HWBaseEncodePicture *pic,
                                             AVPacket *pkt)
 {
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
     VAAPIEncodeContext *ctx = avctx->priv_data;
 
     if (pic->type == PICTURE_TYPE_IDR)
@@ -689,16 +694,16 @@  static int vaapi_encode_set_output_property(AVCodecContext *avctx,
         return 0;
     }
 
-    if (ctx->output_delay == 0) {
+    if (base_ctx->output_delay == 0) {
         pkt->dts = pkt->pts;
-    } else if (pic->encode_order < ctx->decode_delay) {
-        if (ctx->ts_ring[pic->encode_order] < INT64_MIN + ctx->dts_pts_diff)
+    } else if (pic->encode_order < base_ctx->decode_delay) {
+        if (base_ctx->ts_ring[pic->encode_order] < INT64_MIN + base_ctx->dts_pts_diff)
             pkt->dts = INT64_MIN;
         else
-            pkt->dts = ctx->ts_ring[pic->encode_order] - ctx->dts_pts_diff;
+            pkt->dts = base_ctx->ts_ring[pic->encode_order] - base_ctx->dts_pts_diff;
     } else {
-        pkt->dts = ctx->ts_ring[(pic->encode_order - ctx->decode_delay) %
-                                (3 * ctx->output_delay + ctx->async_depth)];
+        pkt->dts = base_ctx->ts_ring[(pic->encode_order - base_ctx->decode_delay) %
+                                     (3 * base_ctx->output_delay + base_ctx->async_depth)];
     }
 
     return 0;
@@ -817,9 +822,11 @@  end:
 }
 
 static int vaapi_encode_output(AVCodecContext *avctx,
-                               VAAPIEncodePicture *pic, AVPacket *pkt)
+                               HWBaseEncodePicture *base_pic, AVPacket *pkt)
 {
-    VAAPIEncodeContext *ctx = avctx->priv_data;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
+    VAAPIEncodeContext       *ctx = avctx->priv_data;
+    VAAPIEncodePicture       *pic = (VAAPIEncodePicture*)base_pic;
     AVPacket *pkt_ptr = pkt;
     int err;
 
@@ -832,17 +839,17 @@  static int vaapi_encode_output(AVCodecContext *avctx,
         ctx->coded_buffer_ref = ff_refstruct_ref(pic->output_buffer_ref);
 
         if (pic->tail_size) {
-            if (ctx->tail_pkt->size) {
+            if (base_ctx->tail_pkt->size) {
                 err = AVERROR_BUG;
                 goto end;
             }
 
-            err = ff_get_encode_buffer(avctx, ctx->tail_pkt, pic->tail_size, 0);
+            err = ff_get_encode_buffer(avctx, base_ctx->tail_pkt, pic->tail_size, 0);
             if (err < 0)
                 goto end;
 
-            memcpy(ctx->tail_pkt->data, pic->tail_data, pic->tail_size);
-            pkt_ptr = ctx->tail_pkt;
+            memcpy(base_ctx->tail_pkt->data, pic->tail_data, pic->tail_size);
+            pkt_ptr = base_ctx->tail_pkt;
         }
     } else {
         err = vaapi_encode_get_coded_data(avctx, pic, pkt);
@@ -851,9 +858,9 @@  static int vaapi_encode_output(AVCodecContext *avctx,
     }
 
     av_log(avctx, AV_LOG_DEBUG, "Output read for pic %"PRId64"/%"PRId64".\n",
-           pic->display_order, pic->encode_order);
+           base_pic->display_order, base_pic->encode_order);
 
-    vaapi_encode_set_output_property(avctx, pic, pkt_ptr);
+    vaapi_encode_set_output_property(avctx, base_pic, pkt_ptr);
 
 end:
     ff_refstruct_unref(&pic->output_buffer_ref);
@@ -864,12 +871,13 @@  end:
 static int vaapi_encode_discard(AVCodecContext *avctx,
                                 VAAPIEncodePicture *pic)
 {
+    HWBaseEncodePicture *base_pic = (HWBaseEncodePicture*)pic;
     vaapi_encode_wait(avctx, pic);
 
     if (pic->output_buffer_ref) {
         av_log(avctx, AV_LOG_DEBUG, "Discard output for pic "
                "%"PRId64"/%"PRId64".\n",
-               pic->display_order, pic->encode_order);
+               base_pic->display_order, base_pic->encode_order);
 
         ff_refstruct_unref(&pic->output_buffer_ref);
         pic->output_buffer = VA_INVALID_ID;
@@ -878,8 +886,8 @@  static int vaapi_encode_discard(AVCodecContext *avctx,
     return 0;
 }
 
-static VAAPIEncodePicture *vaapi_encode_alloc(AVCodecContext *avctx,
-                                              const AVFrame *frame)
+static HWBaseEncodePicture *vaapi_encode_alloc(AVCodecContext *avctx,
+                                               const AVFrame *frame)
 {
     VAAPIEncodeContext *ctx = avctx->priv_data;
     VAAPIEncodePicture *pic;
@@ -889,8 +897,8 @@  static VAAPIEncodePicture *vaapi_encode_alloc(AVCodecContext *avctx,
         return NULL;
 
     if (ctx->codec->picture_priv_data_size > 0) {
-        pic->priv_data = av_mallocz(ctx->codec->picture_priv_data_size);
-        if (!pic->priv_data) {
+        pic->base.priv_data = av_mallocz(ctx->codec->picture_priv_data_size);
+        if (!pic->base.priv_data) {
             av_freep(&pic);
             return NULL;
         }
@@ -900,15 +908,16 @@  static VAAPIEncodePicture *vaapi_encode_alloc(AVCodecContext *avctx,
     pic->recon_surface = VA_INVALID_ID;
     pic->output_buffer = VA_INVALID_ID;
 
-    return pic;
+    return (HWBaseEncodePicture*)pic;
 }
 
 static int vaapi_encode_free(AVCodecContext *avctx,
-                             VAAPIEncodePicture *pic)
+                             HWBaseEncodePicture *base_pic)
 {
+    VAAPIEncodePicture *pic = (VAAPIEncodePicture*)base_pic;
     int i;
 
-    if (pic->encode_issued)
+    if (base_pic->encode_issued)
         vaapi_encode_discard(avctx, pic);
 
     if (pic->slices) {
@@ -917,17 +926,17 @@  static int vaapi_encode_free(AVCodecContext *avctx,
     }
     av_freep(&pic->codec_picture_params);
 
-    av_frame_free(&pic->input_image);
-    av_frame_free(&pic->recon_image);
+    av_frame_free(&base_pic->input_image);
+    av_frame_free(&base_pic->recon_image);
 
-    av_buffer_unref(&pic->opaque_ref);
+    av_buffer_unref(&base_pic->opaque_ref);
 
     av_freep(&pic->param_buffers);
     av_freep(&pic->slices);
     // Output buffer should already be destroyed.
     av_assert0(pic->output_buffer == VA_INVALID_ID);
 
-    av_freep(&pic->priv_data);
+    av_freep(&base_pic->priv_data);
     av_freep(&pic->codec_picture_params);
     av_freep(&pic->roi);
 
@@ -936,563 +945,6 @@  static int vaapi_encode_free(AVCodecContext *avctx,
     return 0;
 }
 
-static void vaapi_encode_add_ref(AVCodecContext *avctx,
-                                 VAAPIEncodePicture *pic,
-                                 VAAPIEncodePicture *target,
-                                 int is_ref, int in_dpb, int prev)
-{
-    int refs = 0;
-
-    if (is_ref) {
-        av_assert0(pic != target);
-        av_assert0(pic->nb_refs[0] < MAX_PICTURE_REFERENCES &&
-                   pic->nb_refs[1] < MAX_PICTURE_REFERENCES);
-        if (target->display_order < pic->display_order)
-            pic->refs[0][pic->nb_refs[0]++] = target;
-        else
-            pic->refs[1][pic->nb_refs[1]++] = target;
-        ++refs;
-    }
-
-    if (in_dpb) {
-        av_assert0(pic->nb_dpb_pics < MAX_DPB_SIZE);
-        pic->dpb[pic->nb_dpb_pics++] = target;
-        ++refs;
-    }
-
-    if (prev) {
-        av_assert0(!pic->prev);
-        pic->prev = target;
-        ++refs;
-    }
-
-    target->ref_count[0] += refs;
-    target->ref_count[1] += refs;
-}
-
-static void vaapi_encode_remove_refs(AVCodecContext *avctx,
-                                     VAAPIEncodePicture *pic,
-                                     int level)
-{
-    int i;
-
-    if (pic->ref_removed[level])
-        return;
-
-    for (i = 0; i < pic->nb_refs[0]; i++) {
-        av_assert0(pic->refs[0][i]);
-        --pic->refs[0][i]->ref_count[level];
-        av_assert0(pic->refs[0][i]->ref_count[level] >= 0);
-    }
-
-    for (i = 0; i < pic->nb_refs[1]; i++) {
-        av_assert0(pic->refs[1][i]);
-        --pic->refs[1][i]->ref_count[level];
-        av_assert0(pic->refs[1][i]->ref_count[level] >= 0);
-    }
-
-    for (i = 0; i < pic->nb_dpb_pics; i++) {
-        av_assert0(pic->dpb[i]);
-        --pic->dpb[i]->ref_count[level];
-        av_assert0(pic->dpb[i]->ref_count[level] >= 0);
-    }
-
-    av_assert0(pic->prev || pic->type == PICTURE_TYPE_IDR);
-    if (pic->prev) {
-        --pic->prev->ref_count[level];
-        av_assert0(pic->prev->ref_count[level] >= 0);
-    }
-
-    pic->ref_removed[level] = 1;
-}
-
-static void vaapi_encode_set_b_pictures(AVCodecContext *avctx,
-                                        VAAPIEncodePicture *start,
-                                        VAAPIEncodePicture *end,
-                                        VAAPIEncodePicture *prev,
-                                        int current_depth,
-                                        VAAPIEncodePicture **last)
-{
-    VAAPIEncodeContext *ctx = avctx->priv_data;
-    VAAPIEncodePicture *pic, *next, *ref;
-    int i, len;
-
-    av_assert0(start && end && start != end && start->next != end);
-
-    // If we are at the maximum depth then encode all pictures as
-    // non-referenced B-pictures.  Also do this if there is exactly one
-    // picture left, since there will be nothing to reference it.
-    if (current_depth == ctx->max_b_depth || start->next->next == end) {
-        for (pic = start->next; pic; pic = pic->next) {
-            if (pic == end)
-                break;
-            pic->type    = PICTURE_TYPE_B;
-            pic->b_depth = current_depth;
-
-            vaapi_encode_add_ref(avctx, pic, start, 1, 1, 0);
-            vaapi_encode_add_ref(avctx, pic, end,   1, 1, 0);
-            vaapi_encode_add_ref(avctx, pic, prev,  0, 0, 1);
-
-            for (ref = end->refs[1][0]; ref; ref = ref->refs[1][0])
-                vaapi_encode_add_ref(avctx, pic, ref, 0, 1, 0);
-        }
-        *last = prev;
-
-    } else {
-        // Split the current list at the midpoint with a referenced
-        // B-picture, then descend into each side separately.
-        len = 0;
-        for (pic = start->next; pic != end; pic = pic->next)
-            ++len;
-        for (pic = start->next, i = 1; 2 * i < len; pic = pic->next, i++);
-
-        pic->type    = PICTURE_TYPE_B;
-        pic->b_depth = current_depth;
-
-        pic->is_reference = 1;
-
-        vaapi_encode_add_ref(avctx, pic, pic,   0, 1, 0);
-        vaapi_encode_add_ref(avctx, pic, start, 1, 1, 0);
-        vaapi_encode_add_ref(avctx, pic, end,   1, 1, 0);
-        vaapi_encode_add_ref(avctx, pic, prev,  0, 0, 1);
-
-        for (ref = end->refs[1][0]; ref; ref = ref->refs[1][0])
-            vaapi_encode_add_ref(avctx, pic, ref, 0, 1, 0);
-
-        if (i > 1)
-            vaapi_encode_set_b_pictures(avctx, start, pic, pic,
-                                        current_depth + 1, &next);
-        else
-            next = pic;
-
-        vaapi_encode_set_b_pictures(avctx, pic, end, next,
-                                    current_depth + 1, last);
-    }
-}
-
-static void vaapi_encode_add_next_prev(AVCodecContext *avctx,
-                                       VAAPIEncodePicture *pic)
-{
-    VAAPIEncodeContext *ctx = avctx->priv_data;
-    int i;
-
-    if (!pic)
-        return;
-
-    if (pic->type == PICTURE_TYPE_IDR) {
-        for (i = 0; i < ctx->nb_next_prev; i++) {
-            --ctx->next_prev[i]->ref_count[0];
-            ctx->next_prev[i] = NULL;
-        }
-        ctx->next_prev[0] = pic;
-        ++pic->ref_count[0];
-        ctx->nb_next_prev = 1;
-
-        return;
-    }
-
-    if (ctx->nb_next_prev < MAX_PICTURE_REFERENCES) {
-        ctx->next_prev[ctx->nb_next_prev++] = pic;
-        ++pic->ref_count[0];
-    } else {
-        --ctx->next_prev[0]->ref_count[0];
-        for (i = 0; i < MAX_PICTURE_REFERENCES - 1; i++)
-            ctx->next_prev[i] = ctx->next_prev[i + 1];
-        ctx->next_prev[i] = pic;
-        ++pic->ref_count[0];
-    }
-}
-
-static int vaapi_encode_pick_next(AVCodecContext *avctx,
-                                  VAAPIEncodePicture **pic_out)
-{
-    VAAPIEncodeContext *ctx = avctx->priv_data;
-    VAAPIEncodePicture *pic = NULL, *prev = NULL, *next, *start;
-    int i, b_counter, closed_gop_end;
-
-    // If there are any B-frames already queued, the next one to encode
-    // is the earliest not-yet-issued frame for which all references are
-    // available.
-    for (pic = ctx->pic_start; pic; pic = pic->next) {
-        if (pic->encode_issued)
-            continue;
-        if (pic->type != PICTURE_TYPE_B)
-            continue;
-        for (i = 0; i < pic->nb_refs[0]; i++) {
-            if (!pic->refs[0][i]->encode_issued)
-                break;
-        }
-        if (i != pic->nb_refs[0])
-            continue;
-
-        for (i = 0; i < pic->nb_refs[1]; i++) {
-            if (!pic->refs[1][i]->encode_issued)
-                break;
-        }
-        if (i == pic->nb_refs[1])
-            break;
-    }
-
-    if (pic) {
-        av_log(avctx, AV_LOG_DEBUG, "Pick B-picture at depth %d to "
-               "encode next.\n", pic->b_depth);
-        *pic_out = pic;
-        return 0;
-    }
-
-    // Find the B-per-Pth available picture to become the next picture
-    // on the top layer.
-    start = NULL;
-    b_counter = 0;
-    closed_gop_end = ctx->closed_gop ||
-                     ctx->idr_counter == ctx->gop_per_idr;
-    for (pic = ctx->pic_start; pic; pic = next) {
-        next = pic->next;
-        if (pic->encode_issued) {
-            start = pic;
-            continue;
-        }
-        // If the next available picture is force-IDR, encode it to start
-        // a new GOP immediately.
-        if (pic->force_idr)
-            break;
-        if (b_counter == ctx->b_per_p)
-            break;
-        // If this picture ends a closed GOP or starts a new GOP then it
-        // needs to be in the top layer.
-        if (ctx->gop_counter + b_counter + closed_gop_end >= ctx->gop_size)
-            break;
-        // If the picture after this one is force-IDR, we need to encode
-        // this one in the top layer.
-        if (next && next->force_idr)
-            break;
-        ++b_counter;
-    }
-
-    // At the end of the stream the last picture must be in the top layer.
-    if (!pic && ctx->end_of_stream) {
-        --b_counter;
-        pic = ctx->pic_end;
-        if (pic->encode_complete)
-            return AVERROR_EOF;
-        else if (pic->encode_issued)
-            return AVERROR(EAGAIN);
-    }
-
-    if (!pic) {
-        av_log(avctx, AV_LOG_DEBUG, "Pick nothing to encode next - "
-               "need more input for reference pictures.\n");
-        return AVERROR(EAGAIN);
-    }
-    if (ctx->input_order <= ctx->decode_delay && !ctx->end_of_stream) {
-        av_log(avctx, AV_LOG_DEBUG, "Pick nothing to encode next - "
-               "need more input for timestamps.\n");
-        return AVERROR(EAGAIN);
-    }
-
-    if (pic->force_idr) {
-        av_log(avctx, AV_LOG_DEBUG, "Pick forced IDR-picture to "
-               "encode next.\n");
-        pic->type = PICTURE_TYPE_IDR;
-        ctx->idr_counter = 1;
-        ctx->gop_counter = 1;
-
-    } else if (ctx->gop_counter + b_counter >= ctx->gop_size) {
-        if (ctx->idr_counter == ctx->gop_per_idr) {
-            av_log(avctx, AV_LOG_DEBUG, "Pick new-GOP IDR-picture to "
-                   "encode next.\n");
-            pic->type = PICTURE_TYPE_IDR;
-            ctx->idr_counter = 1;
-        } else {
-            av_log(avctx, AV_LOG_DEBUG, "Pick new-GOP I-picture to "
-                   "encode next.\n");
-            pic->type = PICTURE_TYPE_I;
-            ++ctx->idr_counter;
-        }
-        ctx->gop_counter = 1;
-
-    } else {
-        if (ctx->gop_counter + b_counter + closed_gop_end == ctx->gop_size) {
-            av_log(avctx, AV_LOG_DEBUG, "Pick group-end P-picture to "
-                   "encode next.\n");
-        } else {
-            av_log(avctx, AV_LOG_DEBUG, "Pick normal P-picture to "
-                   "encode next.\n");
-        }
-        pic->type = PICTURE_TYPE_P;
-        av_assert0(start);
-        ctx->gop_counter += 1 + b_counter;
-    }
-    pic->is_reference = 1;
-    *pic_out = pic;
-
-    vaapi_encode_add_ref(avctx, pic, pic, 0, 1, 0);
-    if (pic->type != PICTURE_TYPE_IDR) {
-        // TODO: apply both previous and forward multi reference for all vaapi encoders.
-        // And L0/L1 reference frame number can be set dynamically through query
-        // VAConfigAttribEncMaxRefFrames attribute.
-        if (avctx->codec_id == AV_CODEC_ID_AV1) {
-            for (i = 0; i < ctx->nb_next_prev; i++)
-                vaapi_encode_add_ref(avctx, pic, ctx->next_prev[i],
-                                     pic->type == PICTURE_TYPE_P,
-                                     b_counter > 0, 0);
-        } else
-            vaapi_encode_add_ref(avctx, pic, start,
-                                 pic->type == PICTURE_TYPE_P,
-                                 b_counter > 0, 0);
-
-        vaapi_encode_add_ref(avctx, pic, ctx->next_prev[ctx->nb_next_prev - 1], 0, 0, 1);
-    }
-
-    if (b_counter > 0) {
-        vaapi_encode_set_b_pictures(avctx, start, pic, pic, 1,
-                                    &prev);
-    } else {
-        prev = pic;
-    }
-    vaapi_encode_add_next_prev(avctx, prev);
-
-    return 0;
-}
-
-static int vaapi_encode_clear_old(AVCodecContext *avctx)
-{
-    VAAPIEncodeContext *ctx = avctx->priv_data;
-    VAAPIEncodePicture *pic, *prev, *next;
-
-    av_assert0(ctx->pic_start);
-
-    // Remove direct references once each picture is complete.
-    for (pic = ctx->pic_start; pic; pic = pic->next) {
-        if (pic->encode_complete && pic->next)
-            vaapi_encode_remove_refs(avctx, pic, 0);
-    }
-
-    // Remove indirect references once a picture has no direct references.
-    for (pic = ctx->pic_start; pic; pic = pic->next) {
-        if (pic->encode_complete && pic->ref_count[0] == 0)
-            vaapi_encode_remove_refs(avctx, pic, 1);
-    }
-
-    // Clear out all complete pictures with no remaining references.
-    prev = NULL;
-    for (pic = ctx->pic_start; pic; pic = next) {
-        next = pic->next;
-        if (pic->encode_complete && pic->ref_count[1] == 0) {
-            av_assert0(pic->ref_removed[0] && pic->ref_removed[1]);
-            if (prev)
-                prev->next = next;
-            else
-                ctx->pic_start = next;
-            vaapi_encode_free(avctx, pic);
-        } else {
-            prev = pic;
-        }
-    }
-
-    return 0;
-}
-
-static int vaapi_encode_check_frame(AVCodecContext *avctx,
-                                    const AVFrame *frame)
-{
-    VAAPIEncodeContext *ctx = avctx->priv_data;
-
-    if ((frame->crop_top  || frame->crop_bottom ||
-         frame->crop_left || frame->crop_right) && !ctx->crop_warned) {
-        av_log(avctx, AV_LOG_WARNING, "Cropping information on input "
-               "frames ignored due to lack of API support.\n");
-        ctx->crop_warned = 1;
-    }
-
-    if (!ctx->roi_allowed) {
-        AVFrameSideData *sd =
-            av_frame_get_side_data(frame, AV_FRAME_DATA_REGIONS_OF_INTEREST);
-
-        if (sd && !ctx->roi_warned) {
-            av_log(avctx, AV_LOG_WARNING, "ROI side data on input "
-                   "frames ignored due to lack of driver support.\n");
-            ctx->roi_warned = 1;
-        }
-    }
-
-    return 0;
-}
-
-static int vaapi_encode_send_frame(AVCodecContext *avctx, AVFrame *frame)
-{
-    VAAPIEncodeContext *ctx = avctx->priv_data;
-    VAAPIEncodePicture *pic;
-    int err;
-
-    if (frame) {
-        av_log(avctx, AV_LOG_DEBUG, "Input frame: %ux%u (%"PRId64").\n",
-               frame->width, frame->height, frame->pts);
-
-        err = vaapi_encode_check_frame(avctx, frame);
-        if (err < 0)
-            return err;
-
-        pic = vaapi_encode_alloc(avctx, frame);
-        if (!pic)
-            return AVERROR(ENOMEM);
-
-        pic->input_image = av_frame_alloc();
-        if (!pic->input_image) {
-            err = AVERROR(ENOMEM);
-            goto fail;
-        }
-
-        if (ctx->input_order == 0 || frame->pict_type == AV_PICTURE_TYPE_I)
-            pic->force_idr = 1;
-
-        pic->pts = frame->pts;
-        pic->duration = frame->duration;
-
-        if (avctx->flags & AV_CODEC_FLAG_COPY_OPAQUE) {
-            err = av_buffer_replace(&pic->opaque_ref, frame->opaque_ref);
-            if (err < 0)
-                goto fail;
-
-            pic->opaque = frame->opaque;
-        }
-
-        av_frame_move_ref(pic->input_image, frame);
-
-        if (ctx->input_order == 0)
-            ctx->first_pts = pic->pts;
-        if (ctx->input_order == ctx->decode_delay)
-            ctx->dts_pts_diff = pic->pts - ctx->first_pts;
-        if (ctx->output_delay > 0)
-            ctx->ts_ring[ctx->input_order %
-                        (3 * ctx->output_delay + ctx->async_depth)] = pic->pts;
-
-        pic->display_order = ctx->input_order;
-        ++ctx->input_order;
-
-        if (ctx->pic_start) {
-            ctx->pic_end->next = pic;
-            ctx->pic_end       = pic;
-        } else {
-            ctx->pic_start     = pic;
-            ctx->pic_end       = pic;
-        }
-
-    } else {
-        ctx->end_of_stream = 1;
-
-        // Fix timestamps if we hit end-of-stream before the initial decode
-        // delay has elapsed.
-        if (ctx->input_order < ctx->decode_delay)
-            ctx->dts_pts_diff = ctx->pic_end->pts - ctx->first_pts;
-    }
-
-    return 0;
-
-fail:
-    vaapi_encode_free(avctx, pic);
-    return err;
-}
-
-int ff_vaapi_encode_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
-{
-    VAAPIEncodeContext *ctx = avctx->priv_data;
-    VAAPIEncodePicture *pic = NULL;
-    AVFrame *frame = ctx->frame;
-    int err;
-
-start:
-    /** if no B frame before repeat P frame, sent repeat P frame out. */
-    if (ctx->tail_pkt->size) {
-        for (VAAPIEncodePicture *tmp = ctx->pic_start; tmp; tmp = tmp->next) {
-            if (tmp->type == PICTURE_TYPE_B && tmp->pts < ctx->tail_pkt->pts)
-                break;
-            else if (!tmp->next) {
-                av_packet_move_ref(pkt, ctx->tail_pkt);
-                goto end;
-            }
-        }
-    }
-
-    err = ff_encode_get_frame(avctx, frame);
-    if (err < 0 && err != AVERROR_EOF)
-        return err;
-
-    if (err == AVERROR_EOF)
-        frame = NULL;
-
-    err = vaapi_encode_send_frame(avctx, frame);
-    if (err < 0)
-        return err;
-
-    if (!ctx->pic_start) {
-        if (ctx->end_of_stream)
-            return AVERROR_EOF;
-        else
-            return AVERROR(EAGAIN);
-    }
-
-    if (ctx->has_sync_buffer_func) {
-        if (av_fifo_can_write(ctx->encode_fifo)) {
-            err = vaapi_encode_pick_next(avctx, &pic);
-            if (!err) {
-                av_assert0(pic);
-                pic->encode_order = ctx->encode_order +
-                    av_fifo_can_read(ctx->encode_fifo);
-                err = vaapi_encode_issue(avctx, pic);
-                if (err < 0) {
-                    av_log(avctx, AV_LOG_ERROR, "Encode failed: %d.\n", err);
-                    return err;
-                }
-                av_fifo_write(ctx->encode_fifo, &pic, 1);
-            }
-        }
-
-        if (!av_fifo_can_read(ctx->encode_fifo))
-            return err;
-
-        // More frames can be buffered
-        if (av_fifo_can_write(ctx->encode_fifo) && !ctx->end_of_stream)
-            return AVERROR(EAGAIN);
-
-        av_fifo_read(ctx->encode_fifo, &pic, 1);
-        ctx->encode_order = pic->encode_order + 1;
-    } else {
-        err = vaapi_encode_pick_next(avctx, &pic);
-        if (err < 0)
-            return err;
-        av_assert0(pic);
-
-        pic->encode_order = ctx->encode_order++;
-
-        err = vaapi_encode_issue(avctx, pic);
-        if (err < 0) {
-            av_log(avctx, AV_LOG_ERROR, "Encode failed: %d.\n", err);
-            return err;
-        }
-    }
-
-    err = vaapi_encode_output(avctx, pic, pkt);
-    if (err < 0) {
-        av_log(avctx, AV_LOG_ERROR, "Output failed: %d.\n", err);
-        return err;
-    }
-
-    ctx->output_order = pic->encode_order;
-    vaapi_encode_clear_old(avctx);
-
-    /** loop to get an available pkt in encoder flushing. */
-    if (ctx->end_of_stream && !pkt->size)
-        goto start;
-
-end:
-    if (pkt->size)
-        av_log(avctx, AV_LOG_DEBUG, "Output packet: pts %"PRId64", dts %"PRId64", "
-               "size %d bytes.\n", pkt->pts, pkt->dts, pkt->size);
-
-    return 0;
-}
-
 static av_cold void vaapi_encode_add_global_param(AVCodecContext *avctx, int type,
                                                   void *buffer, size_t size)
 {
@@ -1552,9 +1004,10 @@  static const VAEntrypoint vaapi_encode_entrypoints_low_power[] = {
 
 static av_cold int vaapi_encode_profile_entrypoint(AVCodecContext *avctx)
 {
-    VAAPIEncodeContext      *ctx = avctx->priv_data;
-    VAProfile    *va_profiles    = NULL;
-    VAEntrypoint *va_entrypoints = NULL;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
+    VAAPIEncodeContext       *ctx = avctx->priv_data;
+    VAProfile     *va_profiles    = NULL;
+    VAEntrypoint  *va_entrypoints = NULL;
     VAStatus vas;
     const VAEntrypoint *usable_entrypoints;
     const VAAPIEncodeProfile *profile;
@@ -1577,10 +1030,10 @@  static av_cold int vaapi_encode_profile_entrypoint(AVCodecContext *avctx)
         usable_entrypoints = vaapi_encode_entrypoints_normal;
     }
 
-    desc = av_pix_fmt_desc_get(ctx->input_frames->sw_format);
+    desc = av_pix_fmt_desc_get(base_ctx->input_frames->sw_format);
     if (!desc) {
         av_log(avctx, AV_LOG_ERROR, "Invalid input pixfmt (%d).\n",
-               ctx->input_frames->sw_format);
+               base_ctx->input_frames->sw_format);
         return AVERROR(EINVAL);
     }
     depth = desc->comp[0].depth;
@@ -1773,7 +1226,8 @@  static const VAAPIEncodeRCMode vaapi_encode_rc_modes[] = {
 
 static av_cold int vaapi_encode_init_rate_control(AVCodecContext *avctx)
 {
-    VAAPIEncodeContext *ctx = avctx->priv_data;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
+    VAAPIEncodeContext       *ctx = avctx->priv_data;
     uint32_t supported_va_rc_modes;
     const VAAPIEncodeRCMode *rc_mode;
     int64_t rc_bits_per_second;
@@ -1856,10 +1310,10 @@  static av_cold int vaapi_encode_init_rate_control(AVCodecContext *avctx)
         } \
     } while (0)
 
-    if (ctx->explicit_rc_mode)
-        TRY_RC_MODE(ctx->explicit_rc_mode, 1);
+    if (base_ctx->explicit_rc_mode)
+        TRY_RC_MODE(base_ctx->explicit_rc_mode, 1);
 
-    if (ctx->explicit_qp)
+    if (base_ctx->explicit_qp)
         TRY_RC_MODE(RC_MODE_CQP, 1);
 
     if (ctx->codec->flags & FLAG_CONSTANT_QUALITY_ONLY)
@@ -1954,8 +1408,8 @@  rc_mode_found:
     }
 
     if (rc_mode->quality) {
-        if (ctx->explicit_qp) {
-            rc_quality = ctx->explicit_qp;
+        if (base_ctx->explicit_qp) {
+            rc_quality = base_ctx->explicit_qp;
         } else if (avctx->global_quality > 0) {
             rc_quality = avctx->global_quality;
         } else {
@@ -2011,10 +1465,10 @@  rc_mode_found:
         return AVERROR(EINVAL);
     }
 
-    ctx->rc_mode     = rc_mode;
-    ctx->rc_quality  = rc_quality;
-    ctx->va_rc_mode  = rc_mode->va_mode;
-    ctx->va_bit_rate = rc_bits_per_second;
+    ctx->rc_mode          = rc_mode;
+    base_ctx->rc_quality  = rc_quality;
+    ctx->va_rc_mode       = rc_mode->va_mode;
+    ctx->va_bit_rate      = rc_bits_per_second;
 
     av_log(avctx, AV_LOG_VERBOSE, "RC mode: %s.\n", rc_mode->name);
     if (rc_attr.value == VA_ATTRIB_NOT_SUPPORTED) {
@@ -2160,7 +1614,8 @@  static av_cold int vaapi_encode_init_max_frame_size(AVCodecContext *avctx)
 
 static av_cold int vaapi_encode_init_gop_structure(AVCodecContext *avctx)
 {
-    VAAPIEncodeContext *ctx = avctx->priv_data;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
+    VAAPIEncodeContext       *ctx = avctx->priv_data;
     VAStatus vas;
     VAConfigAttrib attr = { VAConfigAttribEncMaxRefFrames };
     uint32_t ref_l0, ref_l1;
@@ -2183,7 +1638,7 @@  static av_cold int vaapi_encode_init_gop_structure(AVCodecContext *avctx)
         ref_l1 = attr.value >> 16 & 0xffff;
     }
 
-    ctx->p_to_gpb = 0;
+    base_ctx->p_to_gpb = 0;
     prediction_pre_only = 0;
 
 #if VA_CHECK_VERSION(1, 9, 0)
@@ -2219,7 +1674,7 @@  static av_cold int vaapi_encode_init_gop_structure(AVCodecContext *avctx)
 
             if (attr.value & VA_PREDICTION_DIRECTION_BI_NOT_EMPTY) {
                 if (ref_l0 > 0 && ref_l1 > 0) {
-                    ctx->p_to_gpb = 1;
+                    base_ctx->p_to_gpb = 1;
                     av_log(avctx, AV_LOG_VERBOSE, "Driver does not support P-frames, "
                            "replacing them with B-frames.\n");
                 }
@@ -2231,7 +1686,7 @@  static av_cold int vaapi_encode_init_gop_structure(AVCodecContext *avctx)
     if (ctx->codec->flags & FLAG_INTRA_ONLY ||
         avctx->gop_size <= 1) {
         av_log(avctx, AV_LOG_VERBOSE, "Using intra frames only.\n");
-        ctx->gop_size = 1;
+        base_ctx->gop_size = 1;
     } else if (ref_l0 < 1) {
         av_log(avctx, AV_LOG_ERROR, "Driver does not support any "
                "reference frames.\n");
@@ -2239,41 +1694,41 @@  static av_cold int vaapi_encode_init_gop_structure(AVCodecContext *avctx)
     } else if (!(ctx->codec->flags & FLAG_B_PICTURES) ||
                ref_l1 < 1 || avctx->max_b_frames < 1 ||
                prediction_pre_only) {
-        if (ctx->p_to_gpb)
+        if (base_ctx->p_to_gpb)
            av_log(avctx, AV_LOG_VERBOSE, "Using intra and B-frames "
                   "(supported references: %d / %d).\n",
                   ref_l0, ref_l1);
         else
             av_log(avctx, AV_LOG_VERBOSE, "Using intra and P-frames "
                    "(supported references: %d / %d).\n", ref_l0, ref_l1);
-        ctx->gop_size = avctx->gop_size;
-        ctx->p_per_i  = INT_MAX;
-        ctx->b_per_p  = 0;
+        base_ctx->gop_size = avctx->gop_size;
+        base_ctx->p_per_i  = INT_MAX;
+        base_ctx->b_per_p  = 0;
     } else {
-       if (ctx->p_to_gpb)
+       if (base_ctx->p_to_gpb)
            av_log(avctx, AV_LOG_VERBOSE, "Using intra and B-frames "
                   "(supported references: %d / %d).\n",
                   ref_l0, ref_l1);
        else
            av_log(avctx, AV_LOG_VERBOSE, "Using intra, P- and B-frames "
                   "(supported references: %d / %d).\n", ref_l0, ref_l1);
-        ctx->gop_size = avctx->gop_size;
-        ctx->p_per_i  = INT_MAX;
-        ctx->b_per_p  = avctx->max_b_frames;
+        base_ctx->gop_size = avctx->gop_size;
+        base_ctx->p_per_i  = INT_MAX;
+        base_ctx->b_per_p  = avctx->max_b_frames;
         if (ctx->codec->flags & FLAG_B_PICTURE_REFERENCES) {
-            ctx->max_b_depth = FFMIN(ctx->desired_b_depth,
-                                     av_log2(ctx->b_per_p) + 1);
+            base_ctx->max_b_depth = FFMIN(base_ctx->desired_b_depth,
+                                          av_log2(base_ctx->b_per_p) + 1);
         } else {
-            ctx->max_b_depth = 1;
+            base_ctx->max_b_depth = 1;
         }
     }
 
     if (ctx->codec->flags & FLAG_NON_IDR_KEY_PICTURES) {
-        ctx->closed_gop  = !!(avctx->flags & AV_CODEC_FLAG_CLOSED_GOP);
-        ctx->gop_per_idr = ctx->idr_interval + 1;
+        base_ctx->closed_gop  = !!(avctx->flags & AV_CODEC_FLAG_CLOSED_GOP);
+        base_ctx->gop_per_idr = base_ctx->idr_interval + 1;
     } else {
-        ctx->closed_gop  = 1;
-        ctx->gop_per_idr = 1;
+        base_ctx->closed_gop  = 1;
+        base_ctx->gop_per_idr = 1;
     }
 
     return 0;
@@ -2387,6 +1842,7 @@  static av_cold int vaapi_encode_init_tile_slice_structure(AVCodecContext *avctx,
 
 static av_cold int vaapi_encode_init_slice_structure(AVCodecContext *avctx)
 {
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
     VAAPIEncodeContext *ctx = avctx->priv_data;
     VAConfigAttrib attr[3] = { { VAConfigAttribEncMaxSlices },
                                { VAConfigAttribEncSliceStructure },
@@ -2406,12 +1862,12 @@  static av_cold int vaapi_encode_init_slice_structure(AVCodecContext *avctx)
         return 0;
     }
 
-    av_assert0(ctx->slice_block_height > 0 && ctx->slice_block_width > 0);
+    av_assert0(base_ctx->slice_block_height > 0 && base_ctx->slice_block_width > 0);
 
-    ctx->slice_block_rows = (avctx->height + ctx->slice_block_height - 1) /
-                             ctx->slice_block_height;
-    ctx->slice_block_cols = (avctx->width  + ctx->slice_block_width  - 1) /
-                             ctx->slice_block_width;
+    ctx->slice_block_rows = (avctx->height + base_ctx->slice_block_height - 1) /
+                             base_ctx->slice_block_height;
+    ctx->slice_block_cols = (avctx->width  + base_ctx->slice_block_width  - 1) /
+                             base_ctx->slice_block_width;
 
     if (avctx->slices <= 1 && !ctx->tile_rows && !ctx->tile_cols) {
         ctx->nb_slices  = 1;
@@ -2586,7 +2042,8 @@  static av_cold int vaapi_encode_init_quality(AVCodecContext *avctx)
 static av_cold int vaapi_encode_init_roi(AVCodecContext *avctx)
 {
 #if VA_CHECK_VERSION(1, 0, 0)
-    VAAPIEncodeContext *ctx = avctx->priv_data;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
+    VAAPIEncodeContext       *ctx = avctx->priv_data;
     VAStatus vas;
     VAConfigAttrib attr = { VAConfigAttribEncROI };
 
@@ -2601,14 +2058,14 @@  static av_cold int vaapi_encode_init_roi(AVCodecContext *avctx)
     }
 
     if (attr.value == VA_ATTRIB_NOT_SUPPORTED) {
-        ctx->roi_allowed = 0;
+        base_ctx->roi_allowed = 0;
     } else {
         VAConfigAttribValEncROI roi = {
             .value = attr.value,
         };
 
         ctx->roi_max_regions = roi.bits.num_roi_regions;
-        ctx->roi_allowed = ctx->roi_max_regions > 0 &&
+        base_ctx->roi_allowed = ctx->roi_max_regions > 0 &&
             (ctx->va_rc_mode == VA_RC_CQP ||
              roi.bits.roi_rc_qp_delta_support);
     }
@@ -2632,7 +2089,8 @@  static void vaapi_encode_free_output_buffer(FFRefStructOpaque opaque,
 static int vaapi_encode_alloc_output_buffer(FFRefStructOpaque opaque, void *obj)
 {
     AVCodecContext   *avctx = opaque.nc;
-    VAAPIEncodeContext *ctx = avctx->priv_data;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
+    VAAPIEncodeContext       *ctx = avctx->priv_data;
     VABufferID *buffer_id = obj;
     VAStatus vas;
 
@@ -2642,7 +2100,7 @@  static int vaapi_encode_alloc_output_buffer(FFRefStructOpaque opaque, void *obj)
     // bound on that.
     vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
                          VAEncCodedBufferType,
-                         3 * ctx->surface_width * ctx->surface_height +
+                         3 * base_ctx->surface_width * base_ctx->surface_height +
                          (1 << 16), 1, 0, buffer_id);
     if (vas != VA_STATUS_SUCCESS) {
         av_log(avctx, AV_LOG_ERROR, "Failed to create bitstream "
@@ -2657,20 +2115,21 @@  static int vaapi_encode_alloc_output_buffer(FFRefStructOpaque opaque, void *obj)
 
 static av_cold int vaapi_encode_create_recon_frames(AVCodecContext *avctx)
 {
-    VAAPIEncodeContext *ctx = avctx->priv_data;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
+    VAAPIEncodeContext       *ctx = avctx->priv_data;
     AVVAAPIHWConfig *hwconfig = NULL;
     AVHWFramesConstraints *constraints = NULL;
     enum AVPixelFormat recon_format;
     int err, i;
 
-    hwconfig = av_hwdevice_hwconfig_alloc(ctx->device_ref);
+    hwconfig = av_hwdevice_hwconfig_alloc(base_ctx->device_ref);
     if (!hwconfig) {
         err = AVERROR(ENOMEM);
         goto fail;
     }
     hwconfig->config_id = ctx->va_config;
 
-    constraints = av_hwdevice_get_hwframe_constraints(ctx->device_ref,
+    constraints = av_hwdevice_get_hwframe_constraints(base_ctx->device_ref,
                                                       hwconfig);
     if (!constraints) {
         err = AVERROR(ENOMEM);
@@ -2683,9 +2142,9 @@  static av_cold int vaapi_encode_create_recon_frames(AVCodecContext *avctx)
     recon_format = AV_PIX_FMT_NONE;
     if (constraints->valid_sw_formats) {
         for (i = 0; constraints->valid_sw_formats[i] != AV_PIX_FMT_NONE; i++) {
-            if (ctx->input_frames->sw_format ==
+            if (base_ctx->input_frames->sw_format ==
                 constraints->valid_sw_formats[i]) {
-                recon_format = ctx->input_frames->sw_format;
+                recon_format = base_ctx->input_frames->sw_format;
                 break;
             }
         }
@@ -2696,18 +2155,18 @@  static av_cold int vaapi_encode_create_recon_frames(AVCodecContext *avctx)
         }
     } else {
         // No idea what to use; copy input format.
-        recon_format = ctx->input_frames->sw_format;
+        recon_format = base_ctx->input_frames->sw_format;
     }
     av_log(avctx, AV_LOG_DEBUG, "Using %s as format of "
            "reconstructed frames.\n", av_get_pix_fmt_name(recon_format));
 
-    if (ctx->surface_width  < constraints->min_width  ||
-        ctx->surface_height < constraints->min_height ||
-        ctx->surface_width  > constraints->max_width ||
-        ctx->surface_height > constraints->max_height) {
+    if (base_ctx->surface_width  < constraints->min_width  ||
+        base_ctx->surface_height < constraints->min_height ||
+        base_ctx->surface_width  > constraints->max_width ||
+        base_ctx->surface_height > constraints->max_height) {
         av_log(avctx, AV_LOG_ERROR, "Hardware does not support encoding at "
                "size %dx%d (constraints: width %d-%d height %d-%d).\n",
-               ctx->surface_width, ctx->surface_height,
+               base_ctx->surface_width, base_ctx->surface_height,
                constraints->min_width,  constraints->max_width,
                constraints->min_height, constraints->max_height);
         err = AVERROR(EINVAL);
@@ -2717,19 +2176,19 @@  static av_cold int vaapi_encode_create_recon_frames(AVCodecContext *avctx)
     av_freep(&hwconfig);
     av_hwframe_constraints_free(&constraints);
 
-    ctx->recon_frames_ref = av_hwframe_ctx_alloc(ctx->device_ref);
-    if (!ctx->recon_frames_ref) {
+    base_ctx->recon_frames_ref = av_hwframe_ctx_alloc(base_ctx->device_ref);
+    if (!base_ctx->recon_frames_ref) {
         err = AVERROR(ENOMEM);
         goto fail;
     }
-    ctx->recon_frames = (AVHWFramesContext*)ctx->recon_frames_ref->data;
+    base_ctx->recon_frames = (AVHWFramesContext*)base_ctx->recon_frames_ref->data;
 
-    ctx->recon_frames->format    = AV_PIX_FMT_VAAPI;
-    ctx->recon_frames->sw_format = recon_format;
-    ctx->recon_frames->width     = ctx->surface_width;
-    ctx->recon_frames->height    = ctx->surface_height;
+    base_ctx->recon_frames->format    = AV_PIX_FMT_VAAPI;
+    base_ctx->recon_frames->sw_format = recon_format;
+    base_ctx->recon_frames->width     = base_ctx->surface_width;
+    base_ctx->recon_frames->height    = base_ctx->surface_height;
 
-    err = av_hwframe_ctx_init(ctx->recon_frames_ref);
+    err = av_hwframe_ctx_init(base_ctx->recon_frames_ref);
     if (err < 0) {
         av_log(avctx, AV_LOG_ERROR, "Failed to initialise reconstructed "
                "frame context: %d.\n", err);
@@ -2743,49 +2202,34 @@  static av_cold int vaapi_encode_create_recon_frames(AVCodecContext *avctx)
     return err;
 }
 
+static const HWEncodeType vaapi_type = {
+    .alloc  = &vaapi_encode_alloc,
+
+    .issue  = &vaapi_encode_issue,
+
+    .output = &vaapi_encode_output,
+
+    .free   = &vaapi_encode_free,
+};
+
 av_cold int ff_vaapi_encode_init(AVCodecContext *avctx)
 {
-    VAAPIEncodeContext *ctx = avctx->priv_data;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
+    VAAPIEncodeContext       *ctx = avctx->priv_data;
     AVVAAPIFramesContext *recon_hwctx = NULL;
     VAStatus vas;
     int err;
 
+    err = ff_hw_base_encode_init(avctx);
+    if (err < 0)
+        goto fail;
+
     ctx->va_config  = VA_INVALID_ID;
     ctx->va_context = VA_INVALID_ID;
 
-    /* If you add something that can fail above this av_frame_alloc(),
-     * modify ff_vaapi_encode_close() accordingly. */
-    ctx->frame = av_frame_alloc();
-    if (!ctx->frame) {
-        return AVERROR(ENOMEM);
-    }
+    base_ctx->hw = &vaapi_type;
 
-    if (!avctx->hw_frames_ctx) {
-        av_log(avctx, AV_LOG_ERROR, "A hardware frames reference is "
-               "required to associate the encoding device.\n");
-        return AVERROR(EINVAL);
-    }
-
-    ctx->input_frames_ref = av_buffer_ref(avctx->hw_frames_ctx);
-    if (!ctx->input_frames_ref) {
-        err = AVERROR(ENOMEM);
-        goto fail;
-    }
-    ctx->input_frames = (AVHWFramesContext*)ctx->input_frames_ref->data;
-
-    ctx->device_ref = av_buffer_ref(ctx->input_frames->device_ref);
-    if (!ctx->device_ref) {
-        err = AVERROR(ENOMEM);
-        goto fail;
-    }
-    ctx->device = (AVHWDeviceContext*)ctx->device_ref->data;
-    ctx->hwctx = ctx->device->hwctx;
-
-    ctx->tail_pkt = av_packet_alloc();
-    if (!ctx->tail_pkt) {
-        err = AVERROR(ENOMEM);
-        goto fail;
-    }
+    ctx->hwctx = base_ctx->device->hwctx;
 
     err = vaapi_encode_profile_entrypoint(avctx);
     if (err < 0)
@@ -2797,11 +2241,11 @@  av_cold int ff_vaapi_encode_init(AVCodecContext *avctx)
             goto fail;
     } else {
         // Assume 16x16 blocks.
-        ctx->surface_width  = FFALIGN(avctx->width,  16);
-        ctx->surface_height = FFALIGN(avctx->height, 16);
+        base_ctx->surface_width  = FFALIGN(avctx->width,  16);
+        base_ctx->surface_height = FFALIGN(avctx->height, 16);
         if (ctx->codec->flags & FLAG_SLICE_CONTROL) {
-            ctx->slice_block_width  = 16;
-            ctx->slice_block_height = 16;
+            base_ctx->slice_block_width  = 16;
+            base_ctx->slice_block_height = 16;
         }
     }
 
@@ -2852,9 +2296,9 @@  av_cold int ff_vaapi_encode_init(AVCodecContext *avctx)
     if (err < 0)
         goto fail;
 
-    recon_hwctx = ctx->recon_frames->hwctx;
+    recon_hwctx = base_ctx->recon_frames->hwctx;
     vas = vaCreateContext(ctx->hwctx->display, ctx->va_config,
-                          ctx->surface_width, ctx->surface_height,
+                          base_ctx->surface_width, base_ctx->surface_height,
                           VA_PROGRESSIVE,
                           recon_hwctx->surface_ids,
                           recon_hwctx->nb_surfaces,
@@ -2881,8 +2325,8 @@  av_cold int ff_vaapi_encode_init(AVCodecContext *avctx)
             goto fail;
     }
 
-    ctx->output_delay = ctx->b_per_p;
-    ctx->decode_delay = ctx->max_b_depth;
+    base_ctx->output_delay = base_ctx->b_per_p;
+    base_ctx->decode_delay = base_ctx->max_b_depth;
 
     if (ctx->codec->sequence_params_size > 0) {
         ctx->codec_sequence_params =
@@ -2937,11 +2381,11 @@  av_cold int ff_vaapi_encode_init(AVCodecContext *avctx)
     // check vaSyncBuffer function
     vas = vaSyncBuffer(ctx->hwctx->display, VA_INVALID_ID, 0);
     if (vas != VA_STATUS_ERROR_UNIMPLEMENTED) {
-        ctx->has_sync_buffer_func = 1;
-        ctx->encode_fifo = av_fifo_alloc2(ctx->async_depth,
-                                          sizeof(VAAPIEncodePicture *),
-                                          0);
-        if (!ctx->encode_fifo)
+        base_ctx->async_encode = 1;
+        base_ctx->encode_fifo = av_fifo_alloc2(base_ctx->async_depth,
+                                               sizeof(VAAPIEncodePicture*),
+                                               0);
+        if (!base_ctx->encode_fifo)
             return AVERROR(ENOMEM);
     }
 #endif
@@ -2954,15 +2398,16 @@  fail:
 
 av_cold int ff_vaapi_encode_close(AVCodecContext *avctx)
 {
-    VAAPIEncodeContext *ctx = avctx->priv_data;
-    VAAPIEncodePicture *pic, *next;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
+    VAAPIEncodeContext       *ctx = avctx->priv_data;
+    HWBaseEncodePicture *pic, *next;
 
     /* We check ctx->frame to know whether ff_vaapi_encode_init()
      * has been called and va_config/va_context initialized. */
-    if (!ctx->frame)
+    if (!base_ctx->frame)
         return 0;
 
-    for (pic = ctx->pic_start; pic; pic = next) {
+    for (pic = base_ctx->pic_start; pic; pic = next) {
         next = pic->next;
         vaapi_encode_free(avctx, pic);
     }
@@ -2979,16 +2424,13 @@  av_cold int ff_vaapi_encode_close(AVCodecContext *avctx)
         ctx->va_config = VA_INVALID_ID;
     }
 
-    av_frame_free(&ctx->frame);
-    av_packet_free(&ctx->tail_pkt);
-
     av_freep(&ctx->codec_sequence_params);
     av_freep(&ctx->codec_picture_params);
-    av_fifo_freep2(&ctx->encode_fifo);
+    av_fifo_freep2(&base_ctx->encode_fifo);
+
+    av_buffer_unref(&base_ctx->recon_frames_ref);
 
-    av_buffer_unref(&ctx->recon_frames_ref);
-    av_buffer_unref(&ctx->input_frames_ref);
-    av_buffer_unref(&ctx->device_ref);
+    ff_hw_base_encode_close(avctx);
 
     return 0;
 }
diff --git a/libavcodec/vaapi_encode.h b/libavcodec/vaapi_encode.h
index d5d6d5eb1b..772fb83f5b 100644
--- a/libavcodec/vaapi_encode.h
+++ b/libavcodec/vaapi_encode.h
@@ -29,36 +29,24 @@ 
 
 #include "libavutil/hwcontext.h"
 #include "libavutil/hwcontext_vaapi.h"
-#include "libavutil/fifo.h"
 
 #include "avcodec.h"
 #include "hwconfig.h"
+#include "hw_base_encode.h"
 
 struct VAAPIEncodeType;
 struct VAAPIEncodePicture;
 
+extern const AVCodecHWConfigInternal *const ff_vaapi_encode_hw_configs[];
+
 enum {
     MAX_CONFIG_ATTRIBUTES  = 4,
     MAX_GLOBAL_PARAMS      = 4,
-    MAX_DPB_SIZE           = 16,
-    MAX_PICTURE_REFERENCES = 2,
-    MAX_REORDER_DELAY      = 16,
     MAX_PARAM_BUFFER_SIZE  = 1024,
     // A.4.1: table A.6 allows at most 22 tile rows for any level.
     MAX_TILE_ROWS          = 22,
     // A.4.1: table A.6 allows at most 20 tile columns for any level.
     MAX_TILE_COLS          = 20,
-    MAX_ASYNC_DEPTH        = 64,
-    MAX_REFERENCE_LIST_NUM = 2,
-};
-
-extern const AVCodecHWConfigInternal *const ff_vaapi_encode_hw_configs[];
-
-enum {
-    PICTURE_TYPE_IDR = 0,
-    PICTURE_TYPE_I   = 1,
-    PICTURE_TYPE_P   = 2,
-    PICTURE_TYPE_B   = 3,
 };
 
 typedef struct VAAPIEncodeSlice {
@@ -71,16 +59,7 @@  typedef struct VAAPIEncodeSlice {
 } VAAPIEncodeSlice;
 
 typedef struct VAAPIEncodePicture {
-    struct VAAPIEncodePicture *next;
-
-    int64_t         display_order;
-    int64_t         encode_order;
-    int64_t         pts;
-    int64_t         duration;
-    int             force_idr;
-
-    void           *opaque;
-    AVBufferRef    *opaque_ref;
+    HWBaseEncodePicture base;
 
 #if VA_CHECK_VERSION(1, 0, 0)
     // ROI regions.
@@ -89,15 +68,7 @@  typedef struct VAAPIEncodePicture {
     void           *roi;
 #endif
 
-    int             type;
-    int             b_depth;
-    int             encode_issued;
-    int             encode_complete;
-
-    AVFrame        *input_image;
     VASurfaceID     input_surface;
-
-    AVFrame        *recon_image;
     VASurfaceID     recon_surface;
 
     int          nb_param_buffers;
@@ -107,34 +78,10 @@  typedef struct VAAPIEncodePicture {
     VABufferID     *output_buffer_ref;
     VABufferID      output_buffer;
 
-    void           *priv_data;
     void           *codec_picture_params;
 
-    // Whether this picture is a reference picture.
-    int             is_reference;
-
-    // The contents of the DPB after this picture has been decoded.
-    // This will contain the picture itself if it is a reference picture,
-    // but not if it isn't.
-    int                     nb_dpb_pics;
-    struct VAAPIEncodePicture *dpb[MAX_DPB_SIZE];
-    // The reference pictures used in decoding this picture. If they are
-    // used by later pictures they will also appear in the DPB. ref[0][] for
-    // previous reference frames. ref[1][] for future reference frames.
-    int                     nb_refs[MAX_REFERENCE_LIST_NUM];
-    struct VAAPIEncodePicture *refs[MAX_REFERENCE_LIST_NUM][MAX_PICTURE_REFERENCES];
-    // The previous reference picture in encode order.  Must be in at least
-    // one of the reference list and DPB list.
-    struct VAAPIEncodePicture *prev;
-    // Reference count for other pictures referring to this one through
-    // the above pointers, directly from incomplete pictures and indirectly
-    // through completed pictures.
-    int             ref_count[2];
-    int             ref_removed[2];
-
     int          nb_slices;
     VAAPIEncodeSlice *slices;
-
     /**
      * indicate if current frame is an independent frame that the coded data
      * can be pushed to downstream directly. Coded of non-independent frame
@@ -162,17 +109,6 @@  typedef struct VAAPIEncodeProfile {
     VAProfile va_profile;
 } VAAPIEncodeProfile;
 
-enum {
-    RC_MODE_AUTO,
-    RC_MODE_CQP,
-    RC_MODE_CBR,
-    RC_MODE_VBR,
-    RC_MODE_ICQ,
-    RC_MODE_QVBR,
-    RC_MODE_AVBR,
-    RC_MODE_MAX = RC_MODE_AVBR,
-};
-
 typedef struct VAAPIEncodeRCMode {
     // Mode from above enum (RC_MODE_*).
     int mode;
@@ -193,57 +129,26 @@  typedef struct VAAPIEncodeRCMode {
 } VAAPIEncodeRCMode;
 
 typedef struct VAAPIEncodeContext {
-    const AVClass *class;
+    // Base.
+    HWBaseEncodeContext base;
 
     // Codec-specific hooks.
     const struct VAAPIEncodeType *codec;
 
-    // Global options.
-
     // Use low power encoding mode.
     int             low_power;
 
-    // Number of I frames between IDR frames.
-    int             idr_interval;
-
-    // Desired B frame reference depth.
-    int             desired_b_depth;
-
     // Max Frame Size
     int             max_frame_size;
 
-    // Explicitly set RC mode (otherwise attempt to pick from
-    // available modes).
-    int             explicit_rc_mode;
-
-    // Explicitly-set QP, for use with the "qp" options.
-    // (Forces CQP mode when set, overriding everything else.)
-    int             explicit_qp;
-
     // Desired packed headers.
     unsigned int    desired_packed_headers;
 
-    // The required size of surfaces.  This is probably the input
-    // size (AVCodecContext.width|height) aligned up to whatever
-    // block size is required by the codec.
-    int             surface_width;
-    int             surface_height;
-
-    // The block size for slice calculations.
-    int             slice_block_width;
-    int             slice_block_height;
-
-    // Everything above this point must be set before calling
-    // ff_vaapi_encode_init().
-
     // Chosen encoding profile details.
     const VAAPIEncodeProfile *profile;
 
     // Chosen rate control mode details.
     const VAAPIEncodeRCMode *rc_mode;
-    // RC quality level - meaning depends on codec and RC mode.
-    // In CQP mode this sets the fixed quantiser value.
-    int             rc_quality;
 
     // Encoding profile (VAProfile*).
     VAProfile       va_profile;
@@ -263,18 +168,8 @@  typedef struct VAAPIEncodeContext {
     VAConfigID      va_config;
     VAContextID     va_context;
 
-    AVBufferRef    *device_ref;
-    AVHWDeviceContext *device;
     AVVAAPIDeviceContext *hwctx;
 
-    // The hardware frame context containing the input frames.
-    AVBufferRef    *input_frames_ref;
-    AVHWFramesContext *input_frames;
-
-    // The hardware frame context containing the reconstructed frames.
-    AVBufferRef    *recon_frames_ref;
-    AVHWFramesContext *recon_frames;
-
     // Pool of (reusable) bitstream output buffers.
     struct FFRefStructPool *output_buffer_pool;
 
@@ -301,30 +196,6 @@  typedef struct VAAPIEncodeContext {
     // structure (VAEncPictureParameterBuffer*).
     void           *codec_picture_params;
 
-    // Current encoding window, in display (input) order.
-    VAAPIEncodePicture *pic_start, *pic_end;
-    // The next picture to use as the previous reference picture in
-    // encoding order. Order from small to large in encoding order.
-    VAAPIEncodePicture *next_prev[MAX_PICTURE_REFERENCES];
-    int                 nb_next_prev;
-
-    // Next input order index (display order).
-    int64_t         input_order;
-    // Number of frames that output is behind input.
-    int64_t         output_delay;
-    // Next encode order index.
-    int64_t         encode_order;
-    // Number of frames decode output will need to be delayed.
-    int64_t         decode_delay;
-    // Next output order index (in encode order).
-    int64_t         output_order;
-
-    // Timestamp handling.
-    int64_t         first_pts;
-    int64_t         dts_pts_diff;
-    int64_t         ts_ring[MAX_REORDER_DELAY * 3 +
-                            MAX_ASYNC_DEPTH];
-
     // Slice structure.
     int slice_block_rows;
     int slice_block_cols;
@@ -343,43 +214,12 @@  typedef struct VAAPIEncodeContext {
     // Location of the i-th tile row boundary.
     int row_bd[MAX_TILE_ROWS + 1];
 
-    // Frame type decision.
-    int gop_size;
-    int closed_gop;
-    int gop_per_idr;
-    int p_per_i;
-    int max_b_depth;
-    int b_per_p;
-    int force_idr;
-    int idr_counter;
-    int gop_counter;
-    int end_of_stream;
-    int p_to_gpb;
-
-    // Whether the driver supports ROI at all.
-    int             roi_allowed;
     // Maximum number of regions supported by the driver.
     int             roi_max_regions;
     // Quantisation range for offset calculations.  Set by codec-specific
     // code, as it may change based on parameters.
     int             roi_quant_range;
 
-    // The encoder does not support cropping information, so warn about
-    // it the first time we encounter any nonzero crop fields.
-    int             crop_warned;
-    // If the driver does not support ROI then warn the first time we
-    // encounter a frame with ROI side data.
-    int             roi_warned;
-
-    AVFrame         *frame;
-
-    // Whether the driver support vaSyncBuffer
-    int             has_sync_buffer_func;
-    // Store buffered pic
-    AVFifo          *encode_fifo;
-    // Max number of frame buffered in encoder.
-    int             async_depth;
-
     /** Head data for current output pkt, used only for AV1. */
     //void  *header_data;
     //size_t header_data_size;
@@ -389,30 +229,8 @@  typedef struct VAAPIEncodeContext {
      * This is a RefStruct reference.
      */
     VABufferID     *coded_buffer_ref;
-
-    /** Tail data of a pic, now only used for av1 repeat frame header. */
-    AVPacket        *tail_pkt;
 } VAAPIEncodeContext;
 
-enum {
-    // Codec supports controlling the subdivision of pictures into slices.
-    FLAG_SLICE_CONTROL         = 1 << 0,
-    // Codec only supports constant quality (no rate control).
-    FLAG_CONSTANT_QUALITY_ONLY = 1 << 1,
-    // Codec is intra-only.
-    FLAG_INTRA_ONLY            = 1 << 2,
-    // Codec supports B-pictures.
-    FLAG_B_PICTURES            = 1 << 3,
-    // Codec supports referencing B-pictures.
-    FLAG_B_PICTURE_REFERENCES  = 1 << 4,
-    // Codec supports non-IDR key pictures (that is, key pictures do
-    // not necessarily empty the DPB).
-    FLAG_NON_IDR_KEY_PICTURES  = 1 << 5,
-    // Codec output packet without timestamp delay, which means the
-    // output packet has same PTS and DTS.
-    FLAG_TIMESTAMP_NO_DELAY    = 1 << 6,
-};
-
 typedef struct VAAPIEncodeType {
     // List of supported profiles and corresponding VAAPI profiles.
     // (Must end with AV_PROFILE_UNKNOWN.)
@@ -492,53 +310,18 @@  typedef struct VAAPIEncodeType {
                                  char *data, size_t *data_len);
 } VAAPIEncodeType;
 
-
-int ff_vaapi_encode_receive_packet(AVCodecContext *avctx, AVPacket *pkt);
-
 int ff_vaapi_encode_init(AVCodecContext *avctx);
 int ff_vaapi_encode_close(AVCodecContext *avctx);
 
-
 #define VAAPI_ENCODE_COMMON_OPTIONS \
     { "low_power", \
       "Use low-power encoding mode (only available on some platforms; " \
       "may not support all encoding features)", \
       OFFSET(common.low_power), AV_OPT_TYPE_BOOL, \
       { .i64 = 0 }, 0, 1, FLAGS }, \
-    { "idr_interval", \
-      "Distance (in I-frames) between IDR frames", \
-      OFFSET(common.idr_interval), AV_OPT_TYPE_INT, \
-      { .i64 = 0 }, 0, INT_MAX, FLAGS }, \
-    { "b_depth", \
-      "Maximum B-frame reference depth", \
-      OFFSET(common.desired_b_depth), AV_OPT_TYPE_INT, \
-      { .i64 = 1 }, 1, INT_MAX, FLAGS }, \
-    { "async_depth", "Maximum processing parallelism. " \
-      "Increase this to improve single channel performance. This option " \
-      "doesn't work if driver doesn't implement vaSyncBuffer function.", \
-      OFFSET(common.async_depth), AV_OPT_TYPE_INT, \
-      { .i64 = 2 }, 1, MAX_ASYNC_DEPTH, FLAGS }, \
     { "max_frame_size", \
       "Maximum frame size (in bytes)",\
       OFFSET(common.max_frame_size), AV_OPT_TYPE_INT, \
       { .i64 = 0 }, 0, INT_MAX, FLAGS }
 
-#define VAAPI_ENCODE_RC_MODE(name, desc) \
-    { #name, desc, 0, AV_OPT_TYPE_CONST, { .i64 = RC_MODE_ ## name }, \
-      0, 0, FLAGS, "rc_mode" }
-#define VAAPI_ENCODE_RC_OPTIONS \
-    { "rc_mode",\
-      "Set rate control mode", \
-      OFFSET(common.explicit_rc_mode), AV_OPT_TYPE_INT, \
-      { .i64 = RC_MODE_AUTO }, RC_MODE_AUTO, RC_MODE_MAX, FLAGS, "rc_mode" }, \
-    { "auto", "Choose mode automatically based on other parameters", \
-      0, AV_OPT_TYPE_CONST, { .i64 = RC_MODE_AUTO }, 0, 0, FLAGS, "rc_mode" }, \
-    VAAPI_ENCODE_RC_MODE(CQP,  "Constant-quality"), \
-    VAAPI_ENCODE_RC_MODE(CBR,  "Constant-bitrate"), \
-    VAAPI_ENCODE_RC_MODE(VBR,  "Variable-bitrate"), \
-    VAAPI_ENCODE_RC_MODE(ICQ,  "Intelligent constant-quality"), \
-    VAAPI_ENCODE_RC_MODE(QVBR, "Quality-defined variable-bitrate"), \
-    VAAPI_ENCODE_RC_MODE(AVBR, "Average variable-bitrate")
-
-
 #endif /* AVCODEC_VAAPI_ENCODE_H */
diff --git a/libavcodec/vaapi_encode_av1.c b/libavcodec/vaapi_encode_av1.c
index 5a9ff0f798..d3785e133c 100644
--- a/libavcodec/vaapi_encode_av1.c
+++ b/libavcodec/vaapi_encode_av1.c
@@ -109,20 +109,21 @@  static void vaapi_encode_av1_trace_write_log(void *ctx,
 
 static av_cold int vaapi_encode_av1_get_encoder_caps(AVCodecContext *avctx)
 {
-    VAAPIEncodeContext *ctx = avctx->priv_data;
-    VAAPIEncodeAV1Context *priv = avctx->priv_data;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
+    VAAPIEncodeAV1Context   *priv = avctx->priv_data;
 
     // Surfaces must be aligned to superblock boundaries.
-    ctx->surface_width  = FFALIGN(avctx->width,  priv->use_128x128_superblock ? 128 : 64);
-    ctx->surface_height = FFALIGN(avctx->height, priv->use_128x128_superblock ? 128 : 64);
+    base_ctx->surface_width  = FFALIGN(avctx->width,  priv->use_128x128_superblock ? 128 : 64);
+    base_ctx->surface_height = FFALIGN(avctx->height, priv->use_128x128_superblock ? 128 : 64);
 
     return 0;
 }
 
 static av_cold int vaapi_encode_av1_configure(AVCodecContext *avctx)
 {
-    VAAPIEncodeContext     *ctx = avctx->priv_data;
-    VAAPIEncodeAV1Context *priv = avctx->priv_data;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
+    VAAPIEncodeContext       *ctx = avctx->priv_data;
+    VAAPIEncodeAV1Context   *priv = avctx->priv_data;
     int ret;
 
     ret = ff_cbs_init(&priv->cbc, AV_CODEC_ID_AV1, avctx);
@@ -134,7 +135,7 @@  static av_cold int vaapi_encode_av1_configure(AVCodecContext *avctx)
     priv->cbc->trace_write_callback = vaapi_encode_av1_trace_write_log;
 
     if (ctx->rc_mode->quality) {
-        priv->q_idx_p = av_clip(ctx->rc_quality, 0, AV1_MAX_QUANT);
+        priv->q_idx_p = av_clip(base_ctx->rc_quality, 0, AV1_MAX_QUANT);
         if (fabs(avctx->i_quant_factor) > 0.0)
             priv->q_idx_idr =
                 av_clip((fabs(avctx->i_quant_factor) * priv->q_idx_p  +
@@ -355,6 +356,7 @@  static int vaapi_encode_av1_write_sequence_header(AVCodecContext *avctx,
 
 static int vaapi_encode_av1_init_sequence_params(AVCodecContext *avctx)
 {
+    HWBaseEncodeContext         *base_ctx = avctx->priv_data;
     VAAPIEncodeContext               *ctx = avctx->priv_data;
     VAAPIEncodeAV1Context           *priv = avctx->priv_data;
     AV1RawOBU                     *sh_obu = &priv->sh;
@@ -367,7 +369,7 @@  static int vaapi_encode_av1_init_sequence_params(AVCodecContext *avctx)
     memset(sh_obu, 0, sizeof(*sh_obu));
     sh_obu->header.obu_type = AV1_OBU_SEQUENCE_HEADER;
 
-    desc = av_pix_fmt_desc_get(priv->common.input_frames->sw_format);
+    desc = av_pix_fmt_desc_get(base_ctx->input_frames->sw_format);
     av_assert0(desc);
 
     sh->seq_profile  = avctx->profile;
@@ -419,7 +421,7 @@  static int vaapi_encode_av1_init_sequence_params(AVCodecContext *avctx)
             framerate = 0;
 
         level = ff_av1_guess_level(avctx->bit_rate, priv->tier,
-                                   ctx->surface_width, ctx->surface_height,
+                                   base_ctx->surface_width, base_ctx->surface_height,
                                    priv->tile_rows * priv->tile_cols,
                                    priv->tile_cols, framerate);
         if (level) {
@@ -436,8 +438,8 @@  static int vaapi_encode_av1_init_sequence_params(AVCodecContext *avctx)
     vseq->seq_level_idx           = sh->seq_level_idx[0];
     vseq->seq_tier                = sh->seq_tier[0];
     vseq->order_hint_bits_minus_1 = sh->order_hint_bits_minus_1;
-    vseq->intra_period            = ctx->gop_size;
-    vseq->ip_period               = ctx->b_per_p + 1;
+    vseq->intra_period            = base_ctx->gop_size;
+    vseq->ip_period               = base_ctx->b_per_p + 1;
 
     vseq->seq_fields.bits.enable_order_hint = sh->enable_order_hint;
 
@@ -464,12 +466,13 @@  static int vaapi_encode_av1_init_picture_params(AVCodecContext *avctx,
 {
     VAAPIEncodeContext              *ctx = avctx->priv_data;
     VAAPIEncodeAV1Context          *priv = avctx->priv_data;
-    VAAPIEncodeAV1Picture          *hpic = pic->priv_data;
+    HWBaseEncodePicture        *base_pic = (HWBaseEncodePicture *)pic;
+    VAAPIEncodeAV1Picture          *hpic = base_pic->priv_data;
     AV1RawOBU                    *fh_obu = &priv->fh;
     AV1RawFrameHeader                *fh = &fh_obu->obu.frame.header;
     VAEncPictureParameterBufferAV1 *vpic = pic->codec_picture_params;
     CodedBitstreamFragment          *obu = &priv->current_obu;
-    VAAPIEncodePicture    *ref;
+    HWBaseEncodePicture    *ref;
     VAAPIEncodeAV1Picture *href;
     int slot, i;
     int ret;
@@ -478,24 +481,24 @@  static int vaapi_encode_av1_init_picture_params(AVCodecContext *avctx,
 
     memset(fh_obu, 0, sizeof(*fh_obu));
     pic->nb_slices = priv->tile_groups;
-    pic->non_independent_frame = pic->encode_order < pic->display_order;
+    pic->non_independent_frame = base_pic->encode_order < base_pic->display_order;
     fh_obu->header.obu_type = AV1_OBU_FRAME_HEADER;
     fh_obu->header.obu_has_size_field = 1;
 
-    switch (pic->type) {
+    switch (base_pic->type) {
     case PICTURE_TYPE_IDR:
-        av_assert0(pic->nb_refs[0] == 0 || pic->nb_refs[1]);
+        av_assert0(base_pic->nb_refs[0] == 0 || base_pic->nb_refs[1]);
         fh->frame_type = AV1_FRAME_KEY;
         fh->refresh_frame_flags = 0xFF;
         fh->base_q_idx = priv->q_idx_idr;
         hpic->slot = 0;
-        hpic->last_idr_frame = pic->display_order;
+        hpic->last_idr_frame = base_pic->display_order;
         break;
     case PICTURE_TYPE_P:
-        av_assert0(pic->nb_refs[0]);
+        av_assert0(base_pic->nb_refs[0]);
         fh->frame_type = AV1_FRAME_INTER;
         fh->base_q_idx = priv->q_idx_p;
-        ref = pic->refs[0][pic->nb_refs[0] - 1];
+        ref = base_pic->refs[0][base_pic->nb_refs[0] - 1];
         href = ref->priv_data;
         hpic->slot = !href->slot;
         hpic->last_idr_frame = href->last_idr_frame;
@@ -510,8 +513,8 @@  static int vaapi_encode_av1_init_picture_params(AVCodecContext *avctx,
         vpic->ref_frame_ctrl_l0.fields.search_idx0 = AV1_REF_FRAME_LAST;
 
         /** set the 2nd nearest frame in L0 as Golden frame. */
-        if (pic->nb_refs[0] > 1) {
-            ref = pic->refs[0][pic->nb_refs[0] - 2];
+        if (base_pic->nb_refs[0] > 1) {
+            ref = base_pic->refs[0][base_pic->nb_refs[0] - 2];
             href = ref->priv_data;
             fh->ref_frame_idx[3] = href->slot;
             fh->ref_order_hint[href->slot] = ref->display_order - href->last_idr_frame;
@@ -519,7 +522,7 @@  static int vaapi_encode_av1_init_picture_params(AVCodecContext *avctx,
         }
         break;
     case PICTURE_TYPE_B:
-        av_assert0(pic->nb_refs[0] && pic->nb_refs[1]);
+        av_assert0(base_pic->nb_refs[0] && base_pic->nb_refs[1]);
         fh->frame_type = AV1_FRAME_INTER;
         fh->base_q_idx = priv->q_idx_b;
         fh->refresh_frame_flags = 0x0;
@@ -532,7 +535,7 @@  static int vaapi_encode_av1_init_picture_params(AVCodecContext *avctx,
         vpic->ref_frame_ctrl_l0.fields.search_idx0 = AV1_REF_FRAME_LAST;
         vpic->ref_frame_ctrl_l1.fields.search_idx0 = AV1_REF_FRAME_BWDREF;
 
-        ref                            = pic->refs[0][pic->nb_refs[0] - 1];
+        ref                            = base_pic->refs[0][base_pic->nb_refs[0] - 1];
         href                           = ref->priv_data;
         hpic->last_idr_frame           = href->last_idr_frame;
         fh->primary_ref_frame          = href->slot;
@@ -541,7 +544,7 @@  static int vaapi_encode_av1_init_picture_params(AVCodecContext *avctx,
             fh->ref_frame_idx[i] = href->slot;
         }
 
-        ref                            = pic->refs[1][pic->nb_refs[1] - 1];
+        ref                            = base_pic->refs[1][base_pic->nb_refs[1] - 1];
         href                           = ref->priv_data;
         fh->ref_order_hint[href->slot] = ref->display_order - href->last_idr_frame;
         for (i = AV1_REF_FRAME_GOLDEN; i < AV1_REFS_PER_FRAME; i++) {
@@ -552,13 +555,13 @@  static int vaapi_encode_av1_init_picture_params(AVCodecContext *avctx,
         av_assert0(0 && "invalid picture type");
     }
 
-    fh->show_frame                = pic->display_order <= pic->encode_order;
+    fh->show_frame                = base_pic->display_order <= base_pic->encode_order;
     fh->showable_frame            = fh->frame_type != AV1_FRAME_KEY;
     fh->frame_width_minus_1       = avctx->width - 1;
     fh->frame_height_minus_1      = avctx->height - 1;
     fh->render_width_minus_1      = fh->frame_width_minus_1;
     fh->render_height_minus_1     = fh->frame_height_minus_1;
-    fh->order_hint                = pic->display_order - hpic->last_idr_frame;
+    fh->order_hint                = base_pic->display_order - hpic->last_idr_frame;
     fh->tile_cols                 = priv->tile_cols;
     fh->tile_rows                 = priv->tile_rows;
     fh->tile_cols_log2            = priv->tile_cols_log2;
@@ -624,13 +627,13 @@  static int vaapi_encode_av1_init_picture_params(AVCodecContext *avctx,
         vpic->reference_frames[i] = VA_INVALID_SURFACE;
 
     for (i = 0; i < MAX_REFERENCE_LIST_NUM; i++) {
-        for (int j = 0; j < pic->nb_refs[i]; j++) {
-            VAAPIEncodePicture *ref_pic = pic->refs[i][j];
+        for (int j = 0; j < base_pic->nb_refs[i]; j++) {
+            HWBaseEncodePicture *ref_pic = base_pic->refs[i][j];
 
             slot = ((VAAPIEncodeAV1Picture*)ref_pic->priv_data)->slot;
             av_assert0(vpic->reference_frames[slot] == VA_INVALID_SURFACE);
 
-            vpic->reference_frames[slot] = ref_pic->recon_surface;
+            vpic->reference_frames[slot] = ((VAAPIEncodePicture *)ref_pic)->recon_surface;
         }
     }
 
@@ -651,7 +654,7 @@  static int vaapi_encode_av1_init_picture_params(AVCodecContext *avctx,
         vpic->bit_offset_cdef_params         = priv->cdef_start_offset;
         vpic->size_in_bits_cdef_params       = priv->cdef_param_size;
         vpic->size_in_bits_frame_hdr_obu     = priv->fh_data_len;
-        vpic->byte_offset_frame_hdr_obu_size = (((pic->type == PICTURE_TYPE_IDR) ?
+        vpic->byte_offset_frame_hdr_obu_size = (((base_pic->type == PICTURE_TYPE_IDR) ?
                                                priv->sh_data_len / 8 : 0) +
                                                (fh_obu->header.obu_extension_flag ?
                                                2 : 1));
@@ -693,14 +696,15 @@  static int vaapi_encode_av1_write_picture_header(AVCodecContext *avctx,
     CodedBitstreamAV1Context *cbctx = priv->cbc->priv_data;
     AV1RawOBU               *fh_obu = &priv->fh;
     AV1RawFrameHeader       *rep_fh = &fh_obu->obu.frame_header;
+    HWBaseEncodePicture *base_pic   = (HWBaseEncodePicture *)pic;
     VAAPIEncodeAV1Picture *href;
     int ret = 0;
 
     pic->tail_size = 0;
     /** Pack repeat frame header. */
-    if (pic->display_order > pic->encode_order) {
+    if (base_pic->display_order > base_pic->encode_order) {
         memset(fh_obu, 0, sizeof(*fh_obu));
-        href = pic->refs[0][pic->nb_refs[0] - 1]->priv_data;
+        href = base_pic->refs[0][base_pic->nb_refs[0] - 1]->priv_data;
         fh_obu->header.obu_type = AV1_OBU_FRAME_HEADER;
         fh_obu->header.obu_has_size_field = 1;
 
@@ -862,8 +866,9 @@  static av_cold int vaapi_encode_av1_close(AVCodecContext *avctx)
 #define FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM)
 
 static const AVOption vaapi_encode_av1_options[] = {
+    HW_BASE_ENCODE_COMMON_OPTIONS,
     VAAPI_ENCODE_COMMON_OPTIONS,
-    VAAPI_ENCODE_RC_OPTIONS,
+    HW_BASE_ENCODE_RC_OPTIONS,
     { "profile", "Set profile (seq_profile)",
       OFFSET(profile), AV_OPT_TYPE_INT,
       { .i64 = AV_PROFILE_UNKNOWN }, AV_PROFILE_UNKNOWN, 0xff, FLAGS, "profile" },
@@ -934,7 +939,7 @@  const FFCodec ff_av1_vaapi_encoder = {
     .p.id           = AV_CODEC_ID_AV1,
     .priv_data_size = sizeof(VAAPIEncodeAV1Context),
     .init           = &vaapi_encode_av1_init,
-    FF_CODEC_RECEIVE_PACKET_CB(&ff_vaapi_encode_receive_packet),
+    FF_CODEC_RECEIVE_PACKET_CB(&ff_hw_base_encode_receive_packet),
     .close          = &vaapi_encode_av1_close,
     .p.priv_class   = &vaapi_encode_av1_class,
     .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE |
diff --git a/libavcodec/vaapi_encode_h264.c b/libavcodec/vaapi_encode_h264.c
index 57b5ea2bab..fe02f75f85 100644
--- a/libavcodec/vaapi_encode_h264.c
+++ b/libavcodec/vaapi_encode_h264.c
@@ -234,7 +234,7 @@  static int vaapi_encode_h264_write_extra_header(AVCodecContext *avctx,
                 goto fail;
         }
         if (priv->sei_needed & SEI_TIMING) {
-            if (pic->type == PICTURE_TYPE_IDR) {
+            if (pic->base.type == PICTURE_TYPE_IDR) {
                 err = ff_cbs_sei_add_message(priv->cbc, au, 1,
                                              SEI_TYPE_BUFFERING_PERIOD,
                                              &priv->sei_buffering_period, NULL);
@@ -296,6 +296,7 @@  fail:
 
 static int vaapi_encode_h264_init_sequence_params(AVCodecContext *avctx)
 {
+    HWBaseEncodeContext          *base_ctx = avctx->priv_data;
     VAAPIEncodeContext                *ctx = avctx->priv_data;
     VAAPIEncodeH264Context           *priv = avctx->priv_data;
     H264RawSPS                        *sps = &priv->raw_sps;
@@ -308,7 +309,7 @@  static int vaapi_encode_h264_init_sequence_params(AVCodecContext *avctx)
     memset(sps, 0, sizeof(*sps));
     memset(pps, 0, sizeof(*pps));
 
-    desc = av_pix_fmt_desc_get(priv->common.input_frames->sw_format);
+    desc = av_pix_fmt_desc_get(base_ctx->input_frames->sw_format);
     av_assert0(desc);
     if (desc->nb_components == 1 || desc->log2_chroma_w != 1 || desc->log2_chroma_h != 1) {
         av_log(avctx, AV_LOG_ERROR, "Chroma format of input pixel format "
@@ -327,18 +328,18 @@  static int vaapi_encode_h264_init_sequence_params(AVCodecContext *avctx)
         sps->constraint_set1_flag = 1;
 
     if (avctx->profile == AV_PROFILE_H264_HIGH || avctx->profile == AV_PROFILE_H264_HIGH_10)
-        sps->constraint_set3_flag = ctx->gop_size == 1;
+        sps->constraint_set3_flag = base_ctx->gop_size == 1;
 
     if (avctx->profile == AV_PROFILE_H264_MAIN ||
         avctx->profile == AV_PROFILE_H264_HIGH || avctx->profile == AV_PROFILE_H264_HIGH_10) {
         sps->constraint_set4_flag = 1;
-        sps->constraint_set5_flag = ctx->b_per_p == 0;
+        sps->constraint_set5_flag = base_ctx->b_per_p == 0;
     }
 
-    if (ctx->gop_size == 1)
+    if (base_ctx->gop_size == 1)
         priv->dpb_frames = 0;
     else
-        priv->dpb_frames = 1 + ctx->max_b_depth;
+        priv->dpb_frames = 1 + base_ctx->max_b_depth;
 
     if (avctx->level != AV_LEVEL_UNKNOWN) {
         sps->level_idc = avctx->level;
@@ -375,7 +376,7 @@  static int vaapi_encode_h264_init_sequence_params(AVCodecContext *avctx)
     sps->bit_depth_chroma_minus8 = bit_depth - 8;
 
     sps->log2_max_frame_num_minus4 = 4;
-    sps->pic_order_cnt_type        = ctx->max_b_depth ? 0 : 2;
+    sps->pic_order_cnt_type        = base_ctx->max_b_depth ? 0 : 2;
     if (sps->pic_order_cnt_type == 0) {
         sps->log2_max_pic_order_cnt_lsb_minus4 = 4;
     }
@@ -502,8 +503,8 @@  static int vaapi_encode_h264_init_sequence_params(AVCodecContext *avctx)
     sps->vui.motion_vectors_over_pic_boundaries_flag = 1;
     sps->vui.log2_max_mv_length_horizontal = 15;
     sps->vui.log2_max_mv_length_vertical   = 15;
-    sps->vui.max_num_reorder_frames        = ctx->max_b_depth;
-    sps->vui.max_dec_frame_buffering       = ctx->max_b_depth + 1;
+    sps->vui.max_num_reorder_frames        = base_ctx->max_b_depth;
+    sps->vui.max_dec_frame_buffering       = base_ctx->max_b_depth + 1;
 
     pps->nal_unit_header.nal_ref_idc = 3;
     pps->nal_unit_header.nal_unit_type = H264_NAL_PPS;
@@ -536,9 +537,9 @@  static int vaapi_encode_h264_init_sequence_params(AVCodecContext *avctx)
     *vseq = (VAEncSequenceParameterBufferH264) {
         .seq_parameter_set_id = sps->seq_parameter_set_id,
         .level_idc        = sps->level_idc,
-        .intra_period     = ctx->gop_size,
-        .intra_idr_period = ctx->gop_size,
-        .ip_period        = ctx->b_per_p + 1,
+        .intra_period     = base_ctx->gop_size,
+        .intra_idr_period = base_ctx->gop_size,
+        .ip_period        = base_ctx->b_per_p + 1,
 
         .bits_per_second       = ctx->va_bit_rate,
         .max_num_ref_frames    = sps->max_num_ref_frames,
@@ -622,19 +623,20 @@  static int vaapi_encode_h264_init_sequence_params(AVCodecContext *avctx)
 static int vaapi_encode_h264_init_picture_params(AVCodecContext *avctx,
                                                  VAAPIEncodePicture *pic)
 {
-    VAAPIEncodeContext               *ctx = avctx->priv_data;
+    HWBaseEncodeContext         *base_ctx = avctx->priv_data;
     VAAPIEncodeH264Context          *priv = avctx->priv_data;
-    VAAPIEncodeH264Picture          *hpic = pic->priv_data;
-    VAAPIEncodePicture              *prev = pic->prev;
+    HWBaseEncodePicture         *base_pic = (HWBaseEncodePicture *)pic;
+    VAAPIEncodeH264Picture          *hpic = base_pic->priv_data;
+    HWBaseEncodePicture             *prev = base_pic->prev;
     VAAPIEncodeH264Picture         *hprev = prev ? prev->priv_data : NULL;
     VAEncPictureParameterBufferH264 *vpic = pic->codec_picture_params;
     int i, j = 0;
 
-    if (pic->type == PICTURE_TYPE_IDR) {
-        av_assert0(pic->display_order == pic->encode_order);
+    if (base_pic->type == PICTURE_TYPE_IDR) {
+        av_assert0(base_pic->display_order == base_pic->encode_order);
 
         hpic->frame_num      = 0;
-        hpic->last_idr_frame = pic->display_order;
+        hpic->last_idr_frame = base_pic->display_order;
         hpic->idr_pic_id     = hprev ? hprev->idr_pic_id + 1 : 0;
 
         hpic->primary_pic_type = 0;
@@ -647,10 +649,10 @@  static int vaapi_encode_h264_init_picture_params(AVCodecContext *avctx,
         hpic->last_idr_frame = hprev->last_idr_frame;
         hpic->idr_pic_id     = hprev->idr_pic_id;
 
-        if (pic->type == PICTURE_TYPE_I) {
+        if (base_pic->type == PICTURE_TYPE_I) {
             hpic->slice_type       = 7;
             hpic->primary_pic_type = 0;
-        } else if (pic->type == PICTURE_TYPE_P) {
+        } else if (base_pic->type == PICTURE_TYPE_P) {
             hpic->slice_type       = 5;
             hpic->primary_pic_type = 1;
         } else {
@@ -658,13 +660,13 @@  static int vaapi_encode_h264_init_picture_params(AVCodecContext *avctx,
             hpic->primary_pic_type = 2;
         }
     }
-    hpic->pic_order_cnt = pic->display_order - hpic->last_idr_frame;
+    hpic->pic_order_cnt = base_pic->display_order - hpic->last_idr_frame;
     if (priv->raw_sps.pic_order_cnt_type == 2) {
         hpic->pic_order_cnt *= 2;
     }
 
-    hpic->dpb_delay     = pic->display_order - pic->encode_order + ctx->max_b_depth;
-    hpic->cpb_delay     = pic->encode_order - hpic->last_idr_frame;
+    hpic->dpb_delay     = base_pic->display_order - base_pic->encode_order + base_ctx->max_b_depth;
+    hpic->cpb_delay     = base_pic->encode_order - hpic->last_idr_frame;
 
     if (priv->aud) {
         priv->aud_needed = 1;
@@ -680,7 +682,7 @@  static int vaapi_encode_h264_init_picture_params(AVCodecContext *avctx,
 
     priv->sei_needed = 0;
 
-    if (priv->sei & SEI_IDENTIFIER && pic->encode_order == 0)
+    if (priv->sei & SEI_IDENTIFIER && base_pic->encode_order == 0)
         priv->sei_needed |= SEI_IDENTIFIER;
 #if !CONFIG_VAAPI_1
     if (ctx->va_rc_mode == VA_RC_CBR)
@@ -696,11 +698,11 @@  static int vaapi_encode_h264_init_picture_params(AVCodecContext *avctx,
         priv->sei_needed |= SEI_TIMING;
     }
 
-    if (priv->sei & SEI_RECOVERY_POINT && pic->type == PICTURE_TYPE_I) {
+    if (priv->sei & SEI_RECOVERY_POINT && base_pic->type == PICTURE_TYPE_I) {
         priv->sei_recovery_point = (H264RawSEIRecoveryPoint) {
             .recovery_frame_cnt = 0,
             .exact_match_flag   = 1,
-            .broken_link_flag   = ctx->b_per_p > 0,
+            .broken_link_flag   = base_ctx->b_per_p > 0,
         };
 
         priv->sei_needed |= SEI_RECOVERY_POINT;
@@ -710,7 +712,7 @@  static int vaapi_encode_h264_init_picture_params(AVCodecContext *avctx,
         int err;
         size_t sei_a53cc_len;
         av_freep(&priv->sei_a53cc_data);
-        err = ff_alloc_a53_sei(pic->input_image, 0, &priv->sei_a53cc_data, &sei_a53cc_len);
+        err = ff_alloc_a53_sei(base_pic->input_image, 0, &priv->sei_a53cc_data, &sei_a53cc_len);
         if (err < 0)
             return err;
         if (priv->sei_a53cc_data != NULL) {
@@ -730,15 +732,15 @@  static int vaapi_encode_h264_init_picture_params(AVCodecContext *avctx,
         .BottomFieldOrderCnt = hpic->pic_order_cnt,
     };
     for (int k = 0; k < MAX_REFERENCE_LIST_NUM; k++) {
-        for (i = 0; i < pic->nb_refs[k]; i++) {
-            VAAPIEncodePicture      *ref = pic->refs[k][i];
+        for (i = 0; i < base_pic->nb_refs[k]; i++) {
+            HWBaseEncodePicture    *ref = base_pic->refs[k][i];
             VAAPIEncodeH264Picture *href;
 
-            av_assert0(ref && ref->encode_order < pic->encode_order);
+            av_assert0(ref && ref->encode_order < base_pic->encode_order);
             href = ref->priv_data;
 
             vpic->ReferenceFrames[j++] = (VAPictureH264) {
-                .picture_id          = ref->recon_surface,
+                .picture_id          = ((VAAPIEncodePicture *)ref)->recon_surface,
                 .frame_idx           = href->frame_num,
                 .flags               = VA_PICTURE_H264_SHORT_TERM_REFERENCE,
                 .TopFieldOrderCnt    = href->pic_order_cnt,
@@ -758,8 +760,8 @@  static int vaapi_encode_h264_init_picture_params(AVCodecContext *avctx,
 
     vpic->frame_num = hpic->frame_num;
 
-    vpic->pic_fields.bits.idr_pic_flag       = (pic->type == PICTURE_TYPE_IDR);
-    vpic->pic_fields.bits.reference_pic_flag = (pic->type != PICTURE_TYPE_B);
+    vpic->pic_fields.bits.idr_pic_flag       = (base_pic->type == PICTURE_TYPE_IDR);
+    vpic->pic_fields.bits.reference_pic_flag = (base_pic->type != PICTURE_TYPE_B);
 
     return 0;
 }
@@ -770,31 +772,32 @@  static void vaapi_encode_h264_default_ref_pic_list(AVCodecContext *avctx,
                                                    VAAPIEncodePicture **rpl1,
                                                    int *rpl_size)
 {
-    VAAPIEncodePicture *prev;
+    HWBaseEncodePicture *base_pic = (HWBaseEncodePicture *)pic;
+    HWBaseEncodePicture *prev;
     VAAPIEncodeH264Picture *hp, *hn, *hc;
     int i, j, n = 0;
 
-    prev = pic->prev;
+    prev = base_pic->prev;
     av_assert0(prev);
-    hp = pic->priv_data;
+    hp = base_pic->priv_data;
 
-    for (i = 0; i < pic->prev->nb_dpb_pics; i++) {
+    for (i = 0; i < base_pic->prev->nb_dpb_pics; i++) {
         hn = prev->dpb[i]->priv_data;
         av_assert0(hn->frame_num < hp->frame_num);
 
-        if (pic->type == PICTURE_TYPE_P) {
+        if (base_pic->type == PICTURE_TYPE_P) {
             for (j = n; j > 0; j--) {
-                hc = rpl0[j - 1]->priv_data;
+                hc = rpl0[j - 1]->base.priv_data;
                 av_assert0(hc->frame_num != hn->frame_num);
                 if (hc->frame_num > hn->frame_num)
                     break;
                 rpl0[j] = rpl0[j - 1];
             }
-            rpl0[j] = prev->dpb[i];
+            rpl0[j] = (VAAPIEncodePicture *)prev->dpb[i];
 
-        } else if (pic->type == PICTURE_TYPE_B) {
+        } else if (base_pic->type == PICTURE_TYPE_B) {
             for (j = n; j > 0; j--) {
-                hc = rpl0[j - 1]->priv_data;
+                hc = rpl0[j - 1]->base.priv_data;
                 av_assert0(hc->pic_order_cnt != hp->pic_order_cnt);
                 if (hc->pic_order_cnt < hp->pic_order_cnt) {
                     if (hn->pic_order_cnt > hp->pic_order_cnt ||
@@ -806,10 +809,10 @@  static void vaapi_encode_h264_default_ref_pic_list(AVCodecContext *avctx,
                 }
                 rpl0[j] = rpl0[j - 1];
             }
-            rpl0[j] = prev->dpb[i];
+            rpl0[j] = (VAAPIEncodePicture *)prev->dpb[i];
 
             for (j = n; j > 0; j--) {
-                hc = rpl1[j - 1]->priv_data;
+                hc = rpl1[j - 1]->base.priv_data;
                 av_assert0(hc->pic_order_cnt != hp->pic_order_cnt);
                 if (hc->pic_order_cnt > hp->pic_order_cnt) {
                     if (hn->pic_order_cnt < hp->pic_order_cnt ||
@@ -821,13 +824,13 @@  static void vaapi_encode_h264_default_ref_pic_list(AVCodecContext *avctx,
                 }
                 rpl1[j] = rpl1[j - 1];
             }
-            rpl1[j] = prev->dpb[i];
+            rpl1[j] = (VAAPIEncodePicture *)prev->dpb[i];
         }
 
         ++n;
     }
 
-    if (pic->type == PICTURE_TYPE_B) {
+    if (base_pic->type == PICTURE_TYPE_B) {
         for (i = 0; i < n; i++) {
             if (rpl0[i] != rpl1[i])
                 break;
@@ -836,22 +839,22 @@  static void vaapi_encode_h264_default_ref_pic_list(AVCodecContext *avctx,
             FFSWAP(VAAPIEncodePicture*, rpl1[0], rpl1[1]);
     }
 
-    if (pic->type == PICTURE_TYPE_P ||
-        pic->type == PICTURE_TYPE_B) {
+    if (base_pic->type == PICTURE_TYPE_P ||
+        base_pic->type == PICTURE_TYPE_B) {
         av_log(avctx, AV_LOG_DEBUG, "Default RefPicList0 for fn=%d/poc=%d:",
                hp->frame_num, hp->pic_order_cnt);
         for (i = 0; i < n; i++) {
-            hn = rpl0[i]->priv_data;
+            hn = rpl0[i]->base.priv_data;
             av_log(avctx, AV_LOG_DEBUG, "  fn=%d/poc=%d",
                    hn->frame_num, hn->pic_order_cnt);
         }
         av_log(avctx, AV_LOG_DEBUG, "\n");
     }
-    if (pic->type == PICTURE_TYPE_B) {
+    if (base_pic->type == PICTURE_TYPE_B) {
         av_log(avctx, AV_LOG_DEBUG, "Default RefPicList1 for fn=%d/poc=%d:",
                hp->frame_num, hp->pic_order_cnt);
         for (i = 0; i < n; i++) {
-            hn = rpl1[i]->priv_data;
+            hn = rpl1[i]->base.priv_data;
             av_log(avctx, AV_LOG_DEBUG, "  fn=%d/poc=%d",
                    hn->frame_num, hn->pic_order_cnt);
         }
@@ -866,8 +869,9 @@  static int vaapi_encode_h264_init_slice_params(AVCodecContext *avctx,
                                                VAAPIEncodeSlice *slice)
 {
     VAAPIEncodeH264Context          *priv = avctx->priv_data;
-    VAAPIEncodeH264Picture          *hpic = pic->priv_data;
-    VAAPIEncodePicture              *prev = pic->prev;
+    HWBaseEncodePicture          *base_pic = (HWBaseEncodePicture *)pic;
+    VAAPIEncodeH264Picture          *hpic = base_pic->priv_data;
+    HWBaseEncodePicture             *prev = base_pic->prev;
     H264RawSPS                       *sps = &priv->raw_sps;
     H264RawPPS                       *pps = &priv->raw_pps;
     H264RawSliceHeader                *sh = &priv->raw_slice.header;
@@ -875,12 +879,12 @@  static int vaapi_encode_h264_init_slice_params(AVCodecContext *avctx,
     VAEncSliceParameterBufferH264 *vslice = slice->codec_slice_params;
     int i, j;
 
-    if (pic->type == PICTURE_TYPE_IDR) {
+    if (base_pic->type == PICTURE_TYPE_IDR) {
         sh->nal_unit_header.nal_unit_type = H264_NAL_IDR_SLICE;
         sh->nal_unit_header.nal_ref_idc   = 3;
     } else {
         sh->nal_unit_header.nal_unit_type = H264_NAL_SLICE;
-        sh->nal_unit_header.nal_ref_idc   = pic->is_reference;
+        sh->nal_unit_header.nal_ref_idc   = base_pic->is_reference;
     }
 
     sh->first_mb_in_slice = slice->block_start;
@@ -896,25 +900,25 @@  static int vaapi_encode_h264_init_slice_params(AVCodecContext *avctx,
 
     sh->direct_spatial_mv_pred_flag = 1;
 
-    if (pic->type == PICTURE_TYPE_B)
+    if (base_pic->type == PICTURE_TYPE_B)
         sh->slice_qp_delta = priv->fixed_qp_b - (pps->pic_init_qp_minus26 + 26);
-    else if (pic->type == PICTURE_TYPE_P)
+    else if (base_pic->type == PICTURE_TYPE_P)
         sh->slice_qp_delta = priv->fixed_qp_p - (pps->pic_init_qp_minus26 + 26);
     else
         sh->slice_qp_delta = priv->fixed_qp_idr - (pps->pic_init_qp_minus26 + 26);
 
-    if (pic->is_reference && pic->type != PICTURE_TYPE_IDR) {
-        VAAPIEncodePicture *discard_list[MAX_DPB_SIZE];
+    if (base_pic->is_reference && base_pic->type != PICTURE_TYPE_IDR) {
+        HWBaseEncodePicture *discard_list[MAX_DPB_SIZE];
         int discard = 0, keep = 0;
 
         // Discard everything which is in the DPB of the previous frame but
         // not in the DPB of this one.
         for (i = 0; i < prev->nb_dpb_pics; i++) {
-            for (j = 0; j < pic->nb_dpb_pics; j++) {
-                if (prev->dpb[i] == pic->dpb[j])
+            for (j = 0; j < base_pic->nb_dpb_pics; j++) {
+                if (prev->dpb[i] == base_pic->dpb[j])
                     break;
             }
-            if (j == pic->nb_dpb_pics) {
+            if (j == base_pic->nb_dpb_pics) {
                 discard_list[discard] = prev->dpb[i];
                 ++discard;
             } else {
@@ -940,7 +944,7 @@  static int vaapi_encode_h264_init_slice_params(AVCodecContext *avctx,
 
     // If the intended references are not the first entries of RefPicListN
     // by default, use ref-pic-list-modification to move them there.
-    if (pic->type == PICTURE_TYPE_P || pic->type == PICTURE_TYPE_B) {
+    if (base_pic->type == PICTURE_TYPE_P || base_pic->type == PICTURE_TYPE_B) {
         VAAPIEncodePicture *def_l0[MAX_DPB_SIZE], *def_l1[MAX_DPB_SIZE];
         VAAPIEncodeH264Picture *href;
         int n;
@@ -948,19 +952,19 @@  static int vaapi_encode_h264_init_slice_params(AVCodecContext *avctx,
         vaapi_encode_h264_default_ref_pic_list(avctx, pic,
                                                def_l0, def_l1, &n);
 
-        if (pic->type == PICTURE_TYPE_P) {
+        if (base_pic->type == PICTURE_TYPE_P) {
             int need_rplm = 0;
-            for (i = 0; i < pic->nb_refs[0]; i++) {
-                av_assert0(pic->refs[0][i]);
-                if (pic->refs[0][i] != def_l0[i])
+            for (i = 0; i < base_pic->nb_refs[0]; i++) {
+                av_assert0(base_pic->refs[0][i]);
+                if (base_pic->refs[0][i] != (HWBaseEncodePicture *)def_l0[i])
                     need_rplm = 1;
             }
 
             sh->ref_pic_list_modification_flag_l0 = need_rplm;
             if (need_rplm) {
                 int pic_num = hpic->frame_num;
-                for (i = 0; i < pic->nb_refs[0]; i++) {
-                    href = pic->refs[0][i]->priv_data;
+                for (i = 0; i < base_pic->nb_refs[0]; i++) {
+                    href = base_pic->refs[0][i]->priv_data;
                     av_assert0(href->frame_num != pic_num);
                     if (href->frame_num < pic_num) {
                         sh->rplm_l0[i].modification_of_pic_nums_idc = 0;
@@ -979,20 +983,20 @@  static int vaapi_encode_h264_init_slice_params(AVCodecContext *avctx,
         } else {
             int need_rplm_l0 = 0, need_rplm_l1 = 0;
             int n0 = 0, n1 = 0;
-            for (i = 0; i < pic->nb_refs[0]; i++) {
-                av_assert0(pic->refs[0][i]);
-                href = pic->refs[0][i]->priv_data;
+            for (i = 0; i < base_pic->nb_refs[0]; i++) {
+                av_assert0(base_pic->refs[0][i]);
+                href = base_pic->refs[0][i]->priv_data;
                 av_assert0(href->pic_order_cnt < hpic->pic_order_cnt);
-                if (pic->refs[0][i] != def_l0[n0])
+                if (base_pic->refs[0][i] != (HWBaseEncodePicture *)def_l0[n0])
                     need_rplm_l0 = 1;
                 ++n0;
             }
 
-            for (i = 0; i < pic->nb_refs[1]; i++) {
-                av_assert0(pic->refs[1][i]);
-                href = pic->refs[1][i]->priv_data;
+            for (i = 0; i < base_pic->nb_refs[1]; i++) {
+                av_assert0(base_pic->refs[1][i]);
+                href = base_pic->refs[1][i]->priv_data;
                 av_assert0(href->pic_order_cnt > hpic->pic_order_cnt);
-                if (pic->refs[1][i] != def_l1[n1])
+                if (base_pic->refs[1][i] != (HWBaseEncodePicture *)def_l1[n1])
                     need_rplm_l1 = 1;
                 ++n1;
             }
@@ -1000,8 +1004,8 @@  static int vaapi_encode_h264_init_slice_params(AVCodecContext *avctx,
             sh->ref_pic_list_modification_flag_l0 = need_rplm_l0;
             if (need_rplm_l0) {
                 int pic_num = hpic->frame_num;
-                for (i = j = 0; i < pic->nb_refs[0]; i++) {
-                    href = pic->refs[0][i]->priv_data;
+                for (i = j = 0; i < base_pic->nb_refs[0]; i++) {
+                    href = base_pic->refs[0][i]->priv_data;
                     av_assert0(href->frame_num != pic_num);
                     if (href->frame_num < pic_num) {
                         sh->rplm_l0[j].modification_of_pic_nums_idc = 0;
@@ -1022,8 +1026,8 @@  static int vaapi_encode_h264_init_slice_params(AVCodecContext *avctx,
             sh->ref_pic_list_modification_flag_l1 = need_rplm_l1;
             if (need_rplm_l1) {
                 int pic_num = hpic->frame_num;
-                for (i = j = 0; i < pic->nb_refs[1]; i++) {
-                    href = pic->refs[1][i]->priv_data;
+                for (i = j = 0; i < base_pic->nb_refs[1]; i++) {
+                    href = base_pic->refs[1][i]->priv_data;
                     av_assert0(href->frame_num != pic_num);
                     if (href->frame_num < pic_num) {
                         sh->rplm_l1[j].modification_of_pic_nums_idc = 0;
@@ -1063,15 +1067,15 @@  static int vaapi_encode_h264_init_slice_params(AVCodecContext *avctx,
         vslice->RefPicList1[i].flags      = VA_PICTURE_H264_INVALID;
     }
 
-    if (pic->nb_refs[0]) {
+    if (base_pic->nb_refs[0]) {
         // Backward reference for P- or B-frame.
-        av_assert0(pic->type == PICTURE_TYPE_P ||
-                   pic->type == PICTURE_TYPE_B);
+        av_assert0(base_pic->type == PICTURE_TYPE_P ||
+                   base_pic->type == PICTURE_TYPE_B);
         vslice->RefPicList0[0] = vpic->ReferenceFrames[0];
     }
-    if (pic->nb_refs[1]) {
+    if (base_pic->nb_refs[1]) {
         // Forward reference for B-frame.
-        av_assert0(pic->type == PICTURE_TYPE_B);
+        av_assert0(base_pic->type == PICTURE_TYPE_B);
         vslice->RefPicList1[0] = vpic->ReferenceFrames[1];
     }
 
@@ -1082,8 +1086,9 @@  static int vaapi_encode_h264_init_slice_params(AVCodecContext *avctx,
 
 static av_cold int vaapi_encode_h264_configure(AVCodecContext *avctx)
 {
-    VAAPIEncodeContext      *ctx = avctx->priv_data;
-    VAAPIEncodeH264Context *priv = avctx->priv_data;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
+    VAAPIEncodeContext       *ctx = avctx->priv_data;
+    VAAPIEncodeH264Context  *priv = avctx->priv_data;
     int err;
 
     err = ff_cbs_init(&priv->cbc, AV_CODEC_ID_H264, avctx);
@@ -1094,7 +1099,7 @@  static av_cold int vaapi_encode_h264_configure(AVCodecContext *avctx)
     priv->mb_height = FFALIGN(avctx->height, 16) / 16;
 
     if (ctx->va_rc_mode == VA_RC_CQP) {
-        priv->fixed_qp_p = av_clip(ctx->rc_quality, 1, 51);
+        priv->fixed_qp_p = av_clip(base_ctx->rc_quality, 1, 51);
         if (avctx->i_quant_factor > 0.0)
             priv->fixed_qp_idr =
                 av_clip((avctx->i_quant_factor * priv->fixed_qp_p +
@@ -1202,8 +1207,9 @@  static const VAAPIEncodeType vaapi_encode_type_h264 = {
 
 static av_cold int vaapi_encode_h264_init(AVCodecContext *avctx)
 {
-    VAAPIEncodeContext      *ctx = avctx->priv_data;
-    VAAPIEncodeH264Context *priv = avctx->priv_data;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
+    VAAPIEncodeContext       *ctx = avctx->priv_data;
+    VAAPIEncodeH264Context  *priv = avctx->priv_data;
 
     ctx->codec = &vaapi_encode_type_h264;
 
@@ -1251,13 +1257,13 @@  static av_cold int vaapi_encode_h264_init(AVCodecContext *avctx)
         VA_ENC_PACKED_HEADER_SLICE    | // Slice headers.
         VA_ENC_PACKED_HEADER_MISC;      // SEI.
 
-    ctx->surface_width  = FFALIGN(avctx->width,  16);
-    ctx->surface_height = FFALIGN(avctx->height, 16);
+    base_ctx->surface_width  = FFALIGN(avctx->width,  16);
+    base_ctx->surface_height = FFALIGN(avctx->height, 16);
 
-    ctx->slice_block_height = ctx->slice_block_width = 16;
+    base_ctx->slice_block_height = base_ctx->slice_block_width = 16;
 
     if (priv->qp > 0)
-        ctx->explicit_qp = priv->qp;
+        base_ctx->explicit_qp = priv->qp;
 
     return ff_vaapi_encode_init(avctx);
 }
@@ -1277,8 +1283,9 @@  static av_cold int vaapi_encode_h264_close(AVCodecContext *avctx)
 #define OFFSET(x) offsetof(VAAPIEncodeH264Context, x)
 #define FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM)
 static const AVOption vaapi_encode_h264_options[] = {
+    HW_BASE_ENCODE_COMMON_OPTIONS,
     VAAPI_ENCODE_COMMON_OPTIONS,
-    VAAPI_ENCODE_RC_OPTIONS,
+    HW_BASE_ENCODE_RC_OPTIONS,
 
     { "qp", "Constant QP (for P-frames; scaled by qfactor/qoffset for I/B)",
       OFFSET(qp), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 52, FLAGS },
@@ -1380,7 +1387,7 @@  const FFCodec ff_h264_vaapi_encoder = {
     .p.id           = AV_CODEC_ID_H264,
     .priv_data_size = sizeof(VAAPIEncodeH264Context),
     .init           = &vaapi_encode_h264_init,
-    FF_CODEC_RECEIVE_PACKET_CB(&ff_vaapi_encode_receive_packet),
+    FF_CODEC_RECEIVE_PACKET_CB(&ff_hw_base_encode_receive_packet),
     .close          = &vaapi_encode_h264_close,
     .p.priv_class   = &vaapi_encode_h264_class,
     .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE |
diff --git a/libavcodec/vaapi_encode_h265.c b/libavcodec/vaapi_encode_h265.c
index 239ef2359a..52b9d8e73f 100644
--- a/libavcodec/vaapi_encode_h265.c
+++ b/libavcodec/vaapi_encode_h265.c
@@ -260,6 +260,7 @@  fail:
 
 static int vaapi_encode_h265_init_sequence_params(AVCodecContext *avctx)
 {
+    HWBaseEncodeContext          *base_ctx = avctx->priv_data;
     VAAPIEncodeContext                *ctx = avctx->priv_data;
     VAAPIEncodeH265Context           *priv = avctx->priv_data;
     H265RawVPS                        *vps = &priv->raw_vps;
@@ -278,7 +279,7 @@  static int vaapi_encode_h265_init_sequence_params(AVCodecContext *avctx)
     memset(pps, 0, sizeof(*pps));
 
 
-    desc = av_pix_fmt_desc_get(priv->common.input_frames->sw_format);
+    desc = av_pix_fmt_desc_get(base_ctx->input_frames->sw_format);
     av_assert0(desc);
     if (desc->nb_components == 1) {
         chroma_format = 0;
@@ -341,7 +342,7 @@  static int vaapi_encode_h265_init_sequence_params(AVCodecContext *avctx)
     ptl->general_max_420chroma_constraint_flag  = chroma_format <= 1;
     ptl->general_max_monochrome_constraint_flag = chroma_format == 0;
 
-    ptl->general_intra_constraint_flag = ctx->gop_size == 1;
+    ptl->general_intra_constraint_flag = base_ctx->gop_size == 1;
     ptl->general_one_picture_only_constraint_flag = 0;
 
     ptl->general_lower_bit_rate_constraint_flag = 1;
@@ -352,9 +353,9 @@  static int vaapi_encode_h265_init_sequence_params(AVCodecContext *avctx)
         const H265LevelDescriptor *level;
 
         level = ff_h265_guess_level(ptl, avctx->bit_rate,
-                                    ctx->surface_width, ctx->surface_height,
+                                    base_ctx->surface_width, base_ctx->surface_height,
                                     ctx->nb_slices, ctx->tile_rows, ctx->tile_cols,
-                                    (ctx->b_per_p > 0) + 1);
+                                    (base_ctx->b_per_p > 0) + 1);
         if (level) {
             av_log(avctx, AV_LOG_VERBOSE, "Using level %s.\n", level->name);
             ptl->general_level_idc = level->level_idc;
@@ -368,8 +369,8 @@  static int vaapi_encode_h265_init_sequence_params(AVCodecContext *avctx)
     }
 
     vps->vps_sub_layer_ordering_info_present_flag = 0;
-    vps->vps_max_dec_pic_buffering_minus1[0]      = ctx->max_b_depth + 1;
-    vps->vps_max_num_reorder_pics[0]              = ctx->max_b_depth;
+    vps->vps_max_dec_pic_buffering_minus1[0]      = base_ctx->max_b_depth + 1;
+    vps->vps_max_num_reorder_pics[0]              = base_ctx->max_b_depth;
     vps->vps_max_latency_increase_plus1[0]        = 0;
 
     vps->vps_max_layer_id             = 0;
@@ -410,18 +411,18 @@  static int vaapi_encode_h265_init_sequence_params(AVCodecContext *avctx)
     sps->chroma_format_idc          = chroma_format;
     sps->separate_colour_plane_flag = 0;
 
-    sps->pic_width_in_luma_samples  = ctx->surface_width;
-    sps->pic_height_in_luma_samples = ctx->surface_height;
+    sps->pic_width_in_luma_samples  = base_ctx->surface_width;
+    sps->pic_height_in_luma_samples = base_ctx->surface_height;
 
-    if (avctx->width  != ctx->surface_width ||
-        avctx->height != ctx->surface_height) {
+    if (avctx->width  != base_ctx->surface_width ||
+        avctx->height != base_ctx->surface_height) {
         sps->conformance_window_flag = 1;
         sps->conf_win_left_offset   = 0;
         sps->conf_win_right_offset  =
-            (ctx->surface_width - avctx->width) >> desc->log2_chroma_w;
+            (base_ctx->surface_width - avctx->width) >> desc->log2_chroma_w;
         sps->conf_win_top_offset    = 0;
         sps->conf_win_bottom_offset =
-            (ctx->surface_height - avctx->height) >> desc->log2_chroma_h;
+            (base_ctx->surface_height - avctx->height) >> desc->log2_chroma_h;
     } else {
         sps->conformance_window_flag = 0;
     }
@@ -643,9 +644,9 @@  static int vaapi_encode_h265_init_sequence_params(AVCodecContext *avctx)
         .general_level_idc   = vps->profile_tier_level.general_level_idc,
         .general_tier_flag   = vps->profile_tier_level.general_tier_flag,
 
-        .intra_period     = ctx->gop_size,
-        .intra_idr_period = ctx->gop_size,
-        .ip_period        = ctx->b_per_p + 1,
+        .intra_period     = base_ctx->gop_size,
+        .intra_idr_period = base_ctx->gop_size,
+        .ip_period        = base_ctx->b_per_p + 1,
         .bits_per_second  = ctx->va_bit_rate,
 
         .pic_width_in_luma_samples  = sps->pic_width_in_luma_samples,
@@ -758,18 +759,19 @@  static int vaapi_encode_h265_init_sequence_params(AVCodecContext *avctx)
 static int vaapi_encode_h265_init_picture_params(AVCodecContext *avctx,
                                                  VAAPIEncodePicture *pic)
 {
-    VAAPIEncodeContext               *ctx = avctx->priv_data;
+    HWBaseEncodeContext         *base_ctx = avctx->priv_data;
     VAAPIEncodeH265Context          *priv = avctx->priv_data;
-    VAAPIEncodeH265Picture          *hpic = pic->priv_data;
-    VAAPIEncodePicture              *prev = pic->prev;
+    HWBaseEncodePicture         *base_pic = (HWBaseEncodePicture *)pic;
+    VAAPIEncodeH265Picture          *hpic = base_pic->priv_data;
+    HWBaseEncodePicture             *prev = base_pic->prev;
     VAAPIEncodeH265Picture         *hprev = prev ? prev->priv_data : NULL;
     VAEncPictureParameterBufferHEVC *vpic = pic->codec_picture_params;
     int i, j = 0;
 
-    if (pic->type == PICTURE_TYPE_IDR) {
-        av_assert0(pic->display_order == pic->encode_order);
+    if (base_pic->type == PICTURE_TYPE_IDR) {
+        av_assert0(base_pic->display_order == base_pic->encode_order);
 
-        hpic->last_idr_frame = pic->display_order;
+        hpic->last_idr_frame = base_pic->display_order;
 
         hpic->slice_nal_unit = HEVC_NAL_IDR_W_RADL;
         hpic->slice_type     = HEVC_SLICE_I;
@@ -778,23 +780,23 @@  static int vaapi_encode_h265_init_picture_params(AVCodecContext *avctx,
         av_assert0(prev);
         hpic->last_idr_frame = hprev->last_idr_frame;
 
-        if (pic->type == PICTURE_TYPE_I) {
+        if (base_pic->type == PICTURE_TYPE_I) {
             hpic->slice_nal_unit = HEVC_NAL_CRA_NUT;
             hpic->slice_type     = HEVC_SLICE_I;
             hpic->pic_type       = 0;
-        } else if (pic->type == PICTURE_TYPE_P) {
-            av_assert0(pic->refs[0]);
+        } else if (base_pic->type == PICTURE_TYPE_P) {
+            av_assert0(base_pic->refs[0]);
             hpic->slice_nal_unit = HEVC_NAL_TRAIL_R;
             hpic->slice_type     = HEVC_SLICE_P;
             hpic->pic_type       = 1;
         } else {
-            VAAPIEncodePicture *irap_ref;
-            av_assert0(pic->refs[0][0] && pic->refs[1][0]);
-            for (irap_ref = pic; irap_ref; irap_ref = irap_ref->refs[1][0]) {
+            HWBaseEncodePicture *irap_ref;
+            av_assert0(base_pic->refs[0][0] && base_pic->refs[1][0]);
+            for (irap_ref = base_pic; irap_ref; irap_ref = irap_ref->refs[1][0]) {
                 if (irap_ref->type == PICTURE_TYPE_I)
                     break;
             }
-            if (pic->b_depth == ctx->max_b_depth) {
+            if (base_pic->b_depth == base_ctx->max_b_depth) {
                 hpic->slice_nal_unit = irap_ref ? HEVC_NAL_RASL_N
                                                 : HEVC_NAL_TRAIL_N;
             } else {
@@ -805,7 +807,7 @@  static int vaapi_encode_h265_init_picture_params(AVCodecContext *avctx,
             hpic->pic_type   = 2;
         }
     }
-    hpic->pic_order_cnt = pic->display_order - hpic->last_idr_frame;
+    hpic->pic_order_cnt = base_pic->display_order - hpic->last_idr_frame;
 
     if (priv->aud) {
         priv->aud_needed = 1;
@@ -827,9 +829,9 @@  static int vaapi_encode_h265_init_picture_params(AVCodecContext *avctx,
     // may force an IDR frame on the output where the metadata gets
     // changed on the input frame.
     if ((priv->sei & SEI_MASTERING_DISPLAY) &&
-        (pic->type == PICTURE_TYPE_I || pic->type == PICTURE_TYPE_IDR)) {
+        (base_pic->type == PICTURE_TYPE_I || base_pic->type == PICTURE_TYPE_IDR)) {
         AVFrameSideData *sd =
-            av_frame_get_side_data(pic->input_image,
+            av_frame_get_side_data(base_pic->input_image,
                                    AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);
 
         if (sd) {
@@ -875,9 +877,9 @@  static int vaapi_encode_h265_init_picture_params(AVCodecContext *avctx,
     }
 
     if ((priv->sei & SEI_CONTENT_LIGHT_LEVEL) &&
-        (pic->type == PICTURE_TYPE_I || pic->type == PICTURE_TYPE_IDR)) {
+        (base_pic->type == PICTURE_TYPE_I || base_pic->type == PICTURE_TYPE_IDR)) {
         AVFrameSideData *sd =
-            av_frame_get_side_data(pic->input_image,
+            av_frame_get_side_data(base_pic->input_image,
                                    AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
 
         if (sd) {
@@ -897,7 +899,7 @@  static int vaapi_encode_h265_init_picture_params(AVCodecContext *avctx,
         int err;
         size_t sei_a53cc_len;
         av_freep(&priv->sei_a53cc_data);
-        err = ff_alloc_a53_sei(pic->input_image, 0, &priv->sei_a53cc_data, &sei_a53cc_len);
+        err = ff_alloc_a53_sei(base_pic->input_image, 0, &priv->sei_a53cc_data, &sei_a53cc_len);
         if (err < 0)
             return err;
         if (priv->sei_a53cc_data != NULL) {
@@ -916,19 +918,19 @@  static int vaapi_encode_h265_init_picture_params(AVCodecContext *avctx,
     };
 
     for (int k = 0; k < MAX_REFERENCE_LIST_NUM; k++) {
-        for (i = 0; i < pic->nb_refs[k]; i++) {
-            VAAPIEncodePicture      *ref = pic->refs[k][i];
+        for (i = 0; i < base_pic->nb_refs[k]; i++) {
+            HWBaseEncodePicture    *ref = base_pic->refs[k][i];
             VAAPIEncodeH265Picture *href;
 
-            av_assert0(ref && ref->encode_order < pic->encode_order);
+            av_assert0(ref && ref->encode_order < base_pic->encode_order);
             href = ref->priv_data;
 
             vpic->reference_frames[j++] = (VAPictureHEVC) {
-                .picture_id    = ref->recon_surface,
+                .picture_id    = ((VAAPIEncodePicture *)ref)->recon_surface,
                 .pic_order_cnt = href->pic_order_cnt,
-                .flags = (ref->display_order < pic->display_order ?
+                .flags = (ref->display_order < base_pic->display_order ?
                           VA_PICTURE_HEVC_RPS_ST_CURR_BEFORE : 0) |
-                          (ref->display_order > pic->display_order ?
+                          (ref->display_order > base_pic->display_order ?
                           VA_PICTURE_HEVC_RPS_ST_CURR_AFTER  : 0),
             };
         }
@@ -945,7 +947,7 @@  static int vaapi_encode_h265_init_picture_params(AVCodecContext *avctx,
 
     vpic->nal_unit_type = hpic->slice_nal_unit;
 
-    switch (pic->type) {
+    switch (base_pic->type) {
     case PICTURE_TYPE_IDR:
         vpic->pic_fields.bits.idr_pic_flag       = 1;
         vpic->pic_fields.bits.coding_type        = 1;
@@ -977,9 +979,10 @@  static int vaapi_encode_h265_init_slice_params(AVCodecContext *avctx,
                                                VAAPIEncodePicture *pic,
                                                VAAPIEncodeSlice *slice)
 {
-    VAAPIEncodeContext                *ctx = avctx->priv_data;
+    HWBaseEncodeContext          *base_ctx = avctx->priv_data;
     VAAPIEncodeH265Context           *priv = avctx->priv_data;
-    VAAPIEncodeH265Picture           *hpic = pic->priv_data;
+    HWBaseEncodePicture          *base_pic = (HWBaseEncodePicture *)pic;
+    VAAPIEncodeH265Picture           *hpic = base_pic->priv_data;
     const H265RawSPS                  *sps = &priv->raw_sps;
     const H265RawPPS                  *pps = &priv->raw_pps;
     H265RawSliceHeader                 *sh = &priv->raw_slice.header;
@@ -1000,13 +1003,13 @@  static int vaapi_encode_h265_init_slice_params(AVCodecContext *avctx,
 
     sh->slice_type = hpic->slice_type;
 
-    if (sh->slice_type == HEVC_SLICE_P && ctx->p_to_gpb)
+    if (sh->slice_type == HEVC_SLICE_P && base_ctx->p_to_gpb)
         sh->slice_type = HEVC_SLICE_B;
 
     sh->slice_pic_order_cnt_lsb = hpic->pic_order_cnt &
         (1 << (sps->log2_max_pic_order_cnt_lsb_minus4 + 4)) - 1;
 
-    if (pic->type != PICTURE_TYPE_IDR) {
+    if (base_pic->type != PICTURE_TYPE_IDR) {
         H265RawSTRefPicSet *rps;
         const VAAPIEncodeH265Picture *strp;
         int rps_poc[MAX_DPB_SIZE];
@@ -1020,33 +1023,33 @@  static int vaapi_encode_h265_init_slice_params(AVCodecContext *avctx,
 
         rps_pics = 0;
         for (i = 0; i < MAX_REFERENCE_LIST_NUM; i++) {
-            for (j = 0; j < pic->nb_refs[i]; j++) {
-                strp = pic->refs[i][j]->priv_data;
+            for (j = 0; j < base_pic->nb_refs[i]; j++) {
+                strp = base_pic->refs[i][j]->priv_data;
                 rps_poc[rps_pics]  = strp->pic_order_cnt;
                 rps_used[rps_pics] = 1;
                 ++rps_pics;
             }
         }
 
-        for (i = 0; i < pic->nb_dpb_pics; i++) {
-            if (pic->dpb[i] == pic)
+        for (i = 0; i < base_pic->nb_dpb_pics; i++) {
+            if (base_pic->dpb[i] == base_pic)
                 continue;
 
-            for (j = 0; j < pic->nb_refs[0]; j++) {
-                if (pic->dpb[i] == pic->refs[0][j])
+            for (j = 0; j < base_pic->nb_refs[0]; j++) {
+                if (base_pic->dpb[i] == base_pic->refs[0][j])
                     break;
             }
-            if (j < pic->nb_refs[0])
+            if (j < base_pic->nb_refs[0])
                 continue;
 
-            for (j = 0; j < pic->nb_refs[1]; j++) {
-                if (pic->dpb[i] == pic->refs[1][j])
+            for (j = 0; j < base_pic->nb_refs[1]; j++) {
+                if (base_pic->dpb[i] == base_pic->refs[1][j])
                     break;
             }
-            if (j < pic->nb_refs[1])
+            if (j < base_pic->nb_refs[1])
                 continue;
 
-            strp = pic->dpb[i]->priv_data;
+            strp = base_pic->dpb[i]->priv_data;
             rps_poc[rps_pics]  = strp->pic_order_cnt;
             rps_used[rps_pics] = 0;
             ++rps_pics;
@@ -1113,9 +1116,9 @@  static int vaapi_encode_h265_init_slice_params(AVCodecContext *avctx,
     sh->slice_sao_luma_flag = sh->slice_sao_chroma_flag =
         sps->sample_adaptive_offset_enabled_flag;
 
-    if (pic->type == PICTURE_TYPE_B)
+    if (base_pic->type == PICTURE_TYPE_B)
         sh->slice_qp_delta = priv->fixed_qp_b - (pps->init_qp_minus26 + 26);
-    else if (pic->type == PICTURE_TYPE_P)
+    else if (base_pic->type == PICTURE_TYPE_P)
         sh->slice_qp_delta = priv->fixed_qp_p - (pps->init_qp_minus26 + 26);
     else
         sh->slice_qp_delta = priv->fixed_qp_idr - (pps->init_qp_minus26 + 26);
@@ -1170,22 +1173,22 @@  static int vaapi_encode_h265_init_slice_params(AVCodecContext *avctx,
         vslice->ref_pic_list1[i].flags      = VA_PICTURE_HEVC_INVALID;
     }
 
-    if (pic->nb_refs[0]) {
+    if (base_pic->nb_refs[0]) {
         // Backward reference for P- or B-frame.
-        av_assert0(pic->type == PICTURE_TYPE_P ||
-                   pic->type == PICTURE_TYPE_B);
+        av_assert0(base_pic->type == PICTURE_TYPE_P ||
+                   base_pic->type == PICTURE_TYPE_B);
         vslice->ref_pic_list0[0] = vpic->reference_frames[0];
-        if (ctx->p_to_gpb && pic->type == PICTURE_TYPE_P)
+        if (base_ctx->p_to_gpb && base_pic->type == PICTURE_TYPE_P)
             // Reference for GPB B-frame, L0 == L1
             vslice->ref_pic_list1[0] = vpic->reference_frames[0];
     }
-    if (pic->nb_refs[1]) {
+    if (base_pic->nb_refs[1]) {
         // Forward reference for B-frame.
-        av_assert0(pic->type == PICTURE_TYPE_B);
+        av_assert0(base_pic->type == PICTURE_TYPE_B);
         vslice->ref_pic_list1[0] = vpic->reference_frames[1];
     }
 
-    if (pic->type == PICTURE_TYPE_P && ctx->p_to_gpb) {
+    if (base_pic->type == PICTURE_TYPE_P && base_ctx->p_to_gpb) {
         vslice->slice_type = HEVC_SLICE_B;
         for (i = 0; i < FF_ARRAY_ELEMS(vslice->ref_pic_list0); i++) {
             vslice->ref_pic_list1[i].picture_id = vslice->ref_pic_list0[i].picture_id;
@@ -1198,8 +1201,9 @@  static int vaapi_encode_h265_init_slice_params(AVCodecContext *avctx,
 
 static av_cold int vaapi_encode_h265_get_encoder_caps(AVCodecContext *avctx)
 {
-    VAAPIEncodeContext      *ctx = avctx->priv_data;
-    VAAPIEncodeH265Context *priv = avctx->priv_data;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
+    VAAPIEncodeContext       *ctx = avctx->priv_data;
+    VAAPIEncodeH265Context  *priv = avctx->priv_data;
 
 #if VA_CHECK_VERSION(1, 13, 0)
     {
@@ -1250,18 +1254,19 @@  static av_cold int vaapi_encode_h265_get_encoder_caps(AVCodecContext *avctx)
            "min CB size %dx%d.\n", priv->ctu_size, priv->ctu_size,
            priv->min_cb_size, priv->min_cb_size);
 
-    ctx->surface_width  = FFALIGN(avctx->width,  priv->min_cb_size);
-    ctx->surface_height = FFALIGN(avctx->height, priv->min_cb_size);
+    base_ctx->surface_width  = FFALIGN(avctx->width,  priv->min_cb_size);
+    base_ctx->surface_height = FFALIGN(avctx->height, priv->min_cb_size);
 
-    ctx->slice_block_width = ctx->slice_block_height = priv->ctu_size;
+    base_ctx->slice_block_width = base_ctx->slice_block_height = priv->ctu_size;
 
     return 0;
 }
 
 static av_cold int vaapi_encode_h265_configure(AVCodecContext *avctx)
 {
-    VAAPIEncodeContext      *ctx = avctx->priv_data;
-    VAAPIEncodeH265Context *priv = avctx->priv_data;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
+    VAAPIEncodeContext       *ctx = avctx->priv_data;
+    VAAPIEncodeH265Context  *priv = avctx->priv_data;
     int err;
 
     err = ff_cbs_init(&priv->cbc, AV_CODEC_ID_HEVC, avctx);
@@ -1273,7 +1278,7 @@  static av_cold int vaapi_encode_h265_configure(AVCodecContext *avctx)
         // therefore always bounded below by 1, even in 10-bit mode where
         // it should go down to -12.
 
-        priv->fixed_qp_p = av_clip(ctx->rc_quality, 1, 51);
+        priv->fixed_qp_p = av_clip(base_ctx->rc_quality, 1, 51);
         if (avctx->i_quant_factor > 0.0)
             priv->fixed_qp_idr =
                 av_clip((avctx->i_quant_factor * priv->fixed_qp_p +
@@ -1357,8 +1362,9 @@  static const VAAPIEncodeType vaapi_encode_type_h265 = {
 
 static av_cold int vaapi_encode_h265_init(AVCodecContext *avctx)
 {
-    VAAPIEncodeContext      *ctx = avctx->priv_data;
-    VAAPIEncodeH265Context *priv = avctx->priv_data;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
+    VAAPIEncodeContext       *ctx = avctx->priv_data;
+    VAAPIEncodeH265Context  *priv = avctx->priv_data;
 
     ctx->codec = &vaapi_encode_type_h265;
 
@@ -1379,7 +1385,7 @@  static av_cold int vaapi_encode_h265_init(AVCodecContext *avctx)
         VA_ENC_PACKED_HEADER_MISC;      // SEI
 
     if (priv->qp > 0)
-        ctx->explicit_qp = priv->qp;
+        base_ctx->explicit_qp = priv->qp;
 
     return ff_vaapi_encode_init(avctx);
 }
@@ -1398,8 +1404,9 @@  static av_cold int vaapi_encode_h265_close(AVCodecContext *avctx)
 #define OFFSET(x) offsetof(VAAPIEncodeH265Context, x)
 #define FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM)
 static const AVOption vaapi_encode_h265_options[] = {
+    HW_BASE_ENCODE_COMMON_OPTIONS,
     VAAPI_ENCODE_COMMON_OPTIONS,
-    VAAPI_ENCODE_RC_OPTIONS,
+    HW_BASE_ENCODE_RC_OPTIONS,
 
     { "qp", "Constant QP (for P-frames; scaled by qfactor/qoffset for I/B)",
       OFFSET(qp), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 52, FLAGS },
@@ -1497,7 +1504,7 @@  const FFCodec ff_hevc_vaapi_encoder = {
     .p.id           = AV_CODEC_ID_HEVC,
     .priv_data_size = sizeof(VAAPIEncodeH265Context),
     .init           = &vaapi_encode_h265_init,
-    FF_CODEC_RECEIVE_PACKET_CB(&ff_vaapi_encode_receive_packet),
+    FF_CODEC_RECEIVE_PACKET_CB(&ff_hw_base_encode_receive_packet),
     .close          = &vaapi_encode_h265_close,
     .p.priv_class   = &vaapi_encode_h265_class,
     .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE |
diff --git a/libavcodec/vaapi_encode_mjpeg.c b/libavcodec/vaapi_encode_mjpeg.c
index c17747e3a9..3096043991 100644
--- a/libavcodec/vaapi_encode_mjpeg.c
+++ b/libavcodec/vaapi_encode_mjpeg.c
@@ -222,7 +222,9 @@  static int vaapi_encode_mjpeg_write_extra_buffer(AVCodecContext *avctx,
 static int vaapi_encode_mjpeg_init_picture_params(AVCodecContext *avctx,
                                                   VAAPIEncodePicture *pic)
 {
+    HWBaseEncodeContext         *base_ctx = avctx->priv_data;
     VAAPIEncodeMJPEGContext         *priv = avctx->priv_data;
+    HWBaseEncodePicture         *base_pic = (HWBaseEncodePicture *)pic;
     JPEGRawFrameHeader                *fh = &priv->frame_header;
     JPEGRawScanHeader                 *sh = &priv->scan.header;
     VAEncPictureParameterBufferJPEG *vpic = pic->codec_picture_params;
@@ -232,9 +234,9 @@  static int vaapi_encode_mjpeg_init_picture_params(AVCodecContext *avctx,
     const uint8_t *components;
     int t, i, quant_scale, len;
 
-    av_assert0(pic->type == PICTURE_TYPE_IDR);
+    av_assert0(base_pic->type == PICTURE_TYPE_IDR);
 
-    desc = av_pix_fmt_desc_get(priv->common.input_frames->sw_format);
+    desc = av_pix_fmt_desc_get(base_ctx->input_frames->sw_format);
     av_assert0(desc);
     if (desc->flags & AV_PIX_FMT_FLAG_RGB)
         components = components_rgb;
@@ -261,7 +263,7 @@  static int vaapi_encode_mjpeg_init_picture_params(AVCodecContext *avctx,
     // JFIF header.
     if (priv->jfif) {
         JPEGRawApplicationData *app = &priv->jfif_header;
-        AVRational sar = pic->input_image->sample_aspect_ratio;
+        AVRational sar = base_pic->input_image->sample_aspect_ratio;
         int sar_w, sar_h;
         PutByteContext pbc;
 
@@ -436,25 +438,26 @@  static int vaapi_encode_mjpeg_init_slice_params(AVCodecContext *avctx,
 
 static av_cold int vaapi_encode_mjpeg_get_encoder_caps(AVCodecContext *avctx)
 {
-    VAAPIEncodeContext *ctx = avctx->priv_data;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
     const AVPixFmtDescriptor *desc;
 
-    desc = av_pix_fmt_desc_get(ctx->input_frames->sw_format);
+    desc = av_pix_fmt_desc_get(base_ctx->input_frames->sw_format);
     av_assert0(desc);
 
-    ctx->surface_width  = FFALIGN(avctx->width,  8 << desc->log2_chroma_w);
-    ctx->surface_height = FFALIGN(avctx->height, 8 << desc->log2_chroma_h);
+    base_ctx->surface_width  = FFALIGN(avctx->width,  8 << desc->log2_chroma_w);
+    base_ctx->surface_height = FFALIGN(avctx->height, 8 << desc->log2_chroma_h);
 
     return 0;
 }
 
 static av_cold int vaapi_encode_mjpeg_configure(AVCodecContext *avctx)
 {
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
     VAAPIEncodeContext       *ctx = avctx->priv_data;
     VAAPIEncodeMJPEGContext *priv = avctx->priv_data;
     int err;
 
-    priv->quality = ctx->rc_quality;
+    priv->quality = base_ctx->rc_quality;
     if (priv->quality < 1 || priv->quality > 100) {
         av_log(avctx, AV_LOG_ERROR, "Invalid quality value %d "
                "(must be 1-100).\n", priv->quality);
@@ -540,6 +543,7 @@  static av_cold int vaapi_encode_mjpeg_close(AVCodecContext *avctx)
 #define OFFSET(x) offsetof(VAAPIEncodeMJPEGContext, x)
 #define FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM)
 static const AVOption vaapi_encode_mjpeg_options[] = {
+    HW_BASE_ENCODE_COMMON_OPTIONS,
     VAAPI_ENCODE_COMMON_OPTIONS,
 
     { "jfif", "Include JFIF header",
@@ -571,7 +575,7 @@  const FFCodec ff_mjpeg_vaapi_encoder = {
     .p.id           = AV_CODEC_ID_MJPEG,
     .priv_data_size = sizeof(VAAPIEncodeMJPEGContext),
     .init           = &vaapi_encode_mjpeg_init,
-    FF_CODEC_RECEIVE_PACKET_CB(&ff_vaapi_encode_receive_packet),
+    FF_CODEC_RECEIVE_PACKET_CB(&ff_hw_base_encode_receive_packet),
     .close          = &vaapi_encode_mjpeg_close,
     .p.priv_class   = &vaapi_encode_mjpeg_class,
     .p.capabilities = AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_DR1 |
diff --git a/libavcodec/vaapi_encode_mpeg2.c b/libavcodec/vaapi_encode_mpeg2.c
index d1904bf4f5..c5485dbcc4 100644
--- a/libavcodec/vaapi_encode_mpeg2.c
+++ b/libavcodec/vaapi_encode_mpeg2.c
@@ -166,6 +166,7 @@  fail:
 
 static int vaapi_encode_mpeg2_init_sequence_params(AVCodecContext *avctx)
 {
+    HWBaseEncodeContext           *base_ctx = avctx->priv_data;
     VAAPIEncodeContext                 *ctx = avctx->priv_data;
     VAAPIEncodeMPEG2Context           *priv = avctx->priv_data;
     MPEG2RawSequenceHeader              *sh = &priv->sequence_header;
@@ -281,7 +282,7 @@  static int vaapi_encode_mpeg2_init_sequence_params(AVCodecContext *avctx)
 
     se->bit_rate_extension        = priv->bit_rate >> 18;
     se->vbv_buffer_size_extension = priv->vbv_buffer_size >> 10;
-    se->low_delay                 = ctx->b_per_p == 0;
+    se->low_delay                 = base_ctx->b_per_p == 0;
 
     se->frame_rate_extension_n = ext_n;
     se->frame_rate_extension_d = ext_d;
@@ -353,8 +354,8 @@  static int vaapi_encode_mpeg2_init_sequence_params(AVCodecContext *avctx)
 
 
     *vseq = (VAEncSequenceParameterBufferMPEG2) {
-        .intra_period = ctx->gop_size,
-        .ip_period    = ctx->b_per_p + 1,
+        .intra_period = base_ctx->gop_size,
+        .ip_period    = base_ctx->b_per_p + 1,
 
         .picture_width  = avctx->width,
         .picture_height = avctx->height,
@@ -417,30 +418,31 @@  static int vaapi_encode_mpeg2_init_sequence_params(AVCodecContext *avctx)
 }
 
 static int vaapi_encode_mpeg2_init_picture_params(AVCodecContext *avctx,
-                                                 VAAPIEncodePicture *pic)
+                                                  VAAPIEncodePicture *pic)
 {
     VAAPIEncodeMPEG2Context          *priv = avctx->priv_data;
+    HWBaseEncodePicture          *base_pic = (HWBaseEncodePicture *)pic;
     MPEG2RawPictureHeader              *ph = &priv->picture_header;
     MPEG2RawPictureCodingExtension    *pce = &priv->picture_coding_extension.data.picture_coding;
     VAEncPictureParameterBufferMPEG2 *vpic = pic->codec_picture_params;
 
-    if (pic->type == PICTURE_TYPE_IDR || pic->type == PICTURE_TYPE_I) {
+    if (base_pic->type == PICTURE_TYPE_IDR || base_pic->type == PICTURE_TYPE_I) {
         ph->temporal_reference  = 0;
         ph->picture_coding_type = 1;
-        priv->last_i_frame = pic->display_order;
+        priv->last_i_frame = base_pic->display_order;
     } else {
-        ph->temporal_reference = pic->display_order - priv->last_i_frame;
-        ph->picture_coding_type = pic->type == PICTURE_TYPE_B ? 3 : 2;
+        ph->temporal_reference = base_pic->display_order - priv->last_i_frame;
+        ph->picture_coding_type = base_pic->type == PICTURE_TYPE_B ? 3 : 2;
     }
 
-    if (pic->type == PICTURE_TYPE_P || pic->type == PICTURE_TYPE_B) {
+    if (base_pic->type == PICTURE_TYPE_P || base_pic->type == PICTURE_TYPE_B) {
         pce->f_code[0][0] = priv->f_code_horizontal;
         pce->f_code[0][1] = priv->f_code_vertical;
     } else {
         pce->f_code[0][0] = 15;
         pce->f_code[0][1] = 15;
     }
-    if (pic->type == PICTURE_TYPE_B) {
+    if (base_pic->type == PICTURE_TYPE_B) {
         pce->f_code[1][0] = priv->f_code_horizontal;
         pce->f_code[1][1] = priv->f_code_vertical;
     } else {
@@ -451,19 +453,19 @@  static int vaapi_encode_mpeg2_init_picture_params(AVCodecContext *avctx,
     vpic->reconstructed_picture = pic->recon_surface;
     vpic->coded_buf             = pic->output_buffer;
 
-    switch (pic->type) {
+    switch (base_pic->type) {
     case PICTURE_TYPE_IDR:
     case PICTURE_TYPE_I:
         vpic->picture_type = VAEncPictureTypeIntra;
         break;
     case PICTURE_TYPE_P:
         vpic->picture_type = VAEncPictureTypePredictive;
-        vpic->forward_reference_picture = pic->refs[0][0]->recon_surface;
+        vpic->forward_reference_picture = ((VAAPIEncodePicture *)base_pic->refs[0][0])->recon_surface;
         break;
     case PICTURE_TYPE_B:
         vpic->picture_type = VAEncPictureTypeBidirectional;
-        vpic->forward_reference_picture  = pic->refs[0][0]->recon_surface;
-        vpic->backward_reference_picture = pic->refs[1][0]->recon_surface;
+        vpic->forward_reference_picture  = ((VAAPIEncodePicture *)base_pic->refs[0][0])->recon_surface;
+        vpic->backward_reference_picture = ((VAAPIEncodePicture *)base_pic->refs[1][0])->recon_surface;
         break;
     default:
         av_assert0(0 && "invalid picture type");
@@ -479,17 +481,18 @@  static int vaapi_encode_mpeg2_init_picture_params(AVCodecContext *avctx,
 }
 
 static int vaapi_encode_mpeg2_init_slice_params(AVCodecContext *avctx,
-                                               VAAPIEncodePicture *pic,
-                                               VAAPIEncodeSlice *slice)
+                                                VAAPIEncodePicture *pic,
+                                                VAAPIEncodeSlice *slice)
 {
-    VAAPIEncodeMPEG2Context            *priv = avctx->priv_data;
-    VAEncSliceParameterBufferMPEG2   *vslice = slice->codec_slice_params;
+    HWBaseEncodePicture          *base_pic = (HWBaseEncodePicture *)pic;
+    VAAPIEncodeMPEG2Context          *priv = avctx->priv_data;
+    VAEncSliceParameterBufferMPEG2 *vslice = slice->codec_slice_params;
     int qp;
 
     vslice->macroblock_address = slice->block_start;
     vslice->num_macroblocks    = slice->block_size;
 
-    switch (pic->type) {
+    switch (base_pic->type) {
     case PICTURE_TYPE_IDR:
     case PICTURE_TYPE_I:
         qp = priv->quant_i;
@@ -505,14 +508,15 @@  static int vaapi_encode_mpeg2_init_slice_params(AVCodecContext *avctx,
     }
 
     vslice->quantiser_scale_code = qp;
-    vslice->is_intra_slice = (pic->type == PICTURE_TYPE_IDR ||
-                              pic->type == PICTURE_TYPE_I);
+    vslice->is_intra_slice = (base_pic->type == PICTURE_TYPE_IDR ||
+                              base_pic->type == PICTURE_TYPE_I);
 
     return 0;
 }
 
 static av_cold int vaapi_encode_mpeg2_configure(AVCodecContext *avctx)
 {
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
     VAAPIEncodeContext       *ctx = avctx->priv_data;
     VAAPIEncodeMPEG2Context *priv = avctx->priv_data;
     int err;
@@ -522,7 +526,7 @@  static av_cold int vaapi_encode_mpeg2_configure(AVCodecContext *avctx)
         return err;
 
     if (ctx->va_rc_mode == VA_RC_CQP) {
-        priv->quant_p = av_clip(ctx->rc_quality, 1, 31);
+        priv->quant_p = av_clip(base_ctx->rc_quality, 1, 31);
         if (avctx->i_quant_factor > 0.0)
             priv->quant_i =
                 av_clip((avctx->i_quant_factor * priv->quant_p +
@@ -639,8 +643,9 @@  static av_cold int vaapi_encode_mpeg2_close(AVCodecContext *avctx)
 #define OFFSET(x) offsetof(VAAPIEncodeMPEG2Context, x)
 #define FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM)
 static const AVOption vaapi_encode_mpeg2_options[] = {
+    HW_BASE_ENCODE_COMMON_OPTIONS,
     VAAPI_ENCODE_COMMON_OPTIONS,
-    VAAPI_ENCODE_RC_OPTIONS,
+    HW_BASE_ENCODE_RC_OPTIONS,
 
     { "profile", "Set profile (in profile_and_level_indication)",
       OFFSET(profile), AV_OPT_TYPE_INT,
@@ -694,7 +699,7 @@  const FFCodec ff_mpeg2_vaapi_encoder = {
     .p.id           = AV_CODEC_ID_MPEG2VIDEO,
     .priv_data_size = sizeof(VAAPIEncodeMPEG2Context),
     .init           = &vaapi_encode_mpeg2_init,
-    FF_CODEC_RECEIVE_PACKET_CB(&ff_vaapi_encode_receive_packet),
+    FF_CODEC_RECEIVE_PACKET_CB(&ff_hw_base_encode_receive_packet),
     .close          = &vaapi_encode_mpeg2_close,
     .p.priv_class   = &vaapi_encode_mpeg2_class,
     .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE |
diff --git a/libavcodec/vaapi_encode_vp8.c b/libavcodec/vaapi_encode_vp8.c
index 8a557b967e..d1e63dfc91 100644
--- a/libavcodec/vaapi_encode_vp8.c
+++ b/libavcodec/vaapi_encode_vp8.c
@@ -52,6 +52,7 @@  typedef struct VAAPIEncodeVP8Context {
 
 static int vaapi_encode_vp8_init_sequence_params(AVCodecContext *avctx)
 {
+    HWBaseEncodeContext         *base_ctx = avctx->priv_data;
     VAAPIEncodeContext               *ctx = avctx->priv_data;
     VAEncSequenceParameterBufferVP8 *vseq = ctx->codec_sequence_params;
 
@@ -66,7 +67,7 @@  static int vaapi_encode_vp8_init_sequence_params(AVCodecContext *avctx)
 
     if (!(ctx->va_rc_mode & VA_RC_CQP)) {
         vseq->bits_per_second = ctx->va_bit_rate;
-        vseq->intra_period    = ctx->gop_size;
+        vseq->intra_period    = base_ctx->gop_size;
     }
 
     return 0;
@@ -75,6 +76,7 @@  static int vaapi_encode_vp8_init_sequence_params(AVCodecContext *avctx)
 static int vaapi_encode_vp8_init_picture_params(AVCodecContext *avctx,
                                                 VAAPIEncodePicture *pic)
 {
+    HWBaseEncodePicture        *base_pic = (HWBaseEncodePicture *)pic;
     VAAPIEncodeVP8Context          *priv = avctx->priv_data;
     VAEncPictureParameterBufferVP8 *vpic = pic->codec_picture_params;
     int i;
@@ -83,10 +85,10 @@  static int vaapi_encode_vp8_init_picture_params(AVCodecContext *avctx,
 
     vpic->coded_buf = pic->output_buffer;
 
-    switch (pic->type) {
+    switch (base_pic->type) {
     case PICTURE_TYPE_IDR:
     case PICTURE_TYPE_I:
-        av_assert0(pic->nb_refs[0] == 0 && pic->nb_refs[1] == 0);
+        av_assert0(base_pic->nb_refs[0] == 0 && base_pic->nb_refs[1] == 0);
         vpic->ref_flags.bits.force_kf = 1;
         vpic->ref_last_frame =
         vpic->ref_gf_frame   =
@@ -94,20 +96,20 @@  static int vaapi_encode_vp8_init_picture_params(AVCodecContext *avctx,
             VA_INVALID_SURFACE;
         break;
     case PICTURE_TYPE_P:
-        av_assert0(!pic->nb_refs[1]);
+        av_assert0(!base_pic->nb_refs[1]);
         vpic->ref_flags.bits.no_ref_last = 0;
         vpic->ref_flags.bits.no_ref_gf   = 1;
         vpic->ref_flags.bits.no_ref_arf  = 1;
         vpic->ref_last_frame =
         vpic->ref_gf_frame   =
         vpic->ref_arf_frame  =
-            pic->refs[0][0]->recon_surface;
+            ((VAAPIEncodePicture *)base_pic->refs[0][0])->recon_surface;
         break;
     default:
         av_assert0(0 && "invalid picture type");
     }
 
-    vpic->pic_flags.bits.frame_type = (pic->type != PICTURE_TYPE_IDR);
+    vpic->pic_flags.bits.frame_type = (base_pic->type != PICTURE_TYPE_IDR);
     vpic->pic_flags.bits.show_frame = 1;
 
     vpic->pic_flags.bits.refresh_last            = 1;
@@ -145,7 +147,7 @@  static int vaapi_encode_vp8_write_quant_table(AVCodecContext *avctx,
 
     memset(&quant, 0, sizeof(quant));
 
-    if (pic->type == PICTURE_TYPE_P)
+    if (pic->base.type == PICTURE_TYPE_P)
         q = priv->q_index_p;
     else
         q = priv->q_index_i;
@@ -161,10 +163,11 @@  static int vaapi_encode_vp8_write_quant_table(AVCodecContext *avctx,
 
 static av_cold int vaapi_encode_vp8_configure(AVCodecContext *avctx)
 {
-    VAAPIEncodeContext     *ctx = avctx->priv_data;
-    VAAPIEncodeVP8Context *priv = avctx->priv_data;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
+    VAAPIEncodeContext      *ctx = avctx->priv_data;
+    VAAPIEncodeVP8Context  *priv = avctx->priv_data;
 
-    priv->q_index_p = av_clip(ctx->rc_quality, 0, VP8_MAX_QUANT);
+    priv->q_index_p = av_clip(base_ctx->rc_quality, 0, VP8_MAX_QUANT);
     if (avctx->i_quant_factor > 0.0)
         priv->q_index_i =
             av_clip((avctx->i_quant_factor * priv->q_index_p  +
@@ -216,8 +219,9 @@  static av_cold int vaapi_encode_vp8_init(AVCodecContext *avctx)
 #define OFFSET(x) offsetof(VAAPIEncodeVP8Context, x)
 #define FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM)
 static const AVOption vaapi_encode_vp8_options[] = {
+    HW_BASE_ENCODE_COMMON_OPTIONS,
     VAAPI_ENCODE_COMMON_OPTIONS,
-    VAAPI_ENCODE_RC_OPTIONS,
+    HW_BASE_ENCODE_RC_OPTIONS,
 
     { "loop_filter_level", "Loop filter level",
       OFFSET(loop_filter_level), AV_OPT_TYPE_INT, { .i64 = 16 }, 0, 63, FLAGS },
@@ -249,7 +253,7 @@  const FFCodec ff_vp8_vaapi_encoder = {
     .p.id           = AV_CODEC_ID_VP8,
     .priv_data_size = sizeof(VAAPIEncodeVP8Context),
     .init           = &vaapi_encode_vp8_init,
-    FF_CODEC_RECEIVE_PACKET_CB(&ff_vaapi_encode_receive_packet),
+    FF_CODEC_RECEIVE_PACKET_CB(&ff_hw_base_encode_receive_packet),
     .close          = &ff_vaapi_encode_close,
     .p.priv_class   = &vaapi_encode_vp8_class,
     .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE |
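
The vp8 hunks show the picture-side counterpart of the same layering: a VAAPIEncodePicture is addressed either through an explicit (HWBaseEncodePicture *) cast or via pic->base.type, and references are kept as base pointers that are cast back to the VAAPI type when recon_surface is needed. A hypothetical sketch of that up/down-cast round trip (Demo* names invented for illustration), assuming the base picture struct is the first member of the derived one:

#include <assert.h>

typedef struct DemoBasePic {
    int type;                        /* PICTURE_TYPE_* */
    struct DemoBasePic *refs[2][2];
    int nb_refs[2];
} DemoBasePic;

typedef struct DemoVAAPIPic {
    DemoBasePic base;                /* must stay the first member */
    unsigned recon_surface;
} DemoVAAPIPic;

static unsigned first_l0_recon(DemoVAAPIPic *pic)
{
    DemoBasePic *base_pic = (DemoBasePic *)pic;       /* up-cast */
    assert(base_pic->nb_refs[0] > 0);
    /* down-cast the stored base pointer to reach the VAAPI-only field */
    return ((DemoVAAPIPic *)base_pic->refs[0][0])->recon_surface;
}

int main(void)
{
    DemoVAAPIPic ref = { .recon_surface = 42 };
    DemoVAAPIPic cur = { .base.nb_refs[0] = 1 };
    cur.base.refs[0][0] = &ref.base;
    return first_l0_recon(&cur) == 42 ? 0 : 1;
}
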
diff --git a/libavcodec/vaapi_encode_vp9.c b/libavcodec/vaapi_encode_vp9.c
index c2a8dec71b..85621d7471 100644
--- a/libavcodec/vaapi_encode_vp9.c
+++ b/libavcodec/vaapi_encode_vp9.c
@@ -53,6 +53,7 @@  typedef struct VAAPIEncodeVP9Context {
 
 static int vaapi_encode_vp9_init_sequence_params(AVCodecContext *avctx)
 {
+    HWBaseEncodeContext         *base_ctx = avctx->priv_data;
     VAAPIEncodeContext               *ctx = avctx->priv_data;
     VAEncSequenceParameterBufferVP9 *vseq = ctx->codec_sequence_params;
     VAEncPictureParameterBufferVP9  *vpic = ctx->codec_picture_params;
@@ -64,7 +65,7 @@  static int vaapi_encode_vp9_init_sequence_params(AVCodecContext *avctx)
 
     if (!(ctx->va_rc_mode & VA_RC_CQP)) {
         vseq->bits_per_second = ctx->va_bit_rate;
-        vseq->intra_period    = ctx->gop_size;
+        vseq->intra_period    = base_ctx->gop_size;
     }
 
     vpic->frame_width_src  = avctx->width;
@@ -78,9 +79,10 @@  static int vaapi_encode_vp9_init_sequence_params(AVCodecContext *avctx)
 static int vaapi_encode_vp9_init_picture_params(AVCodecContext *avctx,
                                                 VAAPIEncodePicture *pic)
 {
-    VAAPIEncodeContext              *ctx = avctx->priv_data;
+    HWBaseEncodeContext        *base_ctx = avctx->priv_data;
     VAAPIEncodeVP9Context          *priv = avctx->priv_data;
-    VAAPIEncodeVP9Picture          *hpic = pic->priv_data;
+    HWBaseEncodePicture        *base_pic = (HWBaseEncodePicture *)pic;
+    VAAPIEncodeVP9Picture          *hpic = base_pic->priv_data;
     VAEncPictureParameterBufferVP9 *vpic = pic->codec_picture_params;
     int i;
     int num_tile_columns;
@@ -94,20 +96,20 @@  static int vaapi_encode_vp9_init_picture_params(AVCodecContext *avctx,
     num_tile_columns = (vpic->frame_width_src + VP9_MAX_TILE_WIDTH - 1) / VP9_MAX_TILE_WIDTH;
     vpic->log2_tile_columns = num_tile_columns == 1 ? 0 : av_log2(num_tile_columns - 1) + 1;
 
-    switch (pic->type) {
+    switch (base_pic->type) {
     case PICTURE_TYPE_IDR:
-        av_assert0(pic->nb_refs[0] == 0 && pic->nb_refs[1] == 0);
+        av_assert0(base_pic->nb_refs[0] == 0 && base_pic->nb_refs[1] == 0);
         vpic->ref_flags.bits.force_kf = 1;
         vpic->refresh_frame_flags = 0xff;
         hpic->slot = 0;
         break;
     case PICTURE_TYPE_P:
-        av_assert0(!pic->nb_refs[1]);
+        av_assert0(!base_pic->nb_refs[1]);
         {
-            VAAPIEncodeVP9Picture *href = pic->refs[0][0]->priv_data;
+            VAAPIEncodeVP9Picture *href = base_pic->refs[0][0]->priv_data;
             av_assert0(href->slot == 0 || href->slot == 1);
 
-            if (ctx->max_b_depth > 0) {
+            if (base_ctx->max_b_depth > 0) {
                 hpic->slot = !href->slot;
                 vpic->refresh_frame_flags = 1 << hpic->slot | 0xfc;
             } else {
@@ -120,20 +122,20 @@  static int vaapi_encode_vp9_init_picture_params(AVCodecContext *avctx,
         }
         break;
     case PICTURE_TYPE_B:
-        av_assert0(pic->nb_refs[0] && pic->nb_refs[1]);
+        av_assert0(base_pic->nb_refs[0] && base_pic->nb_refs[1]);
         {
-            VAAPIEncodeVP9Picture *href0 = pic->refs[0][0]->priv_data,
-                                  *href1 = pic->refs[1][0]->priv_data;
-            av_assert0(href0->slot < pic->b_depth + 1 &&
-                       href1->slot < pic->b_depth + 1);
+            VAAPIEncodeVP9Picture *href0 = base_pic->refs[0][0]->priv_data,
+                                  *href1 = base_pic->refs[1][0]->priv_data;
+            av_assert0(href0->slot < base_pic->b_depth + 1 &&
+                       href1->slot < base_pic->b_depth + 1);
 
-            if (pic->b_depth == ctx->max_b_depth) {
+            if (base_pic->b_depth == base_ctx->max_b_depth) {
                 // Unreferenced frame.
                 vpic->refresh_frame_flags = 0x00;
                 hpic->slot = 8;
             } else {
-                vpic->refresh_frame_flags = 0xfe << pic->b_depth & 0xff;
-                hpic->slot = 1 + pic->b_depth;
+                vpic->refresh_frame_flags = 0xfe << base_pic->b_depth & 0xff;
+                hpic->slot = 1 + base_pic->b_depth;
             }
             vpic->ref_flags.bits.ref_frame_ctrl_l0  = 1;
             vpic->ref_flags.bits.ref_frame_ctrl_l1  = 2;
@@ -148,31 +150,31 @@  static int vaapi_encode_vp9_init_picture_params(AVCodecContext *avctx,
     }
     if (vpic->refresh_frame_flags == 0x00) {
         av_log(avctx, AV_LOG_DEBUG, "Pic %"PRId64" not stored.\n",
-               pic->display_order);
+               base_pic->display_order);
     } else {
         av_log(avctx, AV_LOG_DEBUG, "Pic %"PRId64" stored in slot %d.\n",
-               pic->display_order, hpic->slot);
+               base_pic->display_order, hpic->slot);
     }
 
     for (i = 0; i < FF_ARRAY_ELEMS(vpic->reference_frames); i++)
         vpic->reference_frames[i] = VA_INVALID_SURFACE;
 
     for (i = 0; i < MAX_REFERENCE_LIST_NUM; i++) {
-        for (int j = 0; j < pic->nb_refs[i]; j++) {
-            VAAPIEncodePicture *ref_pic = pic->refs[i][j];
+        for (int j = 0; j < base_pic->nb_refs[i]; j++) {
+            HWBaseEncodePicture *ref_pic = base_pic->refs[i][j];
             int slot;
             slot = ((VAAPIEncodeVP9Picture*)ref_pic->priv_data)->slot;
             av_assert0(vpic->reference_frames[slot] == VA_INVALID_SURFACE);
-            vpic->reference_frames[slot] = ref_pic->recon_surface;
+            vpic->reference_frames[slot] = ((VAAPIEncodePicture *)ref_pic)->recon_surface;
         }
     }
 
-    vpic->pic_flags.bits.frame_type = (pic->type != PICTURE_TYPE_IDR);
-    vpic->pic_flags.bits.show_frame = pic->display_order <= pic->encode_order;
+    vpic->pic_flags.bits.frame_type = (base_pic->type != PICTURE_TYPE_IDR);
+    vpic->pic_flags.bits.show_frame = base_pic->display_order <= base_pic->encode_order;
 
-    if (pic->type == PICTURE_TYPE_IDR)
+    if (base_pic->type == PICTURE_TYPE_IDR)
         vpic->luma_ac_qindex     = priv->q_idx_idr;
-    else if (pic->type == PICTURE_TYPE_P)
+    else if (base_pic->type == PICTURE_TYPE_P)
         vpic->luma_ac_qindex     = priv->q_idx_p;
     else
         vpic->luma_ac_qindex     = priv->q_idx_b;
@@ -188,22 +190,23 @@  static int vaapi_encode_vp9_init_picture_params(AVCodecContext *avctx,
 
 static av_cold int vaapi_encode_vp9_get_encoder_caps(AVCodecContext *avctx)
 {
-    VAAPIEncodeContext *ctx = avctx->priv_data;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
 
     // Surfaces must be aligned to 64x64 superblock boundaries.
-    ctx->surface_width  = FFALIGN(avctx->width,  64);
-    ctx->surface_height = FFALIGN(avctx->height, 64);
+    base_ctx->surface_width  = FFALIGN(avctx->width,  64);
+    base_ctx->surface_height = FFALIGN(avctx->height, 64);
 
     return 0;
 }
 
 static av_cold int vaapi_encode_vp9_configure(AVCodecContext *avctx)
 {
-    VAAPIEncodeContext     *ctx = avctx->priv_data;
-    VAAPIEncodeVP9Context *priv = avctx->priv_data;
+    HWBaseEncodeContext *base_ctx = avctx->priv_data;
+    VAAPIEncodeContext       *ctx = avctx->priv_data;
+    VAAPIEncodeVP9Context   *priv = avctx->priv_data;
 
     if (ctx->rc_mode->quality) {
-        priv->q_idx_p = av_clip(ctx->rc_quality, 0, VP9_MAX_QUANT);
+        priv->q_idx_p = av_clip(base_ctx->rc_quality, 0, VP9_MAX_QUANT);
         if (avctx->i_quant_factor > 0.0)
             priv->q_idx_idr =
                 av_clip((avctx->i_quant_factor * priv->q_idx_p  +
@@ -273,8 +276,9 @@  static av_cold int vaapi_encode_vp9_init(AVCodecContext *avctx)
 #define OFFSET(x) offsetof(VAAPIEncodeVP9Context, x)
 #define FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM)
 static const AVOption vaapi_encode_vp9_options[] = {
+    HW_BASE_ENCODE_COMMON_OPTIONS,
     VAAPI_ENCODE_COMMON_OPTIONS,
-    VAAPI_ENCODE_RC_OPTIONS,
+    HW_BASE_ENCODE_RC_OPTIONS,
 
     { "loop_filter_level", "Loop filter level",
       OFFSET(loop_filter_level), AV_OPT_TYPE_INT, { .i64 = 16 }, 0, 63, FLAGS },
@@ -306,7 +310,7 @@  const FFCodec ff_vp9_vaapi_encoder = {
     .p.id           = AV_CODEC_ID_VP9,
     .priv_data_size = sizeof(VAAPIEncodeVP9Context),
     .init           = &vaapi_encode_vp9_init,
-    FF_CODEC_RECEIVE_PACKET_CB(&ff_vaapi_encode_receive_packet),
+    FF_CODEC_RECEIVE_PACKET_CB(&ff_hw_base_encode_receive_packet),
     .close          = &ff_vaapi_encode_close,
     .p.priv_class   = &vaapi_encode_vp9_class,
     .p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE |
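
The B-frame bookkeeping in the vp9 hunk is compact: refresh_frame_flags = 0xfe << base_pic->b_depth & 0xff together with slot = 1 + base_pic->b_depth. Assuming the usual VP9 meaning of the mask (each set bit selects one of the eight reference slots the reconstructed frame is written into), a throwaway program shows how both values evolve with b_depth:

#include <stdio.h>

int main(void)
{
    /* Mirrors the non-leaf B-frame branch of
     * vaapi_encode_vp9_init_picture_params(). */
    for (int b_depth = 0; b_depth < 3; b_depth++) {
        unsigned refresh_frame_flags = 0xfe << b_depth & 0xff;
        int slot = 1 + b_depth;
        printf("b_depth=%d refresh_frame_flags=0x%02x slot=%d\n",
               b_depth, refresh_frame_flags, slot);
    }
    return 0;
}

For b_depth 0, 1, 2 this prints masks 0xfe, 0xfc, 0xf8 with slots 1, 2, 3, so each deeper B-frame leaves one more low-numbered slot (holding its references, per the href->slot < b_depth + 1 assertion) untouched while still being stored itself.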