[FFmpeg-devel,v2,57/69] avcodec/mpegvideo: Add proper MPVMainContext

Message ID AM7PR03MB6660C11DE7F30BDC78A54EB48F269@AM7PR03MB6660.eurprd03.prod.outlook.com
State New
Series [FFmpeg-devel,v2,01/69] avcodec/avcodec: Avoid MpegEncContext in AVHWAccel.decode_mb

Commit Message

Andreas Rheinhardt Feb. 1, 2022, 1:06 p.m. UTC
This will allow removing the array of slice context pointers
from MPVContext (and therefore from the slice contexts).
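For illustration, a minimal sketch of the layering that can be inferred from the
hunks below: MPVMainContext embeds the slice-level MPVContext as its member "s",
so code that previously took the address of the embedded context now dereferences
one level further (e.g. &m->common.s instead of &m->common, or &m->s for the
decoder contexts). Everything in this sketch other than the embedded "s" member
is a placeholder, not the real FFmpeg definitions:

    #include <stdio.h>

    /* Simplified stand-ins: only the "MPVContext s" embedding is taken from the
     * patch; every other field here is a placeholder for illustration. */
    typedef struct MPVContext {
        int mb_width, mb_height;        /* per-slice state lives here           */
    } MPVContext;

    typedef struct MPVMainContext {
        MPVContext  s;                  /* the main context's own slice context */
        MPVContext *slice_ctx[4];       /* main-context-only data, e.g. the
                                         * slice context pointers the commit
                                         * message wants to move out of
                                         * MPVContext                           */
    } MPVMainContext;

    int main(void)
    {
        MPVMainContext m = { .s = { .mb_width = 45, .mb_height = 36 } };
        MPVContext *const s = &m.s;     /* the new "&m->s" access pattern       */
        printf("%dx%d macroblocks\n", s->mb_width, s->mb_height);
        return 0;
    }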

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
---
 libavcodec/dnxhdenc.c          |  40 ++--
 libavcodec/dxva2_vc1.c         |  16 +-
 libavcodec/flvdec.c            |   3 +-
 libavcodec/flvenc.c            |   2 +-
 libavcodec/h261dec.c           |  33 ++--
 libavcodec/h261enc.c           |   4 +-
 libavcodec/h263dec.c           |  58 +++---
 libavcodec/intelh263dec.c      |   5 +-
 libavcodec/ituh263dec.c        |  11 +-
 libavcodec/ituh263enc.c        |   8 +-
 libavcodec/mjpegenc.c          |   9 +-
 libavcodec/mpeg12.c            |   3 +-
 libavcodec/mpeg12dec.c         |  98 ++++++----
 libavcodec/mpeg12enc.c         |  10 +-
 libavcodec/mpeg4video.c        |   2 +-
 libavcodec/mpeg4video.h        |   2 +-
 libavcodec/mpeg4video_parser.c |   6 +-
 libavcodec/mpeg4videodec.c     |  68 ++++---
 libavcodec/mpeg4videoenc.c     |  14 +-
 libavcodec/mpeg_er.c           |   3 +-
 libavcodec/mpegvideo.c         |  49 +++--
 libavcodec/mpegvideo.h         |   4 +-
 libavcodec/mpegvideo_dec.c     |  39 ++--
 libavcodec/mpegvideo_enc.c     |  37 ++--
 libavcodec/mpegvideoenc.h      |   2 +-
 libavcodec/msmpeg4.c           |   3 +-
 libavcodec/msmpeg4dec.c        |  13 +-
 libavcodec/msmpeg4enc.c        |   6 +-
 libavcodec/mss2.c              |  27 +--
 libavcodec/nvdec_mpeg4.c       |   2 +-
 libavcodec/nvdec_vc1.c         |   4 +-
 libavcodec/ratecontrol.c       |  22 +--
 libavcodec/rv10.c              |  32 ++--
 libavcodec/rv10enc.c           |   2 +-
 libavcodec/rv20enc.c           |   2 +-
 libavcodec/rv30.c              |  16 +-
 libavcodec/rv34.c              | 105 ++++++-----
 libavcodec/rv40.c              |  18 +-
 libavcodec/snow.c              |   2 +-
 libavcodec/snowenc.c           |  14 +-
 libavcodec/speedhqenc.c        |   4 +-
 libavcodec/svq1enc.c           |   6 +-
 libavcodec/vaapi_mpeg4.c       |   2 +-
 libavcodec/vaapi_vc1.c         |  50 ++---
 libavcodec/vc1.c               | 333 +++++++++++++++++----------------
 libavcodec/vc1_block.c         | 178 +++++++++---------
 libavcodec/vc1_loopfilter.c    |  28 +--
 libavcodec/vc1_mc.c            |  30 +--
 libavcodec/vc1_parser.c        |   8 +-
 libavcodec/vc1_pred.c          |  22 +--
 libavcodec/vc1dec.c            |  63 ++++---
 libavcodec/vdpau_mpeg4.c       |   2 +-
 libavcodec/vdpau_vc1.c         |   8 +-
 libavcodec/wmv2.c              |   3 +-
 libavcodec/wmv2dec.c           |  37 ++--
 libavcodec/wmv2enc.c           |   6 +-
 56 files changed, 838 insertions(+), 736 deletions(-)

Patch

diff --git a/libavcodec/dnxhdenc.c b/libavcodec/dnxhdenc.c
index 9ad95ac20a..141ecdb5cf 100644
--- a/libavcodec/dnxhdenc.c
+++ b/libavcodec/dnxhdenc.c
@@ -267,7 +267,7 @@  static av_cold int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
     int qscale, i;
     const uint8_t *luma_weight_table   = ctx->cid_table->luma_weight;
     const uint8_t *chroma_weight_table = ctx->cid_table->chroma_weight;
-    MPVEncContext *const s = &ctx->m.common;
+    MPVEncContext *const s = &ctx->m.common.s;
 
     if (!FF_ALLOCZ_TYPED_ARRAY(ctx->qmatrix_l,   s->avctx->qmax + 1) ||
         !FF_ALLOCZ_TYPED_ARRAY(ctx->qmatrix_c,   s->avctx->qmax + 1) ||
@@ -336,7 +336,7 @@  static av_cold int dnxhd_init_qmat(DNXHDEncContext *ctx, int lbias, int cbias)
 
 static av_cold int dnxhd_init_rc(DNXHDEncContext *ctx)
 {
-    MPVEncContext *const s = &ctx->m.common;
+    MPVEncContext *const s = &ctx->m.common.s;
     if (!FF_ALLOCZ_TYPED_ARRAY(ctx->mb_rc, (s->avctx->qmax + 1) * s->mb_num))
         return AVERROR(ENOMEM);
 
@@ -355,7 +355,7 @@  static av_cold int dnxhd_init_rc(DNXHDEncContext *ctx)
 static av_cold int dnxhd_encode_init(AVCodecContext *avctx)
 {
     DNXHDEncContext *ctx = avctx->priv_data;
-    MPVEncContext *const s = &ctx->m.common;
+    MPVEncContext *const s = &ctx->m.common.s;
     int i, ret;
 
     switch (avctx->pix_fmt) {
@@ -528,7 +528,7 @@  static av_cold int dnxhd_encode_init(AVCodecContext *avctx)
 static int dnxhd_write_header(AVCodecContext *avctx, uint8_t *buf)
 {
     DNXHDEncContext *ctx = avctx->priv_data;
-    MPVEncContext *const s = &ctx->m.common;
+    MPVEncContext *const s = &ctx->m.common.s;
 
     memset(buf, 0, ctx->data_offset);
 
@@ -571,7 +571,7 @@  static av_always_inline void dnxhd_encode_dc(DNXHDEncContext *ctx, int diff)
     } else {
         nbits = av_log2_16bit(2 * diff);
     }
-    put_bits(&ctx->m.common.pb, ctx->cid_table->dc_bits[nbits] + nbits,
+    put_bits(&ctx->m.common.s.pb, ctx->cid_table->dc_bits[nbits] + nbits,
              (ctx->cid_table->dc_codes[nbits] << nbits) +
              av_mod_uintp2(diff, nbits));
 }
@@ -580,7 +580,7 @@  static av_always_inline
 void dnxhd_encode_block(DNXHDEncContext *ctx, int16_t *block,
                         int last_index, int n)
 {
-    MPVEncContext *const s = &ctx->m.common;
+    MPVEncContext *const s = &ctx->m.common.s;
     int last_non_zero = 0;
     int slevel, i, j;
 
@@ -620,7 +620,7 @@  void dnxhd_unquantize_c(DNXHDEncContext *ctx, int16_t *block, int n,
     }
 
     for (i = 1; i <= last_index; i++) {
-        int j = ctx->m.common.intra_scantable.permutated[i];
+        int j = ctx->m.common.s.intra_scantable.permutated[i];
         level = block[j];
         if (level) {
             if (level < 0) {
@@ -668,7 +668,7 @@  int dnxhd_calc_ac_bits(DNXHDEncContext *ctx, int16_t *block, int last_index)
     int bits = 0;
     int i, j, level;
     for (i = 1; i <= last_index; i++) {
-        j = ctx->m.common.intra_scantable.permutated[i];
+        j = ctx->m.common.s.intra_scantable.permutated[i];
         level = block[j];
         if (level) {
             int run_level = i - last_non_zero - 1;
@@ -683,7 +683,7 @@  int dnxhd_calc_ac_bits(DNXHDEncContext *ctx, int16_t *block, int last_index)
 static av_always_inline
 void dnxhd_get_blocks(DNXHDEncContext *ctx, int mb_x, int mb_y)
 {
-    MPVEncContext *const s = &ctx->m.common;
+    MPVEncContext *const s = &ctx->m.common.s;
     const int bs = ctx->block_width_l2;
     const int bw = 1 << bs;
     int dct_y_offset = ctx->dct_y_offset;
@@ -827,7 +827,7 @@  static int dnxhd_calc_bits_thread(AVCodecContext *avctx, void *arg,
                                   int jobnr, int threadnr)
 {
     DNXHDEncContext *ctx = avctx->priv_data;
-    MPVEncContext *const s = &ctx->m.common;
+    MPVEncContext *const s = &ctx->m.common.s;
     int mb_y = jobnr, mb_x;
     int qscale = ctx->qscale;
     LOCAL_ALIGNED_16(int16_t, block, [64]);
@@ -885,7 +885,7 @@  static int dnxhd_encode_thread(AVCodecContext *avctx, void *arg,
                                int jobnr, int threadnr)
 {
     DNXHDEncContext *ctx = avctx->priv_data;
-    MPVEncContext *const s = &ctx->m.common;
+    MPVEncContext *const s = &ctx->m.common.s;
     int mb_y = jobnr, mb_x;
     ctx = ctx->thread[threadnr];
     init_put_bits(&s->pb, (uint8_t *)arg + ctx->data_offset + ctx->slice_offs[jobnr],
@@ -922,7 +922,7 @@  static int dnxhd_encode_thread(AVCodecContext *avctx, void *arg,
 
 static void dnxhd_setup_threads_slices(DNXHDEncContext *ctx)
 {
-    MPVEncContext *const s = &ctx->m.common;
+    MPVEncContext *const s = &ctx->m.common.s;
     int mb_y, mb_x;
     int offset = 0;
     for (mb_y = 0; mb_y < s->mb_height; mb_y++) {
@@ -944,7 +944,7 @@  static int dnxhd_mb_var_thread(AVCodecContext *avctx, void *arg,
                                int jobnr, int threadnr)
 {
     DNXHDEncContext *ctx = avctx->priv_data;
-    MPVEncContext *const s = &ctx->m.common;
+    MPVEncContext *const s = &ctx->m.common.s;
     int mb_y = jobnr, mb_x, x, y;
     int partial_last_row = (mb_y == s->mb_height - 1) &&
                            ((avctx->height >> ctx->interlaced) & 0xF);
@@ -1011,7 +1011,7 @@  static int dnxhd_mb_var_thread(AVCodecContext *avctx, void *arg,
 
 static int dnxhd_encode_rdo(AVCodecContext *avctx, DNXHDEncContext *ctx)
 {
-    MPVEncContext *const s = &ctx->m.common;
+    MPVEncContext *const s = &ctx->m.common.s;
     int lambda, up_step, down_step;
     int last_lower = INT_MAX, last_higher = 0;
     int x, y, q;
@@ -1089,7 +1089,7 @@  static int dnxhd_encode_rdo(AVCodecContext *avctx, DNXHDEncContext *ctx)
 
 static int dnxhd_find_qscale(DNXHDEncContext *ctx)
 {
-    MPVEncContext *const s = &ctx->m.common;
+    MPVEncContext *const s = &ctx->m.common.s;
     int bits = 0;
     int up_step = 1;
     int down_step = 1;
@@ -1202,7 +1202,7 @@  static void radix_sort(RCCMPEntry *data, RCCMPEntry *tmp, int size)
 
 static int dnxhd_encode_fast(AVCodecContext *avctx, DNXHDEncContext *ctx)
 {
-    MPVEncContext *const s = &ctx->m.common;
+    MPVEncContext *const s = &ctx->m.common.s;
     int max_bits = 0;
     int ret, x, y;
     if ((ret = dnxhd_find_qscale(ctx)) < 0)
@@ -1247,11 +1247,11 @@  static int dnxhd_encode_fast(AVCodecContext *avctx, DNXHDEncContext *ctx)
 
 static void dnxhd_load_picture(DNXHDEncContext *ctx, const AVFrame *frame)
 {
-    MPVEncContext *const s = &ctx->m.common;
+    MPVEncContext *const s = &ctx->m.common.s;
 
     for (int i = 0; i < s->avctx->thread_count; i++) {
-        ctx->thread[i]->m.common.linesize    = frame->linesize[0] << ctx->interlaced;
-        ctx->thread[i]->m.common.uvlinesize  = frame->linesize[1] << ctx->interlaced;
+        ctx->thread[i]->m.common.s.linesize   = frame->linesize[0] << ctx->interlaced;
+        ctx->thread[i]->m.common.s.uvlinesize = frame->linesize[1] << ctx->interlaced;
         ctx->thread[i]->dct_y_offset  = s->linesize  *8;
         ctx->thread[i]->dct_uv_offset = s->uvlinesize*8;
     }
@@ -1263,7 +1263,7 @@  static int dnxhd_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
                                 const AVFrame *frame, int *got_packet)
 {
     DNXHDEncContext *ctx = avctx->priv_data;
-    MPVEncContext *const s = &ctx->m.common;
+    MPVEncContext *const s = &ctx->m.common.s;
     int first_field = 1;
     int offset, i, ret;
     uint8_t *buf;
diff --git a/libavcodec/dxva2_vc1.c b/libavcodec/dxva2_vc1.c
index e86f3ed0f2..836367ffe8 100644
--- a/libavcodec/dxva2_vc1.c
+++ b/libavcodec/dxva2_vc1.c
@@ -41,7 +41,7 @@  static void fill_picture_parameters(AVCodecContext *avctx,
                                     AVDXVAContext *ctx, const VC1Context *v,
                                     DXVA_PictureParameters *pp)
 {
-    const MPVDecContext *const s = &v->s;
+    const MPVDecContext *const s = &v->s.s;
     const Picture *current_picture = s->current_picture_ptr;
     int intcomp = 0;
 
@@ -164,7 +164,7 @@  static void fill_slice(AVCodecContext *avctx, DXVA_SliceInfo *slice,
                        unsigned position, unsigned size)
 {
     const VC1Context *v = avctx->priv_data;
-    const MPVDecContext *const s = &v->s;
+    const MPVDecContext *const s = &v->s.s;
 
     memset(slice, 0, sizeof(*slice));
     slice->wHorizontalPosition = 0;
@@ -186,7 +186,7 @@  static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
 {
     const VC1Context *v = avctx->priv_data;
     AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
-    const MPVDecContext *const s = &v->s;
+    const MPVDecContext *const s = &v->s.s;
     struct dxva2_picture_context *ctx_pic = s->current_picture_ptr->hwaccel_picture_private;
 
     static const uint8_t start_code[] = { 0, 0, 1, 0x0d };
@@ -313,7 +313,7 @@  static int dxva2_vc1_start_frame(AVCodecContext *avctx,
 {
     const VC1Context *v = avctx->priv_data;
     AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
-    struct dxva2_picture_context *ctx_pic = v->s.current_picture_ptr->hwaccel_picture_private;
+    struct dxva2_picture_context *ctx_pic = v->s.s.current_picture_ptr->hwaccel_picture_private;
 
     if (!DXVA_CONTEXT_VALID(avctx, ctx))
         return -1;
@@ -332,7 +332,7 @@  static int dxva2_vc1_decode_slice(AVCodecContext *avctx,
                                   uint32_t size)
 {
     const VC1Context *v = avctx->priv_data;
-    const Picture *current_picture = v->s.current_picture_ptr;
+    const Picture *current_picture = v->s.s.current_picture_ptr;
     struct dxva2_picture_context *ctx_pic = current_picture->hwaccel_picture_private;
     unsigned position;
 
@@ -360,18 +360,18 @@  static int dxva2_vc1_decode_slice(AVCodecContext *avctx,
 static int dxva2_vc1_end_frame(AVCodecContext *avctx)
 {
     VC1Context *v = avctx->priv_data;
-    struct dxva2_picture_context *ctx_pic = v->s.current_picture_ptr->hwaccel_picture_private;
+    struct dxva2_picture_context *ctx_pic = v->s.s.current_picture_ptr->hwaccel_picture_private;
     int ret;
 
     if (ctx_pic->slice_count <= 0 || ctx_pic->bitstream_size <= 0)
         return -1;
 
-    ret = ff_dxva2_common_end_frame(avctx, v->s.current_picture_ptr->f,
+    ret = ff_dxva2_common_end_frame(avctx, v->s.s.current_picture_ptr->f,
                                     &ctx_pic->pp, sizeof(ctx_pic->pp),
                                     NULL, 0,
                                     commit_bitstream_and_slice_buffer);
     if (!ret)
-        ff_mpeg_draw_horiz_band(&v->s, 0, avctx->height);
+        ff_mpeg_draw_horiz_band(&v->s.s, 0, avctx->height);
     return ret;
 }
 
diff --git a/libavcodec/flvdec.c b/libavcodec/flvdec.c
index bf4df242cc..469a8ceae9 100644
--- a/libavcodec/flvdec.c
+++ b/libavcodec/flvdec.c
@@ -26,8 +26,9 @@ 
 #include "mpegvideodata.h"
 #include "mpegvideodec.h"
 
-int ff_flv_decode_picture_header(MPVMainDecContext *s)
+int ff_flv_decode_picture_header(MPVMainDecContext *m)
 {
+    MPVDecContext *const s = &m->s;
     int format, width, height;
 
     /* picture header */
diff --git a/libavcodec/flvenc.c b/libavcodec/flvenc.c
index ba6bce029e..ad6fb9ebd3 100644
--- a/libavcodec/flvenc.c
+++ b/libavcodec/flvenc.c
@@ -25,7 +25,7 @@ 
 
 void ff_flv_encode_picture_header(MPVMainEncContext *m, int picture_number)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     int format;
 
     align_put_bits(&s->pb);
diff --git a/libavcodec/h261dec.c b/libavcodec/h261dec.c
index ab534e9ff1..8113d2ff7d 100644
--- a/libavcodec/h261dec.c
+++ b/libavcodec/h261dec.c
@@ -82,11 +82,12 @@  static av_cold int h261_decode_init(AVCodecContext *avctx)
 {
     static AVOnce init_static_once = AV_ONCE_INIT;
     H261DecContext *const h = avctx->priv_data;
-    MPVMainDecContext *const s = &h->s;
+    MPVMainDecContext *const m = &h->s;
+    MPVDecContext *const s = &m->s;
 
     s->private_ctx = &h->common;
     // set defaults
-    ff_mpv_decode_init(s, avctx);
+    ff_mpv_decode_init(m, avctx);
 
     s->out_format  = FMT_H261;
     s->low_delay   = 1;
@@ -106,7 +107,7 @@  static av_cold int h261_decode_init(AVCodecContext *avctx)
  */
 static int h261_decode_gob_header(H261DecContext *h)
 {
-    MPVDecContext *const s = &h->s;
+    MPVDecContext *const s = &h->s.s;
     unsigned int val;
 
     if (!h->gob_start_code_skipped) {
@@ -160,7 +161,7 @@  static int h261_decode_gob_header(H261DecContext *h)
  */
 static int h261_resync(H261DecContext *h)
 {
-    MPVDecContext *const s = &h->s;
+    MPVDecContext *const s = &h->s.s;
     int left, ret;
 
     if (h->gob_start_code_skipped) {
@@ -201,7 +202,7 @@  static int h261_resync(H261DecContext *h)
  */
 static int h261_decode_mb_skipped(H261DecContext *h, int mba1, int mba2)
 {
-    MPVDecContext *const s = &h->s;
+    MPVDecContext *const s = &h->s.s;
     int i;
 
     s->mb_intra = 0;
@@ -271,7 +272,7 @@  static int decode_mv_component(GetBitContext *gb, int v)
  */
 static int h261_decode_block(H261DecContext *h, int16_t *block, int n, int coded)
 {
-    MPVDecContext *const s = &h->s;
+    MPVDecContext *const s = &h->s.s;
     int level, i, j, run;
     RLTable *rl = &ff_h261_rl_tcoeff;
     const uint8_t *scan_table;
@@ -363,7 +364,7 @@  static int h261_decode_block(H261DecContext *h, int16_t *block, int n, int coded
 
 static int h261_decode_mb(H261DecContext *h)
 {
-    MPVDecContext *const s = &h->s;
+    MPVDecContext *const s = &h->s.s;
     H261Context *const com = &h->common;
     int i, cbp, xy;
 
@@ -488,7 +489,7 @@  intra:
  */
 static int h261_decode_picture_header(H261DecContext *h)
 {
-    MPVDecContext *const s = &h->s;
+    MPVDecContext *const s = &h->s.s;
     int format, i;
     uint32_t startcode = 0;
 
@@ -552,7 +553,7 @@  static int h261_decode_picture_header(H261DecContext *h)
 
 static int h261_decode_gob(H261DecContext *h)
 {
-    MPVDecContext *const s = &h->s;
+    MPVDecContext *const s = &h->s.s;
 
     ff_set_qscale(s, s->qscale);
 
@@ -597,8 +598,8 @@  static int h261_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame, AVPacket *avpkt)
 {
     H261DecContext *const h = avctx->priv_data;
-    MPVMainDecContext *const s2 = &h->s;
-    MPVDecContext *const s = s2;
+    MPVMainDecContext *const m = &h->s;
+    MPVDecContext *const s = &m->s;
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
     int ret;
@@ -621,11 +622,11 @@  retry:
     }
 
     if (s->width != avctx->coded_width || s->height != avctx->coded_height) {
-        ff_mpv_common_end(s2);
+        ff_mpv_common_end(m);
     }
 
     if (!s->context_initialized) {
-        if ((ret = ff_mpv_common_init(s2)) < 0)
+        if ((ret = ff_mpv_common_init(m)) < 0)
             return ret;
 
         ret = ff_set_dimensions(avctx, s->width, s->height);
@@ -644,10 +645,10 @@  retry:
          avctx->skip_frame >= AVDISCARD_ALL)
         return get_consumed_bytes(s, buf_size);
 
-    if (ff_mpv_frame_start(s, avctx) < 0)
+    if (ff_mpv_frame_start(m, avctx) < 0)
         return -1;
 
-    ff_mpeg_er_frame_start(s);
+    ff_mpeg_er_frame_start(m);
 
     /* decode each macroblock */
     s->mb_x = 0;
@@ -658,7 +659,7 @@  retry:
             break;
         h261_decode_gob(h);
     }
-    ff_mpv_frame_end(s);
+    ff_mpv_frame_end(m);
 
     av_assert0(s->current_picture.f->pict_type == s->current_picture_ptr->f->pict_type);
     av_assert0(s->current_picture.f->pict_type == s->pict_type);
diff --git a/libavcodec/h261enc.c b/libavcodec/h261enc.c
index 9c8d9d1b08..0146ec9985 100644
--- a/libavcodec/h261enc.c
+++ b/libavcodec/h261enc.c
@@ -63,7 +63,7 @@  int ff_h261_get_picture_format(int width, int height)
 void ff_h261_encode_picture_header(MPVMainEncContext *m, int picture_number)
 {
     H261EncContext *const h = (H261EncContext *)m;
-    MPVEncContext  *const s = &m->common;
+    MPVEncContext  *const s = &m->common.s;
     int format, temp_ref;
 
     align_put_bits(&s->pb);
@@ -384,7 +384,7 @@  static av_cold void h261_encode_init_static(void)
 av_cold void ff_h261_encode_init(MPVMainEncContext *m)
 {
     H261EncContext *const h = (H261EncContext*)m;
-    MPVEncContext  *const s = &m->common;
+    MPVEncContext  *const s = &m->common.s;
     static AVOnce init_static_once = AV_ONCE_INIT;
 
     s->private_ctx = &h->common;
diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c
index df78867735..20a1348d88 100644
--- a/libavcodec/h263dec.c
+++ b/libavcodec/h263dec.c
@@ -75,13 +75,14 @@  static enum AVPixelFormat h263_get_format(AVCodecContext *avctx)
 
 av_cold int ff_h263_decode_init(AVCodecContext *avctx)
 {
-    MPVMainDecContext *const s = avctx->priv_data;
+    MPVMainDecContext *const m = avctx->priv_data;
+    MPVDecContext *const s = &m->s;
     int ret;
 
     s->out_format      = FMT_H263;
 
     // set defaults
-    ff_mpv_decode_init(s, avctx);
+    ff_mpv_decode_init(m, avctx);
 
     s->quant_precision = 5;
     s->decode_mb       = ff_h263_decode_mb;
@@ -145,7 +146,7 @@  av_cold int ff_h263_decode_init(AVCodecContext *avctx)
         avctx->codec->id != AV_CODEC_ID_MPEG4) {
         avctx->pix_fmt = h263_get_format(avctx);
         ff_mpv_idct_init(s);
-        if ((ret = ff_mpv_common_init(s)) < 0)
+        if ((ret = ff_mpv_common_init(m)) < 0)
             return ret;
     }
 
@@ -167,8 +168,9 @@  av_cold int ff_h263_decode_end(AVCodecContext *avctx)
 /**
  * Return the number of bytes consumed for building the current frame.
  */
-static int get_consumed_bytes(MPVMainDecContext *s, int buf_size)
+static int get_consumed_bytes(MPVMainDecContext *m, int buf_size)
 {
+    MPVDecContext *const s = &m->s;
     int pos = (get_bits_count(&s->gb) + 7) >> 3;
 
     if (s->divx_packed || s->avctx->hwaccel) {
@@ -195,8 +197,9 @@  static int get_consumed_bytes(MPVMainDecContext *s, int buf_size)
     }
 }
 
-static int decode_slice(MPVMainDecContext *s)
+static int decode_slice(MPVMainDecContext *m)
 {
+    MPVDecContext *const s = &m->s;
     const int part_mask = s->partitioned_frame
                           ? (ER_AC_END | ER_AC_ERROR) : 0x7F;
     const int mb_size   = 16 >> s->avctx->lowres;
@@ -427,7 +430,8 @@  static int decode_slice(MPVMainDecContext *s)
 int ff_h263_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                          AVPacket *avpkt)
 {
-    MPVMainDecContext *const s  = avctx->priv_data;
+    MPVMainDecContext *const m = avctx->priv_data;
+    MPVDecContext *const s = &m->s;
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
     int ret;
@@ -500,9 +504,9 @@  retry:
 
     /* let's go :-) */
     if (CONFIG_WMV2_DECODER && s->msmpeg4_version == 5) {
-        ret = ff_wmv2_decode_picture_header(s);
+        ret = ff_wmv2_decode_picture_header(m);
     } else if (CONFIG_MSMPEG4_DECODER && s->msmpeg4_version) {
-        ret = ff_msmpeg4_decode_picture_header(s);
+        ret = ff_msmpeg4_decode_picture_header(m);
     } else if (CONFIG_MPEG4_DECODER && avctx->codec_id == AV_CODEC_ID_MPEG4) {
         if (s->avctx->extradata_size && s->picture_number == 0) {
             GetBitContext gb;
@@ -512,11 +516,11 @@  retry:
         }
         ret = ff_mpeg4_decode_picture_header(avctx->priv_data, &s->gb, 0, 0);
     } else if (CONFIG_H263I_DECODER && s->codec_id == AV_CODEC_ID_H263I) {
-        ret = ff_intel_h263_decode_picture_header(s);
+        ret = ff_intel_h263_decode_picture_header(m);
     } else if (CONFIG_FLV_DECODER && s->h263_flv) {
-        ret = ff_flv_decode_picture_header(s);
+        ret = ff_flv_decode_picture_header(m);
     } else {
-        ret = ff_h263_decode_picture_header(s);
+        ret = ff_h263_decode_picture_header(m);
     }
 
     if (ret < 0 || ret == FRAME_SKIPPED) {
@@ -528,7 +532,7 @@  retry:
         }
     }
     if (ret == FRAME_SKIPPED)
-        return get_consumed_bytes(s, buf_size);
+        return get_consumed_bytes(m, buf_size);
 
     /* skip if the header was thrashed */
     if (ret < 0) {
@@ -538,7 +542,7 @@  retry:
 
     if (!s->context_initialized) {
         avctx->pix_fmt = h263_get_format(avctx);
-        if ((ret = ff_mpv_common_init(s)) < 0)
+        if ((ret = ff_mpv_common_init(m)) < 0)
             return ret;
     }
 
@@ -574,7 +578,7 @@  retry:
 
         ff_set_sar(avctx, avctx->sample_aspect_ratio);
 
-        if ((ret = ff_mpv_common_frame_size_change(s)))
+        if ((ret = ff_mpv_common_frame_size_change(m)))
             return ret;
 
         if (avctx->pix_fmt != h263_get_format(avctx)) {
@@ -596,13 +600,13 @@  retry:
     /* skip B-frames if we don't have reference frames */
     if (!s->last_picture_ptr &&
         (s->pict_type == AV_PICTURE_TYPE_B || s->droppable))
-        return get_consumed_bytes(s, buf_size);
+        return get_consumed_bytes(m, buf_size);
     if ((avctx->skip_frame >= AVDISCARD_NONREF &&
          s->pict_type == AV_PICTURE_TYPE_B)    ||
         (avctx->skip_frame >= AVDISCARD_NONKEY &&
          s->pict_type != AV_PICTURE_TYPE_I)    ||
         avctx->skip_frame >= AVDISCARD_ALL)
-        return get_consumed_bytes(s, buf_size);
+        return get_consumed_bytes(m, buf_size);
 
     if ((!s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
         s->me.qpel_put = s->qdsp.put_qpel_pixels_tab;
@@ -612,7 +616,7 @@  retry:
         s->me.qpel_avg = s->qdsp.avg_qpel_pixels_tab;
     }
 
-    if ((ret = ff_mpv_frame_start(s, avctx)) < 0)
+    if ((ret = ff_mpv_frame_start(m, avctx)) < 0)
         return ret;
 
     if (!s->divx_packed && !avctx->hwaccel)
@@ -625,13 +629,13 @@  retry:
             return ret;
     }
 
-    ff_mpeg_er_frame_start(s);
+    ff_mpeg_er_frame_start(m);
 
     /* the second part of the wmv2 header contains the MB skip bits which
      * are stored in current_picture->mb_type which is not available before
      * ff_mpv_frame_start() */
     if (CONFIG_WMV2_DECODER && s->msmpeg4_version == 5) {
-        ret = ff_wmv2_decode_secondary_picture_header(s);
+        ret = ff_wmv2_decode_secondary_picture_header(m);
         if (ret < 0)
             return ret;
         if (ret == 1)
@@ -642,7 +646,7 @@  retry:
     s->mb_x = 0;
     s->mb_y = 0;
 
-    slice_ret = decode_slice(s);
+    slice_ret = decode_slice(m);
     while (s->mb_y < s->mb_height) {
         if (s->msmpeg4_version) {
             if (s->slice_height == 0 || s->mb_x != 0 || slice_ret < 0 ||
@@ -650,7 +654,7 @@  retry:
                 break;
         } else {
             int prev_x = s->mb_x, prev_y = s->mb_y;
-            if (ff_h263_resync(s) < 0)
+            if (ff_h263_resync(m) < 0)
                 break;
             if (prev_y * s->mb_width + prev_x < s->mb_y * s->mb_width + s->mb_x)
                 s->er.error_occurred = 1;
@@ -659,14 +663,14 @@  retry:
         if (s->msmpeg4_version < 4 && s->h263_pred)
             ff_mpeg4_clean_buffers(s);
 
-        if (decode_slice(s) < 0)
+        if (decode_slice(m) < 0)
             slice_ret = AVERROR_INVALIDDATA;
     }
 
     if (s->msmpeg4_version && s->msmpeg4_version < 4 &&
         s->pict_type == AV_PICTURE_TYPE_I)
         if (!CONFIG_MSMPEG4_DECODER ||
-            ff_msmpeg4_decode_ext_header(s, buf_size) < 0)
+            ff_msmpeg4_decode_ext_header(m, buf_size) < 0)
             s->er.error_status_table[s->mb_num - 1] = ER_MB_ERROR;
 
     av_assert1(s->bitstream_buffer_size == 0);
@@ -680,7 +684,7 @@  frame_end:
             return ret;
     }
 
-    ff_mpv_frame_end(s);
+    ff_mpv_frame_end(m);
 
     if (CONFIG_MPEG4_DECODER && avctx->codec_id == AV_CODEC_ID_MPEG4)
         ff_mpeg4_frame_end(avctx, buf, buf_size);
@@ -694,12 +698,12 @@  frame_end:
         if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
             return ret;
         ff_print_debug_info(s, s->current_picture_ptr, pict);
-        ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_QSCALE_TYPE_MPEG1);
+        ff_mpv_export_qp_table(m, pict, s->current_picture_ptr, FF_QSCALE_TYPE_MPEG1);
     } else if (s->last_picture_ptr) {
         if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
             return ret;
         ff_print_debug_info(s, s->last_picture_ptr, pict);
-        ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_QSCALE_TYPE_MPEG1);
+        ff_mpv_export_qp_table(m, pict, s->last_picture_ptr, FF_QSCALE_TYPE_MPEG1);
     }
 
     if (s->last_picture_ptr || s->low_delay) {
@@ -724,7 +728,7 @@  frame_end:
     if (slice_ret < 0 && (avctx->err_recognition & AV_EF_EXPLODE))
         return slice_ret;
     else
-        return get_consumed_bytes(s, buf_size);
+        return get_consumed_bytes(m, buf_size);
 }
 
 const enum AVPixelFormat ff_h263_hwaccel_pixfmt_list_420[] = {
diff --git a/libavcodec/intelh263dec.c b/libavcodec/intelh263dec.c
index 45b66033c9..e26d45ec20 100644
--- a/libavcodec/intelh263dec.c
+++ b/libavcodec/intelh263dec.c
@@ -26,8 +26,9 @@ 
 #include "mpegvideodata.h"
 
 /* don't understand why they choose a different header ! */
-int ff_intel_h263_decode_picture_header(MPVMainDecContext *s)
+int ff_intel_h263_decode_picture_header(MPVMainDecContext *m)
 {
+    MPVDecContext *const s = &m->s;
     int format;
 
     if (get_bits_left(&s->gb) == 64) { /* special dummy frames */
@@ -124,7 +125,7 @@  int ff_intel_h263_decode_picture_header(MPVMainDecContext *s)
     s->y_dc_scale_table=
     s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
 
-    ff_h263_show_pict_info(s);
+    ff_h263_show_pict_info(m);
 
     return 0;
 }
diff --git a/libavcodec/ituh263dec.c b/libavcodec/ituh263dec.c
index 17d01c2d76..9cfdac63d6 100644
--- a/libavcodec/ituh263dec.c
+++ b/libavcodec/ituh263dec.c
@@ -73,8 +73,9 @@  static const int h263_mb_type_b_map[15]= {
     MB_TYPE_INTRA4x4                | MB_TYPE_CBP | MB_TYPE_QUANT,
 };
 
-void ff_h263_show_pict_info(MPVMainDecContext *s)
+void ff_h263_show_pict_info(MPVMainDecContext *m)
 {
+    MPVDecContext *const s = &m->s;
     if(s->avctx->debug&FF_DEBUG_PICT_INFO){
     av_log(s->avctx, AV_LOG_DEBUG, "qp:%d %c size:%d rnd:%d%s%s%s%s%s%s%s%s%s %d/%d\n",
          s->qscale, av_get_picture_type_char(s->pict_type),
@@ -210,8 +211,9 @@  static int h263_decode_gob_header(MPVDecContext *s)
  * Decode the group of blocks / video packet header / slice header (MPEG-4 Studio).
  * @return bit position of the resync_marker, or <0 if none was found
  */
-int ff_h263_resync(MPVMainDecContext *s)
+int ff_h263_resync(MPVMainDecContext *m)
 {
+    MPVDecContext *const s = &m->s;
     int left, pos, ret;
 
     /* In MPEG-4 studio mode look for a new slice startcode
@@ -1086,8 +1088,9 @@  end:
 }
 
 /* Most is hardcoded; should extend to handle all H.263 streams. */
-int ff_h263_decode_picture_header(MPVMainDecContext *s)
+int ff_h263_decode_picture_header(MPVMainDecContext *m)
 {
+    MPVDecContext *const s = &m->s;
     int format, width, height, i, ret;
     uint32_t startcode;
 
@@ -1366,7 +1369,7 @@  int ff_h263_decode_picture_header(MPVMainDecContext *s)
         s->c_dc_scale_table= ff_mpeg1_dc_scale_table;
     }
 
-        ff_h263_show_pict_info(s);
+    ff_h263_show_pict_info(m);
     if (s->pict_type == AV_PICTURE_TYPE_I && s->codec_tag == AV_RL32("ZYGO") && get_bits_left(&s->gb) >= 85 + 13*3*16 + 50){
         int i,j;
         for(i=0; i<85; i++) av_log(s->avctx, AV_LOG_DEBUG, "%d", get_bits1(&s->gb));
diff --git a/libavcodec/ituh263enc.c b/libavcodec/ituh263enc.c
index 56f9f91d19..a2f895dfc0 100644
--- a/libavcodec/ituh263enc.c
+++ b/libavcodec/ituh263enc.c
@@ -104,7 +104,7 @@  av_const int ff_h263_aspect_to_info(AVRational aspect){
 
 void ff_h263_encode_picture_header(MPVMainEncContext *m, int picture_number)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     int format, coded_frame_rate, coded_frame_rate_base, i, temp_ref;
     int best_clock_code=1;
     int best_divisor=60;
@@ -269,7 +269,7 @@  void ff_h263_encode_gob_header(MPVEncContext *s, int mb_line)
  */
 void ff_clean_h263_qscales(MPVMainEncContext *m)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     int i;
     int8_t * const qscale_table = s->current_picture.qscale_table;
 
@@ -815,7 +815,7 @@  static av_cold void h263_encode_init_static(void)
 
 av_cold void ff_h263_encode_init(MPVMainEncContext *m)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     static AVOnce init_static_once = AV_ONCE_INIT;
 
     s->me.mv_penalty= mv_penalty; // FIXME exact table for MSMPEG4 & H.263+
@@ -880,7 +880,7 @@  void ff_h263_encode_mba(MPVEncContext *s)
     put_bits(&s->pb, ff_mba_length[i], mb_pos);
 }
 
-#define OFFSET(x) offsetof(MPVMainEncContext, common.x)
+#define OFFSET(x) offsetof(MPVMainEncContext, common.s.x)
 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
 static const AVOption h263_options[] = {
     { "obmc",         "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
diff --git a/libavcodec/mjpegenc.c b/libavcodec/mjpegenc.c
index 360653c5a1..faf59566b1 100644
--- a/libavcodec/mjpegenc.c
+++ b/libavcodec/mjpegenc.c
@@ -77,7 +77,8 @@  static av_cold void init_uni_ac_vlc(const uint8_t huff_size_ac[256],
 
 static void mjpeg_encode_picture_header(MPVMainEncContext *m)
 {
-    MPVEncContext *const s = &m->common;
+    MPVMainContext *const m2 = &m->common;
+    MPVEncContext *const s = &m2->s;
     ff_mjpeg_encode_picture_header(s->avctx, &s->pb, s->mjpeg_ctx,
                                    &s->intra_scantable, 0,
                                    s->intra_matrix, s->chroma_intra_matrix,
@@ -91,7 +92,7 @@  static void mjpeg_encode_picture_header(MPVMainEncContext *m)
 void ff_mjpeg_amv_encode_picture_header(MPVMainEncContext *main)
 {
     MJPEGEncContext *const m = (MJPEGEncContext*)main;
-    av_assert2(main->common.mjpeg_ctx == &m->mjpeg);
+    av_assert2(main->common.s.mjpeg_ctx == &m->mjpeg);
     /* s->huffman == HUFFMAN_TABLE_OPTIMAL can only be true for MJPEG. */
     if (!CONFIG_MJPEG_ENCODER || m->mjpeg.huffman != HUFFMAN_TABLE_OPTIMAL)
         mjpeg_encode_picture_header(main);
@@ -105,7 +106,7 @@  void ff_mjpeg_amv_encode_picture_header(MPVMainEncContext *main)
  */
 static void mjpeg_encode_picture_frame(MPVMainEncContext *main)
 {
-    MPVEncContext *const s = &main->common;
+    MPVEncContext *const s = &main->common.s;
     int nbits, code, table_id;
     MJpegContext *m = s->mjpeg_ctx;
     uint8_t  *huff_size[4] = { m->huff_size_dc_luminance,
@@ -296,7 +297,7 @@  static int alloc_huffman(MPVEncContext *s)
 av_cold int ff_mjpeg_encode_init(MPVMainEncContext *main)
 {
     MJpegContext *const m = &((MJPEGEncContext*)main)->mjpeg;
-    MPVEncContext *const s = &main->common;
+    MPVEncContext *const s = &main->common.s;
     int ret, use_slices;
 
     s->mjpeg_ctx = m;
diff --git a/libavcodec/mpeg12.c b/libavcodec/mpeg12.c
index 7934c7ea2a..dc1ea8287e 100644
--- a/libavcodec/mpeg12.c
+++ b/libavcodec/mpeg12.c
@@ -100,8 +100,9 @@  av_cold void ff_init_2d_vlc_rl(RLTable *rl, unsigned static_size, int flags)
     }
 }
 
-av_cold void ff_mpeg12_common_init(MPVMainContext *s)
+av_cold void ff_mpeg12_common_init(MPVMainContext *m)
 {
+    MPVContext *const s = &m->s;
 
     s->y_dc_scale_table =
     s->c_dc_scale_table = ff_mpeg2_dc_scale_table[s->intra_dc_precision];
diff --git a/libavcodec/mpeg12dec.c b/libavcodec/mpeg12dec.c
index d3c32818cf..ef2fa80aba 100644
--- a/libavcodec/mpeg12dec.c
+++ b/libavcodec/mpeg12dec.c
@@ -1056,6 +1056,7 @@  static av_cold int mpeg_decode_init(AVCodecContext *avctx)
 {
     Mpeg1Context *s    = avctx->priv_data;
     MPVMainDecContext *const s2 = &s->mpeg_ctx;
+    MPVDecContext *const s3 = &s2->s;
 
     if (   avctx->codec_tag != AV_RL32("VCR2")
         && avctx->codec_tag != AV_RL32("BW10"))
@@ -1064,11 +1065,11 @@  static av_cold int mpeg_decode_init(AVCodecContext *avctx)
 
     /* we need some permutation to store matrices,
      * until the decoder sets the real permutation. */
-    ff_mpv_idct_init(s2);
+    ff_mpv_idct_init(s3);
     ff_mpeg12_common_init(&s->mpeg_ctx);
     ff_mpeg12_init_vlcs();
 
-    s2->chroma_format              = 1;
+    s3->chroma_format              = 1;
     s->mpeg_ctx_allocated = 0;
     s->repeat_field                = 0;
     avctx->color_range             = AVCOL_RANGE_MPEG;
@@ -1086,7 +1087,7 @@  static int mpeg_decode_update_thread_context(AVCodecContext *avctx,
 
     if (avctx == avctx_from               ||
         !ctx_from->mpeg_ctx_allocated ||
-        !s1->context_initialized)
+        !s1->s.context_initialized)
         return 0;
 
     err = ff_mpeg_update_thread_context(avctx, avctx_from);
@@ -1166,7 +1167,8 @@  static const enum AVPixelFormat mpeg12_pixfmt_list_444[] = {
 static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
 {
     Mpeg1Context *s1  = avctx->priv_data;
-    MPVMainDecContext *const s = &s1->mpeg_ctx;
+    MPVMainDecContext *const m = &s1->mpeg_ctx;
+    MPVDecContext *const s = &m->s;
     const enum AVPixelFormat *pix_fmts;
 
     if (CONFIG_GRAY && (avctx->flags & AV_CODEC_FLAG_GRAY))
@@ -1195,7 +1197,7 @@  static void setup_hwaccel_for_pixfmt(AVCodecContext *avctx)
         Mpeg1Context *s1 = avctx->priv_data;
         MPVMainDecContext *const s = &s1->mpeg_ctx;
 
-        s->pack_pblocks = 1;
+        s->s.pack_pblocks = 1;
     }
 }
 
@@ -1204,7 +1206,8 @@  static void setup_hwaccel_for_pixfmt(AVCodecContext *avctx)
 static int mpeg_decode_postinit(AVCodecContext *avctx)
 {
     Mpeg1Context *s1  = avctx->priv_data;
-    MPVMainDecContext *const s = &s1->mpeg_ctx;
+    MPVMainDecContext *const m = &s1->mpeg_ctx;
+    MPVDecContext *const s = &m->s;
     uint8_t old_permutation[64];
     int ret;
 
@@ -1271,7 +1274,7 @@  static int mpeg_decode_postinit(AVCodecContext *avctx)
 #if FF_API_FLAG_TRUNCATED
             ParseContext pc = s->parse_context;
             s->parse_context.buffer = 0;
-            ff_mpv_common_end(s);
+            ff_mpv_common_end(m);
             s->parse_context = pc;
 #else
             ff_mpv_common_end(s);
@@ -1329,7 +1332,7 @@  static int mpeg_decode_postinit(AVCodecContext *avctx)
         memcpy(old_permutation, s->idsp.idct_permutation, 64 * sizeof(uint8_t));
 
         ff_mpv_idct_init(s);
-        if ((ret = ff_mpv_common_init(s)) < 0)
+        if ((ret = ff_mpv_common_init(m)) < 0)
             return ret;
 
         quant_matrix_rebuild(s->intra_matrix,        old_permutation, s->idsp.idct_permutation);
@@ -1346,7 +1349,8 @@  static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf,
                                 int buf_size)
 {
     Mpeg1Context *s1  = avctx->priv_data;
-    MPVMainDecContext *const s = &s1->mpeg_ctx;
+    MPVMainDecContext *const m = &s1->mpeg_ctx;
+    MPVDecContext *const s = &m->s;
     int ref, f_code, vbv_delay, ret;
 
     ret = init_get_bits8(&s->gb, buf, buf_size);
@@ -1393,7 +1397,8 @@  static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf,
 
 static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
 {
-    MPVMainDecContext *const s = &s1->mpeg_ctx;
+    MPVMainDecContext *const m = &s1->mpeg_ctx;
+    MPVDecContext *const s = &m->s;
     int horiz_size_ext, vert_size_ext;
     int bit_rate_ext;
     AVCPBProperties *cpb_props;
@@ -1443,7 +1448,8 @@  static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
 
 static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
 {
-    MPVMainDecContext *const s = &s1->mpeg_ctx;
+    MPVMainDecContext *const m = &s1->mpeg_ctx;
+    MPVDecContext *const s = &m->s;
     int color_description, w, h;
 
     skip_bits(&s->gb, 3); /* video format */
@@ -1467,7 +1473,8 @@  static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
 
 static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
 {
-    MPVMainDecContext *const s = &s1->mpeg_ctx;
+    MPVMainDecContext *const m = &s1->mpeg_ctx;
+    MPVDecContext *const s = &m->s;
     int i, nofco;
 
     nofco = 1;
@@ -1499,7 +1506,7 @@  static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
                s1->pan_scan.position[2][0], s1->pan_scan.position[2][1]);
 }
 
-static int load_matrix(MPVMainDecContext *s, uint16_t matrix0[64],
+static int load_matrix(MPVDecContext *s, uint16_t matrix0[64],
                        uint16_t matrix1[64], int intra)
 {
     int i;
@@ -1522,7 +1529,7 @@  static int load_matrix(MPVMainDecContext *s, uint16_t matrix0[64],
     return 0;
 }
 
-static void mpeg_decode_quant_matrix_extension(MPVMainDecContext *s)
+static void mpeg_decode_quant_matrix_extension(MPVDecContext *s)
 {
     ff_dlog(s->avctx, "matrix extension\n");
 
@@ -1538,7 +1545,8 @@  static void mpeg_decode_quant_matrix_extension(MPVMainDecContext *s)
 
 static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
 {
-    MPVMainDecContext *const s = &s1->mpeg_ctx;
+    MPVMainDecContext *const m = &s1->mpeg_ctx;
+    MPVDecContext *const s = &m->s;
 
     s->full_pel[0]       = s->full_pel[1] = 0;
     s->mpeg_f_code[0][0] = get_bits(&s->gb, 4);
@@ -1599,10 +1607,11 @@  static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
     return 0;
 }
 
-static int mpeg_field_start(MPVMainDecContext *s, const uint8_t *buf, int buf_size)
+static int mpeg_field_start(Mpeg1Context *s1, const uint8_t *buf, int buf_size)
 {
+    MPVMainDecContext *const m = &s1->mpeg_ctx;
+    MPVDecContext *const s = &m->s;
     AVCodecContext *avctx = s->avctx;
-    Mpeg1Context *s1      = (Mpeg1Context *) s;
     int ret;
 
     if (!(avctx->flags2 & AV_CODEC_FLAG2_CHUNKS)) {
@@ -1614,10 +1623,10 @@  static int mpeg_field_start(MPVMainDecContext *s, const uint8_t *buf, int buf_si
     if (s->first_field || s->picture_structure == PICT_FRAME) {
         AVFrameSideData *pan_scan;
 
-        if ((ret = ff_mpv_frame_start(s, avctx)) < 0)
+        if ((ret = ff_mpv_frame_start(m, avctx)) < 0)
             return ret;
 
-        ff_mpeg_er_frame_start(s);
+        ff_mpeg_er_frame_start(m);
 
         /* first check if we must repeat the frame */
         s->current_picture_ptr->f->repeat_pict = 0;
@@ -2044,7 +2053,8 @@  static int slice_decode_thread(AVCodecContext *c, void *arg)
 static int slice_end(AVCodecContext *avctx, AVFrame *pict)
 {
     Mpeg1Context *s1  = avctx->priv_data;
-    MPVMainDecContext *const s = &s1->mpeg_ctx;
+    MPVMainDecContext *const m = &s1->mpeg_ctx;
+    MPVDecContext *const s = &m->s;
 
     if (!s1->mpeg_ctx_allocated || !s->current_picture_ptr)
         return 0;
@@ -2064,14 +2074,14 @@  static int slice_end(AVCodecContext *avctx, AVFrame *pict)
 
         ff_er_frame_end(&s->er);
 
-        ff_mpv_frame_end(s);
+        ff_mpv_frame_end(m);
 
         if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
             int ret = av_frame_ref(pict, s->current_picture_ptr->f);
             if (ret < 0)
                 return ret;
             ff_print_debug_info(s, s->current_picture_ptr, pict);
-            ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_QSCALE_TYPE_MPEG2);
+            ff_mpv_export_qp_table(m, pict, s->current_picture_ptr, FF_QSCALE_TYPE_MPEG2);
         } else {
             /* latency of 1 frame for I- and P-frames */
             if (s->last_picture_ptr) {
@@ -2079,7 +2089,7 @@  static int slice_end(AVCodecContext *avctx, AVFrame *pict)
                 if (ret < 0)
                     return ret;
                 ff_print_debug_info(s, s->last_picture_ptr, pict);
-                ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_QSCALE_TYPE_MPEG2);
+                ff_mpv_export_qp_table(m, pict, s->last_picture_ptr, FF_QSCALE_TYPE_MPEG2);
             }
         }
 
@@ -2093,7 +2103,8 @@  static int mpeg1_decode_sequence(AVCodecContext *avctx,
                                  const uint8_t *buf, int buf_size)
 {
     Mpeg1Context *s1  = avctx->priv_data;
-    MPVMainDecContext *const s = &s1->mpeg_ctx;
+    MPVMainDecContext *const m = &s1->mpeg_ctx;
+    MPVDecContext *const s = &m->s;
     int width, height;
     int i, v, j;
 
@@ -2181,13 +2192,14 @@  static int mpeg1_decode_sequence(AVCodecContext *avctx,
 static int vcr2_init_sequence(AVCodecContext *avctx)
 {
     Mpeg1Context *s1  = avctx->priv_data;
-    MPVMainDecContext *const s = &s1->mpeg_ctx;
+    MPVMainDecContext *const m = &s1->mpeg_ctx;
+    MPVDecContext *const s = &m->s;
     int i, v, ret;
 
     /* start new MPEG-1 context decoding */
     s->out_format = FMT_MPEG1;
     if (s1->mpeg_ctx_allocated) {
-        ff_mpv_common_end(s);
+        ff_mpv_common_end(m);
         s1->mpeg_ctx_allocated = 0;
     }
     s->width            = avctx->coded_width;
@@ -2199,7 +2211,7 @@  static int vcr2_init_sequence(AVCodecContext *avctx)
     setup_hwaccel_for_pixfmt(avctx);
 
     ff_mpv_idct_init(s);
-    if ((ret = ff_mpv_common_init(s)) < 0)
+    if ((ret = ff_mpv_common_init(m)) < 0)
         return ret;
     s1->mpeg_ctx_allocated = 1;
 
@@ -2292,7 +2304,7 @@  static int mpeg_decode_a53_cc(AVCodecContext *avctx,
                         cap[0] = cap[1] = cap[2] = 0x00;
                     } else {
                         field = (field == 2 ? 1 : 0);
-                        if (!s1->mpeg_ctx.top_field_first) field = !field;
+                        if (!s1->mpeg_ctx.s.top_field_first) field = !field;
                         cap[0] = 0x04 | field;
                         cap[1] = ff_reverse[cc1];
                         cap[2] = ff_reverse[cc2];
@@ -2441,7 +2453,8 @@  static void mpeg_decode_gop(AVCodecContext *avctx,
                             const uint8_t *buf, int buf_size)
 {
     Mpeg1Context *s1  = avctx->priv_data;
-    MPVMainDecContext *const s = &s1->mpeg_ctx;
+    MPVMainDecContext *const m = &s1->mpeg_ctx;
+    MPVDecContext *const s = &m->s;
     int broken_link;
     int64_t tc;
 
@@ -2468,7 +2481,9 @@  static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
                          int *got_output, const uint8_t *buf, int buf_size)
 {
     Mpeg1Context *s = avctx->priv_data;
-    MPVMainDecContext *const s2 = &s->mpeg_ctx;
+    MPVMainDecContext *const m = &s->mpeg_ctx;
+    MPVMainContext *const m2 = m;
+    MPVDecContext *const s2 = &m2->s;
     const uint8_t *buf_ptr = buf;
     const uint8_t *buf_end = buf + buf_size;
     int ret, input_size;
@@ -2742,7 +2757,7 @@  static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
                 if (s->first_slice) {
                     skip_frame     = 0;
                     s->first_slice = 0;
-                    if ((ret = mpeg_field_start(s2, buf, buf_size)) < 0)
+                    if ((ret = mpeg_field_start(s, buf, buf_size)) < 0)
                         return ret;
                 }
                 if (!s2->current_picture_ptr) {
@@ -2803,8 +2818,9 @@  static int mpeg_decode_frame(AVCodecContext *avctx, void *data,
     int ret;
     int buf_size = avpkt->size;
     Mpeg1Context *s = avctx->priv_data;
+    MPVMainDecContext *const m = &s->mpeg_ctx;
+    MPVDecContext *const s2 = &m->s;
     AVFrame *picture = data;
-    MPVMainDecContext *const s2 = &s->mpeg_ctx;
 
     if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) {
         /* special case for last picture */
@@ -3013,7 +3029,8 @@  static int ipu_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
 {
     IPUContext *s = avctx->priv_data;
-    MPVMainDecContext *const m = &s->m;
+    MPVMainDecContext *const m2 = &s->m;
+    MPVDecContext *const m = &m2->s;
     GetBitContext *gb = &m->gb;
     AVFrame * const frame = data;
     int ret;
@@ -3114,26 +3131,27 @@  static av_cold int ipu_decode_init(AVCodecContext *avctx)
 {
     IPUContext *s = avctx->priv_data;
     MPVMainDecContext *const m = &s->m;
+    MPVDecContext *const s2 = &m->s;
 
     avctx->pix_fmt = AV_PIX_FMT_YUV420P;
 
     ff_mpv_decode_init(m, avctx);
-    ff_mpv_idct_init(m);
+    ff_mpv_idct_init(s2);
     ff_mpeg12_common_init(m);
     ff_mpeg12_init_vlcs();
 
     for (int i = 0; i < 64; i++) {
-        int j = m->idsp.idct_permutation[i];
+        int j = s2->idsp.idct_permutation[i];
         int v = ff_mpeg1_default_intra_matrix[i];
-        m->intra_matrix[j]        = v;
-        m->chroma_intra_matrix[j] = v;
+        s2->intra_matrix[j]        = v;
+        s2->chroma_intra_matrix[j] = v;
     }
 
     for (int i = 0; i < 64; i++) {
-        int j = m->idsp.idct_permutation[i];
+        int j = s2->idsp.idct_permutation[i];
         int v = ff_mpeg1_default_non_intra_matrix[i];
-        m->inter_matrix[j]        = v;
-        m->chroma_inter_matrix[j] = v;
+        s2->inter_matrix[j]        = v;
+        s2->chroma_inter_matrix[j] = v;
     }
 
     return 0;
diff --git a/libavcodec/mpeg12enc.c b/libavcodec/mpeg12enc.c
index a0cbd5d8de..4b885669bc 100644
--- a/libavcodec/mpeg12enc.c
+++ b/libavcodec/mpeg12enc.c
@@ -123,7 +123,7 @@  av_cold void ff_mpeg1_init_uni_ac_vlc(const RLTable *rl, uint8_t *uni_ac_vlc_len
 static int find_frame_rate_index(MPEG12EncContext *mpeg12)
 {
     MPVMainEncContext *const m = &mpeg12->mpeg;
-    MPVEncContext     *const s = &m->common;
+    MPVEncContext     *const s = &m->common.s;
     int i;
     AVRational bestq = (AVRational) {0, 0};
     AVRational ext;
@@ -165,7 +165,7 @@  static av_cold int encode_init(AVCodecContext *avctx)
 {
     MPEG12EncContext *const mpeg12 = avctx->priv_data;
     MPVMainEncContext *const m = &mpeg12->mpeg;
-    MPVEncContext     *const s = &m->common;
+    MPVEncContext     *const s = &m->common.s;
     int ret;
     int max_size = avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 16383 : 4095;
 
@@ -268,7 +268,7 @@  static void put_header(MPVEncContext *s, int header)
 static void mpeg1_encode_sequence_header(MPVMainEncContext *m)
 {
     MPEG12EncContext *const mpeg12 = (MPEG12EncContext*)m;
-    MPVEncContext    *const      s = &m->common;
+    MPVEncContext    *const      s = &m->common.s;
     unsigned int vbv_buffer_size, fps, v;
     int constraint_parameter_flag;
     AVRational framerate = ff_mpeg12_frame_rate_tab[mpeg12->frame_rate_index];
@@ -457,7 +457,7 @@  void ff_mpeg1_encode_slice_header(MPVEncContext *s)
 void ff_mpeg1_encode_picture_header(MPVMainEncContext *m, int picture_number)
 {
     MPEG12EncContext *const mpeg12 = (MPEG12EncContext*)m;
-    MPVEncContext    *const      s = &m->common;
+    MPVEncContext    *const      s = &m->common.s;
     AVFrameSideData *side_data;
     mpeg1_encode_sequence_header(m);
 
@@ -1132,7 +1132,7 @@  static av_cold void mpeg12_encode_init_static(void)
 
 av_cold void ff_mpeg1_encode_init(MPVMainEncContext *m)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     static AVOnce init_static_once = AV_ONCE_INIT;
 
     ff_mpeg12_common_init(&m->common);
diff --git a/libavcodec/mpeg4video.c b/libavcodec/mpeg4video.c
index 68be7e21ed..6a5b368a17 100644
--- a/libavcodec/mpeg4video.c
+++ b/libavcodec/mpeg4video.c
@@ -80,7 +80,7 @@  void ff_mpeg4_clean_buffers(MPVContext *s)
 #define tab_bias (tab_size / 2)
 
 // used by MPEG-4 and rv10 decoder
-void ff_mpeg4_init_direct_mv(MPVMainContext *s)
+void ff_mpeg4_init_direct_mv(MPVContext *s)
 {
     int i;
     for (i = 0; i < tab_size; i++) {
diff --git a/libavcodec/mpeg4video.h b/libavcodec/mpeg4video.h
index 4af66be5ad..2a7e106a7a 100644
--- a/libavcodec/mpeg4video.h
+++ b/libavcodec/mpeg4video.h
@@ -71,7 +71,7 @@ 
 
 void ff_mpeg4_clean_buffers(MPVContext *s);
 int ff_mpeg4_get_video_packet_prefix_length(MPVContext *s);
-void ff_mpeg4_init_direct_mv(MPVMainContext *m);
+void ff_mpeg4_init_direct_mv(MPVContext *s);
 
 /**
  * @return the mb_type
diff --git a/libavcodec/mpeg4video_parser.c b/libavcodec/mpeg4video_parser.c
index 81e5376a28..adc2409953 100644
--- a/libavcodec/mpeg4video_parser.c
+++ b/libavcodec/mpeg4video_parser.c
@@ -92,7 +92,7 @@  static int mpeg4_decode_header(AVCodecParserContext *s1, AVCodecContext *avctx,
 {
     struct Mp4vParseContext *pc = s1->priv_data;
     Mpeg4DecContext *dec_ctx = &pc->dec_ctx;
-    MPVDecContext *const s = &dec_ctx->m;
+    MPVDecContext *const s = &dec_ctx->m.s;
     GetBitContext gb1, *gb = &gb1;
     int ret;
 
@@ -131,8 +131,8 @@  static av_cold int mpeg4video_parse_init(AVCodecParserContext *s)
     struct Mp4vParseContext *pc = s->priv_data;
 
     pc->first_picture           = 1;
-    pc->dec_ctx.m.quant_precision     = 5;
-    pc->dec_ctx.m.slice_context_count = 1;
+    pc->dec_ctx.m.s.quant_precision     = 5;
+    pc->dec_ctx.m.s.slice_context_count = 1;
     pc->dec_ctx.showed_packed_warning = 1;
     return 0;
 }
diff --git a/libavcodec/mpeg4videodec.c b/libavcodec/mpeg4videodec.c
index 63fc3fa8fc..6e3981b523 100644
--- a/libavcodec/mpeg4videodec.c
+++ b/libavcodec/mpeg4videodec.c
@@ -195,7 +195,7 @@  void ff_mpeg4_pred_ac(MPVDecContext *s, int16_t *block, int n, int dir)
  */
 static inline int mpeg4_is_resync(Mpeg4DecContext *ctx)
 {
-    MPVDecContext *const s = &ctx->m;
+    MPVDecContext *const s = &ctx->m.s;
     int bits_count = get_bits_count(&s->gb);
     int v          = show_bits(&s->gb, 16);
 
@@ -245,7 +245,7 @@  static inline int mpeg4_is_resync(Mpeg4DecContext *ctx)
 
 static int mpeg4_decode_sprite_trajectory(Mpeg4DecContext *ctx, GetBitContext *gb)
 {
-    MPVDecContext *const s = &ctx->m;
+    MPVDecContext *const s = &ctx->m.s;
     int a     = 2 << s->sprite_warping_accuracy;
     int rho   = 3  - s->sprite_warping_accuracy;
     int r     = 16 / a;
@@ -497,7 +497,7 @@  overflow:
 }
 
 static int decode_new_pred(Mpeg4DecContext *ctx, GetBitContext *gb) {
-    MPVDecContext *const s = &ctx->m;
+    MPVDecContext *const s = &ctx->m.s;
     int len = FFMIN(ctx->time_increment_bits + 3, 15);
 
     get_bits(gb, len);
@@ -514,7 +514,7 @@  static int decode_new_pred(Mpeg4DecContext *ctx, GetBitContext *gb) {
  */
 int ff_mpeg4_decode_video_packet_header(Mpeg4DecContext *ctx)
 {
-    MPVDecContext *const s = &ctx->m;
+    MPVDecContext *const s = &ctx->m.s;
 
     int mb_num_bits      = av_log2(s->mb_num - 1) + 1;
     int header_extension = 0, mb_num, len;
@@ -615,7 +615,7 @@  static void reset_studio_dc_predictors(MPVDecContext *s)
  */
 int ff_mpeg4_decode_studio_slice_header(Mpeg4DecContext *ctx)
 {
-    MPVDecContext *const s = &ctx->m;
+    MPVDecContext *const s = &ctx->m.s;
     GetBitContext *gb = &s->gb;
     unsigned vlc_len;
     uint16_t mb_num;
@@ -657,7 +657,7 @@  int ff_mpeg4_decode_studio_slice_header(Mpeg4DecContext *ctx)
  */
 static inline int get_amv(Mpeg4DecContext *ctx, int n)
 {
-    MPVDecContext *const s = &ctx->m;
+    MPVDecContext *const s = &ctx->m.s;
     int x, y, mb_v, sum, dx, dy, shift;
     int len     = 1 << (s->f_code + 4);
     const int a = s->sprite_warping_accuracy;
@@ -757,7 +757,7 @@  static inline int mpeg4_decode_dc(MPVDecContext *s, int n, int *dir_ptr)
  */
 static int mpeg4_decode_partition_a(Mpeg4DecContext *ctx)
 {
-    MPVDecContext *const s = &ctx->m;
+    MPVDecContext *const s = &ctx->m.s;
     int mb_num = 0;
     static const int8_t quant_tab[4] = { -1, -2, 1, 2 };
 
@@ -1044,7 +1044,7 @@  static int mpeg4_decode_partition_b(MPVDecContext *s, int mb_count)
  */
 int ff_mpeg4_decode_partitions(Mpeg4DecContext *ctx)
 {
-    MPVDecContext *const s = &ctx->m;
+    MPVDecContext *const s = &ctx->m.s;
     int mb_num;
     int ret;
     const int part_a_error = s->pict_type == AV_PICTURE_TYPE_I ? (ER_DC_ERROR | ER_MV_ERROR) : ER_MV_ERROR;
@@ -1111,7 +1111,7 @@  static inline int mpeg4_decode_block(Mpeg4DecContext *ctx, int16_t *block,
                                      int n, int coded, int intra,
                                      int use_intra_dc_vlc, int rvlc)
 {
-    MPVDecContext *const s = &ctx->m;
+    MPVDecContext *const s = &ctx->m.s;
     int level, i, last, run, qmul, qadd;
     int av_uninit(dc_pred_dir);
     RLTable *rl;
@@ -2133,8 +2133,9 @@  static int mpeg4_decode_studio_mb(MPVDecContext *s, int16_t block_[12][64])
     return SLICE_OK;
 }
 
-static int mpeg4_decode_gop_header(MPVMainDecContext *s, GetBitContext *gb)
+static int mpeg4_decode_gop_header(MPVMainDecContext *m, GetBitContext *gb)
 {
+    MPVDecContext *const s = &m->s;
     int hours, minutes, seconds;
 
     if (!show_bits(gb, 23)) {
@@ -2169,8 +2170,9 @@  static int mpeg4_decode_profile_level(GetBitContext *gb, int *profile, int *leve
     return 0;
 }
 
-static int mpeg4_decode_visual_object(MPVMainDecContext *s, GetBitContext *gb)
+static int mpeg4_decode_visual_object(MPVMainDecContext *m, GetBitContext *gb)
 {
+    MPVDecContext *const s = &m->s;
     int visual_object_type;
     int is_visual_object_identifier = get_bits1(gb);
 
@@ -2286,7 +2288,8 @@  static void extension_and_user_data(MPVDecContext *s, GetBitContext *gb, int id)
 
 static int decode_studio_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb)
 {
-    MPVMainDecContext *const s = &ctx->m;
+    MPVMainDecContext *const m = &ctx->m;
+    MPVDecContext *const s = &m->s;
     int width, height, aspect_ratio_info;
     int bits_per_raw_sample;
     int rgb, chroma_format;
@@ -2372,7 +2375,8 @@  static int decode_studio_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb)
 
 static int decode_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb)
 {
-    MPVMainDecContext *const s = &ctx->m;
+    MPVMainDecContext *const m = &ctx->m;
+    MPVDecContext *const s = &m->s;
     int width, height, vo_ver_id, aspect_ratio_info;
 
     /* vol header */
@@ -2747,7 +2751,7 @@  no_cplx_est:
  */
 static int decode_user_data(Mpeg4DecContext *ctx, GetBitContext *gb)
 {
-    MPVDecContext *const s = &ctx->m;
+    MPVDecContext *const s = &ctx->m.s;
     char buf[256];
     int i;
     int e;
@@ -2805,7 +2809,8 @@  static int decode_user_data(Mpeg4DecContext *ctx, GetBitContext *gb)
 int ff_mpeg4_workaround_bugs(AVCodecContext *avctx)
 {
     Mpeg4DecContext *ctx = avctx->priv_data;
-    MPVMainDecContext *const s = &ctx->m;
+    MPVMainDecContext *const m = &ctx->m;
+    MPVDecContext *const s = &m->s;
 
     if (ctx->xvid_build == -1 && ctx->divx_version == -1 && ctx->lavc_build == -1) {
         if (s->codec_tag        == AV_RL32("XVID") ||
@@ -2923,7 +2928,8 @@  int ff_mpeg4_workaround_bugs(AVCodecContext *avctx)
 static int decode_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb,
                              int parse_only)
 {
-    MPVMainDecContext *const s = &ctx->m;
+    MPVMainDecContext *const m = &ctx->m;
+    MPVDecContext *const s = &m->s;
     int time_incr, time_increment;
     int64_t pts;
 
@@ -3205,7 +3211,7 @@  end:
 
 static void decode_smpte_tc(Mpeg4DecContext *ctx, GetBitContext *gb)
 {
-    MPVDecContext *const s = &ctx->m;
+    MPVDecContext *const s = &ctx->m.s;
 
     skip_bits(gb, 16); /* Time_code[63..48] */
     check_marker(s->avctx, gb, "after Time_code[63..48]");
@@ -3224,7 +3230,8 @@  static void decode_smpte_tc(Mpeg4DecContext *ctx, GetBitContext *gb)
  */
 static int decode_studio_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb)
 {
-    MPVMainDecContext *const s = &ctx->m;
+    MPVMainDecContext *const m = &ctx->m;
+    MPVDecContext *const s = &m->s;
 
     if (get_bits_left(gb) <= 32)
         return 0;
@@ -3279,7 +3286,7 @@  static int decode_studio_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb)
 
 static int decode_studiovisualobject(Mpeg4DecContext *ctx, GetBitContext *gb)
 {
-    MPVDecContext *const s = &ctx->m;
+    MPVDecContext *const s = &ctx->m.s;
     int visual_object_type;
 
     skip_bits(gb, 4); /* visual_object_verid */
@@ -3308,7 +3315,8 @@  static int decode_studiovisualobject(Mpeg4DecContext *ctx, GetBitContext *gb)
 int ff_mpeg4_decode_picture_header(Mpeg4DecContext *ctx, GetBitContext *gb,
                                    int header, int parse_only)
 {
-    MPVMainDecContext *const s = &ctx->m;
+    MPVMainDecContext *const m = &ctx->m;
+    MPVDecContext *const s = &m->s;
     unsigned startcode, v;
     int ret;
     int vol = 0;
@@ -3419,7 +3427,7 @@  int ff_mpeg4_decode_picture_header(Mpeg4DecContext *ctx, GetBitContext *gb,
         } else if (startcode == USER_DATA_STARTCODE) {
             decode_user_data(ctx, gb);
         } else if (startcode == GOP_STARTCODE) {
-            mpeg4_decode_gop_header(s, gb);
+            mpeg4_decode_gop_header(m, gb);
         } else if (startcode == VOS_STARTCODE) {
             int profile, level;
             mpeg4_decode_profile_level(gb, &profile, &level);
@@ -3439,7 +3447,7 @@  int ff_mpeg4_decode_picture_header(Mpeg4DecContext *ctx, GetBitContext *gb,
                 if ((ret = decode_studiovisualobject(ctx, gb)) < 0)
                     return ret;
             } else
-                mpeg4_decode_visual_object(s, gb);
+                mpeg4_decode_visual_object(m, gb);
         } else if (startcode == VOP_STARTCODE) {
             break;
         }
@@ -3466,7 +3474,8 @@  end:
 int ff_mpeg4_frame_end(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
 {
     Mpeg4DecContext *ctx = avctx->priv_data;
-    MPVMainDecContext *const s = &ctx->m;
+    MPVMainDecContext *const m = &ctx->m;
+    MPVDecContext *const s = &m->s;
 
     /* divx 5.01+ bitstream reorder stuff */
     /* Since this clobbers the input buffer and hwaccel codecs still need the
@@ -3518,7 +3527,7 @@  static int mpeg4_update_thread_context(AVCodecContext *dst,
 {
     Mpeg4DecContext *s = dst->priv_data;
     const Mpeg4DecContext *s1 = src->priv_data;
-    int init = s->m.context_initialized;
+    int init = s->m.s.context_initialized;
 
     int ret = ff_mpeg_update_thread_context(dst, src);
 
@@ -3554,7 +3563,7 @@  static int mpeg4_update_thread_context(AVCodecContext *dst,
     memcpy(s->sprite_traj,  s1->sprite_traj,  sizeof(s1->sprite_traj));
 
     if (CONFIG_MPEG4_DECODER && !init && s1->xvid_build >= 0)
-        ff_xvid_idct_init(&s->m.idsp, dst);
+        ff_xvid_idct_init(&s->m.s.idsp, dst);
 
     return 0;
 }
@@ -3565,8 +3574,8 @@  static int mpeg4_update_thread_context_for_user(AVCodecContext *dst,
     MPVMainDecContext *const m = dst->priv_data;
     const MPVMainDecContext *const m1 = src->priv_data;
 
-    m->quarter_sample = m1->quarter_sample;
-    m->divx_packed    = m1->divx_packed;
+    m->s.quarter_sample = m1->s.quarter_sample;
+    m->s.divx_packed    = m1->s.divx_packed;
 
     return 0;
 }
@@ -3623,7 +3632,8 @@  static av_cold int decode_init(AVCodecContext *avctx)
 {
     static AVOnce init_static_once = AV_ONCE_INIT;
     Mpeg4DecContext *ctx = avctx->priv_data;
-    MPVMainDecContext *const s = &ctx->m;
+    MPVMainDecContext *const m = &ctx->m;
+    MPVDecContext *const s = &m->s;
     int ret;
 
     ctx->divx_version =
@@ -3646,7 +3656,7 @@  static av_cold int decode_init(AVCodecContext *avctx)
     return 0;
 }
 
-#define OFFSET(x) offsetof(MPVMainDecContext, x)
+#define OFFSET(x) offsetof(MPVMainDecContext, s.x)
 #define FLAGS AV_OPT_FLAG_EXPORT | AV_OPT_FLAG_READONLY
 static const AVOption mpeg4_options[] = {
     {"quarter_sample", "1/4 subpel MC", OFFSET(quarter_sample), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS},
diff --git a/libavcodec/mpeg4videoenc.c b/libavcodec/mpeg4videoenc.c
index a8c94c9d28..7ad93f9e4c 100644
--- a/libavcodec/mpeg4videoenc.c
+++ b/libavcodec/mpeg4videoenc.c
@@ -218,7 +218,7 @@  static inline int decide_ac_pred(MPVEncContext *s, int16_t block[6][64],
  */
 void ff_clean_mpeg4_qscales(MPVMainEncContext *m)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     int i;
     int8_t *const qscale_table = s->current_picture.qscale_table;
 
@@ -871,7 +871,7 @@  void ff_mpeg4_stuffing(PutBitContext *pbc)
 /* must be called before writing the header */
 void ff_set_mpeg4_time(MPVMainEncContext *m)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     if (s->pict_type == AV_PICTURE_TYPE_B) {
         ff_mpeg4_init_direct_mv(s);
     } else {
@@ -882,7 +882,7 @@  void ff_set_mpeg4_time(MPVMainEncContext *m)
 
 static void mpeg4_encode_gop_header(MPVMainEncContext *m)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     int64_t hours, minutes, seconds;
     int64_t time;
 
@@ -913,7 +913,7 @@  static void mpeg4_encode_gop_header(MPVMainEncContext *m)
 
 static void mpeg4_encode_visual_object_header(MPVMainEncContext *m)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     int profile_and_level_indication;
     int vo_ver_id;
 
@@ -960,7 +960,7 @@  static void mpeg4_encode_vol_header(MPVMainEncContext *m,
                                     int vo_number,
                                     int vol_number)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     int vo_ver_id, vo_type, aspect_ratio_info;
 
     if (s->max_b_frames || s->quarter_sample) {
@@ -1060,7 +1060,7 @@  static void mpeg4_encode_vol_header(MPVMainEncContext *m,
 /* write MPEG-4 VOP header */
 int ff_mpeg4_encode_picture_header(MPVMainEncContext *m, int picture_number)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     uint64_t time_incr;
     int64_t time_div, time_mod;
 
@@ -1283,7 +1283,7 @@  static av_cold int encode_init(AVCodecContext *avctx)
 {
     static AVOnce init_static_once = AV_ONCE_INIT;
     MPVMainEncContext *const m = avctx->priv_data;
-    MPVEncContext     *const s = &m->common;
+    MPVEncContext     *const s = &m->common.s;
     int ret;
 
     if (avctx->width >= (1<<13) || avctx->height >= (1<<13)) {
diff --git a/libavcodec/mpeg_er.c b/libavcodec/mpeg_er.c
index d2968b448b..b788a2f8fd 100644
--- a/libavcodec/mpeg_er.c
+++ b/libavcodec/mpeg_er.c
@@ -43,8 +43,9 @@  static void set_erpic(ERPicture *dst, Picture *src)
     dst->field_picture = src->field_picture;
 }
 
-void ff_mpeg_er_frame_start(MPVMainDecContext *s)
+void ff_mpeg_er_frame_start(MPVMainDecContext *m)
 {
+    MPVDecContext *const s = &m->s;
     ERContext *er = &s->er;
 
     set_erpic(&er->cur_pic,  s->current_picture_ptr);
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index b71e12c3d9..072a82e6b3 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -277,8 +277,10 @@  static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
 }
 
 /* init common dct for both encoder and decoder */
-static av_cold int dct_init(MPVMainContext *s)
+static av_cold int dct_init(MPVMainContext *m)
 {
+    MPVContext *const s = &m->s;
+
     ff_blockdsp_init(&s->bdsp, s->avctx);
     ff_h264chroma_init(&s->h264chroma, 8); //for lowres
     ff_hpeldsp_init(&s->hdsp, s->avctx->flags);
@@ -389,8 +391,9 @@  static int init_duplicate_context(MPVContext *s)
     return 0;
 }
 
-int ff_mpv_init_duplicate_contexts(MPVMainContext *s)
+int ff_mpv_init_duplicate_contexts(MPVMainContext *m)
 {
+    MPVContext *const s = &m->s;
     int nb_slices = s->slice_context_count, ret;
 
     /* We initialize the copies before the original so that
@@ -433,8 +436,10 @@  static void free_duplicate_context(MPVContext *s)
     s->block = NULL;
 }
 
-static void free_duplicate_contexts(MPVMainContext *s)
+static void free_duplicate_contexts(MPVMainContext *m)
 {
+    MPVContext *const s = &m->s;
+
     for (int i = 1; i < s->slice_context_count; i++) {
         free_duplicate_context(s->thread_context[i]);
         av_freep(&s->thread_context[i]);
@@ -500,8 +505,10 @@  int ff_update_duplicate_context(MPVContext *dst, const MPVContext *src)
  * The changed fields will not depend upon the
  * prior state of the MPVMainContext.
  */
-void ff_mpv_common_defaults(MPVMainContext *s)
+void ff_mpv_common_defaults(MPVMainContext *m)
 {
+    MPVContext *const s = &m->s;
+
     s->y_dc_scale_table      =
     s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
     s->chroma_qscale_table   = ff_default_chroma_qscale_table;
@@ -518,8 +525,9 @@  void ff_mpv_common_defaults(MPVMainContext *s)
     s->slice_context_count   = 1;
 }
 
-int ff_mpv_init_context_frame(MPVMainContext *s)
+int ff_mpv_init_context_frame(MPVMainContext *m)
 {
+    MPVContext *const s = &m->s;
     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
 
     s->mb_width   = (s->width + 15) / 16;
@@ -604,8 +612,10 @@  int ff_mpv_init_context_frame(MPVMainContext *s)
     return !CONFIG_MPEGVIDEODEC || s->encoding ? 0 : ff_mpeg_er_init(s);
 }
 
-static void clear_context(MPVMainContext *s)
+static void clear_context(MPVMainContext *m)
 {
+    MPVContext *const s = &m->s;
+
     memset(&s->next_picture, 0, sizeof(s->next_picture));
     memset(&s->last_picture, 0, sizeof(s->last_picture));
     memset(&s->current_picture, 0, sizeof(s->current_picture));
@@ -656,14 +666,15 @@  static void clear_context(MPVMainContext *s)
  * init common structure for both encoder and decoder.
  * this assumes that some variables like width/height are already set
  */
-av_cold int ff_mpv_common_init(MPVMainContext *s)
+av_cold int ff_mpv_common_init(MPVMainContext *m)
 {
+    MPVContext *const s = &m->s;
     int i, ret;
     int nb_slices = (HAVE_THREADS &&
                      s->avctx->active_thread_type & FF_THREAD_SLICE) ?
                     s->avctx->thread_count : 1;
 
-    clear_context(s);
+    clear_context(m);
 
     if (s->encoding && s->avctx->slices)
         nb_slices = s->avctx->slices;
@@ -694,7 +705,7 @@  av_cold int ff_mpv_common_init(MPVMainContext *s)
         av_image_check_size(s->width, s->height, 0, s->avctx))
         return AVERROR(EINVAL);
 
-    dct_init(s);
+    dct_init(m);
 
     /* set chroma shifts */
     ret = av_pix_fmt_get_chroma_sub_sample(s->avctx->pix_fmt,
@@ -717,7 +728,7 @@  av_cold int ff_mpv_common_init(MPVMainContext *s)
         !(s->new_picture.f     = av_frame_alloc()))
         goto fail_nomem;
 
-    if ((ret = ff_mpv_init_context_frame(s)))
+    if ((ret = ff_mpv_init_context_frame(m)))
         goto fail;
 
 #if FF_API_FLAG_TRUNCATED
@@ -730,7 +741,7 @@  av_cold int ff_mpv_common_init(MPVMainContext *s)
     s->slice_context_count = nb_slices;
 
 //     if (s->width && s->height) {
-    ret = ff_mpv_init_duplicate_contexts(s);
+    ret = ff_mpv_init_duplicate_contexts(m);
     if (ret < 0)
         goto fail;
 //     }
@@ -739,13 +750,15 @@  av_cold int ff_mpv_common_init(MPVMainContext *s)
  fail_nomem:
     ret = AVERROR(ENOMEM);
  fail:
-    ff_mpv_common_end(s);
+    ff_mpv_common_end(m);
     return ret;
 }
 
-void ff_mpv_free_context_frame(MPVMainContext *s)
+void ff_mpv_free_context_frame(MPVMainContext *m)
 {
-    free_duplicate_contexts(s);
+    MPVContext *const s = &m->s;
+
+    free_duplicate_contexts(m);
 
     av_freep(&s->p_field_mv_table_base);
     for (int i = 0; i < 2; i++)
@@ -768,12 +781,14 @@  void ff_mpv_free_context_frame(MPVMainContext *s)
 }
 
 /* init common structure for both encoder and decoder */
-void ff_mpv_common_end(MPVMainContext *s)
+void ff_mpv_common_end(MPVMainContext *m)
 {
-    if (!s)
+    MPVContext *s;
+    if (!m)
         return;
+    s = &m->s;
 
-    ff_mpv_free_context_frame(s);
+    ff_mpv_free_context_frame(m);
     if (s->slice_context_count > 1)
         s->slice_context_count = 1;
 
diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
index cb947ff56b..9adadc5b02 100644
--- a/libavcodec/mpegvideo.h
+++ b/libavcodec/mpegvideo.h
@@ -504,7 +504,9 @@  typedef struct MPVContext {
  * also show what needs to be resolved in order for a codec to
  * support slice threading.
  */
-typedef MPVContext MPVMainContext;
+typedef struct MPVMainContext {
+    MPVContext s;
+} MPVMainContext;
 
 /**
  * Set the given MPVMainContext to common defaults (same for encoding
diff --git a/libavcodec/mpegvideo_dec.c b/libavcodec/mpegvideo_dec.c
index 6ed0716e0d..e70e306cf9 100644
--- a/libavcodec/mpegvideo_dec.c
+++ b/libavcodec/mpegvideo_dec.c
@@ -34,9 +34,11 @@ 
 #include "mpegvideodec.h"
 #include "thread.h"
 
-void ff_mpv_decode_init(MPVMainDecContext *s, AVCodecContext *avctx)
+void ff_mpv_decode_init(MPVMainDecContext *m, AVCodecContext *avctx)
 {
-    ff_mpv_common_defaults(s);
+    MPVDecContext *const s = &m->s;
+
+    ff_mpv_common_defaults(m);
 
     s->avctx           = avctx;
     s->width           = avctx->coded_width;
@@ -51,8 +53,10 @@  void ff_mpv_decode_init(MPVMainDecContext *s, AVCodecContext *avctx)
 int ff_mpeg_update_thread_context(AVCodecContext *dst,
                                   const AVCodecContext *src)
 {
-    MPVMainDecContext *const s1 = src->priv_data;
-    MPVMainDecContext *const s  = dst->priv_data;
+    MPVMainDecContext *const m1 = src->priv_data;
+    MPVMainDecContext *const m  = dst->priv_data;
+    MPVDecContext *const s1 = &m1->s;
+    MPVDecContext *const s  = &m->s;
     int ret;
 
     if (dst == src)
@@ -76,7 +80,7 @@  int ff_mpeg_update_thread_context(AVCodecContext *dst,
 //             s->picture_range_start  += MAX_PICTURE_COUNT;
 //             s->picture_range_end    += MAX_PICTURE_COUNT;
             ff_mpv_idct_init(s);
-            if ((err = ff_mpv_common_init(s)) < 0) {
+            if ((err = ff_mpv_common_init(m)) < 0) {
                 memset(s, 0, sizeof(*s));
                 s->avctx = dst;
                 s->private_ctx = private_ctx;
@@ -88,7 +92,7 @@  int ff_mpeg_update_thread_context(AVCodecContext *dst,
     if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
         s->height = s1->height;
         s->width  = s1->width;
-        if ((ret = ff_mpv_common_frame_size_change(s)) < 0)
+        if ((ret = ff_mpv_common_frame_size_change(m)) < 0)
             return ret;
     }
 
@@ -191,14 +195,15 @@  do {\
     return 0;
 }
 
-int ff_mpv_common_frame_size_change(MPVMainDecContext *s)
+int ff_mpv_common_frame_size_change(MPVMainDecContext *m)
 {
+    MPVDecContext *const s = &m->s;
     int err = 0;
 
     if (!s->context_initialized)
         return AVERROR(EINVAL);
 
-    ff_mpv_free_context_frame(s);
+    ff_mpv_free_context_frame(m);
 
     if (s->picture)
         for (int i = 0; i < MAX_PICTURE_COUNT; i++)
@@ -225,14 +230,14 @@  int ff_mpv_common_frame_size_change(MPVMainDecContext *s)
     if (err < 0)
         goto fail;
 
-    if ((err = ff_mpv_init_context_frame(s)))
+    if ((err = ff_mpv_init_context_frame(m)))
         goto fail;
 
     memset(s->thread_context, 0, sizeof(s->thread_context));
     s->thread_context[0]   = s;
 
     if (s->width && s->height) {
-        err = ff_mpv_init_duplicate_contexts(s);
+        err = ff_mpv_init_duplicate_contexts(m);
         if (err < 0)
             goto fail;
     }
@@ -240,7 +245,7 @@  int ff_mpv_common_frame_size_change(MPVMainDecContext *s)
 
     return 0;
  fail:
-    ff_mpv_free_context_frame(s);
+    ff_mpv_free_context_frame(m);
     s->context_reinit = 1;
     return err;
 }
@@ -273,8 +278,9 @@  static void gray_frame(AVFrame *frame)
  * generic function called after decoding
  * the header and before a frame is decoded.
  */
-int ff_mpv_frame_start(MPVMainDecContext *s, AVCodecContext *avctx)
+int ff_mpv_frame_start(MPVMainDecContext *m, AVCodecContext *avctx)
 {
+    MPVDecContext *const s = &m->s;
     Picture *pic;
     int idx, ret;
 
@@ -497,8 +503,9 @@  int ff_mpv_frame_start(MPVMainDecContext *s, AVCodecContext *avctx)
 }
 
 /* called after a frame has been decoded. */
-void ff_mpv_frame_end(MPVMainDecContext *s)
+void ff_mpv_frame_end(MPVMainDecContext *m)
 {
+    MPVDecContext *const s = &m->s;
     emms_c();
 
     if (s->current_picture.reference)
@@ -512,8 +519,9 @@  void ff_print_debug_info(MPVDecContext *s, Picture *p, AVFrame *pict)
                          s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
 }
 
-int ff_mpv_export_qp_table(MPVMainDecContext *s, AVFrame *f, Picture *p, int qp_type)
+int ff_mpv_export_qp_table(MPVMainDecContext *m, AVFrame *f, Picture *p, int qp_type)
 {
+    MPVDecContext *const s = &m->s;
     AVVideoEncParams *par;
     int mult = (qp_type == FF_QSCALE_TYPE_MPEG1) ? 2 : 1;
     unsigned int nb_mb = p->alloc_mb_height * p->alloc_mb_width;
@@ -552,7 +560,8 @@  void ff_mpeg_draw_horiz_band(MPVDecContext *s, int y, int h)
 
 void ff_mpeg_flush(AVCodecContext *avctx)
 {
-    MPVMainDecContext *const s = avctx->priv_data;
+    MPVMainDecContext *const m = avctx->priv_data;
+    MPVDecContext *const s = &m->s;
 
     if (!s->picture)
         return;
diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
index 6c40bdb39e..656aab04cf 100644
--- a/libavcodec/mpegvideo_enc.c
+++ b/libavcodec/mpegvideo_enc.c
@@ -239,7 +239,7 @@  void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
  */
 void ff_init_qscale_tab(MPVMainEncContext *m)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     int8_t * const qscale_table = s->current_picture.qscale_table;
     int i;
 
@@ -281,7 +281,7 @@  static void mpv_encode_init_static(void)
 static void mpv_encode_defaults(MPVMainEncContext *m)
 {
     MPVMainContext *const com = &m->common;
-    MPVEncContext *const s = com;
+    MPVEncContext *const s = &com->s;
     static AVOnce init_static_once = AV_ONCE_INIT;
 
     ff_mpv_common_defaults(com);
@@ -317,7 +317,8 @@  av_cold int ff_dct_encode_init(MPVEncContext *s)
 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
 {
     MPVMainEncContext *const m = avctx->priv_data;
-    MPVMainContext *const s = &m->common;
+    MPVMainContext *const m2 = &m->common;
+    MPVEncContext *const s = &m2->s;
     AVCPBProperties *cpb_props;
     int i, ret, mv_table_size, mb_array_size;
 
@@ -811,7 +812,7 @@  av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
 
     /* init */
     ff_mpv_idct_init(s);
-    if ((ret = ff_mpv_common_init(s)) < 0)
+    if ((ret = ff_mpv_common_init(m2)) < 0)
         return ret;
 
     ff_fdctdsp_init(&s->fdsp, avctx);
@@ -986,7 +987,8 @@  av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
 {
     MPVMainEncContext *const m = avctx->priv_data;
-    MPVMainContext  *const s = &m->common;
+    MPVMainContext *const m2 = &m->common;
+    MPVEncContext *const s = &m2->s;
 
     ff_rate_control_uninit(m);
 
@@ -1069,7 +1071,7 @@  static int alloc_picture(MPVEncContext *s, Picture *pic, int shared)
 static int load_input_picture(MPVMainEncContext *m, const AVFrame *pic_arg)
 {
     MPVMainContext *const com = &m->common;
-    MPVEncContext *const s = com;
+    MPVEncContext *const s = &com->s;
     Picture *pic = NULL;
     int64_t pts;
     int i, display_picture_number = 0, ret;
@@ -1221,7 +1223,7 @@  static int load_input_picture(MPVMainEncContext *m, const AVFrame *pic_arg)
 
 static int skip_check(MPVMainEncContext *m, Picture *p, Picture *ref)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     int x, y, plane;
     int score = 0;
     int64_t score64 = 0;
@@ -1284,7 +1286,7 @@  static int encode_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
 
 static int estimate_best_b_count(MPVMainEncContext *m)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     AVPacket *pkt;
     const int scale = m->brd_scale;
     int width  = s->width  >> scale;
@@ -1431,7 +1433,8 @@  fail:
 
 static int select_input_picture(MPVMainEncContext *m)
 {
-    MPVEncContext *const s = &m->common;
+    MPVMainContext *const m2 = &m->common;
+    MPVEncContext *const s = &m2->s;
     int i, ret;
 
     for (i = 1; i < MAX_PICTURE_COUNT; i++)
@@ -1609,7 +1612,7 @@  no_output_pic:
 
 static void frame_end(MPVMainEncContext *m)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     if (s->unrestricted_mv &&
         s->current_picture.reference &&
         !s->intra_only) {
@@ -1647,7 +1650,7 @@  static void frame_end(MPVMainEncContext *m)
 
 static void update_noise_reduction(MPVMainEncContext *m)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     int intra, i;
 
     for (intra = 0; intra < 2; intra++) {
@@ -1669,7 +1672,7 @@  static void update_noise_reduction(MPVMainEncContext *m)
 
 static int frame_start(MPVMainEncContext *m)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     int ret;
 
     /* mark & release old frames */
@@ -1743,7 +1746,8 @@  int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
                           const AVFrame *pic_arg, int *got_packet)
 {
     MPVMainEncContext *const m = avctx->priv_data;
-    MPVEncContext *const s = &m->common;
+    MPVMainContext *const m2 = &m->common;
+    MPVEncContext *const s = &m2->s;
     int i, stuffing_count, ret;
     int context_count = s->slice_context_count;
 
@@ -3497,7 +3501,7 @@  static void merge_context_after_encode(MPVEncContext *dst, MPVEncContext *src)
 
 static int estimate_qp(MPVMainEncContext *m, int dry_run)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     if (m->next_lambda) {
         s->current_picture_ptr->f->quality =
         s->current_picture.f->quality = m->next_lambda;
@@ -3537,7 +3541,7 @@  static int estimate_qp(MPVMainEncContext *m, int dry_run)
 /* must be called before writing the header */
 static void set_frame_distances(MPVMainEncContext *m)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
     s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
 
@@ -3553,7 +3557,8 @@  static void set_frame_distances(MPVMainEncContext *m)
 
 static int encode_picture(MPVMainEncContext *m, int picture_number)
 {
-    MPVEncContext *const s = &m->common;
+    MPVMainContext *const m2 = &m->common;
+    MPVEncContext *const s = &m2->s;
     int i, ret;
     int bits;
     int context_count = s->slice_context_count;
diff --git a/libavcodec/mpegvideoenc.h b/libavcodec/mpegvideoenc.h
index 5c579cc155..6dc940d3aa 100644
--- a/libavcodec/mpegvideoenc.h
+++ b/libavcodec/mpegvideoenc.h
@@ -149,7 +149,7 @@  typedef struct MPVMainEncContext {
 { "msad",   "Sum of absolute differences, median predicted", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_MEDIAN_SAD }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, "cmp_func" }
 
 #define FF_MPV_MAIN_OFFSET(x) offsetof(MPVMainEncContext, x)
-#define FF_MPV_OFFSET(x) FF_MPV_MAIN_OFFSET(common.x)
+#define FF_MPV_OFFSET(x) FF_MPV_MAIN_OFFSET(common.s.x)
 #define FF_MPV_OPT_FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM)
 #define FF_MPV_COMMON_OPTS \
 FF_MPV_OPT_CMP_FUNC, \
diff --git a/libavcodec/msmpeg4.c b/libavcodec/msmpeg4.c
index 3658df6b08..8c364ef356 100644
--- a/libavcodec/msmpeg4.c
+++ b/libavcodec/msmpeg4.c
@@ -112,9 +112,10 @@  static av_cold void msmpeg4_common_init_static(void)
     init_h263_dc_for_msmpeg4();
 }
 
-av_cold void ff_msmpeg4_common_init(MPVMainContext *s)
+av_cold void ff_msmpeg4_common_init(MPVMainContext *m)
 {
     static AVOnce init_static_once = AV_ONCE_INIT;
+    MPVContext *const s = &m->s;
 
     switch(s->msmpeg4_version){
     case 1:
diff --git a/libavcodec/msmpeg4dec.c b/libavcodec/msmpeg4dec.c
index c8527d18dc..f02181e075 100644
--- a/libavcodec/msmpeg4dec.c
+++ b/libavcodec/msmpeg4dec.c
@@ -296,7 +296,8 @@  static int msmpeg4v34_decode_mb(MPVDecContext *s, int16_t block[6][64])
 /* init all vlc decoding tables */
 av_cold int ff_msmpeg4_decode_init(AVCodecContext *avctx)
 {
-    MPVMainDecContext *const s = avctx->priv_data;
+    MPVMainDecContext *const m = avctx->priv_data;
+    MPVDecContext *const s = &m->s;
     static volatile int done = 0;
     int ret;
     MVTable *mv;
@@ -307,7 +308,7 @@  av_cold int ff_msmpeg4_decode_init(AVCodecContext *avctx)
     if (ff_h263_decode_init(avctx) < 0)
         return -1;
 
-    ff_msmpeg4_common_init(s);
+    ff_msmpeg4_common_init(m);
 
     if (!done) {
         INIT_FIRST_VLC_RL(ff_rl_table[0], 642);
@@ -399,8 +400,9 @@  av_cold int ff_msmpeg4_decode_init(AVCodecContext *avctx)
     return 0;
 }
 
-int ff_msmpeg4_decode_picture_header(MPVMainDecContext *s)
+int ff_msmpeg4_decode_picture_header(MPVMainDecContext *m)
 {
+    MPVDecContext *const s = &m->s;
     int code;
 
     // at minimum one bit per macroblock is required at least in a valid frame,
@@ -467,7 +469,7 @@  int ff_msmpeg4_decode_picture_header(MPVMainDecContext *s)
             s->dc_table_index = get_bits1(&s->gb);
             break;
         case 4:
-            ff_msmpeg4_decode_ext_header(s, (2+5+5+17+7)/8);
+            ff_msmpeg4_decode_ext_header(m, (2+5+5+17+7)/8);
 
             if(s->bit_rate > MBAC_BITRATE) s->per_mb_rl_table= get_bits1(&s->gb);
             else                           s->per_mb_rl_table= 0;
@@ -555,8 +557,9 @@  int ff_msmpeg4_decode_picture_header(MPVMainDecContext *s)
     return 0;
 }
 
-int ff_msmpeg4_decode_ext_header(MPVMainDecContext *s, int buf_size)
+int ff_msmpeg4_decode_ext_header(MPVMainDecContext *m, int buf_size)
 {
+    MPVDecContext *const s = &m->s;
     int left= buf_size*8 - get_bits_count(&s->gb);
     int length= s->msmpeg4_version>=3 ? 17 : 16;
     /* the alt_bitstream reader could read over the end so we need to check it */
diff --git a/libavcodec/msmpeg4enc.c b/libavcodec/msmpeg4enc.c
index f8080a194c..b25c0df063 100644
--- a/libavcodec/msmpeg4enc.c
+++ b/libavcodec/msmpeg4enc.c
@@ -137,7 +137,7 @@  static av_cold void msmpeg4_encode_init_static(void)
 av_cold void ff_msmpeg4_encode_init(MPVMainEncContext *m)
 {
     static AVOnce init_static_once = AV_ONCE_INIT;
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
 
     ff_msmpeg4_common_init(&m->common);
     if (s->msmpeg4_version >= 4) {
@@ -152,7 +152,7 @@  av_cold void ff_msmpeg4_encode_init(MPVMainEncContext *m)
 static void find_best_tables(MSMPEG4EncContext *ms)
 {
     MPVMainEncContext *const m = &ms->s;
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     int i;
     int best        = 0, best_size        = INT_MAX;
     int chroma_best = 0, best_chroma_size = INT_MAX;
@@ -219,7 +219,7 @@  static void find_best_tables(MSMPEG4EncContext *ms)
 void ff_msmpeg4_encode_picture_header(MPVMainEncContext *m, int picture_number)
 {
     MSMPEG4EncContext *const ms = (MSMPEG4EncContext*)m;
-    MPVEncContext     *const  s = &m->common;
+    MPVEncContext     *const  s = &m->common.s;
 
     find_best_tables(ms);
 
diff --git a/libavcodec/mss2.c b/libavcodec/mss2.c
index 2a89bd47e8..66ce266b51 100644
--- a/libavcodec/mss2.c
+++ b/libavcodec/mss2.c
@@ -382,7 +382,8 @@  static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
     MSS2Context *ctx  = avctx->priv_data;
     MSS12Context *c   = &ctx->c;
     VC1Context *v     = avctx->priv_data;
-    MPVMainDecContext *const s = &v->s;
+    MPVMainDecContext *const m = &v->s;
+    MPVDecContext *const s = &m->s;
     AVFrame *f;
     int ret;
 
@@ -394,24 +395,24 @@  static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
     s->loop_filter = avctx->skip_loop_filter < AVDISCARD_ALL;
 
     if (ff_vc1_parse_frame_header(v, &s->gb) < 0) {
-        av_log(v->s.avctx, AV_LOG_ERROR, "header error\n");
+        av_log(s->avctx, AV_LOG_ERROR, "header error\n");
         return AVERROR_INVALIDDATA;
     }
 
     if (s->pict_type != AV_PICTURE_TYPE_I) {
-        av_log(v->s.avctx, AV_LOG_ERROR, "expected I-frame\n");
+        av_log(s->avctx, AV_LOG_ERROR, "expected I-frame\n");
         return AVERROR_INVALIDDATA;
     }
 
     avctx->pix_fmt = AV_PIX_FMT_YUV420P;
 
-    if ((ret = ff_mpv_frame_start(s, avctx)) < 0) {
-        av_log(v->s.avctx, AV_LOG_ERROR, "ff_mpv_frame_start error\n");
+    if ((ret = ff_mpv_frame_start(m, avctx)) < 0) {
+        av_log(s->avctx, AV_LOG_ERROR, "ff_mpv_frame_start error\n");
         avctx->pix_fmt = AV_PIX_FMT_RGB24;
         return ret;
     }
 
-    ff_mpeg_er_frame_start(s);
+    ff_mpeg_er_frame_start(m);
 
     v->end_mb_x = (w + 15) >> 4;
     s->end_mb_y = (h + 15) >> 4;
@@ -425,12 +426,12 @@  static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
     if (v->end_mb_x == s->mb_width && s->end_mb_y == s->mb_height) {
         ff_er_frame_end(&s->er);
     } else {
-        av_log(v->s.avctx, AV_LOG_WARNING,
+        av_log(s->avctx, AV_LOG_WARNING,
                "disabling error correction due to block count mismatch %dx%d != %dx%d\n",
                v->end_mb_x, s->end_mb_y, s->mb_width, s->mb_height);
     }
 
-    ff_mpv_frame_end(s);
+    ff_mpv_frame_end(m);
 
     f = s->current_picture.f;
 
@@ -439,7 +440,7 @@  static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
         ctx->dsp.upsample_plane(f->data[1], f->linesize[1], w+1 >> 1, h+1 >> 1);
         ctx->dsp.upsample_plane(f->data[2], f->linesize[2], w+1 >> 1, h+1 >> 1);
     } else if (v->respic)
-        avpriv_request_sample(v->s.avctx,
+        avpriv_request_sample(s->avctx,
                               "Asymmetric WMV9 rectangle subsampling");
 
     av_assert0(f->linesize[1] == f->linesize[2]);
@@ -750,7 +751,7 @@  static av_cold int wmv9_init(AVCodecContext *avctx)
     VC1Context *v = avctx->priv_data;
     int ret;
 
-    v->s.avctx    = avctx;
+    v->s.s.avctx  = avctx;
 
     ff_vc1_init_common(v);
 
@@ -782,7 +783,7 @@  static av_cold int wmv9_init(AVCodecContext *avctx)
     v->resync_marker   = 0;
     v->rangered        = 0;
 
-    v->s.max_b_frames = avctx->max_b_frames = 0;
+    v->s.s.max_b_frames = avctx->max_b_frames = 0;
     v->quantizer_mode = 0;
 
     v->finterpflag = 0;
@@ -796,8 +797,8 @@  static av_cold int wmv9_init(AVCodecContext *avctx)
         return ret;
 
     /* error concealment */
-    v->s.me.qpel_put = v->s.qdsp.put_qpel_pixels_tab;
-    v->s.me.qpel_avg = v->s.qdsp.avg_qpel_pixels_tab;
+    v->s.s.me.qpel_put = v->s.s.qdsp.put_qpel_pixels_tab;
+    v->s.s.me.qpel_avg = v->s.s.qdsp.avg_qpel_pixels_tab;
 
     return 0;
 }
diff --git a/libavcodec/nvdec_mpeg4.c b/libavcodec/nvdec_mpeg4.c
index b00e415b5c..5c37dba3e4 100644
--- a/libavcodec/nvdec_mpeg4.c
+++ b/libavcodec/nvdec_mpeg4.c
@@ -30,7 +30,7 @@ 
 static int nvdec_mpeg4_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
 {
     Mpeg4DecContext *m = avctx->priv_data;
-    MPVDecContext *const s = &m->m;
+    MPVDecContext *const s = &m->m.s;
 
     NVDECContext      *ctx = avctx->internal->hwaccel_priv_data;
     CUVIDPICPARAMS     *pp = &ctx->pic_params;
diff --git a/libavcodec/nvdec_vc1.c b/libavcodec/nvdec_vc1.c
index 9dc9f49b6a..afec5c1f7c 100644
--- a/libavcodec/nvdec_vc1.c
+++ b/libavcodec/nvdec_vc1.c
@@ -28,7 +28,7 @@ 
 static int nvdec_vc1_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
 {
     VC1Context *v = avctx->priv_data;
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
 
     NVDECContext      *ctx = avctx->internal->hwaccel_priv_data;
     CUVIDPICPARAMS     *pp = &ctx->pic_params;
@@ -87,7 +87,7 @@  static int nvdec_vc1_start_frame(AVCodecContext *avctx, const uint8_t *buffer, u
             .extended_mv       = v->extended_mv,
             .dquant            = v->dquant,
             .vstransform       = v->vstransform,
-            .loopfilter        = v->s.loop_filter,
+            .loopfilter        = v->s.s.loop_filter,
             .fastuvmc          = v->fastuvmc,
             .overlap           = v->overlap,
             .quantizer         = v->quantizer_mode,
diff --git a/libavcodec/ratecontrol.c b/libavcodec/ratecontrol.c
index bbaa9191a0..bdf5c82aa1 100644
--- a/libavcodec/ratecontrol.c
+++ b/libavcodec/ratecontrol.c
@@ -37,7 +37,7 @@ 
 
 void ff_write_pass1_stats(MPVMainEncContext *m)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     snprintf(s->avctx->stats_out, 256,
              "in:%d out:%d type:%d q:%d itex:%d ptex:%d mv:%d misc:%d "
              "fcode:%d bcode:%d mc-var:%"PRId64" var:%"PRId64" icount:%d skipcount:%d hbits:%d;\n",
@@ -80,7 +80,7 @@  static inline double bits2qp(RateControlEntry *rce, double bits)
 
 static double get_diff_limited_q(MPVMainEncContext *m, RateControlEntry *rce, double q)
 {
-    MPVEncContext    *const s = &m->common;
+    MPVEncContext    *const s = &m->common.s;
     RateControlContext *rcc   = &m->rc_context;
     AVCodecContext *a         = s->avctx;
     const int pict_type       = rce->new_pict_type;
@@ -121,7 +121,7 @@  static double get_diff_limited_q(MPVMainEncContext *m, RateControlEntry *rce, do
 static void get_qminmax(int *qmin_ret, int *qmax_ret,
                         MPVMainEncContext *m, int pict_type)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     int qmin = m->lmin;
     int qmax = m->lmax;
 
@@ -151,7 +151,7 @@  static void get_qminmax(int *qmin_ret, int *qmax_ret,
 static double modify_qscale(MPVMainEncContext *m, RateControlEntry *rce,
                             double q, int frame_num)
 {
-    MPVEncContext   *const s = &m->common;
+    MPVEncContext   *const s = &m->common.s;
     RateControlContext *rcc  = &m->rc_context;
     const double buffer_size = s->avctx->rc_buffer_size;
     const double fps         = get_fps(s->avctx);
@@ -243,7 +243,7 @@  static double modify_qscale(MPVMainEncContext *m, RateControlEntry *rce,
 static double get_qscale(MPVMainEncContext *m, RateControlEntry *rce,
                          double rate_factor, int frame_num)
 {
-    MPVEncContext  *const s = &m->common;
+    MPVEncContext  *const s = &m->common.s;
     RateControlContext *rcc = &m->rc_context;
     AVCodecContext *a       = s->avctx;
     const int pict_type     = rce->new_pict_type;
@@ -316,7 +316,7 @@  static double get_qscale(MPVMainEncContext *m, RateControlEntry *rce,
 
 static int init_pass2(MPVMainEncContext *m)
 {
-    MPVEncContext  *const s = &m->common;
+    MPVEncContext  *const s = &m->common.s;
     RateControlContext *rcc = &m->rc_context;
     AVCodecContext *a       = s->avctx;
     int i, toobig;
@@ -478,7 +478,7 @@  static int init_pass2(MPVMainEncContext *m)
 
 av_cold int ff_rate_control_init(MPVMainEncContext *m)
 {
-    MPVEncContext  *const s = &m->common;
+    MPVEncContext  *const s = &m->common.s;
     RateControlContext *rcc = &m->rc_context;
     int i, res;
     static const char * const const_names[] = {
@@ -688,7 +688,7 @@  av_cold void ff_rate_control_uninit(MPVMainEncContext *m)
 
 int ff_vbv_update(MPVMainEncContext *m, int frame_size)
 {
-    MPVEncContext  *const s = &m->common;
+    MPVEncContext  *const s = &m->common.s;
     RateControlContext *rcc = &m->rc_context;
     const double fps        = get_fps(s->avctx);
     const int buffer_size   = s->avctx->rc_buffer_size;
@@ -748,7 +748,7 @@  static void update_predictor(Predictor *p, double q, double var, double size)
 
 static void adaptive_quantization(MPVMainEncContext *m, double q)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     int i;
     const float lumi_masking         = s->avctx->lumi_masking / (128.0 * 128.0);
     const float dark_masking         = s->avctx->dark_masking / (128.0 * 128.0);
@@ -866,7 +866,7 @@  static void adaptive_quantization(MPVMainEncContext *m, double q)
 
 void ff_get_2pass_fcode(MPVMainEncContext *m)
 {
-    MPVEncContext  *const s = &m->common;
+    MPVEncContext  *const s = &m->common.s;
     RateControlContext *rcc = &m->rc_context;
     RateControlEntry *rce   = &rcc->entry[s->picture_number];
 
@@ -878,7 +878,7 @@  void ff_get_2pass_fcode(MPVMainEncContext *m)
 
 float ff_rate_estimate_qscale(MPVMainEncContext *m, int dry_run)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     float q;
     int qmin, qmax;
     float br_compensation;
diff --git a/libavcodec/rv10.c b/libavcodec/rv10.c
index 10450aa791..1a59b2a578 100644
--- a/libavcodec/rv10.c
+++ b/libavcodec/rv10.c
@@ -157,7 +157,8 @@  static int rv10_decode_picture_header(MPVDecContext *s)
 
 static int rv20_decode_picture_header(RVDecContext *rv, int whole_size)
 {
-    MPVMainDecContext *const s = &rv->m;
+    MPVMainDecContext *const m = &rv->m;
+    MPVDecContext *const s = &m->s;
     int seq, mb_pos, i, ret;
     int rpr_max;
 
@@ -237,7 +238,7 @@  static int rv20_decode_picture_header(RVDecContext *rv, int whole_size)
             if (whole_size < (new_w + 15)/16 * ((new_h + 15)/16) / 8)
                 return AVERROR_INVALIDDATA;
 
-            ff_mpv_common_end(s);
+            ff_mpv_common_end(m);
 
             // attempt to keep aspect during typical resolution switches
             if (!old_aspect.num)
@@ -253,7 +254,7 @@  static int rv20_decode_picture_header(RVDecContext *rv, int whole_size)
 
             s->width  = new_w;
             s->height = new_h;
-            if ((ret = ff_mpv_common_init(s)) < 0)
+            if ((ret = ff_mpv_common_init(m)) < 0)
                 return ret;
         }
 
@@ -367,7 +368,8 @@  static av_cold int rv10_decode_init(AVCodecContext *avctx)
 {
     static AVOnce init_static_once = AV_ONCE_INIT;
     RVDecContext *rv = avctx->priv_data;
-    MPVMainDecContext *const s = &rv->m;
+    MPVMainDecContext *const m = &rv->m;
+    MPVDecContext *const s = &m->s;
     int major_ver, minor_ver, micro_ver, ret;
 
     if (avctx->extradata_size < 8) {
@@ -378,7 +380,7 @@  static av_cold int rv10_decode_init(AVCodecContext *avctx)
                                    avctx->coded_height, 0, avctx)) < 0)
         return ret;
 
-    ff_mpv_decode_init(s, avctx);
+    ff_mpv_decode_init(m, avctx);
 
     s->out_format  = FMT_H263;
 
@@ -420,7 +422,7 @@  static av_cold int rv10_decode_init(AVCodecContext *avctx)
     avctx->pix_fmt = AV_PIX_FMT_YUV420P;
 
     ff_mpv_idct_init(s);
-    if ((ret = ff_mpv_common_init(s)) < 0)
+    if ((ret = ff_mpv_common_init(m)) < 0)
         return ret;
 
     ff_h263dsp_init(&s->h263dsp);
@@ -443,7 +445,8 @@  static int rv10_decode_packet(AVCodecContext *avctx, const uint8_t *buf,
                               int buf_size, int buf_size2, int whole_size)
 {
     RVDecContext *rv = avctx->priv_data;
-    MPVDecContext *const s = &rv->m;
+    MPVMainDecContext *const m = &rv->m;
+    MPVDecContext *const s = &m->s;
     int mb_count, mb_pos, left, start_mb_x, active_bits_size, ret;
 
     active_bits_size = buf_size * 8;
@@ -477,12 +480,12 @@  static int rv10_decode_packet(AVCodecContext *avctx, const uint8_t *buf,
         // FIXME write parser so we always have complete frames?
         if (s->current_picture_ptr) {
             ff_er_frame_end(&s->er);
-            ff_mpv_frame_end(s);
+            ff_mpv_frame_end(m);
             s->mb_x = s->mb_y = s->resync_mb_x = s->resync_mb_y = 0;
         }
-        if ((ret = ff_mpv_frame_start(s, avctx)) < 0)
+        if ((ret = ff_mpv_frame_start(m, avctx)) < 0)
             return ret;
-        ff_mpeg_er_frame_start(s);
+        ff_mpeg_er_frame_start(m);
     } else {
         if (s->current_picture_ptr->f->pict_type != s->pict_type) {
             av_log(s->avctx, AV_LOG_ERROR, "Slice type mismatch\n");
@@ -595,9 +598,10 @@  static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n)
 static int rv10_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                              AVPacket *avpkt)
 {
+    MPVMainDecContext *const m = avctx->priv_data;
+    MPVDecContext *const s = &m->s;
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
-    MPVMainDecContext *const s = avctx->priv_data;
     AVFrame *pict = data;
     int i, ret;
     int slice_count;
@@ -656,18 +660,18 @@  static int rv10_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 
     if (s->current_picture_ptr && s->mb_y >= s->mb_height) {
         ff_er_frame_end(&s->er);
-        ff_mpv_frame_end(s);
+        ff_mpv_frame_end(m);
 
         if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
             if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
                 return ret;
             ff_print_debug_info(s, s->current_picture_ptr, pict);
-            ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_QSCALE_TYPE_MPEG1);
+            ff_mpv_export_qp_table(m, pict, s->current_picture_ptr, FF_QSCALE_TYPE_MPEG1);
         } else if (s->last_picture_ptr) {
             if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
                 return ret;
             ff_print_debug_info(s, s->last_picture_ptr, pict);
-            ff_mpv_export_qp_table(s, pict,s->last_picture_ptr, FF_QSCALE_TYPE_MPEG1);
+            ff_mpv_export_qp_table(m, pict,s->last_picture_ptr, FF_QSCALE_TYPE_MPEG1);
         }
 
         if (s->last_picture_ptr || s->low_delay) {
diff --git a/libavcodec/rv10enc.c b/libavcodec/rv10enc.c
index ddbea6298c..4940e35de3 100644
--- a/libavcodec/rv10enc.c
+++ b/libavcodec/rv10enc.c
@@ -31,7 +31,7 @@ 
 
 int ff_rv10_encode_picture_header(MPVMainEncContext *m, int picture_number)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     int full_frame= 0;
 
     align_put_bits(&s->pb);
diff --git a/libavcodec/rv20enc.c b/libavcodec/rv20enc.c
index 06f6751549..3e26151513 100644
--- a/libavcodec/rv20enc.c
+++ b/libavcodec/rv20enc.c
@@ -34,7 +34,7 @@ 
 
 void ff_rv20_encode_picture_header(MPVMainEncContext *m, int picture_number)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     put_bits(&s->pb, 2, s->pict_type); //I 0 vs. 1 ?
     put_bits(&s->pb, 1, 0);     /* unknown bit */
     put_bits(&s->pb, 5, s->qscale);
diff --git a/libavcodec/rv30.c b/libavcodec/rv30.c
index 698765bef4..edaf4c0e1e 100644
--- a/libavcodec/rv30.c
+++ b/libavcodec/rv30.c
@@ -37,9 +37,9 @@ 
 
 static int rv30_parse_slice_header(RV34DecContext *r, GetBitContext *gb, SliceInfo *si)
 {
-    AVCodecContext *avctx = r->s.avctx;
+    AVCodecContext *avctx = r->s.s.avctx;
     int mb_bits;
-    int w = r->s.width, h = r->s.height;
+    int w = r->s.s.width, h = r->s.s.height;
     int mb_size;
     int rpr;
 
@@ -67,8 +67,8 @@  static int rv30_parse_slice_header(RV34DecContext *r, GetBitContext *gb, SliceIn
             return AVERROR(EINVAL);
         }
 
-        w = r->s.avctx->extradata[6 + rpr*2] << 2;
-        h = r->s.avctx->extradata[7 + rpr*2] << 2;
+        w = r->s.s.avctx->extradata[6 + rpr*2] << 2;
+        h = r->s.s.avctx->extradata[7 + rpr*2] << 2;
     } else {
         w = r->orig_width;
         h = r->orig_height;
@@ -93,7 +93,7 @@  static int rv30_decode_intra_types(RV34DecContext *r, GetBitContext *gb, int8_t
         for(j = 0; j < 4; j+= 2){
             unsigned code = get_interleaved_ue_golomb(gb) << 1;
             if (code > 80U*2U) {
-                av_log(r->s.avctx, AV_LOG_ERROR, "Incorrect intra prediction code\n");
+                av_log(r->s.s.avctx, AV_LOG_ERROR, "Incorrect intra prediction code\n");
                 return -1;
             }
             for(k = 0; k < 2; k++){
@@ -101,7 +101,7 @@  static int rv30_decode_intra_types(RV34DecContext *r, GetBitContext *gb, int8_t
                 int B = dst[-1] + 1;
                 *dst++ = rv30_itype_from_context[A * 90 + B * 9 + rv30_itype_code[code + k]];
                 if(dst[-1] == 9){
-                    av_log(r->s.avctx, AV_LOG_ERROR, "Incorrect intra prediction mode\n");
+                    av_log(r->s.s.avctx, AV_LOG_ERROR, "Incorrect intra prediction mode\n");
                     return -1;
                 }
             }
@@ -117,7 +117,7 @@  static int rv30_decode_mb_info(RV34DecContext *r)
 {
     static const int rv30_p_types[6] = { RV34_MB_SKIP, RV34_MB_P_16x16, RV34_MB_P_8x8, -1, RV34_MB_TYPE_INTRA, RV34_MB_TYPE_INTRA16x16 };
     static const int rv30_b_types[6] = { RV34_MB_SKIP, RV34_MB_B_DIRECT, RV34_MB_B_FORWARD, RV34_MB_B_BACKWARD, RV34_MB_TYPE_INTRA, RV34_MB_TYPE_INTRA16x16 };
-    MPVDecContext *const s = &r->s;
+    MPVDecContext *const s = &r->s.s;
     GetBitContext *gb = &s->gb;
     unsigned code = get_interleaved_ue_golomb(gb);
 
@@ -152,7 +152,7 @@  static inline void rv30_weak_loop_filter(uint8_t *src, const int step,
 
 static void rv30_loop_filter(RV34DecContext *r, int row)
 {
-    MPVDecContext *const s = &r->s;
+    MPVDecContext *const s = &r->s.s;
     int mb_pos, mb_x;
     int i, j, k;
     uint8_t *Y, *C;
diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c
index 83d601c57b..a3437099b2 100644
--- a/libavcodec/rv34.c
+++ b/libavcodec/rv34.c
@@ -346,7 +346,7 @@  static inline RV34VLC* choose_vlc_set(int quant, int mod, int type)
  */
 static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
 {
-    MPVDecContext *const s = &r->s;
+    MPVDecContext *const s = &r->s.s;
     GetBitContext *gb = &s->gb;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
     int t;
@@ -381,7 +381,7 @@  static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
  */
 static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
 {
-    MPVDecContext *const s = &r->s;
+    MPVDecContext *const s = &r->s.s;
     GetBitContext *gb = &s->gb;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
     int i, t;
@@ -459,7 +459,7 @@  static const uint8_t avail_indexes[4] = { 6, 7, 10, 11 };
  */
 static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
 {
-    MPVDecContext *const s = &r->s;
+    MPVDecContext *const s = &r->s.s;
     int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
     int A[2] = {0}, B[2], C[2];
     int i, j;
@@ -543,7 +543,7 @@  static inline void rv34_pred_b_vector(int A[2], int B[2], int C[2],
  */
 static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
 {
-    MPVDecContext *const s = &r->s;
+    MPVDecContext *const s = &r->s.s;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
     int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
     int A[2] = { 0 }, B[2] = { 0 }, C[2] = { 0 };
@@ -595,7 +595,7 @@  static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
  */
 static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
 {
-    MPVDecContext *const s = &r->s;
+    MPVDecContext *const s = &r->s.s;
     int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
     int A[2] = {0}, B[2], C[2];
     int i, j, k;
@@ -663,7 +663,7 @@  static inline void rv34_mc(RV34DecContext *r, const int block_type,
                           qpel_mc_func (*qpel_mc)[16],
                           h264_chroma_mc_func (*chroma_mc))
 {
-    MPVDecContext *const s = &r->s;
+    MPVDecContext *const s = &r->s.s;
     uint8_t *Y, *U, *V, *srcY, *srcU, *srcV;
     int dxy, mx, my, umx, umy, lx, ly, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
     int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride + mv_off;
@@ -783,24 +783,24 @@  static void rv34_mc_1mv(RV34DecContext *r, const int block_type,
 
 static void rv4_weight(RV34DecContext *r)
 {
-    r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][0](r->s.dest[0],
+    r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][0](r->s.s.dest[0],
                                                         r->tmp_b_block_y[0],
                                                         r->tmp_b_block_y[1],
                                                         r->weight1,
                                                         r->weight2,
-                                                        r->s.linesize);
-    r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[1],
+                                                        r->s.s.linesize);
+    r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.s.dest[1],
                                                         r->tmp_b_block_uv[0],
                                                         r->tmp_b_block_uv[2],
                                                         r->weight1,
                                                         r->weight2,
-                                                        r->s.uvlinesize);
-    r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.dest[2],
+                                                        r->s.s.uvlinesize);
+    r->rdsp.rv40_weight_pixels_tab[r->scaled_weight][1](r->s.s.dest[2],
                                                         r->tmp_b_block_uv[1],
                                                         r->tmp_b_block_uv[3],
                                                         r->weight1,
                                                         r->weight2,
-                                                        r->s.uvlinesize);
+                                                        r->s.s.uvlinesize);
 }
 
 static void rv34_mc_2mv(RV34DecContext *r, const int block_type)
@@ -829,11 +829,11 @@  static void rv34_mc_2mv_skip(RV34DecContext *r)
 
     for(j = 0; j < 2; j++)
         for(i = 0; i < 2; i++){
-             rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 0, r->rv30,
+             rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.s.b8_stride, 1, 1, 0, r->rv30,
                      weighted,
                      r->rdsp.put_pixels_tab,
                      r->rdsp.put_chroma_pixels_tab);
-             rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.b8_stride, 1, 1, 1, r->rv30,
+             rv34_mc(r, RV34_MB_P_8x8, i*8, j*8, i+j*r->s.s.b8_stride, 1, 1, 1, r->rv30,
                      weighted,
                      weighted ? r->rdsp.put_pixels_tab : r->rdsp.avg_pixels_tab,
                      weighted ? r->rdsp.put_chroma_pixels_tab : r->rdsp.avg_chroma_pixels_tab);
@@ -851,7 +851,7 @@  static const int num_mvs[RV34_MB_TYPES] = { 0, 0, 1, 4, 1, 1, 0, 0, 2, 2, 2, 1 }
  */
 static int rv34_decode_mv(RV34DecContext *r, int block_type)
 {
-    MPVDecContext *const s = &r->s;
+    MPVDecContext *const s = &r->s.s;
     GetBitContext *gb = &s->gb;
     int i, j, k, l;
     int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
@@ -1010,7 +1010,7 @@  static inline void rv34_process_block(RV34DecContext *r,
                                       uint8_t *pdst, int stride,
                                       int fc, int sc, int q_dc, int q_ac)
 {
-    MPVDecContext *const s = &r->s;
+    MPVDecContext *const s = &r->s.s;
     int16_t *ptr = s->block[0];
     int has_ac = rv34_decode_block(ptr, &s->gb, r->cur_vlcs,
                                    fc, sc, q_dc, q_ac, q_ac);
@@ -1025,7 +1025,7 @@  static inline void rv34_process_block(RV34DecContext *r,
 static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
 {
     LOCAL_ALIGNED_16(int16_t, block16, [16]);
-    MPVDecContext *const s = &r->s;
+    MPVDecContext *const s = &r->s.s;
     GetBitContext  *gb   = &s->gb;
     int             q_dc = rv34_qscale_tab[ r->luma_dc_quant_i[s->qscale] ],
                     q_ac = rv34_qscale_tab[s->qscale];
@@ -1087,7 +1087,7 @@  static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
 
 static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
 {
-    MPVDecContext *const s = &r->s;
+    MPVDecContext *const s = &r->s.s;
     uint8_t        *dst = s->dest[0];
     int      avail[6*8] = {0};
     int i, j, k;
@@ -1163,7 +1163,7 @@  static int is_mv_diff_gt_3(int16_t (*motion_val)[2], int step)
 
 static int rv34_set_deblock_coef(RV34DecContext *r)
 {
-    MPVDecContext *const s = &r->s;
+    MPVDecContext *const s = &r->s.s;
     int hmvmask = 0, vmvmask = 0, i, j;
     int midx = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
     int16_t (*motion_val)[2] = &s->current_picture_ptr->motion_val[0][midx];
@@ -1193,7 +1193,7 @@  static int rv34_set_deblock_coef(RV34DecContext *r)
 
 static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
 {
-    MPVDecContext *const s = &r->s;
+    MPVDecContext *const s = &r->s.s;
     GetBitContext  *gb  = &s->gb;
     uint8_t        *dst = s->dest[0];
     int16_t        *ptr = s->block[0];
@@ -1301,7 +1301,7 @@  static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
 
 static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
 {
-    MPVDecContext *const s = &r->s;
+    MPVDecContext *const s = &r->s.s;
     int cbp, dist;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
 
@@ -1346,7 +1346,7 @@  static int check_slice_end(RV34DecContext *r, MPVDecContext *s)
         return 1;
     if(!s->mb_num_left)
         return 1;
-    if(r->s.mb_skip_run > 1)
+    if(r->s.s.mb_skip_run > 1)
         return 0;
     bits = get_bits_left(&s->gb);
     if(bits <= 0 || (bits < 8 && !show_bits(&s->gb, bits)))
@@ -1369,22 +1369,22 @@  static void rv34_decoder_free(RV34DecContext *r)
 
 static int rv34_decoder_alloc(RV34DecContext *r)
 {
-    r->intra_types_stride = r->s.mb_width * 4 + 4;
+    r->intra_types_stride = r->s.s.mb_width * 4 + 4;
 
-    r->cbp_chroma       = av_mallocz(r->s.mb_stride * r->s.mb_height *
+    r->cbp_chroma       = av_mallocz(r->s.s.mb_stride * r->s.s.mb_height *
                                     sizeof(*r->cbp_chroma));
-    r->cbp_luma         = av_mallocz(r->s.mb_stride * r->s.mb_height *
+    r->cbp_luma         = av_mallocz(r->s.s.mb_stride * r->s.s.mb_height *
                                     sizeof(*r->cbp_luma));
-    r->deblock_coefs    = av_mallocz(r->s.mb_stride * r->s.mb_height *
+    r->deblock_coefs    = av_mallocz(r->s.s.mb_stride * r->s.s.mb_height *
                                     sizeof(*r->deblock_coefs));
     r->intra_types_hist = av_malloc(r->intra_types_stride * 4 * 2 *
                                     sizeof(*r->intra_types_hist));
-    r->mb_type          = av_mallocz(r->s.mb_stride * r->s.mb_height *
+    r->mb_type          = av_mallocz(r->s.s.mb_stride * r->s.s.mb_height *
                                      sizeof(*r->mb_type));
 
     if (!(r->cbp_chroma       && r->cbp_luma && r->deblock_coefs &&
           r->intra_types_hist && r->mb_type)) {
-        r->s.context_reinit = 1;
+        r->s.s.context_reinit = 1;
         rv34_decoder_free(r);
         return AVERROR(ENOMEM);
     }
@@ -1404,12 +1404,12 @@  static int rv34_decoder_realloc(RV34DecContext *r)
 
 static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int buf_size)
 {
-    MPVDecContext *const s = &r->s;
+    MPVDecContext *const s = &r->s.s;
     GetBitContext *gb = &s->gb;
     int mb_pos, slice_type;
     int res;
 
-    init_get_bits(&r->s.gb, buf, buf_size*8);
+    init_get_bits(&r->s.s.gb, buf, buf_size*8);
     res = r->parse_slice_header(r, gb, &r->si);
     if(res < 0){
         av_log(s->avctx, AV_LOG_ERROR, "Incorrect or unknown slice header\n");
@@ -1429,7 +1429,7 @@  static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int
     r->si.end = end;
     s->qscale = r->si.quant;
     s->mb_num_left = r->si.end - r->si.start;
-    r->s.mb_skip_run = 0;
+    r->s.s.mb_skip_run = 0;
 
     mb_pos = s->mb_x + s->mb_y * s->mb_width;
     if(r->si.start != mb_pos){
@@ -1488,10 +1488,11 @@  av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
 {
     static AVOnce init_static_once = AV_ONCE_INIT;
     RV34DecContext *r = avctx->priv_data;
-    MPVMainDecContext *const s = &r->s;
+    MPVMainDecContext *const m = &r->s;
+    MPVDecContext *const s = &m->s;
     int ret;
 
-    ff_mpv_decode_init(s, avctx);
+    ff_mpv_decode_init(m, avctx);
     s->out_format = FMT_H263;
 
     avctx->pix_fmt = AV_PIX_FMT_YUV420P;
@@ -1499,7 +1500,7 @@  av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
     s->low_delay = 0;
 
     ff_mpv_idct_init(s);
-    if ((ret = ff_mpv_common_init(s)) < 0)
+    if ((ret = ff_mpv_common_init(m)) < 0)
         return ret;
 
     ff_h264_pred_init(&r->h, AV_CODEC_ID_RV40, 8, 1);
@@ -1517,8 +1518,10 @@  av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
 int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
 {
     RV34DecContext *r = dst->priv_data, *r1 = src->priv_data;
-    MPVMainDecContext *const s = &r->s;
-    const MPVMainDecContext *const s1 = &r1->s;
+    MPVMainDecContext *const m = &r->s;
+    MPVDecContext *const s = &m->s;
+    const MPVMainDecContext *const m1 = &r1->s;
+    const MPVDecContext *const s1 = &m1->s;
     int err;
 
     if (dst == src || !s1->context_initialized)
@@ -1527,7 +1530,7 @@  int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecConte
     if (s->height != s1->height || s->width != s1->width || s->context_reinit) {
         s->height = s1->height;
         s->width  = s1->width;
-        if ((err = ff_mpv_common_frame_size_change(s)) < 0)
+        if ((err = ff_mpv_common_frame_size_change(m)) < 0)
             return err;
         if ((err = rv34_decoder_realloc(r)) < 0)
             return err;
@@ -1559,11 +1562,12 @@  static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n, in
 static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
 {
     RV34DecContext *r = avctx->priv_data;
-    MPVMainDecContext *const s = &r->s;
+    MPVMainDecContext *const m = &r->s;
+    MPVDecContext *const s = &m->s;
     int got_picture = 0, ret;
 
     ff_er_frame_end(&s->er);
-    ff_mpv_frame_end(s);
+    ff_mpv_frame_end(m);
     s->mb_num_left = 0;
 
     if (HAVE_THREADS && (s->avctx->active_thread_type & FF_THREAD_FRAME))
@@ -1573,13 +1577,13 @@  static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
         if ((ret = av_frame_ref(pict, s->current_picture_ptr->f)) < 0)
             return ret;
         ff_print_debug_info(s, s->current_picture_ptr, pict);
-        ff_mpv_export_qp_table(s, pict, s->current_picture_ptr, FF_QSCALE_TYPE_MPEG1);
+        ff_mpv_export_qp_table(m, pict, s->current_picture_ptr, FF_QSCALE_TYPE_MPEG1);
         got_picture = 1;
     } else if (s->last_picture_ptr) {
         if ((ret = av_frame_ref(pict, s->last_picture_ptr->f)) < 0)
             return ret;
         ff_print_debug_info(s, s->last_picture_ptr, pict);
-        ff_mpv_export_qp_table(s, pict, s->last_picture_ptr, FF_QSCALE_TYPE_MPEG1);
+        ff_mpv_export_qp_table(m, pict, s->last_picture_ptr, FF_QSCALE_TYPE_MPEG1);
         got_picture = 1;
     }
 
@@ -1603,7 +1607,8 @@  int ff_rv34_decode_frame(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     RV34DecContext *r = avctx->priv_data;
-    MPVMainDecContext *const s = &r->s;
+    MPVMainDecContext *const m = &r->s;
+    MPVDecContext *const s = &m->s;
     AVFrame *pict = data;
     SliceInfo si;
     int i, ret;
@@ -1641,7 +1646,7 @@  int ff_rv34_decode_frame(AVCodecContext *avctx,
         return AVERROR_INVALIDDATA;
     }
     init_get_bits(&s->gb, buf+offset, (buf_size-offset)*8);
-    if(r->parse_slice_header(r, &r->s.gb, &si) < 0 || si.start){
+    if(r->parse_slice_header(r, &r->s.s.gb, &si) < 0 || si.start){
         av_log(avctx, AV_LOG_ERROR, "First slice header is incorrect\n");
         return AVERROR_INVALIDDATA;
     }
@@ -1663,7 +1668,7 @@  int ff_rv34_decode_frame(AVCodecContext *avctx,
                    s->mb_num_left);
             if (!s->context_reinit)
                 ff_er_frame_end(&s->er);
-            ff_mpv_frame_end(s);
+            ff_mpv_frame_end(m);
         }
 
         if (s->width != si.width || s->height != si.height || s->context_reinit) {
@@ -1684,7 +1689,7 @@  int ff_rv34_decode_frame(AVCodecContext *avctx,
             err = ff_set_dimensions(s->avctx, s->width, s->height);
             if (err < 0)
                 return err;
-            if ((err = ff_mpv_common_frame_size_change(s)) < 0)
+            if ((err = ff_mpv_common_frame_size_change(m)) < 0)
                 return err;
             if ((err = rv34_decoder_realloc(r)) < 0)
                 return err;
@@ -1692,9 +1697,9 @@  int ff_rv34_decode_frame(AVCodecContext *avctx,
         if (faulty_b)
             return AVERROR_INVALIDDATA;
         s->pict_type = si.type ? si.type : AV_PICTURE_TYPE_I;
-        if (ff_mpv_frame_start(s, s->avctx) < 0)
+        if (ff_mpv_frame_start(m, s->avctx) < 0)
             return -1;
-        ff_mpeg_er_frame_start(s);
+        ff_mpeg_er_frame_start(m);
         if (!r->tmp_b_block_base) {
             int i;
 
@@ -1761,7 +1766,7 @@  int ff_rv34_decode_frame(AVCodecContext *avctx,
         size = offset1 - offset;
 
         r->si.end = s->mb_width * s->mb_height;
-        s->mb_num_left = r->s.mb_x + r->s.mb_y*r->s.mb_width - r->si.start;
+        s->mb_num_left = r->s.s.mb_x + r->s.s.mb_y*r->s.s.mb_width - r->si.start;
 
         if(i+1 < slice_count){
             int offset2 = get_slice_offset(avctx, slices_hdr, i+2, slice_count, buf_size);
@@ -1770,7 +1775,7 @@  int ff_rv34_decode_frame(AVCodecContext *avctx,
                 break;
             }
             init_get_bits(&s->gb, buf+offset1, (buf_size-offset1)*8);
-            if(r->parse_slice_header(r, &r->s.gb, &si) < 0){
+            if(r->parse_slice_header(r, &r->s.s.gb, &si) < 0){
                 size = offset2 - offset;
             }else
                 r->si.end = si.start;
@@ -1796,7 +1801,7 @@  int ff_rv34_decode_frame(AVCodecContext *avctx,
             /* always mark the current frame as finished, frame-mt supports
              * only complete frames */
             ff_er_frame_end(&s->er);
-            ff_mpv_frame_end(s);
+            ff_mpv_frame_end(m);
             s->mb_num_left = 0;
             ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
             return AVERROR_INVALIDDATA;
diff --git a/libavcodec/rv40.c b/libavcodec/rv40.c
index cc069b3f2b..89b6a8c46a 100644
--- a/libavcodec/rv40.c
+++ b/libavcodec/rv40.c
@@ -136,7 +136,7 @@  static void rv40_parse_picture_size(GetBitContext *gb, int *w, int *h)
 static int rv40_parse_slice_header(RV34DecContext *r, GetBitContext *gb, SliceInfo *si)
 {
     int mb_bits;
-    int w = r->s.width, h = r->s.height;
+    int w = r->s.s.width, h = r->s.s.height;
     int mb_size;
     int ret;
 
@@ -153,7 +153,7 @@  static int rv40_parse_slice_header(RV34DecContext *r, GetBitContext *gb, SliceIn
     si->pts = get_bits(gb, 13);
     if(!si->type || !get_bits1(gb))
         rv40_parse_picture_size(gb, &w, &h);
-    if ((ret = av_image_check_size(w, h, 0, r->s.avctx)) < 0)
+    if ((ret = av_image_check_size(w, h, 0, r->s.s.avctx)) < 0)
         return ret;
     si->width  = w;
     si->height = h;
@@ -169,7 +169,7 @@  static int rv40_parse_slice_header(RV34DecContext *r, GetBitContext *gb, SliceIn
  */
 static int rv40_decode_intra_types(RV34DecContext *r, GetBitContext *gb, int8_t *dst)
 {
-    MPVDecContext *const s = &r->s;
+    MPVDecContext *const s = &r->s.s;
     int i, j, k, v;
     int A, B, C;
     int pattern;
@@ -231,19 +231,19 @@  static int rv40_decode_intra_types(RV34DecContext *r, GetBitContext *gb, int8_t
  */
 static int rv40_decode_mb_info(RV34DecContext *r)
 {
-    MPVDecContext *const s = &r->s;
+    MPVDecContext *const s = &r->s.s;
     GetBitContext *gb = &s->gb;
     int q, i;
     int prev_type = 0;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
 
-    if(!r->s.mb_skip_run) {
-        r->s.mb_skip_run = get_interleaved_ue_golomb(gb) + 1;
-        if(r->s.mb_skip_run > (unsigned)s->mb_num)
+    if (!s->mb_skip_run) {
+        s->mb_skip_run = get_interleaved_ue_golomb(gb) + 1;
+        if (s->mb_skip_run > (unsigned)s->mb_num)
             return -1;
     }
 
-    if(--r->s.mb_skip_run)
+    if (--s->mb_skip_run)
          return RV34_MB_SKIP;
 
     if(r->avail_cache[6-4]){
@@ -341,7 +341,7 @@  static void rv40_adaptive_loop_filter(RV34DSPContext *rdsp,
  */
 static void rv40_loop_filter(RV34DecContext *r, int row)
 {
-    MPVDecContext *const s = &r->s;
+    MPVDecContext *const s = &r->s.s;
     int mb_pos, mb_x;
     int i, j, k;
     uint8_t *Y, *C;
diff --git a/libavcodec/snow.c b/libavcodec/snow.c
index de9741a888..073943a594 100644
--- a/libavcodec/snow.c
+++ b/libavcodec/snow.c
@@ -689,7 +689,7 @@  int ff_snow_frame_start(SnowContext *s){
 av_cold void ff_snow_common_end(SnowContext *s)
 {
     int plane_index, level, orientation, i;
-    MPVEncContext *const m = &s->m.common;
+    MPVEncContext *const m = &s->m.common.s;
 
     av_freep(&s->spatial_dwt_buffer);
     av_freep(&s->temp_dwt_buffer);
diff --git a/libavcodec/snowenc.c b/libavcodec/snowenc.c
index ce5baa754f..7200d94f65 100644
--- a/libavcodec/snowenc.c
+++ b/libavcodec/snowenc.c
@@ -39,7 +39,7 @@ 
 static av_cold int encode_init(AVCodecContext *avctx)
 {
     SnowContext *s = avctx->priv_data;
-    MPVEncContext *const mpv = &s->m.common;
+    MPVEncContext *const mpv = &s->m.common.s;
     int plane_index, ret;
     int i;
 
@@ -217,7 +217,7 @@  static inline int get_penalty_factor(int lambda, int lambda2, int type){
 #define FLAG_QPEL   1 //must be 1
 
 static int encode_q_branch(SnowContext *s, int level, int x, int y){
-    MPVEncContext *const mpv = &s->m.common;
+    MPVEncContext *const mpv = &s->m.common.s;
     uint8_t p_buffer[1024];
     uint8_t i_buffer[1024];
     uint8_t p_state[sizeof(s->block_state)];
@@ -511,7 +511,7 @@  static int get_dc(SnowContext *s, int mb_x, int mb_y, int plane_index){
     const int obmc_stride= plane_index ? (2*block_size)>>s->chroma_h_shift : 2*block_size;
     const int ref_stride= s->current_picture->linesize[plane_index];
     uint8_t *src= s-> input_picture->data[plane_index];
-    IDWTELEM *dst= (IDWTELEM*)s->m.common.sc.obmc_scratchpad + plane_index*block_size*block_size*4; //FIXME change to unsigned
+    IDWTELEM *dst= (IDWTELEM*)s->m.common.s.sc.obmc_scratchpad + plane_index*block_size*block_size*4; //FIXME change to unsigned
     const int b_stride = s->b_width << s->block_max_depth;
     const int w= p->width;
     const int h= p->height;
@@ -598,7 +598,7 @@  static inline int get_block_bits(SnowContext *s, int x, int y, int w){
 }
 
 static int get_block_rd(SnowContext *s, int mb_x, int mb_y, int plane_index, uint8_t (*obmc_edged)[MB_SIZE * 2]){
-    MPVEncContext *const mpv = &s->m.common;
+    MPVEncContext *const mpv = &s->m.common.s;
     Plane *p= &s->plane[plane_index];
     const int block_size = MB_SIZE >> s->block_max_depth;
     const int block_w    = plane_index ? block_size>>s->chroma_h_shift : block_size;
@@ -700,7 +700,7 @@  static int get_block_rd(SnowContext *s, int mb_x, int mb_y, int plane_index, uin
 }
 
 static int get_4block_rd(SnowContext *s, int mb_x, int mb_y, int plane_index){
-    MPVEncContext *const mpv = &s->m.common;
+    MPVEncContext *const mpv = &s->m.common.s;
     int i, y2;
     Plane *p= &s->plane[plane_index];
     const int block_size = MB_SIZE >> s->block_max_depth;
@@ -1481,7 +1481,7 @@  static int qscale2qlog(int qscale){
 
 static int ratecontrol_1pass(SnowContext *s, AVFrame *pict)
 {
-    MPVEncContext *const mpv = &s->m.common;
+    MPVEncContext *const mpv = &s->m.common.s;
     /* Estimate the frame's complexity as a sum of weighted dwt coefficients.
      * FIXME we know exact mv bits at this point,
      * but ratecontrol isn't set up to include them. */
@@ -1562,7 +1562,7 @@  static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
                         const AVFrame *pict, int *got_packet)
 {
     SnowContext *s = avctx->priv_data;
-    MPVEncContext *const mpv = &s->m.common;
+    MPVEncContext *const mpv = &s->m.common.s;
     RangeCoder * const c= &s->c;
     AVFrame *pic;
     const int width= s->avctx->width;
diff --git a/libavcodec/speedhqenc.c b/libavcodec/speedhqenc.c
index 6f89927d68..cfd5beea04 100644
--- a/libavcodec/speedhqenc.c
+++ b/libavcodec/speedhqenc.c
@@ -91,7 +91,7 @@  static av_cold void speedhq_init_static_data(void)
 
 av_cold int ff_speedhq_encode_init(MPVMainEncContext *m)
 {
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
     static AVOnce init_static_once = AV_ONCE_INIT;
 
     if (s->width > 65500 || s->height > 65500) {
@@ -129,7 +129,7 @@  av_cold int ff_speedhq_encode_init(MPVMainEncContext *m)
 void ff_speedhq_encode_picture_header(MPVMainEncContext *m)
 {
     SpeedHQEncContext *ctx = (SpeedHQEncContext*)m;
-    MPVEncContext *const s = &m->common;
+    MPVEncContext *const s = &m->common.s;
 
     put_bits_le(&s->pb, 8, 100 - s->qscale * 2);  /* FIXME why doubled */
     put_bits_le(&s->pb, 24, 4);  /* no second field */
diff --git a/libavcodec/svq1enc.c b/libavcodec/svq1enc.c
index 5e26c1073a..3a84bb6a07 100644
--- a/libavcodec/svq1enc.c
+++ b/libavcodec/svq1enc.c
@@ -253,7 +253,7 @@  static int svq1_encode_plane(SVQ1EncContext *s, int plane,
                              unsigned char *decoded_plane,
                              int width, int height, int src_stride, int stride)
 {
-    MPVEncContext *const mpv = &s->m.common;
+    MPVEncContext *const mpv = &s->m.common.s;
     int x, y;
     int i;
     int block_width, block_height;
@@ -488,7 +488,7 @@  static int svq1_encode_plane(SVQ1EncContext *s, int plane,
 static av_cold int svq1_encode_end(AVCodecContext *avctx)
 {
     SVQ1EncContext *const s = avctx->priv_data;
-    MPVEncContext *const mpv = &s->m.common;
+    MPVEncContext *const mpv = &s->m.common.s;
     int i;
 
     if (avctx->frame_number)
@@ -520,7 +520,7 @@  static av_cold int svq1_encode_end(AVCodecContext *avctx)
 static av_cold int svq1_encode_init(AVCodecContext *avctx)
 {
     SVQ1EncContext *const s = avctx->priv_data;
-    MPVEncContext *const mpv = &s->m.common;
+    MPVEncContext *const mpv = &s->m.common.s;
     int ret;
 
     if (avctx->width >= 4096 || avctx->height >= 4096) {
diff --git a/libavcodec/vaapi_mpeg4.c b/libavcodec/vaapi_mpeg4.c
index 1e746ecc6a..0a0c2d0f8e 100644
--- a/libavcodec/vaapi_mpeg4.c
+++ b/libavcodec/vaapi_mpeg4.c
@@ -47,7 +47,7 @@  static int mpeg4_get_intra_dc_vlc_thr(Mpeg4DecContext *s)
 static int vaapi_mpeg4_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
 {
     Mpeg4DecContext *ctx = avctx->priv_data;
-    MPVDecContext *const s = &ctx->m;
+    MPVDecContext *const s = &ctx->m.s;
     VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
     VAPictureParameterBufferMPEG4 pic_param;
     int i, err;
diff --git a/libavcodec/vaapi_vc1.c b/libavcodec/vaapi_vc1.c
index 6830c12236..fd3b963e33 100644
--- a/libavcodec/vaapi_vc1.c
+++ b/libavcodec/vaapi_vc1.c
@@ -46,7 +46,7 @@  static inline int vc1_has_MVTYPEMB_bitplane(const VC1Context *v)
     if (v->mv_type_is_raw)
         return 0;
     return v->fcm == PROGRESSIVE &&
-           (v->s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) &&
+           (v->s.s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) &&
            (v->mv_mode == MV_PMODE_MIXED_MV ||
             (v->mv_mode == MV_PMODE_INTENSITY_COMP &&
              v->mv_mode2 == MV_PMODE_MIXED_MV));
@@ -58,8 +58,8 @@  static inline int vc1_has_SKIPMB_bitplane(const VC1Context *v)
     if (v->skip_is_raw)
         return 0;
     return (v->fcm == PROGRESSIVE || v->fcm == ILACE_FRAME) &&
-           ((v->s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) ||
-            (v->s.pict_type == AV_PICTURE_TYPE_B && !v->bi_type));
+           ((v->s.s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) ||
+            (v->s.s.pict_type == AV_PICTURE_TYPE_B && !v->bi_type));
 }
 
 /** Check whether the DIRECTMB bitplane is present */
@@ -68,7 +68,7 @@  static inline int vc1_has_DIRECTMB_bitplane(const VC1Context *v)
     if (v->dmb_is_raw)
         return 0;
     return (v->fcm == PROGRESSIVE || v->fcm == ILACE_FRAME) &&
-           (v->s.pict_type == AV_PICTURE_TYPE_B && !v->bi_type);
+           (v->s.s.pict_type == AV_PICTURE_TYPE_B && !v->bi_type);
 }
 
 /** Check whether the ACPRED bitplane is present */
@@ -77,8 +77,8 @@  static inline int vc1_has_ACPRED_bitplane(const VC1Context *v)
     if (v->acpred_is_raw)
         return 0;
     return v->profile == PROFILE_ADVANCED &&
-           (v->s.pict_type == AV_PICTURE_TYPE_I ||
-            (v->s.pict_type == AV_PICTURE_TYPE_B && v->bi_type));
+           (v->s.s.pict_type == AV_PICTURE_TYPE_I ||
+            (v->s.s.pict_type == AV_PICTURE_TYPE_B && v->bi_type));
 }
 
 /** Check whether the OVERFLAGS bitplane is present */
@@ -87,8 +87,8 @@  static inline int vc1_has_OVERFLAGS_bitplane(const VC1Context *v)
     if (v->overflg_is_raw)
         return 0;
     return v->profile == PROFILE_ADVANCED &&
-           (v->s.pict_type == AV_PICTURE_TYPE_I ||
-            (v->s.pict_type == AV_PICTURE_TYPE_B && v->bi_type)) &&
+           (v->s.s.pict_type == AV_PICTURE_TYPE_I ||
+            (v->s.s.pict_type == AV_PICTURE_TYPE_B && v->bi_type)) &&
            (v->overlap && v->pq <= 8) &&
            v->condover == CONDOVER_SELECT;
 }
@@ -99,8 +99,8 @@  static inline int vc1_has_FIELDTX_bitplane(const VC1Context *v)
     if (v->fieldtx_is_raw)
         return 0;
     return v->fcm == ILACE_FRAME &&
-           (v->s.pict_type == AV_PICTURE_TYPE_I ||
-            (v->s.pict_type == AV_PICTURE_TYPE_B && v->bi_type));
+           (v->s.s.pict_type == AV_PICTURE_TYPE_I ||
+            (v->s.s.pict_type == AV_PICTURE_TYPE_B && v->bi_type));
 }
 
 /** Check whether the FORWARDMB bitplane is present */
@@ -109,13 +109,13 @@  static inline int vc1_has_FORWARDMB_bitplane(const VC1Context *v)
     if (v->fmb_is_raw)
         return 0;
     return v->fcm == ILACE_FIELD &&
-           (v->s.pict_type == AV_PICTURE_TYPE_B && !v->bi_type);
+           (v->s.s.pict_type == AV_PICTURE_TYPE_B && !v->bi_type);
 }
 
 /** Reconstruct bitstream PTYPE (7.1.1.4, index into Table-35) */
 static int vc1_get_PTYPE(const VC1Context *v)
 {
-    const MPVDecContext *const s = &v->s;
+    const MPVDecContext *const s = &v->s.s;
     switch (s->pict_type) {
     case AV_PICTURE_TYPE_I: return 0;
     case AV_PICTURE_TYPE_P: return v->p_frame_skipped ? 4 : 1;
@@ -127,7 +127,7 @@  static int vc1_get_PTYPE(const VC1Context *v)
 /** Reconstruct bitstream FPTYPE (9.1.1.42, index into Table-105) */
 static int vc1_get_FPTYPE(const VC1Context *v)
 {
-    const MPVDecContext *const s = &v->s;
+    const MPVDecContext *const s = &v->s.s;
     switch (s->pict_type) {
     case AV_PICTURE_TYPE_I: return 0;
     case AV_PICTURE_TYPE_P: return 3;
@@ -140,8 +140,8 @@  static int vc1_get_FPTYPE(const VC1Context *v)
 static inline VAMvModeVC1 vc1_get_MVMODE(const VC1Context *v)
 {
     if ((v->fcm == PROGRESSIVE || v->fcm == ILACE_FIELD) &&
-        ((v->s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) ||
-         (v->s.pict_type == AV_PICTURE_TYPE_B && !v->bi_type)))
+        ((v->s.s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) ||
+         (v->s.s.pict_type == AV_PICTURE_TYPE_B && !v->bi_type)))
         return get_VAMvModeVC1(v->mv_mode);
     return 0;
 }
@@ -150,7 +150,7 @@  static inline VAMvModeVC1 vc1_get_MVMODE(const VC1Context *v)
 static inline VAMvModeVC1 vc1_get_MVMODE2(const VC1Context *v)
 {
     if ((v->fcm == PROGRESSIVE || v->fcm == ILACE_FIELD) &&
-        (v->s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) &&
+        (v->s.s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) &&
         v->mv_mode == MV_PMODE_INTENSITY_COMP)
         return get_VAMvModeVC1(v->mv_mode2);
     return 0;
@@ -158,7 +158,7 @@  static inline VAMvModeVC1 vc1_get_MVMODE2(const VC1Context *v)
 
 av_unused static inline int vc1_get_INTCOMPFIELD(const VC1Context *v)
 {
-    if ((v->s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) &&
+    if ((v->s.s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) &&
         v->fcm == ILACE_FIELD &&
         v->mv_mode == MV_PMODE_INTENSITY_COMP)
         switch (v->intcompfield) {
@@ -171,7 +171,7 @@  av_unused static inline int vc1_get_INTCOMPFIELD(const VC1Context *v)
 
 static inline int vc1_get_LUMSCALE(const VC1Context *v)
 {
-    if (v->s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) {
+    if (v->s.s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) {
         if ((v->fcm == PROGRESSIVE && v->mv_mode == MV_PMODE_INTENSITY_COMP) ||
             (v->fcm == ILACE_FRAME && v->intcomp))
             return v->lumscale;
@@ -187,7 +187,7 @@  static inline int vc1_get_LUMSCALE(const VC1Context *v)
 
 static inline int vc1_get_LUMSHIFT(const VC1Context *v)
 {
-    if (v->s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) {
+    if (v->s.s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) {
         if ((v->fcm == PROGRESSIVE && v->mv_mode == MV_PMODE_INTENSITY_COMP) ||
             (v->fcm == ILACE_FRAME && v->intcomp))
             return v->lumshift;
@@ -203,7 +203,7 @@  static inline int vc1_get_LUMSHIFT(const VC1Context *v)
 
 av_unused static inline int vc1_get_LUMSCALE2(const VC1Context *v)
 {
-    if ((v->s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) &&
+    if ((v->s.s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) &&
         v->fcm == ILACE_FIELD &&
         v->mv_mode == MV_PMODE_INTENSITY_COMP &&
         v->intcompfield == 3)
@@ -213,7 +213,7 @@  av_unused static inline int vc1_get_LUMSCALE2(const VC1Context *v)
 
 av_unused static inline int vc1_get_LUMSHIFT2(const VC1Context *v)
 {
-    if ((v->s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) &&
+    if ((v->s.s.pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) &&
         v->fcm == ILACE_FIELD &&
         v->mv_mode == MV_PMODE_INTENSITY_COMP &&
         v->intcompfield == 3)
@@ -251,7 +251,7 @@  static inline void vc1_pack_bitplanes(uint8_t *bitplane, int n, const uint8_t *f
 static int vaapi_vc1_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
 {
     const VC1Context *v = avctx->priv_data;
-    const MPVDecContext *const s = &v->s;
+    const MPVDecContext *const s = &v->s.s;
     VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
     VAPictureParameterBufferVC1 pic_param;
     int err;
@@ -367,7 +367,7 @@  static int vaapi_vc1_start_frame(AVCodecContext *avctx, av_unused const uint8_t
             .frame_level_transform_type    = vc1_get_TTFRM(v),
             .transform_ac_codingset_idx1   = v->c_ac_table_index,
             .transform_ac_codingset_idx2   = v->y_ac_table_index,
-            .intra_transform_dc_table      = v->s.dc_table_index,
+            .intra_transform_dc_table      = v->s.s.dc_table_index,
         },
     };
 
@@ -448,7 +448,7 @@  fail:
 static int vaapi_vc1_end_frame(AVCodecContext *avctx)
 {
     VC1Context *v = avctx->priv_data;
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
     int ret;
 
@@ -465,7 +465,7 @@  fail:
 static int vaapi_vc1_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
 {
     const VC1Context *v = avctx->priv_data;
-    const MPVDecContext *const s = &v->s;
+    const MPVDecContext *const s = &v->s.s;
     VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
     VASliceParameterBufferVC1 slice_param;
     int mb_height;
diff --git a/libavcodec/vc1.c b/libavcodec/vc1.c
index 40e2b4d692..d9061b1f74 100644
--- a/libavcodec/vc1.c
+++ b/libavcodec/vc1.c
@@ -97,15 +97,16 @@  static void decode_colskip(uint8_t* plane, int width, int height, int stride,
  */
 static int bitplane_decoding(uint8_t* data, int *raw_flag, VC1Context *v)
 {
-    GetBitContext *gb = &v->s.gb;
+    MPVDecContext *const s = &v->s.s;
+    GetBitContext *gb = &s->gb;
 
     int imode, x, y, code, offset;
     uint8_t invert, *planep = data;
     int width, height, stride;
 
-    width  = v->s.mb_width;
-    height = v->s.mb_height >> v->field_mode;
-    stride = v->s.mb_stride;
+    width  = s->mb_width;
+    height = s->mb_height >> v->field_mode;
+    stride = s->mb_stride;
     invert = get_bits1(gb);
     imode = get_vlc2(gb, ff_vc1_imode_vlc.table, VC1_IMODE_VLC_BITS, 1);
 
@@ -151,7 +152,7 @@  static int bitplane_decoding(uint8_t* data, int *raw_flag, VC1Context *v)
                 for (x = width & 1; x < width; x += 2) {
                     code = get_vlc2(gb, ff_vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
                     if (code < 0) {
-                        av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
+                        av_log(s->avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
                         return -1;
                     }
                     planep[x + 0]              = (code >> 0) & 1;
@@ -164,14 +165,14 @@  static int bitplane_decoding(uint8_t* data, int *raw_flag, VC1Context *v)
                 planep += stride * 3;
             }
             if (width & 1)
-                decode_colskip(data, 1, height, stride, &v->s.gb);
+                decode_colskip(data, 1, height, stride, gb);
         } else { // 3x2
             planep += (height & 1) * stride;
             for (y = height & 1; y < height; y += 2) {
                 for (x = width % 3; x < width; x += 3) {
                     code = get_vlc2(gb, ff_vc1_norm6_vlc.table, VC1_NORM6_VLC_BITS, 2);
                     if (code < 0) {
-                        av_log(v->s.avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
+                        av_log(s->avctx, AV_LOG_DEBUG, "invalid NORM-6 VLC\n");
                         return -1;
                     }
                     planep[x + 0]          = (code >> 0) & 1;
@@ -185,16 +186,16 @@  static int bitplane_decoding(uint8_t* data, int *raw_flag, VC1Context *v)
             }
             x = width % 3;
             if (x)
-                decode_colskip(data,             x, height, stride, &v->s.gb);
+                decode_colskip(data,             x, height, stride, gb);
             if (height & 1)
-                decode_rowskip(data + x, width - x,      1, stride, &v->s.gb);
+                decode_rowskip(data + x, width - x,      1, stride, gb);
         }
         break;
     case IMODE_ROWSKIP:
-        decode_rowskip(data, width, height, stride, &v->s.gb);
+        decode_rowskip(data, width, height, stride, gb);
         break;
     case IMODE_COLSKIP:
-        decode_colskip(data, width, height, stride, &v->s.gb);
+        decode_colskip(data, width, height, stride, gb);
         break;
     default:
         break;
@@ -230,7 +231,7 @@  static int bitplane_decoding(uint8_t* data, int *raw_flag, VC1Context *v)
  */
 static int vop_dquant_decoding(VC1Context *v)
 {
-    GetBitContext *gb = &v->s.gb;
+    GetBitContext *gb = &v->s.s.gb;
     int pqdiff;
 
     //variable size
@@ -276,6 +277,8 @@  static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb);
  */
 int ff_vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitContext *gb)
 {
+    MPVDecContext *const s = &v->s.s;
+
     av_log(avctx, AV_LOG_DEBUG, "Header: %0X\n", show_bits_long(gb, 32));
     v->profile = get_bits(gb, 2);
     if (v->profile == PROFILE_COMPLEX) {
@@ -303,13 +306,13 @@  int ff_vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitCo
     v->frmrtq_postproc = get_bits(gb, 3); //common
     // (bitrate-32kbps)/64kbps
     v->bitrtq_postproc = get_bits(gb, 5); //common
-    v->s.loop_filter   = get_bits1(gb); //common
-    if (v->s.loop_filter == 1 && v->profile == PROFILE_SIMPLE) {
+    s->loop_filter   = get_bits1(gb); //common
+    if (s->loop_filter == 1 && v->profile == PROFILE_SIMPLE) {
         av_log(avctx, AV_LOG_ERROR,
                "LOOPFILTER shall not be enabled in Simple Profile\n");
     }
-    if (v->s.avctx->skip_loop_filter >= AVDISCARD_ALL)
-        v->s.loop_filter = 0;
+    if (s->avctx->skip_loop_filter >= AVDISCARD_ALL)
+        s->loop_filter = 0;
 
     v->res_x8          = get_bits1(gb); //reserved
     v->multires        = get_bits1(gb);
@@ -356,7 +359,7 @@  int ff_vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitCo
                "RANGERED should be set to 0 in Simple Profile\n");
     }
 
-    v->s.max_b_frames = avctx->max_b_frames = get_bits(gb, 3); //common
+    s->max_b_frames = avctx->max_b_frames = get_bits(gb, 3); //common
     v->quantizer_mode = get_bits(gb, 2); //common
 
     v->finterpflag = get_bits1(gb); //common
@@ -364,7 +367,7 @@  int ff_vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitCo
     if (v->res_sprite) {
         int w = get_bits(gb, 11);
         int h = get_bits(gb, 11);
-        int ret = ff_set_dimensions(v->s.avctx, w, h);
+        int ret = ff_set_dimensions(s->avctx, w, h);
         if (ret < 0) {
             av_log(avctx, AV_LOG_ERROR, "Failed to set dimensions %d %d\n", w, h);
             return ret;
@@ -389,7 +392,7 @@  int ff_vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitCo
            "Rangered=%i, VSTransform=%i, Overlap=%i, SyncMarker=%i\n"
            "DQuant=%i, Quantizer mode=%i, Max B-frames=%i\n",
            v->profile, v->frmrtq_postproc, v->bitrtq_postproc,
-           v->s.loop_filter, v->multires, v->fastuvmc, v->extended_mv,
+           s->loop_filter, v->multires, v->fastuvmc, v->extended_mv,
            v->rangered, v->vstransform, v->overlap, v->resync_marker,
            v->dquant, v->quantizer_mode, avctx->max_b_frames);
     return 0;
@@ -397,14 +400,16 @@  int ff_vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitCo
 
 static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
 {
+    MPVDecContext *const s = &v->s.s;
+
     v->res_rtm_flag = 1;
     v->level = get_bits(gb, 3);
     if (v->level >= 5) {
-        av_log(v->s.avctx, AV_LOG_ERROR, "Reserved LEVEL %i\n",v->level);
+        av_log(s->avctx, AV_LOG_ERROR, "Reserved LEVEL %i\n",v->level);
     }
     v->chromaformat = get_bits(gb, 2);
     if (v->chromaformat != 1) {
-        av_log(v->s.avctx, AV_LOG_ERROR,
+        av_log(s->avctx, AV_LOG_ERROR,
                "Only 4:2:0 chroma format supported\n");
         return -1;
     }
@@ -423,65 +428,65 @@  static int decode_sequence_header_adv(VC1Context *v, GetBitContext *gb)
     v->finterpflag           = get_bits1(gb);
     skip_bits1(gb); // reserved
 
-    av_log(v->s.avctx, AV_LOG_DEBUG,
+    av_log(s->avctx, AV_LOG_DEBUG,
            "Advanced Profile level %i:\nfrmrtq_postproc=%i, bitrtq_postproc=%i\n"
            "LoopFilter=%i, ChromaFormat=%i, Pulldown=%i, Interlace: %i\n"
            "TFCTRflag=%i, FINTERPflag=%i\n",
            v->level, v->frmrtq_postproc, v->bitrtq_postproc,
-           v->s.loop_filter, v->chromaformat, v->broadcast, v->interlace,
+           s->loop_filter, v->chromaformat, v->broadcast, v->interlace,
            v->tfcntrflag, v->finterpflag);
 
     v->psf = get_bits1(gb);
     if (v->psf) { //PsF, 6.1.13
-        av_log(v->s.avctx, AV_LOG_ERROR, "Progressive Segmented Frame mode: not supported (yet)\n");
+        av_log(s->avctx, AV_LOG_ERROR, "Progressive Segmented Frame mode: not supported (yet)\n");
         return -1;
     }
-    v->s.max_b_frames = v->s.avctx->max_b_frames = 7;
+    s->max_b_frames = s->avctx->max_b_frames = 7;
     if (get_bits1(gb)) { //Display Info - decoding is not affected by it
         int w, h, ar = 0;
-        av_log(v->s.avctx, AV_LOG_DEBUG, "Display extended info:\n");
+        av_log(s->avctx, AV_LOG_DEBUG, "Display extended info:\n");
         w = get_bits(gb, 14) + 1;
         h = get_bits(gb, 14) + 1;
-        av_log(v->s.avctx, AV_LOG_DEBUG, "Display dimensions: %ix%i\n", w, h);
+        av_log(s->avctx, AV_LOG_DEBUG, "Display dimensions: %ix%i\n", w, h);
         if (get_bits1(gb))
             ar = get_bits(gb, 4);
         if (ar && ar < 14) {
-            v->s.avctx->sample_aspect_ratio = ff_vc1_pixel_aspect[ar];
+            s->avctx->sample_aspect_ratio = ff_vc1_pixel_aspect[ar];
         } else if (ar == 15) {
             w = get_bits(gb, 8) + 1;
             h = get_bits(gb, 8) + 1;
-            v->s.avctx->sample_aspect_ratio = (AVRational){w, h};
+            s->avctx->sample_aspect_ratio = (AVRational){w, h};
         } else {
-            if (v->s.avctx->width  > v->max_coded_width ||
-                v->s.avctx->height > v->max_coded_height) {
-                avpriv_request_sample(v->s.avctx, "Huge resolution");
+            if (s->avctx->width  > v->max_coded_width ||
+                s->avctx->height > v->max_coded_height) {
+                avpriv_request_sample(s->avctx, "Huge resolution");
             } else
-                av_reduce(&v->s.avctx->sample_aspect_ratio.num,
-                      &v->s.avctx->sample_aspect_ratio.den,
-                      v->s.avctx->height * w,
-                      v->s.avctx->width * h,
+                av_reduce(&s->avctx->sample_aspect_ratio.num,
+                      &s->avctx->sample_aspect_ratio.den,
+                      s->avctx->height * w,
+                      s->avctx->width * h,
                       1 << 30);
         }
-        ff_set_sar(v->s.avctx, v->s.avctx->sample_aspect_ratio);
-        av_log(v->s.avctx, AV_LOG_DEBUG, "Aspect: %i:%i\n",
-               v->s.avctx->sample_aspect_ratio.num,
-               v->s.avctx->sample_aspect_ratio.den);
+        ff_set_sar(s->avctx, s->avctx->sample_aspect_ratio);
+        av_log(s->avctx, AV_LOG_DEBUG, "Aspect: %i:%i\n",
+               s->avctx->sample_aspect_ratio.num,
+               s->avctx->sample_aspect_ratio.den);
 
         if (get_bits1(gb)) { //framerate stuff
             if (get_bits1(gb)) {
-                v->s.avctx->framerate.den = 32;
-                v->s.avctx->framerate.num = get_bits(gb, 16) + 1;
+                s->avctx->framerate.den = 32;
+                s->avctx->framerate.num = get_bits(gb, 16) + 1;
             } else {
                 int nr, dr;
                 nr = get_bits(gb, 8);
                 dr = get_bits(gb, 4);
                 if (nr > 0 && nr < 8 && dr > 0 && dr < 3) {
-                    v->s.avctx->framerate.den = ff_vc1_fps_dr[dr - 1];
-                    v->s.avctx->framerate.num = ff_vc1_fps_nr[nr - 1] * 1000;
+                    s->avctx->framerate.den = ff_vc1_fps_dr[dr - 1];
+                    s->avctx->framerate.num = ff_vc1_fps_nr[nr - 1] * 1000;
                 }
             }
             if (v->broadcast) { // Pulldown may be present
-                v->s.avctx->ticks_per_frame = 2;
+                s->avctx->ticks_per_frame = 2;
             }
         }
 
@@ -517,9 +522,9 @@  int ff_vc1_decode_entry_point(AVCodecContext *avctx, VC1Context *v, GetBitContex
     v->closed_entry   = get_bits1(gb);
     v->panscanflag    = get_bits1(gb);
     v->refdist_flag   = get_bits1(gb);
-    v->s.loop_filter  = get_bits1(gb);
-    if (v->s.avctx->skip_loop_filter >= AVDISCARD_ALL)
-        v->s.loop_filter = 0;
+    v->s.s.loop_filter  = get_bits1(gb);
+    if (v->s.s.avctx->skip_loop_filter >= AVDISCARD_ALL)
+        v->s.s.loop_filter = 0;
     v->fastuvmc       = get_bits1(gb);
     v->extended_mv    = get_bits1(gb);
     v->dquant         = get_bits(gb, 2);
@@ -560,7 +565,7 @@  int ff_vc1_decode_entry_point(AVCodecContext *avctx, VC1Context *v, GetBitContex
            "BrokenLink=%i, ClosedEntry=%i, PanscanFlag=%i\n"
            "RefDist=%i, Postproc=%i, FastUVMC=%i, ExtMV=%i\n"
            "DQuant=%i, VSTransform=%i, Overlap=%i, Qmode=%i\n",
-           v->broken_link, v->closed_entry, v->panscanflag, v->refdist_flag, v->s.loop_filter,
+           v->broken_link, v->closed_entry, v->panscanflag, v->refdist_flag, v->s.s.loop_filter,
            v->fastuvmc, v->extended_mv, v->dquant, v->vstransform, v->overlap, v->quantizer_mode);
 
     return 0;
@@ -592,7 +597,7 @@  int ff_vc1_decode_entry_point(AVCodecContext *avctx, VC1Context *v, GetBitContex
 static void rotate_luts(VC1Context *v)
 {
 #define ROTATE(DEF, L, N, C, A) do {                          \
-        if (v->s.pict_type == AV_PICTURE_TYPE_BI || v->s.pict_type == AV_PICTURE_TYPE_B) { \
+        if (v->s.s.pict_type == AV_PICTURE_TYPE_BI || v->s.s.pict_type == AV_PICTURE_TYPE_B) { \
             C = A;                                            \
         } else {                                              \
             DEF;                                              \
@@ -616,7 +621,7 @@  static int read_bfraction(VC1Context *v, GetBitContext* gb) {
     int bfraction_lut_index = get_vlc2(gb, ff_vc1_bfraction_vlc.table, VC1_BFRACTION_VLC_BITS, 1);
 
     if (bfraction_lut_index == 21 || bfraction_lut_index < 0) {
-        av_log(v->s.avctx, AV_LOG_ERROR, "bfraction invalid\n");
+        av_log(v->s.s.avctx, AV_LOG_ERROR, "bfraction invalid\n");
         return AVERROR_INVALIDDATA;
     }
     v->bfraction_lut_index = bfraction_lut_index;
@@ -626,15 +631,16 @@  static int read_bfraction(VC1Context *v, GetBitContext* gb) {
 
 int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
 {
+    MPVDecContext *const s = &v->s.s;
     int pqindex, lowquant, status;
 
     v->field_mode = 0;
     v->fcm = PROGRESSIVE;
     if (v->finterpflag)
         v->interpfrm = get_bits1(gb);
-    if (!v->s.avctx->codec)
+    if (!s->avctx->codec)
         return -1;
-    if (v->s.avctx->codec_id == AV_CODEC_ID_MSS2)
+    if (s->avctx->codec_id == AV_CODEC_ID_MSS2)
         v->respic   =
         v->rangered =
         v->multires = get_bits(gb, 2) == 1;
@@ -644,32 +650,32 @@  int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
     if (v->rangered)
         v->rangeredfrm = get_bits1(gb);
     if (get_bits1(gb)) {
-        v->s.pict_type = AV_PICTURE_TYPE_P;
+        s->pict_type = AV_PICTURE_TYPE_P;
     } else {
-        if (v->s.avctx->max_b_frames && !get_bits1(gb)) {
-            v->s.pict_type = AV_PICTURE_TYPE_B;
+        if (s->avctx->max_b_frames && !get_bits1(gb)) {
+            s->pict_type = AV_PICTURE_TYPE_B;
         } else
-            v->s.pict_type = AV_PICTURE_TYPE_I;
+            s->pict_type = AV_PICTURE_TYPE_I;
     }
 
     v->bi_type = 0;
-    if (v->s.pict_type == AV_PICTURE_TYPE_B) {
+    if (s->pict_type == AV_PICTURE_TYPE_B) {
         if (read_bfraction(v, gb) < 0)
             return AVERROR_INVALIDDATA;
         if (v->bfraction == 0) {
-            v->s.pict_type = AV_PICTURE_TYPE_BI;
+            s->pict_type = AV_PICTURE_TYPE_BI;
         }
     }
-    if (v->s.pict_type == AV_PICTURE_TYPE_I || v->s.pict_type == AV_PICTURE_TYPE_BI)
+    if (s->pict_type == AV_PICTURE_TYPE_I || s->pict_type == AV_PICTURE_TYPE_BI)
         skip_bits(gb, 7); // skip buffer fullness
 
     if (v->parse_only)
         return 0;
 
     /* calculate RND */
-    if (v->s.pict_type == AV_PICTURE_TYPE_I || v->s.pict_type == AV_PICTURE_TYPE_BI)
+    if (s->pict_type == AV_PICTURE_TYPE_I || s->pict_type == AV_PICTURE_TYPE_BI)
         v->rnd = 1;
-    if (v->s.pict_type == AV_PICTURE_TYPE_P)
+    if (s->pict_type == AV_PICTURE_TYPE_P)
         v->rnd ^= 1;
 
     if (get_bits_left(gb) < 5)
@@ -708,21 +714,21 @@  int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
     v->k_y = v->mvrange + 8; //k_y can be 8 9 10 11
     v->range_x = 1 << (v->k_x - 1);
     v->range_y = 1 << (v->k_y - 1);
-    if (v->multires && v->s.pict_type != AV_PICTURE_TYPE_B)
+    if (v->multires && s->pict_type != AV_PICTURE_TYPE_B)
         v->respic = get_bits(gb, 2);
 
-    if (v->res_x8 && (v->s.pict_type == AV_PICTURE_TYPE_I || v->s.pict_type == AV_PICTURE_TYPE_BI)) {
+    if (v->res_x8 && (s->pict_type == AV_PICTURE_TYPE_I || s->pict_type == AV_PICTURE_TYPE_BI)) {
         v->x8_type = get_bits1(gb);
     } else
         v->x8_type = 0;
-    ff_dlog(v->s.avctx, "%c Frame: QP=[%i]%i (+%i/2) %i\n",
-            (v->s.pict_type == AV_PICTURE_TYPE_P) ? 'P' : ((v->s.pict_type == AV_PICTURE_TYPE_I) ? 'I' : 'B'),
+    ff_dlog(s->avctx, "%c Frame: QP=[%i]%i (+%i/2) %i\n",
+            (s->pict_type == AV_PICTURE_TYPE_P) ? 'P' : ((s->pict_type == AV_PICTURE_TYPE_I) ? 'I' : 'B'),
             pqindex, v->pq, v->halfpq, v->rangeredfrm);
 
     if (v->first_pic_header_flag)
         rotate_luts(v);
 
-    switch (v->s.pict_type) {
+    switch (s->pict_type) {
     case AV_PICTURE_TYPE_P:
         v->tt_index = (v->pq > 4) + (v->pq > 12);
 
@@ -737,15 +743,15 @@  int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
             INIT_LUT(v->lumscale, v->lumshift, v->last_luty[0], v->last_lutuv[0], 1);
             INIT_LUT(v->lumscale, v->lumshift, v->last_luty[1], v->last_lutuv[1], 1);
         }
-        v->qs_last = v->s.quarter_sample;
+        v->qs_last = s->quarter_sample;
         if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
-            v->s.quarter_sample = (v->mv_mode2 != MV_PMODE_1MV_HPEL &&
-                                   v->mv_mode2 != MV_PMODE_1MV_HPEL_BILIN);
-            v->s.mspel          = (v->mv_mode2 != MV_PMODE_1MV_HPEL_BILIN);
+            s->quarter_sample = (v->mv_mode2 != MV_PMODE_1MV_HPEL &&
+                                 v->mv_mode2 != MV_PMODE_1MV_HPEL_BILIN);
+            s->mspel          = (v->mv_mode2 != MV_PMODE_1MV_HPEL_BILIN);
         } else {
-            v->s.quarter_sample = (v->mv_mode != MV_PMODE_1MV_HPEL &&
-                                   v->mv_mode != MV_PMODE_1MV_HPEL_BILIN);
-            v->s.mspel          = (v->mv_mode != MV_PMODE_1MV_HPEL_BILIN);
+            s->quarter_sample = (v->mv_mode != MV_PMODE_1MV_HPEL &&
+                                 v->mv_mode != MV_PMODE_1MV_HPEL_BILIN);
+            s->mspel          = (v->mv_mode != MV_PMODE_1MV_HPEL_BILIN);
         }
 
         if ((v->mv_mode  == MV_PMODE_INTENSITY_COMP &&
@@ -754,28 +760,28 @@  int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
             status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
             if (status < 0)
                 return -1;
-            av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
+            av_log(s->avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
                    "Imode: %i, Invert: %i\n", status>>1, status&1);
         } else {
             v->mv_type_is_raw = 0;
-            memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
+            memset(v->mv_type_mb_plane, 0, s->mb_stride * s->mb_height);
         }
-        status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
+        status = bitplane_decoding(s->mbskip_table, &v->skip_is_raw, v);
         if (status < 0)
             return -1;
-        av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
+        av_log(s->avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
                "Imode: %i, Invert: %i\n", status>>1, status&1);
 
         if (get_bits_left(gb) < 4)
             return AVERROR_INVALIDDATA;
 
         /* Hopefully this is correct for P-frames */
-        v->s.mv_table_index = get_bits(gb, 2); //but using ff_vc1_ tables
+        s->mv_table_index = get_bits(gb, 2); //but using ff_vc1_ tables
         v->cbptab = get_bits(gb, 2);
         v->cbpcy_vlc = &ff_vc1_cbpcy_p_vlc[v->cbptab];
 
         if (v->dquant) {
-            av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
+            av_log(s->avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
             vop_dquant_decoding(v);
         }
 
@@ -793,28 +799,28 @@  int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
     case AV_PICTURE_TYPE_B:
         v->tt_index = (v->pq > 4) + (v->pq > 12);
 
-        v->mv_mode          = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
-        v->qs_last          = v->s.quarter_sample;
-        v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
-        v->s.mspel          = v->s.quarter_sample;
+        v->mv_mode        = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
+        v->qs_last        = s->quarter_sample;
+        s->quarter_sample = (v->mv_mode == MV_PMODE_1MV);
+        s->mspel          = s->quarter_sample;
 
         status = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
         if (status < 0)
             return -1;
-        av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
+        av_log(s->avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
                "Imode: %i, Invert: %i\n", status>>1, status&1);
-        status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
+        status = bitplane_decoding(s->mbskip_table, &v->skip_is_raw, v);
         if (status < 0)
             return -1;
-        av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
+        av_log(s->avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
                "Imode: %i, Invert: %i\n", status>>1, status&1);
 
-        v->s.mv_table_index = get_bits(gb, 2);
+        s->mv_table_index = get_bits(gb, 2);
         v->cbptab           = get_bits(gb, 2);
         v->cbpcy_vlc        = &ff_vc1_cbpcy_p_vlc[v->cbptab];
 
         if (v->dquant) {
-            av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
+            av_log(s->avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
             vop_dquant_decoding(v);
         }
 
@@ -834,15 +840,15 @@  int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
     if (!v->x8_type) {
         /* AC Syntax */
         v->c_ac_table_index = decode012(gb);
-        if (v->s.pict_type == AV_PICTURE_TYPE_I || v->s.pict_type == AV_PICTURE_TYPE_BI) {
+        if (s->pict_type == AV_PICTURE_TYPE_I || s->pict_type == AV_PICTURE_TYPE_BI) {
             v->y_ac_table_index = decode012(gb);
         }
         /* DC Syntax */
-        v->s.dc_table_index = get_bits1(gb);
+        s->dc_table_index = get_bits1(gb);
     }
 
-    if (v->s.pict_type == AV_PICTURE_TYPE_BI) {
-        v->s.pict_type = AV_PICTURE_TYPE_B;
+    if (s->pict_type == AV_PICTURE_TYPE_BI) {
+        s->pict_type = AV_PICTURE_TYPE_B;
         v->bi_type     = 1;
     }
     return 0;
@@ -850,6 +856,7 @@  int ff_vc1_parse_frame_header(VC1Context *v, GetBitContext* gb)
 
 int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
 {
+    MPVDecContext *const s = &v->s.s;
     int pqindex, lowquant;
     int status;
     int field_mode, fcm;
@@ -860,10 +867,10 @@  int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
         if (v->fcm != ILACE_FIELD || v->field_mode!=1)
             return -1;
         if (v->fptype & 4)
-            v->s.pict_type = (v->fptype & 1) ? AV_PICTURE_TYPE_BI : AV_PICTURE_TYPE_B;
+            s->pict_type = (v->fptype & 1) ? AV_PICTURE_TYPE_BI : AV_PICTURE_TYPE_B;
         else
-            v->s.pict_type = (v->fptype & 1) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
-        v->s.current_picture_ptr->f->pict_type = v->s.pict_type;
+            s->pict_type = (v->fptype & 1) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
+        s->current_picture_ptr->f->pict_type = s->pict_type;
         if (!v->pic_header_flag)
             goto parse_common_info;
     }
@@ -883,32 +890,32 @@  int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
     v->field_mode = field_mode;
     v->fcm = fcm;
 
-    av_assert0(    v->s.mb_height == v->s.height + 15 >> 4
-                || v->s.mb_height == FFALIGN(v->s.height + 15 >> 4, 2));
+    av_assert0(s->mb_height == s->height + 15 >> 4 ||
+               s->mb_height == FFALIGN(s->height + 15 >> 4, 2));
     if (v->field_mode) {
-        v->s.mb_height = FFALIGN(v->s.height + 15 >> 4, 2);
+        s->mb_height = FFALIGN(s->height + 15 >> 4, 2);
         v->fptype = get_bits(gb, 3);
         if (v->fptype & 4) // B-picture
-            v->s.pict_type = (v->fptype & 2) ? AV_PICTURE_TYPE_BI : AV_PICTURE_TYPE_B;
+            s->pict_type = (v->fptype & 2) ? AV_PICTURE_TYPE_BI : AV_PICTURE_TYPE_B;
         else
-            v->s.pict_type = (v->fptype & 2) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
+            s->pict_type = (v->fptype & 2) ? AV_PICTURE_TYPE_P : AV_PICTURE_TYPE_I;
     } else {
-        v->s.mb_height = v->s.height + 15 >> 4;
+        s->mb_height = s->height + 15 >> 4;
         switch (get_unary(gb, 0, 4)) {
         case 0:
-            v->s.pict_type = AV_PICTURE_TYPE_P;
+            s->pict_type = AV_PICTURE_TYPE_P;
             break;
         case 1:
-            v->s.pict_type = AV_PICTURE_TYPE_B;
+            s->pict_type = AV_PICTURE_TYPE_B;
             break;
         case 2:
-            v->s.pict_type = AV_PICTURE_TYPE_I;
+            s->pict_type = AV_PICTURE_TYPE_I;
             break;
         case 3:
-            v->s.pict_type = AV_PICTURE_TYPE_BI;
+            s->pict_type = AV_PICTURE_TYPE_BI;
             break;
         case 4:
-            v->s.pict_type = AV_PICTURE_TYPE_P; // skipped pic
+            s->pict_type = AV_PICTURE_TYPE_P; // skipped pic
             v->p_frame_skipped = 1;
             break;
         }
@@ -926,7 +933,7 @@  int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
         v->tff = 1;
     }
     if (v->panscanflag) {
-        avpriv_report_missing_feature(v->s.avctx, "Pan-scan");
+        avpriv_report_missing_feature(s->avctx, "Pan-scan");
         //...
     }
     if (v->p_frame_skipped) {
@@ -940,14 +947,14 @@  int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
     if (v->field_mode) {
         if (!v->refdist_flag)
             v->refdist = 0;
-        else if ((v->s.pict_type != AV_PICTURE_TYPE_B) && (v->s.pict_type != AV_PICTURE_TYPE_BI)) {
+        else if ((s->pict_type != AV_PICTURE_TYPE_B) && (s->pict_type != AV_PICTURE_TYPE_BI)) {
             v->refdist = get_bits(gb, 2);
             if (v->refdist == 3)
                 v->refdist += get_unary(gb, 0, 14);
             if (v->refdist > 16)
                 return AVERROR_INVALIDDATA;
         }
-        if ((v->s.pict_type == AV_PICTURE_TYPE_B) || (v->s.pict_type == AV_PICTURE_TYPE_BI)) {
+        if ((s->pict_type == AV_PICTURE_TYPE_B) || (s->pict_type == AV_PICTURE_TYPE_BI)) {
             if (read_bfraction(v, gb) < 0)
                 return AVERROR_INVALIDDATA;
             v->frfd = (v->bfraction * v->refdist) >> 8;
@@ -960,11 +967,11 @@  int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
     if (v->fcm == PROGRESSIVE) {
         if (v->finterpflag)
             v->interpfrm = get_bits1(gb);
-        if (v->s.pict_type == AV_PICTURE_TYPE_B) {
+        if (s->pict_type == AV_PICTURE_TYPE_B) {
             if (read_bfraction(v, gb) < 0)
                 return AVERROR_INVALIDDATA;
             if (v->bfraction == 0) {
-                v->s.pict_type = AV_PICTURE_TYPE_BI; /* XXX: should not happen here */
+                s->pict_type = AV_PICTURE_TYPE_BI; /* XXX: should not happen here */
             }
         }
     }
@@ -1008,21 +1015,21 @@  int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
     if (v->first_pic_header_flag)
         rotate_luts(v);
 
-    switch (v->s.pict_type) {
+    switch (s->pict_type) {
     case AV_PICTURE_TYPE_I:
     case AV_PICTURE_TYPE_BI:
         if (v->fcm == ILACE_FRAME) { //interlace frame picture
             status = bitplane_decoding(v->fieldtx_plane, &v->fieldtx_is_raw, v);
             if (status < 0)
                 return -1;
-            av_log(v->s.avctx, AV_LOG_DEBUG, "FIELDTX plane encoding: "
+            av_log(s->avctx, AV_LOG_DEBUG, "FIELDTX plane encoding: "
                    "Imode: %i, Invert: %i\n", status>>1, status&1);
         } else
             v->fieldtx_is_raw = 0;
         status = bitplane_decoding(v->acpred_plane, &v->acpred_is_raw, v);
         if (status < 0)
             return -1;
-        av_log(v->s.avctx, AV_LOG_DEBUG, "ACPRED plane encoding: "
+        av_log(s->avctx, AV_LOG_DEBUG, "ACPRED plane encoding: "
                "Imode: %i, Invert: %i\n", status>>1, status&1);
         v->condover = CONDOVER_NONE;
         if (v->overlap && v->pq <= 8) {
@@ -1031,7 +1038,7 @@  int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
                 status = bitplane_decoding(v->over_flags_plane, &v->overflg_is_raw, v);
                 if (status < 0)
                     return -1;
-                av_log(v->s.avctx, AV_LOG_DEBUG, "CONDOVER plane encoding: "
+                av_log(s->avctx, AV_LOG_DEBUG, "CONDOVER plane encoding: "
                        "Imode: %i, Invert: %i\n", status>>1, status&1);
             }
         }
@@ -1063,10 +1070,10 @@  int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
                     INIT_LUT(v->lumscale, v->lumshift, v->last_luty[1], v->last_lutuv[1], 1);
                     v->last_use_ic = 1;
                 }
-                status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
+                status = bitplane_decoding(s->mbskip_table, &v->skip_is_raw, v);
                 if (status < 0)
                     return -1;
-                av_log(v->s.avctx, AV_LOG_DEBUG, "SKIPMB plane encoding: "
+                av_log(s->avctx, AV_LOG_DEBUG, "SKIPMB plane encoding: "
                        "Imode: %i, Invert: %i\n", status>>1, status&1);
                 v->mbmodetab = get_bits(gb, 2);
                 if (v->fourmvswitch)
@@ -1134,15 +1141,15 @@  int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
                 }
                 v->last_use_ic = 1;
             }
-            v->qs_last = v->s.quarter_sample;
+            v->qs_last = s->quarter_sample;
             if (v->mv_mode == MV_PMODE_INTENSITY_COMP) {
-                v->s.quarter_sample = (v->mv_mode2 != MV_PMODE_1MV_HPEL &&
-                                       v->mv_mode2 != MV_PMODE_1MV_HPEL_BILIN);
-                v->s.mspel          = (v->mv_mode2 != MV_PMODE_1MV_HPEL_BILIN);
+                s->quarter_sample = (v->mv_mode2 != MV_PMODE_1MV_HPEL &&
+                                     v->mv_mode2 != MV_PMODE_1MV_HPEL_BILIN);
+                s->mspel          = (v->mv_mode2 != MV_PMODE_1MV_HPEL_BILIN);
             } else {
-                v->s.quarter_sample = (v->mv_mode != MV_PMODE_1MV_HPEL &&
-                                       v->mv_mode != MV_PMODE_1MV_HPEL_BILIN);
-                v->s.mspel          = (v->mv_mode != MV_PMODE_1MV_HPEL_BILIN);
+                s->quarter_sample = (v->mv_mode != MV_PMODE_1MV_HPEL &&
+                                     v->mv_mode != MV_PMODE_1MV_HPEL_BILIN);
+                s->mspel          = (v->mv_mode != MV_PMODE_1MV_HPEL_BILIN);
             }
         }
         if (v->fcm == PROGRESSIVE) { // progressive
@@ -1152,26 +1159,26 @@  int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
                 status = bitplane_decoding(v->mv_type_mb_plane, &v->mv_type_is_raw, v);
                 if (status < 0)
                     return -1;
-                av_log(v->s.avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
+                av_log(s->avctx, AV_LOG_DEBUG, "MB MV Type plane encoding: "
                        "Imode: %i, Invert: %i\n", status>>1, status&1);
             } else {
                 v->mv_type_is_raw = 0;
-                memset(v->mv_type_mb_plane, 0, v->s.mb_stride * v->s.mb_height);
+                memset(v->mv_type_mb_plane, 0, s->mb_stride * s->mb_height);
             }
-            status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
+            status = bitplane_decoding(s->mbskip_table, &v->skip_is_raw, v);
             if (status < 0)
                 return -1;
-            av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
+            av_log(s->avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
                    "Imode: %i, Invert: %i\n", status>>1, status&1);
 
             /* Hopefully this is correct for P-frames */
-            v->s.mv_table_index = get_bits(gb, 2); //but using ff_vc1_ tables
+            s->mv_table_index   = get_bits(gb, 2); //but using ff_vc1_ tables
             v->cbptab           = get_bits(gb, 2);
             v->cbpcy_vlc        = &ff_vc1_cbpcy_p_vlc[v->cbptab];
         } else if (v->fcm == ILACE_FRAME) { // frame interlaced
-            v->qs_last          = v->s.quarter_sample;
-            v->s.quarter_sample = 1;
-            v->s.mspel          = 1;
+            v->qs_last          = s->quarter_sample;
+            s->quarter_sample   = 1;
+            s->mspel            = 1;
         } else {    // field interlaced
             v->mbmodetab = get_bits(gb, 3);
             v->imvtab = get_bits(gb, 2 + v->numref);
@@ -1191,7 +1198,7 @@  int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
             }
         }
         if (v->dquant) {
-            av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
+            av_log(s->avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
             vop_dquant_decoding(v);
         }
 
@@ -1227,19 +1234,19 @@  int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
 
         if (v->field_mode) {
             int mvmode;
-            av_log(v->s.avctx, AV_LOG_DEBUG, "B Fields\n");
+            av_log(s->avctx, AV_LOG_DEBUG, "B Fields\n");
             if (v->extended_dmv)
                 v->dmvrange = get_unary(gb, 0, 3);
             mvmode = get_unary(gb, 1, 3);
             lowquant = (v->pq > 12) ? 0 : 1;
-            v->mv_mode          = ff_vc1_mv_pmode_table2[lowquant][mvmode];
-            v->qs_last          = v->s.quarter_sample;
-            v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV || v->mv_mode == MV_PMODE_MIXED_MV);
-            v->s.mspel          = (v->mv_mode != MV_PMODE_1MV_HPEL_BILIN);
+            v->mv_mode        = ff_vc1_mv_pmode_table2[lowquant][mvmode];
+            v->qs_last        = s->quarter_sample;
+            s->quarter_sample = (v->mv_mode == MV_PMODE_1MV || v->mv_mode == MV_PMODE_MIXED_MV);
+            s->mspel          = (v->mv_mode != MV_PMODE_1MV_HPEL_BILIN);
             status = bitplane_decoding(v->forward_mb_plane, &v->fmb_is_raw, v);
             if (status < 0)
                 return -1;
-            av_log(v->s.avctx, AV_LOG_DEBUG, "MB Forward Type plane encoding: "
+            av_log(s->avctx, AV_LOG_DEBUG, "MB Forward Type plane encoding: "
                    "Imode: %i, Invert: %i\n", status>>1, status&1);
             v->mbmodetab = get_bits(gb, 3);
             if (v->mv_mode == MV_PMODE_MIXED_MV)
@@ -1259,22 +1266,22 @@  int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
             if (v->extended_dmv)
                 v->dmvrange = get_unary(gb, 0, 3);
             if (get_bits1(gb)) /* intcomp - present but shall always be 0 */
-                av_log(v->s.avctx, AV_LOG_WARNING, "Intensity compensation set for B picture\n");
+                av_log(s->avctx, AV_LOG_WARNING, "Intensity compensation set for B picture\n");
             v->intcomp          = 0;
             v->mv_mode          = MV_PMODE_1MV;
             v->fourmvswitch     = 0;
-            v->qs_last          = v->s.quarter_sample;
-            v->s.quarter_sample = 1;
-            v->s.mspel          = 1;
+            v->qs_last          = s->quarter_sample;
+            s->quarter_sample   = 1;
+            s->mspel            = 1;
             status              = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
             if (status < 0)
                 return -1;
-            av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
+            av_log(s->avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
                    "Imode: %i, Invert: %i\n", status>>1, status&1);
-            status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
+            status = bitplane_decoding(s->mbskip_table, &v->skip_is_raw, v);
             if (status < 0)
                 return -1;
-            av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
+            av_log(s->avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
                    "Imode: %i, Invert: %i\n", status>>1, status&1);
             v->mbmodetab       = get_bits(gb, 2);
             v->mbmode_vlc   = &ff_vc1_intfr_non4mv_mbmode_vlc[v->mbmodetab];
@@ -1288,27 +1295,27 @@  int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
             v->fourmvbptab     = get_bits(gb, 2);
             v->fourmvbp_vlc = &ff_vc1_4mv_block_pattern_vlc[v->fourmvbptab];
         } else {
-            v->mv_mode          = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
-            v->qs_last          = v->s.quarter_sample;
-            v->s.quarter_sample = (v->mv_mode == MV_PMODE_1MV);
-            v->s.mspel          = v->s.quarter_sample;
+            v->mv_mode        = get_bits1(gb) ? MV_PMODE_1MV : MV_PMODE_1MV_HPEL_BILIN;
+            v->qs_last        = s->quarter_sample;
+            s->quarter_sample = (v->mv_mode == MV_PMODE_1MV);
+            s->mspel          = s->quarter_sample;
             status              = bitplane_decoding(v->direct_mb_plane, &v->dmb_is_raw, v);
             if (status < 0)
                 return -1;
-            av_log(v->s.avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
+            av_log(s->avctx, AV_LOG_DEBUG, "MB Direct Type plane encoding: "
                    "Imode: %i, Invert: %i\n", status>>1, status&1);
-            status = bitplane_decoding(v->s.mbskip_table, &v->skip_is_raw, v);
+            status = bitplane_decoding(s->mbskip_table, &v->skip_is_raw, v);
             if (status < 0)
                 return -1;
-            av_log(v->s.avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
+            av_log(s->avctx, AV_LOG_DEBUG, "MB Skip plane encoding: "
                    "Imode: %i, Invert: %i\n", status>>1, status&1);
-            v->s.mv_table_index = get_bits(gb, 2);
+            s->mv_table_index = get_bits(gb, 2);
             v->cbptab = get_bits(gb, 2);
             v->cbpcy_vlc = &ff_vc1_cbpcy_p_vlc[v->cbptab];
         }
 
         if (v->dquant) {
-            av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
+            av_log(s->avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
             vop_dquant_decoding(v);
         }
 
@@ -1328,25 +1335,25 @@  int ff_vc1_parse_frame_header_adv(VC1Context *v, GetBitContext* gb)
 
     /* AC Syntax */
     v->c_ac_table_index = decode012(gb);
-    if (v->s.pict_type == AV_PICTURE_TYPE_I || v->s.pict_type == AV_PICTURE_TYPE_BI) {
+    if (s->pict_type == AV_PICTURE_TYPE_I || s->pict_type == AV_PICTURE_TYPE_BI) {
         v->y_ac_table_index = decode012(gb);
     }
-    else if (v->fcm != PROGRESSIVE && !v->s.quarter_sample) {
+    else if (v->fcm != PROGRESSIVE && !s->quarter_sample) {
         v->range_x <<= 1;
         v->range_y <<= 1;
     }
 
     /* DC Syntax */
-    v->s.dc_table_index = get_bits1(gb);
-    if ((v->s.pict_type == AV_PICTURE_TYPE_I || v->s.pict_type == AV_PICTURE_TYPE_BI)
+    s->dc_table_index = get_bits1(gb);
+    if ((s->pict_type == AV_PICTURE_TYPE_I || s->pict_type == AV_PICTURE_TYPE_BI)
         && v->dquant) {
-        av_log(v->s.avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
+        av_log(s->avctx, AV_LOG_DEBUG, "VOP DQuant info\n");
         vop_dquant_decoding(v);
     }
 
-    v->bi_type = (v->s.pict_type == AV_PICTURE_TYPE_BI);
+    v->bi_type = (s->pict_type == AV_PICTURE_TYPE_BI);
     if (v->bi_type)
-        v->s.pict_type = AV_PICTURE_TYPE_B;
+        s->pict_type = AV_PICTURE_TYPE_B;
 
     return 0;
 }
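
[Editor's note] For readers following the rename pattern above: every former v->s.<field> access now goes through the embedded slice context, either via an existing local s or a newly introduced MPVDecContext *const s = &v->s.s;. Below is a minimal sketch of the nesting this relies on; the struct names come from the diff, but the field layout shown here is a simplified assumption for illustration, not the actual mpegvideo.h definitions.

typedef struct MPVDecContext {
    /* per-slice decoder state (placeholder subset only) */
    void *avctx;
    int   quarter_sample;
    int   mspel;
    int   pict_type;
} MPVDecContext;

typedef struct MPVMainDecContext {
    MPVDecContext s;      /* the main thread's slice context comes first */
    /* ... state that only the frame-level ("main") context needs ... */
} MPVMainDecContext;

typedef struct VC1Context {
    MPVMainDecContext s;  /* hence slice fields are reached as v->s.s.<field> */
    /* ... VC1-specific state ... */
} VC1Context;

/* old access:  v->s.quarter_sample
 * new access:  MPVDecContext *const s = &v->s.s;  ...  s->quarter_sample */
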
diff --git a/libavcodec/vc1_block.c b/libavcodec/vc1_block.c
index 65c0398842..081c47894f 100644
--- a/libavcodec/vc1_block.c
+++ b/libavcodec/vc1_block.c
@@ -59,7 +59,7 @@  static const int block_map[6] = {0, 2, 1, 3, 4, 5};
 
 static inline void init_block_index(VC1Context *v)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     ff_init_block_index(s);
     if (v->field_mode && !(v->second_field ^ v->tff)) {
         s->dest[0] += s->current_picture_ptr->f->linesize[0];
@@ -72,7 +72,7 @@  static inline void init_block_index(VC1Context *v)
 
 static void vc1_put_blocks_clamped(VC1Context *v, int put_signed)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     uint8_t *dest;
     int block_count = CONFIG_GRAY && (s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 4 : 6;
     int fieldtx = 0;
@@ -209,7 +209,7 @@  static void vc1_put_blocks_clamped(VC1Context *v, int put_signed)
             s->mb_y == ((s->mb_height >> v->field_mode) - 1))  \
             mquant = -v->altpq;                                \
         if (!mquant || mquant > 31 || mquant < -31) {                          \
-            av_log(v->s.avctx, AV_LOG_ERROR,                   \
+            av_log(s->avctx, AV_LOG_ERROR,                     \
                    "Overriding invalid mquant %d\n", mquant);  \
             mquant = 1;                                        \
         }                                                      \
@@ -265,7 +265,7 @@  static av_always_inline void get_mvdata_interlaced(VC1Context *v, int *dmv_x,
 {
     int index, index1;
     int extend_x, extend_y;
-    GetBitContext *gb = &v->s.gb;
+    GetBitContext *const gb = &v->s.s.gb;
     int bits, esc;
     int val, sign;
 
@@ -512,7 +512,8 @@  static inline int vc1_coded_block_pred(MPVDecContext * s, int n,
 static int vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
                                 int *value, int codingset)
 {
-    GetBitContext *gb = &v->s.gb;
+    MPVDecContext *const s = &v->s.s;
+    GetBitContext *gb = &s->gb;
     int index, run, level, lst, sign;
 
     index = get_vlc2(gb, ff_vc1_ac_coeff_table[codingset].table, AC_VLC_BITS, 3);
@@ -546,19 +547,19 @@  static int vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
             sign = get_bits1(gb);
         } else {
             lst = get_bits1(gb);
-            if (v->s.esc3_level_length == 0) {
+            if (s->esc3_level_length == 0) {
                 if (v->pq < 8 || v->dquantfrm) { // table 59
-                    v->s.esc3_level_length = get_bits(gb, 3);
-                    if (!v->s.esc3_level_length)
-                        v->s.esc3_level_length = get_bits(gb, 2) + 8;
+                    s->esc3_level_length = get_bits(gb, 3);
+                    if (!s->esc3_level_length)
+                        s->esc3_level_length = get_bits(gb, 2) + 8;
                 } else { // table 60
-                    v->s.esc3_level_length = get_unary(gb, 1, 6) + 2;
+                    s->esc3_level_length = get_unary(gb, 1, 6) + 2;
                 }
-                v->s.esc3_run_length = 3 + get_bits(gb, 2);
+                s->esc3_run_length = 3 + get_bits(gb, 2);
             }
-            run   = get_bits(gb, v->s.esc3_run_length);
+            run   = get_bits(gb, s->esc3_run_length);
             sign  = get_bits1(gb);
-            level = get_bits(gb, v->s.esc3_level_length);
+            level = get_bits(gb, s->esc3_level_length);
         }
     }
 
@@ -579,8 +580,8 @@  static int vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
 static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
                               int coded, int codingset)
 {
-    MPVDecContext *const s = &v->s;
-    GetBitContext *gb = &v->s.gb;
+    MPVDecContext *const s = &v->s.s;
+    GetBitContext *gb = &s->gb;
     int dc_pred_dir = 0; /* Direction of the DC prediction used */
     int i;
     int16_t *dc_val;
@@ -606,7 +607,7 @@  static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
     }
 
     /* Prediction */
-    dcdiff += vc1_i_pred_dc(&v->s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
+    dcdiff += vc1_i_pred_dc(s, v->overlap, v->pq, n, &dc_val, &dc_pred_dir);
     *dc_val = dcdiff;
 
     /* Store the quantized DC coeff, used for prediction */
@@ -633,7 +634,7 @@  static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
         const uint8_t *zz_table;
         int k;
 
-        if (v->s.ac_pred) {
+        if (s->ac_pred) {
             if (!dc_pred_dir)
                 zz_table = v->zz_8x8[2];
             else
@@ -717,8 +718,8 @@  static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
 static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
                                   int coded, int codingset, int mquant)
 {
-    MPVDecContext *const s = &v->s;
-    GetBitContext *gb = &v->s.gb;
+    MPVDecContext *const s = &v->s.s;
+    GetBitContext *gb = &s->gb;
     int dc_pred_dir = 0; /* Direction of the DC prediction used */
     int i;
     int16_t *dc_val = NULL;
@@ -750,7 +751,8 @@  static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
     }
 
     /* Prediction */
-    dcdiff += ff_vc1_pred_dc(&v->s, v->overlap, quant, n, v->a_avail, v->c_avail, &dc_val, &dc_pred_dir);
+    dcdiff += ff_vc1_pred_dc(s, v->overlap, quant, n, v->a_avail,
+                             v->c_avail, &dc_val, &dc_pred_dir);
     *dc_val = dcdiff;
 
     /* Store the quantized DC coeff, used for prediction */
@@ -796,7 +798,7 @@  static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
         const uint8_t *zz_table;
         int k;
 
-        if (v->s.ac_pred) {
+        if (s->ac_pred) {
             if (!use_pred && v->fcm == ILACE_FRAME) {
                 zz_table = v->zzi_8x8;
             } else {
@@ -908,8 +910,8 @@  static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
 static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
                                   int coded, int mquant, int codingset)
 {
-    MPVDecContext *const s = &v->s;
-    GetBitContext *gb = &v->s.gb;
+    MPVDecContext *const s = &v->s.s;
+    GetBitContext *gb = &s->gb;
     int dc_pred_dir = 0; /* Direction of the DC prediction used */
     int i;
     int16_t *dc_val = NULL;
@@ -950,7 +952,8 @@  static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
     }
 
     /* Prediction */
-    dcdiff += ff_vc1_pred_dc(&v->s, v->overlap, quant, n, a_avail, c_avail, &dc_val, &dc_pred_dir);
+    dcdiff += ff_vc1_pred_dc(s, v->overlap, quant, n, a_avail,
+                             c_avail, &dc_val, &dc_pred_dir);
     *dc_val = dcdiff;
 
     /* Store the quantized DC coeff, used for prediction */
@@ -1117,7 +1120,7 @@  static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
                               uint8_t *dst, int linesize, int skip_block,
                               int *ttmb_out)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     GetBitContext *gb = &s->gb;
     int i, j;
     int subblkpat = 0;
@@ -1286,7 +1289,7 @@  static const uint8_t size_table[6] = { 0, 2, 3, 4,  5,  8 };
  */
 static int vc1_decode_p_mb(VC1Context *v)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     GetBitContext *gb = &s->gb;
     int i, j;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
@@ -1312,7 +1315,7 @@  static int vc1_decode_p_mb(VC1Context *v)
     if (v->skip_is_raw)
         skipped = get_bits1(gb);
     else
-        skipped = v->s.mbskip_table[mb_pos];
+        skipped = s->mbskip_table[mb_pos];
 
     if (!fourmv) { /* 1MV mode */
         if (!skipped) {
@@ -1333,7 +1336,7 @@  static int vc1_decode_p_mb(VC1Context *v)
             } else if (mb_has_coeffs) {
                 if (s->mb_intra)
                     s->ac_pred = get_bits1(gb);
-                cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
+                cbp = get_vlc2(gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
                 GET_MQUANT();
             } else {
                 mquant = v->pq;
@@ -1398,7 +1401,7 @@  static int vc1_decode_p_mb(VC1Context *v)
             int intra_count = 0, coded_inter = 0;
             int is_intra[6], is_coded[6];
             /* Get CBPCY */
-            cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
+            cbp = get_vlc2(gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
             for (i = 0; i < 6; i++) {
                 val = ((cbp >> (5 - i)) & 1);
                 s->dc_val[0][s->block_index[i]] = 0;
@@ -1518,7 +1521,7 @@  end:
 
 static int vc1_decode_p_mb_intfr(VC1Context *v)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     GetBitContext *gb = &s->gb;
     int i;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
@@ -1541,7 +1544,7 @@  static int vc1_decode_p_mb_intfr(VC1Context *v)
     if (v->skip_is_raw)
         skipped = get_bits1(gb);
     else
-        skipped = v->s.mbskip_table[mb_pos];
+        skipped = s->mbskip_table[mb_pos];
     if (!skipped) {
         if (v->fourmvswitch)
             idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_4MV_MBMODE_VLC_BITS, 2); // try getting this done
@@ -1588,8 +1591,8 @@  static int vc1_decode_p_mb_intfr(VC1Context *v)
             fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
             mb_has_coeffs = get_bits1(gb);
             if (mb_has_coeffs)
-                cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
-            v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
+                cbp = 1 + get_vlc2(gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
+            s->ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
             GET_MQUANT();
             s->current_picture.qscale_table[mb_pos] = mquant;
             /* Set DC scale - y and c use the same (not sure if necessary here) */
@@ -1622,7 +1625,7 @@  static int vc1_decode_p_mb_intfr(VC1Context *v)
         } else { // inter MB
             mb_has_coeffs = ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][3];
             if (mb_has_coeffs)
-                cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
+                cbp = 1 + get_vlc2(gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
             if (ff_vc1_mbmode_intfrp[v->fourmvswitch][idx_mbmode][0] == MV_PMODE_INTFR_2MV_FIELD) {
                 v->twomvbp = get_vlc2(gb, v->twomvbp_vlc->table, VC1_2MV_BLOCK_PATTERN_VLC_BITS, 1);
             } else {
@@ -1728,7 +1731,7 @@  static int vc1_decode_p_mb_intfr(VC1Context *v)
 
 static int vc1_decode_p_mb_intfi(VC1Context *v)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     GetBitContext *gb = &s->gb;
     int i;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
@@ -1759,10 +1762,10 @@  static int vc1_decode_p_mb_intfi(VC1Context *v)
         /* Set DC scale - y and c use the same (not sure if necessary here) */
         s->y_dc_scale = s->y_dc_scale_table[FFABS(mquant)];
         s->c_dc_scale = s->c_dc_scale_table[FFABS(mquant)];
-        v->s.ac_pred  = v->acpred_plane[mb_pos] = get_bits1(gb);
+        s->ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
         mb_has_coeffs = idx_mbmode & 1;
         if (mb_has_coeffs)
-            cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
+            cbp = 1 + get_vlc2(gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
         dst_idx = 0;
         for (i = 0; i < 6; i++) {
             v->a_avail = v->c_avail          = 0;
@@ -1809,7 +1812,7 @@  static int vc1_decode_p_mb_intfi(VC1Context *v)
             mb_has_coeffs = idx_mbmode & 1;
         }
         if (mb_has_coeffs)
-            cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
+            cbp = 1 + get_vlc2(gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
         if (cbp) {
             GET_MQUANT();
         }
@@ -1852,7 +1855,7 @@  static int vc1_decode_p_mb_intfi(VC1Context *v)
  */
 static int vc1_decode_b_mb(VC1Context *v)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     GetBitContext *gb = &s->gb;
     int i, j;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
@@ -1878,7 +1881,7 @@  static int vc1_decode_b_mb(VC1Context *v)
     if (v->skip_is_raw)
         skipped = get_bits1(gb);
     else
-        skipped = v->s.mbskip_table[mb_pos];
+        skipped = s->mbskip_table[mb_pos];
 
     dmv_x[0] = dmv_x[1] = dmv_y[0] = dmv_y[1] = 0;
     for (i = 0; i < 6; i++) {
@@ -1919,7 +1922,7 @@  static int vc1_decode_b_mb(VC1Context *v)
         return 0;
     }
     if (direct) {
-        cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
+        cbp = get_vlc2(gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
         GET_MQUANT();
         s->mb_intra = 0;
         s->current_picture.qscale_table[mb_pos] = mquant;
@@ -1957,7 +1960,7 @@  static int vc1_decode_b_mb(VC1Context *v)
             }
             if (s->mb_intra)
                 s->ac_pred = get_bits1(gb);
-            cbp = get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
+            cbp = get_vlc2(gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
             GET_MQUANT();
             s->current_picture.qscale_table[mb_pos] = mquant;
             if (!v->ttmbf && !s->mb_intra && mb_has_coeffs)
@@ -2010,7 +2013,7 @@  static int vc1_decode_b_mb(VC1Context *v)
  */
 static int vc1_decode_b_mb_intfi(VC1Context *v)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     GetBitContext *gb = &s->gb;
     int i, j;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
@@ -2042,10 +2045,10 @@  static int vc1_decode_b_mb_intfi(VC1Context *v)
         /* Set DC scale - y and c use the same (not sure if necessary here) */
         s->y_dc_scale = s->y_dc_scale_table[FFABS(mquant)];
         s->c_dc_scale = s->c_dc_scale_table[FFABS(mquant)];
-        v->s.ac_pred  = v->acpred_plane[mb_pos] = get_bits1(gb);
+        s->ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
         mb_has_coeffs = idx_mbmode & 1;
         if (mb_has_coeffs)
-            cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
+            cbp = 1 + get_vlc2(gb, v->cbpcy_vlc->table, VC1_ICBPCY_VLC_BITS, 2);
         dst_idx = 0;
         for (i = 0; i < 6; i++) {
             v->a_avail = v->c_avail          = 0;
@@ -2139,7 +2142,7 @@  static int vc1_decode_b_mb_intfi(VC1Context *v)
             mb_has_coeffs = idx_mbmode & 1;
         }
         if (mb_has_coeffs)
-            cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
+            cbp = 1 + get_vlc2(gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
         if (cbp) {
             GET_MQUANT();
         }
@@ -2177,7 +2180,7 @@  static int vc1_decode_b_mb_intfi(VC1Context *v)
  */
 static int vc1_decode_b_mb_intfr(VC1Context *v)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     GetBitContext *gb = &s->gb;
     int i, j;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
@@ -2202,7 +2205,7 @@  static int vc1_decode_b_mb_intfr(VC1Context *v)
     if (v->skip_is_raw)
         skipped = get_bits1(gb);
     else
-        skipped = v->s.mbskip_table[mb_pos];
+        skipped = s->mbskip_table[mb_pos];
 
     if (!skipped) {
         idx_mbmode = get_vlc2(gb, v->mbmode_vlc->table, VC1_INTFR_NON4MV_MBMODE_VLC_BITS, 2);
@@ -2233,8 +2236,8 @@  static int vc1_decode_b_mb_intfr(VC1Context *v)
         fieldtx = v->fieldtx_plane[mb_pos] = get_bits1(gb);
         mb_has_coeffs = get_bits1(gb);
         if (mb_has_coeffs)
-            cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
-        v->s.ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
+            cbp = 1 + get_vlc2(gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
+        s->ac_pred = v->acpred_plane[mb_pos] = get_bits1(gb);
         GET_MQUANT();
         s->current_picture.qscale_table[mb_pos] = mquant;
         /* Set DC scale - y and c use the same (not sure if necessary here) */
@@ -2328,7 +2331,7 @@  static int vc1_decode_b_mb_intfr(VC1Context *v)
         if (!skipped) { // inter MB
             mb_has_coeffs = ff_vc1_mbmode_intfrp[0][idx_mbmode][3];
             if (mb_has_coeffs)
-                cbp = 1 + get_vlc2(&v->s.gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
+                cbp = 1 + get_vlc2(gb, v->cbpcy_vlc->table, VC1_CBPCY_P_VLC_BITS, 2);
             if (!direct) {
                 if (bmvtype == BMV_TYPE_INTERPOLATED && twomv) {
                     v->fourmvbp = get_vlc2(gb, v->fourmvbp_vlc->table, VC1_4MV_BLOCK_PATTERN_VLC_BITS, 1);
@@ -2527,7 +2530,7 @@  static int vc1_decode_b_mb_intfr(VC1Context *v)
  */
 static void vc1_decode_i_blocks(VC1Context *v)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int k, j;
     int cbp, val;
     uint8_t *coded_val;
@@ -2581,8 +2584,8 @@  static void vc1_decode_i_blocks(VC1Context *v)
             }
 
             // do actual MB decoding and displaying
-            cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
-            v->s.ac_pred = get_bits1(&v->s.gb);
+            cbp = get_vlc2(&s->gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
+            s->ac_pred = get_bits1(&s->gb);
 
             for (k = 0; k < 6; k++) {
                 v->mb_type[0][s->block_index[k]] = 1;
@@ -2590,7 +2593,7 @@  static void vc1_decode_i_blocks(VC1Context *v)
                 val = ((cbp >> (5 - k)) & 1);
 
                 if (k < 4) {
-                    int pred   = vc1_coded_block_pred(&v->s, k, &coded_val);
+                    int pred   = vc1_coded_block_pred(s, k, &coded_val);
                     val        = val ^ pred;
                     *coded_val = val;
                 }
@@ -2618,7 +2621,7 @@  static void vc1_decode_i_blocks(VC1Context *v)
                 vc1_put_blocks_clamped(v, 0);
             }
 
-            if (v->s.loop_filter)
+            if (s->loop_filter)
                 ff_vc1_i_loop_filter(v);
 
             if (get_bits_left(&s->gb) < 0) {
@@ -2633,14 +2636,14 @@  static void vc1_decode_i_blocks(VC1Context *v)
             v->left_blk_idx = (v->left_blk_idx + 1) % (v->end_mb_x + 2);
             v->cur_blk_idx = (v->cur_blk_idx + 1) % (v->end_mb_x + 2);
         }
-        if (!v->s.loop_filter)
+        if (!s->loop_filter)
             ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
         else if (s->mb_y)
             ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
 
         s->first_slice_line = 0;
     }
-    if (v->s.loop_filter)
+    if (s->loop_filter)
         ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
 
     /* This is intentionally mb_height and not end_mb_y - unlike in advanced
@@ -2652,7 +2655,7 @@  static void vc1_decode_i_blocks(VC1Context *v)
  */
 static int vc1_decode_i_blocks_adv(VC1Context *v)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int k;
     int cbp, val;
     uint8_t *coded_val;
@@ -2716,20 +2719,20 @@  static int vc1_decode_i_blocks_adv(VC1Context *v)
 
             // do actual MB decoding and displaying
             if (v->fieldtx_is_raw)
-                v->fieldtx_plane[mb_pos] = get_bits1(&v->s.gb);
-            if (get_bits_left(&v->s.gb) <= 1) {
+                v->fieldtx_plane[mb_pos] = get_bits1(gb);
+            if (get_bits_left(gb) <= 1) {
                 ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                 return 0;
             }
 
-            cbp = get_vlc2(&v->s.gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
+            cbp = get_vlc2(gb, ff_msmp4_mb_i_vlc.table, MB_INTRA_VLC_BITS, 2);
             if (v->acpred_is_raw)
-                v->s.ac_pred = get_bits1(&v->s.gb);
+                s->ac_pred = get_bits1(gb);
             else
-                v->s.ac_pred = v->acpred_plane[mb_pos];
+                s->ac_pred = v->acpred_plane[mb_pos];
 
             if (v->condover == CONDOVER_SELECT && v->overflg_is_raw)
-                v->over_flags_plane[mb_pos] = get_bits1(&v->s.gb);
+                v->over_flags_plane[mb_pos] = get_bits1(gb);
 
             GET_MQUANT();
 
@@ -2744,7 +2747,7 @@  static int vc1_decode_i_blocks_adv(VC1Context *v)
                 val = ((cbp >> (5 - k)) & 1);
 
                 if (k < 4) {
-                    int pred   = vc1_coded_block_pred(&v->s, k, &coded_val);
+                    int pred   = vc1_coded_block_pred(s, k, &coded_val);
                     val        = val ^ pred;
                     *coded_val = val;
                 }
@@ -2764,7 +2767,7 @@  static int vc1_decode_i_blocks_adv(VC1Context *v)
             if (v->overlap && (v->pq >= 9 || v->condover != CONDOVER_NONE))
                 ff_vc1_i_overlap_filter(v);
             vc1_put_blocks_clamped(v, 1);
-            if (v->s.loop_filter)
+            if (s->loop_filter)
                 ff_vc1_i_loop_filter(v);
 
             if (get_bits_left(&s->gb) < 0) {
@@ -2779,14 +2782,14 @@  static int vc1_decode_i_blocks_adv(VC1Context *v)
             inc_blk_idx(v->left_blk_idx);
             inc_blk_idx(v->cur_blk_idx);
         }
-        if (!v->s.loop_filter)
+        if (!s->loop_filter)
             ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
         else if (s->mb_y)
             ff_mpeg_draw_horiz_band(s, (s->mb_y-1) * 16, 16);
         s->first_slice_line = 0;
     }
 
-    if (v->s.loop_filter)
+    if (s->loop_filter)
         ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
     ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                     (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
@@ -2795,7 +2798,7 @@  static int vc1_decode_i_blocks_adv(VC1Context *v)
 
 static void vc1_decode_p_blocks(VC1Context *v)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int apply_loop_filter;
 
     /* select coding mode used for VLC tables selection */
@@ -2833,7 +2836,7 @@  static void vc1_decode_p_blocks(VC1Context *v)
             ff_update_block_index(s);
 
             if (v->fcm == ILACE_FIELD || (v->fcm == PROGRESSIVE && v->mv_type_is_raw) || v->skip_is_raw)
-                if (get_bits_left(&v->s.gb) <= 1) {
+                if (get_bits_left(&s->gb) <= 1) {
                     ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                     return;
                 }
@@ -2887,7 +2890,7 @@  static void vc1_decode_p_blocks(VC1Context *v)
 
 static void vc1_decode_b_blocks(VC1Context *v)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
 
     /* select coding mode used for VLC tables selection */
     switch (v->c_ac_table_index) {
@@ -2922,22 +2925,22 @@  static void vc1_decode_b_blocks(VC1Context *v)
             ff_update_block_index(s);
 
             if (v->fcm == ILACE_FIELD || v->skip_is_raw || v->dmb_is_raw)
-                if (get_bits_left(&v->s.gb) <= 1) {
+                if (get_bits_left(&s->gb) <= 1) {
                     ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_x, s->mb_y, ER_MB_ERROR);
                     return;
                 }
 
             if (v->fcm == ILACE_FIELD) {
                 vc1_decode_b_mb_intfi(v);
-                if (v->s.loop_filter)
+                if (s->loop_filter)
                     ff_vc1_b_intfi_loop_filter(v);
             } else if (v->fcm == ILACE_FRAME) {
                 vc1_decode_b_mb_intfr(v);
-                if (v->s.loop_filter)
+                if (s->loop_filter)
                     ff_vc1_p_intfr_loop_filter(v);
             } else {
                 vc1_decode_b_mb(v);
-                if (v->s.loop_filter)
+                if (s->loop_filter)
                     ff_vc1_i_loop_filter(v);
             }
             if (get_bits_left(&s->gb) < 0 || get_bits_count(&s->gb) < 0) {
@@ -2957,13 +2960,13 @@  static void vc1_decode_b_blocks(VC1Context *v)
         memmove(v->is_intra_base,
                 v->is_intra - s->mb_stride,
                 sizeof(v->is_intra_base[0]) * 2 * s->mb_stride);
-        if (!v->s.loop_filter)
+        if (!s->loop_filter)
             ff_mpeg_draw_horiz_band(s, s->mb_y * 16, 16);
         else if (s->mb_y)
             ff_mpeg_draw_horiz_band(s, (s->mb_y - 1) * 16, 16);
         s->first_slice_line = 0;
     }
-    if (v->s.loop_filter)
+    if (s->loop_filter)
         ff_mpeg_draw_horiz_band(s, (s->end_mb_y - 1) * 16, 16);
     ff_er_add_slice(&s->er, 0, s->start_mb_y << v->field_mode, s->mb_width - 1,
                     (s->end_mb_y << v->field_mode) - 1, ER_MB_END);
@@ -2971,9 +2974,9 @@  static void vc1_decode_b_blocks(VC1Context *v)
 
 static void vc1_decode_skip_blocks(VC1Context *v)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
 
-    if (!v->s.last_picture.f->data[0])
+    if (!s->last_picture.f->data[0])
         return;
 
     ff_er_add_slice(&s->er, 0, s->start_mb_y, s->mb_width - 1, s->end_mb_y - 1, ER_MB_END);
@@ -2993,23 +2996,24 @@  static void vc1_decode_skip_blocks(VC1Context *v)
 
 void ff_vc1_decode_blocks(VC1Context *v)
 {
+    MPVDecContext *const s = &v->s.s;
 
-    v->s.esc3_level_length = 0;
+    s->esc3_level_length = 0;
     if (v->x8_type) {
-        ff_intrax8_decode_picture(&v->x8, &v->s.current_picture,
-                                  &v->s.gb, &v->s.mb_x, &v->s.mb_y,
+        ff_intrax8_decode_picture(&v->x8, &s->current_picture,
+                                  &s->gb, &s->mb_x, &s->mb_y,
                                   2 * v->pq + v->halfpq, v->pq * !v->pquantizer,
-                                  v->s.loop_filter, v->s.low_delay);
+                                  s->loop_filter, s->low_delay);
 
-        ff_er_add_slice(&v->s.er, 0, 0,
-                        (v->s.mb_x >> 1) - 1, (v->s.mb_y >> 1) - 1,
+        ff_er_add_slice(&s->er, 0, 0,
+                        (s->mb_x >> 1) - 1, (s->mb_y >> 1) - 1,
                         ER_MB_END);
     } else {
         v->cur_blk_idx     =  0;
         v->left_blk_idx    = -1;
         v->topleft_blk_idx =  1;
         v->top_blk_idx     =  2;
-        switch (v->s.pict_type) {
+        switch (s->pict_type) {
         case AV_PICTURE_TYPE_I:
             if (v->profile == PROFILE_ADVANCED)
                 vc1_decode_i_blocks_adv(v);
diff --git a/libavcodec/vc1_loopfilter.c b/libavcodec/vc1_loopfilter.c
index 3eb2feea36..d664392fc4 100644
--- a/libavcodec/vc1_loopfilter.c
+++ b/libavcodec/vc1_loopfilter.c
@@ -104,7 +104,7 @@  static av_always_inline void vc1_v_overlap_filter(VC1Context *v, int16_t (*top_b
 
 void ff_vc1_i_overlap_filter(VC1Context *v)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int16_t (*topleft_blk)[64], (*top_blk)[64], (*left_blk)[64], (*cur_blk)[64];
     int block_count = CONFIG_GRAY && (s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 4 : 6;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
@@ -160,7 +160,7 @@  void ff_vc1_i_overlap_filter(VC1Context *v)
 
 void ff_vc1_p_overlap_filter(VC1Context *v)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int16_t (*topleft_blk)[64], (*top_blk)[64], (*left_blk)[64], (*cur_blk)[64];
     int block_count = CONFIG_GRAY && (s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 4 : 6;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
@@ -206,7 +206,7 @@  void ff_vc1_p_overlap_filter(VC1Context *v)
 static av_always_inline void vc1_i_h_loop_filter(VC1Context *v, uint8_t *dest,
                                                  uint32_t flags, int block_num)
 {
-    MPVDecContext *const s  = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int pq = v->pq;
     uint8_t *dst;
 
@@ -239,7 +239,7 @@  static av_always_inline void vc1_i_v_loop_filter(VC1Context *v, uint8_t *dest,
                                                  uint32_t flags, uint8_t fieldtx,
                                                  int block_num)
 {
-    MPVDecContext *const s  = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int pq = v->pq;
     uint8_t *dst;
 
@@ -270,7 +270,7 @@  static av_always_inline void vc1_i_v_loop_filter(VC1Context *v, uint8_t *dest,
 
 void ff_vc1_i_loop_filter(VC1Context *v)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int block_count = CONFIG_GRAY && (s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 4 : 6;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
     uint8_t *dest, fieldtx;
@@ -364,7 +364,7 @@  static av_always_inline void vc1_p_h_loop_filter(VC1Context *v, uint8_t *dest, u
                                                  uint8_t *is_intra, int16_t (*mv)[2], uint8_t *mv_f,
                                                  int *ttblk, uint32_t flags, int block_num)
 {
-    MPVDecContext *const s  = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int pq = v->pq;
     uint32_t left_cbp = cbp[0] >> (block_num * 4), right_cbp;
     uint8_t left_is_intra, right_is_intra;
@@ -417,7 +417,7 @@  static av_always_inline void vc1_p_v_loop_filter(VC1Context *v, uint8_t *dest, u
                                                  uint8_t *is_intra, int16_t (*mv)[2], uint8_t *mv_f,
                                                  int *ttblk, uint32_t flags, int block_num)
 {
-    MPVDecContext *const s  = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int pq = v->pq;
     uint32_t top_cbp = cbp[0] >> (block_num * 4), bottom_cbp;
     uint8_t top_is_intra, bottom_is_intra;
@@ -469,7 +469,7 @@  static av_always_inline void vc1_p_v_loop_filter(VC1Context *v, uint8_t *dest, u
 
 void ff_vc1_p_loop_filter(VC1Context *v)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int block_count = CONFIG_GRAY && (s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 4 : 6;
     uint8_t *dest;
     uint32_t *cbp;
@@ -802,7 +802,7 @@  void ff_vc1_p_loop_filter(VC1Context *v)
 static av_always_inline void vc1_p_h_intfr_loop_filter(VC1Context *v, uint8_t *dest, int *ttblk,
                                                        uint32_t flags, uint8_t fieldtx, int block_num)
 {
-    MPVDecContext *const s  = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int pq = v->pq;
     int tt;
     int linesize  = block_num > 3 ? s->uvlinesize : s->linesize;
@@ -852,7 +852,7 @@  static av_always_inline void vc1_p_h_intfr_loop_filter(VC1Context *v, uint8_t *d
 static av_always_inline void vc1_p_v_intfr_loop_filter(VC1Context *v, uint8_t *dest, int *ttblk,
                                                        uint32_t flags, uint8_t fieldtx, int block_num)
 {
-    MPVDecContext *const s  = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int pq = v->pq;
     int tt;
     int linesize  = block_num > 3 ? s->uvlinesize : s->linesize;
@@ -908,7 +908,7 @@  static av_always_inline void vc1_p_v_intfr_loop_filter(VC1Context *v, uint8_t *d
 
 void ff_vc1_p_intfr_loop_filter(VC1Context *v)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int block_count = CONFIG_GRAY && (s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 4 : 6;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
     uint8_t *dest;
@@ -1112,7 +1112,7 @@  void ff_vc1_p_intfr_loop_filter(VC1Context *v)
 static av_always_inline void vc1_b_h_intfi_loop_filter(VC1Context *v, uint8_t *dest, uint32_t *cbp,
                                                        int *ttblk, uint32_t flags, int block_num)
 {
-    MPVDecContext *const s  = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int pq = v->pq;
     uint8_t *dst;
     uint32_t block_cbp = cbp[0] >> (block_num * 4);
@@ -1144,7 +1144,7 @@  static av_always_inline void vc1_b_h_intfi_loop_filter(VC1Context *v, uint8_t *d
 static av_always_inline void vc1_b_v_intfi_loop_filter(VC1Context *v, uint8_t *dest, uint32_t *cbp,
                                                        int *ttblk, uint32_t flags, int block_num)
 {
-    MPVDecContext *const s  = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int pq = v->pq;
     uint8_t *dst;
     uint32_t block_cbp = cbp[0] >> (block_num * 4);
@@ -1171,7 +1171,7 @@  static av_always_inline void vc1_b_v_intfi_loop_filter(VC1Context *v, uint8_t *d
 
 void ff_vc1_b_intfi_loop_filter(VC1Context *v)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int block_count = CONFIG_GRAY && (s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 4 : 6;
     uint8_t *dest;
     uint32_t *cbp;
diff --git a/libavcodec/vc1_mc.c b/libavcodec/vc1_mc.c
index 3e3bf27552..7942f6f373 100644
--- a/libavcodec/vc1_mc.c
+++ b/libavcodec/vc1_mc.c
@@ -107,7 +107,7 @@  static const uint8_t popcount4[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3,
 
 static av_always_inline int get_luma_mv(VC1Context *v, int dir, int16_t *tx, int16_t *ty)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int idx = v->mv_f[dir][s->block_index[0] + v->blocks_off] |
              (v->mv_f[dir][s->block_index[1] + v->blocks_off] << 1) |
              (v->mv_f[dir][s->block_index[2] + v->blocks_off] << 2) |
@@ -139,7 +139,7 @@  static av_always_inline int get_luma_mv(VC1Context *v, int dir, int16_t *tx, int
 
 static av_always_inline int get_chroma_mv(VC1Context *v, int dir, int16_t *tx, int16_t *ty)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int idx = !v->mb_type[0][s->block_index[0]] |
              (!v->mb_type[0][s->block_index[1]] << 1) |
              (!v->mb_type[0][s->block_index[2]] << 2) |
@@ -171,7 +171,7 @@  static av_always_inline int get_chroma_mv(VC1Context *v, int dir, int16_t *tx, i
  */
 void ff_vc1_mc_1mv(VC1Context *v, int dir)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     H264ChromaContext *h264chroma = &v->h264chroma;
     uint8_t *srcY, *srcU, *srcV;
     int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
@@ -184,7 +184,7 @@  void ff_vc1_mc_1mv(VC1Context *v, int dir)
 
     if ((!v->field_mode ||
          (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
-        !v->s.last_picture.f->data[0])
+        !s->last_picture.f->data[0])
         return;
 
     linesize = s->current_picture_ptr->f->linesize[0];
@@ -246,7 +246,7 @@  void ff_vc1_mc_1mv(VC1Context *v, int dir)
     }
 
     if (!srcY || !srcU) {
-        av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
+        av_log(s->avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
         return;
     }
 
@@ -451,7 +451,7 @@  void ff_vc1_mc_1mv(VC1Context *v, int dir)
  */
 void ff_vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     uint8_t *srcY;
     int dxy, mx, my, src_x, src_y;
     int off;
@@ -464,7 +464,7 @@  void ff_vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
 
     if ((!v->field_mode ||
          (v->ref_field_type[dir] == 1 && v->cur_field_type == 1)) &&
-        !v->s.last_picture.f->data[0])
+        !s->last_picture.f->data[0])
         return;
 
     linesize = s->current_picture_ptr->f->linesize[0];
@@ -492,7 +492,7 @@  void ff_vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
     }
 
     if (!srcY) {
-        av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
+        av_log(s->avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
         return;
     }
 
@@ -633,7 +633,7 @@  void ff_vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
  */
 void ff_vc1_mc_4mv_chroma(VC1Context *v, int dir)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     H264ChromaContext *h264chroma = &v->h264chroma;
     uint8_t *srcU, *srcV;
     int uvmx, uvmy, uvsrc_x, uvsrc_y;
@@ -645,7 +645,7 @@  void ff_vc1_mc_4mv_chroma(VC1Context *v, int dir)
     int interlace;
     int uvlinesize;
 
-    if (!v->field_mode && !v->s.last_picture.f->data[0])
+    if (!v->field_mode && !s->last_picture.f->data[0])
         return;
     if (CONFIG_GRAY && s->avctx->flags & AV_CODEC_FLAG_GRAY)
         return;
@@ -664,7 +664,7 @@  void ff_vc1_mc_4mv_chroma(VC1Context *v, int dir)
         int opp_count = get_luma_mv(v, dir, &tx, &ty);
         chroma_ref_type = v->cur_field_type ^ (opp_count > 2);
     }
-    if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !v->s.last_picture.f->data[0])
+    if (v->field_mode && chroma_ref_type == 1 && v->cur_field_type == 1 && !s->last_picture.f->data[0])
         return;
     s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][0] = tx;
     s->current_picture.motion_val[1][s->block_index[0] + v->blocks_off][1] = ty;
@@ -719,7 +719,7 @@  void ff_vc1_mc_4mv_chroma(VC1Context *v, int dir)
     }
 
     if (!srcU) {
-        av_log(v->s.avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
+        av_log(s->avctx, AV_LOG_ERROR, "Referenced frame missing.\n");
         return;
     }
 
@@ -838,7 +838,7 @@  void ff_vc1_mc_4mv_chroma(VC1Context *v, int dir)
  */
 void ff_vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     H264ChromaContext *h264chroma = &v->h264chroma;
     uint8_t *srcU, *srcV;
     int uvsrc_x, uvsrc_y;
@@ -1003,7 +1003,7 @@  void ff_vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
  */
 void ff_vc1_interp_mc(VC1Context *v)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     H264ChromaContext *h264chroma = &v->h264chroma;
     uint8_t *srcY, *srcU, *srcV;
     int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
@@ -1012,7 +1012,7 @@  void ff_vc1_interp_mc(VC1Context *v)
     int interlace;
     int linesize, uvlinesize;
 
-    if (!v->field_mode && !v->s.next_picture.f->data[0])
+    if (!v->field_mode && !s->next_picture.f->data[0])
         return;
 
     linesize = s->current_picture_ptr->f->linesize[0];
diff --git a/libavcodec/vc1_parser.c b/libavcodec/vc1_parser.c
index 00896ddb09..b320826e5b 100644
--- a/libavcodec/vc1_parser.c
+++ b/libavcodec/vc1_parser.c
@@ -65,7 +65,7 @@  static void vc1_extract_header(AVCodecParserContext *s, AVCodecContext *avctx,
     VC1ParseContext *vpc = s->priv_data;
     GetBitContext gb;
     int ret;
-    vpc->v.s.avctx = avctx;
+    vpc->v.s.s.avctx = avctx;
     vpc->v.parse_only = 1;
     init_get_bits8(&gb, buf, buf_size);
     switch (vpc->prev_start_code) {
@@ -85,10 +85,10 @@  static void vc1_extract_header(AVCodecParserContext *s, AVCodecContext *avctx,
             break;
 
         /* keep AV_PICTURE_TYPE_BI internal to VC1 */
-        if (vpc->v.s.pict_type == AV_PICTURE_TYPE_BI)
+        if (vpc->v.s.s.pict_type == AV_PICTURE_TYPE_BI)
             s->pict_type = AV_PICTURE_TYPE_B;
         else
-            s->pict_type = vpc->v.s.pict_type;
+            s->pict_type = vpc->v.s.s.pict_type;
 
         if (avctx->ticks_per_frame > 1){
             // process pulldown flags
@@ -259,7 +259,7 @@  static int vc1_parse(AVCodecParserContext *s,
 static av_cold int vc1_parse_init(AVCodecParserContext *s)
 {
     VC1ParseContext *vpc = s->priv_data;
-    vpc->v.s.slice_context_count = 1;
+    vpc->v.s.s.slice_context_count = 1;
     vpc->v.first_pic_header_flag = 1;
     vpc->prev_start_code = 0;
     vpc->bytes_to_skip = 0;
diff --git a/libavcodec/vc1_pred.c b/libavcodec/vc1_pred.c
index 75240e239a..2924aff9e6 100644
--- a/libavcodec/vc1_pred.c
+++ b/libavcodec/vc1_pred.c
@@ -40,7 +40,7 @@  static av_always_inline int scaleforsame_x(VC1Context *v, int n /* MV */, int di
     int scalezone1_x, zone1offset_x;
     int table_index = dir ^ v->second_field;
 
-    if (v->s.pict_type != AV_PICTURE_TYPE_B)
+    if (v->s.s.pict_type != AV_PICTURE_TYPE_B)
         refdist = v->refdist;
     else
         refdist = dir ? v->brfd : v->frfd;
@@ -73,7 +73,7 @@  static av_always_inline int scaleforsame_y(VC1Context *v, int i, int n /* MV */,
     int scalezone1_y, zone1offset_y;
     int table_index = dir ^ v->second_field;
 
-    if (v->s.pict_type != AV_PICTURE_TYPE_B)
+    if (v->s.s.pict_type != AV_PICTURE_TYPE_B)
         refdist = v->refdist;
     else
         refdist = dir ? v->brfd : v->frfd;
@@ -165,10 +165,10 @@  static av_always_inline int scaleforsame(VC1Context *v, int i, int n /* MV */,
                                          int dim, int dir)
 {
     int brfd, scalesame;
-    int hpel = 1 - v->s.quarter_sample;
+    int hpel = 1 - v->s.s.quarter_sample;
 
     n >>= hpel;
-    if (v->s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
+    if (v->s.s.pict_type != AV_PICTURE_TYPE_B || v->second_field || !dir) {
         if (dim)
             n = scaleforsame_y(v, i, n, dir) * (1 << hpel);
         else
@@ -186,17 +186,17 @@  static av_always_inline int scaleforopp(VC1Context *v, int n /* MV */,
                                         int dim, int dir)
 {
     int refdist, scaleopp;
-    int hpel = 1 - v->s.quarter_sample;
+    int hpel = 1 - v->s.s.quarter_sample;
 
     n >>= hpel;
-    if (v->s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
+    if (v->s.s.pict_type == AV_PICTURE_TYPE_B && !v->second_field && dir == 1) {
         if (dim)
             n = scaleforopp_y(v, n, dir) * (1 << hpel);
         else
             n = scaleforopp_x(v, n)      * (1 << hpel);
         return n;
     }
-    if (v->s.pict_type != AV_PICTURE_TYPE_B)
+    if (v->s.s.pict_type != AV_PICTURE_TYPE_B)
         refdist = v->refdist;
     else
         refdist = dir ? v->brfd : v->frfd;
@@ -213,7 +213,7 @@  void ff_vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
                     int mv1, int r_x, int r_y, uint8_t* is_intra,
                     int pred_flag, int dir)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int xy, wrap, off = 0;
     int16_t *A, *B, *C;
     int px, py;
@@ -470,7 +470,7 @@  void ff_vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
 void ff_vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
                           int mvn, int r_x, int r_y, int dir)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int xy, wrap, off = 0;
     int A[2], B[2], C[2];
     int px = 0, py = 0;
@@ -691,7 +691,7 @@  void ff_vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
 void ff_vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
                       int direct, int mvtype)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int xy, wrap, off = 0;
     int16_t *A, *B, *C;
     int px, py;
@@ -892,7 +892,7 @@  void ff_vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y,
                             int mv1, int *pred_flag)
 {
     int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
 
     if (v->bmvtype == BMV_TYPE_DIRECT) {
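
[Editor's note] The vc1dec.c hunks below make the split explicit: frame-level helpers such as ff_mpv_frame_start(), ff_mpv_common_end() and ff_mpeg_er_frame_start() now receive the main context m, while per-slice state keeps being read through s. A compressed, self-contained sketch of that calling pattern, using simplified placeholder types and a stub in place of the real frame-start helper:

#include <stdio.h>

/* Placeholder types; the real ones live in libavcodec/mpegvideo.h. */
typedef struct MPVDecContext     { int pict_type; } MPVDecContext;
typedef struct MPVMainDecContext { MPVDecContext s; } MPVMainDecContext;
typedef struct VC1Context        { MPVMainDecContext s; } VC1Context;

/* Stub standing in for a frame-level helper that now takes the main context. */
static int frame_start(MPVMainDecContext *m)
{
    (void)m;
    return 0;
}

static int decode_frame_sketch(VC1Context *v)
{
    MPVMainDecContext *const m = &v->s;   /* frame-level ("main") context    */
    MPVDecContext     *const s = &m->s;   /* slice-level context, == &v->s.s */

    if (frame_start(m) < 0)               /* main-context APIs take m ...    */
        return -1;

    printf("pict_type: %d\n", s->pict_type); /* ... slice fields stay on s   */
    return 0;
}

int main(void)
{
    VC1Context v = { .s.s.pict_type = 1 };
    return decode_frame_sketch(&v);
}
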
diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c
index 6945042450..502ea2bae4 100644
--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@ -104,7 +104,7 @@  static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
 
 static int vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
 {
-    AVCodecContext *avctx = v->s.avctx;
+    AVCodecContext *avctx = v->s.s.avctx;
     int sprite, i;
 
     for (sprite = 0; sprite <= v->two_sprites; sprite++) {
@@ -175,7 +175,7 @@  static int vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
 
 static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int i, plane, row, sprite;
     int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
     uint8_t* src_h[2][2];
@@ -272,7 +272,7 @@  static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
 
 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
 {
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int ret;
     AVCodecContext *avctx = s->avctx;
     SpriteData sd;
@@ -305,7 +305,7 @@  static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
 static void vc1_sprite_flush(AVCodecContext *avctx)
 {
     VC1Context *v     = avctx->priv_data;
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     AVFrame *f = s->current_picture.f;
     int plane, i;
 
@@ -324,7 +324,7 @@  static void vc1_sprite_flush(AVCodecContext *avctx)
 
 av_cold int ff_vc1_decode_init_alloc_tables(VC1Context *v)
 {
-    MPVMainDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     int i, ret = AVERROR(ENOMEM);
     int mb_height = FFALIGN(s->mb_height, 2);
 
@@ -423,7 +423,7 @@  av_cold void ff_vc1_init_transposed_scantables(VC1Context *v)
 static av_cold int vc1_decode_init(AVCodecContext *avctx)
 {
     VC1Context *v = avctx->priv_data;
-    MPVMainDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     GetBitContext gb;
     int ret;
 
@@ -433,7 +433,7 @@  static av_cold int vc1_decode_init(AVCodecContext *avctx)
 
     if (!avctx->extradata_size || !avctx->extradata)
         return AVERROR_INVALIDDATA;
-    v->s.avctx = avctx;
+    s->avctx = avctx;
 
     ff_vc1_init_common(v);
 
@@ -625,7 +625,8 @@  static int vc1_decode_frame(AVCodecContext *avctx, void *data,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size, n_slices = 0, i, ret;
     VC1Context *v = avctx->priv_data;
-    MPVMainDecContext *const s = &v->s;
+    MPVMainDecContext *const m = &v->s;
+    MPVDecContext *const s = &m->s;
     AVFrame *pict = data;
     uint8_t *buf2 = NULL;
     const uint8_t *buf_start = buf, *buf_start_second_field = NULL;
@@ -806,7 +807,7 @@  static int vc1_decode_frame(AVCodecContext *avctx, void *data,
         if ((ret = ff_msmpeg4_decode_init(avctx)) < 0)
             goto err;
         if ((ret = ff_vc1_decode_init_alloc_tables(v)) < 0) {
-            ff_mpv_common_end(s);
+            ff_mpv_common_end(m);
             goto err;
         }
 
@@ -837,22 +838,22 @@  static int vc1_decode_frame(AVCodecContext *avctx, void *data,
     v->first_pic_header_flag = 0;
 
     if (avctx->debug & FF_DEBUG_PICT_INFO)
-        av_log(v->s.avctx, AV_LOG_DEBUG, "pict_type: %c\n", av_get_picture_type_char(s->pict_type));
+        av_log(s->avctx, AV_LOG_DEBUG, "pict_type: %c\n", av_get_picture_type_char(s->pict_type));
 
     if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
         && s->pict_type != AV_PICTURE_TYPE_I) {
-        av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
+        av_log(s->avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
         ret = AVERROR_INVALIDDATA;
         goto err;
     }
     if ((avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE)
         && v->field_mode) {
-        av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected Frames not Fields\n");
+        av_log(s->avctx, AV_LOG_ERROR, "Sprite decoder: expected Frames not Fields\n");
         ret = AVERROR_INVALIDDATA;
         goto err;
     }
     if ((s->mb_height >> v->field_mode) == 0) {
-        av_log(v->s.avctx, AV_LOG_ERROR, "image too short\n");
+        av_log(s->avctx, AV_LOG_ERROR, "image too short\n");
         ret = AVERROR_INVALIDDATA;
         goto err;
     }
@@ -863,7 +864,7 @@  static int vc1_decode_frame(AVCodecContext *avctx, void *data,
 
     /* skip B-frames if we don't have reference frames */
     if (!s->last_picture_ptr && s->pict_type == AV_PICTURE_TYPE_B) {
-        av_log(v->s.avctx, AV_LOG_DEBUG, "Skipping B frame without reference frames\n");
+        av_log(s->avctx, AV_LOG_DEBUG, "Skipping B frame without reference frames\n");
         goto end;
     }
     if ((avctx->skip_frame >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B) ||
@@ -872,13 +873,13 @@  static int vc1_decode_frame(AVCodecContext *avctx, void *data,
         goto end;
     }
 
-    if ((ret = ff_mpv_frame_start(s, avctx)) < 0) {
+    if ((ret = ff_mpv_frame_start(m, avctx)) < 0) {
         goto err;
     }
 
-    v->s.current_picture_ptr->field_picture = v->field_mode;
-    v->s.current_picture_ptr->f->interlaced_frame = (v->fcm != PROGRESSIVE);
-    v->s.current_picture_ptr->f->top_field_first  = v->tff;
+    s->current_picture_ptr->field_picture = v->field_mode;
+    s->current_picture_ptr->f->interlaced_frame = (v->fcm != PROGRESSIVE);
+    s->current_picture_ptr->f->top_field_first  = v->tff;
 
     // process pulldown flags
     s->current_picture_ptr->f->repeat_pict = 0;
@@ -918,7 +919,7 @@  static int vc1_decode_frame(AVCodecContext *avctx, void *data,
                     v->pic_header_flag = get_bits1(&s->gb);
                     if (v->pic_header_flag) {
                         if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
-                            av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
+                            av_log(s->avctx, AV_LOG_ERROR, "Slice header damaged\n");
                             ret = AVERROR_INVALIDDATA;
                             if (avctx->err_recognition & AV_EF_EXPLODE)
                                 goto err;
@@ -945,7 +946,7 @@  static int vc1_decode_frame(AVCodecContext *avctx, void *data,
                 ret = AVERROR_INVALIDDATA;
                 goto err;
             }
-            v->s.current_picture_ptr->f->pict_type = v->s.pict_type;
+            s->current_picture_ptr->f->pict_type = s->pict_type;
 
             if ((ret = avctx->hwaccel->start_frame(avctx, buf_start_second_field, (buf + buf_size) - buf_start_second_field)) < 0)
                 goto err;
@@ -965,7 +966,7 @@  static int vc1_decode_frame(AVCodecContext *avctx, void *data,
                     v->pic_header_flag = get_bits1(&s->gb);
                     if (v->pic_header_flag) {
                         if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
-                            av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
+                            av_log(s->avctx, AV_LOG_ERROR, "Slice header damaged\n");
                             ret = AVERROR_INVALIDDATA;
                             if (avctx->err_recognition & AV_EF_EXPLODE)
                                 goto err;
@@ -1002,7 +1003,7 @@  static int vc1_decode_frame(AVCodecContext *avctx, void *data,
                     v->pic_header_flag = get_bits1(&s->gb);
                     if (v->pic_header_flag) {
                         if (ff_vc1_parse_frame_header_adv(v, &s->gb) < 0) {
-                            av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
+                            av_log(s->avctx, AV_LOG_ERROR, "Slice header damaged\n");
                             ret = AVERROR_INVALIDDATA;
                             if (avctx->err_recognition & AV_EF_EXPLODE)
                                 goto err;
@@ -1020,7 +1021,7 @@  static int vc1_decode_frame(AVCodecContext *avctx, void *data,
     } else {
         int header_ret = 0;
 
-        ff_mpeg_er_frame_start(s);
+        ff_mpeg_er_frame_start(m);
 
         v->end_mb_x = s->mb_width;
         if (v->field_mode) {
@@ -1037,7 +1038,7 @@  static int vc1_decode_frame(AVCodecContext *avctx, void *data,
         for (i = 0; i <= n_slices; i++) {
             if (i > 0 &&  slices[i - 1].mby_start >= mb_height) {
                 if (v->field_mode <= 0) {
-                    av_log(v->s.avctx, AV_LOG_ERROR, "Slice %d starts beyond "
+                    av_log(s->avctx, AV_LOG_ERROR, "Slice %d starts beyond "
                            "picture boundary (%d >= %d)\n", i,
                            slices[i - 1].mby_start, mb_height);
                     continue;
@@ -1055,7 +1056,7 @@  static int vc1_decode_frame(AVCodecContext *avctx, void *data,
                 v->pic_header_flag = 0;
                 if (v->field_mode && i == n_slices1 + 2) {
                     if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
-                        av_log(v->s.avctx, AV_LOG_ERROR, "Field header damaged\n");
+                        av_log(s->avctx, AV_LOG_ERROR, "Field header damaged\n");
                         ret = AVERROR_INVALIDDATA;
                         if (avctx->err_recognition & AV_EF_EXPLODE)
                             goto err;
@@ -1064,7 +1065,7 @@  static int vc1_decode_frame(AVCodecContext *avctx, void *data,
                 } else if (get_bits1(&s->gb)) {
                     v->pic_header_flag = 1;
                     if ((header_ret = ff_vc1_parse_frame_header_adv(v, &s->gb)) < 0) {
-                        av_log(v->s.avctx, AV_LOG_ERROR, "Slice header damaged\n");
+                        av_log(s->avctx, AV_LOG_ERROR, "Slice header damaged\n");
                         ret = AVERROR_INVALIDDATA;
                         if (avctx->err_recognition & AV_EF_EXPLODE)
                             goto err;
@@ -1079,19 +1080,19 @@  static int vc1_decode_frame(AVCodecContext *avctx, void *data,
                 s->end_mb_y = (i == n_slices     ) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
             else {
                 if (i >= n_slices) {
-                    av_log(v->s.avctx, AV_LOG_ERROR, "first field slice count too large\n");
+                    av_log(s->avctx, AV_LOG_ERROR, "first field slice count too large\n");
                     continue;
                 }
                 s->end_mb_y = (i == n_slices1 + 1) ? mb_height : FFMIN(mb_height, slices[i].mby_start % mb_height);
             }
             if (s->end_mb_y <= s->start_mb_y) {
-                av_log(v->s.avctx, AV_LOG_ERROR, "end mb y %d %d invalid\n", s->end_mb_y, s->start_mb_y);
+                av_log(s->avctx, AV_LOG_ERROR, "end mb y %d %d invalid\n", s->end_mb_y, s->start_mb_y);
                 continue;
             }
             if (((s->pict_type == AV_PICTURE_TYPE_P && !v->p_frame_skipped) ||
                  (s->pict_type == AV_PICTURE_TYPE_B && !v->bi_type)) &&
                 !v->cbpcy_vlc) {
-                av_log(v->s.avctx, AV_LOG_ERROR, "missing cbpcy_vlc\n");
+                av_log(s->avctx, AV_LOG_ERROR, "missing cbpcy_vlc\n");
                 continue;
             }
             ff_vc1_decode_blocks(v);
@@ -1106,7 +1107,7 @@  static int vc1_decode_frame(AVCodecContext *avctx, void *data,
             s->current_picture.f->linesize[2] >>= 1;
             s->linesize                      >>= 1;
             s->uvlinesize                    >>= 1;
-            if (v->s.pict_type != AV_PICTURE_TYPE_BI && v->s.pict_type != AV_PICTURE_TYPE_B) {
+            if (s->pict_type != AV_PICTURE_TYPE_BI && s->pict_type != AV_PICTURE_TYPE_B) {
                 FFSWAP(uint8_t *, v->mv_f_next[0], v->mv_f[0]);
                 FFSWAP(uint8_t *, v->mv_f_next[1], v->mv_f[1]);
             }
@@ -1125,7 +1126,7 @@  static int vc1_decode_frame(AVCodecContext *avctx, void *data,
             ff_er_frame_end(&s->er);
     }
 
-    ff_mpv_frame_end(s);
+    ff_mpv_frame_end(m);
 
     if (avctx->codec_id == AV_CODEC_ID_WMV3IMAGE || avctx->codec_id == AV_CODEC_ID_VC1IMAGE) {
 image:
diff --git a/libavcodec/vdpau_mpeg4.c b/libavcodec/vdpau_mpeg4.c
index 7fcbd06647..e9483168aa 100644
--- a/libavcodec/vdpau_mpeg4.c
+++ b/libavcodec/vdpau_mpeg4.c
@@ -33,7 +33,7 @@  static int vdpau_mpeg4_start_frame(AVCodecContext *avctx,
                                    const uint8_t *buffer, uint32_t size)
 {
     Mpeg4DecContext *ctx = avctx->priv_data;
-    MPVDecContext *const s = &ctx->m;
+    MPVDecContext *const s = &ctx->m.s;
     Picture *pic             = s->current_picture_ptr;
     struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
     VdpPictureInfoMPEG4Part2 *info = &pic_ctx->info.mpeg4;
diff --git a/libavcodec/vdpau_vc1.c b/libavcodec/vdpau_vc1.c
index e3aa36fbdd..f13b62f723 100644
--- a/libavcodec/vdpau_vc1.c
+++ b/libavcodec/vdpau_vc1.c
@@ -33,7 +33,7 @@  static int vdpau_vc1_start_frame(AVCodecContext *avctx,
                                  const uint8_t *buffer, uint32_t size)
 {
     VC1Context * const v  = avctx->priv_data;
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     Picture *pic          = s->current_picture_ptr;
     struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
     VdpPictureInfoVC1 *info = &pic_ctx->info.vc1;
@@ -80,7 +80,7 @@  static int vdpau_vc1_start_frame(AVCodecContext *avctx,
     info->extended_dmv      = v->extended_dmv;
     info->overlap           = v->overlap;
     info->vstransform       = v->vstransform;
-    info->loopfilter        = v->s.loop_filter;
+    info->loopfilter        = v->s.s.loop_filter;
     info->fastuvmc          = v->fastuvmc;
     info->range_mapy_flag   = v->range_mapy_flag;
     info->range_mapy        = v->range_mapy;
@@ -90,7 +90,7 @@  static int vdpau_vc1_start_frame(AVCodecContext *avctx,
     info->multires          = v->multires;
     info->syncmarker        = v->resync_marker;
     info->rangered          = v->rangered | (v->rangeredfrm << 1);
-    info->maxbframes        = v->s.max_b_frames;
+    info->maxbframes        = v->s.s.max_b_frames;
     info->deblockEnable     = v->postprocflag & 1;
     info->pquant            = v->pq;
 
@@ -101,7 +101,7 @@  static int vdpau_vc1_decode_slice(AVCodecContext *avctx,
                                   const uint8_t *buffer, uint32_t size)
 {
     VC1Context * const v  = avctx->priv_data;
-    MPVDecContext *const s = &v->s;
+    MPVDecContext *const s = &v->s.s;
     Picture *pic          = s->current_picture_ptr;
     struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
     int val;
diff --git a/libavcodec/wmv2.c b/libavcodec/wmv2.c
index 29ed71f8aa..bfb14e2da3 100644
--- a/libavcodec/wmv2.c
+++ b/libavcodec/wmv2.c
@@ -25,8 +25,9 @@ 
 #include "wmv2.h"
 
 
-av_cold void ff_wmv2_common_init(MPVMainContext *s)
+av_cold void ff_wmv2_common_init(MPVMainContext *m)
 {
+    MPVContext *const s = &m->s;
     WMV2Context *const w = s->private_ctx;
 
     ff_blockdsp_init(&s->bdsp, s->avctx);
diff --git a/libavcodec/wmv2dec.c b/libavcodec/wmv2dec.c
index 128d78aa1d..7c4dc51782 100644
--- a/libavcodec/wmv2dec.c
+++ b/libavcodec/wmv2dec.c
@@ -60,7 +60,7 @@  typedef struct WMV2DecContext {
 static void wmv2_add_block(WMV2DecContext *w, int16_t *block1,
                            uint8_t *dst, int stride, int n)
 {
-    MPVDecContext *const s = &w->s;
+    MPVDecContext *const s = &w->s.s;
 
     if (s->block_last_index[n] >= 0) {
         switch (w->abt_type_table[n]) {
@@ -102,9 +102,9 @@  void ff_wmv2_add_mb(MPVDecContext *s, int16_t block1[6][64],
 
 static int parse_mb_skip(WMV2DecContext *w)
 {
+    MPVDecContext *const s = &w->s.s;
     int mb_x, mb_y;
     int coded_mb_count = 0;
-    MPVDecContext *const s = &w->s;
     uint32_t *const mb_type = s->current_picture_ptr->mb_type;
 
     w->skip_type = get_bits(&s->gb, 2);
@@ -167,7 +167,7 @@  static int parse_mb_skip(WMV2DecContext *w)
 
 static int decode_ext_header(WMV2DecContext *w)
 {
-    MPVDecContext *const s = &w->s;
+    MPVDecContext *const s = &w->s.s;
     GetBitContext gb;
     int fps;
     int code;
@@ -203,9 +203,10 @@  static int decode_ext_header(WMV2DecContext *w)
     return 0;
 }
 
-int ff_wmv2_decode_picture_header(MPVMainDecContext *s)
+int ff_wmv2_decode_picture_header(MPVMainDecContext *m)
 {
-    WMV2DecContext *const w = (WMV2DecContext *) s;
+    WMV2DecContext *const w = (WMV2DecContext *)m;
+    MPVDecContext *const s = &m->s;
     int code;
 
     if (s->picture_number == 0)
@@ -238,9 +239,10 @@  int ff_wmv2_decode_picture_header(MPVMainDecContext *s)
     return 0;
 }
 
-int ff_wmv2_decode_secondary_picture_header(MPVMainDecContext *s)
+int ff_wmv2_decode_secondary_picture_header(MPVMainDecContext *m)
 {
-    WMV2DecContext *const w = (WMV2DecContext *) s;
+    WMV2DecContext *const w = (WMV2DecContext *)m;
+    MPVDecContext *const s = &m->s;
 
     if (s->pict_type == AV_PICTURE_TYPE_I) {
         if (w->j_type_bit)
@@ -339,8 +341,8 @@  int ff_wmv2_decode_secondary_picture_header(MPVMainDecContext *s)
                                   2 * s->qscale, (s->qscale - 1) | 1,
                                   s->loop_filter, s->low_delay);
 
-        ff_er_add_slice(&w->s.er, 0, 0,
-                        (w->s.mb_x >> 1) - 1, (w->s.mb_y >> 1) - 1,
+        ff_er_add_slice(&s->er, 0, 0,
+                        (s->mb_x >> 1) - 1, (s->mb_y >> 1) - 1,
                         ER_MB_END);
         return 1;
     }
@@ -350,7 +352,7 @@  int ff_wmv2_decode_secondary_picture_header(MPVMainDecContext *s)
 
 static inline void wmv2_decode_motion(WMV2DecContext *w, int *mx_ptr, int *my_ptr)
 {
-    MPVDecContext *const s = &w->s;
+    MPVDecContext *const s = &w->s.s;
 
     ff_msmpeg4_decode_motion(s, mx_ptr, my_ptr);
 
@@ -362,7 +364,7 @@  static inline void wmv2_decode_motion(WMV2DecContext *w, int *mx_ptr, int *my_pt
 
 static int16_t *wmv2_pred_motion(WMV2DecContext *w, int *px, int *py)
 {
-    MPVDecContext *const s = &w->s;
+    MPVDecContext *const s = &w->s.s;
     int xy, wrap, diff, type;
     int16_t *A, *B, *C, *mot_val;
 
@@ -408,7 +410,7 @@  static int16_t *wmv2_pred_motion(WMV2DecContext *w, int *px, int *py)
 static inline int wmv2_decode_inter_block(WMV2DecContext *w, int16_t *block,
                                           int n, int cbp)
 {
-    MPVDecContext *const s = &w->s;
+    MPVDecContext *const s = &w->s.s;
     static const int sub_cbp_table[3] = { 2, 3, 1 };
     int sub_cbp, ret;
 
@@ -565,7 +567,8 @@  int ff_wmv2_decode_mb(MPVDecContext *s, int16_t block[6][64])
 static av_cold int wmv2_decode_init(AVCodecContext *avctx)
 {
     WMV2DecContext *const w = avctx->priv_data;
-    MPVMainDecContext *const s = &w->s;
+    MPVMainDecContext *const m = &w->s;
+    MPVDecContext *const s = &m->s;
     int ret;
 
     s->private_ctx = &w->common;
@@ -573,15 +576,15 @@  static av_cold int wmv2_decode_init(AVCodecContext *avctx)
     if ((ret = ff_msmpeg4_decode_init(avctx)) < 0)
         return ret;
 
-    ff_wmv2_common_init(s);
+    ff_wmv2_common_init(m);
     ff_init_scantable(s->idsp.idct_permutation, &w->abt_scantable[0],
                       ff_wmv2_scantableA);
     ff_init_scantable(s->idsp.idct_permutation, &w->abt_scantable[1],
                       ff_wmv2_scantableB);
 
-    return ff_intrax8_common_init(avctx, &w->x8, &w->s.idsp,
-                                  w->s.block, w->s.block_last_index,
-                                  w->s.mb_width, w->s.mb_height);
+    return ff_intrax8_common_init(avctx, &w->x8, &s->idsp,
+                                  s->block, s->block_last_index,
+                                  s->mb_width, s->mb_height);
 }
 
 static av_cold int wmv2_decode_end(AVCodecContext *avctx)
diff --git a/libavcodec/wmv2enc.c b/libavcodec/wmv2enc.c
index 11d566fe5a..cd21677955 100644
--- a/libavcodec/wmv2enc.c
+++ b/libavcodec/wmv2enc.c
@@ -43,7 +43,7 @@  typedef struct WMV2EncContext {
 
 static int encode_ext_header(WMV2EncContext *w)
 {
-    MPVEncContext *const s = &w->msmpeg4.s.common;
+    MPVEncContext *const s = &w->msmpeg4.s.common.s;
     PutBitContext pb;
     int code;
 
@@ -71,7 +71,7 @@  static av_cold int wmv2_encode_init(AVCodecContext *avctx)
 {
     WMV2EncContext *const w = avctx->priv_data;
     MPVMainEncContext *const m = &w->msmpeg4.s;
-    MPVEncContext     *const s = &m->common;
+    MPVEncContext     *const s = &m->common.s;
 
     s->private_ctx = &w->common;
     if (ff_mpv_encode_init(avctx) < 0)
@@ -92,7 +92,7 @@  static av_cold int wmv2_encode_init(AVCodecContext *avctx)
 int ff_wmv2_encode_picture_header(MPVMainEncContext *m, int picture_number)
 {
     WMV2EncContext *const w = (WMV2EncContext *) m;
-    MPVEncContext  *const s = &m->common;
+    MPVEncContext  *const s = &m->common.s;
 
     put_bits(&s->pb, 1, s->pict_type - 1);
     if (s->pict_type == AV_PICTURE_TYPE_I)