[FFmpeg-devel,16/41] avcodec/mpegvideo: Use typedefs for MPV(Main)?(Dec|Enc)?Context

Message ID AM7PR03MB66601CDB7B9699B44F9FF9338F249@AM7PR03MB6660.eurprd03.prod.outlook.com
State Superseded
Series [FFmpeg-devel,01/41] avcodec/mpegvideo_enc: Allow slices only for slice-thread-able codecs

Checks

Context               Check     Description
andriy/make_x86       success   Make finished
andriy/make_fate_x86  success   Make fate finished
andriy/make_ppc       success   Make finished
andriy/make_fate_ppc  success   Make fate finished

Commit Message

Andreas Rheinhardt Jan. 30, 2022, 6:27 a.m. UTC
This is in preparation for actually adding such contexts
and e.g. moving fields only used by the main thread to
the corresponding "Main" context.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
---
 libavcodec/alpha/mpegvideo_alpha.c     |   6 +-
 libavcodec/arm/asm-offsets.h           |   2 +-
 libavcodec/arm/me_cmp_init_arm.c       |  12 +--
 libavcodec/arm/mpegvideo_arm.c         |  18 ++--
 libavcodec/arm/mpegvideo_arm.h         |   2 +-
 libavcodec/arm/mpegvideo_armv5te.c     |   6 +-
 libavcodec/dnxhdenc.c                  |   4 +-
 libavcodec/dnxhdenc.h                  |   4 +-
 libavcodec/dxva2_mpeg2.c               |  14 +--
 libavcodec/dxva2_vc1.c                 |   6 +-
 libavcodec/flv.h                       |   5 +-
 libavcodec/flvdec.c                    |   4 +-
 libavcodec/flvenc.c                    |   4 +-
 libavcodec/h261.c                      |   2 +-
 libavcodec/h261.h                      |  11 +--
 libavcodec/h261dec.c                   |  29 +++---
 libavcodec/h261enc.c                   |  16 ++--
 libavcodec/h263.c                      |   8 +-
 libavcodec/h263.h                      |   6 +-
 libavcodec/h263dec.c                   |  16 ++--
 libavcodec/h263dec.h                   |  15 ++--
 libavcodec/h263enc.h                   |  18 ++--
 libavcodec/intelh263dec.c              |   4 +-
 libavcodec/ituh263dec.c                |  34 +++----
 libavcodec/ituh263enc.c                |  23 ++---
 libavcodec/me_cmp.c                    |  60 ++++++-------
 libavcodec/me_cmp.h                    |   6 +-
 libavcodec/mips/h263dsp_mips.h         |   6 +-
 libavcodec/mips/me_cmp_mips.h          |  32 +++----
 libavcodec/mips/me_cmp_msa.c           |  28 +++---
 libavcodec/mips/mpegvideo_init_mips.c  |   2 +-
 libavcodec/mips/mpegvideo_mips.h       |  12 +--
 libavcodec/mips/mpegvideo_mmi.c        |  12 +--
 libavcodec/mips/mpegvideo_msa.c        |   6 +-
 libavcodec/mjpegenc.c                  |  32 +++----
 libavcodec/mjpegenc.h                  |  14 +--
 libavcodec/motion_est.c                |  54 +++++------
 libavcodec/motion_est.h                |  25 +++---
 libavcodec/motion_est_template.c       |  34 +++----
 libavcodec/mpeg12.c                    |   4 +-
 libavcodec/mpeg12.h                    |  13 +--
 libavcodec/mpeg12dec.c                 | 112 +++++++++++------------
 libavcodec/mpeg12enc.c                 |  32 +++----
 libavcodec/mpeg4video.c                |  10 +--
 libavcodec/mpeg4video.h                |  31 +++----
 libavcodec/mpeg4video_parser.c         |   2 +-
 libavcodec/mpeg4videodec.c             |  80 ++++++++---------
 libavcodec/mpeg4videoenc.c             |  40 ++++-----
 libavcodec/mpeg_er.c                   |   6 +-
 libavcodec/mpeg_er.h                   |   4 +-
 libavcodec/mpegvideo.c                 |  79 ++++++++--------
 libavcodec/mpegvideo.h                 | 110 ++++++++++++-----------
 libavcodec/mpegvideo_dec.c             |  24 ++---
 libavcodec/mpegvideo_enc.c             | 119 ++++++++++++++-----------
 libavcodec/mpegvideo_motion.c          |  28 +++---
 libavcodec/mpegvideo_xvmc.c            |  14 +--
 libavcodec/mpegvideoenc.h              |  19 ++--
 libavcodec/msmpeg4.c                   |   6 +-
 libavcodec/msmpeg4.h                   |  14 +--
 libavcodec/msmpeg4dec.c                |  28 +++---
 libavcodec/msmpeg4enc.c                |  21 ++---
 libavcodec/msmpeg4enc.h                |  18 ++--
 libavcodec/mss2.c                      |   2 +-
 libavcodec/neon/mpegvideo.c            |   6 +-
 libavcodec/nvdec_mpeg12.c              |   2 +-
 libavcodec/nvdec_mpeg4.c               |   2 +-
 libavcodec/nvdec_vc1.c                 |   2 +-
 libavcodec/ppc/me_cmp.c                |  22 ++---
 libavcodec/ppc/mpegvideo_altivec.c     |   4 +-
 libavcodec/ratecontrol.c               |  24 ++---
 libavcodec/ratecontrol.h               |  16 ++--
 libavcodec/rv10.c                      |  16 ++--
 libavcodec/rv10.h                      |   7 +-
 libavcodec/rv10enc.c                   |   4 +-
 libavcodec/rv20enc.c                   |   5 +-
 libavcodec/rv30.c                      |   4 +-
 libavcodec/rv34.c                      |  38 ++++----
 libavcodec/rv34.h                      |   2 +-
 libavcodec/rv40.c                      |   6 +-
 libavcodec/snow.h                      |   4 +-
 libavcodec/snow_dwt.c                  |  16 ++--
 libavcodec/snow_dwt.h                  |   9 +-
 libavcodec/speedhqenc.c                |  12 +--
 libavcodec/speedhqenc.h                |  12 +--
 libavcodec/svq1enc.c                   |   3 +-
 libavcodec/svq1enc.h                   |   6 +-
 libavcodec/vaapi_mpeg2.c               |  10 +--
 libavcodec/vaapi_mpeg4.c               |   6 +-
 libavcodec/vaapi_vc1.c                 |  10 +--
 libavcodec/vc1.h                       |   2 +-
 libavcodec/vc1_block.c                 |  44 ++++-----
 libavcodec/vc1_loopfilter.c            |  28 +++---
 libavcodec/vc1_mc.c                    |  14 +--
 libavcodec/vc1_pred.c                  |   8 +-
 libavcodec/vc1dec.c                    |  13 ++-
 libavcodec/vdpau.c                     |   2 +-
 libavcodec/vdpau_mpeg12.c              |   4 +-
 libavcodec/vdpau_mpeg4.c               |   2 +-
 libavcodec/vdpau_vc1.c                 |   4 +-
 libavcodec/videotoolbox.c              |   2 +-
 libavcodec/wmv2.c                      |   4 +-
 libavcodec/wmv2.h                      |  19 ++--
 libavcodec/wmv2dec.c                   |  24 ++---
 libavcodec/wmv2enc.c                   |   8 +-
 libavcodec/x86/me_cmp.asm              |  16 ++--
 libavcodec/x86/me_cmp_init.c           |  72 +++++++--------
 libavcodec/x86/mpegvideo.c             |  14 +--
 libavcodec/x86/mpegvideoenc.c          |   8 +-
 libavcodec/x86/mpegvideoenc_template.c |   2 +-
 libavcodec/xvmc_internal.h             |   6 +-
 110 files changed, 999 insertions(+), 953 deletions(-)

Comments

Michael Niedermayer Jan. 30, 2022, 11:40 a.m. UTC | #1
On Sun, Jan 30, 2022 at 07:27:24AM +0100, Andreas Rheinhardt wrote:
> This is in preparation for actually adding such contexts
> and e.g. moving fields only used by the main thread to
> the corresponding "Main" context.
> 
> Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>

This breaks build on mingw64

src/libavcodec/dxva2_mpeg2.c:42:50: warning: ‘struct MPVDecContext’ declared inside parameter list will not be visible outside of this definition or declaration
                                     const struct MPVDecContext *s,
                                                  ^~~~~~~~~~~~~
src/libavcodec/dxva2_mpeg2.c: In function ‘fill_picture_parameters’:
src/libavcodec/dxva2_mpeg2.c:45:39: error: dereferencing pointer to incomplete type ‘const struct MPVDecContext’
     const Picture *current_picture = s->current_picture_ptr;
                                       ^~
src/libavcodec/dxva2_mpeg2.c: At top level:
src/libavcodec/dxva2_mpeg2.c:108:53: warning: ‘struct MPVDecContext’ declared inside parameter list will not be visible outside of this definition or declaration
                                        const struct MPVDecContext *s,
                                                     ^~~~~~~~~~~~~
src/libavcodec/dxva2_mpeg2.c: In function ‘fill_quantization_matrices’:
src/libavcodec/dxva2_mpeg2.c:115:18: error: dereferencing pointer to incomplete type ‘const struct MPVDecContext’
         int n = s->idsp.idct_permutation[ff_zigzag_direct[i]];
                  ^~
src/libavcodec/dxva2_mpeg2.c:115:13: warning: variable ‘n’ set but not used [-Wunused-but-set-variable]
         int n = s->idsp.idct_permutation[ff_zigzag_direct[i]];
             ^
src/libavcodec/dxva2_mpeg2.c: At top level:
src/libavcodec/dxva2_mpeg2.c:124:37: warning: ‘struct MPVDecContext’ declared inside parameter list will not be visible outside of this definition or declaration
                        const struct MPVDecContext *s,
                                     ^~~~~~~~~~~~~
src/libavcodec/dxva2_mpeg2.c: In function ‘fill_slice’:
src/libavcodec/dxva2_mpeg2.c:129:21: error: dereferencing pointer to incomplete type ‘const struct MPVDecContext’
     int is_field = s->picture_structure != PICT_FRAME;
                     ^~
src/libavcodec/dxva2_mpeg2.c: In function ‘commit_bitstream_and_slice_buffer’:
src/libavcodec/dxva2_mpeg2.c:157:10: error: dereferencing pointer to incomplete type ‘const struct MPVDecContext’
         s->current_picture_ptr->hwaccel_picture_private;
          ^~
src/libavcodec/dxva2_mpeg2.c: In function ‘dxva2_mpeg2_start_frame’:
src/libavcodec/dxva2_mpeg2.c:260:10: error: dereferencing pointer to incomplete type ‘const struct MPVDecContext’
         s->current_picture_ptr->hwaccel_picture_private;
          ^~
src/libavcodec/dxva2_mpeg2.c:266:41: warning: passing argument 3 of ‘fill_picture_parameters’ from incompatible pointer type [-Wincompatible-pointer-types]
     fill_picture_parameters(avctx, ctx, s, &ctx_pic->pp);
                                         ^
src/libavcodec/dxva2_mpeg2.c:40:13: note: expected ‘const struct MPVDecContext *’ but argument is of type ‘const struct MPVDecContext * const’
 static void fill_picture_parameters(AVCodecContext *avctx,
             ^~~~~~~~~~~~~~~~~~~~~~~
src/libavcodec/dxva2_mpeg2.c:267:44: warning: passing argument 3 of ‘fill_quantization_matrices’ from incompatible pointer type [-Wincompatible-pointer-types]
     fill_quantization_matrices(avctx, ctx, s, &ctx_pic->qm);
                                            ^
src/libavcodec/dxva2_mpeg2.c:106:13: note: expected ‘const struct MPVDecContext *’ but argument is of type ‘const struct MPVDecContext * const’
 static void fill_quantization_matrices(AVCodecContext *avctx,
             ^~~~~~~~~~~~~~~~~~~~~~~~~~
src/libavcodec/dxva2_mpeg2.c: In function ‘dxva2_mpeg2_decode_slice’:
src/libavcodec/dxva2_mpeg2.c:280:10: error: dereferencing pointer to incomplete type ‘const struct MPVDecContext’
         s->current_picture_ptr->hwaccel_picture_private;
          ^~
src/libavcodec/dxva2_mpeg2.c:293:23: warning: passing argument 2 of ‘fill_slice’ from incompatible pointer type [-Wincompatible-pointer-types]
     fill_slice(avctx, s, &ctx_pic->slice[ctx_pic->slice_count++], position,
                       ^
src/libavcodec/dxva2_mpeg2.c:123:13: note: expected ‘const struct MPVDecContext *’ but argument is of type ‘const struct MPVDecContext * const’
 static void fill_slice(AVCodecContext *avctx,
             ^~~~~~~~~~
src/libavcodec/dxva2_mpeg2.c: In function ‘dxva2_mpeg2_end_frame’:
src/libavcodec/dxva2_mpeg2.c:302:10: error: dereferencing pointer to incomplete type ‘struct MPVDecContext’
         s->current_picture_ptr->hwaccel_picture_private;
          ^~
src/libavcodec/dxva2_mpeg2.c:312:33: warning: passing argument 1 of ‘ff_mpeg_draw_horiz_band’ from incompatible pointer type [-Wincompatible-pointer-types]
         ff_mpeg_draw_horiz_band(s, 0, avctx->height);
                                 ^
In file included from src/libavcodec/dxva2_mpeg2.c:27:0:
src/libavcodec/mpegvideo.h:606:6: note: expected ‘MPVContext * {aka struct MPVContext *}’ but argument is of type ‘struct MPVDecContext * const’
 void ff_mpeg_draw_horiz_band(MPVContext *s, int y, int h);
      ^~~~~~~~~~~~~~~~~~~~~~~
src/ffbuild/common.mak:78: recipe for target 'libavcodec/dxva2_mpeg2.o' failed
make: *** [libavcodec/dxva2_mpeg2.o] Error 1
make: *** Waiting for unfinished jobs....

[...]
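
The errors above come down to ordinary C struct-tag scoping: MPVDecContext is only a typedef alias of MPVContext (see the hunk quoted in #2 below), so no struct tag named MPVDecContext exists, and writing "const struct MPVDecContext *s" in a parameter list declares a fresh, incomplete struct type that cannot be dereferenced. A minimal sketch of the failure and one possible way out (simplified, hypothetical code, not the actual mpegvideo.h and not necessarily the fix in the linked branch):

    /* Simplified sketch -- not the real FFmpeg headers. */
    typedef struct MPVContext {
        int picture_structure;
    } MPVContext;

    typedef MPVContext MPVDecContext; /* alias only: no "struct MPVDecContext" tag */

    #if 0
    /* Pattern used in dxva2_mpeg2.c after the rename: "struct MPVDecContext"
     * here names a new, incomplete type unrelated to MPVContext, which gives
     * the "declared inside parameter list" warnings and the "dereferencing
     * pointer to incomplete type" errors quoted above. */
    int is_field(const struct MPVDecContext *s)
    {
        return s->picture_structure != 0;
    }
    #endif

    /* Compiles: refer to the typedef (or to "struct MPVContext") instead. */
    int is_field(const MPVDecContext *s)
    {
        return s->picture_structure != 0;
    }
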
Michael Niedermayer Jan. 30, 2022, 11:43 a.m. UTC | #2
On Sun, Jan 30, 2022 at 07:27:24AM +0100, Andreas Rheinhardt wrote:
> This is in preparation for actually adding such contexts
> and e.g. moving fields only used by the main thread to
> the corresponding "Main" context.
[...]

> +} MPVContext;
>  
> +typedef MPVContext MPVDecContext;
> +typedef MPVContext MPVMainContext;
> +typedef MPVContext MPVMainDecContext;

Missing documentation of what these represent, specifically the "Main" ones.

thx

[...]
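
For what it is worth, the kind of comment being asked for can be inferred from the commit message: all of these names currently alias MPVContext, the Dec/Enc variants only mark decoder- vs. encoder-side use, and the "Main" variants denote the per-codec context owned by the main thread, which is what will later receive the fields only the main thread uses. A hypothetical sketch of such documentation (not Andreas's actual follow-up wording):

    typedef MPVContext MPVDecContext;     /* decoder context                           */
    typedef MPVContext MPVEncContext;     /* encoder context                           */
    typedef MPVContext MPVMainContext;    /* context owned by the main thread          */
    typedef MPVContext MPVMainDecContext; /* main-thread decoder context; will later   */
                                          /* hold the fields only the main thread uses */
    typedef MPVContext MPVMainEncContext; /* main-thread encoder context               */
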
Andreas Rheinhardt Jan. 30, 2022, 11:05 p.m. UTC | #3
Michael Niedermayer:
> On Sun, Jan 30, 2022 at 07:27:24AM +0100, Andreas Rheinhardt wrote:
>> This is in preparation for actually adding such contexts
>> and e.g. moving fields only used by the main thread to
>> the corresponding "Main" context.
>>
>> Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
> 
> This breaks build on mingw64
> 
> [...]
> 

Thanks, fixed in https://github.com/mkver/FFmpeg/commits/mpegvideo (as
well as the commit message of #2 and the missing documentation for the
typedefs and some smaller stuff, too). I don't think it is worth
resending it now.
I am btw very interested in whether I should split all relevant
headers into decoder- and encoder-only parts; I only did it for those
for which there is enough stuff for the new headers and for example not
for flv.h, but splitting all headers would have the advantage that e.g.
the decoders never have the encoder headers included.

- Andreas
Michael Niedermayer Jan. 31, 2022, 3:37 p.m. UTC | #4
On Mon, Jan 31, 2022 at 12:05:11AM +0100, Andreas Rheinhardt wrote:
> Michael Niedermayer:
> > On Sun, Jan 30, 2022 at 07:27:24AM +0100, Andreas Rheinhardt wrote:
> >> This is in preparation for actually adding such contexts
> >> and e.g. moving fields only used by the main thread to
> >> the corresponding "Main" context.
> >>
> >> Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
> > 
> > This breaks build on mingw64
> > 
> > [...]
> > 
> 
> Thanks, fixed in https://github.com/mkver/FFmpeg/commits/mpegvideo (as
> well as the commit message of #2 and the missing documentation for the
> typedefs and some smaller stuff, too). I don't think it is worth
> resending it now.
> I am btw very interested in whether I should split all relevant
> headers into decoder- and encoder-only parts; I only did it for those
> for which there is enough stuff for the new headers and for example not
> for flv.h, but splitting all headers would have the advantage that e.g.
> the decoders never have the encoder headers included.

I think things like libavcodec/flv.h are better split.
OTOH, anything that just isn't currently used, because for example we do
not have an encoder implemented, should be left in a common file.

thx

[...]
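
To make the flv.h example concrete, a hypothetical sketch of such a split, reusing the declarations from the hunk below; the flvdec.h/flvenc.h names and include lists are guesses, not part of the patch:

    /* flvdec.h (hypothetical): decoder-only part */
    #include "mpegvideo.h"
    int ff_flv_decode_picture_header(MPVMainDecContext *s);

    /* flvenc.h (hypothetical): encoder-only part */
    #include "mpegvideoenc.h"
    #include "put_bits.h"
    void ff_flv_encode_picture_header(MPVMainEncContext *s, int picture_number);
    void ff_flv2_encode_ac_esc(PutBitContext *pb, int slevel, int level, int run,
                               int last);

Such a split would also match the criterion above: flv.h backs both a real decoder and a real encoder, so splitting frees the decoder from mpegvideoenc.h, while headers whose encoder half does not exist yet would stay common.
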

Patch

diff --git a/libavcodec/alpha/mpegvideo_alpha.c b/libavcodec/alpha/mpegvideo_alpha.c
index 126fe264a1..ee553ae2fb 100644
--- a/libavcodec/alpha/mpegvideo_alpha.c
+++ b/libavcodec/alpha/mpegvideo_alpha.c
@@ -69,7 +69,7 @@  static void dct_unquantize_h263_axp(int16_t *block, int n_coeffs,
     }
 }
 
-static void dct_unquantize_h263_intra_axp(MpegEncContext *s, int16_t *block,
+static void dct_unquantize_h263_intra_axp(MPVContext *s, int16_t *block,
                                     int n, int qscale)
 {
     int n_coeffs;
@@ -96,14 +96,14 @@  static void dct_unquantize_h263_intra_axp(MpegEncContext *s, int16_t *block,
     block[0] = block0;
 }
 
-static void dct_unquantize_h263_inter_axp(MpegEncContext *s, int16_t *block,
+static void dct_unquantize_h263_inter_axp(MPVContext *s, int16_t *block,
                                     int n, int qscale)
 {
     int n_coeffs = s->inter_scantable.raster_end[s->block_last_index[n]];
     dct_unquantize_h263_axp(block, n_coeffs, qscale, (qscale - 1) | 1);
 }
 
-av_cold void ff_mpv_common_init_axp(MpegEncContext *s)
+av_cold void ff_mpv_common_init_axp(MPVMainContext *s)
 {
     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_axp;
     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_axp;
diff --git a/libavcodec/arm/asm-offsets.h b/libavcodec/arm/asm-offsets.h
index a2174b0a08..65a3f819cc 100644
--- a/libavcodec/arm/asm-offsets.h
+++ b/libavcodec/arm/asm-offsets.h
@@ -21,7 +21,7 @@ 
 #ifndef AVCODEC_ARM_ASM_OFFSETS_H
 #define AVCODEC_ARM_ASM_OFFSETS_H
 
-/* MpegEncContext */
+/* MPVContext */
 #define Y_DC_SCALE               0x04
 #define C_DC_SCALE               0x08
 #define AC_PRED                  0x0c
diff --git a/libavcodec/arm/me_cmp_init_arm.c b/libavcodec/arm/me_cmp_init_arm.c
index 03870a2bfa..981c247c75 100644
--- a/libavcodec/arm/me_cmp_init_arm.c
+++ b/libavcodec/arm/me_cmp_init_arm.c
@@ -23,19 +23,19 @@ 
 #include "libavutil/arm/cpu.h"
 #include "libavcodec/avcodec.h"
 #include "libavcodec/me_cmp.h"
-#include "libavcodec/mpegvideo.h"
+#include "libavcodec/mpegvideoenc.h"
 
-int ff_pix_abs16_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
+int ff_pix_abs16_armv6(MPVEncContext *s, uint8_t *blk1, uint8_t *blk2,
                        ptrdiff_t stride, int h);
-int ff_pix_abs16_x2_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
+int ff_pix_abs16_x2_armv6(MPVEncContext *s, uint8_t *blk1, uint8_t *blk2,
                           ptrdiff_t stride, int h);
-int ff_pix_abs16_y2_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
+int ff_pix_abs16_y2_armv6(MPVEncContext *s, uint8_t *blk1, uint8_t *blk2,
                           ptrdiff_t stride, int h);
 
-int ff_pix_abs8_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
+int ff_pix_abs8_armv6(MPVEncContext *s, uint8_t *blk1, uint8_t *blk2,
                       ptrdiff_t stride, int h);
 
-int ff_sse16_armv6(MpegEncContext *s, uint8_t *blk1, uint8_t *blk2,
+int ff_sse16_armv6(MPVEncContext *s, uint8_t *blk1, uint8_t *blk2,
                    ptrdiff_t stride, int h);
 
 av_cold void ff_me_cmp_init_arm(MECmpContext *c, AVCodecContext *avctx)
diff --git a/libavcodec/arm/mpegvideo_arm.c b/libavcodec/arm/mpegvideo_arm.c
index 008ef18eea..daea438bb1 100644
--- a/libavcodec/arm/mpegvideo_arm.c
+++ b/libavcodec/arm/mpegvideo_arm.c
@@ -27,21 +27,21 @@ 
 #include "asm-offsets.h"
 
 #if HAVE_NEON
-AV_CHECK_OFFSET(MpegEncContext, y_dc_scale,       Y_DC_SCALE);
-AV_CHECK_OFFSET(MpegEncContext, c_dc_scale,       C_DC_SCALE);
-AV_CHECK_OFFSET(MpegEncContext, ac_pred,          AC_PRED);
-AV_CHECK_OFFSET(MpegEncContext, block_last_index, BLOCK_LAST_INDEX);
-AV_CHECK_OFFSET(MpegEncContext, inter_scantable.raster_end,
+AV_CHECK_OFFSET(MPVContext, y_dc_scale,       Y_DC_SCALE);
+AV_CHECK_OFFSET(MPVContext, c_dc_scale,       C_DC_SCALE);
+AV_CHECK_OFFSET(MPVContext, ac_pred,          AC_PRED);
+AV_CHECK_OFFSET(MPVContext, block_last_index, BLOCK_LAST_INDEX);
+AV_CHECK_OFFSET(MPVContext, inter_scantable.raster_end,
                 INTER_SCANTAB_RASTER_END);
-AV_CHECK_OFFSET(MpegEncContext, h263_aic,         H263_AIC);
+AV_CHECK_OFFSET(MPVContext, h263_aic,         H263_AIC);
 #endif
 
-void ff_dct_unquantize_h263_inter_neon(MpegEncContext *s, int16_t *block,
+void ff_dct_unquantize_h263_inter_neon(MPVContext *s, int16_t *block,
                                        int n, int qscale);
-void ff_dct_unquantize_h263_intra_neon(MpegEncContext *s, int16_t *block,
+void ff_dct_unquantize_h263_intra_neon(MPVContext *s, int16_t *block,
                                        int n, int qscale);
 
-av_cold void ff_mpv_common_init_arm(MpegEncContext *s)
+av_cold void ff_mpv_common_init_arm(MPVMainContext *s)
 {
     int cpu_flags = av_get_cpu_flags();
 
diff --git a/libavcodec/arm/mpegvideo_arm.h b/libavcodec/arm/mpegvideo_arm.h
index 709ae6b247..1ba4053e04 100644
--- a/libavcodec/arm/mpegvideo_arm.h
+++ b/libavcodec/arm/mpegvideo_arm.h
@@ -21,6 +21,6 @@ 
 
 #include "libavcodec/mpegvideo.h"
 
-void ff_mpv_common_init_armv5te(MpegEncContext *s);
+void ff_mpv_common_init_armv5te(MPVMainContext *s);
 
 #endif /* AVCODEC_ARM_MPEGVIDEO_ARM_H */
diff --git a/libavcodec/arm/mpegvideo_armv5te.c b/libavcodec/arm/mpegvideo_armv5te.c
index e20bb4c645..3b5185baee 100644
--- a/libavcodec/arm/mpegvideo_armv5te.c
+++ b/libavcodec/arm/mpegvideo_armv5te.c
@@ -50,7 +50,7 @@  static inline void dct_unquantize_h263_helper_c(int16_t *block, int qmul, int qa
 }
 #endif
 
-static void dct_unquantize_h263_intra_armv5te(MpegEncContext *s,
+static void dct_unquantize_h263_intra_armv5te(MPVContext *s,
                                   int16_t *block, int n, int qscale)
 {
     int level, qmul, qadd;
@@ -79,7 +79,7 @@  static void dct_unquantize_h263_intra_armv5te(MpegEncContext *s,
     block[0] = level;
 }
 
-static void dct_unquantize_h263_inter_armv5te(MpegEncContext *s,
+static void dct_unquantize_h263_inter_armv5te(MPVContext *s,
                                   int16_t *block, int n, int qscale)
 {
     int qmul, qadd;
@@ -95,7 +95,7 @@  static void dct_unquantize_h263_inter_armv5te(MpegEncContext *s,
     ff_dct_unquantize_h263_armv5te(block, qmul, qadd, nCoeffs + 1);
 }
 
-av_cold void ff_mpv_common_init_armv5te(MpegEncContext *s)
+av_cold void ff_mpv_common_init_armv5te(MPVMainContext *s)
 {
     s->dct_unquantize_h263_intra = dct_unquantize_h263_intra_armv5te;
     s->dct_unquantize_h263_inter = dct_unquantize_h263_inter_armv5te;
diff --git a/libavcodec/dnxhdenc.c b/libavcodec/dnxhdenc.c
index ac92474e56..74989276b9 100644
--- a/libavcodec/dnxhdenc.c
+++ b/libavcodec/dnxhdenc.c
@@ -115,7 +115,7 @@  void dnxhd_10bit_get_pixels_8x4_sym(int16_t *av_restrict block,
     memcpy(block + 4 * 8, pixels + 3 * line_size, 8 * sizeof(*block));
 }
 
-static int dnxhd_10bit_dct_quantize_444(MpegEncContext *ctx, int16_t *block,
+static int dnxhd_10bit_dct_quantize_444(MPVEncContext *ctx, int16_t *block,
                                         int n, int qscale, int *overflow)
 {
     int i, j, level, last_non_zero, start_i;
@@ -174,7 +174,7 @@  static int dnxhd_10bit_dct_quantize_444(MpegEncContext *ctx, int16_t *block,
     return last_non_zero;
 }
 
-static int dnxhd_10bit_dct_quantize(MpegEncContext *ctx, int16_t *block,
+static int dnxhd_10bit_dct_quantize(MPVEncContext *ctx, int16_t *block,
                                     int n, int qscale, int *overflow)
 {
     const uint8_t *scantable= ctx->intra_scantable.scantable;
diff --git a/libavcodec/dnxhdenc.h b/libavcodec/dnxhdenc.h
index 9e4c869bc4..ef52735ad6 100644
--- a/libavcodec/dnxhdenc.h
+++ b/libavcodec/dnxhdenc.h
@@ -30,8 +30,8 @@ 
 
 #include "libavutil/mem_internal.h"
 
-#include "mpegvideo.h"
 #include "dnxhddata.h"
+#include "mpegvideoenc.h"
 
 typedef struct RCCMPEntry {
     uint16_t mb;
@@ -46,7 +46,7 @@  typedef struct RCEntry {
 typedef struct DNXHDEncContext {
     AVClass *class;
     BlockDSPContext bdsp;
-    MpegEncContext m; ///< Used for quantization dsp functions
+    MPVMainEncContext m; ///< Used for quantization dsp functions
 
     int cid;
     int profile;
diff --git a/libavcodec/dxva2_mpeg2.c b/libavcodec/dxva2_mpeg2.c
index 8cc21bf199..76932d4d63 100644
--- a/libavcodec/dxva2_mpeg2.c
+++ b/libavcodec/dxva2_mpeg2.c
@@ -39,7 +39,7 @@  struct dxva2_picture_context {
 
 static void fill_picture_parameters(AVCodecContext *avctx,
                                     AVDXVAContext *ctx,
-                                    const struct MpegEncContext *s,
+                                    const struct MPVDecContext *s,
                                     DXVA_PictureParameters *pp)
 {
     const Picture *current_picture = s->current_picture_ptr;
@@ -105,7 +105,7 @@  static void fill_picture_parameters(AVCodecContext *avctx,
 
 static void fill_quantization_matrices(AVCodecContext *avctx,
                                        AVDXVAContext *ctx,
-                                       const struct MpegEncContext *s,
+                                       const struct MPVDecContext *s,
                                        DXVA_QmatrixData *qm)
 {
     int i;
@@ -121,7 +121,7 @@  static void fill_quantization_matrices(AVCodecContext *avctx,
 }
 
 static void fill_slice(AVCodecContext *avctx,
-                       const struct MpegEncContext *s,
+                       const struct MPVDecContext *s,
                        DXVA_SliceInfo *slice,
                        unsigned position,
                        const uint8_t *buffer, unsigned size)
@@ -151,7 +151,7 @@  static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
                                              DECODER_BUFFER_DESC *bs,
                                              DECODER_BUFFER_DESC *sc)
 {
-    const struct MpegEncContext *s = avctx->priv_data;
+    const struct MPVDecContext *const s = avctx->priv_data;
     AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
     struct dxva2_picture_context *ctx_pic =
         s->current_picture_ptr->hwaccel_picture_private;
@@ -254,7 +254,7 @@  static int dxva2_mpeg2_start_frame(AVCodecContext *avctx,
                                    av_unused const uint8_t *buffer,
                                    av_unused uint32_t size)
 {
-    const struct MpegEncContext *s = avctx->priv_data;
+    const struct MPVDecContext *const s = avctx->priv_data;
     AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
     struct dxva2_picture_context *ctx_pic =
         s->current_picture_ptr->hwaccel_picture_private;
@@ -275,7 +275,7 @@  static int dxva2_mpeg2_start_frame(AVCodecContext *avctx,
 static int dxva2_mpeg2_decode_slice(AVCodecContext *avctx,
                                     const uint8_t *buffer, uint32_t size)
 {
-    const struct MpegEncContext *s = avctx->priv_data;
+    const struct MPVDecContext *const s = avctx->priv_data;
     struct dxva2_picture_context *ctx_pic =
         s->current_picture_ptr->hwaccel_picture_private;
     unsigned position;
@@ -297,7 +297,7 @@  static int dxva2_mpeg2_decode_slice(AVCodecContext *avctx,
 
 static int dxva2_mpeg2_end_frame(AVCodecContext *avctx)
 {
-    struct MpegEncContext *s = avctx->priv_data;
+    struct MPVDecContext *const s = avctx->priv_data;
     struct dxva2_picture_context *ctx_pic =
         s->current_picture_ptr->hwaccel_picture_private;
     int ret;
diff --git a/libavcodec/dxva2_vc1.c b/libavcodec/dxva2_vc1.c
index f08ac8b5a0..d3171a2443 100644
--- a/libavcodec/dxva2_vc1.c
+++ b/libavcodec/dxva2_vc1.c
@@ -40,7 +40,7 @@  static void fill_picture_parameters(AVCodecContext *avctx,
                                     AVDXVAContext *ctx, const VC1Context *v,
                                     DXVA_PictureParameters *pp)
 {
-    const MpegEncContext *s = &v->s;
+    const MPVDecContext *const s = &v->s;
     const Picture *current_picture = s->current_picture_ptr;
     int intcomp = 0;
 
@@ -163,7 +163,7 @@  static void fill_slice(AVCodecContext *avctx, DXVA_SliceInfo *slice,
                        unsigned position, unsigned size)
 {
     const VC1Context *v = avctx->priv_data;
-    const MpegEncContext *s = &v->s;
+    const MPVDecContext *const s = &v->s;
 
     memset(slice, 0, sizeof(*slice));
     slice->wHorizontalPosition = 0;
@@ -185,7 +185,7 @@  static int commit_bitstream_and_slice_buffer(AVCodecContext *avctx,
 {
     const VC1Context *v = avctx->priv_data;
     AVDXVAContext *ctx = DXVA_CONTEXT(avctx);
-    const MpegEncContext *s = &v->s;
+    const MPVDecContext *const s = &v->s;
     struct dxva2_picture_context *ctx_pic = s->current_picture_ptr->hwaccel_picture_private;
 
     static const uint8_t start_code[] = { 0, 0, 1, 0x0d };
diff --git a/libavcodec/flv.h b/libavcodec/flv.h
index 561cfe0baa..54b13faeb1 100644
--- a/libavcodec/flv.h
+++ b/libavcodec/flv.h
@@ -23,12 +23,13 @@ 
 
 #include "get_bits.h"
 #include "mpegvideo.h"
+#include "mpegvideoenc.h"
 #include "put_bits.h"
 
-void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number);
+void ff_flv_encode_picture_header(MPVMainEncContext *s, int picture_number);
 void ff_flv2_encode_ac_esc(PutBitContext *pb, int slevel, int level, int run,
                            int last);
 
-int ff_flv_decode_picture_header(MpegEncContext *s);
+int ff_flv_decode_picture_header(MPVMainDecContext *s);
 
 #endif /* AVCODEC_FLV_H */
diff --git a/libavcodec/flvdec.c b/libavcodec/flvdec.c
index 108cf88e34..27bad20efb 100644
--- a/libavcodec/flvdec.c
+++ b/libavcodec/flvdec.c
@@ -26,7 +26,7 @@ 
 #include "mpegvideo.h"
 #include "mpegvideodata.h"
 
-int ff_flv_decode_picture_header(MpegEncContext *s)
+int ff_flv_decode_picture_header(MPVMainDecContext *s)
 {
     int format, width, height;
 
@@ -118,7 +118,7 @@  const AVCodec ff_flv_decoder = {
     .long_name      = NULL_IF_CONFIG_SMALL("FLV / Sorenson Spark / Sorenson H.263 (Flash Video)"),
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_FLV1,
-    .priv_data_size = sizeof(MpegEncContext),
+    .priv_data_size = sizeof(MPVMainDecContext),
     .init           = ff_h263_decode_init,
     .close          = ff_h263_decode_end,
     .decode         = ff_h263_decode_frame,
diff --git a/libavcodec/flvenc.c b/libavcodec/flvenc.c
index 6824327734..1de9e4da55 100644
--- a/libavcodec/flvenc.c
+++ b/libavcodec/flvenc.c
@@ -24,7 +24,7 @@ 
 #include "mpegvideodata.h"
 #include "mpegvideoenc.h"
 
-void ff_flv_encode_picture_header(MpegEncContext *s, int picture_number)
+void ff_flv_encode_picture_header(MPVMainEncContext *s, int picture_number)
 {
     int format;
 
@@ -97,7 +97,7 @@  const AVCodec ff_flv_encoder = {
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_FLV1,
     .priv_class     = &ff_mpv_enc_class,
-    .priv_data_size = sizeof(MpegEncContext),
+    .priv_data_size = sizeof(MPVMainEncContext),
     .init           = ff_mpv_encode_init,
     .encode2        = ff_mpv_encode_picture,
     .close          = ff_mpv_encode_end,
diff --git a/libavcodec/h261.c b/libavcodec/h261.c
index 7dfaee7dc4..a4ad742d76 100644
--- a/libavcodec/h261.c
+++ b/libavcodec/h261.c
@@ -58,7 +58,7 @@  static void h261_loop_filter(uint8_t *src, int stride)
     }
 }
 
-void ff_h261_loop_filter(MpegEncContext *s)
+void ff_h261_loop_filter(MPVContext *s)
 {
     H261Context *const h = s->private_ctx;
     const int linesize   = s->linesize;
diff --git a/libavcodec/h261.h b/libavcodec/h261.h
index ff1903e508..ba9b7a033e 100644
--- a/libavcodec/h261.h
+++ b/libavcodec/h261.h
@@ -29,6 +29,7 @@ 
 #define AVCODEC_H261_H
 
 #include "mpegvideo.h"
+#include "mpegvideoenc.h"
 #include "rl.h"
 
 /**
@@ -49,13 +50,13 @@  extern const uint8_t ff_h261_mv_tab[17][2];
 extern const uint8_t ff_h261_cbp_tab[63][2];
 extern RLTable ff_h261_rl_tcoeff;
 
-void ff_h261_loop_filter(MpegEncContext *s);
+void ff_h261_loop_filter(MPVContext *s);
 
 int ff_h261_get_picture_format(int width, int height);
-void ff_h261_reorder_mb_index(MpegEncContext *s);
-void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64],
+void ff_h261_reorder_mb_index(MPVEncContext *s);
+void ff_h261_encode_mb(MPVEncContext *s, int16_t block[6][64],
                        int motion_x, int motion_y);
-void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number);
-void ff_h261_encode_init(MpegEncContext *s);
+void ff_h261_encode_picture_header(MPVMainEncContext *s, int picture_number);
+void ff_h261_encode_init(MPVMainEncContext *s);
 
 #endif /* AVCODEC_H261_H */
diff --git a/libavcodec/h261dec.c b/libavcodec/h261dec.c
index 17f1067b15..b95a9f6a80 100644
--- a/libavcodec/h261dec.c
+++ b/libavcodec/h261dec.c
@@ -48,7 +48,7 @@  static VLC h261_mv_vlc;
 static VLC h261_cbp_vlc;
 
 typedef struct H261DecContext {
-    MpegEncContext s;
+    MPVMainDecContext s;
 
     H261Context common;
 
@@ -81,7 +81,7 @@  static av_cold int h261_decode_init(AVCodecContext *avctx)
 {
     static AVOnce init_static_once = AV_ONCE_INIT;
     H261DecContext *const h = avctx->priv_data;
-    MpegEncContext *const s = &h->s;
+    MPVMainDecContext *const s = &h->s;
 
     s->private_ctx = &h->common;
     // set defaults
@@ -106,7 +106,7 @@  static av_cold int h261_decode_init(AVCodecContext *avctx)
 static int h261_decode_gob_header(H261DecContext *h)
 {
     unsigned int val;
-    MpegEncContext *const s = &h->s;
+    MPVDecContext *const s = &h->s;
 
     if (!h->gob_start_code_skipped) {
         /* Check for GOB Start Code */
@@ -159,7 +159,7 @@  static int h261_decode_gob_header(H261DecContext *h)
  */
 static int h261_resync(H261DecContext *h)
 {
-    MpegEncContext *const s = &h->s;
+    MPVDecContext *const s = &h->s;
     int left, ret;
 
     if (h->gob_start_code_skipped) {
@@ -200,7 +200,7 @@  static int h261_resync(H261DecContext *h)
  */
 static int h261_decode_mb_skipped(H261DecContext *h, int mba1, int mba2)
 {
-    MpegEncContext *const s = &h->s;
+    MPVDecContext *const s = &h->s;
     int i;
 
     s->mb_intra = 0;
@@ -270,7 +270,7 @@  static int decode_mv_component(GetBitContext *gb, int v)
  */
 static int h261_decode_block(H261DecContext *h, int16_t *block, int n, int coded)
 {
-    MpegEncContext *const s = &h->s;
+    MPVDecContext *const s = &h->s;
     int level, i, j, run;
     RLTable *rl = &ff_h261_rl_tcoeff;
     const uint8_t *scan_table;
@@ -362,7 +362,7 @@  static int h261_decode_block(H261DecContext *h, int16_t *block, int n, int coded
 
 static int h261_decode_mb(H261DecContext *h)
 {
-    MpegEncContext *const s = &h->s;
+    MPVDecContext *const s = &h->s;
     H261Context *const com = &h->common;
     int i, cbp, xy;
 
@@ -487,7 +487,7 @@  intra:
  */
 static int h261_decode_picture_header(H261DecContext *h)
 {
-    MpegEncContext *const s = &h->s;
+    MPVDecContext *const s = &h->s;
     int format, i;
     uint32_t startcode = 0;
 
@@ -551,7 +551,7 @@  static int h261_decode_picture_header(H261DecContext *h)
 
 static int h261_decode_gob(H261DecContext *h)
 {
-    MpegEncContext *const s = &h->s;
+    MPVDecContext *const s = &h->s;
 
     ff_set_qscale(s, s->qscale);
 
@@ -581,7 +581,7 @@  static int h261_decode_gob(H261DecContext *h)
 /**
  * returns the number of bytes consumed for building the current frame
  */
-static int get_consumed_bytes(MpegEncContext *s, int buf_size)
+static int get_consumed_bytes(MPVDecContext *s, int buf_size)
 {
     int pos = get_bits_count(&s->gb) >> 3;
     if (pos == 0)
@@ -596,9 +596,10 @@  static int h261_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame, AVPacket *avpkt)
 {
     H261DecContext *const h = avctx->priv_data;
+    MPVMainDecContext *const s2 = &h->s;
+    MPVDecContext *const s = s2;
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
-    MpegEncContext *s  = &h->s;
     int ret;
     AVFrame *pict = data;
 
@@ -619,11 +620,11 @@  retry:
     }
 
     if (s->width != avctx->coded_width || s->height != avctx->coded_height) {
-        ff_mpv_common_end(s);
+        ff_mpv_common_end(s2);
     }
 
     if (!s->context_initialized) {
-        if ((ret = ff_mpv_common_init(s)) < 0)
+        if ((ret = ff_mpv_common_init(s2)) < 0)
             return ret;
 
         ret = ff_set_dimensions(avctx, s->width, s->height);
@@ -673,7 +674,7 @@  retry:
 static av_cold int h261_decode_end(AVCodecContext *avctx)
 {
     H261DecContext *const h = avctx->priv_data;
-    MpegEncContext *s = &h->s;
+    MPVMainContext *const s = &h->s;
 
     ff_mpv_common_end(s);
     return 0;
diff --git a/libavcodec/h261enc.c b/libavcodec/h261enc.c
index 710d292df6..a2e4c55c93 100644
--- a/libavcodec/h261enc.c
+++ b/libavcodec/h261enc.c
@@ -39,7 +39,7 @@  static uint8_t uni_h261_rl_len [64*64*2*2];
 #define UNI_ENC_INDEX(last,run,level) ((last)*128*64 + (run)*128 + (level))
 
 typedef struct H261EncContext {
-    MpegEncContext s;
+    MPVMainEncContext s;
 
     H261Context common;
 
@@ -59,7 +59,7 @@  int ff_h261_get_picture_format(int width, int height)
         return AVERROR(EINVAL);
 }
 
-void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
+void ff_h261_encode_picture_header(MPVMainEncContext *s, int picture_number)
 {
     H261EncContext *const h = (H261EncContext *)s;
     int format, temp_ref;
@@ -97,7 +97,7 @@  void ff_h261_encode_picture_header(MpegEncContext *s, int picture_number)
 /**
  * Encode a group of blocks header.
  */
-static void h261_encode_gob_header(MpegEncContext *s, int mb_line)
+static void h261_encode_gob_header(MPVEncContext *s, int mb_line)
 {
     H261EncContext *const h = (H261EncContext *)s;
     if (ff_h261_get_picture_format(s->width, s->height) == 0) {
@@ -114,7 +114,7 @@  static void h261_encode_gob_header(MpegEncContext *s, int mb_line)
     s->last_mv[0][0][1] = 0;
 }
 
-void ff_h261_reorder_mb_index(MpegEncContext *s)
+void ff_h261_reorder_mb_index(MPVEncContext *s)
 {
     int index = s->mb_x + s->mb_y * s->mb_width;
 
@@ -159,7 +159,7 @@  static void h261_encode_motion(PutBitContext *pb, int val)
     }
 }
 
-static inline int get_cbp(MpegEncContext *s, int16_t block[6][64])
+static inline int get_cbp(MPVEncContext *s, int16_t block[6][64])
 {
     int i, cbp;
     cbp = 0;
@@ -176,7 +176,7 @@  static inline int get_cbp(MpegEncContext *s, int16_t block[6][64])
  */
 static void h261_encode_block(H261EncContext *h, int16_t *block, int n)
 {
-    MpegEncContext *const s = &h->s;
+    MPVEncContext *const s = &h->s;
     int level, run, i, j, last_index, last_non_zero, sign, slevel, code;
     RLTable *rl;
 
@@ -242,7 +242,7 @@  static void h261_encode_block(H261EncContext *h, int16_t *block, int n)
         put_bits(&s->pb, rl->table_vlc[0][1], rl->table_vlc[0][0]); // EOB
 }
 
-void ff_h261_encode_mb(MpegEncContext *s, int16_t block[6][64],
+void ff_h261_encode_mb(MPVEncContext *s, int16_t block[6][64],
                        int motion_x, int motion_y)
 {
     /* The following is only allowed because this encoder
@@ -380,7 +380,7 @@  static av_cold void h261_encode_init_static(void)
     init_uni_h261_rl_tab(&ff_h261_rl_tcoeff, uni_h261_rl_len);
 }
 
-av_cold void ff_h261_encode_init(MpegEncContext *s)
+av_cold void ff_h261_encode_init(MPVMainEncContext *s)
 {
     H261EncContext *const h = (H261EncContext*)s;
     static AVOnce init_static_once = AV_ONCE_INIT;
diff --git a/libavcodec/h263.c b/libavcodec/h263.c
index b30ffaf878..c60666172a 100644
--- a/libavcodec/h263.c
+++ b/libavcodec/h263.c
@@ -50,7 +50,8 @@  av_cold void ff_h263_init_rl_inter(void)
     ff_thread_once(&init_static_once, h263_init_rl_inter);
 }
 
-void ff_h263_update_motion_val(MpegEncContext * s){
+void ff_h263_update_motion_val(MPVContext *s)
+{
     const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
                //FIXME a lot of that is only needed for !low_delay
     const int wrap = s->b8_stride;
@@ -102,7 +103,8 @@  void ff_h263_update_motion_val(MpegEncContext * s){
     }
 }
 
-void ff_h263_loop_filter(MpegEncContext * s){
+void ff_h263_loop_filter(MPVContext *s)
+{
     int qp_c;
     const int linesize  = s->linesize;
     const int uvlinesize= s->uvlinesize;
@@ -187,7 +189,7 @@  void ff_h263_loop_filter(MpegEncContext * s){
     }
 }
 
-int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
+int16_t *ff_h263_pred_motion(MPVContext * s, int block, int dir,
                              int *px, int *py)
 {
     int wrap;
diff --git a/libavcodec/h263.h b/libavcodec/h263.h
index 5e4a5bc6e5..a3ad0625d5 100644
--- a/libavcodec/h263.h
+++ b/libavcodec/h263.h
@@ -28,11 +28,11 @@ 
 #define H263_GOB_HEIGHT(h) ((h) <= 400 ? 1 : (h) <= 800 ? 2 : 4)
 
 av_const int ff_h263_aspect_to_info(AVRational aspect);
-int16_t *ff_h263_pred_motion(MpegEncContext * s, int block, int dir,
+int16_t *ff_h263_pred_motion(MPVContext * s, int block, int dir,
                              int *px, int *py);
 void ff_h263_init_rl_inter(void);
-void ff_h263_update_motion_val(MpegEncContext * s);
-void ff_h263_loop_filter(MpegEncContext * s);
+void ff_h263_update_motion_val(MPVContext * s);
+void ff_h263_loop_filter(MPVContext * s);
 
 
 
diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c
index 3466027286..56138eca04 100644
--- a/libavcodec/h263dec.c
+++ b/libavcodec/h263dec.c
@@ -55,7 +55,7 @@  static enum AVPixelFormat h263_get_format(AVCodecContext *avctx)
 {
     /* MPEG-4 Studio Profile only, not supported by hardware */
     if (avctx->bits_per_raw_sample > 8) {
-        av_assert1(((MpegEncContext *)avctx->priv_data)->studio_profile);
+        av_assert1(((MPVDecContext *)avctx->priv_data)->studio_profile);
         return avctx->pix_fmt;
     }
 
@@ -73,7 +73,7 @@  static enum AVPixelFormat h263_get_format(AVCodecContext *avctx)
 
 av_cold int ff_h263_decode_init(AVCodecContext *avctx)
 {
-    MpegEncContext *s = avctx->priv_data;
+    MPVMainDecContext *const s = avctx->priv_data;
     int ret;
 
     s->out_format      = FMT_H263;
@@ -156,7 +156,7 @@  av_cold int ff_h263_decode_init(AVCodecContext *avctx)
 
 av_cold int ff_h263_decode_end(AVCodecContext *avctx)
 {
-    MpegEncContext *s = avctx->priv_data;
+    MPVMainDecContext *const s = avctx->priv_data;
 
     ff_mpv_common_end(s);
     return 0;
@@ -165,7 +165,7 @@  av_cold int ff_h263_decode_end(AVCodecContext *avctx)
 /**
  * Return the number of bytes consumed for building the current frame.
  */
-static int get_consumed_bytes(MpegEncContext *s, int buf_size)
+static int get_consumed_bytes(MPVMainDecContext *s, int buf_size)
 {
     int pos = (get_bits_count(&s->gb) + 7) >> 3;
 
@@ -193,7 +193,7 @@  static int get_consumed_bytes(MpegEncContext *s, int buf_size)
     }
 }
 
-static int decode_slice(MpegEncContext *s)
+static int decode_slice(MPVMainDecContext *s)
 {
     const int part_mask = s->partitioned_frame
                           ? (ER_AC_END | ER_AC_ERROR) : 0x7F;
@@ -425,9 +425,9 @@  static int decode_slice(MpegEncContext *s)
 int ff_h263_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
                          AVPacket *avpkt)
 {
+    MPVMainDecContext *const s  = avctx->priv_data;
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
-    MpegEncContext *s  = avctx->priv_data;
     int ret;
     int slice_ret = 0;
     AVFrame *pict = data;
@@ -763,7 +763,7 @@  const AVCodec ff_h263_decoder = {
     .long_name      = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996, H.263+ / H.263-1998 / H.263 version 2"),
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_H263,
-    .priv_data_size = sizeof(MpegEncContext),
+    .priv_data_size = sizeof(MPVMainDecContext),
     .init           = ff_h263_decode_init,
     .close          = ff_h263_decode_end,
     .decode         = ff_h263_decode_frame,
@@ -784,7 +784,7 @@  const AVCodec ff_h263p_decoder = {
     .long_name      = NULL_IF_CONFIG_SMALL("H.263 / H.263-1996, H.263+ / H.263-1998 / H.263 version 2"),
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_H263P,
-    .priv_data_size = sizeof(MpegEncContext),
+    .priv_data_size = sizeof(MPVMainDecContext),
     .init           = ff_h263_decode_init,
     .close          = ff_h263_decode_end,
     .decode         = ff_h263_decode_frame,
diff --git a/libavcodec/h263dec.h b/libavcodec/h263dec.h
index 8d5f9a7add..5261844812 100644
--- a/libavcodec/h263dec.h
+++ b/libavcodec/h263dec.h
@@ -40,26 +40,25 @@  extern VLC ff_h263_mv_vlc;
 
 extern const enum AVPixelFormat ff_h263_hwaccel_pixfmt_list_420[];
 
-int ff_h263_decode_motion(MpegEncContext * s, int pred, int f_code);
+int ff_h263_decode_motion(MPVDecContext * s, int pred, int f_code);
 int ff_h263_decode_init(AVCodecContext *avctx);
 int ff_h263_decode_frame(AVCodecContext *avctx,
                              void *data, int *got_frame,
                              AVPacket *avpkt);
 int ff_h263_decode_end(AVCodecContext *avctx);
 void ff_h263_decode_init_vlc(void);
-int ff_h263_decode_picture_header(MpegEncContext *s);
-int ff_h263_decode_gob_header(MpegEncContext *s);
-int ff_h263_decode_mba(MpegEncContext *s);
+int ff_h263_decode_picture_header(MPVMainDecContext *s);
+int ff_h263_decode_mba(MPVDecContext *s);
 
 /**
  * Print picture info if FF_DEBUG_PICT_INFO is set.
  */
-void ff_h263_show_pict_info(MpegEncContext *s);
+void ff_h263_show_pict_info(MPVMainDecContext *s);
 
-int ff_intel_h263_decode_picture_header(MpegEncContext *s);
-int ff_h263_decode_mb(MpegEncContext *s,
+int ff_intel_h263_decode_picture_header(MPVMainDecContext *s);
+int ff_h263_decode_mb(MPVDecContext *s,
                       int16_t block[6][64]);
 
-int ff_h263_resync(MpegEncContext *s);
+int ff_h263_resync(MPVMainDecContext *s);
 
 #endif
diff --git a/libavcodec/h263enc.h b/libavcodec/h263enc.h
index fff85a18f2..180eb52be3 100644
--- a/libavcodec/h263enc.h
+++ b/libavcodec/h263enc.h
@@ -24,16 +24,16 @@ 
 #include "h263data.h"
 #include "mpegvideoenc.h"
 
-void ff_h263_encode_init(MpegEncContext *s);
-void ff_h263_encode_picture_header(MpegEncContext *s, int picture_number);
-void ff_h263_encode_gob_header(MpegEncContext * s, int mb_line);
-void ff_h263_encode_mb(MpegEncContext *s,
+void ff_h263_encode_init(MPVMainEncContext *s);
+void ff_h263_encode_picture_header(MPVMainEncContext *s, int picture_number);
+void ff_h263_encode_gob_header(MPVEncContext * s, int mb_line);
+void ff_h263_encode_mb(MPVEncContext *s,
                        int16_t block[6][64],
                        int motion_x, int motion_y);
-void ff_h263_encode_mba(MpegEncContext *s);
+void ff_h263_encode_mba(MPVEncContext *s);
 
-void ff_init_qscale_tab(MpegEncContext *s);
-void ff_clean_h263_qscales(MpegEncContext *s);
+void ff_init_qscale_tab(MPVMainEncContext *s);
+void ff_clean_h263_qscales(MPVMainEncContext *s);
 
 void ff_h263_encode_motion(PutBitContext *pb, int val, int f_code);
 
@@ -57,7 +57,7 @@  static inline int h263_get_motion_length(int val, int f_code)
     }
 }
 
-static inline void ff_h263_encode_motion_vector(MpegEncContext * s,
+static inline void ff_h263_encode_motion_vector(MPVEncContext * s,
                                                 int x, int y, int f_code)
 {
     if (s->avctx->flags2 & AV_CODEC_FLAG2_NO_OUTPUT) {
@@ -70,7 +70,7 @@  static inline void ff_h263_encode_motion_vector(MpegEncContext * s,
     }
 }
 
-static inline int get_p_cbp(MpegEncContext * s,
+static inline int get_p_cbp(MPVEncContext * s,
                       int16_t block[6][64],
                       int motion_x, int motion_y){
     int cbp;
diff --git a/libavcodec/intelh263dec.c b/libavcodec/intelh263dec.c
index b61effa4df..1f79f36f9f 100644
--- a/libavcodec/intelh263dec.c
+++ b/libavcodec/intelh263dec.c
@@ -26,7 +26,7 @@ 
 #include "mpegvideodata.h"
 
 /* don't understand why they choose a different header ! */
-int ff_intel_h263_decode_picture_header(MpegEncContext *s)
+int ff_intel_h263_decode_picture_header(MPVMainDecContext *s)
 {
     int format;
 
@@ -134,7 +134,7 @@  const AVCodec ff_h263i_decoder = {
     .long_name      = NULL_IF_CONFIG_SMALL("Intel H.263"),
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_H263I,
-    .priv_data_size = sizeof(MpegEncContext),
+    .priv_data_size = sizeof(MPVMainDecContext),
     .init           = ff_h263_decode_init,
     .close          = ff_h263_decode_end,
     .decode         = ff_h263_decode_frame,
diff --git a/libavcodec/ituh263dec.c b/libavcodec/ituh263dec.c
index 7d7a1f01a2..ac19d52a37 100644
--- a/libavcodec/ituh263dec.c
+++ b/libavcodec/ituh263dec.c
@@ -74,7 +74,8 @@  static const int h263_mb_type_b_map[15]= {
     MB_TYPE_INTRA4x4                | MB_TYPE_CBP | MB_TYPE_QUANT,
 };
 
-void ff_h263_show_pict_info(MpegEncContext *s){
+void ff_h263_show_pict_info(MPVMainDecContext *s)
+{
     if(s->avctx->debug&FF_DEBUG_PICT_INFO){
     av_log(s->avctx, AV_LOG_DEBUG, "qp:%d %c size:%d rnd:%d%s%s%s%s%s%s%s%s%s %d/%d\n",
          s->qscale, av_get_picture_type_char(s->pict_type),
@@ -136,7 +137,7 @@  av_cold void ff_h263_decode_init_vlc(void)
     }
 }
 
-int ff_h263_decode_mba(MpegEncContext *s)
+int ff_h263_decode_mba(MPVDecContext *s)
 {
     int i, mb_pos;
 
@@ -154,7 +155,7 @@  int ff_h263_decode_mba(MpegEncContext *s)
  * Decode the group of blocks header or slice header.
  * @return <0 if an error occurred
  */
-static int h263_decode_gob_header(MpegEncContext *s)
+static int h263_decode_gob_header(MPVMainDecContext *s)
 {
     unsigned int val, gob_number;
     int left;
@@ -210,7 +211,8 @@  static int h263_decode_gob_header(MpegEncContext *s)
  * Decode the group of blocks / video packet header / slice header (MPEG-4 Studio).
  * @return bit position of the resync_marker, or <0 if none was found
  */
-int ff_h263_resync(MpegEncContext *s){
+int ff_h263_resync(MPVMainDecContext *s)
+{
     int left, pos, ret;
 
     /* In MPEG-4 studio mode look for a new slice startcode
@@ -267,7 +269,7 @@  int ff_h263_resync(MpegEncContext *s){
     return -1;
 }
 
-int ff_h263_decode_motion(MpegEncContext * s, int pred, int f_code)
+int ff_h263_decode_motion(MPVDecContext *s, int pred, int f_code)
 {
     int code, val, sign, shift;
     code = get_vlc2(&s->gb, ff_h263_mv_vlc.table, H263_MV_VLC_BITS, 2);
@@ -305,7 +307,7 @@  int ff_h263_decode_motion(MpegEncContext * s, int pred, int f_code)
 
 
 /* Decode RVLC of H.263+ UMV */
-static int h263p_decode_umotion(MpegEncContext * s, int pred)
+static int h263p_decode_umotion(MPVDecContext *s, int pred)
 {
    int code = 0, sign;
 
@@ -335,7 +337,8 @@  static int h263p_decode_umotion(MpegEncContext * s, int pred)
 /**
  * read the next MVs for OBMC. yes this is an ugly hack, feel free to send a patch :)
  */
-static void preview_obmc(MpegEncContext *s){
+static void preview_obmc(MPVDecContext *s)
+{
     GetBitContext gb= s->gb;
 
     int cbpc, i, pred_x, pred_y, mx, my;
@@ -427,7 +430,8 @@  end:
     s->gb= gb;
 }
 
-static void h263_decode_dquant(MpegEncContext *s){
+static void h263_decode_dquant(MPVDecContext *s)
+{
     static const int8_t quant_tab[4] = { -1, -2, 1, 2 };
 
     if(s->modified_quant){
@@ -440,7 +444,7 @@  static void h263_decode_dquant(MpegEncContext *s){
     ff_set_qscale(s, s->qscale);
 }
 
-static void h263_pred_acdc(MpegEncContext * s, int16_t *block, int n)
+static void h263_pred_acdc(MPVDecContext *s, int16_t *block, int n)
 {
     int x, y, wrap, a, c, pred_dc, scale;
     int16_t *dc_val, *ac_val, *ac_val1;
@@ -527,7 +531,7 @@  static void h263_pred_acdc(MpegEncContext * s, int16_t *block, int n)
         ac_val1[8 + i] = block[s->idsp.idct_permutation[i]];
 }
 
-static int h263_decode_block(MpegEncContext * s, int16_t * block,
+static int h263_decode_block(MPVDecContext *s, int16_t * block,
                              int n, int coded)
 {
     int level, i, j, run;
@@ -674,7 +678,7 @@  not_coded:
     return 0;
 }
 
-static int h263_skip_b_part(MpegEncContext *s, int cbp)
+static int h263_skip_b_part(MPVDecContext *s, int cbp)
 {
     LOCAL_ALIGNED_32(int16_t, dblock, [64]);
     int i, mbi;
@@ -716,7 +720,7 @@  static int h263_get_modb(GetBitContext *gb, int pb_frame, int *cbpb)
 
 #define tab_size ((signed)FF_ARRAY_ELEMS(s->direct_scale_mv[0]))
 #define tab_bias (tab_size / 2)
-static inline void set_one_direct_mv(MpegEncContext *s, Picture *p, int i)
+static inline void set_one_direct_mv(MPVDecContext *s, Picture *p, int i)
 {
     int xy           = s->block_index[i];
     uint16_t time_pp = s->pp_time;
@@ -744,7 +748,7 @@  static inline void set_one_direct_mv(MpegEncContext *s, Picture *p, int i)
 /**
  * @return the mb_type
  */
-static int set_direct_mv(MpegEncContext *s)
+static int set_direct_mv(MPVDecContext *s)
 {
     const int mb_index = s->mb_x + s->mb_y * s->mb_stride;
     Picture *p = &s->next_picture;
@@ -781,7 +785,7 @@  static int set_direct_mv(MpegEncContext *s)
     }
 }
 
-int ff_h263_decode_mb(MpegEncContext *s,
+int ff_h263_decode_mb(MPVDecContext *s,
                       int16_t block[6][64])
 {
     int cbpc, cbpy, i, cbp, pred_x, pred_y, mx, my, dquant;
@@ -1083,7 +1087,7 @@  end:
 }
 
 /* Most is hardcoded; should extend to handle all H.263 streams. */
-int ff_h263_decode_picture_header(MpegEncContext *s)
+int ff_h263_decode_picture_header(MPVMainDecContext *s)
 {
     int format, width, height, i, ret;
     uint32_t startcode;
diff --git a/libavcodec/ituh263enc.c b/libavcodec/ituh263enc.c
index ef7caf8009..97f3ad95f2 100644
--- a/libavcodec/ituh263enc.c
+++ b/libavcodec/ituh263enc.c
@@ -102,7 +102,7 @@  av_const int ff_h263_aspect_to_info(AVRational aspect){
     return FF_ASPECT_EXTENDED;
 }
 
-void ff_h263_encode_picture_header(MpegEncContext * s, int picture_number)
+void ff_h263_encode_picture_header(MPVMainEncContext *s, int picture_number)
 {
     int format, coded_frame_rate, coded_frame_rate_base, i, temp_ref;
     int best_clock_code=1;
@@ -240,7 +240,7 @@  void ff_h263_encode_picture_header(MpegEncContext * s, int picture_number)
 /**
  * Encode a group of blocks header.
  */
-void ff_h263_encode_gob_header(MpegEncContext * s, int mb_line)
+void ff_h263_encode_gob_header(MPVEncContext *s, int mb_line)
 {
     put_bits(&s->pb, 17, 1); /* GBSC */
 
@@ -266,7 +266,8 @@  void ff_h263_encode_gob_header(MpegEncContext * s, int mb_line)
 /**
  * modify qscale so that encoding is actually possible in H.263 (limit difference to -2..2)
  */
-void ff_clean_h263_qscales(MpegEncContext *s){
+void ff_clean_h263_qscales(MPVMainEncContext *s)
+{
     int i;
     int8_t * const qscale_table = s->current_picture.qscale_table;
 
@@ -299,7 +300,7 @@  static const int dquant_code[5]= {1,0,9,2,3};
  * @param block the 8x8 block
  * @param n block index (0-3 are luma, 4-5 are chroma)
  */
-static void h263_encode_block(MpegEncContext * s, int16_t * block, int n)
+static void h263_encode_block(MPVEncContext *s, int16_t * block, int n)
 {
     int level, run, last, i, j, last_index, last_non_zero, sign, slevel, code;
     RLTable *rl;
@@ -447,7 +448,7 @@  static void h263p_encode_umotion(PutBitContext *pb, int val)
     }
 }
 
-static int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
+static int h263_pred_dc(MPVEncContext *s, int n, int16_t **dc_val_ptr)
 {
     int x, y, wrap, a, c, pred_dc;
     int16_t *dc_val;
@@ -488,7 +489,7 @@  static int h263_pred_dc(MpegEncContext * s, int n, int16_t **dc_val_ptr)
     return pred_dc;
 }
 
-void ff_h263_encode_mb(MpegEncContext * s,
+void ff_h263_encode_mb(MPVEncContext *s,
                        int16_t block[6][64],
                        int motion_x, int motion_y)
 {
@@ -811,7 +812,7 @@  static av_cold void h263_encode_init_static(void)
     init_mv_penalty_and_fcode();
 }
 
-av_cold void ff_h263_encode_init(MpegEncContext *s)
+av_cold void ff_h263_encode_init(MPVMainEncContext *s)
 {
     static AVOnce init_static_once = AV_ONCE_INIT;
 
@@ -866,7 +867,7 @@  av_cold void ff_h263_encode_init(MpegEncContext *s)
     ff_thread_once(&init_static_once, h263_encode_init_static);
 }
 
-void ff_h263_encode_mba(MpegEncContext *s)
+void ff_h263_encode_mba(MPVEncContext *s)
 {
     int i, mb_pos;
 
@@ -877,7 +878,7 @@  void ff_h263_encode_mba(MpegEncContext *s)
     put_bits(&s->pb, ff_mba_length[i], mb_pos);
 }
 
-#define OFFSET(x) offsetof(MpegEncContext, x)
+#define OFFSET(x) offsetof(MPVMainEncContext, x)
 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
 static const AVOption h263_options[] = {
     { "obmc",         "use overlapped block motion compensation.", OFFSET(obmc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
@@ -907,7 +908,7 @@  const AVCodec ff_h263_encoder = {
     .pix_fmts= (const enum AVPixelFormat[]){AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE},
     .priv_class     = &h263_class,
     .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
-    .priv_data_size = sizeof(MpegEncContext),
+    .priv_data_size = sizeof(MPVMainEncContext),
     .init           = ff_mpv_encode_init,
     .encode2        = ff_mpv_encode_picture,
     .close          = ff_mpv_encode_end,
@@ -943,7 +944,7 @@  const AVCodec ff_h263p_encoder = {
     .priv_class     = &h263p_class,
     .capabilities   = AV_CODEC_CAP_SLICE_THREADS,
     .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
-    .priv_data_size = sizeof(MpegEncContext),
+    .priv_data_size = sizeof(MPVMainEncContext),
     .init           = ff_mpv_encode_init,
     .encode2        = ff_mpv_encode_picture,
     .close          = ff_mpv_encode_end,
diff --git a/libavcodec/me_cmp.c b/libavcodec/me_cmp.c
index 60545248b3..8fa8c258ee 100644
--- a/libavcodec/me_cmp.c
+++ b/libavcodec/me_cmp.c
@@ -27,7 +27,7 @@ 
 #include "copy_block.h"
 #include "simple_idct.h"
 #include "me_cmp.h"
-#include "mpegvideo.h"
+#include "mpegvideoenc.h"
 #include "config.h"
 
 /* (i - 256) * (i - 256) */
@@ -66,7 +66,7 @@  const uint32_t ff_square_tab[512] = {
     57600, 58081, 58564, 59049, 59536, 60025, 60516, 61009, 61504, 62001, 62500, 63001, 63504, 64009, 64516, 65025,
 };
 
-static int sse4_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+static int sse4_c(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                   ptrdiff_t stride, int h)
 {
     int s = 0, i;
@@ -83,7 +83,7 @@  static int sse4_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
     return s;
 }
 
-static int sse8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+static int sse8_c(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                   ptrdiff_t stride, int h)
 {
     int s = 0, i;
@@ -104,7 +104,7 @@  static int sse8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
     return s;
 }
 
-static int sse16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+static int sse16_c(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                    ptrdiff_t stride, int h)
 {
     int s = 0, i;
@@ -146,7 +146,7 @@  static int sum_abs_dctelem_c(int16_t *block)
 #define avg2(a, b) (((a) + (b) + 1) >> 1)
 #define avg4(a, b, c, d) (((a) + (b) + (c) + (d) + 2) >> 2)
 
-static inline int pix_abs16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+static inline int pix_abs16_c(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                               ptrdiff_t stride, int h)
 {
     int s = 0, i;
@@ -174,7 +174,7 @@  static inline int pix_abs16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
     return s;
 }
 
-static inline int pix_median_abs16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+static inline int pix_median_abs16_c(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                              ptrdiff_t stride, int h)
 {
     int s = 0, i, j;
@@ -213,7 +213,7 @@  static inline int pix_median_abs16_c(MpegEncContext *v, uint8_t *pix1, uint8_t *
     return s;
 }
 
-static int pix_abs16_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+static int pix_abs16_x2_c(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                           ptrdiff_t stride, int h)
 {
     int s = 0, i;
@@ -241,7 +241,7 @@  static int pix_abs16_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
     return s;
 }
 
-static int pix_abs16_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+static int pix_abs16_y2_c(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                           ptrdiff_t stride, int h)
 {
     int s = 0, i;
@@ -271,7 +271,7 @@  static int pix_abs16_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
     return s;
 }
 
-static int pix_abs16_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+static int pix_abs16_xy2_c(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                            ptrdiff_t stride, int h)
 {
     int s = 0, i;
@@ -301,7 +301,7 @@  static int pix_abs16_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
     return s;
 }
 
-static inline int pix_abs8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+static inline int pix_abs8_c(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                              ptrdiff_t stride, int h)
 {
     int s = 0, i;
@@ -321,7 +321,7 @@  static inline int pix_abs8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
     return s;
 }
 
-static inline int pix_median_abs8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+static inline int pix_median_abs8_c(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                              ptrdiff_t stride, int h)
 {
     int s = 0, i, j;
@@ -352,7 +352,7 @@  static inline int pix_median_abs8_c(MpegEncContext *v, uint8_t *pix1, uint8_t *p
     return s;
 }
 
-static int pix_abs8_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+static int pix_abs8_x2_c(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                          ptrdiff_t stride, int h)
 {
     int s = 0, i;
@@ -372,7 +372,7 @@  static int pix_abs8_x2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
     return s;
 }
 
-static int pix_abs8_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+static int pix_abs8_y2_c(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                          ptrdiff_t stride, int h)
 {
     int s = 0, i;
@@ -394,7 +394,7 @@  static int pix_abs8_y2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
     return s;
 }
 
-static int pix_abs8_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+static int pix_abs8_xy2_c(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                           ptrdiff_t stride, int h)
 {
     int s = 0, i;
@@ -416,7 +416,7 @@  static int pix_abs8_xy2_c(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
     return s;
 }
 
-static int nsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
+static int nsse16_c(MPVEncContext *c, uint8_t *s1, uint8_t *s2,
                     ptrdiff_t stride, int h)
 {
     int score1 = 0, score2 = 0, x, y;
@@ -441,7 +441,7 @@  static int nsse16_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
         return score1 + FFABS(score2) * 8;
 }
 
-static int nsse8_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
+static int nsse8_c(MPVEncContext *c, uint8_t *s1, uint8_t *s2,
                    ptrdiff_t stride, int h)
 {
     int score1 = 0, score2 = 0, x, y;
@@ -466,7 +466,7 @@  static int nsse8_c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,
         return score1 + FFABS(score2) * 8;
 }
 
-static int zero_cmp(MpegEncContext *s, uint8_t *a, uint8_t *b,
+static int zero_cmp(MPVEncContext *s, uint8_t *a, uint8_t *b,
                     ptrdiff_t stride, int h)
 {
     return 0;
@@ -552,7 +552,7 @@  void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type)
 
 #define BUTTERFLYA(x, y) (FFABS((x) + (y)) + FFABS((x) - (y)))
 
-static int hadamard8_diff8x8_c(MpegEncContext *s, uint8_t *dst,
+static int hadamard8_diff8x8_c(MPVEncContext *s, uint8_t *dst,
                                uint8_t *src, ptrdiff_t stride, int h)
 {
     int i, temp[64], sum = 0;
@@ -604,7 +604,7 @@  static int hadamard8_diff8x8_c(MpegEncContext *s, uint8_t *dst,
     return sum;
 }
 
-static int hadamard8_intra8x8_c(MpegEncContext *s, uint8_t *src,
+static int hadamard8_intra8x8_c(MPVEncContext *s, uint8_t *src,
                                 uint8_t *dummy, ptrdiff_t stride, int h)
 {
     int i, temp[64], sum = 0;
@@ -656,7 +656,7 @@  static int hadamard8_intra8x8_c(MpegEncContext *s, uint8_t *src,
     return sum;
 }
 
-static int dct_sad8x8_c(MpegEncContext *s, uint8_t *src1,
+static int dct_sad8x8_c(MPVEncContext *s, uint8_t *src1,
                         uint8_t *src2, ptrdiff_t stride, int h)
 {
     LOCAL_ALIGNED_16(int16_t, temp, [64]);
@@ -697,7 +697,7 @@  static int dct_sad8x8_c(MpegEncContext *s, uint8_t *src1,
         DST(7, (a4 >> 2) - a7);                         \
     }
 
-static int dct264_sad8x8_c(MpegEncContext *s, uint8_t *src1,
+static int dct264_sad8x8_c(MPVEncContext *s, uint8_t *src1,
                            uint8_t *src2, ptrdiff_t stride, int h)
 {
     int16_t dct[8][8];
@@ -722,7 +722,7 @@  static int dct264_sad8x8_c(MpegEncContext *s, uint8_t *src1,
 }
 #endif
 
-static int dct_max8x8_c(MpegEncContext *s, uint8_t *src1,
+static int dct_max8x8_c(MPVEncContext *s, uint8_t *src1,
                         uint8_t *src2, ptrdiff_t stride, int h)
 {
     LOCAL_ALIGNED_16(int16_t, temp, [64]);
@@ -739,7 +739,7 @@  static int dct_max8x8_c(MpegEncContext *s, uint8_t *src1,
     return sum;
 }
 
-static int quant_psnr8x8_c(MpegEncContext *s, uint8_t *src1,
+static int quant_psnr8x8_c(MPVEncContext *s, uint8_t *src1,
                            uint8_t *src2, ptrdiff_t stride, int h)
 {
     LOCAL_ALIGNED_16(int16_t, temp, [64 * 2]);
@@ -764,7 +764,7 @@  static int quant_psnr8x8_c(MpegEncContext *s, uint8_t *src1,
     return sum;
 }
 
-static int rd8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
+static int rd8x8_c(MPVEncContext *s, uint8_t *src1, uint8_t *src2,
                    ptrdiff_t stride, int h)
 {
     const uint8_t *scantable = s->intra_scantable.permutated;
@@ -841,7 +841,7 @@  static int rd8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
     return distortion + ((bits * s->qscale * s->qscale * 109 + 64) >> 7);
 }
 
-static int bit8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
+static int bit8x8_c(MPVEncContext *s, uint8_t *src1, uint8_t *src2,
                     ptrdiff_t stride, int h)
 {
     const uint8_t *scantable = s->intra_scantable.permutated;
@@ -903,7 +903,7 @@  static int bit8x8_c(MpegEncContext *s, uint8_t *src1, uint8_t *src2,
 }
 
 #define VSAD_INTRA(size)                                                \
-static int vsad_intra ## size ## _c(MpegEncContext *c,                  \
+static int vsad_intra ## size ## _c(MPVEncContext *c,                   \
                                     uint8_t *s, uint8_t *dummy,         \
                                     ptrdiff_t stride, int h)            \
 {                                                                       \
@@ -925,7 +925,7 @@  VSAD_INTRA(8)
 VSAD_INTRA(16)
 
 #define VSAD(size)                                                             \
-static int vsad ## size ## _c(MpegEncContext *c,                               \
+static int vsad ## size ## _c(MPVEncContext *c,                                \
                               uint8_t *s1, uint8_t *s2,                        \
                               ptrdiff_t stride, int h)                               \
 {                                                                              \
@@ -945,7 +945,7 @@  VSAD(16)
 
 #define SQ(a) ((a) * (a))
 #define VSSE_INTRA(size)                                                \
-static int vsse_intra ## size ## _c(MpegEncContext *c,                  \
+static int vsse_intra ## size ## _c(MPVEncContext *c,                   \
                                     uint8_t *s, uint8_t *dummy,         \
                                     ptrdiff_t stride, int h)            \
 {                                                                       \
@@ -967,7 +967,7 @@  VSSE_INTRA(8)
 VSSE_INTRA(16)
 
 #define VSSE(size)                                                             \
-static int vsse ## size ## _c(MpegEncContext *c, uint8_t *s1, uint8_t *s2,     \
+static int vsse ## size ## _c(MPVEncContext *c, uint8_t *s1, uint8_t *s2,      \
                               ptrdiff_t stride, int h)                         \
 {                                                                              \
     int score = 0, x, y;                                                       \
@@ -985,7 +985,7 @@  VSSE(8)
 VSSE(16)
 
 #define WRAPPER8_16_SQ(name8, name16)                                   \
-static int name16(MpegEncContext *s, uint8_t *dst, uint8_t *src,        \
+static int name16(MPVEncContext *s, uint8_t *dst, uint8_t *src,         \
                   ptrdiff_t stride, int h)                              \
 {                                                                       \
     int score = 0;                                                      \
diff --git a/libavcodec/me_cmp.h b/libavcodec/me_cmp.h
index e9b5161c9a..ca9a617a6a 100644
--- a/libavcodec/me_cmp.h
+++ b/libavcodec/me_cmp.h
@@ -39,13 +39,14 @@  extern const uint32_t ff_square_tab[512];
  * !future video codecs might need functions with less strict alignment
  */
 
-struct MpegEncContext;
+#define MPVEncContext MPVContext
+struct MPVEncContext;
 /* Motion estimation:
  * h is limited to { width / 2, width, 2 * width },
  * but never larger than 16 and never smaller than 2.
  * Although currently h < 4 is not used as functions with
  * width < 8 are neither used nor implemented. */
-typedef int (*me_cmp_func)(struct MpegEncContext *c,
+typedef int (*me_cmp_func)(struct MPVEncContext *c,
                            uint8_t *blk1 /* align width (8 or 16) */,
                            uint8_t *blk2 /* align 1 */, ptrdiff_t stride,
                            int h);
@@ -89,5 +90,6 @@  void ff_me_cmp_init_mips(MECmpContext *c, AVCodecContext *avctx);
 void ff_set_cmp(MECmpContext *c, me_cmp_func *cmp, int type);
 
 void ff_dsputil_init_dwt(MECmpContext *c);
+#undef MPVEncContext
 
 #endif /* AVCODEC_ME_CMP_H */
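
The #define/forward-declaration/#undef trio in me_cmp.h (and again in motion_est.h further down) works purely at the preprocessor level: both the forward declaration and the me_cmp_func parameter expand to the same struct MPVContext tag, so the header still avoids including mpegvideoenc.h, and the trailing #undef keeps the alias from leaking into files that include me_cmp.h. A standalone sketch of the mechanism; cmp_fn, my_cmp and the dummy member are invented here for illustration:

#define MPVEncContext MPVContext
struct MPVEncContext;                            /* expands to: struct MPVContext;  */
typedef int (*cmp_fn)(struct MPVEncContext *c);  /* parameter: struct MPVContext *c */
#undef MPVEncContext                             /* alias stays local to the header */

struct MPVContext { int dummy; };                /* any later definition will do    */
static int my_cmp(struct MPVContext *c) { return c->dummy; }

int main(void)
{
    struct MPVContext ctx = { 42 };
    cmp_fn fn = my_cmp;                          /* types match after expansion     */
    return fn(&ctx) == 42 ? 0 : 1;
}
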
diff --git a/libavcodec/mips/h263dsp_mips.h b/libavcodec/mips/h263dsp_mips.h
index 99a43cd44a..852856db55 100644
--- a/libavcodec/mips/h263dsp_mips.h
+++ b/libavcodec/mips/h263dsp_mips.h
@@ -25,11 +25,11 @@ 
 
 void ff_h263_h_loop_filter_msa(uint8_t *src, int stride, int q_scale);
 void ff_h263_v_loop_filter_msa(uint8_t *src, int stride, int q_scale);
-void ff_dct_unquantize_mpeg2_inter_msa(MpegEncContext *s, int16_t *block,
+void ff_dct_unquantize_mpeg2_inter_msa(MPVContext *s, int16_t *block,
                                        int32_t index, int32_t q_scale);
-void ff_dct_unquantize_h263_inter_msa(MpegEncContext *s, int16_t *block,
+void ff_dct_unquantize_h263_inter_msa(MPVContext *s, int16_t *block,
                                       int32_t index, int32_t q_scale);
-void ff_dct_unquantize_h263_intra_msa(MpegEncContext *s, int16_t *block,
+void ff_dct_unquantize_h263_intra_msa(MPVContext *s, int16_t *block,
                                       int32_t index, int32_t q_scale);
 int ff_pix_sum_msa(uint8_t *pix, int line_size);
 
diff --git a/libavcodec/mips/me_cmp_mips.h b/libavcodec/mips/me_cmp_mips.h
index e0d0f51af8..71187f7a5c 100644
--- a/libavcodec/mips/me_cmp_mips.h
+++ b/libavcodec/mips/me_cmp_mips.h
@@ -21,38 +21,38 @@ 
 #ifndef AVCODEC_MIPS_ME_CMP_MIPS_H
 #define AVCODEC_MIPS_ME_CMP_MIPS_H
 
-#include "../mpegvideo.h"
+#include "libavcodec/mpegvideoenc.h"
 #include "libavcodec/bit_depth_template.c"
 
-int ff_hadamard8_diff8x8_msa(MpegEncContext *s, uint8_t *dst, uint8_t *src,
+int ff_hadamard8_diff8x8_msa(MPVEncContext *s, uint8_t *dst, uint8_t *src,
                              ptrdiff_t stride, int h);
-int ff_hadamard8_intra8x8_msa(MpegEncContext *s, uint8_t *dst, uint8_t *src,
+int ff_hadamard8_intra8x8_msa(MPVEncContext *s, uint8_t *dst, uint8_t *src,
                               ptrdiff_t stride, int h);
-int ff_hadamard8_diff16_msa(MpegEncContext *s, uint8_t *dst, uint8_t *src,
+int ff_hadamard8_diff16_msa(MPVEncContext *s, uint8_t *dst, uint8_t *src,
                             ptrdiff_t stride, int h);
-int ff_hadamard8_intra16_msa(MpegEncContext *s, uint8_t *dst, uint8_t *src,
+int ff_hadamard8_intra16_msa(MPVEncContext *s, uint8_t *dst, uint8_t *src,
                              ptrdiff_t stride, int h);
-int ff_pix_abs16_msa(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_pix_abs16_msa(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                      ptrdiff_t stride, int h);
-int ff_pix_abs16_x2_msa(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_pix_abs16_x2_msa(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                         ptrdiff_t stride, int h);
-int ff_pix_abs16_y2_msa(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_pix_abs16_y2_msa(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                         ptrdiff_t stride, int h);
-int ff_pix_abs16_xy2_msa(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_pix_abs16_xy2_msa(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                          ptrdiff_t stride, int h);
-int ff_pix_abs8_msa(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_pix_abs8_msa(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                     ptrdiff_t stride, int h);
-int ff_pix_abs8_x2_msa(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_pix_abs8_x2_msa(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                        ptrdiff_t stride, int h);
-int ff_pix_abs8_y2_msa(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_pix_abs8_y2_msa(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                        ptrdiff_t stride, int h);
-int ff_pix_abs8_xy2_msa(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_pix_abs8_xy2_msa(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                         ptrdiff_t stride, int h);
-int ff_sse16_msa(MpegEncContext *v, uint8_t *pu8Src, uint8_t *pu8Ref,
+int ff_sse16_msa(MPVEncContext *v, uint8_t *pu8Src, uint8_t *pu8Ref,
                  ptrdiff_t stride, int i32Height);
-int ff_sse8_msa(MpegEncContext *v, uint8_t *pu8Src, uint8_t *pu8Ref,
+int ff_sse8_msa(MPVEncContext *v, uint8_t *pu8Src, uint8_t *pu8Ref,
                 ptrdiff_t stride, int i32Height);
-int ff_sse4_msa(MpegEncContext *v, uint8_t *pu8Src, uint8_t *pu8Ref,
+int ff_sse4_msa(MPVEncContext *v, uint8_t *pu8Src, uint8_t *pu8Ref,
                 ptrdiff_t stride, int i32Height);
 void ff_add_pixels8_msa(uint8_t *av_restrict pixels, int16_t *block,
                         ptrdiff_t stride);
diff --git a/libavcodec/mips/me_cmp_msa.c b/libavcodec/mips/me_cmp_msa.c
index 7cb7af0047..f71bb7e4aa 100644
--- a/libavcodec/mips/me_cmp_msa.c
+++ b/libavcodec/mips/me_cmp_msa.c
@@ -587,79 +587,79 @@  static int32_t hadamard_intra_8x8_msa(uint8_t *src, int32_t src_stride,
     return sum_res;
 }
 
-int ff_pix_abs16_msa(MpegEncContext *v, uint8_t *src, uint8_t *ref,
+int ff_pix_abs16_msa(MPVEncContext *v, uint8_t *src, uint8_t *ref,
                      ptrdiff_t stride, int height)
 {
     return sad_16width_msa(src, stride, ref, stride, height);
 }
 
-int ff_pix_abs8_msa(MpegEncContext *v, uint8_t *src, uint8_t *ref,
+int ff_pix_abs8_msa(MPVEncContext *v, uint8_t *src, uint8_t *ref,
                     ptrdiff_t stride, int height)
 {
     return sad_8width_msa(src, stride, ref, stride, height);
 }
 
-int ff_pix_abs16_x2_msa(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_pix_abs16_x2_msa(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                         ptrdiff_t stride, int h)
 {
     return sad_horiz_bilinear_filter_16width_msa(pix1, stride, pix2, stride, h);
 }
 
-int ff_pix_abs16_y2_msa(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_pix_abs16_y2_msa(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                         ptrdiff_t stride, int h)
 {
     return sad_vert_bilinear_filter_16width_msa(pix1, stride, pix2, stride, h);
 }
 
-int ff_pix_abs16_xy2_msa(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_pix_abs16_xy2_msa(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                          ptrdiff_t stride, int h)
 {
     return sad_hv_bilinear_filter_16width_msa(pix1, stride, pix2, stride, h);
 }
 
-int ff_pix_abs8_x2_msa(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_pix_abs8_x2_msa(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                        ptrdiff_t stride, int h)
 {
     return sad_horiz_bilinear_filter_8width_msa(pix1, stride, pix2, stride, h);
 }
 
-int ff_pix_abs8_y2_msa(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_pix_abs8_y2_msa(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                        ptrdiff_t stride, int h)
 {
     return sad_vert_bilinear_filter_8width_msa(pix1, stride, pix2, stride, h);
 }
 
-int ff_pix_abs8_xy2_msa(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_pix_abs8_xy2_msa(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                         ptrdiff_t stride, int h)
 {
     return sad_hv_bilinear_filter_8width_msa(pix1, stride, pix2, stride, h);
 }
 
-int ff_sse16_msa(MpegEncContext *v, uint8_t *src, uint8_t *ref,
+int ff_sse16_msa(MPVEncContext *v, uint8_t *src, uint8_t *ref,
                  ptrdiff_t stride, int height)
 {
     return sse_16width_msa(src, stride, ref, stride, height);
 }
 
-int ff_sse8_msa(MpegEncContext *v, uint8_t *src, uint8_t *ref,
+int ff_sse8_msa(MPVEncContext *v, uint8_t *src, uint8_t *ref,
                 ptrdiff_t stride, int height)
 {
     return sse_8width_msa(src, stride, ref, stride, height);
 }
 
-int ff_sse4_msa(MpegEncContext *v, uint8_t *src, uint8_t *ref,
+int ff_sse4_msa(MPVEncContext *v, uint8_t *src, uint8_t *ref,
                 ptrdiff_t stride, int height)
 {
     return sse_4width_msa(src, stride, ref, stride, height);
 }
 
-int ff_hadamard8_diff8x8_msa(MpegEncContext *s, uint8_t *dst, uint8_t *src,
+int ff_hadamard8_diff8x8_msa(MPVEncContext *s, uint8_t *dst, uint8_t *src,
                              ptrdiff_t stride, int h)
 {
     return hadamard_diff_8x8_msa(src, stride, dst, stride);
 }
 
-int ff_hadamard8_intra8x8_msa(MpegEncContext *s, uint8_t *dst, uint8_t *src,
+int ff_hadamard8_intra8x8_msa(MPVEncContext *s, uint8_t *dst, uint8_t *src,
                               ptrdiff_t stride, int h)
 {
     return hadamard_intra_8x8_msa(src, stride, dst, stride);
@@ -667,7 +667,7 @@  int ff_hadamard8_intra8x8_msa(MpegEncContext *s, uint8_t *dst, uint8_t *src,
 
 /* Hadamard Transform functions */
 #define WRAPPER8_16_SQ(name8, name16)                      \
-int name16(MpegEncContext *s, uint8_t *dst, uint8_t *src,  \
+int name16(MPVEncContext *s, uint8_t *dst, uint8_t *src,   \
            ptrdiff_t stride, int h)                        \
 {                                                          \
     int score = 0;                                         \
diff --git a/libavcodec/mips/mpegvideo_init_mips.c b/libavcodec/mips/mpegvideo_init_mips.c
index f687ad18f1..1c1008d2e5 100644
--- a/libavcodec/mips/mpegvideo_init_mips.c
+++ b/libavcodec/mips/mpegvideo_init_mips.c
@@ -23,7 +23,7 @@ 
 #include "h263dsp_mips.h"
 #include "mpegvideo_mips.h"
 
-av_cold void ff_mpv_common_init_mips(MpegEncContext *s)
+av_cold void ff_mpv_common_init_mips(MPVMainContext *s)
 {
     int cpu_flags = av_get_cpu_flags();
 
diff --git a/libavcodec/mips/mpegvideo_mips.h b/libavcodec/mips/mpegvideo_mips.h
index 760d7b3295..88d675eee6 100644
--- a/libavcodec/mips/mpegvideo_mips.h
+++ b/libavcodec/mips/mpegvideo_mips.h
@@ -23,16 +23,16 @@ 
 
 #include "libavcodec/mpegvideo.h"
 
-void ff_dct_unquantize_h263_intra_mmi(MpegEncContext *s, int16_t *block,
+void ff_dct_unquantize_h263_intra_mmi(MPVContext *s, int16_t *block,
         int n, int qscale);
-void ff_dct_unquantize_h263_inter_mmi(MpegEncContext *s, int16_t *block,
+void ff_dct_unquantize_h263_inter_mmi(MPVContext *s, int16_t *block,
         int n, int qscale);
-void ff_dct_unquantize_mpeg1_intra_mmi(MpegEncContext *s, int16_t *block,
+void ff_dct_unquantize_mpeg1_intra_mmi(MPVContext *s, int16_t *block,
         int n, int qscale);
-void ff_dct_unquantize_mpeg1_inter_mmi(MpegEncContext *s, int16_t *block,
+void ff_dct_unquantize_mpeg1_inter_mmi(MPVContext *s, int16_t *block,
         int n, int qscale);
-void ff_dct_unquantize_mpeg2_intra_mmi(MpegEncContext *s, int16_t *block,
+void ff_dct_unquantize_mpeg2_intra_mmi(MPVContext *s, int16_t *block,
         int n, int qscale);
-void ff_denoise_dct_mmi(MpegEncContext *s, int16_t *block);
+void ff_denoise_dct_mmi(MPVContext *s, int16_t *block);
 
 #endif /* AVCODEC_MIPS_MPEGVIDEO_MIPS_H */
diff --git a/libavcodec/mips/mpegvideo_mmi.c b/libavcodec/mips/mpegvideo_mmi.c
index 3d5b5e20ab..e8ff72c946 100644
--- a/libavcodec/mips/mpegvideo_mmi.c
+++ b/libavcodec/mips/mpegvideo_mmi.c
@@ -25,7 +25,7 @@ 
 #include "mpegvideo_mips.h"
 #include "libavutil/mips/mmiutils.h"
 
-void ff_dct_unquantize_h263_intra_mmi(MpegEncContext *s, int16_t *block,
+void ff_dct_unquantize_h263_intra_mmi(MPVContext *s, int16_t *block,
         int n, int qscale)
 {
     int64_t level, nCoeffs;
@@ -101,7 +101,7 @@  void ff_dct_unquantize_h263_intra_mmi(MpegEncContext *s, int16_t *block,
     block[0] = level;
 }
 
-void ff_dct_unquantize_h263_inter_mmi(MpegEncContext *s, int16_t *block,
+void ff_dct_unquantize_h263_inter_mmi(MPVContext *s, int16_t *block,
         int n, int qscale)
 {
     int64_t nCoeffs;
@@ -160,7 +160,7 @@  void ff_dct_unquantize_h263_inter_mmi(MpegEncContext *s, int16_t *block,
     );
 }
 
-void ff_dct_unquantize_mpeg1_intra_mmi(MpegEncContext *s, int16_t *block,
+void ff_dct_unquantize_mpeg1_intra_mmi(MPVContext *s, int16_t *block,
         int n, int qscale)
 {
     int64_t nCoeffs;
@@ -254,7 +254,7 @@  void ff_dct_unquantize_mpeg1_intra_mmi(MpegEncContext *s, int16_t *block,
     block[0] = block0;
 }
 
-void ff_dct_unquantize_mpeg1_inter_mmi(MpegEncContext *s, int16_t *block,
+void ff_dct_unquantize_mpeg1_inter_mmi(MPVContext *s, int16_t *block,
         int n, int qscale)
 {
     int64_t nCoeffs;
@@ -342,7 +342,7 @@  void ff_dct_unquantize_mpeg1_inter_mmi(MpegEncContext *s, int16_t *block,
     );
 }
 
-void ff_dct_unquantize_mpeg2_intra_mmi(MpegEncContext *s, int16_t *block,
+void ff_dct_unquantize_mpeg2_intra_mmi(MPVContext *s, int16_t *block,
         int n, int qscale)
 {
     uint64_t nCoeffs;
@@ -435,7 +435,7 @@  void ff_dct_unquantize_mpeg2_intra_mmi(MpegEncContext *s, int16_t *block,
     block[0]= block0;
 }
 
-void ff_denoise_dct_mmi(MpegEncContext *s, int16_t *block)
+void ff_denoise_dct_mmi(MPVContext *s, int16_t *block)
 {
     const int intra = s->mb_intra;
     int *sum = s->dct_error_sum[intra];
diff --git a/libavcodec/mips/mpegvideo_msa.c b/libavcodec/mips/mpegvideo_msa.c
index aa9ef770eb..26dc3df92c 100644
--- a/libavcodec/mips/mpegvideo_msa.c
+++ b/libavcodec/mips/mpegvideo_msa.c
@@ -193,7 +193,7 @@  static int32_t mpeg2_dct_unquantize_inter_msa(int16_t *block,
     return sum_res;
 }
 
-void ff_dct_unquantize_h263_intra_msa(MpegEncContext *s,
+void ff_dct_unquantize_h263_intra_msa(MPVContext *s,
                                       int16_t *block, int32_t index,
                                       int32_t qscale)
 {
@@ -218,7 +218,7 @@  void ff_dct_unquantize_h263_intra_msa(MpegEncContext *s,
     h263_dct_unquantize_msa(block, qmul, qadd, nCoeffs, 1);
 }
 
-void ff_dct_unquantize_h263_inter_msa(MpegEncContext *s,
+void ff_dct_unquantize_h263_inter_msa(MPVContext *s,
                                       int16_t *block, int32_t index,
                                       int32_t qscale)
 {
@@ -235,7 +235,7 @@  void ff_dct_unquantize_h263_inter_msa(MpegEncContext *s,
     h263_dct_unquantize_msa(block, qmul, qadd, nCoeffs, 0);
 }
 
-void ff_dct_unquantize_mpeg2_inter_msa(MpegEncContext *s,
+void ff_dct_unquantize_mpeg2_inter_msa(MPVContext *s,
                                        int16_t *block, int32_t index,
                                        int32_t qscale)
 {
diff --git a/libavcodec/mjpegenc.c b/libavcodec/mjpegenc.c
index b9b17a50df..481ee884e9 100644
--- a/libavcodec/mjpegenc.c
+++ b/libavcodec/mjpegenc.c
@@ -44,10 +44,10 @@ 
 
 /* The following is the private context of MJPEG/AMV decoder.
  * Note that when using slice threading only the main thread's
- * MpegEncContext is followed by a MjpegContext; the other threads
- * can access this shared context via MpegEncContext.mjpeg. */
+ * MPVMainEncContext is followed by an MJpegContext; the other threads
+ * can access this shared context via MPVEncContext.mjpeg_ctx. */
 typedef struct MJPEGEncContext {
-    MpegEncContext mpeg;
+    MPVMainEncContext mpeg;
     MJpegContext   mjpeg;
 } MJPEGEncContext;
 
@@ -75,7 +75,7 @@  static av_cold void init_uni_ac_vlc(const uint8_t huff_size_ac[256],
     }
 }
 
-static void mjpeg_encode_picture_header(MpegEncContext *s)
+static void mjpeg_encode_picture_header(MPVMainEncContext *s)
 {
     ff_mjpeg_encode_picture_header(s->avctx, &s->pb, s->mjpeg_ctx,
                                    &s->intra_scantable, 0,
@@ -87,7 +87,7 @@  static void mjpeg_encode_picture_header(MpegEncContext *s)
         s->thread_context[i]->esc_pos = 0;
 }
 
-void ff_mjpeg_amv_encode_picture_header(MpegEncContext *s)
+void ff_mjpeg_amv_encode_picture_header(MPVMainEncContext *s)
 {
     MJPEGEncContext *const m = (MJPEGEncContext*)s;
     av_assert2(s->mjpeg_ctx == &m->mjpeg);
@@ -100,9 +100,9 @@  void ff_mjpeg_amv_encode_picture_header(MpegEncContext *s)
 /**
  * Encodes and outputs the entire frame in the JPEG format.
  *
- * @param s The MpegEncContext.
+ * @param s The MPVEncContext.
  */
-static void mjpeg_encode_picture_frame(MpegEncContext *s)
+static void mjpeg_encode_picture_frame(MPVEncContext *s)
 {
     int nbits, code, table_id;
     MJpegContext *m = s->mjpeg_ctx;
@@ -211,10 +211,10 @@  static void mjpeg_build_optimal_huffman(MJpegContext *m)
  *
  * Header + values + stuffing.
  *
- * @param s The MpegEncContext.
+ * @param s The MPVEncContext.
  * @return int Error code, 0 if successful.
  */
-int ff_mjpeg_encode_stuffing(MpegEncContext *s)
+int ff_mjpeg_encode_stuffing(MPVEncContext *s)
 {
     MJpegContext *const m = s->mjpeg_ctx;
     PutBitContext *pbc = &s->pb;
@@ -260,7 +260,7 @@  fail:
     return ret;
 }
 
-static int alloc_huffman(MpegEncContext *s)
+static int alloc_huffman(MPVMainEncContext *s)
 {
     MJpegContext *m = s->mjpeg_ctx;
     size_t num_mbs, num_blocks, num_codes;
@@ -288,7 +288,7 @@  static int alloc_huffman(MpegEncContext *s)
     return 0;
 }
 
-av_cold int ff_mjpeg_encode_init(MpegEncContext *s)
+av_cold int ff_mjpeg_encode_init(MPVMainEncContext *s)
 {
     MJpegContext *const m = &((MJPEGEncContext*)s)->mjpeg;
     int ret, use_slices;
@@ -412,11 +412,11 @@  static void ff_mjpeg_encode_coef(MJpegContext *s, uint8_t table_id, int val, int
 /**
  * Add the block's data into the JPEG buffer.
  *
- * @param s The MpegEncContext that contains the JPEG buffer.
+ * @param s The MPVEncContext that contains the JPEG buffer.
  * @param block The block.
  * @param n The block's index or number.
  */
-static void record_block(MpegEncContext *s, int16_t *block, int n)
+static void record_block(MPVEncContext *s, int16_t *block, int n)
 {
     int i, j, table_id;
     int component, dc, last_index, val, run;
@@ -459,7 +459,7 @@  static void record_block(MpegEncContext *s, int16_t *block, int n)
         ff_mjpeg_encode_code(m, table_id, 0);
 }
 
-static void encode_block(MpegEncContext *s, int16_t *block, int n)
+static void encode_block(MPVEncContext *s, int16_t *block, int n)
 {
     int mant, nbits, code, i, j;
     int component, dc, run, last_index, val;
@@ -517,7 +517,7 @@  static void encode_block(MpegEncContext *s, int16_t *block, int n)
         put_bits(&s->pb, huff_size_ac[0], huff_code_ac[0]);
 }
 
-void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
+void ff_mjpeg_encode_mb(MPVEncContext *s, int16_t block[12][64])
 {
     int i;
     if (s->mjpeg_ctx->huffman == HUFFMAN_TABLE_OPTIMAL) {
@@ -589,7 +589,7 @@  void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64])
 static int amv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
                               const AVFrame *pic_arg, int *got_packet)
 {
-    MpegEncContext *s = avctx->priv_data;
+    MPVMainEncContext *const s = avctx->priv_data;
     AVFrame *pic;
     int i, ret;
     int chroma_h_shift, chroma_v_shift;
diff --git a/libavcodec/mjpegenc.h b/libavcodec/mjpegenc.h
index a593b67e96..609b663e09 100644
--- a/libavcodec/mjpegenc.h
+++ b/libavcodec/mjpegenc.h
@@ -36,7 +36,7 @@ 
 #include <stdint.h>
 
 #include "mjpeg.h"
-#include "mpegvideo.h"
+#include "mpegvideoenc.h"
 #include "put_bits.h"
 
 /**
@@ -71,9 +71,9 @@  typedef struct MJpegContext {
     uint8_t huff_size_ac_chrominance[256];  ///< AC chrominance Huffman table size.
     uint16_t huff_code_ac_chrominance[256]; ///< AC chrominance Huffman table codes.
 
-    /** Storage for AC luminance VLC (in MpegEncContext) */
+    /** Storage for AC luminance VLC (in MPVEncContext) */
     uint8_t uni_ac_vlc_len[64 * 64 * 2];
-    /** Storage for AC chrominance VLC (in MpegEncContext) */
+    /** Storage for AC chrominance VLC (in MPVEncContext) */
     uint8_t uni_chroma_ac_vlc_len[64 * 64 * 2];
 
     // Default DC tables have exactly 12 values
@@ -107,9 +107,9 @@  static inline void put_marker(PutBitContext *p, enum JpegMarker code)
     put_bits(p, 8, code);
 }
 
-int  ff_mjpeg_encode_init(MpegEncContext *s);
-void ff_mjpeg_amv_encode_picture_header(MpegEncContext *s);
-void ff_mjpeg_encode_mb(MpegEncContext *s, int16_t block[12][64]);
-int  ff_mjpeg_encode_stuffing(MpegEncContext *s);
+int  ff_mjpeg_encode_init(MPVMainEncContext *s);
+void ff_mjpeg_amv_encode_picture_header(MPVMainEncContext *s);
+void ff_mjpeg_encode_mb(MPVEncContext *s, int16_t block[12][64]);
+int  ff_mjpeg_encode_stuffing(MPVEncContext *s);
 
 #endif /* AVCODEC_MJPEGENC_H */
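
The cast in ff_mjpeg_amv_encode_picture_header() and the priv_data handling in amv_encode_picture() rely on MPVMainEncContext being the first member of MJPEGEncContext: a pointer to a structure, suitably converted, points to its first member and vice versa. A small self-contained sketch of that layout guarantee; MainCtx, JpegExtra and their members are placeholders rather than the real MPVMainEncContext/MJpegContext fields:

#include <assert.h>

typedef struct MainCtx   { int qscale;  } MainCtx;    /* stands in for MPVMainEncContext */
typedef struct JpegExtra { int esc_pos; } JpegExtra;  /* stands in for MJpegContext      */

typedef struct WrapCtx {                              /* stands in for MJPEGEncContext   */
    MainCtx   mpeg;                                   /* must remain the first member    */
    JpegExtra mjpeg;
} WrapCtx;

int main(void)
{
    WrapCtx w = { { 4 }, { 0 } };
    MainCtx *s = &w.mpeg;      /* what avctx->priv_data effectively hands out */
    WrapCtx *m = (WrapCtx *)s; /* legal: s points at the first member         */
    assert(&m->mjpeg == &w.mjpeg);
    return 0;
}
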
diff --git a/libavcodec/motion_est.c b/libavcodec/motion_est.c
index 35ca40b90a..34ea72fe3d 100644
--- a/libavcodec/motion_est.c
+++ b/libavcodec/motion_est.c
@@ -47,7 +47,7 @@ 
 #define ME_MAP_SHIFT 3
 #define ME_MAP_MV_BITS 11
 
-static int sad_hpel_motion_search(MpegEncContext * s,
+static int sad_hpel_motion_search(MPVEncContext * s,
                                   int *mx_ptr, int *my_ptr, int dmin,
                                   int src_index, int ref_index,
                                   int size, int h);
@@ -104,7 +104,7 @@  static int get_flags(MotionEstContext *c, int direct, int chroma){
            + (chroma ? FLAG_CHROMA : 0);
 }
 
-static av_always_inline int cmp_direct_inline(MpegEncContext *s, const int x, const int y, const int subx, const int suby,
+static av_always_inline int cmp_direct_inline(MPVEncContext *s, const int x, const int y, const int subx, const int suby,
                       const int size, const int h, int ref_index, int src_index,
                       me_cmp_func cmp_func, me_cmp_func chroma_cmp_func, int qpel){
     MotionEstContext * const c= &s->me;
@@ -176,7 +176,7 @@  static av_always_inline int cmp_direct_inline(MpegEncContext *s, const int x, co
     return d;
 }
 
-static av_always_inline int cmp_inline(MpegEncContext *s, const int x, const int y, const int subx, const int suby,
+static av_always_inline int cmp_inline(MPVEncContext *s, const int x, const int y, const int subx, const int suby,
                       const int size, const int h, int ref_index, int src_index,
                       me_cmp_func cmp_func, me_cmp_func chroma_cmp_func, int qpel, int chroma){
     MotionEstContext * const c= &s->me;
@@ -228,13 +228,13 @@  static av_always_inline int cmp_inline(MpegEncContext *s, const int x, const int
     return d;
 }
 
-static int cmp_simple(MpegEncContext *s, const int x, const int y,
+static int cmp_simple(MPVEncContext *s, const int x, const int y,
                       int ref_index, int src_index,
                       me_cmp_func cmp_func, me_cmp_func chroma_cmp_func){
     return cmp_inline(s,x,y,0,0,0,16,ref_index,src_index, cmp_func, chroma_cmp_func, 0, 0);
 }
 
-static int cmp_fpel_internal(MpegEncContext *s, const int x, const int y,
+static int cmp_fpel_internal(MPVEncContext *s, const int x, const int y,
                       const int size, const int h, int ref_index, int src_index,
                       me_cmp_func cmp_func, me_cmp_func chroma_cmp_func, const int flags){
     if(flags&FLAG_DIRECT){
@@ -244,7 +244,7 @@  static int cmp_fpel_internal(MpegEncContext *s, const int x, const int y,
     }
 }
 
-static int cmp_internal(MpegEncContext *s, const int x, const int y, const int subx, const int suby,
+static int cmp_internal(MPVEncContext *s, const int x, const int y, const int subx, const int suby,
                       const int size, const int h, int ref_index, int src_index,
                       me_cmp_func cmp_func, me_cmp_func chroma_cmp_func, const int flags){
     if(flags&FLAG_DIRECT){
@@ -257,7 +257,7 @@  static int cmp_internal(MpegEncContext *s, const int x, const int y, const int s
 /** @brief compares a block (either a full macroblock or a partition thereof)
     against a proposed motion-compensated prediction of that block
  */
-static av_always_inline int cmp(MpegEncContext *s, const int x, const int y, const int subx, const int suby,
+static av_always_inline int cmp(MPVEncContext *s, const int x, const int y, const int subx, const int suby,
                       const int size, const int h, int ref_index, int src_index,
                       me_cmp_func cmp_func, me_cmp_func chroma_cmp_func, const int flags){
     if(av_builtin_constant_p(flags) && av_builtin_constant_p(h) && av_builtin_constant_p(size)
@@ -272,7 +272,7 @@  static av_always_inline int cmp(MpegEncContext *s, const int x, const int y, con
     }
 }
 
-static int cmp_hpel(MpegEncContext *s, const int x, const int y, const int subx, const int suby,
+static int cmp_hpel(MPVEncContext *s, const int x, const int y, const int subx, const int suby,
                       const int size, const int h, int ref_index, int src_index,
                       me_cmp_func cmp_func, me_cmp_func chroma_cmp_func, const int flags){
     if(flags&FLAG_DIRECT){
@@ -282,7 +282,7 @@  static int cmp_hpel(MpegEncContext *s, const int x, const int y, const int subx,
     }
 }
 
-static int cmp_qpel(MpegEncContext *s, const int x, const int y, const int subx, const int suby,
+static int cmp_qpel(MPVEncContext *s, const int x, const int y, const int subx, const int suby,
                       const int size, const int h, int ref_index, int src_index,
                       me_cmp_func cmp_func, me_cmp_func chroma_cmp_func, const int flags){
     if(flags&FLAG_DIRECT){
@@ -294,7 +294,7 @@  static int cmp_qpel(MpegEncContext *s, const int x, const int y, const int subx,
 
 #include "motion_est_template.c"
 
-static int zero_cmp(MpegEncContext *s, uint8_t *a, uint8_t *b,
+static int zero_cmp(MPVEncContext *s, uint8_t *a, uint8_t *b,
                     ptrdiff_t stride, int h)
 {
     return 0;
@@ -303,7 +303,7 @@  static int zero_cmp(MpegEncContext *s, uint8_t *a, uint8_t *b,
 static void zero_hpel(uint8_t *a, const uint8_t *b, ptrdiff_t stride, int h){
 }
 
-int ff_init_me(MpegEncContext *s){
+int ff_init_me(MPVEncContext *s){
     MotionEstContext * const c= &s->me;
     int cache_size= FFMIN(ME_MAP_SIZE>>ME_MAP_SHIFT, 1<<ME_MAP_SHIFT);
     int dia_size= FFMAX(FFABS(s->avctx->dia_size)&255, FFABS(s->avctx->pre_dia_size)&255);
@@ -389,7 +389,7 @@  int ff_init_me(MpegEncContext *s){
     COPY3_IF_LT(dminh, d, dx, x, dy, y)\
 }
 
-static int sad_hpel_motion_search(MpegEncContext * s,
+static int sad_hpel_motion_search(MPVEncContext * s,
                                   int *mx_ptr, int *my_ptr, int dmin,
                                   int src_index, int ref_index,
                                   int size, int h)
@@ -494,7 +494,7 @@  static int sad_hpel_motion_search(MpegEncContext * s,
     return dminh;
 }
 
-static inline void set_p_mv_tables(MpegEncContext * s, int mx, int my, int mv4)
+static inline void set_p_mv_tables(MPVEncContext * s, int mx, int my, int mv4)
 {
     const int xy= s->mb_x + s->mb_y*s->mb_stride;
 
@@ -521,7 +521,7 @@  static inline void set_p_mv_tables(MpegEncContext * s, int mx, int my, int mv4)
 /**
  * get fullpel ME search limits.
  */
-static inline void get_limits(MpegEncContext *s, int x, int y)
+static inline void get_limits(MPVEncContext *s, int x, int y)
 {
     MotionEstContext * const c= &s->me;
     int range= c->avctx->me_range >> (1 + !!(c->flags&FLAG_QPEL));
@@ -568,7 +568,7 @@  static inline void init_mv4_ref(MotionEstContext *c){
     c->src[3][0] = c->src[2][0] + 8;
 }
 
-static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift)
+static inline int h263_mv4_search(MPVEncContext *s, int mx, int my, int shift)
 {
     MotionEstContext * const c= &s->me;
     const int size= 1;
@@ -722,7 +722,7 @@  static inline int h263_mv4_search(MpegEncContext *s, int mx, int my, int shift)
     }
 }
 
-static inline void init_interlaced_ref(MpegEncContext *s, int ref_index){
+static inline void init_interlaced_ref(MPVEncContext *s, int ref_index){
     MotionEstContext * const c= &s->me;
 
     c->ref[1+ref_index][0] = c->ref[0+ref_index][0] + s->linesize;
@@ -735,7 +735,7 @@  static inline void init_interlaced_ref(MpegEncContext *s, int ref_index){
     }
 }
 
-static int interlaced_search(MpegEncContext *s, int ref_index,
+static int interlaced_search(MPVEncContext *s, int ref_index,
                              int16_t (*mv_tables[2][2])[2], uint8_t *field_select_tables[2], int mx, int my, int user_field_select)
 {
     MotionEstContext * const c= &s->me;
@@ -882,7 +882,7 @@  static inline int get_penalty_factor(int lambda, int lambda2, int type){
     }
 }
 
-void ff_estimate_p_frame_motion(MpegEncContext * s,
+void ff_estimate_p_frame_motion(MPVEncContext * s,
                                 int mb_x, int mb_y)
 {
     MotionEstContext * const c= &s->me;
@@ -1058,7 +1058,7 @@  void ff_estimate_p_frame_motion(MpegEncContext * s,
     s->mb_type[mb_y*s->mb_stride + mb_x]= mb_type;
 }
 
-int ff_pre_estimate_p_frame_motion(MpegEncContext * s,
+int ff_pre_estimate_p_frame_motion(MPVEncContext * s,
                                     int mb_x, int mb_y)
 {
     MotionEstContext * const c= &s->me;
@@ -1111,7 +1111,7 @@  int ff_pre_estimate_p_frame_motion(MpegEncContext * s,
     return dmin;
 }
 
-static int estimate_motion_b(MpegEncContext *s, int mb_x, int mb_y,
+static int estimate_motion_b(MPVEncContext *s, int mb_x, int mb_y,
                              int16_t (*mv_table)[2], int ref_index, int f_code)
 {
     MotionEstContext * const c= &s->me;
@@ -1173,7 +1173,7 @@  static int estimate_motion_b(MpegEncContext *s, int mb_x, int mb_y,
     return dmin;
 }
 
-static inline int check_bidir_mv(MpegEncContext * s,
+static inline int check_bidir_mv(MPVEncContext * s,
                    int motion_fx, int motion_fy,
                    int motion_bx, int motion_by,
                    int pred_fx, int pred_fy,
@@ -1238,7 +1238,7 @@  static inline int check_bidir_mv(MpegEncContext * s,
 }
 
 /* refine the bidir vectors in hq mode and return the score in both lq & hq mode*/
-static inline int bidir_refine(MpegEncContext * s, int mb_x, int mb_y)
+static inline int bidir_refine(MPVEncContext * s, int mb_x, int mb_y)
 {
     MotionEstContext * const c= &s->me;
     const int mot_stride = s->mb_stride;
@@ -1385,7 +1385,7 @@  CHECK_BIDIR(-(a),-(b),-(c),-(d))
     return fbmin;
 }
 
-static inline int direct_search(MpegEncContext * s, int mb_x, int mb_y)
+static inline int direct_search(MPVEncContext * s, int mb_x, int mb_y)
 {
     MotionEstContext * const c= &s->me;
     int P[10][2];
@@ -1487,7 +1487,7 @@  static inline int direct_search(MpegEncContext * s, int mb_x, int mb_y)
     return dmin;
 }
 
-void ff_estimate_b_frame_motion(MpegEncContext * s,
+void ff_estimate_b_frame_motion(MPVEncContext * s,
                              int mb_x, int mb_y)
 {
     MotionEstContext * const c= &s->me;
@@ -1595,7 +1595,7 @@  void ff_estimate_b_frame_motion(MpegEncContext * s,
 }
 
 /* find best f_code for ME which do unlimited searches */
-int ff_get_best_fcode(MpegEncContext * s, int16_t (*mv_table)[2], int type)
+int ff_get_best_fcode(MPVEncContext * s, int16_t (*mv_table)[2], int type)
 {
     if (s->motion_est != FF_ME_ZERO) {
         int score[8];
@@ -1648,7 +1648,7 @@  int ff_get_best_fcode(MpegEncContext * s, int16_t (*mv_table)[2], int type)
     }
 }
 
-void ff_fix_long_p_mvs(MpegEncContext * s, int type)
+void ff_fix_long_p_mvs(MPVEncContext * s, int type)
 {
     MotionEstContext * const c= &s->me;
     const int f_code= s->f_code;
@@ -1697,7 +1697,7 @@  void ff_fix_long_p_mvs(MpegEncContext * s, int type)
 /**
  * @param truncate 1 for truncation, 0 for using intra
  */
-void ff_fix_long_mvs(MpegEncContext * s, uint8_t *field_select_table, int field_select,
+void ff_fix_long_mvs(MPVEncContext * s, uint8_t *field_select_table, int field_select,
                      int16_t (*mv_table)[2], int f_code, int type, int truncate)
 {
     MotionEstContext * const c= &s->me;
diff --git a/libavcodec/motion_est.h b/libavcodec/motion_est.h
index 5742e51486..b52b91ab3f 100644
--- a/libavcodec/motion_est.h
+++ b/libavcodec/motion_est.h
@@ -27,7 +27,8 @@ 
 #include "hpeldsp.h"
 #include "qpeldsp.h"
 
-struct MpegEncContext;
+#define MPVEncContext MPVContext
+struct MPVEncContext;
 
 #if ARCH_IA64 // Limit static arrays to avoid gcc failing "short data segment overflowed"
 #define MAX_MV 1024
@@ -92,7 +93,7 @@  typedef struct MotionEstContext {
     qpel_mc_func(*qpel_avg)[16];
     const uint8_t (*mv_penalty)[MAX_DMV * 2 + 1]; ///< bit amount needed to encode a MV
     const uint8_t *current_mv_penalty;
-    int (*sub_motion_search)(struct MpegEncContext *s,
+    int (*sub_motion_search)(struct MPVEncContext *s,
                              int *mx_ptr, int *my_ptr, int dmin,
                              int src_index, int ref_index,
                              int size, int h);
@@ -108,28 +109,28 @@  static inline int ff_h263_round_chroma(int x)
     return h263_chroma_roundtab[x & 0xf] + (x >> 3);
 }
 
-int ff_init_me(struct MpegEncContext *s);
+int ff_init_me(struct MPVEncContext *s);
 
-void ff_estimate_p_frame_motion(struct MpegEncContext *s, int mb_x, int mb_y);
-void ff_estimate_b_frame_motion(struct MpegEncContext *s, int mb_x, int mb_y);
+void ff_estimate_p_frame_motion(struct MPVEncContext *s, int mb_x, int mb_y);
+void ff_estimate_b_frame_motion(struct MPVEncContext *s, int mb_x, int mb_y);
 
-int ff_pre_estimate_p_frame_motion(struct MpegEncContext *s,
+int ff_pre_estimate_p_frame_motion(struct MPVEncContext *s,
                                    int mb_x, int mb_y);
 
-int ff_epzs_motion_search(struct MpegEncContext *s, int *mx_ptr, int *my_ptr,
+int ff_epzs_motion_search(struct MPVEncContext *s, int *mx_ptr, int *my_ptr,
                           int P[10][2], int src_index, int ref_index,
                           int16_t (*last_mv)[2], int ref_mv_scale, int size,
                           int h);
 
-int ff_get_mb_score(struct MpegEncContext *s, int mx, int my, int src_index,
+int ff_get_mb_score(struct MPVEncContext *s, int mx, int my, int src_index,
                     int ref_index, int size, int h, int add_rate);
 
-int ff_get_best_fcode(struct MpegEncContext *s,
+int ff_get_best_fcode(struct MPVEncContext *s,
                       int16_t (*mv_table)[2], int type);
 
-void ff_fix_long_p_mvs(struct MpegEncContext *s, int type);
-void ff_fix_long_mvs(struct MpegEncContext *s, uint8_t *field_select_table,
+void ff_fix_long_p_mvs(struct MPVEncContext *s, int type);
+void ff_fix_long_mvs(struct MPVEncContext *s, uint8_t *field_select_table,
                      int field_select, int16_t (*mv_table)[2], int f_code,
                      int type, int truncate);
-
+#undef MPVEncContext
 #endif /* AVCODEC_MOTION_EST_H */
diff --git a/libavcodec/motion_est_template.c b/libavcodec/motion_est_template.c
index 6ab0ea13dc..57df6436c3 100644
--- a/libavcodec/motion_est_template.c
+++ b/libavcodec/motion_est_template.c
@@ -47,7 +47,7 @@ 
     COPY3_IF_LT(dmin, d, bx, hx, by, hy)\
 }
 
-static int hpel_motion_search(MpegEncContext * s,
+static int hpel_motion_search(MPVEncContext * s,
                                   int *mx_ptr, int *my_ptr, int dmin,
                                   int src_index, int ref_index,
                                   int size, int h)
@@ -152,7 +152,7 @@  static int hpel_motion_search(MpegEncContext * s,
     return dmin;
 }
 
-static int no_sub_motion_search(MpegEncContext * s,
+static int no_sub_motion_search(MPVEncContext * s,
           int *mx_ptr, int *my_ptr, int dmin,
                                   int src_index, int ref_index,
                                   int size, int h)
@@ -162,7 +162,7 @@  static int no_sub_motion_search(MpegEncContext * s,
     return dmin;
 }
 
-static inline int get_mb_score(MpegEncContext *s, int mx, int my,
+static inline int get_mb_score(MPVEncContext *s, int mx, int my,
                                int src_index, int ref_index, int size,
                                int h, int add_rate)
 {
@@ -189,7 +189,7 @@  static inline int get_mb_score(MpegEncContext *s, int mx, int my,
     return d;
 }
 
-int ff_get_mb_score(MpegEncContext *s, int mx, int my, int src_index,
+int ff_get_mb_score(MPVEncContext *s, int mx, int my, int src_index,
                     int ref_index, int size, int h, int add_rate)
 {
     return get_mb_score(s, mx, my, src_index, ref_index, size, h, add_rate);
@@ -204,7 +204,7 @@  int ff_get_mb_score(MpegEncContext *s, int mx, int my, int src_index,
     COPY3_IF_LT(dmin, d, bx, hx, by, hy)\
 }
 
-static int qpel_motion_search(MpegEncContext * s,
+static int qpel_motion_search(MPVEncContext * s,
                                   int *mx_ptr, int *my_ptr, int dmin,
                                   int src_index, int ref_index,
                                   int size, int h)
@@ -413,7 +413,7 @@  if( (y)>(ymax<<(S)) ) av_log(NULL, AV_LOG_ERROR, "%d %d %d %d %d ymax" #v, ymax,
     const int qpel= flags&FLAG_QPEL;\
     const int shift= 1+qpel;\
 
-static av_always_inline int small_diamond_search(MpegEncContext * s, int *best, int dmin,
+static av_always_inline int small_diamond_search(MPVEncContext * s, int *best, int dmin,
                                        int src_index, int ref_index, const int penalty_factor,
                                        int size, int h, int flags)
 {
@@ -454,7 +454,7 @@  static av_always_inline int small_diamond_search(MpegEncContext * s, int *best,
     }
 }
 
-static int funny_diamond_search(MpegEncContext * s, int *best, int dmin,
+static int funny_diamond_search(MPVEncContext * s, int *best, int dmin,
                                        int src_index, int ref_index, const int penalty_factor,
                                        int size, int h, int flags)
 {
@@ -496,7 +496,7 @@  static int funny_diamond_search(MpegEncContext * s, int *best, int dmin,
     return dmin;
 }
 
-static int hex_search(MpegEncContext * s, int *best, int dmin,
+static int hex_search(MPVEncContext * s, int *best, int dmin,
                                        int src_index, int ref_index, const int penalty_factor,
                                        int size, int h, int flags, int dia_size)
 {
@@ -530,7 +530,7 @@  static int hex_search(MpegEncContext * s, int *best, int dmin,
     return dmin;
 }
 
-static int l2s_dia_search(MpegEncContext * s, int *best, int dmin,
+static int l2s_dia_search(MPVEncContext * s, int *best, int dmin,
                                        int src_index, int ref_index, const int penalty_factor,
                                        int size, int h, int flags)
 {
@@ -568,7 +568,7 @@  static int l2s_dia_search(MpegEncContext * s, int *best, int dmin,
     return dmin;
 }
 
-static int umh_search(MpegEncContext * s, int *best, int dmin,
+static int umh_search(MPVEncContext * s, int *best, int dmin,
                                        int src_index, int ref_index, const int penalty_factor,
                                        int size, int h, int flags)
 {
@@ -615,7 +615,7 @@  static int umh_search(MpegEncContext * s, int *best, int dmin,
     return hex_search(s, best, dmin, src_index, ref_index, penalty_factor, size, h, flags, 2);
 }
 
-static int full_search(MpegEncContext * s, int *best, int dmin,
+static int full_search(MPVEncContext * s, int *best, int dmin,
                                        int src_index, int ref_index, const int penalty_factor,
                                        int size, int h, int flags)
 {
@@ -678,7 +678,7 @@  static int full_search(MpegEncContext * s, int *best, int dmin,
 }
 
 #define MAX_SAB_SIZE ME_MAP_SIZE
-static int sab_diamond_search(MpegEncContext * s, int *best, int dmin,
+static int sab_diamond_search(MPVEncContext * s, int *best, int dmin,
                                        int src_index, int ref_index, const int penalty_factor,
                                        int size, int h, int flags)
 {
@@ -768,7 +768,7 @@  static int sab_diamond_search(MpegEncContext * s, int *best, int dmin,
     return dmin;
 }
 
-static int var_diamond_search(MpegEncContext * s, int *best, int dmin,
+static int var_diamond_search(MPVEncContext * s, int *best, int dmin,
                                        int src_index, int ref_index, const int penalty_factor,
                                        int size, int h, int flags)
 {
@@ -829,7 +829,7 @@  static int var_diamond_search(MpegEncContext * s, int *best, int dmin,
     return dmin;
 }
 
-static av_always_inline int diamond_search(MpegEncContext * s, int *best, int dmin,
+static av_always_inline int diamond_search(MPVEncContext * s, int *best, int dmin,
                                        int src_index, int ref_index, const int penalty_factor,
                                        int size, int h, int flags){
     MotionEstContext * const c= &s->me;
@@ -857,7 +857,7 @@  static av_always_inline int diamond_search(MpegEncContext * s, int *best, int dm
    it takes fewer iterations. And it increases the chance that we find the
    optimal mv.
  */
-static av_always_inline int epzs_motion_search_internal(MpegEncContext * s, int *mx_ptr, int *my_ptr,
+static av_always_inline int epzs_motion_search_internal(MPVEncContext * s, int *mx_ptr, int *my_ptr,
                              int P[10][2], int src_index, int ref_index, int16_t (*last_mv)[2],
                              int ref_mv_scale, int flags, int size, int h)
 {
@@ -974,7 +974,7 @@  static av_always_inline int epzs_motion_search_internal(MpegEncContext * s, int
 }
 
 //this function is dedicated to the brain damaged gcc
-int ff_epzs_motion_search(MpegEncContext *s, int *mx_ptr, int *my_ptr,
+int ff_epzs_motion_search(MPVEncContext *s, int *mx_ptr, int *my_ptr,
                           int P[10][2], int src_index, int ref_index,
                           int16_t (*last_mv)[2], int ref_mv_scale,
                           int size, int h)
@@ -990,7 +990,7 @@  int ff_epzs_motion_search(MpegEncContext *s, int *mx_ptr, int *my_ptr,
     }
 }
 
-static int epzs_motion_search2(MpegEncContext * s,
+static int epzs_motion_search2(MPVEncContext * s,
                              int *mx_ptr, int *my_ptr, int P[10][2],
                              int src_index, int ref_index, int16_t (*last_mv)[2],
                              int ref_mv_scale, const int size)
diff --git a/libavcodec/mpeg12.c b/libavcodec/mpeg12.c
index 58e03c05d4..c65805bd8f 100644
--- a/libavcodec/mpeg12.c
+++ b/libavcodec/mpeg12.c
@@ -103,7 +103,7 @@  av_cold void ff_init_2d_vlc_rl(RLTable *rl, unsigned static_size, int flags)
     }
 }
 
-av_cold void ff_mpeg12_common_init(MpegEncContext *s)
+av_cold void ff_mpeg12_common_init(MPVMainContext *s)
 {
 
     s->y_dc_scale_table =
@@ -111,7 +111,7 @@  av_cold void ff_mpeg12_common_init(MpegEncContext *s)
 
 }
 
-void ff_mpeg1_clean_buffers(MpegEncContext *s)
+void ff_mpeg1_clean_buffers(MPVContext *s)
 {
     s->last_dc[0] = 1 << (7 + s->intra_dc_precision);
     s->last_dc[1] = s->last_dc[0];
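[Editor's note] For reference, the reset value written by ff_mpeg1_clean_buffers() above is simply the midpoint of the DC range for the chosen intra_dc_precision (0..3 selects 8..11-bit DC). A quick standalone check of that arithmetic:

    #include <stdio.h>

    int main(void)
    {
        /* same expression as in ff_mpeg1_clean_buffers() */
        for (int intra_dc_precision = 0; intra_dc_precision <= 3; intra_dc_precision++)
            printf("precision %d -> reset %d\n",
                   intra_dc_precision, 1 << (7 + intra_dc_precision));
        /* prints 128, 256, 512, 1024 */
        return 0;
    }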
diff --git a/libavcodec/mpeg12.h b/libavcodec/mpeg12.h
index 7ab2527a36..b18568daa8 100644
--- a/libavcodec/mpeg12.h
+++ b/libavcodec/mpeg12.h
@@ -24,6 +24,7 @@ 
 
 #include "mpeg12vlc.h"
 #include "mpegvideo.h"
+#include "mpegvideoenc.h"
 #include "rl.h"
 
 /* Start codes. */
@@ -36,7 +37,7 @@ 
 #define EXT_START_CODE          0x000001b5
 #define USER_START_CODE         0x000001b2
 
-void ff_mpeg12_common_init(MpegEncContext *s);
+void ff_mpeg12_common_init(MPVMainContext *s);
 
 #define INIT_2D_VLC_RL(rl, static_size, flags)\
 {\
@@ -70,16 +71,16 @@  int ff_mpeg1_decode_block_intra(GetBitContext *gb,
                                 uint8_t *const scantable, int last_dc[3],
                                 int16_t *block, int index, int qscale);
 
-void ff_mpeg1_clean_buffers(MpegEncContext *s);
+void ff_mpeg1_clean_buffers(MPVContext *s);
 #if FF_API_FLAG_TRUNCATED
 int ff_mpeg1_find_frame_end(ParseContext *pc, const uint8_t *buf, int buf_size, AVCodecParserContext *s);
 #endif
 
-void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number);
-void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64],
+void ff_mpeg1_encode_picture_header(MPVMainEncContext *s, int picture_number);
+void ff_mpeg1_encode_mb(MPVEncContext *s, int16_t block[8][64],
                         int motion_x, int motion_y);
-void ff_mpeg1_encode_init(MpegEncContext *s);
-void ff_mpeg1_encode_slice_header(MpegEncContext *s);
+void ff_mpeg1_encode_init(MPVMainEncContext *s);
+void ff_mpeg1_encode_slice_header(MPVEncContext *s);
 
 void ff_mpeg12_find_best_frame_rate(AVRational frame_rate,
                                     int *code, int *ext_n, int *ext_d,
diff --git a/libavcodec/mpeg12dec.c b/libavcodec/mpeg12dec.c
index 4a7bd6d466..4c53451d8c 100644
--- a/libavcodec/mpeg12dec.c
+++ b/libavcodec/mpeg12dec.c
@@ -55,8 +55,8 @@ 
 #define A53_MAX_CC_COUNT 2000
 
 typedef struct Mpeg1Context {
-    MpegEncContext mpeg_enc_ctx;
-    int mpeg_enc_ctx_allocated; /* true if decoding context allocated */
+    MPVMainDecContext mpeg_ctx;
+    int mpeg_ctx_allocated;     /* true if decoding context allocated */
     int repeat_field;           /* true if we must repeat the field */
     AVPanScan pan_scan;         /* some temporary storage for the panscan */
     AVStereo3D stereo3d;
@@ -106,7 +106,7 @@  static const uint32_t btype2mb_type[11] = {
 };
 
 /* as H.263, but only 17 codes */
-static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
+static int mpeg_decode_motion(MPVDecContext *s, int fcode, int pred)
 {
     int code, sign, val, shift;
 
@@ -142,7 +142,7 @@  static int mpeg_decode_motion(MpegEncContext *s, int fcode, int pred)
         }                                                                     \
     } while (0)
 
-static inline int mpeg1_decode_block_inter(MpegEncContext *s,
+static inline int mpeg1_decode_block_inter(MPVDecContext *s,
                                            int16_t *block, int n)
 {
     int level, i, j, run;
@@ -231,7 +231,7 @@  end:
  * Changing this would eat up any speed benefits it has.
  * Do not use "fast" flag if you need the code to be robust.
  */
-static inline int mpeg1_fast_decode_block_inter(MpegEncContext *s,
+static inline int mpeg1_fast_decode_block_inter(MPVDecContext *s,
                                                 int16_t *block, int n)
 {
     int level, i, j, run;
@@ -316,7 +316,7 @@  end:
     return 0;
 }
 
-static inline int mpeg2_decode_block_non_intra(MpegEncContext *s,
+static inline int mpeg2_decode_block_non_intra(MPVDecContext *s,
                                                int16_t *block, int n)
 {
     int level, i, j, run;
@@ -406,7 +406,7 @@  end:
  * Changing this would eat up any speed benefits it has.
  * Do not use "fast" flag if you need the code to be robust.
  */
-static inline int mpeg2_fast_decode_block_non_intra(MpegEncContext *s,
+static inline int mpeg2_fast_decode_block_non_intra(MPVDecContext *s,
                                                     int16_t *block, int n)
 {
     int level, i, j, run;
@@ -478,7 +478,7 @@  end:
     return 0;
 }
 
-static inline int mpeg2_decode_block_intra(MpegEncContext *s,
+static inline int mpeg2_decode_block_intra(MPVDecContext *s,
                                            int16_t *block, int n)
 {
     int level, dc, diff, i, j, run;
@@ -564,7 +564,7 @@  static inline int mpeg2_decode_block_intra(MpegEncContext *s,
  * Changing this would eat up any speed benefits it has.
  * Do not use "fast" flag if you need the code to be robust.
  */
-static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s,
+static inline int mpeg2_fast_decode_block_intra(MPVDecContext *s,
                                                 int16_t *block, int n)
 {
     int level, dc, diff, i, j, run;
@@ -640,7 +640,7 @@  static inline int mpeg2_fast_decode_block_intra(MpegEncContext *s,
 /******************************************/
 /* decoding */
 
-static inline int get_dmv(MpegEncContext *s)
+static inline int get_dmv(MPVDecContext *s)
 {
     if (get_bits1(&s->gb))
         return 1 - (get_bits1(&s->gb) << 1);
@@ -654,7 +654,7 @@  static inline int get_dmv(MpegEncContext *s)
 #define MT_16X8  2
 #define MT_DMV   3
 
-static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
+static int mpeg_decode_mb(MPVDecContext *s, int16_t block[12][64])
 {
     int i, j, k, cbp, val, mb_type, motion_type;
     const int mb_block_count = 4 + (1 << s->chroma_format);
@@ -1054,7 +1054,7 @@  static int mpeg_decode_mb(MpegEncContext *s, int16_t block[12][64])
 static av_cold int mpeg_decode_init(AVCodecContext *avctx)
 {
     Mpeg1Context *s    = avctx->priv_data;
-    MpegEncContext *s2 = &s->mpeg_enc_ctx;
+    MPVMainDecContext *const s2 = &s->mpeg_ctx;
 
     if (   avctx->codec_tag != AV_RL32("VCR2")
         && avctx->codec_tag != AV_RL32("BW10"))
@@ -1064,11 +1064,11 @@  static av_cold int mpeg_decode_init(AVCodecContext *avctx)
     /* we need some permutation to store matrices,
      * until the decoder sets the real permutation. */
     ff_mpv_idct_init(s2);
-    ff_mpeg12_common_init(&s->mpeg_enc_ctx);
+    ff_mpeg12_common_init(&s->mpeg_ctx);
     ff_mpeg12_init_vlcs();
 
     s2->chroma_format              = 1;
-    s->mpeg_enc_ctx_allocated      = 0;
+    s->mpeg_ctx_allocated          = 0;
     s->repeat_field                = 0;
     avctx->color_range             = AVCOL_RANGE_MPEG;
     return 0;
@@ -1079,11 +1079,11 @@  static int mpeg_decode_update_thread_context(AVCodecContext *avctx,
                                              const AVCodecContext *avctx_from)
 {
     Mpeg1Context *ctx = avctx->priv_data, *ctx_from = avctx_from->priv_data;
-    MpegEncContext *s = &ctx->mpeg_enc_ctx, *s1 = &ctx_from->mpeg_enc_ctx;
+    MPVMainDecContext *const s = &ctx->mpeg_ctx, *s1 = &ctx_from->mpeg_ctx;
     int err;
 
     if (avctx == avctx_from               ||
-        !ctx_from->mpeg_enc_ctx_allocated ||
+        !ctx_from->mpeg_ctx_allocated     ||
         !s1->context_initialized)
         return 0;
 
@@ -1091,8 +1091,8 @@  static int mpeg_decode_update_thread_context(AVCodecContext *avctx,
     if (err)
         return err;
 
-    if (!ctx->mpeg_enc_ctx_allocated)
-        memcpy(s + 1, s1 + 1, sizeof(Mpeg1Context) - sizeof(MpegEncContext));
+    if (!ctx->mpeg_ctx_allocated)
+        memcpy(s + 1, s1 + 1, sizeof(Mpeg1Context) - sizeof(MPVMainDecContext));
 
     return 0;
 }
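[Editor's note] The memcpy above (and the Mpeg1Context cast in mpeg_field_start() below, as well as IPUContext at the end of this file) relies on the codec context being the first member of the wrapper struct: a pointer to the embedded context can be cast back to the wrapper, and "context + 1" addresses the wrapper-only tail. A standalone sketch with generic names (Base and Wrapper are illustrative):

    #include <stdio.h>
    #include <string.h>

    typedef struct Base    { int width, height; } Base;
    typedef struct Wrapper {
        Base base;          /* must remain the first member */
        int  extra_state;   /* wrapper-only tail starts after the Base */
    } Wrapper;

    int main(void)
    {
        Wrapper a = { .base = { 720, 576 }, .extra_state = 42 }, b = { 0 };
        Base *pb = &a.base;

        /* cast back from the embedded context to the wrapper */
        Wrapper *back = (Wrapper *)pb;

        /* copy only the wrapper-specific tail, as the thread-update code does */
        memcpy(&b.base + 1, &a.base + 1, sizeof(Wrapper) - sizeof(Base));

        printf("%d %d\n", back->extra_state, b.extra_state);   /* 42 42 */
        return 0;
    }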
@@ -1164,7 +1164,7 @@  static const enum AVPixelFormat mpeg12_pixfmt_list_444[] = {
 static enum AVPixelFormat mpeg_get_pixelformat(AVCodecContext *avctx)
 {
     Mpeg1Context *s1  = avctx->priv_data;
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVMainDecContext *const s = &s1->mpeg_ctx;
     const enum AVPixelFormat *pix_fmts;
 
     if (CONFIG_GRAY && (avctx->flags & AV_CODEC_FLAG_GRAY))
@@ -1191,7 +1191,7 @@  static void setup_hwaccel_for_pixfmt(AVCodecContext *avctx)
 
     if (avctx->hwaccel && avctx->pix_fmt == AV_PIX_FMT_XVMC) {
         Mpeg1Context *s1 = avctx->priv_data;
-        MpegEncContext *s = &s1->mpeg_enc_ctx;
+        MPVMainDecContext *const s = &s1->mpeg_ctx;
 
         s->pack_pblocks = 1;
     }
@@ -1202,7 +1202,7 @@  static void setup_hwaccel_for_pixfmt(AVCodecContext *avctx)
 static int mpeg_decode_postinit(AVCodecContext *avctx)
 {
     Mpeg1Context *s1  = avctx->priv_data;
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVMainDecContext *const s = &s1->mpeg_ctx;
     uint8_t old_permutation[64];
     int ret;
 
@@ -1257,7 +1257,7 @@  static int mpeg_decode_postinit(AVCodecContext *avctx)
         avctx->sample_aspect_ratio = (AVRational){ 0, 1 };
     }
 
-    if ((s1->mpeg_enc_ctx_allocated == 0)                   ||
+    if ((s1->mpeg_ctx_allocated == 0)                       ||
         avctx->coded_width       != s->width                ||
         avctx->coded_height      != s->height               ||
         s1->save_width           != s->width                ||
@@ -1265,7 +1265,7 @@  static int mpeg_decode_postinit(AVCodecContext *avctx)
         av_cmp_q(s1->save_aspect, s->avctx->sample_aspect_ratio) ||
         (s1->save_progressive_seq != s->progressive_sequence && FFALIGN(s->height, 16) != FFALIGN(s->height, 32)) ||
         0) {
-        if (s1->mpeg_enc_ctx_allocated) {
+        if (s1->mpeg_ctx_allocated) {
 #if FF_API_FLAG_TRUNCATED
             ParseContext pc = s->parse_context;
             s->parse_context.buffer = 0;
@@ -1274,7 +1274,7 @@  static int mpeg_decode_postinit(AVCodecContext *avctx)
 #else
             ff_mpv_common_end(s);
 #endif
-            s1->mpeg_enc_ctx_allocated = 0;
+            s1->mpeg_ctx_allocated = 0;
         }
 
         ret = ff_set_dimensions(avctx, s->width, s->height);
@@ -1335,7 +1335,7 @@  static int mpeg_decode_postinit(AVCodecContext *avctx)
         quant_matrix_rebuild(s->chroma_intra_matrix, old_permutation, s->idsp.idct_permutation);
         quant_matrix_rebuild(s->chroma_inter_matrix, old_permutation, s->idsp.idct_permutation);
 
-        s1->mpeg_enc_ctx_allocated = 1;
+        s1->mpeg_ctx_allocated = 1;
     }
     return 0;
 }
@@ -1344,7 +1344,7 @@  static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf,
                                 int buf_size)
 {
     Mpeg1Context *s1  = avctx->priv_data;
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVMainDecContext *const s = &s1->mpeg_ctx;
     int ref, f_code, vbv_delay, ret;
 
     ret = init_get_bits8(&s->gb, buf, buf_size);
@@ -1391,7 +1391,7 @@  static int mpeg1_decode_picture(AVCodecContext *avctx, const uint8_t *buf,
 
 static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
 {
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVMainDecContext *const s = &s1->mpeg_ctx;
     int horiz_size_ext, vert_size_ext;
     int bit_rate_ext;
     AVCPBProperties *cpb_props;
@@ -1441,7 +1441,7 @@  static void mpeg_decode_sequence_extension(Mpeg1Context *s1)
 
 static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
 {
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVMainDecContext *const s = &s1->mpeg_ctx;
     int color_description, w, h;
 
     skip_bits(&s->gb, 3); /* video format */
@@ -1465,7 +1465,7 @@  static void mpeg_decode_sequence_display_extension(Mpeg1Context *s1)
 
 static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
 {
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVMainDecContext *const s = &s1->mpeg_ctx;
     int i, nofco;
 
     nofco = 1;
@@ -1497,7 +1497,7 @@  static void mpeg_decode_picture_display_extension(Mpeg1Context *s1)
                s1->pan_scan.position[2][0], s1->pan_scan.position[2][1]);
 }
 
-static int load_matrix(MpegEncContext *s, uint16_t matrix0[64],
+static int load_matrix(MPVMainDecContext *s, uint16_t matrix0[64],
                        uint16_t matrix1[64], int intra)
 {
     int i;
@@ -1520,7 +1520,7 @@  static int load_matrix(MpegEncContext *s, uint16_t matrix0[64],
     return 0;
 }
 
-static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
+static void mpeg_decode_quant_matrix_extension(MPVMainDecContext *s)
 {
     ff_dlog(s->avctx, "matrix extension\n");
 
@@ -1536,7 +1536,7 @@  static void mpeg_decode_quant_matrix_extension(MpegEncContext *s)
 
 static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
 {
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVMainDecContext *const s = &s1->mpeg_ctx;
 
     s->full_pel[0]       = s->full_pel[1] = 0;
     s->mpeg_f_code[0][0] = get_bits(&s->gb, 4);
@@ -1547,7 +1547,7 @@  static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
     s->mpeg_f_code[0][1] += !s->mpeg_f_code[0][1];
     s->mpeg_f_code[1][0] += !s->mpeg_f_code[1][0];
     s->mpeg_f_code[1][1] += !s->mpeg_f_code[1][1];
-    if (!s->pict_type && s1->mpeg_enc_ctx_allocated) {
+    if (!s->pict_type && s1->mpeg_ctx_allocated) {
         av_log(s->avctx, AV_LOG_ERROR, "Missing picture start code\n");
         if (s->avctx->err_recognition & AV_EF_EXPLODE)
             return AVERROR_INVALIDDATA;
@@ -1597,7 +1597,7 @@  static int mpeg_decode_picture_coding_extension(Mpeg1Context *s1)
     return 0;
 }
 
-static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
+static int mpeg_field_start(MPVMainDecContext *s, const uint8_t *buf, int buf_size)
 {
     AVCodecContext *avctx = s->avctx;
     Mpeg1Context *s1      = (Mpeg1Context *) s;
@@ -1705,11 +1705,11 @@  static int mpeg_field_start(MpegEncContext *s, const uint8_t *buf, int buf_size)
 
 /**
  * Decode a slice.
- * MpegEncContext.mb_y must be set to the MB row from the startcode.
+ * MPVDecContext.mb_y must be set to the MB row from the startcode.
  * @return DECODE_SLICE_ERROR if the slice is damaged,
  *         DECODE_SLICE_OK if this slice is OK
  */
-static int mpeg_decode_slice(MpegEncContext *s, int mb_y,
+static int mpeg_decode_slice(MPVDecContext *s, int mb_y,
                              const uint8_t **buf, int buf_size)
 {
     AVCodecContext *avctx = s->avctx;
@@ -1988,7 +1988,7 @@  eos: // end of slice
 
 static int slice_decode_thread(AVCodecContext *c, void *arg)
 {
-    MpegEncContext *s   = *(void **) arg;
+    MPVDecContext *s    = *(void **) arg;
     const uint8_t *buf  = s->gb.buffer;
     int mb_y            = s->start_mb_y;
     const int field_pic = s->picture_structure != PICT_FRAME;
@@ -2042,9 +2042,9 @@  static int slice_decode_thread(AVCodecContext *c, void *arg)
 static int slice_end(AVCodecContext *avctx, AVFrame *pict)
 {
     Mpeg1Context *s1  = avctx->priv_data;
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVMainDecContext *const s = &s1->mpeg_ctx;
 
-    if (!s1->mpeg_enc_ctx_allocated || !s->current_picture_ptr)
+    if (!s1->mpeg_ctx_allocated || !s->current_picture_ptr)
         return 0;
 
     if (s->avctx->hwaccel) {
@@ -2091,7 +2091,7 @@  static int mpeg1_decode_sequence(AVCodecContext *avctx,
                                  const uint8_t *buf, int buf_size)
 {
     Mpeg1Context *s1  = avctx->priv_data;
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVMainDecContext *const s = &s1->mpeg_ctx;
     int width, height;
     int i, v, j;
 
@@ -2179,14 +2179,14 @@  static int mpeg1_decode_sequence(AVCodecContext *avctx,
 static int vcr2_init_sequence(AVCodecContext *avctx)
 {
     Mpeg1Context *s1  = avctx->priv_data;
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVMainDecContext *const s = &s1->mpeg_ctx;
     int i, v, ret;
 
     /* start new MPEG-1 context decoding */
     s->out_format = FMT_MPEG1;
-    if (s1->mpeg_enc_ctx_allocated) {
+    if (s1->mpeg_ctx_allocated) {
         ff_mpv_common_end(s);
-        s1->mpeg_enc_ctx_allocated = 0;
+        s1->mpeg_ctx_allocated = 0;
     }
     s->width            = avctx->coded_width;
     s->height           = avctx->coded_height;
@@ -2199,7 +2199,7 @@  static int vcr2_init_sequence(AVCodecContext *avctx)
     ff_mpv_idct_init(s);
     if ((ret = ff_mpv_common_init(s)) < 0)
         return ret;
-    s1->mpeg_enc_ctx_allocated = 1;
+    s1->mpeg_ctx_allocated = 1;
 
     for (i = 0; i < 64; i++) {
         int j = s->idsp.idct_permutation[i];
@@ -2290,7 +2290,7 @@  static int mpeg_decode_a53_cc(AVCodecContext *avctx,
                         cap[0] = cap[1] = cap[2] = 0x00;
                     } else {
                         field = (field == 2 ? 1 : 0);
-                        if (!s1->mpeg_enc_ctx.top_field_first) field = !field;
+                        if (!s1->mpeg_ctx.top_field_first) field = !field;
                         cap[0] = 0x04 | field;
                         cap[1] = ff_reverse[cc1];
                         cap[2] = ff_reverse[cc2];
@@ -2439,7 +2439,7 @@  static void mpeg_decode_gop(AVCodecContext *avctx,
                             const uint8_t *buf, int buf_size)
 {
     Mpeg1Context *s1  = avctx->priv_data;
-    MpegEncContext *s = &s1->mpeg_enc_ctx;
+    MPVMainDecContext *const s = &s1->mpeg_ctx;
     int broken_link;
     int64_t tc;
 
@@ -2466,7 +2466,7 @@  static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
                          int *got_output, const uint8_t *buf, int buf_size)
 {
     Mpeg1Context *s = avctx->priv_data;
-    MpegEncContext *s2 = &s->mpeg_enc_ctx;
+    MPVMainDecContext *const s2 = &s->mpeg_ctx;
     const uint8_t *buf_ptr = buf;
     const uint8_t *buf_end = buf + buf_size;
     int ret, input_size;
@@ -2721,7 +2721,7 @@  static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
                     break;
                 }
 
-                if (!s->mpeg_enc_ctx_allocated)
+                if (!s->mpeg_ctx_allocated)
                     break;
 
                 if (s2->codec_id == AV_CODEC_ID_MPEG2VIDEO) {
@@ -2757,7 +2757,7 @@  static int decode_chunks(AVCodecContext *avctx, AVFrame *picture,
                                     s2->slice_context_count;
                     av_assert0(avctx->thread_count > 1);
                     if (threshold <= mb_y) {
-                        MpegEncContext *thread_context = s2->thread_context[s->slice_count];
+                        MPVDecContext *const thread_context = s2->thread_context[s->slice_count];
 
                         thread_context->start_mb_y = mb_y;
                         thread_context->end_mb_y   = s2->mb_height;
@@ -2802,7 +2802,7 @@  static int mpeg_decode_frame(AVCodecContext *avctx, void *data,
     int buf_size = avpkt->size;
     Mpeg1Context *s = avctx->priv_data;
     AVFrame *picture = data;
-    MpegEncContext *s2 = &s->mpeg_enc_ctx;
+    MPVMainDecContext *const s2 = &s->mpeg_ctx;
 
     if (buf_size == 0 || (buf_size == 4 && AV_RB32(buf) == SEQ_END_CODE)) {
         /* special case for last picture */
@@ -2830,7 +2830,7 @@  static int mpeg_decode_frame(AVCodecContext *avctx, void *data,
 #endif
 
     s2->codec_tag = ff_toupper4(avctx->codec_tag);
-    if (s->mpeg_enc_ctx_allocated == 0 && (   s2->codec_tag == AV_RL32("VCR2")
+    if (s->mpeg_ctx_allocated == 0 && (       s2->codec_tag == AV_RL32("VCR2")
                                            || s2->codec_tag == AV_RL32("BW10")
                                           ))
         vcr2_init_sequence(avctx);
@@ -2889,8 +2889,8 @@  static av_cold int mpeg_decode_end(AVCodecContext *avctx)
 {
     Mpeg1Context *s = avctx->priv_data;
 
-    if (s->mpeg_enc_ctx_allocated)
-        ff_mpv_common_end(&s->mpeg_enc_ctx);
+    if (s->mpeg_ctx_allocated)
+        ff_mpv_common_end(&s->mpeg_ctx);
     av_buffer_unref(&s->a53_buf_ref);
     return 0;
 }
@@ -3001,7 +3001,7 @@  const AVCodec ff_mpegvideo_decoder = {
 };
 
 typedef struct IPUContext {
-    MpegEncContext m;
+    MPVMainDecContext m;
 
     int flags;
     DECLARE_ALIGNED(32, int16_t, block)[6][64];
@@ -3011,7 +3011,7 @@  static int ipu_decode_frame(AVCodecContext *avctx, void *data,
                             int *got_frame, AVPacket *avpkt)
 {
     IPUContext *s = avctx->priv_data;
-    MpegEncContext *m = &s->m;
+    MPVMainDecContext *const m = &s->m;
     GetBitContext *gb = &m->gb;
     AVFrame * const frame = data;
     int ret;
@@ -3111,7 +3111,7 @@  static int ipu_decode_frame(AVCodecContext *avctx, void *data,
 static av_cold int ipu_decode_init(AVCodecContext *avctx)
 {
     IPUContext *s = avctx->priv_data;
-    MpegEncContext *m = &s->m;
+    MPVMainDecContext *const m = &s->m;
 
     avctx->pix_fmt = AV_PIX_FMT_YUV420P;
 
diff --git a/libavcodec/mpeg12enc.c b/libavcodec/mpeg12enc.c
index 3ec3ac1860..4d01a9215a 100644
--- a/libavcodec/mpeg12enc.c
+++ b/libavcodec/mpeg12enc.c
@@ -64,7 +64,7 @@  static uint32_t mpeg1_lum_dc_uni[512];
 static uint32_t mpeg1_chr_dc_uni[512];
 
 typedef struct MPEG12EncContext {
-    MpegEncContext mpeg;
+    MPVMainEncContext mpeg;
     AVRational frame_rate_ext;
     unsigned frame_rate_index;
 
@@ -122,7 +122,7 @@  av_cold void ff_mpeg1_init_uni_ac_vlc(const RLTable *rl, uint8_t *uni_ac_vlc_len
 #if CONFIG_MPEG1VIDEO_ENCODER || CONFIG_MPEG2VIDEO_ENCODER
 static int find_frame_rate_index(MPEG12EncContext *mpeg12)
 {
-    MpegEncContext *const s = &mpeg12->mpeg;
+    MPVMainEncContext *const s = &mpeg12->mpeg;
     int i;
     AVRational bestq = (AVRational) {0, 0};
     AVRational ext;
@@ -163,7 +163,7 @@  static int find_frame_rate_index(MPEG12EncContext *mpeg12)
 static av_cold int encode_init(AVCodecContext *avctx)
 {
     MPEG12EncContext *const mpeg12 = avctx->priv_data;
-    MpegEncContext *const s = &mpeg12->mpeg;
+    MPVMainEncContext *const s = &mpeg12->mpeg;
     int ret;
     int max_size = avctx->codec_id == AV_CODEC_ID_MPEG2VIDEO ? 16383 : 4095;
 
@@ -255,7 +255,7 @@  static av_cold int encode_init(AVCodecContext *avctx)
     return 0;
 }
 
-static void put_header(MpegEncContext *s, int header)
+static void put_header(MPVEncContext *s, int header)
 {
     align_put_bits(&s->pb);
     put_bits(&s->pb, 16, header >> 16);
@@ -263,7 +263,7 @@  static void put_header(MpegEncContext *s, int header)
 }
 
 /* put sequence header if needed */
-static void mpeg1_encode_sequence_header(MpegEncContext *s)
+static void mpeg1_encode_sequence_header(MPVMainEncContext *s)
 {
     MPEG12EncContext *const mpeg12 = (MPEG12EncContext*)s;
     unsigned int vbv_buffer_size, fps, v;
@@ -422,7 +422,7 @@  static void mpeg1_encode_sequence_header(MpegEncContext *s)
     put_bits(&s->pb, 1, 0);                     // broken link
 }
 
-static inline void encode_mb_skip_run(MpegEncContext *s, int run)
+static inline void encode_mb_skip_run(MPVEncContext *s, int run)
 {
     while (run >= 33) {
         put_bits(&s->pb, 11, 0x008);
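[Editor's note] As a side note on the function above: runs of 33 or more skipped macroblocks cannot be expressed by a single address-increment VLC, so they are split into repeated 11-bit escape codes plus one VLC for the remainder. A sketch of that splitting, with a hypothetical emit() helper standing in for put_bits():

    #include <stdio.h>

    static void emit(const char *what, int value)
    {
        printf("%s %d\n", what, value);     /* stand-in for put_bits() */
    }

    static void encode_skip_run(int run)
    {
        while (run >= 33) {                 /* macroblock_escape, 11 bits */
            emit("escape", 0x008);
            run -= 33;
        }
        emit("addr_increment_vlc_for", run);
    }

    int main(void)
    {
        encode_skip_run(70);                /* escape, escape, VLC for 4 */
        return 0;
    }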
@@ -432,12 +432,12 @@  static inline void encode_mb_skip_run(MpegEncContext *s, int run)
              ff_mpeg12_mbAddrIncrTable[run][0]);
 }
 
-static av_always_inline void put_qscale(MpegEncContext *s)
+static av_always_inline void put_qscale(MPVEncContext *s)
 {
     put_bits(&s->pb, 5, s->qscale);
 }
 
-void ff_mpeg1_encode_slice_header(MpegEncContext *s)
+void ff_mpeg1_encode_slice_header(MPVEncContext *s)
 {
     if (s->codec_id == AV_CODEC_ID_MPEG2VIDEO && s->height > 2800) {
         put_header(s, SLICE_MIN_START_CODE + (s->mb_y & 127));
@@ -451,7 +451,7 @@  void ff_mpeg1_encode_slice_header(MpegEncContext *s)
     put_bits(&s->pb, 1, 0);
 }
 
-void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
+void ff_mpeg1_encode_picture_header(MPVMainEncContext *s, int picture_number)
 {
     MPEG12EncContext *const mpeg12 = (MPEG12EncContext*)s;
     AVFrameSideData *side_data;
@@ -611,7 +611,7 @@  void ff_mpeg1_encode_picture_header(MpegEncContext *s, int picture_number)
     ff_mpeg1_encode_slice_header(s);
 }
 
-static inline void put_mb_modes(MpegEncContext *s, int n, int bits,
+static inline void put_mb_modes(MPVEncContext *s, int n, int bits,
                                 int has_mv, int field_motion)
 {
     put_bits(&s->pb, n, bits);
@@ -624,7 +624,7 @@  static inline void put_mb_modes(MpegEncContext *s, int n, int bits,
 }
 
 // RAL: Parameter added: f_or_b_code
-static void mpeg1_encode_motion(MpegEncContext *s, int val, int f_or_b_code)
+static void mpeg1_encode_motion(MPVEncContext *s, int val, int f_or_b_code)
 {
     if (val == 0) {
         /* zero vector, corresponds to ff_mpeg12_mbMotionVectorTable[0] */
@@ -661,7 +661,7 @@  static void mpeg1_encode_motion(MpegEncContext *s, int val, int f_or_b_code)
     }
 }
 
-static inline void encode_dc(MpegEncContext *s, int diff, int component)
+static inline void encode_dc(MPVEncContext *s, int diff, int component)
 {
     unsigned int diff_u = diff + 255;
     if (diff_u >= 511) {
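[Editor's note] The unsigned comparison at the top of encode_dc() is a compact range check: adding 255 and testing the unsigned sum against 511 detects any diff outside [-255, 255] with a single branch. A standalone check of that trick:

    #include <stdio.h>

    static int dc_diff_in_range(int diff)
    {
        unsigned int diff_u = diff + 255;   /* same trick as encode_dc() */
        return diff_u < 511;                /* true iff -255 <= diff <= 255 */
    }

    int main(void)
    {
        printf("%d %d %d %d\n",
               dc_diff_in_range(-256), dc_diff_in_range(-255),
               dc_diff_in_range(255),  dc_diff_in_range(256));
        /* prints 0 1 1 0 */
        return 0;
    }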
@@ -695,7 +695,7 @@  static inline void encode_dc(MpegEncContext *s, int diff, int component)
     }
 }
 
-static void mpeg1_encode_block(MpegEncContext *s, int16_t *block, int n)
+static void mpeg1_encode_block(MPVEncContext *s, int16_t *block, int n)
 {
     int alevel, level, last_non_zero, dc, diff, i, j, run, last_index, sign;
     int code, component;
@@ -776,7 +776,7 @@  next_coef:
     put_bits(&s->pb, table_vlc[112][1], table_vlc[112][0]);
 }
 
-static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,
+static av_always_inline void mpeg1_encode_mb_internal(MPVEncContext *s,
                                                       int16_t block[8][64],
                                                       int motion_x, int motion_y,
                                                       int mb_block_count,
@@ -1057,7 +1057,7 @@  static av_always_inline void mpeg1_encode_mb_internal(MpegEncContext *s,
     }
 }
 
-void ff_mpeg1_encode_mb(MpegEncContext *s, int16_t block[8][64],
+void ff_mpeg1_encode_mb(MPVEncContext *s, int16_t block[8][64],
                         int motion_x, int motion_y)
 {
     if (s->chroma_format == CHROMA_420)
@@ -1131,7 +1131,7 @@  static av_cold void mpeg12_encode_init_static(void)
             fcode_tab[mv + MAX_MV] = f_code;
 }
 
-av_cold void ff_mpeg1_encode_init(MpegEncContext *s)
+av_cold void ff_mpeg1_encode_init(MPVMainEncContext *s)
 {
     static AVOnce init_static_once = AV_ONCE_INIT;
 
diff --git a/libavcodec/mpeg4video.c b/libavcodec/mpeg4video.c
index ffeaf822b2..68be7e21ed 100644
--- a/libavcodec/mpeg4video.c
+++ b/libavcodec/mpeg4video.c
@@ -39,7 +39,7 @@  av_cold void ff_mpeg4_init_rl_intra(void)
     ff_thread_once(&init_static_once, mpeg4_init_rl_intra);
 }
 
-int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s)
+int ff_mpeg4_get_video_packet_prefix_length(MPVContext *s)
 {
     switch (s->pict_type) {
     case AV_PICTURE_TYPE_I:
@@ -54,7 +54,7 @@  int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s)
     }
 }
 
-void ff_mpeg4_clean_buffers(MpegEncContext *s)
+void ff_mpeg4_clean_buffers(MPVContext *s)
 {
     int c_wrap, c_xy, l_wrap, l_xy;
 
@@ -80,7 +80,7 @@  void ff_mpeg4_clean_buffers(MpegEncContext *s)
 #define tab_bias (tab_size / 2)
 
 // used by MPEG-4 and rv10 decoder
-void ff_mpeg4_init_direct_mv(MpegEncContext *s)
+void ff_mpeg4_init_direct_mv(MPVMainContext *s)
 {
     int i;
     for (i = 0; i < tab_size; i++) {
@@ -90,7 +90,7 @@  void ff_mpeg4_init_direct_mv(MpegEncContext *s)
     }
 }
 
-static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx,
+static inline void ff_mpeg4_set_one_direct_mv(MPVContext *s, int mx,
                                               int my, int i)
 {
     int xy           = s->block_index[i];
@@ -126,7 +126,7 @@  static inline void ff_mpeg4_set_one_direct_mv(MpegEncContext *s, int mx,
 /**
  * @return the mb_type
  */
-int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my)
+int ff_mpeg4_set_direct_mv(MPVContext *s, int mx, int my)
 {
     const int mb_index          = s->mb_x + s->mb_y * s->mb_stride;
     const int colocated_mb_type = s->next_picture.mb_type[mb_index];
diff --git a/libavcodec/mpeg4video.h b/libavcodec/mpeg4video.h
index 14fc5e1396..6e72c6478a 100644
--- a/libavcodec/mpeg4video.h
+++ b/libavcodec/mpeg4video.h
@@ -27,6 +27,7 @@ 
 
 #include "get_bits.h"
 #include "mpegvideo.h"
+#include "mpegvideoenc.h"
 
 // shapes
 #define RECT_SHAPE       0
@@ -71,7 +72,7 @@ 
 #define MAX_NVOP_SIZE 19
 
 typedef struct Mpeg4DecContext {
-    MpegEncContext m;
+    MPVMainDecContext m;
 
     /// number of bits to represent the fractional part of time
     int time_increment_bits;
@@ -124,37 +125,37 @@  typedef struct Mpeg4DecContext {
 } Mpeg4DecContext;
 
 
-void ff_mpeg4_decode_studio(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb,
+void ff_mpeg4_decode_studio(MPVContext *s, uint8_t *dest_y, uint8_t *dest_cb,
                             uint8_t *dest_cr, int block_size, int uvlinesize,
                             int dct_linesize, int dct_offset);
-void ff_mpeg4_encode_mb(MpegEncContext *s,
+void ff_mpeg4_encode_mb(MPVEncContext *s,
                         int16_t block[6][64],
                         int motion_x, int motion_y);
-void ff_mpeg4_pred_ac(MpegEncContext *s, int16_t *block, int n,
+void ff_mpeg4_pred_ac(MPVDecContext *s, int16_t *block, int n,
                       int dir);
-void ff_set_mpeg4_time(MpegEncContext *s);
-int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number);
+void ff_set_mpeg4_time(MPVMainEncContext *s);
+int ff_mpeg4_encode_picture_header(MPVMainEncContext *s, int picture_number);
 
 int ff_mpeg4_decode_picture_header(Mpeg4DecContext *ctx, GetBitContext *gb,
                                    int header, int parse_only);
-void ff_mpeg4_encode_video_packet_header(MpegEncContext *s);
-void ff_mpeg4_clean_buffers(MpegEncContext *s);
+void ff_mpeg4_encode_video_packet_header(MPVEncContext *s);
+void ff_mpeg4_clean_buffers(MPVContext *s);
 void ff_mpeg4_stuffing(PutBitContext *pbc);
-void ff_mpeg4_init_partitions(MpegEncContext *s);
-void ff_mpeg4_merge_partitions(MpegEncContext *s);
-void ff_clean_mpeg4_qscales(MpegEncContext *s);
+void ff_mpeg4_init_partitions(MPVEncContext *s);
+void ff_mpeg4_merge_partitions(MPVEncContext *s);
+void ff_clean_mpeg4_qscales(MPVMainEncContext *s);
 int ff_mpeg4_decode_partitions(Mpeg4DecContext *ctx);
-int ff_mpeg4_get_video_packet_prefix_length(MpegEncContext *s);
+int ff_mpeg4_get_video_packet_prefix_length(MPVContext *s);
 int ff_mpeg4_decode_video_packet_header(Mpeg4DecContext *ctx);
 int ff_mpeg4_decode_studio_slice_header(Mpeg4DecContext *ctx);
-void ff_mpeg4_init_direct_mv(MpegEncContext *s);
+void ff_mpeg4_init_direct_mv(MPVContext *s);
 int ff_mpeg4_workaround_bugs(AVCodecContext *avctx);
 int ff_mpeg4_frame_end(AVCodecContext *avctx, const uint8_t *buf, int buf_size);
 
 /**
  * @return the mb_type
  */
-int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my);
+int ff_mpeg4_set_direct_mv(MPVContext *s, int mx, int my);
 
 #if 0 //3IV1 is quite rare and it slows things down a tiny bit
 #define IS_3IV1 s->codec_tag == AV_RL32("3IV1")
@@ -169,7 +170,7 @@  int ff_mpeg4_set_direct_mv(MpegEncContext *s, int mx, int my);
  * @param n block index (0-3 are luma, 4-5 are chroma)
  * @param dir_ptr pointer to an integer where the prediction direction will be stored
  */
-static inline int ff_mpeg4_pred_dc(MpegEncContext *s, int n, int level,
+static inline int ff_mpeg4_pred_dc(MPVContext *s, int n, int level,
                                    int *dir_ptr, int encoding)
 {
     int a, b, c, wrap, pred, scale, ret;
diff --git a/libavcodec/mpeg4video_parser.c b/libavcodec/mpeg4video_parser.c
index 1f89bae490..46601ab1e1 100644
--- a/libavcodec/mpeg4video_parser.c
+++ b/libavcodec/mpeg4video_parser.c
@@ -91,7 +91,7 @@  static int mpeg4_decode_header(AVCodecParserContext *s1, AVCodecContext *avctx,
 {
     struct Mp4vParseContext *pc = s1->priv_data;
     Mpeg4DecContext *dec_ctx = &pc->dec_ctx;
-    MpegEncContext *s = &dec_ctx->m;
+    MPVDecContext *const s = &dec_ctx->m;
     GetBitContext gb1, *gb = &gb1;
     int ret;
 
diff --git a/libavcodec/mpeg4videodec.c b/libavcodec/mpeg4videodec.c
index c0eaa00eba..8f67c9bdd0 100644
--- a/libavcodec/mpeg4videodec.c
+++ b/libavcodec/mpeg4videodec.c
@@ -68,7 +68,7 @@  static const int mb_type_b_map[4] = {
     MB_TYPE_L0      | MB_TYPE_16x16,
 };
 
-void ff_mpeg4_decode_studio(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb,
+void ff_mpeg4_decode_studio(MPVContext *s, uint8_t *dest_y, uint8_t *dest_cb,
                             uint8_t *dest_cr, int block_size, int uvlinesize,
                             int dct_linesize, int dct_offset)
 {
@@ -136,7 +136,7 @@  void ff_mpeg4_decode_studio(MpegEncContext *s, uint8_t *dest_y, uint8_t *dest_cb
  * @param n block index (0-3 are luma, 4-5 are chroma)
  * @param dir the ac prediction direction
  */
-void ff_mpeg4_pred_ac(MpegEncContext *s, int16_t *block, int n, int dir)
+void ff_mpeg4_pred_ac(MPVDecContext *s, int16_t *block, int n, int dir)
 {
     int i;
     int16_t *ac_val, *ac_val1;
@@ -193,7 +193,7 @@  void ff_mpeg4_pred_ac(MpegEncContext *s, int16_t *block, int n, int dir)
  */
 static inline int mpeg4_is_resync(Mpeg4DecContext *ctx)
 {
-    MpegEncContext *s = &ctx->m;
+    MPVDecContext *const s = &ctx->m;
     int bits_count = get_bits_count(&s->gb);
     int v          = show_bits(&s->gb, 16);
 
@@ -243,7 +243,7 @@  static inline int mpeg4_is_resync(Mpeg4DecContext *ctx)
 
 static int mpeg4_decode_sprite_trajectory(Mpeg4DecContext *ctx, GetBitContext *gb)
 {
-    MpegEncContext *s = &ctx->m;
+    MPVDecContext *const s = &ctx->m;
     int a     = 2 << s->sprite_warping_accuracy;
     int rho   = 3  - s->sprite_warping_accuracy;
     int r     = 16 / a;
@@ -495,7 +495,7 @@  overflow:
 }
 
 static int decode_new_pred(Mpeg4DecContext *ctx, GetBitContext *gb) {
-    MpegEncContext *s = &ctx->m;
+    MPVDecContext *const s = &ctx->m;
     int len = FFMIN(ctx->time_increment_bits + 3, 15);
 
     get_bits(gb, len);
@@ -512,7 +512,7 @@  static int decode_new_pred(Mpeg4DecContext *ctx, GetBitContext *gb) {
  */
 int ff_mpeg4_decode_video_packet_header(Mpeg4DecContext *ctx)
 {
-    MpegEncContext *s = &ctx->m;
+    MPVMainDecContext *const s = &ctx->m;
 
     int mb_num_bits      = av_log2(s->mb_num - 1) + 1;
     int header_extension = 0, mb_num, len;
@@ -599,7 +599,7 @@  int ff_mpeg4_decode_video_packet_header(Mpeg4DecContext *ctx)
     return 0;
 }
 
-static void reset_studio_dc_predictors(MpegEncContext *s)
+static void reset_studio_dc_predictors(MPVDecContext *s)
 {
     /* Reset DC Predictors */
     s->last_dc[0] =
@@ -613,7 +613,7 @@  static void reset_studio_dc_predictors(MpegEncContext *s)
  */
 int ff_mpeg4_decode_studio_slice_header(Mpeg4DecContext *ctx)
 {
-    MpegEncContext *s = &ctx->m;
+    MPVMainDecContext *const s = &ctx->m;
     GetBitContext *gb = &s->gb;
     unsigned vlc_len;
     uint16_t mb_num;
@@ -655,7 +655,7 @@  int ff_mpeg4_decode_studio_slice_header(Mpeg4DecContext *ctx)
  */
 static inline int get_amv(Mpeg4DecContext *ctx, int n)
 {
-    MpegEncContext *s = &ctx->m;
+    MPVDecContext *const s = &ctx->m;
     int x, y, mb_v, sum, dx, dy, shift;
     int len     = 1 << (s->f_code + 4);
     const int a = s->sprite_warping_accuracy;
@@ -706,7 +706,7 @@  static inline int get_amv(Mpeg4DecContext *ctx, int n)
  * @param dir_ptr the prediction direction will be stored here
  * @return the quantized dc
  */
-static inline int mpeg4_decode_dc(MpegEncContext *s, int n, int *dir_ptr)
+static inline int mpeg4_decode_dc(MPVDecContext *s, int n, int *dir_ptr)
 {
     int level, code;
 
@@ -755,7 +755,7 @@  static inline int mpeg4_decode_dc(MpegEncContext *s, int n, int *dir_ptr)
  */
 static int mpeg4_decode_partition_a(Mpeg4DecContext *ctx)
 {
-    MpegEncContext *s = &ctx->m;
+    MPVMainDecContext *const s = &ctx->m;
     int mb_num = 0;
     static const int8_t quant_tab[4] = { -1, -2, 1, 2 };
 
@@ -947,7 +947,7 @@  try_again:
  * decode second partition.
  * @return <0 if an error occurred
  */
-static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count)
+static int mpeg4_decode_partition_b(MPVMainDecContext *s, int mb_count)
 {
     int mb_num = 0;
     static const int8_t quant_tab[4] = { -1, -2, 1, 2 };
@@ -1042,7 +1042,7 @@  static int mpeg4_decode_partition_b(MpegEncContext *s, int mb_count)
  */
 int ff_mpeg4_decode_partitions(Mpeg4DecContext *ctx)
 {
-    MpegEncContext *s = &ctx->m;
+    MPVMainDecContext *const s = &ctx->m;
     int mb_num;
     int ret;
     const int part_a_error = s->pict_type == AV_PICTURE_TYPE_I ? (ER_DC_ERROR | ER_MV_ERROR) : ER_MV_ERROR;
@@ -1109,7 +1109,7 @@  static inline int mpeg4_decode_block(Mpeg4DecContext *ctx, int16_t *block,
                                      int n, int coded, int intra,
                                      int use_intra_dc_vlc, int rvlc)
 {
-    MpegEncContext *s = &ctx->m;
+    MPVDecContext *const s = &ctx->m;
     int level, i, last, run, qmul, qadd;
     int av_uninit(dc_pred_dir);
     RLTable *rl;
@@ -1377,7 +1377,7 @@  not_coded:
  * decode partition C of one MB.
  * @return <0 if an error occurred
  */
-static int mpeg4_decode_partitioned_mb(MpegEncContext *s, int16_t block[6][64])
+static int mpeg4_decode_partitioned_mb(MPVDecContext *s, int16_t block[6][64])
 {
     Mpeg4DecContext *ctx = s->avctx->priv_data;
     int cbp, mb_type, use_intra_dc_vlc;
@@ -1465,7 +1465,7 @@  static int mpeg4_decode_partitioned_mb(MpegEncContext *s, int16_t block[6][64])
     }
 }
 
-static int mpeg4_decode_mb(MpegEncContext *s, int16_t block[6][64])
+static int mpeg4_decode_mb(MPVDecContext *s, int16_t block[6][64])
 {
     Mpeg4DecContext *ctx = s->avctx->priv_data;
     int cbpc, cbpy, i, cbp, pred_x, pred_y, mx, my, dquant;
@@ -1885,7 +1885,7 @@  static const uint8_t ac_state_tab[22][2] =
     {0, 11}
 };
 
-static int mpeg4_decode_studio_block(MpegEncContext *s, int32_t block[64], int n)
+static int mpeg4_decode_studio_block(MPVDecContext *s, int32_t block[64], int n)
 {
     Mpeg4DecContext *ctx = s->avctx->priv_data;
 
@@ -2000,7 +2000,7 @@  static int mpeg4_decode_studio_block(MpegEncContext *s, int32_t block[64], int n
     return 0;
 }
 
-static int mpeg4_decode_dpcm_macroblock(MpegEncContext *s, int16_t macroblock[256], int n)
+static int mpeg4_decode_dpcm_macroblock(MPVDecContext *s, int16_t macroblock[256], int n)
 {
     int i, j, w, h, idx = 0;
     int block_mean, rice_parameter, rice_prefix_code, rice_suffix_code,
@@ -2083,7 +2083,7 @@  static int mpeg4_decode_dpcm_macroblock(MpegEncContext *s, int16_t macroblock[25
     return 0;
 }
 
-static int mpeg4_decode_studio_mb(MpegEncContext *s, int16_t block_[12][64])
+static int mpeg4_decode_studio_mb(MPVDecContext *s, int16_t block_[12][64])
 {
     Mpeg4DecContext *const ctx = (Mpeg4DecContext*)s;
     int i;
@@ -2131,7 +2131,7 @@  static int mpeg4_decode_studio_mb(MpegEncContext *s, int16_t block_[12][64])
     return SLICE_OK;
 }
 
-static int mpeg4_decode_gop_header(MpegEncContext *s, GetBitContext *gb)
+static int mpeg4_decode_gop_header(MPVMainDecContext *s, GetBitContext *gb)
 {
     int hours, minutes, seconds;
 
@@ -2153,7 +2153,7 @@  static int mpeg4_decode_gop_header(MpegEncContext *s, GetBitContext *gb)
     return 0;
 }
 
-static int mpeg4_decode_profile_level(MpegEncContext *s, GetBitContext *gb, int *profile, int *level)
+static int mpeg4_decode_profile_level(GetBitContext *gb, int *profile, int *level)
 {
 
     *profile = get_bits(gb, 4);
@@ -2167,7 +2167,7 @@  static int mpeg4_decode_profile_level(MpegEncContext *s, GetBitContext *gb, int
     return 0;
 }
 
-static int mpeg4_decode_visual_object(MpegEncContext *s, GetBitContext *gb)
+static int mpeg4_decode_visual_object(MPVMainDecContext *s, GetBitContext *gb)
 {
     int visual_object_type;
     int is_visual_object_identifier = get_bits1(gb);
@@ -2199,7 +2199,7 @@  static int mpeg4_decode_visual_object(MpegEncContext *s, GetBitContext *gb)
     return 0;
 }
 
-static void mpeg4_load_default_matrices(MpegEncContext *s)
+static void mpeg4_load_default_matrices(MPVMainDecContext *s)
 {
     int i, v;
 
@@ -2216,7 +2216,7 @@  static void mpeg4_load_default_matrices(MpegEncContext *s)
     }
 }
 
-static int read_quant_matrix_ext(MpegEncContext *s, GetBitContext *gb)
+static int read_quant_matrix_ext(MPVMainDecContext *s, GetBitContext *gb)
 {
     int i, j, v;
 
@@ -2265,7 +2265,7 @@  static int read_quant_matrix_ext(MpegEncContext *s, GetBitContext *gb)
     return 0;
 }
 
-static void extension_and_user_data(MpegEncContext *s, GetBitContext *gb, int id)
+static void extension_and_user_data(MPVMainDecContext *s, GetBitContext *gb, int id)
 {
     uint32_t startcode;
     uint8_t extension_type;
@@ -2284,7 +2284,7 @@  static void extension_and_user_data(MpegEncContext *s, GetBitContext *gb, int id
 
 static int decode_studio_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb)
 {
-    MpegEncContext *s = &ctx->m;
+    MPVMainDecContext *const s = &ctx->m;
     int width, height, aspect_ratio_info;
     int bits_per_raw_sample;
     int rgb, chroma_format;
@@ -2370,7 +2370,7 @@  static int decode_studio_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb)
 
 static int decode_vol_header(Mpeg4DecContext *ctx, GetBitContext *gb)
 {
-    MpegEncContext *s = &ctx->m;
+    MPVMainDecContext *const s = &ctx->m;
     int width, height, vo_ver_id, aspect_ratio_info;
 
     /* vol header */
@@ -2745,7 +2745,7 @@  no_cplx_est:
  */
 static int decode_user_data(Mpeg4DecContext *ctx, GetBitContext *gb)
 {
-    MpegEncContext *s = &ctx->m;
+    MPVMainDecContext *const s = &ctx->m;
     char buf[256];
     int i;
     int e;
@@ -2803,7 +2803,7 @@  static int decode_user_data(Mpeg4DecContext *ctx, GetBitContext *gb)
 int ff_mpeg4_workaround_bugs(AVCodecContext *avctx)
 {
     Mpeg4DecContext *ctx = avctx->priv_data;
-    MpegEncContext *s = &ctx->m;
+    MPVMainDecContext *const s = &ctx->m;
 
     if (ctx->xvid_build == -1 && ctx->divx_version == -1 && ctx->lavc_build == -1) {
         if (s->codec_tag        == AV_RL32("XVID") ||
@@ -2921,7 +2921,7 @@  int ff_mpeg4_workaround_bugs(AVCodecContext *avctx)
 static int decode_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb,
                              int parse_only)
 {
-    MpegEncContext *s = &ctx->m;
+    MPVMainDecContext *const s = &ctx->m;
     int time_incr, time_increment;
     int64_t pts;
 
@@ -3203,7 +3203,7 @@  end:
 
 static void decode_smpte_tc(Mpeg4DecContext *ctx, GetBitContext *gb)
 {
-    MpegEncContext *s = &ctx->m;
+    MPVMainDecContext *const s = &ctx->m;
 
     skip_bits(gb, 16); /* Time_code[63..48] */
     check_marker(s->avctx, gb, "after Time_code[63..48]");
@@ -3222,7 +3222,7 @@  static void decode_smpte_tc(Mpeg4DecContext *ctx, GetBitContext *gb)
  */
 static int decode_studio_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb)
 {
-    MpegEncContext *s = &ctx->m;
+    MPVMainDecContext *const s = &ctx->m;
 
     if (get_bits_left(gb) <= 32)
         return 0;
@@ -3277,7 +3277,7 @@  static int decode_studio_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb)
 
 static int decode_studiovisualobject(Mpeg4DecContext *ctx, GetBitContext *gb)
 {
-    MpegEncContext *s = &ctx->m;
+    MPVMainDecContext *const s = &ctx->m;
     int visual_object_type;
 
     skip_bits(gb, 4); /* visual_object_verid */
@@ -3306,7 +3306,7 @@  static int decode_studiovisualobject(Mpeg4DecContext *ctx, GetBitContext *gb)
 int ff_mpeg4_decode_picture_header(Mpeg4DecContext *ctx, GetBitContext *gb,
                                    int header, int parse_only)
 {
-    MpegEncContext *s = &ctx->m;
+    MPVMainDecContext *const s = &ctx->m;
     unsigned startcode, v;
     int ret;
     int vol = 0;
@@ -3420,7 +3420,7 @@  int ff_mpeg4_decode_picture_header(Mpeg4DecContext *ctx, GetBitContext *gb,
             mpeg4_decode_gop_header(s, gb);
         } else if (startcode == VOS_STARTCODE) {
             int profile, level;
-            mpeg4_decode_profile_level(s, gb, &profile, &level);
+            mpeg4_decode_profile_level(gb, &profile, &level);
             if (profile == FF_PROFILE_MPEG4_SIMPLE_STUDIO &&
                 (level > 0 && level < 9)) {
                 s->studio_profile = 1;
@@ -3464,7 +3464,7 @@  end:
 int ff_mpeg4_frame_end(AVCodecContext *avctx, const uint8_t *buf, int buf_size)
 {
     Mpeg4DecContext *ctx = avctx->priv_data;
-    MpegEncContext    *s = &ctx->m;
+    MPVMainDecContext *const s = &ctx->m;
 
     /* divx 5.01+ bitstream reorder stuff */
     /* Since this clobbers the input buffer and hwaccel codecs still need the
@@ -3560,8 +3560,8 @@  static int mpeg4_update_thread_context(AVCodecContext *dst,
 static int mpeg4_update_thread_context_for_user(AVCodecContext *dst,
                                                 const AVCodecContext *src)
 {
-    MpegEncContext *m = dst->priv_data;
-    const MpegEncContext *m1 = src->priv_data;
+    MPVMainDecContext *const m = dst->priv_data;
+    const MPVMainDecContext *const m1 = src->priv_data;
 
     m->quarter_sample = m1->quarter_sample;
     m->divx_packed    = m1->divx_packed;
@@ -3621,7 +3621,7 @@  static av_cold int decode_init(AVCodecContext *avctx)
 {
     static AVOnce init_static_once = AV_ONCE_INIT;
     Mpeg4DecContext *ctx = avctx->priv_data;
-    MpegEncContext *s = &ctx->m;
+    MPVMainDecContext *const s = &ctx->m;
     int ret;
 
     ctx->divx_version =
@@ -3644,7 +3644,7 @@  static av_cold int decode_init(AVCodecContext *avctx)
     return 0;
 }
 
-#define OFFSET(x) offsetof(MpegEncContext, x)
+#define OFFSET(x) offsetof(MPVMainDecContext, x)
 #define FLAGS AV_OPT_FLAG_EXPORT | AV_OPT_FLAG_READONLY
 static const AVOption mpeg4_options[] = {
     {"quarter_sample", "1/4 subpel MC", OFFSET(quarter_sample), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS},
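[Editor's note] Changing the struct named in OFFSET() above is safe as long as the listed fields live at the same byte offsets, because the option table only stores offsets into the private context. A generic sketch of that mechanism with hypothetical types (Ctx and Opt are illustrative, not the AVOption API):

    #include <stddef.h>
    #include <stdio.h>

    typedef struct Ctx { int quarter_sample; int divx_packed; } Ctx;
    typedef struct Opt { const char *name; size_t offset; } Opt;

    #define OFFSET(x) offsetof(Ctx, x)
    static const Opt opts[] = {
        { "quarter_sample", OFFSET(quarter_sample) },
        { "divx_packed",    OFFSET(divx_packed)    },
    };

    static int *opt_field(Ctx *c, const Opt *o)
    {
        return (int *)((char *)c + o->offset);  /* resolve the offset at run time */
    }

    int main(void)
    {
        Ctx c = { 0 };
        *opt_field(&c, &opts[0]) = 1;           /* "set" quarter_sample */
        printf("%d %d\n", c.quarter_sample, c.divx_packed);  /* prints 1 0 */
        return 0;
    }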
diff --git a/libavcodec/mpeg4videoenc.c b/libavcodec/mpeg4videoenc.c
index d09e20605d..cd7ce1f382 100644
--- a/libavcodec/mpeg4videoenc.c
+++ b/libavcodec/mpeg4videoenc.c
@@ -66,7 +66,7 @@  static uint8_t  uni_mpeg4_inter_rl_len[64 * 64 * 2 * 2];
  * Return the number of bits that encoding the 8x8 block in block would need.
  * @param[in]  block_last_index last index in scantable order that refers to a non zero element in block.
  */
-static inline int get_block_rate(MpegEncContext *s, int16_t block[64],
+static inline int get_block_rate(MPVEncContext *s, int16_t block[64],
                                  int block_last_index, uint8_t scantable[64])
 {
     int last = 0;
@@ -101,7 +101,7 @@  static inline int get_block_rate(MpegEncContext *s, int16_t block[64],
  * @param[out] st scantable for each 8x8 block
  * @param[in] zigzag_last_index index referring to the last non zero coefficient in zigzag order
  */
-static inline void restore_ac_coeffs(MpegEncContext *s, int16_t block[6][64],
+static inline void restore_ac_coeffs(MPVEncContext *s, int16_t block[6][64],
                                      const int dir[6], uint8_t *st[6],
                                      const int zigzag_last_index[6])
 {
@@ -132,7 +132,7 @@  static inline void restore_ac_coeffs(MpegEncContext *s, int16_t block[6][64],
  * @param[out] st scantable for each 8x8 block
  * @param[out] zigzag_last_index index referring to the last non zero coefficient in zigzag order
  */
-static inline int decide_ac_pred(MpegEncContext *s, int16_t block[6][64],
+static inline int decide_ac_pred(MPVEncContext *s, int16_t block[6][64],
                                  const int dir[6], uint8_t *st[6],
                                  int zigzag_last_index[6])
 {
@@ -215,7 +215,7 @@  static inline int decide_ac_pred(MpegEncContext *s, int16_t block[6][64],
 /**
  * modify mb_type & qscale so that encoding is actually possible in MPEG-4
  */
-void ff_clean_mpeg4_qscales(MpegEncContext *s)
+void ff_clean_mpeg4_qscales(MPVMainEncContext *s)
 {
     int i;
     int8_t *const qscale_table = s->current_picture.qscale_table;
@@ -284,7 +284,7 @@  static inline int mpeg4_get_dc_length(int level, int n)
  * Encode an 8x8 block.
  * @param n block index (0-3 are luma, 4-5 are chroma)
  */
-static inline void mpeg4_encode_block(MpegEncContext *s,
+static inline void mpeg4_encode_block(MPVEncContext *s,
                                       int16_t *block, int n, int intra_dc,
                                       uint8_t *scan_table, PutBitContext *dc_pb,
                                       PutBitContext *ac_pb)
@@ -345,7 +345,7 @@  static inline void mpeg4_encode_block(MpegEncContext *s,
     }
 }
 
-static int mpeg4_get_block_length(MpegEncContext *s,
+static int mpeg4_get_block_length(MPVEncContext *s,
                                   int16_t *block, int n,
                                   int intra_dc, uint8_t *scan_table)
 {
@@ -399,7 +399,7 @@  static int mpeg4_get_block_length(MpegEncContext *s,
     return len;
 }
 
-static inline void mpeg4_encode_blocks(MpegEncContext *s, int16_t block[6][64],
+static inline void mpeg4_encode_blocks(MPVEncContext *s, int16_t block[6][64],
                                        int intra_dc[6], uint8_t **scan_table,
                                        PutBitContext *dc_pb,
                                        PutBitContext *ac_pb)
@@ -433,7 +433,7 @@  static inline void mpeg4_encode_blocks(MpegEncContext *s, int16_t block[6][64],
     }
 }
 
-static inline int get_b_cbp(MpegEncContext *s, int16_t block[6][64],
+static inline int get_b_cbp(MPVEncContext *s, int16_t block[6][64],
                             int motion_x, int motion_y, int mb_type)
 {
     int cbp = 0, i;
@@ -477,7 +477,7 @@  static inline int get_b_cbp(MpegEncContext *s, int16_t block[6][64],
 // FIXME this is duplicated to h263.c
 static const int dquant_code[5] = { 1, 0, 9, 2, 3 };
 
-void ff_mpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64],
+void ff_mpeg4_encode_mb(MPVEncContext *s, int16_t block[6][64],
                         int motion_x, int motion_y)
 {
     int cbpc, cbpy, pred_x, pred_y;
@@ -875,7 +875,7 @@  void ff_mpeg4_stuffing(PutBitContext *pbc)
 }
 
 /* must be called before writing the header */
-void ff_set_mpeg4_time(MpegEncContext *s)
+void ff_set_mpeg4_time(MPVMainEncContext *s)
 {
     if (s->pict_type == AV_PICTURE_TYPE_B) {
         ff_mpeg4_init_direct_mv(s);
@@ -885,7 +885,7 @@  void ff_set_mpeg4_time(MpegEncContext *s)
     }
 }
 
-static void mpeg4_encode_gop_header(MpegEncContext *s)
+static void mpeg4_encode_gop_header(MPVMainEncContext *s)
 {
     int64_t hours, minutes, seconds;
     int64_t time;
@@ -915,7 +915,7 @@  static void mpeg4_encode_gop_header(MpegEncContext *s)
     ff_mpeg4_stuffing(&s->pb);
 }
 
-static void mpeg4_encode_visual_object_header(MpegEncContext *s)
+static void mpeg4_encode_visual_object_header(MPVMainEncContext *s)
 {
     int profile_and_level_indication;
     int vo_ver_id;
@@ -959,7 +959,7 @@  static void mpeg4_encode_visual_object_header(MpegEncContext *s)
     ff_mpeg4_stuffing(&s->pb);
 }
 
-static void mpeg4_encode_vol_header(MpegEncContext *s,
+static void mpeg4_encode_vol_header(MPVMainEncContext *s,
                                     int vo_number,
                                     int vol_number)
 {
@@ -1060,7 +1060,7 @@  static void mpeg4_encode_vol_header(MpegEncContext *s,
 }
 
 /* write MPEG-4 VOP header */
-int ff_mpeg4_encode_picture_header(MpegEncContext *s, int picture_number)
+int ff_mpeg4_encode_picture_header(MPVMainEncContext *s, int picture_number)
 {
     uint64_t time_incr;
     int64_t time_div, time_mod;
@@ -1283,7 +1283,7 @@  static av_cold void mpeg4_encode_init_static(void)
 static av_cold int encode_init(AVCodecContext *avctx)
 {
     static AVOnce init_static_once = AV_ONCE_INIT;
-    MpegEncContext *s = avctx->priv_data;
+    MPVMainEncContext *const s = avctx->priv_data;
     int ret;
 
     if (avctx->width >= (1<<13) || avctx->height >= (1<<13)) {
@@ -1324,7 +1324,7 @@  static av_cold int encode_init(AVCodecContext *avctx)
     return 0;
 }
 
-void ff_mpeg4_init_partitions(MpegEncContext *s)
+void ff_mpeg4_init_partitions(MPVEncContext *s)
 {
     uint8_t *start = put_bits_ptr(&s->pb);
     uint8_t *end   = s->pb.buf_end;
@@ -1337,7 +1337,7 @@  void ff_mpeg4_init_partitions(MpegEncContext *s)
     init_put_bits(&s->pb2, start + pb_size + tex_size, pb_size);
 }
 
-void ff_mpeg4_merge_partitions(MpegEncContext *s)
+void ff_mpeg4_merge_partitions(MPVEncContext *s)
 {
     const int pb2_len    = put_bits_count(&s->pb2);
     const int tex_pb_len = put_bits_count(&s->tex_pb);
@@ -1363,7 +1363,7 @@  void ff_mpeg4_merge_partitions(MpegEncContext *s)
     s->last_bits = put_bits_count(&s->pb);
 }
 
-void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
+void ff_mpeg4_encode_video_packet_header(MPVEncContext *s)
 {
     int mb_num_bits = av_log2(s->mb_num - 1) + 1;
 
@@ -1375,7 +1375,7 @@  void ff_mpeg4_encode_video_packet_header(MpegEncContext *s)
     put_bits(&s->pb, 1, 0); /* no HEC */
 }
 
-#define OFFSET(x) offsetof(MpegEncContext, x)
+#define OFFSET(x) offsetof(MPVMainEncContext, x)
 #define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
 static const AVOption options[] = {
     { "data_partitioning", "Use data partitioning.",      OFFSET(data_partitioning), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
@@ -1404,7 +1404,7 @@  const AVCodec ff_mpeg4_encoder = {
     .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2"),
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_MPEG4,
-    .priv_data_size = sizeof(MpegEncContext),
+    .priv_data_size = sizeof(MPVMainEncContext),
     .init           = encode_init,
     .encode2        = ff_mpv_encode_picture,
     .close          = ff_mpv_encode_end,
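
Since MPVMainEncContext is, at this stage of the series, just a typedef of MPVContext (added in the mpegvideoenc.h hunk further below), moving OFFSET() and priv_data_size over to the new name changes nothing at runtime yet: every AVOption offset and the size of the allocated priv_data stay bit-identical. A minimal standalone sketch of the property being relied on; the struct body here is a stand-in, not the real context layout:

#include <assert.h>
#include <stddef.h>

typedef struct MPVContext { int data_partitioning; int alternate_scan; } MPVContext;
typedef MPVContext MPVMainEncContext;         /* plain alias, as in mpegvideoenc.h */

int main(void)
{
    /* what the OFFSET()/priv_data_size switch relies on while the names alias */
    assert(offsetof(MPVMainEncContext, data_partitioning) ==
           offsetof(MPVContext, data_partitioning));
    assert(sizeof(MPVMainEncContext) == sizeof(MPVContext));
    return 0;
}

The change only becomes load-bearing if the aliases later turn into distinct structures, which the renaming presumably prepares for.
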
diff --git a/libavcodec/mpeg_er.c b/libavcodec/mpeg_er.c
index f54cb8548b..bdbcea7ccd 100644
--- a/libavcodec/mpeg_er.c
+++ b/libavcodec/mpeg_er.c
@@ -43,7 +43,7 @@  static void set_erpic(ERPicture *dst, Picture *src)
     dst->field_picture = src->field_picture;
 }
 
-void ff_mpeg_er_frame_start(MpegEncContext *s)
+void ff_mpeg_er_frame_start(MPVMainDecContext *s)
 {
     ERContext *er = &s->er;
 
@@ -63,7 +63,7 @@  static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
                               int (*mv)[2][4][2], int mb_x, int mb_y,
                               int mb_intra, int mb_skipped)
 {
-    MpegEncContext *s = opaque;
+    MPVDecContext *const s = opaque;
 
     s->mv_dir     = mv_dir;
     s->mv_type    = mv_type;
@@ -97,7 +97,7 @@  static void mpeg_er_decode_mb(void *opaque, int ref, int mv_dir, int mv_type,
     ff_mpv_reconstruct_mb(s, s->block);
 }
 
-int ff_mpeg_er_init(MpegEncContext *s)
+int ff_mpeg_er_init(MPVDecContext *s)
 {
     ERContext *er = &s->er;
     int mb_array_size = s->mb_height * s->mb_stride;
diff --git a/libavcodec/mpeg_er.h b/libavcodec/mpeg_er.h
index bb627a4d06..b3c284c9a3 100644
--- a/libavcodec/mpeg_er.h
+++ b/libavcodec/mpeg_er.h
@@ -21,7 +21,7 @@ 
 
 #include "mpegvideo.h"
 
-int ff_mpeg_er_init(MpegEncContext *s);
-void ff_mpeg_er_frame_start(MpegEncContext *s);
+int ff_mpeg_er_init(MPVDecContext *s);
+void ff_mpeg_er_frame_start(MPVMainDecContext *s);
 
 #endif /* AVCODEC_MPEG_ER_H */
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index bbcf00b014..500f8af19d 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -47,7 +47,7 @@ 
 #include "wmv2.h"
 #include <limits.h>
 
-static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
+static void dct_unquantize_mpeg1_intra_c(MPVContext *s,
                                    int16_t *block, int n, int qscale)
 {
     int i, level, nCoeffs;
@@ -76,7 +76,7 @@  static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
     }
 }
 
-static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
+static void dct_unquantize_mpeg1_inter_c(MPVContext *s,
                                    int16_t *block, int n, int qscale)
 {
     int i, level, nCoeffs;
@@ -105,7 +105,7 @@  static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s,
     }
 }
 
-static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
+static void dct_unquantize_mpeg2_intra_c(MPVContext *s,
                                    int16_t *block, int n, int qscale)
 {
     int i, level, nCoeffs;
@@ -135,7 +135,7 @@  static void dct_unquantize_mpeg2_intra_c(MpegEncContext *s,
     }
 }
 
-static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
+static void dct_unquantize_mpeg2_intra_bitexact(MPVContext *s,
                                    int16_t *block, int n, int qscale)
 {
     int i, level, nCoeffs;
@@ -169,7 +169,7 @@  static void dct_unquantize_mpeg2_intra_bitexact(MpegEncContext *s,
     block[63]^=sum&1;
 }
 
-static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
+static void dct_unquantize_mpeg2_inter_c(MPVContext *s,
                                    int16_t *block, int n, int qscale)
 {
     int i, level, nCoeffs;
@@ -203,7 +203,7 @@  static void dct_unquantize_mpeg2_inter_c(MpegEncContext *s,
     block[63]^=sum&1;
 }
 
-static void dct_unquantize_h263_intra_c(MpegEncContext *s,
+static void dct_unquantize_h263_intra_c(MPVContext *s,
                                   int16_t *block, int n, int qscale)
 {
     int i, level, qmul, qadd;
@@ -237,7 +237,7 @@  static void dct_unquantize_h263_intra_c(MpegEncContext *s,
     }
 }
 
-static void dct_unquantize_h263_inter_c(MpegEncContext *s,
+static void dct_unquantize_h263_inter_c(MPVContext *s,
                                   int16_t *block, int n, int qscale)
 {
     int i, level, qmul, qadd;
@@ -277,7 +277,7 @@  static void gray8(uint8_t *dst, const uint8_t *src, ptrdiff_t linesize, int h)
 }
 
 /* init common dct for both encoder and decoder */
-static av_cold int dct_init(MpegEncContext *s)
+static av_cold int dct_init(MPVMainContext *s)
 {
     ff_blockdsp_init(&s->bdsp, s->avctx);
     ff_h264chroma_init(&s->h264chroma, 8); //for lowres
@@ -324,7 +324,7 @@  static av_cold int dct_init(MpegEncContext *s)
     return 0;
 }
 
-av_cold void ff_mpv_idct_init(MpegEncContext *s)
+av_cold void ff_mpv_idct_init(MPVMainContext *s)
 {
     if (s->codec_id == AV_CODEC_ID_MPEG4)
         s->idsp.mpeg4_studio_profile = s->studio_profile;
@@ -344,7 +344,7 @@  av_cold void ff_mpv_idct_init(MpegEncContext *s)
     ff_init_scantable(s->idsp.idct_permutation, &s->intra_v_scantable, ff_alternate_vertical_scan);
 }
 
-static int init_duplicate_context(MpegEncContext *s)
+static int init_duplicate_context(MPVContext *s)
 {
     int y_size = s->b8_stride * (2 * s->mb_height + 1);
     int c_size = s->mb_stride * (s->mb_height + 1);
@@ -389,7 +389,7 @@  static int init_duplicate_context(MpegEncContext *s)
     return 0;
 }
 
-int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
+int ff_mpv_init_duplicate_contexts(MPVMainContext *s)
 {
     int nb_slices = s->slice_context_count, ret;
 
@@ -397,7 +397,7 @@  int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
      * fields allocated in init_duplicate_context are NULL after
      * copying. This prevents double-frees upon allocation error. */
     for (int i = 1; i < nb_slices; i++) {
-        s->thread_context[i] = av_memdup(s, sizeof(MpegEncContext));
+        s->thread_context[i] = av_memdup(s, sizeof(MPVContext));
         if (!s->thread_context[i])
             return AVERROR(ENOMEM);
         if ((ret = init_duplicate_context(s->thread_context[i])) < 0)
@@ -413,7 +413,7 @@  int ff_mpv_init_duplicate_contexts(MpegEncContext *s)
     return init_duplicate_context(s);
 }
 
-static void free_duplicate_context(MpegEncContext *s)
+static void free_duplicate_context(MPVContext *s)
 {
     if (!s)
         return;
@@ -433,7 +433,7 @@  static void free_duplicate_context(MpegEncContext *s)
     s->block = NULL;
 }
 
-static void free_duplicate_contexts(MpegEncContext *s)
+static void free_duplicate_contexts(MPVMainContext *s)
 {
     for (int i = 1; i < s->slice_context_count; i++) {
         free_duplicate_context(s->thread_context[i]);
@@ -442,7 +442,7 @@  static void free_duplicate_contexts(MpegEncContext *s)
     free_duplicate_context(s);
 }
 
-static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
+static void backup_duplicate_context(MPVContext *bak, MPVContext *src)
 {
 #define COPY(a) bak->a = src->a
     COPY(sc.edge_emu_buffer);
@@ -469,13 +469,13 @@  static void backup_duplicate_context(MpegEncContext *bak, MpegEncContext *src)
 #undef COPY
 }
 
-int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
+int ff_update_duplicate_context(MPVContext *dst, MPVContext *src)
 {
-    MpegEncContext bak;
+    MPVContext bak;
     int i, ret;
     // FIXME copy only needed parts
     backup_duplicate_context(&bak, dst);
-    memcpy(dst, src, sizeof(MpegEncContext));
+    memcpy(dst, src, sizeof(*dst));
     backup_duplicate_context(dst, &bak);
     for (i = 0; i < 12; i++) {
         dst->pblocks[i] = &dst->block[i];
@@ -495,12 +495,12 @@  int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src)
 }
 
 /**
- * Set the given MpegEncContext to common defaults
+ * Set the given MPVMainContext to common defaults
  * (same for encoding and decoding).
  * The changed fields will not depend upon the
- * prior state of the MpegEncContext.
+ * prior state of the MPVMainContext.
  */
-void ff_mpv_common_defaults(MpegEncContext *s)
+void ff_mpv_common_defaults(MPVMainContext *s)
 {
     s->y_dc_scale_table      =
     s->c_dc_scale_table      = ff_mpeg1_dc_scale_table;
@@ -518,7 +518,7 @@  void ff_mpv_common_defaults(MpegEncContext *s)
     s->slice_context_count   = 1;
 }
 
-int ff_mpv_init_context_frame(MpegEncContext *s)
+int ff_mpv_init_context_frame(MPVContext *s)
 {
     int y_size, c_size, yc_size, i, mb_array_size, mv_table_size, x, y;
 
@@ -652,7 +652,7 @@  int ff_mpv_init_context_frame(MpegEncContext *s)
     return !CONFIG_MPEGVIDEODEC || s->encoding ? 0 : ff_mpeg_er_init(s);
 }
 
-static void clear_context(MpegEncContext *s)
+static void clear_context(MPVMainContext *s)
 {
     int i, j, k;
 
@@ -731,7 +731,7 @@  static void clear_context(MpegEncContext *s)
  * init common structure for both encoder and decoder.
  * this assumes that some variables like width/height are already set
  */
-av_cold int ff_mpv_common_init(MpegEncContext *s)
+av_cold int ff_mpv_common_init(MPVMainContext *s)
 {
     int i, ret;
     int nb_slices = (HAVE_THREADS &&
@@ -818,7 +818,7 @@  av_cold int ff_mpv_common_init(MpegEncContext *s)
     return ret;
 }
 
-void ff_mpv_free_context_frame(MpegEncContext *s)
+void ff_mpv_free_context_frame(MPVMainContext *s)
 {
     int i, j, k;
 
@@ -872,7 +872,7 @@  void ff_mpv_free_context_frame(MpegEncContext *s)
 }
 
 /* init common structure for both encoder and decoder */
-void ff_mpv_common_end(MpegEncContext *s)
+void ff_mpv_common_end(MPVMainContext *s)
 {
     if (!s)
         return;
@@ -911,7 +911,7 @@  void ff_mpv_common_end(MpegEncContext *s)
 }
 
 
-static inline int hpel_motion_lowres(MpegEncContext *s,
+static inline int hpel_motion_lowres(MPVContext *s,
                                      uint8_t *dest, uint8_t *src,
                                      int field_based, int field_select,
                                      int src_x, int src_y,
@@ -958,7 +958,7 @@  static inline int hpel_motion_lowres(MpegEncContext *s,
 }
 
 /* apply one mpeg motion vector to the three components */
-static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
+static av_always_inline void mpeg_motion_lowres(MPVContext *s,
                                                 uint8_t *dest_y,
                                                 uint8_t *dest_cb,
                                                 uint8_t *dest_cr,
@@ -1097,7 +1097,7 @@  static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
     // FIXME h261 lowres loop filter
 }
 
-static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
+static inline void chroma_4mv_motion_lowres(MPVContext *s,
                                             uint8_t *dest_cb, uint8_t *dest_cr,
                                             uint8_t **ref_picture,
                                             h264_chroma_mc_func * pix_op,
@@ -1165,7 +1165,7 @@  static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
  * @param pix_op halfpel motion compensation function (average or put normally)
  * the motion vectors are taken from s->mv and the MV type from s->mv_type
  */
-static inline void MPV_motion_lowres(MpegEncContext *s,
+static inline void MPV_motion_lowres(MPVContext *s,
                                      uint8_t *dest_y, uint8_t *dest_cb,
                                      uint8_t *dest_cr,
                                      int dir, uint8_t **ref_picture,
@@ -1299,7 +1299,7 @@  static inline void MPV_motion_lowres(MpegEncContext *s,
 /**
  * find the lowest MB row referenced in the MVs
  */
-static int lowest_referenced_row(MpegEncContext *s, int dir)
+static int lowest_referenced_row(MPVContext *s, int dir)
 {
     int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
     int my, off, i, mvs;
@@ -1335,7 +1335,7 @@  unhandled:
 }
 
 /* put block[] to dest[] */
-static inline void put_dct(MpegEncContext *s,
+static inline void put_dct(MPVContext *s,
                            int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
 {
     s->dct_unquantize_intra(s, block, i, qscale);
@@ -1343,7 +1343,7 @@  static inline void put_dct(MpegEncContext *s,
 }
 
 /* add block[] to dest[] */
-static inline void add_dct(MpegEncContext *s,
+static inline void add_dct(MPVContext *s,
                            int16_t *block, int i, uint8_t *dest, int line_size)
 {
     if (s->block_last_index[i] >= 0) {
@@ -1351,7 +1351,7 @@  static inline void add_dct(MpegEncContext *s,
     }
 }
 
-static inline void add_dequant_dct(MpegEncContext *s,
+static inline void add_dequant_dct(MPVContext *s,
                            int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
 {
     if (s->block_last_index[i] >= 0) {
@@ -1364,7 +1364,7 @@  static inline void add_dequant_dct(MpegEncContext *s,
 /**
  * Clean dc, ac, coded_block for the current non-intra MB.
  */
-void ff_clean_intra_table_entries(MpegEncContext *s)
+void ff_clean_intra_table_entries(MPVContext *s)
 {
     int wrap = s->b8_stride;
     int xy = s->block_index[0];
@@ -1405,7 +1405,7 @@  void ff_clean_intra_table_entries(MpegEncContext *s)
    s->interlaced_dct : true if interlaced dct used (mpeg2)
  */
 static av_always_inline
-void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64],
+void mpv_reconstruct_mb_internal(MPVContext *s, int16_t block[12][64],
                             int lowres_flag, int is_mpeg12)
 {
 #define IS_ENCODER(s) (CONFIG_MPEGVIDEOENC && !lowres_flag && (s)->encoding)
@@ -1642,7 +1642,7 @@  skip_idct:
     }
 }
 
-void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
+void ff_mpv_reconstruct_mb(MPVContext *s, int16_t block[12][64])
 {
     if (CONFIG_XVMC &&
         s->avctx->hwaccel && s->avctx->hwaccel->decode_mb) {
@@ -1672,7 +1672,8 @@  void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
     else                  mpv_reconstruct_mb_internal(s, block, 0, 0);
 }
 
-void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
+void ff_init_block_index(MPVContext *s) //FIXME maybe rename
+{
     const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
     const int uvlinesize = s->current_picture.f->linesize[1];
     const int width_of_mb = (4 + (s->avctx->bits_per_raw_sample > 8)) - s->avctx->lowres;
@@ -1708,7 +1709,7 @@  void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
 /**
  * set qscale and update qscale dependent variables.
  */
-void ff_set_qscale(MpegEncContext * s, int qscale)
+void ff_set_qscale(MPVContext *s, int qscale)
 {
     if (qscale < 1)
         qscale = 1;
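
A small detail in the ff_update_duplicate_context() hunk above: the whole-context copy now uses memcpy(dst, src, sizeof(*dst)) rather than a hard-coded sizeof(MpegEncContext). Tying the size to the pointed-to object keeps the copy correct even if the slice-context type later stops being an alias of the main one. A rough sketch of the idiom; the split into two structs below is hypothetical and not part of this patch:

#include <string.h>

typedef struct Ctx     { int mb_x, mb_y, qscale; } Ctx;      /* per-slice state */
typedef struct MainCtx { Ctx c; int gop_size; }    MainCtx;  /* hypothetical later split */

static void update_duplicate_context(Ctx *dst, const Ctx *src)
{
    memcpy(dst, src, sizeof(*dst));  /* always copies exactly one Ctx, never more */
}

static void sync_slice_context(MainCtx *m, Ctx *slice)
{
    /* only the leading per-slice part is copied, however large MainCtx grows */
    update_duplicate_context(slice, &m->c);
}
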
diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
index 8f65f53c2a..1bad288ea5 100644
--- a/libavcodec/mpegvideo.h
+++ b/libavcodec/mpegvideo.h
@@ -60,9 +60,9 @@ 
 #define MAX_B_FRAMES 16
 
 /**
- * MpegEncContext.
+ * MPVContext.
  */
-typedef struct MpegEncContext {
+typedef struct MPVContext {
     AVClass *class;
 
     int y_dc_scale, c_dc_scale;
@@ -139,7 +139,7 @@  typedef struct MpegEncContext {
 
     int start_mb_y;            ///< start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
     int end_mb_y;              ///< end   mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)
-    struct MpegEncContext *thread_context[MAX_THREADS];
+    struct MPVContext *thread_context[MAX_THREADS];
     int slice_context_count;   ///< number of used thread_contexts
 
     /**
@@ -474,32 +474,32 @@  typedef struct MpegEncContext {
 
     int16_t (*block)[64]; ///< points to one of the following blocks
     int16_t (*blocks)[12][64]; // for HQ mode we need to keep the best block
-    int (*decode_mb)(struct MpegEncContext *s, int16_t block[12][64]); // used by some codecs to avoid a switch()
+    int (*decode_mb)(struct MPVContext *s, int16_t block[12][64]); // used by some codecs to avoid a switch()
 
 #define SLICE_OK         0
 #define SLICE_ERROR     -1
 #define SLICE_END       -2 ///<end marker found
 #define SLICE_NOEND     -3 ///<no end marker or error found but mb count exceeded
 
-    void (*dct_unquantize_mpeg1_intra)(struct MpegEncContext *s,
+    void (*dct_unquantize_mpeg1_intra)(struct MPVContext *s,
                            int16_t *block/*align 16*/, int n, int qscale);
-    void (*dct_unquantize_mpeg1_inter)(struct MpegEncContext *s,
+    void (*dct_unquantize_mpeg1_inter)(struct MPVContext *s,
                            int16_t *block/*align 16*/, int n, int qscale);
-    void (*dct_unquantize_mpeg2_intra)(struct MpegEncContext *s,
+    void (*dct_unquantize_mpeg2_intra)(struct MPVContext *s,
                            int16_t *block/*align 16*/, int n, int qscale);
-    void (*dct_unquantize_mpeg2_inter)(struct MpegEncContext *s,
+    void (*dct_unquantize_mpeg2_inter)(struct MPVContext *s,
                            int16_t *block/*align 16*/, int n, int qscale);
-    void (*dct_unquantize_h263_intra)(struct MpegEncContext *s,
+    void (*dct_unquantize_h263_intra)(struct MPVContext *s,
                            int16_t *block/*align 16*/, int n, int qscale);
-    void (*dct_unquantize_h263_inter)(struct MpegEncContext *s,
+    void (*dct_unquantize_h263_inter)(struct MPVContext *s,
                            int16_t *block/*align 16*/, int n, int qscale);
-    void (*dct_unquantize_intra)(struct MpegEncContext *s, // unquantizer to use (MPEG-4 can use both)
+    void (*dct_unquantize_intra)(struct MPVContext *s, // unquantizer to use (MPEG-4 can use both)
                            int16_t *block/*align 16*/, int n, int qscale);
-    void (*dct_unquantize_inter)(struct MpegEncContext *s, // unquantizer to use (MPEG-4 can use both)
+    void (*dct_unquantize_inter)(struct MPVContext *s, // unquantizer to use (MPEG-4 can use both)
                            int16_t *block/*align 16*/, int n, int qscale);
-    int (*dct_quantize)(struct MpegEncContext *s, int16_t *block/*align 16*/, int n, int qscale, int *overflow);
-    int (*fast_dct_quantize)(struct MpegEncContext *s, int16_t *block/*align 16*/, int n, int qscale, int *overflow);
-    void (*denoise_dct)(struct MpegEncContext *s, int16_t *block);
+    int (*dct_quantize)(struct MPVContext *s, int16_t *block/*align 16*/, int n, int qscale, int *overflow);
+    int (*fast_dct_quantize)(struct MPVContext *s, int16_t *block/*align 16*/, int n, int qscale, int *overflow);
+    void (*denoise_dct)(struct MPVContext *s, int16_t *block);
 
     int mpv_flags;      ///< flags set by private options
     int quantizer_noise_shaping;
@@ -549,79 +549,83 @@  typedef struct MpegEncContext {
 #if FF_API_MPEGVIDEO_OPTS || FF_API_MJPEG_PRED
     int dummy;               ///< used as target for deprecated options
 #endif
-} MpegEncContext;
+} MPVContext;
 
+typedef MPVContext MPVDecContext;
+typedef MPVContext MPVMainContext;
+typedef MPVContext MPVMainDecContext;
 
 /**
- * Set the given MpegEncContext to common defaults (same for encoding
+ * Set the given MPVContext to common defaults (same for encoding
  * and decoding).  The changed fields will not depend upon the prior
- * state of the MpegEncContext.
+ * state of the MPVContext.
  */
-void ff_mpv_common_defaults(MpegEncContext *s);
-
-int ff_mpv_common_init(MpegEncContext *s);
-void ff_mpv_common_init_arm(MpegEncContext *s);
-void ff_mpv_common_init_axp(MpegEncContext *s);
-void ff_mpv_common_init_neon(MpegEncContext *s);
-void ff_mpv_common_init_ppc(MpegEncContext *s);
-void ff_mpv_common_init_x86(MpegEncContext *s);
-void ff_mpv_common_init_mips(MpegEncContext *s);
+void ff_mpv_common_defaults(MPVMainContext *s);
+
+int ff_mpv_common_init(MPVMainContext *s);
+void ff_mpv_common_init_arm(MPVMainContext *s);
+void ff_mpv_common_init_axp(MPVMainContext *s);
+void ff_mpv_common_init_neon(MPVMainContext *s);
+void ff_mpv_common_init_ppc(MPVMainContext *s);
+void ff_mpv_common_init_x86(MPVMainContext *s);
+void ff_mpv_common_init_mips(MPVMainContext *s);
 /**
- * Initialize an MpegEncContext's thread contexts. Presumes that
+ * Initialize an MPVContext's thread contexts. Presumes that
  * slice_context_count is already set and that all the fields
  * that are freed/reset in free_duplicate_context() are NULL.
  */
-int ff_mpv_init_duplicate_contexts(MpegEncContext *s);
+int ff_mpv_init_duplicate_contexts(MPVContext *s);
 /**
- * Initialize and allocates MpegEncContext fields dependent on the resolution.
+ * Initialize and allocate MPVContext fields dependent on the resolution.
  */
-int ff_mpv_init_context_frame(MpegEncContext *s);
+int ff_mpv_init_context_frame(MPVContext *s);
 /**
- * Frees and resets MpegEncContext fields depending on the resolution
+ * Frees and resets MPVContext fields depending on the resolution
  * as well as the slice thread contexts.
  * Is used during resolution changes to avoid a full reinitialization of the
  * codec.
  */
-void ff_mpv_free_context_frame(MpegEncContext *s);
+void ff_mpv_free_context_frame(MPVContext *s);
 
-int ff_mpv_common_frame_size_change(MpegEncContext *s);
-void ff_mpv_common_end(MpegEncContext *s);
+int ff_mpv_common_frame_size_change(MPVContext *s);
+void ff_mpv_common_end(MPVMainContext *s);
 
 /**
- * Initialize the given MpegEncContext for decoding.
+ * Initialize the given MPVContext for decoding.
  * the changed fields will not depend upon
- * the prior state of the MpegEncContext.
+ * the prior state of the MPVContext.
  */
-void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx);
-void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64]);
-void ff_mpv_report_decode_progress(MpegEncContext *s);
+void ff_mpv_decode_init(MPVMainDecContext *s, AVCodecContext *avctx);
+void ff_mpv_reconstruct_mb(MPVDecContext *s, int16_t block[12][64]);
+void ff_mpv_report_decode_progress(MPVDecContext *s);
 
-int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx);
-void ff_mpv_frame_end(MpegEncContext *s);
+int ff_mpv_frame_start(MPVContext *s, AVCodecContext *avctx);
+void ff_mpv_frame_end(MPVContext *s);
 
-void ff_clean_intra_table_entries(MpegEncContext *s);
-void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h);
+void ff_clean_intra_table_entries(MPVContext *s);
+void ff_mpeg_draw_horiz_band(MPVContext *s, int y, int h);
 void ff_mpeg_flush(AVCodecContext *avctx);
 
-void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict);
+void ff_print_debug_info(MPVContext *s, Picture *p, AVFrame *pict);
 
-int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type);
+int ff_mpv_export_qp_table(MPVContext *s, AVFrame *f, Picture *p, int qp_type);
 
-int ff_update_duplicate_context(MpegEncContext *dst, MpegEncContext *src);
+int ff_update_duplicate_context(MPVMainDecContext *dst, MPVMainDecContext *src);
 int ff_mpeg_update_thread_context(AVCodecContext *dst, const AVCodecContext *src);
-void ff_set_qscale(MpegEncContext * s, int qscale);
+void ff_set_qscale(MPVContext * s, int qscale);
 
-void ff_mpv_idct_init(MpegEncContext *s);
-void ff_init_block_index(MpegEncContext *s);
+void ff_mpv_idct_init(MPVContext *s);
+void ff_init_block_index(MPVContext *s);
 
-void ff_mpv_motion(MpegEncContext *s,
+void ff_mpv_motion(MPVContext *s,
                    uint8_t *dest_y, uint8_t *dest_cb,
                    uint8_t *dest_cr, int dir,
                    uint8_t **ref_picture,
                    op_pixels_func (*pix_op)[4],
                    qpel_mc_func (*qpix_op)[16]);
 
-static inline void ff_update_block_index(MpegEncContext *s){
+static inline void ff_update_block_index(MPVContext *s)
+{
     const int bytes_per_pixel = 1 + (s->avctx->bits_per_raw_sample > 8);
     const int block_size= (8*bytes_per_pixel) >> s->avctx->lowres;
 
@@ -636,7 +640,7 @@  static inline void ff_update_block_index(MpegEncContext *s){
     s->dest[2]+= (2 >> s->chroma_x_shift) * block_size;
 }
 
-static inline int mpeg_get_qscale(MpegEncContext *s)
+static inline int mpeg_get_qscale(MPVContext *s)
 {
     int qscale = get_bits(&s->gb, 5);
     if (s->q_scale_type)
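
All of the new names used in the prototypes above (MPVDecContext, MPVMainContext, MPVMainDecContext, plus the encoder aliases in mpegvideoenc.h) are typedefs of the same MPVContext, so a declaration and its definition may pick different aliases without conflicting. That is why, for example, ff_update_duplicate_context() can be declared here with MPVMainDecContext parameters while mpegvideo.c defines it with MPVContext ones. A minimal sketch of the C rule involved, with a stand-in struct body and simplified signature:

typedef struct MPVContext { int qscale; } MPVContext;
typedef MPVContext MPVMainDecContext;   /* alias, exactly as in this header */

/* "header": declared with the base type ... */
int ff_mpv_frame_start(MPVContext *s);

/* "source file": defined with the Main alias; the two declarations are
 * compatible because both names denote the very same type */
int ff_mpv_frame_start(MPVMainDecContext *s)
{
    s->qscale = 0;
    return 0;
}

Once the aliases become genuinely distinct types, such mixed uses would stop compiling cleanly, which is presumably part of the point of spelling them out now.
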
diff --git a/libavcodec/mpegvideo_dec.c b/libavcodec/mpegvideo_dec.c
index dc38f6b267..4be0c3ad8a 100644
--- a/libavcodec/mpegvideo_dec.c
+++ b/libavcodec/mpegvideo_dec.c
@@ -33,7 +33,7 @@ 
 #include "mpegvideo.h"
 #include "thread.h"
 
-void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
+void ff_mpv_decode_init(MPVMainDecContext *s, AVCodecContext *avctx)
 {
     ff_mpv_common_defaults(s);
 
@@ -50,8 +50,8 @@  void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
 int ff_mpeg_update_thread_context(AVCodecContext *dst,
                                   const AVCodecContext *src)
 {
-    MpegEncContext *const s1 = src->priv_data;
-    MpegEncContext *const s  = dst->priv_data;
+    MPVMainDecContext *const s1 = src->priv_data;
+    MPVMainDecContext *const s  = dst->priv_data;
     int ret;
 
     if (dst == src)
@@ -189,7 +189,7 @@  do {\
     return 0;
 }
 
-int ff_mpv_common_frame_size_change(MpegEncContext *s)
+int ff_mpv_common_frame_size_change(MPVMainDecContext *s)
 {
     int err = 0;
 
@@ -243,7 +243,7 @@  int ff_mpv_common_frame_size_change(MpegEncContext *s)
     return err;
 }
 
-static int alloc_picture(MpegEncContext *s, Picture *pic)
+static int alloc_picture(MPVMainDecContext *s, Picture *pic)
 {
     return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, 0, 0,
                             s->chroma_x_shift, s->chroma_y_shift, s->out_format,
@@ -271,7 +271,7 @@  static void gray_frame(AVFrame *frame)
  * generic function called after decoding
  * the header and before a frame is decoded.
  */
-int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
+int ff_mpv_frame_start(MPVMainDecContext *s, AVCodecContext *avctx)
 {
     Picture *pic;
     int idx, ret;
@@ -495,7 +495,7 @@  int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx)
 }
 
 /* called after a frame has been decoded. */
-void ff_mpv_frame_end(MpegEncContext *s)
+void ff_mpv_frame_end(MPVMainDecContext *s)
 {
     emms_c();
 
@@ -503,14 +503,14 @@  void ff_mpv_frame_end(MpegEncContext *s)
         ff_thread_report_progress(&s->current_picture_ptr->tf, INT_MAX, 0);
 }
 
-void ff_print_debug_info(MpegEncContext *s, Picture *p, AVFrame *pict)
+void ff_print_debug_info(MPVMainDecContext *s, Picture *p, AVFrame *pict)
 {
     ff_print_debug_info2(s->avctx, pict, s->mbskip_table, p->mb_type,
                          p->qscale_table, p->motion_val,
                          s->mb_width, s->mb_height, s->mb_stride, s->quarter_sample);
 }
 
-int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_type)
+int ff_mpv_export_qp_table(MPVMainDecContext *s, AVFrame *f, Picture *p, int qp_type)
 {
     AVVideoEncParams *par;
     int mult = (qp_type == FF_QSCALE_TYPE_MPEG1) ? 2 : 1;
@@ -540,7 +540,7 @@  int ff_mpv_export_qp_table(MpegEncContext *s, AVFrame *f, Picture *p, int qp_typ
     return 0;
 }
 
-void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
+void ff_mpeg_draw_horiz_band(MPVDecContext *s, int y, int h)
 {
     ff_draw_horiz_band(s->avctx, s->current_picture_ptr->f,
                        s->last_picture_ptr ? s->last_picture_ptr->f : NULL,
@@ -550,7 +550,7 @@  void ff_mpeg_draw_horiz_band(MpegEncContext *s, int y, int h)
 
 void ff_mpeg_flush(AVCodecContext *avctx)
 {
-    MpegEncContext *const s = avctx->priv_data;
+    MPVMainDecContext *const s = avctx->priv_data;
 
     if (!s->picture)
         return;
@@ -577,7 +577,7 @@  void ff_mpeg_flush(AVCodecContext *avctx)
     s->pp_time = 0;
 }
 
-void ff_mpv_report_decode_progress(MpegEncContext *s)
+void ff_mpv_report_decode_progress(MPVDecContext *s)
 {
     if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
         ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
index a76b4b9f60..02efc9c2e9 100644
--- a/libavcodec/mpegvideo_enc.c
+++ b/libavcodec/mpegvideo_enc.c
@@ -80,11 +80,11 @@ 
 #define QMAT_SHIFT_MMX 16
 #define QMAT_SHIFT 21
 
-static int encode_picture(MpegEncContext *s, int picture_number);
-static int dct_quantize_refine(MpegEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
-static int sse_mb(MpegEncContext *s);
-static void denoise_dct_c(MpegEncContext *s, int16_t *block);
-static int dct_quantize_trellis_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
+static int encode_picture(MPVMainEncContext *s, int picture_number);
+static int dct_quantize_refine(MPVEncContext *s, int16_t *block, int16_t *weight, int16_t *orig, int n, int qscale);
+static int sse_mb(MPVEncContext *s);
+static void denoise_dct_c(MPVEncContext *s, int16_t *block);
+static int dct_quantize_trellis_c(MPVEncContext *s, int16_t *block, int n, int qscale, int *overflow);
 
 static uint8_t default_mv_penalty[MAX_FCODE + 1][MAX_DMV * 2 + 1];
 static uint8_t default_fcode_tab[MAX_MV * 2 + 1];
@@ -107,7 +107,7 @@  const AVClass ff_mpv_enc_class = {
     .version    = LIBAVUTIL_VERSION_INT,
 };
 
-void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
+void ff_convert_matrix(MPVEncContext *s, int (*qmat)[64],
                        uint16_t (*qmat16)[2][64],
                        const uint16_t *quant_matrix,
                        int bias, int qmin, int qmax, int intra)
@@ -191,7 +191,7 @@  void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64],
     }
 }
 
-static inline void update_qscale(MpegEncContext *s)
+static inline void update_qscale(MPVEncContext *s)
 {
     if (s->q_scale_type == 1 && 0) {
         int i;
@@ -235,7 +235,7 @@  void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix)
 /**
  * init s->current_picture.qscale_table from s->lambda_table
  */
-void ff_init_qscale_tab(MpegEncContext *s)
+void ff_init_qscale_tab(MPVMainEncContext *s)
 {
     int8_t * const qscale_table = s->current_picture.qscale_table;
     int i;
@@ -248,8 +248,8 @@  void ff_init_qscale_tab(MpegEncContext *s)
     }
 }
 
-static void update_duplicate_context_after_me(MpegEncContext *dst,
-                                              MpegEncContext *src)
+static void update_duplicate_context_after_me(MPVEncContext *dst,
+                                              MPVEncContext *src)
 {
 #define COPY(a) dst->a= src->a
     COPY(pict_type);
@@ -273,10 +273,10 @@  static void mpv_encode_init_static(void)
 }
 
 /**
- * Set the given MpegEncContext to defaults for encoding.
- * the changed fields will not depend upon the prior state of the MpegEncContext.
+ * Set the given MPVMainEncContext to defaults for encoding.
+ * the changed fields will not depend upon the prior state of the MPVMainEncContext.
  */
-static void mpv_encode_defaults(MpegEncContext *s)
+static void mpv_encode_defaults(MPVMainEncContext *s)
 {
     static AVOnce init_static_once = AV_ONCE_INIT;
 
@@ -291,7 +291,7 @@  static void mpv_encode_defaults(MpegEncContext *s)
     s->picture_in_gop_number = 0;
 }
 
-av_cold int ff_dct_encode_init(MpegEncContext *s)
+av_cold int ff_dct_encode_init(MPVEncContext *s)
 {
     if (ARCH_X86)
         ff_dct_encode_init_x86(s);
@@ -312,7 +312,7 @@  av_cold int ff_dct_encode_init(MpegEncContext *s)
 /* init video encoder */
 av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
 {
-    MpegEncContext *s = avctx->priv_data;
+    MPVMainEncContext *const s = avctx->priv_data;
     AVCPBProperties *cpb_props;
     int i, ret;
 
@@ -927,7 +927,7 @@  av_cold int ff_mpv_encode_init(AVCodecContext *avctx)
 
 av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
 {
-    MpegEncContext *s = avctx->priv_data;
+    MPVMainEncContext *const s = avctx->priv_data;
     int i;
 
     ff_rate_control_uninit(s);
@@ -970,7 +970,7 @@  static int get_sae(uint8_t *src, int ref, int stride)
     return acc;
 }
 
-static int get_intra_count(MpegEncContext *s, uint8_t *src,
+static int get_intra_count(MPVMainEncContext *s, uint8_t *src,
                            uint8_t *ref, int stride)
 {
     int x, y, w, h;
@@ -993,7 +993,7 @@  static int get_intra_count(MpegEncContext *s, uint8_t *src,
     return acc;
 }
 
-static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
+static int alloc_picture(MPVMainEncContext *s, Picture *pic, int shared)
 {
     return ff_alloc_picture(s->avctx, pic, &s->me, &s->sc, shared, 1,
                             s->chroma_x_shift, s->chroma_y_shift, s->out_format,
@@ -1001,7 +1001,7 @@  static int alloc_picture(MpegEncContext *s, Picture *pic, int shared)
                             &s->linesize, &s->uvlinesize);
 }
 
-static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
+static int load_input_picture(MPVMainEncContext *s, const AVFrame *pic_arg)
 {
     Picture *pic = NULL;
     int64_t pts;
@@ -1152,7 +1152,7 @@  static int load_input_picture(MpegEncContext *s, const AVFrame *pic_arg)
     return 0;
 }
 
-static int skip_check(MpegEncContext *s, Picture *p, Picture *ref)
+static int skip_check(MPVMainEncContext *s, Picture *p, Picture *ref)
 {
     int x, y, plane;
     int score = 0;
@@ -1214,7 +1214,7 @@  static int encode_frame(AVCodecContext *c, AVFrame *frame, AVPacket *pkt)
     return size;
 }
 
-static int estimate_best_b_count(MpegEncContext *s)
+static int estimate_best_b_count(MPVMainEncContext *s)
 {
     const AVCodec *codec = avcodec_find_encoder(s->avctx->codec_id);
     AVPacket *pkt;
@@ -1361,7 +1361,7 @@  fail:
     return best_b_count;
 }
 
-static int select_input_picture(MpegEncContext *s)
+static int select_input_picture(MPVMainEncContext *s)
 {
     int i, ret;
 
@@ -1538,7 +1538,7 @@  no_output_pic:
     return 0;
 }
 
-static void frame_end(MpegEncContext *s)
+static void frame_end(MPVMainEncContext *s)
 {
     if (s->unrestricted_mv &&
         s->current_picture.reference &&
@@ -1575,7 +1575,7 @@  static void frame_end(MpegEncContext *s)
         s->last_non_b_pict_type = s->pict_type;
 }
 
-static void update_noise_reduction(MpegEncContext *s)
+static void update_noise_reduction(MPVMainEncContext *s)
 {
     int intra, i;
 
@@ -1596,7 +1596,7 @@  static void update_noise_reduction(MpegEncContext *s)
     }
 }
 
-static int frame_start(MpegEncContext *s)
+static int frame_start(MPVMainEncContext *s)
 {
     int ret;
 
@@ -1670,7 +1670,7 @@  static int frame_start(MpegEncContext *s)
 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
                           const AVFrame *pic_arg, int *got_packet)
 {
-    MpegEncContext *s = avctx->priv_data;
+    MPVMainEncContext *const s = avctx->priv_data;
     int i, stuffing_count, ret;
     int context_count = s->slice_context_count;
 
@@ -1903,7 +1903,7 @@  vbv_retry:
     return 0;
 }
 
-static inline void dct_single_coeff_elimination(MpegEncContext *s,
+static inline void dct_single_coeff_elimination(MPVEncContext *s,
                                                 int n, int threshold)
 {
     static const char tab[64] = {
@@ -1959,7 +1959,7 @@  static inline void dct_single_coeff_elimination(MpegEncContext *s,
         s->block_last_index[n] = -1;
 }
 
-static inline void clip_coeffs(MpegEncContext *s, int16_t *block,
+static inline void clip_coeffs(MPVEncContext *s, int16_t *block,
                                int last_index)
 {
     int i;
@@ -2017,7 +2017,7 @@  static void get_visual_weight(int16_t *weight, uint8_t *ptr, int stride)
     }
 }
 
-static av_always_inline void encode_mb_internal(MpegEncContext *s,
+static av_always_inline void encode_mb_internal(MPVEncContext *s,
                                                 int motion_x, int motion_y,
                                                 int mb_block_height,
                                                 int mb_block_width,
@@ -2409,7 +2409,7 @@  static av_always_inline void encode_mb_internal(MpegEncContext *s,
     }
 }
 
-static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int motion_y)
+static av_always_inline void encode_mb(MPVEncContext *s, int motion_x, int motion_y)
 {
     if (s->chroma_format == CHROMA_420)
         encode_mb_internal(s, motion_x, motion_y,  8, 8, 6, 1, 1, CHROMA_420);
@@ -2419,7 +2419,9 @@  static av_always_inline void encode_mb(MpegEncContext *s, int motion_x, int moti
         encode_mb_internal(s, motion_x, motion_y, 16, 16, 12, 0, 0, CHROMA_444);
 }
 
-static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext *s, int type){
+static inline void copy_context_before_encode(MPVEncContext *d,
+                                              const MPVEncContext *s, int type)
+{
     int i;
 
     memcpy(d->last_mv, s->last_mv, 2*2*2*sizeof(int)); //FIXME is memcpy faster than a loop?
@@ -2447,7 +2449,9 @@  static inline void copy_context_before_encode(MpegEncContext *d, MpegEncContext
     d->esc3_level_length= s->esc3_level_length;
 }
 
-static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *s, int type){
+static inline void copy_context_after_encode(MPVEncContext *d,
+                                             const MPVEncContext *s, int type)
+{
     int i;
 
     memcpy(d->mv, s->mv, 2*4*2*sizeof(int));
@@ -2486,7 +2490,7 @@  static inline void copy_context_after_encode(MpegEncContext *d, MpegEncContext *
     d->esc3_level_length= s->esc3_level_length;
 }
 
-static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegEncContext *best, int type,
+static inline void encode_mb_hq(MPVEncContext *s, MPVEncContext *backup, MPVEncContext *best, int type,
                            PutBitContext pb[2], PutBitContext pb2[2], PutBitContext tex_pb[2],
                            int *dmin, int *next_block, int motion_x, int motion_y)
 {
@@ -2537,7 +2541,9 @@  static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegE
     }
 }
 
-static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride){
+static int sse(MPVEncContext *s, uint8_t *src1, uint8_t *src2,
+               int w, int h, int stride)
+{
     const uint32_t *sq = ff_square_tab + 256;
     int acc=0;
     int x,y;
@@ -2558,7 +2564,8 @@  static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, in
     return acc;
 }
 
-static int sse_mb(MpegEncContext *s){
+static int sse_mb(MPVEncContext *s)
+{
     int w= 16;
     int h= 16;
 
@@ -2582,7 +2589,7 @@  static int sse_mb(MpegEncContext *s){
 }
 
 static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
-    MpegEncContext *s= *(void**)arg;
+    MPVEncContext *const s = *(void**)arg;
 
 
     s->me.pre_pass=1;
@@ -2601,7 +2608,7 @@  static int pre_estimate_motion_thread(AVCodecContext *c, void *arg){
 }
 
 static int estimate_motion_thread(AVCodecContext *c, void *arg){
-    MpegEncContext *s= *(void**)arg;
+    MPVEncContext *const s = *(void**)arg;
 
     s->me.dia_size= s->avctx->dia_size;
     s->first_slice_line=1;
@@ -2626,7 +2633,7 @@  static int estimate_motion_thread(AVCodecContext *c, void *arg){
 }
 
 static int mb_var_thread(AVCodecContext *c, void *arg){
-    MpegEncContext *s= *(void**)arg;
+    MPVEncContext *const s = *(void**)arg;
     int mb_x, mb_y;
 
     for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) {
@@ -2648,7 +2655,8 @@  static int mb_var_thread(AVCodecContext *c, void *arg){
     return 0;
 }
 
-static void write_slice_end(MpegEncContext *s){
+static void write_slice_end(MPVEncContext *s)
+{
     if(CONFIG_MPEG4_ENCODER && s->codec_id==AV_CODEC_ID_MPEG4){
         if(s->partitioned_frame){
             ff_mpeg4_merge_partitions(s);
@@ -2668,7 +2676,7 @@  static void write_slice_end(MpegEncContext *s){
         s->misc_bits+= get_bits_diff(s);
 }
 
-static void write_mb_info(MpegEncContext *s)
+static void write_mb_info(MPVEncContext *s)
 {
     uint8_t *ptr = s->mb_info_ptr + s->mb_info_size - 12;
     int offset = put_bits_count(&s->pb);
@@ -2688,7 +2696,7 @@  static void write_mb_info(MpegEncContext *s)
     bytestream_put_byte(&ptr, 0); /* vmv2 */
 }
 
-static void update_mb_info(MpegEncContext *s, int startcode)
+static void update_mb_info(MPVEncContext *s, int startcode)
 {
     if (!s->mb_info)
         return;
@@ -2711,7 +2719,7 @@  static void update_mb_info(MpegEncContext *s, int startcode)
     write_mb_info(s);
 }
 
-int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase)
+int ff_mpv_reallocate_putbitbuffer(MPVEncContext *s, size_t threshold, size_t size_increase)
 {
     if (put_bytes_left(&s->pb, 0) < threshold
         && s->slice_context_count == 1
@@ -2746,11 +2754,11 @@  int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t s
 }
 
 static int encode_thread(AVCodecContext *c, void *arg){
-    MpegEncContext *s= *(void**)arg;
+    MPVEncContext *const s = *(void**)arg;
     int mb_x, mb_y, mb_y_order;
     int chr_h= 16>>s->chroma_y_shift;
     int i, j;
-    MpegEncContext best_s = { 0 }, backup_s;
+    MPVEncContext best_s = { 0 }, backup_s;
     uint8_t bit_buf[2][MAX_MB_BYTES];
     uint8_t bit_buf2[2][MAX_MB_BYTES];
     uint8_t bit_buf_tex[2][MAX_MB_BYTES];
@@ -3384,13 +3392,15 @@  static int encode_thread(AVCodecContext *c, void *arg){
 }
 
 #define MERGE(field) dst->field += src->field; src->field=0
-static void merge_context_after_me(MpegEncContext *dst, MpegEncContext *src){
+static void merge_context_after_me(MPVEncContext *dst, MPVEncContext *src)
+{
     MERGE(me.scene_change_score);
     MERGE(me.mc_mb_var_sum_temp);
     MERGE(me.mb_var_sum_temp);
 }
 
-static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src){
+static void merge_context_after_encode(MPVEncContext *dst, MPVEncContext *src)
+{
     int i;
 
     MERGE(dct_count[0]); //note, the other dct vars are not part of the context
@@ -3420,7 +3430,8 @@  static void merge_context_after_encode(MpegEncContext *dst, MpegEncContext *src)
     flush_put_bits(&dst->pb);
 }
 
-static int estimate_qp(MpegEncContext *s, int dry_run){
+static int estimate_qp(MPVMainEncContext *s, int dry_run)
+{
     if (s->next_lambda){
         s->current_picture_ptr->f->quality =
         s->current_picture.f->quality = s->next_lambda;
@@ -3458,7 +3469,8 @@  static int estimate_qp(MpegEncContext *s, int dry_run){
 }
 
 /* must be called before writing the header */
-static void set_frame_distances(MpegEncContext * s){
+static void set_frame_distances(MPVMainEncContext *s)
+{
     av_assert1(s->current_picture_ptr->f->pts != AV_NOPTS_VALUE);
     s->time = s->current_picture_ptr->f->pts * s->avctx->time_base.num;
 
@@ -3472,7 +3484,7 @@  static void set_frame_distances(MpegEncContext * s){
     }
 }
 
-static int encode_picture(MpegEncContext *s, int picture_number)
+static int encode_picture(MPVMainEncContext *s, int picture_number)
 {
     int i, ret;
     int bits;
@@ -3753,7 +3765,8 @@  static int encode_picture(MpegEncContext *s, int picture_number)
     return 0;
 }
 
-static void denoise_dct_c(MpegEncContext *s, int16_t *block){
+static void denoise_dct_c(MPVEncContext *s, int16_t *block)
+{
     const int intra= s->mb_intra;
     int i;
 
@@ -3777,7 +3790,7 @@  static void denoise_dct_c(MpegEncContext *s, int16_t *block){
     }
 }
 
-static int dct_quantize_trellis_c(MpegEncContext *s,
+static int dct_quantize_trellis_c(MPVEncContext *s,
                                   int16_t *block, int n,
                                   int qscale, int *overflow){
     const int *qmat;
@@ -4111,7 +4124,7 @@  static void build_basis(uint8_t *perm){
     }
 }
 
-static int dct_quantize_refine(MpegEncContext *s, //FIXME breaks denoise?
+static int dct_quantize_refine(MPVEncContext *s, //FIXME breaks denoise?
                         int16_t *block, int16_t *weight, int16_t *orig,
                         int n, int qscale){
     int16_t rem[64];
@@ -4463,7 +4476,7 @@  void ff_block_permute(int16_t *block, uint8_t *permutation,
     }
 }
 
-int ff_dct_quantize_c(MpegEncContext *s,
+int ff_dct_quantize_c(MPVEncContext *s,
                         int16_t *block, int n,
                         int qscale, int *overflow)
 {
diff --git a/libavcodec/mpegvideo_motion.c b/libavcodec/mpegvideo_motion.c
index 876a7375f8..12ae984537 100644
--- a/libavcodec/mpegvideo_motion.c
+++ b/libavcodec/mpegvideo_motion.c
@@ -32,7 +32,7 @@ 
 #include "qpeldsp.h"
 #include "wmv2.h"
 
-static void gmc1_motion(MpegEncContext *s,
+static void gmc1_motion(MPVContext *s,
                         uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                         uint8_t **ref_picture)
 {
@@ -129,7 +129,7 @@  static void gmc1_motion(MpegEncContext *s,
                  motion_x & 15, motion_y & 15, 128 - s->no_rounding);
 }
 
-static void gmc_motion(MpegEncContext *s,
+static void gmc_motion(MPVContext *s,
                        uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                        uint8_t **ref_picture)
 {
@@ -187,7 +187,7 @@  static void gmc_motion(MpegEncContext *s,
                 (s->h_edge_pos + 1) >> 1, (s->v_edge_pos + 1) >> 1);
 }
 
-static inline int hpel_motion(MpegEncContext *s,
+static inline int hpel_motion(MPVContext *s,
                               uint8_t *dest, uint8_t *src,
                               int src_x, int src_y,
                               op_pixels_func *pix_op,
@@ -223,7 +223,7 @@  static inline int hpel_motion(MpegEncContext *s,
 }
 
 static av_always_inline
-void mpeg_motion_internal(MpegEncContext *s,
+void mpeg_motion_internal(MPVContext *s,
                           uint8_t *dest_y,
                           uint8_t *dest_cb,
                           uint8_t *dest_cr,
@@ -365,7 +365,7 @@  void mpeg_motion_internal(MpegEncContext *s,
     }
 }
 /* apply one mpeg motion vector to the three components */
-static void mpeg_motion(MpegEncContext *s,
+static void mpeg_motion(MPVContext *s,
                         uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                         int field_select, uint8_t **ref_picture,
                         op_pixels_func (*pix_op)[4],
@@ -383,7 +383,7 @@  static void mpeg_motion(MpegEncContext *s,
                              motion_x, motion_y, h, 0, is_16x8, mb_y);
 }
 
-static void mpeg_motion_field(MpegEncContext *s, uint8_t *dest_y,
+static void mpeg_motion_field(MPVContext *s, uint8_t *dest_y,
                               uint8_t *dest_cb, uint8_t *dest_cr,
                               int bottom_field, int field_select,
                               uint8_t **ref_picture,
@@ -456,7 +456,7 @@  static inline void put_obmc(uint8_t *dst, uint8_t *src[5], int stride)
 }
 
 /* obmc for 1 8x8 luma block */
-static inline void obmc_motion(MpegEncContext *s,
+static inline void obmc_motion(MPVContext *s,
                                uint8_t *dest, uint8_t *src,
                                int src_x, int src_y,
                                op_pixels_func *pix_op,
@@ -482,7 +482,7 @@  static inline void obmc_motion(MpegEncContext *s,
     put_obmc(dest, ptr, s->linesize);
 }
 
-static inline void qpel_motion(MpegEncContext *s,
+static inline void qpel_motion(MPVContext *s,
                                uint8_t *dest_y,
                                uint8_t *dest_cb,
                                uint8_t *dest_cr,
@@ -589,7 +589,7 @@  static inline void qpel_motion(MpegEncContext *s,
 /**
  * H.263 chroma 4mv motion compensation.
  */
-static void chroma_4mv_motion(MpegEncContext *s,
+static void chroma_4mv_motion(MPVContext *s,
                               uint8_t *dest_cb, uint8_t *dest_cr,
                               uint8_t **ref_picture,
                               op_pixels_func *pix_op,
@@ -641,7 +641,7 @@  static void chroma_4mv_motion(MpegEncContext *s,
     pix_op[dxy](dest_cr, ptr, s->uvlinesize, 8);
 }
 
-static inline void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir)
+static inline void prefetch_motion(MPVContext *s, uint8_t **pix, int dir)
 {
     /* fetch pixels for estimated mv 4 macroblocks ahead
      * optimized for 64byte cache lines */
@@ -655,7 +655,7 @@  static inline void prefetch_motion(MpegEncContext *s, uint8_t **pix, int dir)
     s->vdsp.prefetch(pix[1] + off, pix[2] - pix[1], 2);
 }
 
-static inline void apply_obmc(MpegEncContext *s,
+static inline void apply_obmc(MPVContext *s,
                               uint8_t *dest_y,
                               uint8_t *dest_cb,
                               uint8_t *dest_cr,
@@ -742,7 +742,7 @@  static inline void apply_obmc(MpegEncContext *s,
                           mx, my);
 }
 
-static inline void apply_8x8(MpegEncContext *s,
+static inline void apply_8x8(MPVContext *s,
                              uint8_t *dest_y,
                              uint8_t *dest_cb,
                              uint8_t *dest_cr,
@@ -826,7 +826,7 @@  static inline void apply_8x8(MpegEncContext *s,
  * @param qpix_op qpel motion compensation function (average or put normally)
  * the motion vectors are taken from s->mv and the MV type from s->mv_type
  */
-static av_always_inline void mpv_motion_internal(MpegEncContext *s,
+static av_always_inline void mpv_motion_internal(MPVContext *s,
                                                  uint8_t *dest_y,
                                                  uint8_t *dest_cb,
                                                  uint8_t *dest_cr,
@@ -969,7 +969,7 @@  static av_always_inline void mpv_motion_internal(MpegEncContext *s,
     }
 }
 
-void ff_mpv_motion(MpegEncContext *s,
+void ff_mpv_motion(MPVContext *s,
                    uint8_t *dest_y, uint8_t *dest_cb,
                    uint8_t *dest_cr, int dir,
                    uint8_t **ref_picture,
diff --git a/libavcodec/mpegvideo_xvmc.c b/libavcodec/mpegvideo_xvmc.c
index bfeb453834..28cb8e4d03 100644
--- a/libavcodec/mpegvideo_xvmc.c
+++ b/libavcodec/mpegvideo_xvmc.c
@@ -34,13 +34,13 @@ 
 #include "version.h"
 
 /**
- * Initialize the block field of the MpegEncContext pointer passed as
+ * Initialize the block field of the MPVDecContext pointer passed as
  * parameter after making sure that the data is not corrupted.
  * In order to implement something like direct rendering instead of decoding
  * coefficients in s->blocks and then copying them, copy them directly
  * into the data_blocks array provided by xvmc.
  */
-void ff_xvmc_init_block(MpegEncContext *s)
+void ff_xvmc_init_block(MPVDecContext *s)
 {
     struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.f->data[2];
     assert(render && render->xvmc_id == AV_XVMC_ID);
@@ -48,7 +48,7 @@  void ff_xvmc_init_block(MpegEncContext *s)
     s->block = (int16_t (*)[64])(render->data_blocks + render->next_free_data_block_num * 64);
 }
 
-static void exchange_uv(MpegEncContext *s)
+static void exchange_uv(MPVDecContext *s)
 {
     int16_t (*tmp)[64];
 
@@ -61,7 +61,7 @@  static void exchange_uv(MpegEncContext *s)
  * Fill individual block pointers, so there are no gaps in the data_block array
  * in case not all blocks in the macroblock are coded.
  */
-void ff_xvmc_pack_pblocks(MpegEncContext *s, int cbp)
+void ff_xvmc_pack_pblocks(MPVDecContext *s, int cbp)
 {
     int i, j = 0;
     const int mb_block_count = 4 + (1 << s->chroma_format);
@@ -86,7 +86,7 @@  void ff_xvmc_pack_pblocks(MpegEncContext *s, int cbp)
  */
 static int ff_xvmc_field_start(AVCodecContext *avctx, const uint8_t *buf, uint32_t buf_size)
 {
-    struct MpegEncContext *s = avctx->priv_data;
+    MPVDecContext *const s = avctx->priv_data;
     struct xvmc_pix_fmt *last, *next, *render = (struct xvmc_pix_fmt*)s->current_picture.f->data[2];
     const int mb_block_count = 4 + (1 << s->chroma_format);
 
@@ -155,7 +155,7 @@  return -1;
  */
 static int ff_xvmc_field_end(AVCodecContext *avctx)
 {
-    struct MpegEncContext *s = avctx->priv_data;
+    MPVDecContext *const s = avctx->priv_data;
     struct xvmc_pix_fmt *render = (struct xvmc_pix_fmt*)s->current_picture.f->data[2];
     assert(render);
 
@@ -170,7 +170,7 @@  static int ff_xvmc_field_end(AVCodecContext *avctx)
  */
 static void ff_xvmc_decode_mb(void *opaque)
 {
-    MpegEncContext *const s = opaque;
+    MPVDecContext *const s = opaque;
     XvMCMacroBlock *mv_block;
     struct xvmc_pix_fmt *render;
     int i, cbp, blocks_per_mb;
diff --git a/libavcodec/mpegvideoenc.h b/libavcodec/mpegvideoenc.h
index 21e7a55e13..94ebe0237f 100644
--- a/libavcodec/mpegvideoenc.h
+++ b/libavcodec/mpegvideoenc.h
@@ -32,6 +32,9 @@ 
 #include "internal.h"
 #include "mpegvideo.h"
 
+typedef MPVContext MPVEncContext;
+typedef MPVContext MPVMainEncContext;
+
 /* mpegvideo_enc common options */
 #define FF_MPV_FLAG_SKIP_RD      0x0001
 #define FF_MPV_FLAG_STRICT_GOP   0x0002
@@ -58,7 +61,7 @@ 
 { "msad",   "Sum of absolute differences, median predicted", 0, AV_OPT_TYPE_CONST, {.i64 = FF_CMP_MEDIAN_SAD }, INT_MIN, INT_MAX, FF_MPV_OPT_FLAGS, "cmp_func" }
 
 #ifndef FF_MPV_OFFSET
-#define FF_MPV_OFFSET(x) offsetof(MpegEncContext, x)
+#define FF_MPV_OFFSET(x) offsetof(MPVMainEncContext, x)
 #endif
 #define FF_MPV_OPT_FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM)
 #define FF_MPV_COMMON_OPTS \
@@ -129,26 +132,26 @@  FF_MPV_OPT_CMP_FUNC, \
 extern const AVClass ff_mpv_enc_class;
 
 int ff_mpv_encode_init(AVCodecContext *avctx);
-void ff_mpv_encode_init_x86(MpegEncContext *s);
+void ff_mpv_encode_init_x86(MPVMainEncContext *s);
 
 int ff_mpv_encode_end(AVCodecContext *avctx);
 int ff_mpv_encode_picture(AVCodecContext *avctx, AVPacket *pkt,
                           const AVFrame *frame, int *got_packet);
-int ff_mpv_reallocate_putbitbuffer(MpegEncContext *s, size_t threshold, size_t size_increase);
+int ff_mpv_reallocate_putbitbuffer(MPVEncContext *s, size_t threshold, size_t size_increase);
 
 void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix);
 
-int ff_dct_encode_init(MpegEncContext *s);
-void ff_dct_encode_init_x86(MpegEncContext *s);
+int ff_dct_encode_init(MPVEncContext *s);
+void ff_dct_encode_init_x86(MPVEncContext *s);
 
-int ff_dct_quantize_c(MpegEncContext *s, int16_t *block, int n, int qscale, int *overflow);
-void ff_convert_matrix(MpegEncContext *s, int (*qmat)[64], uint16_t (*qmat16)[2][64],
+int ff_dct_quantize_c(MPVEncContext *s, int16_t *block, int n, int qscale, int *overflow);
+void ff_convert_matrix(MPVEncContext *s, int (*qmat)[64], uint16_t (*qmat16)[2][64],
                        const uint16_t *quant_matrix, int bias, int qmin, int qmax, int intra);
 
 void ff_block_permute(int16_t *block, uint8_t *permutation,
                       const uint8_t *scantable, int last);
 
-static inline int get_bits_diff(MpegEncContext *s)
+static inline int get_bits_diff(MPVEncContext *s)
 {
     const int bits = put_bits_count(&s->pb);
     const int last = s->last_bits;
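
Since all of the new names introduced above are, for now, plain typedef aliases of the same underlying context, the rename is purely mechanical: pointers stay interchangeable and option-table offsets computed via FF_MPV_OFFSET() are unchanged. A minimal standalone sketch of that property, with a stub struct and a single stub field standing in for the real context layout:

/* sketch.c: all aliases denote the same struct, so offsetof() agrees
 * no matter which name an option table or helper is written against. */
#include <stddef.h>
#include <stdio.h>

typedef struct MPVContext {
    int last_bits;                    /* stub field, not the real layout */
} MPVContext;

typedef MPVContext MPVEncContext;
typedef MPVContext MPVMainEncContext;

#define FF_MPV_OFFSET(x) offsetof(MPVMainEncContext, x)

int main(void)
{
    printf("%zu %zu\n", FF_MPV_OFFSET(last_bits),
                        offsetof(MPVEncContext, last_bits));
    return 0;                         /* both offsets are identical */
}

Once the contexts are actually split into distinct structs, such mixed uses stop compiling, which is the point of introducing the distinct names now.
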
diff --git a/libavcodec/msmpeg4.c b/libavcodec/msmpeg4.c
index f12586dd64..991b5f09c9 100644
--- a/libavcodec/msmpeg4.c
+++ b/libavcodec/msmpeg4.c
@@ -112,7 +112,7 @@  static av_cold void msmpeg4_common_init_static(void)
     init_h263_dc_for_msmpeg4();
 }
 
-av_cold void ff_msmpeg4_common_init(MpegEncContext *s)
+av_cold void ff_msmpeg4_common_init(MPVMainContext *s)
 {
     static AVOnce init_static_once = AV_ONCE_INIT;
 
@@ -158,7 +158,7 @@  av_cold void ff_msmpeg4_common_init(MpegEncContext *s)
 }
 
 /* predict coded block */
-int ff_msmpeg4_coded_block_pred(MpegEncContext * s, int n, uint8_t **coded_block_ptr)
+int ff_msmpeg4_coded_block_pred(MPVContext * s, int n, uint8_t **coded_block_ptr)
 {
     int xy, wrap, pred, a, b, c;
 
@@ -198,7 +198,7 @@  static int get_dc(uint8_t *src, int stride, int scale, int block_size)
 }
 
 /* dir = 0: left, dir = 1: top prediction */
-int ff_msmpeg4_pred_dc(MpegEncContext *s, int n,
+int ff_msmpeg4_pred_dc(MPVContext *s, int n,
                        int16_t **dc_val_ptr, int *dir_ptr)
 {
     int a, b, c, wrap, pred, scale;
diff --git a/libavcodec/msmpeg4.h b/libavcodec/msmpeg4.h
index dbe650cbbc..8d8ac31813 100644
--- a/libavcodec/msmpeg4.h
+++ b/libavcodec/msmpeg4.h
@@ -41,17 +41,17 @@ 
 extern VLC ff_mb_non_intra_vlc[4];
 extern VLC ff_inter_intra_vlc;
 
-void ff_msmpeg4_common_init(MpegEncContext *s);
-int ff_msmpeg4_coded_block_pred(MpegEncContext * s, int n,
+void ff_msmpeg4_common_init(MPVMainContext *s);
+int ff_msmpeg4_coded_block_pred(MPVContext * s, int n,
                                 uint8_t **coded_block_ptr);
 
 int ff_msmpeg4_decode_init(AVCodecContext *avctx);
-int ff_msmpeg4_decode_picture_header(MpegEncContext *s);
-int ff_msmpeg4_decode_ext_header(MpegEncContext *s, int buf_size);
-void ff_msmpeg4_decode_motion(MpegEncContext * s, int *mx_ptr, int *my_ptr);
-int ff_msmpeg4_decode_block(MpegEncContext * s, int16_t * block,
+int ff_msmpeg4_decode_picture_header(MPVMainDecContext *s);
+int ff_msmpeg4_decode_ext_header(MPVMainDecContext *s, int buf_size);
+void ff_msmpeg4_decode_motion(MPVDecContext * s, int *mx_ptr, int *my_ptr);
+int ff_msmpeg4_decode_block(MPVDecContext * s, int16_t * block,
                             int n, int coded, const uint8_t *scan_table);
-int ff_msmpeg4_pred_dc(MpegEncContext *s, int n,
+int ff_msmpeg4_pred_dc(MPVContext *s, int n,
                        int16_t **dc_val_ptr, int *dir_ptr);
 
 
diff --git a/libavcodec/msmpeg4dec.c b/libavcodec/msmpeg4dec.c
index 77ad8a8ff4..4bcb119923 100644
--- a/libavcodec/msmpeg4dec.c
+++ b/libavcodec/msmpeg4dec.c
@@ -44,7 +44,7 @@ 
 
 #define DEFAULT_INTER_INDEX 3
 
-static inline int msmpeg4v1_pred_dc(MpegEncContext * s, int n,
+static inline int msmpeg4v1_pred_dc(MPVDecContext * s, int n,
                                     int32_t **dc_val_ptr)
 {
     int i;
@@ -70,7 +70,7 @@  static VLC v2_mb_type_vlc;
 VLC ff_inter_intra_vlc;
 
 /* This is identical to H.263 except that its range is multiplied by 2. */
-static int msmpeg4v2_decode_motion(MpegEncContext * s, int pred, int f_code)
+static int msmpeg4v2_decode_motion(MPVDecContext * s, int pred, int f_code)
 {
     int code, val, sign, shift;
 
@@ -101,7 +101,7 @@  static int msmpeg4v2_decode_motion(MpegEncContext * s, int pred, int f_code)
     return val;
 }
 
-static int msmpeg4v12_decode_mb(MpegEncContext *s, int16_t block[6][64])
+static int msmpeg4v12_decode_mb(MPVDecContext *s, int16_t block[6][64])
 {
     int cbp, code, i;
     uint32_t * const mb_type_ptr = &s->current_picture.mb_type[s->mb_x + s->mb_y*s->mb_stride];
@@ -202,7 +202,7 @@  static int msmpeg4v12_decode_mb(MpegEncContext *s, int16_t block[6][64])
     return 0;
 }
 
-static int msmpeg4v34_decode_mb(MpegEncContext *s, int16_t block[6][64])
+static int msmpeg4v34_decode_mb(MPVDecContext *s, int16_t block[6][64])
 {
     int cbp, code, i;
     uint8_t *coded_val;
@@ -295,7 +295,7 @@  static int msmpeg4v34_decode_mb(MpegEncContext *s, int16_t block[6][64])
 /* init all vlc decoding tables */
 av_cold int ff_msmpeg4_decode_init(AVCodecContext *avctx)
 {
-    MpegEncContext *s = avctx->priv_data;
+    MPVMainDecContext *const s = avctx->priv_data;
     static volatile int done = 0;
     int ret;
     MVTable *mv;
@@ -398,7 +398,7 @@  av_cold int ff_msmpeg4_decode_init(AVCodecContext *avctx)
     return 0;
 }
 
-int ff_msmpeg4_decode_picture_header(MpegEncContext * s)
+int ff_msmpeg4_decode_picture_header(MPVMainDecContext * s)
 {
     int code;
 
@@ -554,7 +554,7 @@  int ff_msmpeg4_decode_picture_header(MpegEncContext * s)
     return 0;
 }
 
-int ff_msmpeg4_decode_ext_header(MpegEncContext * s, int buf_size)
+int ff_msmpeg4_decode_ext_header(MPVMainDecContext *s, int buf_size)
 {
     int left= buf_size*8 - get_bits_count(&s->gb);
     int length= s->msmpeg4_version>=3 ? 17 : 16;
@@ -582,7 +582,7 @@  int ff_msmpeg4_decode_ext_header(MpegEncContext * s, int buf_size)
     return 0;
 }
 
-static int msmpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr)
+static int msmpeg4_decode_dc(MPVDecContext * s, int n, int *dir_ptr)
 {
     int level, pred;
 
@@ -638,7 +638,7 @@  static int msmpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr)
     return level;
 }
 
-int ff_msmpeg4_decode_block(MpegEncContext * s, int16_t * block,
+int ff_msmpeg4_decode_block(MPVDecContext * s, int16_t * block,
                               int n, int coded, const uint8_t *scan_table)
 {
     int level, i, last, run, run_diff;
@@ -825,7 +825,7 @@  int ff_msmpeg4_decode_block(MpegEncContext * s, int16_t * block,
     return 0;
 }
 
-void ff_msmpeg4_decode_motion(MpegEncContext *s, int *mx_ptr, int *my_ptr)
+void ff_msmpeg4_decode_motion(MPVDecContext *s, int *mx_ptr, int *my_ptr)
 {
     MVTable *mv;
     int code, mx, my;
@@ -862,7 +862,7 @@  const AVCodec ff_msmpeg4v1_decoder = {
     .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 1"),
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_MSMPEG4V1,
-    .priv_data_size = sizeof(MpegEncContext),
+    .priv_data_size = sizeof(MPVMainDecContext),
     .init           = ff_msmpeg4_decode_init,
     .close          = ff_h263_decode_end,
     .decode         = ff_h263_decode_frame,
@@ -880,7 +880,7 @@  const AVCodec ff_msmpeg4v2_decoder = {
     .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 2"),
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_MSMPEG4V2,
-    .priv_data_size = sizeof(MpegEncContext),
+    .priv_data_size = sizeof(MPVMainDecContext),
     .init           = ff_msmpeg4_decode_init,
     .close          = ff_h263_decode_end,
     .decode         = ff_h263_decode_frame,
@@ -898,7 +898,7 @@  const AVCodec ff_msmpeg4v3_decoder = {
     .long_name      = NULL_IF_CONFIG_SMALL("MPEG-4 part 2 Microsoft variant version 3"),
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_MSMPEG4V3,
-    .priv_data_size = sizeof(MpegEncContext),
+    .priv_data_size = sizeof(MPVMainDecContext),
     .init           = ff_msmpeg4_decode_init,
     .close          = ff_h263_decode_end,
     .decode         = ff_h263_decode_frame,
@@ -916,7 +916,7 @@  const AVCodec ff_wmv1_decoder = {
     .long_name      = NULL_IF_CONFIG_SMALL("Windows Media Video 7"),
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_WMV1,
-    .priv_data_size = sizeof(MpegEncContext),
+    .priv_data_size = sizeof(MPVMainDecContext),
     .init           = ff_msmpeg4_decode_init,
     .close          = ff_h263_decode_end,
     .decode         = ff_h263_decode_frame,
diff --git a/libavcodec/msmpeg4enc.c b/libavcodec/msmpeg4enc.c
index 5e1ed9cc27..0c4d777cc2 100644
--- a/libavcodec/msmpeg4enc.c
+++ b/libavcodec/msmpeg4enc.c
@@ -136,7 +136,7 @@  static av_cold void msmpeg4_encode_init_static(void)
     }
 }
 
-av_cold void ff_msmpeg4_encode_init(MpegEncContext *s)
+av_cold void ff_msmpeg4_encode_init(MPVMainEncContext *s)
 {
     static AVOnce init_static_once = AV_ONCE_INIT;
 
@@ -152,7 +152,7 @@  av_cold void ff_msmpeg4_encode_init(MpegEncContext *s)
 
 static void find_best_tables(MSMPEG4EncContext *ms)
 {
-    MpegEncContext *const s = &ms->s;
+    MPVEncContext *const s = &ms->s;
     int i;
     int best        = 0, best_size        = INT_MAX;
     int chroma_best = 0, best_chroma_size = INT_MAX;
@@ -216,7 +216,7 @@  static void find_best_tables(MSMPEG4EncContext *ms)
 }
 
 /* write MSMPEG4 compatible frame header */
-void ff_msmpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
+void ff_msmpeg4_encode_picture_header(MPVMainEncContext * s, int picture_number)
 {
     MSMPEG4EncContext *const ms = (MSMPEG4EncContext*)s;
 
@@ -278,7 +278,7 @@  void ff_msmpeg4_encode_picture_header(MpegEncContext * s, int picture_number)
     s->esc3_run_length= 0;
 }
 
-void ff_msmpeg4_encode_ext_header(MpegEncContext * s)
+void ff_msmpeg4_encode_ext_header(MPVEncContext * s)
 {
         unsigned fps = s->avctx->time_base.den / s->avctx->time_base.num / FFMAX(s->avctx->ticks_per_frame, 1);
         put_bits(&s->pb, 5, FFMIN(fps, 31)); //yes 29.97 -> 29
@@ -291,7 +291,7 @@  void ff_msmpeg4_encode_ext_header(MpegEncContext * s)
             av_assert0(s->flipflop_rounding==0);
 }
 
-void ff_msmpeg4_encode_motion(MpegEncContext * s,
+void ff_msmpeg4_encode_motion(MPVEncContext * s,
                                   int mx, int my)
 {
     int code;
@@ -324,7 +324,8 @@  void ff_msmpeg4_encode_motion(MpegEncContext * s,
     }
 }
 
-void ff_msmpeg4_handle_slices(MpegEncContext *s){
+void ff_msmpeg4_handle_slices(MPVEncContext *s)
+{
     if (s->mb_x == 0) {
         if (s->slice_height && (s->mb_y % s->slice_height) == 0) {
             if(s->msmpeg4_version < 4){
@@ -337,7 +338,7 @@  void ff_msmpeg4_handle_slices(MpegEncContext *s){
     }
 }
 
-static void msmpeg4v2_encode_motion(MpegEncContext * s, int val)
+static void msmpeg4v2_encode_motion(MPVEncContext * s, int val)
 {
     int range, bit_size, sign, code, bits;
 
@@ -370,7 +371,7 @@  static void msmpeg4v2_encode_motion(MpegEncContext * s, int val)
     }
 }
 
-void ff_msmpeg4_encode_mb(MpegEncContext * s,
+void ff_msmpeg4_encode_mb(MPVEncContext * s,
                           int16_t block[6][64],
                           int motion_x, int motion_y)
 {
@@ -493,7 +494,7 @@  void ff_msmpeg4_encode_mb(MpegEncContext * s,
     }
 }
 
-static void msmpeg4_encode_dc(MpegEncContext * s, int level, int n, int *dir_ptr)
+static void msmpeg4_encode_dc(MPVEncContext * s, int level, int n, int *dir_ptr)
 {
     int sign, code;
     int pred;
@@ -556,7 +557,7 @@  static void msmpeg4_encode_dc(MpegEncContext * s, int level, int n, int *dir_ptr
 
 /* Encoding of a block; very similar to MPEG-4 except for a different
  * escape coding (same as H.263) and more VLC tables. */
-void ff_msmpeg4_encode_block(MpegEncContext * s, int16_t * block, int n)
+void ff_msmpeg4_encode_block(MPVEncContext * s, int16_t * block, int n)
 {
     MSMPEG4EncContext *const ms = (MSMPEG4EncContext*)s;
     int level, run, last, i, j, last_index;
diff --git a/libavcodec/msmpeg4enc.h b/libavcodec/msmpeg4enc.h
index 98044913b1..80f7b0709e 100644
--- a/libavcodec/msmpeg4enc.h
+++ b/libavcodec/msmpeg4enc.h
@@ -23,25 +23,25 @@ 
 #define AVCODEC_MSMPEG4ENC_H
 
 #include "config.h"
-#include "mpegvideo.h"
+#include "mpegvideoenc.h"
 #include "put_bits.h"
 #include "rl.h"
 
 typedef struct MSMPEG4EncContext {
-    MpegEncContext s;
+    MPVMainEncContext s;
 
     /** [mb_intra][isChroma][level][run][last] */
     unsigned ac_stats[2][2][MAX_LEVEL + 1][MAX_RUN + 1][2];
 } MSMPEG4EncContext;
 
-void ff_msmpeg4_encode_init(MpegEncContext *s);
-void ff_msmpeg4_encode_picture_header(MpegEncContext *s, int picture_number);
-void ff_msmpeg4_encode_ext_header(MpegEncContext *s);
-void ff_msmpeg4_encode_mb(MpegEncContext *s, int16_t block[6][64],
+void ff_msmpeg4_encode_init(MPVMainEncContext *s);
+void ff_msmpeg4_encode_picture_header(MPVMainEncContext *s, int picture_number);
+void ff_msmpeg4_encode_ext_header(MPVEncContext *s);
+void ff_msmpeg4_encode_mb(MPVEncContext *s, int16_t block[6][64],
                           int motion_x, int motion_y);
-void ff_msmpeg4_encode_block(MpegEncContext * s, int16_t * block, int n);
-void ff_msmpeg4_handle_slices(MpegEncContext *s);
-void ff_msmpeg4_encode_motion(MpegEncContext * s, int mx, int my);
+void ff_msmpeg4_encode_block(MPVEncContext * s, int16_t * block, int n);
+void ff_msmpeg4_handle_slices(MPVEncContext *s);
+void ff_msmpeg4_encode_motion(MPVEncContext * s, int mx, int my);
 
 void ff_msmpeg4_code012(PutBitContext *pb, int n);
 
diff --git a/libavcodec/mss2.c b/libavcodec/mss2.c
index d7a19f6ad2..64888540ac 100644
--- a/libavcodec/mss2.c
+++ b/libavcodec/mss2.c
@@ -381,7 +381,7 @@  static int decode_wmv9(AVCodecContext *avctx, const uint8_t *buf, int buf_size,
     MSS2Context *ctx  = avctx->priv_data;
     MSS12Context *c   = &ctx->c;
     VC1Context *v     = avctx->priv_data;
-    MpegEncContext *s = &v->s;
+    MPVMainDecContext *const s = &v->s;
     AVFrame *f;
     int ret;
 
diff --git a/libavcodec/neon/mpegvideo.c b/libavcodec/neon/mpegvideo.c
index 8f05d77a65..e5c21290fd 100644
--- a/libavcodec/neon/mpegvideo.c
+++ b/libavcodec/neon/mpegvideo.c
@@ -83,7 +83,7 @@  static void inline ff_dct_unquantize_h263_neon(int qscale, int qadd, int nCoeffs
     vst1_s16(block, d0s16);
 }
 
-static void dct_unquantize_h263_inter_neon(MpegEncContext *s, int16_t *block,
+static void dct_unquantize_h263_inter_neon(MPVContext *s, int16_t *block,
                                            int n, int qscale)
 {
     int nCoeffs = s->inter_scantable.raster_end[s->block_last_index[n]];
@@ -92,7 +92,7 @@  static void dct_unquantize_h263_inter_neon(MpegEncContext *s, int16_t *block,
     ff_dct_unquantize_h263_neon(qscale, qadd, nCoeffs + 1, block);
 }
 
-static void dct_unquantize_h263_intra_neon(MpegEncContext *s, int16_t *block,
+static void dct_unquantize_h263_intra_neon(MPVContext *s, int16_t *block,
                                            int n, int qscale)
 {
     int qadd;
@@ -124,7 +124,7 @@  static void dct_unquantize_h263_intra_neon(MpegEncContext *s, int16_t *block,
 }
 
 
-av_cold void ff_mpv_common_init_neon(MpegEncContext *s)
+av_cold void ff_mpv_common_init_neon(MPVMainContext *s)
 {
     int cpu_flags = av_get_cpu_flags();
 
diff --git a/libavcodec/nvdec_mpeg12.c b/libavcodec/nvdec_mpeg12.c
index b8db4250bb..c3b92f2111 100644
--- a/libavcodec/nvdec_mpeg12.c
+++ b/libavcodec/nvdec_mpeg12.c
@@ -28,7 +28,7 @@ 
 
 static int nvdec_mpeg12_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
 {
-    MpegEncContext *s = avctx->priv_data;
+    MPVDecContext *const s = avctx->priv_data;
 
     NVDECContext      *ctx = avctx->internal->hwaccel_priv_data;
     CUVIDPICPARAMS     *pp = &ctx->pic_params;
diff --git a/libavcodec/nvdec_mpeg4.c b/libavcodec/nvdec_mpeg4.c
index 2b2b1b87a1..5d3ed31a69 100644
--- a/libavcodec/nvdec_mpeg4.c
+++ b/libavcodec/nvdec_mpeg4.c
@@ -29,7 +29,7 @@ 
 static int nvdec_mpeg4_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
 {
     Mpeg4DecContext *m = avctx->priv_data;
-    MpegEncContext *s = &m->m;
+    MPVDecContext *const s = &m->m;
 
     NVDECContext      *ctx = avctx->internal->hwaccel_priv_data;
     CUVIDPICPARAMS     *pp = &ctx->pic_params;
diff --git a/libavcodec/nvdec_vc1.c b/libavcodec/nvdec_vc1.c
index 10e7b5ab0d..9dc9f49b6a 100644
--- a/libavcodec/nvdec_vc1.c
+++ b/libavcodec/nvdec_vc1.c
@@ -28,7 +28,7 @@ 
 static int nvdec_vc1_start_frame(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
 {
     VC1Context *v = avctx->priv_data;
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
 
     NVDECContext      *ctx = avctx->internal->hwaccel_priv_data;
     CUVIDPICPARAMS     *pp = &ctx->pic_params;
diff --git a/libavcodec/ppc/me_cmp.c b/libavcodec/ppc/me_cmp.c
index 17f9a4f016..31a1881f01 100644
--- a/libavcodec/ppc/me_cmp.c
+++ b/libavcodec/ppc/me_cmp.c
@@ -28,7 +28,7 @@ 
 #include "libavutil/ppc/util_altivec.h"
 
 #include "libavcodec/avcodec.h"
-#include "libavcodec/mpegvideo.h"
+#include "libavcodec/mpegvideoenc.h"
 #include "libavcodec/me_cmp.h"
 
 #if HAVE_ALTIVEC
@@ -51,7 +51,7 @@ 
     iv = vec_vsx_ld(1,  pix);\
 }
 #endif
-static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+static int sad16_x2_altivec(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                             ptrdiff_t stride, int h)
 {
     int i;
@@ -91,7 +91,7 @@  static int sad16_x2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
     return s;
 }
 
-static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+static int sad16_y2_altivec(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                             ptrdiff_t stride, int h)
 {
     int i;
@@ -141,7 +141,7 @@  static int sad16_y2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
     return s;
 }
 
-static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+static int sad16_xy2_altivec(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                              ptrdiff_t stride, int h)
 {
     int i;
@@ -230,7 +230,7 @@  static int sad16_xy2_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
     return s;
 }
 
-static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+static int sad16_altivec(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                          ptrdiff_t stride, int h)
 {
     int i;
@@ -265,7 +265,7 @@  static int sad16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
     return s;
 }
 
-static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+static int sad8_altivec(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                         ptrdiff_t stride, int h)
 {
     int i;
@@ -309,7 +309,7 @@  static int sad8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 
 /* Sum of Squared Errors for an 8x8 block, AltiVec-enhanced.
  * It's the sad8_altivec code above w/ squaring added. */
-static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+static int sse8_altivec(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                         ptrdiff_t stride, int h)
 {
     int i;
@@ -354,7 +354,7 @@  static int sse8_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
 
 /* Sum of Squared Errors for a 16x16 block, AltiVec-enhanced.
  * It's the sad16_altivec code above w/ squaring added. */
-static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+static int sse16_altivec(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                          ptrdiff_t stride, int h)
 {
     int i;
@@ -392,7 +392,7 @@  static int sse16_altivec(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
     return s;
 }
 
-static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
+static int hadamard8_diff8x8_altivec(MPVEncContext *s, uint8_t *dst,
                                      uint8_t *src, ptrdiff_t stride, int h)
 {
     int __attribute__((aligned(16))) sum;
@@ -518,7 +518,7 @@  static int hadamard8_diff8x8_altivec(MpegEncContext *s, uint8_t *dst,
  * On the 970, the hand-made RA is still a win (around 690 vs. around 780),
  * but xlc goes to around 660 on the regular C code...
  */
-static int hadamard8_diff16x8_altivec(MpegEncContext *s, uint8_t *dst,
+static int hadamard8_diff16x8_altivec(MPVEncContext *s, uint8_t *dst,
                                       uint8_t *src, ptrdiff_t stride, int h)
 {
     int __attribute__((aligned(16))) sum;
@@ -709,7 +709,7 @@  static int hadamard8_diff16x8_altivec(MpegEncContext *s, uint8_t *dst,
     return sum;
 }
 
-static int hadamard8_diff16_altivec(MpegEncContext *s, uint8_t *dst,
+static int hadamard8_diff16_altivec(MPVEncContext *s, uint8_t *dst,
                                     uint8_t *src, ptrdiff_t stride, int h)
 {
     int score = hadamard8_diff16x8_altivec(s, dst, src, stride, 8);
diff --git a/libavcodec/ppc/mpegvideo_altivec.c b/libavcodec/ppc/mpegvideo_altivec.c
index bcb59ba845..e8f8413a39 100644
--- a/libavcodec/ppc/mpegvideo_altivec.c
+++ b/libavcodec/ppc/mpegvideo_altivec.c
@@ -38,7 +38,7 @@ 
 
 /* AltiVec version of dct_unquantize_h263
    this code assumes `block' is 16 bytes-aligned */
-static void dct_unquantize_h263_altivec(MpegEncContext *s,
+static void dct_unquantize_h263_altivec(MPVContext *s,
                                  int16_t *block, int n, int qscale)
 {
     int i, level, qmul, qadd;
@@ -116,7 +116,7 @@  static void dct_unquantize_h263_altivec(MpegEncContext *s,
 
 #endif /* HAVE_ALTIVEC */
 
-av_cold void ff_mpv_common_init_ppc(MpegEncContext *s)
+av_cold void ff_mpv_common_init_ppc(MPVMainContext *s)
 {
 #if HAVE_ALTIVEC
     if (!PPC_ALTIVEC(av_get_cpu_flags()))
diff --git a/libavcodec/ratecontrol.c b/libavcodec/ratecontrol.c
index f814e23ed5..c5088d6790 100644
--- a/libavcodec/ratecontrol.c
+++ b/libavcodec/ratecontrol.c
@@ -35,7 +35,7 @@ 
 #include "mpegvideoenc.h"
 #include "libavutil/eval.h"
 
-void ff_write_pass1_stats(MpegEncContext *s)
+void ff_write_pass1_stats(MPVMainEncContext *s)
 {
     snprintf(s->avctx->stats_out, 256,
              "in:%d out:%d type:%d q:%d itex:%d ptex:%d mv:%d misc:%d "
@@ -77,7 +77,7 @@  static inline double bits2qp(RateControlEntry *rce, double bits)
     return rce->qscale * (double)(rce->i_tex_bits + rce->p_tex_bits + 1) / bits;
 }
 
-static double get_diff_limited_q(MpegEncContext *s, RateControlEntry *rce, double q)
+static double get_diff_limited_q(MPVMainEncContext *s, RateControlEntry *rce, double q)
 {
     RateControlContext *rcc   = &s->rc_context;
     AVCodecContext *a         = s->avctx;
@@ -116,7 +116,7 @@  static double get_diff_limited_q(MpegEncContext *s, RateControlEntry *rce, doubl
 /**
  * Get the qmin & qmax for pict_type.
  */
-static void get_qminmax(int *qmin_ret, int *qmax_ret, MpegEncContext *s, int pict_type)
+static void get_qminmax(int *qmin_ret, int *qmax_ret, MPVMainEncContext *s, int pict_type)
 {
     int qmin = s->lmin;
     int qmax = s->lmax;
@@ -144,7 +144,7 @@  static void get_qminmax(int *qmin_ret, int *qmax_ret, MpegEncContext *s, int pic
     *qmax_ret = qmax;
 }
 
-static double modify_qscale(MpegEncContext *s, RateControlEntry *rce,
+static double modify_qscale(MPVMainEncContext *s, RateControlEntry *rce,
                             double q, int frame_num)
 {
     RateControlContext *rcc  = &s->rc_context;
@@ -235,7 +235,7 @@  static double modify_qscale(MpegEncContext *s, RateControlEntry *rce,
 /**
  * Modify the bitrate curve from pass1 for one frame.
  */
-static double get_qscale(MpegEncContext *s, RateControlEntry *rce,
+static double get_qscale(MPVMainEncContext *s, RateControlEntry *rce,
                          double rate_factor, int frame_num)
 {
     RateControlContext *rcc = &s->rc_context;
@@ -308,7 +308,7 @@  static double get_qscale(MpegEncContext *s, RateControlEntry *rce,
     return q;
 }
 
-static int init_pass2(MpegEncContext *s)
+static int init_pass2(MPVMainEncContext *s)
 {
     RateControlContext *rcc = &s->rc_context;
     AVCodecContext *a       = s->avctx;
@@ -469,7 +469,7 @@  static int init_pass2(MpegEncContext *s)
     return 0;
 }
 
-av_cold int ff_rate_control_init(MpegEncContext *s)
+av_cold int ff_rate_control_init(MPVMainEncContext *s)
 {
     RateControlContext *rcc = &s->rc_context;
     int i, res;
@@ -669,7 +669,7 @@  av_cold int ff_rate_control_init(MpegEncContext *s)
     return 0;
 }
 
-av_cold void ff_rate_control_uninit(MpegEncContext *s)
+av_cold void ff_rate_control_uninit(MPVMainEncContext *s)
 {
     RateControlContext *rcc = &s->rc_context;
     emms_c();
@@ -678,7 +678,7 @@  av_cold void ff_rate_control_uninit(MpegEncContext *s)
     av_freep(&rcc->entry);
 }
 
-int ff_vbv_update(MpegEncContext *s, int frame_size)
+int ff_vbv_update(MPVMainEncContext *s, int frame_size)
 {
     RateControlContext *rcc = &s->rc_context;
     const double fps        = get_fps(s->avctx);
@@ -737,7 +737,7 @@  static void update_predictor(Predictor *p, double q, double var, double size)
     p->coeff += new_coeff;
 }
 
-static void adaptive_quantization(MpegEncContext *s, double q)
+static void adaptive_quantization(MPVMainEncContext *s, double q)
 {
     int i;
     const float lumi_masking         = s->avctx->lumi_masking / (128.0 * 128.0);
@@ -854,7 +854,7 @@  static void adaptive_quantization(MpegEncContext *s, double q)
     }
 }
 
-void ff_get_2pass_fcode(MpegEncContext *s)
+void ff_get_2pass_fcode(MPVMainEncContext *s)
 {
     RateControlContext *rcc = &s->rc_context;
     RateControlEntry *rce   = &rcc->entry[s->picture_number];
@@ -865,7 +865,7 @@  void ff_get_2pass_fcode(MpegEncContext *s)
 
 // FIXME rd or at least approx for dquant
 
-float ff_rate_estimate_qscale(MpegEncContext *s, int dry_run)
+float ff_rate_estimate_qscale(MPVMainEncContext *s, int dry_run)
 {
     float q;
     int qmin, qmax;
diff --git a/libavcodec/ratecontrol.h b/libavcodec/ratecontrol.h
index 2a7aaec644..99715ebdfb 100644
--- a/libavcodec/ratecontrol.h
+++ b/libavcodec/ratecontrol.h
@@ -86,14 +86,16 @@  typedef struct RateControlContext{
     AVExpr * rc_eq_eval;
 }RateControlContext;
 
-struct MpegEncContext;
+#define MPVMainEncContext MPVContext
+struct MPVMainEncContext;
 
 /* rate control */
-int ff_rate_control_init(struct MpegEncContext *s);
-float ff_rate_estimate_qscale(struct MpegEncContext *s, int dry_run);
-void ff_write_pass1_stats(struct MpegEncContext *s);
-void ff_rate_control_uninit(struct MpegEncContext *s);
-int ff_vbv_update(struct MpegEncContext *s, int frame_size);
-void ff_get_2pass_fcode(struct MpegEncContext *s);
+int ff_rate_control_init(struct MPVMainEncContext *s);
+float ff_rate_estimate_qscale(struct MPVMainEncContext *s, int dry_run);
+void ff_write_pass1_stats(struct MPVMainEncContext *s);
+void ff_rate_control_uninit(struct MPVMainEncContext *s);
+int ff_vbv_update(struct MPVMainEncContext *s, int frame_size);
+void ff_get_2pass_fcode(struct MPVMainEncContext *s);
+#undef MPVMainEncContext
 
 #endif /* AVCODEC_RATECONTROL_H */
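
The #define/forward-declaration pair added above lets ratecontrol.h keep declaring its API in terms of MPVMainEncContext without including mpegvideoenc.h, while the trailing #undef keeps the macro from leaking into files that include this header. A self-contained sketch of the pattern, assuming the context struct tag is MPVContext and using hypothetical ctx.h/rc.h/rc.c names:

/* ctx.h: stand-in for the header that really defines the context type. */
#ifndef CTX_H
#define CTX_H
typedef struct MPVContext { int picture_number; } MPVContext;
typedef MPVContext MPVMainEncContext;
#endif

/* rc.h: stand-in for ratecontrol.h; usable without including ctx.h. */
#ifndef RC_H
#define RC_H
#define MPVMainEncContext MPVContext
struct MPVMainEncContext;                    /* expands to: struct MPVContext; */
int rc_frame_number(struct MPVMainEncContext *s);
#undef MPVMainEncContext                     /* do not leak the macro */
#endif

/* rc.c: with both headers visible, the typedef and the prototype both
 * name struct MPVContext, so the definition matches the declaration. */
#include "ctx.h"
#include "rc.h"

int rc_frame_number(MPVMainEncContext *s)
{
    return s->picture_number;
}

The same trick is used for MPVEncContext in snow_dwt.h further below.
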
diff --git a/libavcodec/rv10.c b/libavcodec/rv10.c
index 4dfaa3460d..185aacb5c7 100644
--- a/libavcodec/rv10.c
+++ b/libavcodec/rv10.c
@@ -51,7 +51,7 @@ 
 #define DC_VLC_BITS        9
 
 typedef struct RVDecContext {
-    MpegEncContext m;
+    MPVMainDecContext m;
     int sub_id;
     int orig_width, orig_height;
 } RVDecContext;
@@ -79,7 +79,7 @@  static const uint16_t rv_chrom_len_count[15] = {
 
 static VLC rv_dc_lum, rv_dc_chrom;
 
-int ff_rv_decode_dc(MpegEncContext *s, int n)
+int ff_rv_decode_dc(MPVDecContext *s, int n)
 {
     int code;
 
@@ -96,7 +96,7 @@  int ff_rv_decode_dc(MpegEncContext *s, int n)
 }
 
 /* read RV 1.0 compatible frame header */
-static int rv10_decode_picture_header(MpegEncContext *s)
+static int rv10_decode_picture_header(MPVDecContext *s)
 {
     int mb_count, pb_frame, marker, mb_xy;
 
@@ -156,7 +156,7 @@  static int rv10_decode_picture_header(MpegEncContext *s)
 
 static int rv20_decode_picture_header(RVDecContext *rv, int whole_size)
 {
-    MpegEncContext *s = &rv->m;
+    MPVMainDecContext *const s = &rv->m;
     int seq, mb_pos, i, ret;
     int rpr_max;
 
@@ -366,7 +366,7 @@  static av_cold int rv10_decode_init(AVCodecContext *avctx)
 {
     static AVOnce init_static_once = AV_ONCE_INIT;
     RVDecContext *rv = avctx->priv_data;
-    MpegEncContext *s = &rv->m;
+    MPVMainDecContext *const s = &rv->m;
     int major_ver, minor_ver, micro_ver, ret;
 
     if (avctx->extradata_size < 8) {
@@ -432,7 +432,7 @@  static av_cold int rv10_decode_init(AVCodecContext *avctx)
 
 static av_cold int rv10_decode_end(AVCodecContext *avctx)
 {
-    MpegEncContext *s = avctx->priv_data;
+    MPVMainDecContext *const s = avctx->priv_data;
 
     ff_mpv_common_end(s);
     return 0;
@@ -442,7 +442,7 @@  static int rv10_decode_packet(AVCodecContext *avctx, const uint8_t *buf,
                               int buf_size, int buf_size2, int whole_size)
 {
     RVDecContext *rv = avctx->priv_data;
-    MpegEncContext *s = &rv->m;
+    MPVDecContext *const s = &rv->m;
     int mb_count, mb_pos, left, start_mb_x, active_bits_size, ret;
 
     active_bits_size = buf_size * 8;
@@ -596,7 +596,7 @@  static int rv10_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
 {
     const uint8_t *buf = avpkt->data;
     int buf_size       = avpkt->size;
-    MpegEncContext *s = avctx->priv_data;
+    MPVMainDecContext *const s = avctx->priv_data;
     AVFrame *pict = data;
     int i, ret;
     int slice_count;
diff --git a/libavcodec/rv10.h b/libavcodec/rv10.h
index 364270e76a..bbb0b97c7c 100644
--- a/libavcodec/rv10.h
+++ b/libavcodec/rv10.h
@@ -24,10 +24,11 @@ 
 #include <stdint.h>
 
 #include "mpegvideo.h"
+#include "mpegvideoenc.h"
 
-int ff_rv_decode_dc(MpegEncContext *s, int n);
+int ff_rv_decode_dc(MPVDecContext *s, int n);
 
-int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number);
-void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number);
+int ff_rv10_encode_picture_header(MPVMainEncContext *s, int picture_number);
+void ff_rv20_encode_picture_header(MPVMainEncContext *s, int picture_number);
 
 #endif /* AVCODEC_RV10_H */
diff --git a/libavcodec/rv10enc.c b/libavcodec/rv10enc.c
index 0c74da1b3c..25f3179196 100644
--- a/libavcodec/rv10enc.c
+++ b/libavcodec/rv10enc.c
@@ -30,7 +30,7 @@ 
 #include "put_bits.h"
 #include "rv10.h"
 
-int ff_rv10_encode_picture_header(MpegEncContext *s, int picture_number)
+int ff_rv10_encode_picture_header(MPVMainEncContext *s, int picture_number)
 {
     int full_frame= 0;
 
@@ -70,7 +70,7 @@  const AVCodec ff_rv10_encoder = {
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_RV10,
     .priv_class     = &ff_mpv_enc_class,
-    .priv_data_size = sizeof(MpegEncContext),
+    .priv_data_size = sizeof(MPVMainEncContext),
     .init           = ff_mpv_encode_init,
     .encode2        = ff_mpv_encode_picture,
     .close          = ff_mpv_encode_end,
diff --git a/libavcodec/rv20enc.c b/libavcodec/rv20enc.c
index 9f935d61e2..22bcad9837 100644
--- a/libavcodec/rv20enc.c
+++ b/libavcodec/rv20enc.c
@@ -33,7 +33,8 @@ 
 #include "put_bits.h"
 #include "rv10.h"
 
-void ff_rv20_encode_picture_header(MpegEncContext *s, int picture_number){
+void ff_rv20_encode_picture_header(MPVMainEncContext *s, int picture_number)
+{
     put_bits(&s->pb, 2, s->pict_type); //I 0 vs. 1 ?
     put_bits(&s->pb, 1, 0);     /* unknown bit */
     put_bits(&s->pb, 5, s->qscale);
@@ -67,7 +68,7 @@  const AVCodec ff_rv20_encoder = {
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_RV20,
     .priv_class     = &ff_mpv_enc_class,
-    .priv_data_size = sizeof(MpegEncContext),
+    .priv_data_size = sizeof(MPVMainEncContext),
     .init           = ff_mpv_encode_init,
     .encode2        = ff_mpv_encode_picture,
     .close          = ff_mpv_encode_end,
diff --git a/libavcodec/rv30.c b/libavcodec/rv30.c
index d11b8430e7..5c828741aa 100644
--- a/libavcodec/rv30.c
+++ b/libavcodec/rv30.c
@@ -116,7 +116,7 @@  static int rv30_decode_mb_info(RV34DecContext *r)
 {
     static const int rv30_p_types[6] = { RV34_MB_SKIP, RV34_MB_P_16x16, RV34_MB_P_8x8, -1, RV34_MB_TYPE_INTRA, RV34_MB_TYPE_INTRA16x16 };
     static const int rv30_b_types[6] = { RV34_MB_SKIP, RV34_MB_B_DIRECT, RV34_MB_B_FORWARD, RV34_MB_B_BACKWARD, RV34_MB_TYPE_INTRA, RV34_MB_TYPE_INTRA16x16 };
-    MpegEncContext *s = &r->s;
+    MPVDecContext *const s = &r->s;
     GetBitContext *gb = &s->gb;
     unsigned code = get_interleaved_ue_golomb(gb);
 
@@ -151,7 +151,7 @@  static inline void rv30_weak_loop_filter(uint8_t *src, const int step,
 
 static void rv30_loop_filter(RV34DecContext *r, int row)
 {
-    MpegEncContext *s = &r->s;
+    MPVDecContext *const s = &r->s;
     int mb_pos, mb_x;
     int i, j, k;
     uint8_t *Y, *C;
diff --git a/libavcodec/rv34.c b/libavcodec/rv34.c
index e68072de06..8ef80d5b34 100644
--- a/libavcodec/rv34.c
+++ b/libavcodec/rv34.c
@@ -345,7 +345,7 @@  static inline RV34VLC* choose_vlc_set(int quant, int mod, int type)
  */
 static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
 {
-    MpegEncContext *s = &r->s;
+    MPVDecContext *const s = &r->s;
     GetBitContext *gb = &s->gb;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
     int t;
@@ -380,7 +380,7 @@  static int rv34_decode_intra_mb_header(RV34DecContext *r, int8_t *intra_types)
  */
 static int rv34_decode_inter_mb_header(RV34DecContext *r, int8_t *intra_types)
 {
-    MpegEncContext *s = &r->s;
+    MPVDecContext *const s = &r->s;
     GetBitContext *gb = &s->gb;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
     int i, t;
@@ -458,7 +458,7 @@  static const uint8_t avail_indexes[4] = { 6, 7, 10, 11 };
  */
 static void rv34_pred_mv(RV34DecContext *r, int block_type, int subblock_no, int dmv_no)
 {
-    MpegEncContext *s = &r->s;
+    MPVDecContext *const s = &r->s;
     int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
     int A[2] = {0}, B[2], C[2];
     int i, j;
@@ -542,7 +542,7 @@  static inline void rv34_pred_b_vector(int A[2], int B[2], int C[2],
  */
 static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
 {
-    MpegEncContext *s = &r->s;
+    MPVDecContext *const s = &r->s;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
     int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
     int A[2] = { 0 }, B[2] = { 0 }, C[2] = { 0 };
@@ -594,7 +594,7 @@  static void rv34_pred_mv_b(RV34DecContext *r, int block_type, int dir)
  */
 static void rv34_pred_mv_rv3(RV34DecContext *r, int block_type, int dir)
 {
-    MpegEncContext *s = &r->s;
+    MPVDecContext *const s = &r->s;
     int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
     int A[2] = {0}, B[2], C[2];
     int i, j, k;
@@ -662,7 +662,7 @@  static inline void rv34_mc(RV34DecContext *r, const int block_type,
                           qpel_mc_func (*qpel_mc)[16],
                           h264_chroma_mc_func (*chroma_mc))
 {
-    MpegEncContext *s = &r->s;
+    MPVDecContext *const s = &r->s;
     uint8_t *Y, *U, *V, *srcY, *srcU, *srcV;
     int dxy, mx, my, umx, umy, lx, ly, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
     int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride + mv_off;
@@ -850,7 +850,7 @@  static const int num_mvs[RV34_MB_TYPES] = { 0, 0, 1, 4, 1, 1, 0, 0, 2, 2, 2, 1 }
  */
 static int rv34_decode_mv(RV34DecContext *r, int block_type)
 {
-    MpegEncContext *s = &r->s;
+    MPVDecContext *const s = &r->s;
     GetBitContext *gb = &s->gb;
     int i, j, k, l;
     int mv_pos = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
@@ -1009,7 +1009,7 @@  static inline void rv34_process_block(RV34DecContext *r,
                                       uint8_t *pdst, int stride,
                                       int fc, int sc, int q_dc, int q_ac)
 {
-    MpegEncContext *s = &r->s;
+    MPVDecContext *const s = &r->s;
     int16_t *ptr = s->block[0];
     int has_ac = rv34_decode_block(ptr, &s->gb, r->cur_vlcs,
                                    fc, sc, q_dc, q_ac, q_ac);
@@ -1024,7 +1024,7 @@  static inline void rv34_process_block(RV34DecContext *r,
 static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
 {
     LOCAL_ALIGNED_16(int16_t, block16, [16]);
-    MpegEncContext *s    = &r->s;
+    MPVDecContext *const s    = &r->s;
     GetBitContext  *gb   = &s->gb;
     int             q_dc = rv34_qscale_tab[ r->luma_dc_quant_i[s->qscale] ],
                     q_ac = rv34_qscale_tab[s->qscale];
@@ -1086,7 +1086,7 @@  static void rv34_output_i16x16(RV34DecContext *r, int8_t *intra_types, int cbp)
 
 static void rv34_output_intra(RV34DecContext *r, int8_t *intra_types, int cbp)
 {
-    MpegEncContext *s   = &r->s;
+    MPVDecContext *const s   = &r->s;
     uint8_t        *dst = s->dest[0];
     int      avail[6*8] = {0};
     int i, j, k;
@@ -1162,7 +1162,7 @@  static int is_mv_diff_gt_3(int16_t (*motion_val)[2], int step)
 
 static int rv34_set_deblock_coef(RV34DecContext *r)
 {
-    MpegEncContext *s = &r->s;
+    MPVDecContext *const s = &r->s;
     int hmvmask = 0, vmvmask = 0, i, j;
     int midx = s->mb_x * 2 + s->mb_y * 2 * s->b8_stride;
     int16_t (*motion_val)[2] = &s->current_picture_ptr->motion_val[0][midx];
@@ -1192,7 +1192,7 @@  static int rv34_set_deblock_coef(RV34DecContext *r)
 
 static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
 {
-    MpegEncContext *s   = &r->s;
+    MPVDecContext *const s   = &r->s;
     GetBitContext  *gb  = &s->gb;
     uint8_t        *dst = s->dest[0];
     int16_t        *ptr = s->block[0];
@@ -1300,7 +1300,7 @@  static int rv34_decode_inter_macroblock(RV34DecContext *r, int8_t *intra_types)
 
 static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
 {
-    MpegEncContext *s = &r->s;
+    MPVDecContext *const s = &r->s;
     int cbp, dist;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
 
@@ -1338,7 +1338,7 @@  static int rv34_decode_intra_macroblock(RV34DecContext *r, int8_t *intra_types)
     return 0;
 }
 
-static int check_slice_end(RV34DecContext *r, MpegEncContext *s)
+static int check_slice_end(RV34DecContext *r, MPVDecContext *s)
 {
     int bits;
     if(s->mb_y >= s->mb_height)
@@ -1403,7 +1403,7 @@  static int rv34_decoder_realloc(RV34DecContext *r)
 
 static int rv34_decode_slice(RV34DecContext *r, int end, const uint8_t* buf, int buf_size)
 {
-    MpegEncContext *s = &r->s;
+    MPVDecContext *const s = &r->s;
     GetBitContext *gb = &s->gb;
     int mb_pos, slice_type;
     int res;
@@ -1487,7 +1487,7 @@  av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
 {
     static AVOnce init_static_once = AV_ONCE_INIT;
     RV34DecContext *r = avctx->priv_data;
-    MpegEncContext *s = &r->s;
+    MPVMainDecContext *const s = &r->s;
     int ret;
 
     ff_mpv_decode_init(s, avctx);
@@ -1516,7 +1516,7 @@  av_cold int ff_rv34_decode_init(AVCodecContext *avctx)
 int ff_rv34_decode_update_thread_context(AVCodecContext *dst, const AVCodecContext *src)
 {
     RV34DecContext *r = dst->priv_data, *r1 = src->priv_data;
-    MpegEncContext * const s = &r->s, * const s1 = &r1->s;
+    MPVMainDecContext *const s = &r->s, *const s1 = &r1->s;
     int err;
 
     if (dst == src || !s1->context_initialized)
@@ -1557,7 +1557,7 @@  static int get_slice_offset(AVCodecContext *avctx, const uint8_t *buf, int n, in
 static int finish_frame(AVCodecContext *avctx, AVFrame *pict)
 {
     RV34DecContext *r = avctx->priv_data;
-    MpegEncContext *s = &r->s;
+    MPVMainDecContext *const s = &r->s;
     int got_picture = 0, ret;
 
     ff_er_frame_end(&s->er);
@@ -1601,7 +1601,7 @@  int ff_rv34_decode_frame(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     RV34DecContext *r = avctx->priv_data;
-    MpegEncContext *s = &r->s;
+    MPVMainDecContext *const s = &r->s;
     AVFrame *pict = data;
     SliceInfo si;
     int i, ret;
diff --git a/libavcodec/rv34.h b/libavcodec/rv34.h
index 53b29dc245..32ec738597 100644
--- a/libavcodec/rv34.h
+++ b/libavcodec/rv34.h
@@ -84,7 +84,7 @@  typedef struct SliceInfo{
 
 /** decoder context */
 typedef struct RV34DecContext{
-    MpegEncContext s;
+    MPVMainDecContext s;
     RV34DSPContext rdsp;
     int8_t *intra_types_hist;///< old block types, used for prediction
     int8_t *intra_types;     ///< block types
diff --git a/libavcodec/rv40.c b/libavcodec/rv40.c
index 0b3f733bc6..56ad4f45e7 100644
--- a/libavcodec/rv40.c
+++ b/libavcodec/rv40.c
@@ -168,7 +168,7 @@  static int rv40_parse_slice_header(RV34DecContext *r, GetBitContext *gb, SliceIn
  */
 static int rv40_decode_intra_types(RV34DecContext *r, GetBitContext *gb, int8_t *dst)
 {
-    MpegEncContext *s = &r->s;
+    MPVDecContext *const s = &r->s;
     int i, j, k, v;
     int A, B, C;
     int pattern;
@@ -230,7 +230,7 @@  static int rv40_decode_intra_types(RV34DecContext *r, GetBitContext *gb, int8_t
  */
 static int rv40_decode_mb_info(RV34DecContext *r)
 {
-    MpegEncContext *s = &r->s;
+    MPVDecContext *const s = &r->s;
     GetBitContext *gb = &s->gb;
     int q, i;
     int prev_type = 0;
@@ -340,7 +340,7 @@  static void rv40_adaptive_loop_filter(RV34DSPContext *rdsp,
  */
 static void rv40_loop_filter(RV34DecContext *r, int row)
 {
-    MpegEncContext *s = &r->s;
+    MPVDecContext *const s = &r->s;
     int mb_pos, mb_x;
     int i, j, k;
     uint8_t *Y, *C;
diff --git a/libavcodec/snow.h b/libavcodec/snow.h
index f5beca66e9..2e09025277 100644
--- a/libavcodec/snow.h
+++ b/libavcodec/snow.h
@@ -32,7 +32,7 @@ 
 #include "rangecoder.h"
 #include "mathops.h"
 
-#include "mpegvideo.h"
+#include "mpegvideoenc.h"
 #include "h264qpel.h"
 
 #define SNOW_MAX_PLANES 4
@@ -182,7 +182,7 @@  typedef struct SnowContext{
     int iterative_dia_size;
     int scenechange_threshold;
 
-    MpegEncContext m; // needed for motion estimation, should not be used for anything else, the idea is to eventually make the motion estimation independent of MpegEncContext, so this will be removed then (FIXME/XXX)
+    MPVMainEncContext m; // needed for motion estimation, should not be used for anything else, the idea is to eventually make the motion estimation independent of mpegvideo, so this will be removed then (FIXME/XXX)
 
     uint8_t *scratchbuf;
     uint8_t *emu_edge_buffer;
diff --git a/libavcodec/snow_dwt.c b/libavcodec/snow_dwt.c
index c093ebff07..8d717b6bf8 100644
--- a/libavcodec/snow_dwt.c
+++ b/libavcodec/snow_dwt.c
@@ -25,6 +25,8 @@ 
 #include "me_cmp.h"
 #include "snow_dwt.h"
 
+#define MPVEncContext MPVContext
+
 int ff_slice_buffer_init(slice_buffer *buf, int line_count,
                          int max_allocated_lines, int line_width,
                          IDWTELEM *base_buffer)
@@ -740,7 +742,7 @@  void ff_spatial_idwt(IDWTELEM *buffer, IDWTELEM *temp, int width, int height,
                               decomposition_count, y);
 }
 
-static inline int w_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size,
+static inline int w_c(struct MPVEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size,
                       int w, int h, int type)
 {
     int s, i, j;
@@ -809,32 +811,32 @@  static inline int w_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, pt
     return s >> 9;
 }
 
-static int w53_8_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
+static int w53_8_c(struct MPVEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
 {
     return w_c(v, pix1, pix2, line_size, 8, h, 1);
 }
 
-static int w97_8_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
+static int w97_8_c(struct MPVEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
 {
     return w_c(v, pix1, pix2, line_size, 8, h, 0);
 }
 
-static int w53_16_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
+static int w53_16_c(struct MPVEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
 {
     return w_c(v, pix1, pix2, line_size, 16, h, 1);
 }
 
-static int w97_16_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
+static int w97_16_c(struct MPVEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
 {
     return w_c(v, pix1, pix2, line_size, 16, h, 0);
 }
 
-int ff_w53_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
+int ff_w53_32_c(struct MPVEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
 {
     return w_c(v, pix1, pix2, line_size, 32, h, 1);
 }
 
-int ff_w97_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
+int ff_w97_32_c(struct MPVEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h)
 {
     return w_c(v, pix1, pix2, line_size, 32, h, 0);
 }
diff --git a/libavcodec/snow_dwt.h b/libavcodec/snow_dwt.h
index 390bc57130..ef6690b0ca 100644
--- a/libavcodec/snow_dwt.h
+++ b/libavcodec/snow_dwt.h
@@ -24,7 +24,8 @@ 
 #include <stddef.h>
 #include <stdint.h>
 
-struct MpegEncContext;
+#define MPVEncContext MPVContext
+struct MPVEncContext;
 
 typedef int DWTELEM;
 typedef short IDWTELEM;
@@ -104,8 +105,8 @@  void ff_snow_inner_add_yblock(const uint8_t *obmc, const int obmc_stride,
                               int src_y, int src_stride, slice_buffer *sb,
                               int add, uint8_t *dst8);
 
-int ff_w53_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h);
-int ff_w97_32_c(struct MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h);
+int ff_w53_32_c(struct MPVEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h);
+int ff_w97_32_c(struct MPVEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t line_size, int h);
 
 void ff_spatial_dwt(int *buffer, int *temp, int width, int height, int stride,
                     int type, int decomposition_count);
@@ -123,4 +124,6 @@  void ff_spatial_idwt(IDWTELEM *buffer, IDWTELEM *temp, int width, int height,
 void ff_dwt_init(SnowDWTContext *c);
 void ff_dwt_init_x86(SnowDWTContext *c);
 
+#undef MPVEncContext
+
 #endif /* AVCODEC_DWT_H */
diff --git a/libavcodec/speedhqenc.c b/libavcodec/speedhqenc.c
index ba3b1d9393..fb4d426b46 100644
--- a/libavcodec/speedhqenc.c
+++ b/libavcodec/speedhqenc.c
@@ -56,7 +56,7 @@  static uint32_t speedhq_chr_dc_uni[512];
 static uint8_t uni_speedhq_ac_vlc_len[64 * 64 * 2];
 
 typedef struct SpeedHQEncContext {
-    MpegEncContext m;
+    MPVMainEncContext m;
 
     int slice_start;
 } SpeedHQEncContext;
@@ -90,7 +90,7 @@  static av_cold void speedhq_init_static_data(void)
     ff_mpeg1_init_uni_ac_vlc(&ff_rl_speedhq, uni_speedhq_ac_vlc_len);
 }
 
-av_cold int ff_speedhq_encode_init(MpegEncContext *s)
+av_cold int ff_speedhq_encode_init(MPVMainEncContext *s)
 {
     static AVOnce init_static_once = AV_ONCE_INIT;
 
@@ -128,7 +128,7 @@  av_cold int ff_speedhq_encode_init(MpegEncContext *s)
     return 0;
 }
 
-void ff_speedhq_encode_picture_header(MpegEncContext *s)
+void ff_speedhq_encode_picture_header(MPVMainEncContext *s)
 {
     SpeedHQEncContext *ctx = (SpeedHQEncContext*)s;
 
@@ -140,7 +140,7 @@  void ff_speedhq_encode_picture_header(MpegEncContext *s)
     put_bits_le(&s->pb, 24, 0);
 }
 
-void ff_speedhq_end_slice(MpegEncContext *s)
+void ff_speedhq_end_slice(MPVEncContext *s)
 {
     SpeedHQEncContext *ctx = (SpeedHQEncContext*)s;
     int slice_len;
@@ -188,7 +188,7 @@  static inline void encode_dc(PutBitContext *pb, int diff, int component)
     }
 }
 
-static void encode_block(MpegEncContext *s, int16_t *block, int n)
+static void encode_block(MPVEncContext *s, int16_t *block, int n)
 {
     int alevel, level, last_non_zero, dc, i, j, run, last_index, sign;
     int code;
@@ -237,7 +237,7 @@  static void encode_block(MpegEncContext *s, int16_t *block, int n)
     put_bits_le(&s->pb, 4, 6);
 }
 
-void ff_speedhq_encode_mb(MpegEncContext *s, int16_t block[12][64])
+void ff_speedhq_encode_mb(MPVEncContext *s, int16_t block[12][64])
 {
     int i;
     for(i=0;i<6;i++) {
diff --git a/libavcodec/speedhqenc.h b/libavcodec/speedhqenc.h
index 5100bb2d34..5ca8868f28 100644
--- a/libavcodec/speedhqenc.h
+++ b/libavcodec/speedhqenc.h
@@ -33,15 +33,15 @@ 
 
 #include "mjpeg.h"
 #include "mjpegenc_common.h"
-#include "mpegvideo.h"
+#include "mpegvideoenc.h"
 #include "put_bits.h"
 
-int  ff_speedhq_encode_init(MpegEncContext *s);
-void ff_speedhq_encode_close(MpegEncContext *s);
-void ff_speedhq_encode_mb(MpegEncContext *s, int16_t block[12][64]);
+int  ff_speedhq_encode_init(MPVMainEncContext *s);
+void ff_speedhq_encode_close(MPVMainEncContext *s);
+void ff_speedhq_encode_mb(MPVEncContext *s, int16_t block[12][64]);
 
-void ff_speedhq_encode_picture_header(MpegEncContext *s);
-void ff_speedhq_end_slice(MpegEncContext *s);
+void ff_speedhq_encode_picture_header(MPVMainEncContext *s);
+void ff_speedhq_end_slice(MPVEncContext *s);
 
 int ff_speedhq_mb_y_order_to_mb(int mb_y_order, int mb_height, int *first_in_slice);
 
diff --git a/libavcodec/svq1enc.c b/libavcodec/svq1enc.c
index 9cef3d8ef6..7af82bb3ac 100644
--- a/libavcodec/svq1enc.c
+++ b/libavcodec/svq1enc.c
@@ -237,7 +237,8 @@  static int encode_block(SVQ1EncContext *s, uint8_t *src, uint8_t *ref,
     return best_score;
 }
 
-static void init_block_index(MpegEncContext *s){
+static void init_block_index(MPVEncContext *s)
+{
     s->block_index[0]= s->b8_stride*(s->mb_y*2    )     + s->mb_x*2;
     s->block_index[1]= s->b8_stride*(s->mb_y*2    ) + 1 + s->mb_x*2;
     s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1)     + s->mb_x*2;
diff --git a/libavcodec/svq1enc.h b/libavcodec/svq1enc.h
index bb6af082d5..ee4a0b673c 100644
--- a/libavcodec/svq1enc.h
+++ b/libavcodec/svq1enc.h
@@ -29,14 +29,14 @@ 
 #include "avcodec.h"
 #include "hpeldsp.h"
 #include "me_cmp.h"
-#include "mpegvideo.h"
+#include "mpegvideoenc.h"
 #include "put_bits.h"
 
 typedef struct SVQ1EncContext {
     /* FIXME: Needed for motion estimation, should not be used for anything
      * else, the idea is to make the motion estimation eventually independent
-     * of MpegEncContext, so this will be removed then. */
-    MpegEncContext m;
+     * of MPVMainEncContext, so this will be removed then. */
+    MPVMainEncContext m;
     AVCodecContext *avctx;
     MECmpContext mecc;
     HpelDSPContext hdsp;
diff --git a/libavcodec/vaapi_mpeg2.c b/libavcodec/vaapi_mpeg2.c
index 26e0cd827c..057afdd878 100644
--- a/libavcodec/vaapi_mpeg2.c
+++ b/libavcodec/vaapi_mpeg2.c
@@ -27,21 +27,21 @@ 
 #include "vaapi_decode.h"
 
 /** Reconstruct bitstream f_code */
-static inline int mpeg2_get_f_code(const MpegEncContext *s)
+static inline int mpeg2_get_f_code(const MPVDecContext *s)
 {
     return (s->mpeg_f_code[0][0] << 12) | (s->mpeg_f_code[0][1] << 8) |
            (s->mpeg_f_code[1][0] <<  4) |  s->mpeg_f_code[1][1];
 }
 
 /** Determine frame start: first field for field picture or frame picture */
-static inline int mpeg2_get_is_frame_start(const MpegEncContext *s)
+static inline int mpeg2_get_is_frame_start(const MPVDecContext *s)
 {
     return s->first_field || s->picture_structure == PICT_FRAME;
 }
 
 static int vaapi_mpeg2_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
 {
-    const MpegEncContext *s = avctx->priv_data;
+    const MPVDecContext *const s = avctx->priv_data;
     VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
     VAPictureParameterBufferMPEG2 pic_param;
     VAIQMatrixBufferMPEG2 iq_matrix;
@@ -114,7 +114,7 @@  fail:
 
 static int vaapi_mpeg2_end_frame(AVCodecContext *avctx)
 {
-    MpegEncContext     *s   = avctx->priv_data;
+    MPVDecContext  *const s = avctx->priv_data;
     VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
     int ret;
 
@@ -130,7 +130,7 @@  fail:
 
 static int vaapi_mpeg2_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
 {
-    const MpegEncContext *s = avctx->priv_data;
+    const MPVDecContext *const s = avctx->priv_data;
     VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
     VASliceParameterBufferMPEG2 slice_param;
     GetBitContext gb;
diff --git a/libavcodec/vaapi_mpeg4.c b/libavcodec/vaapi_mpeg4.c
index 71e155154c..090902e29d 100644
--- a/libavcodec/vaapi_mpeg4.c
+++ b/libavcodec/vaapi_mpeg4.c
@@ -46,7 +46,7 @@  static int mpeg4_get_intra_dc_vlc_thr(Mpeg4DecContext *s)
 static int vaapi_mpeg4_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
 {
     Mpeg4DecContext *ctx = avctx->priv_data;
-    MpegEncContext *s = &ctx->m;
+    MPVDecContext *const s = &ctx->m;
     VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
     VAPictureParameterBufferMPEG4 pic_param;
     int i, err;
@@ -136,7 +136,7 @@  fail:
 
 static int vaapi_mpeg4_end_frame(AVCodecContext *avctx)
 {
-    MpegEncContext *s = avctx->priv_data;
+    MPVDecContext *const s = avctx->priv_data;
     VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
     int ret;
 
@@ -152,7 +152,7 @@  fail:
 
 static int vaapi_mpeg4_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
 {
-    MpegEncContext *s = avctx->priv_data;
+    MPVDecContext *const s = avctx->priv_data;
     VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
     VASliceParameterBufferMPEG4 slice_param;
     int err;
diff --git a/libavcodec/vaapi_vc1.c b/libavcodec/vaapi_vc1.c
index 4e9607d9be..3d0903333a 100644
--- a/libavcodec/vaapi_vc1.c
+++ b/libavcodec/vaapi_vc1.c
@@ -114,7 +114,7 @@  static inline int vc1_has_FORWARDMB_bitplane(const VC1Context *v)
 /** Reconstruct bitstream PTYPE (7.1.1.4, index into Table-35) */
 static int vc1_get_PTYPE(const VC1Context *v)
 {
-    const MpegEncContext *s = &v->s;
+    const MPVDecContext *const s = &v->s;
     switch (s->pict_type) {
     case AV_PICTURE_TYPE_I: return 0;
     case AV_PICTURE_TYPE_P: return v->p_frame_skipped ? 4 : 1;
@@ -126,7 +126,7 @@  static int vc1_get_PTYPE(const VC1Context *v)
 /** Reconstruct bitstream FPTYPE (9.1.1.42, index into Table-105) */
 static int vc1_get_FPTYPE(const VC1Context *v)
 {
-    const MpegEncContext *s = &v->s;
+    const MPVDecContext *const s = &v->s;
     switch (s->pict_type) {
     case AV_PICTURE_TYPE_I: return 0;
     case AV_PICTURE_TYPE_P: return 3;
@@ -250,7 +250,7 @@  static inline void vc1_pack_bitplanes(uint8_t *bitplane, int n, const uint8_t *f
 static int vaapi_vc1_start_frame(AVCodecContext *avctx, av_unused const uint8_t *buffer, av_unused uint32_t size)
 {
     const VC1Context *v = avctx->priv_data;
-    const MpegEncContext *s = &v->s;
+    const MPVDecContext *const s = &v->s;
     VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
     VAPictureParameterBufferVC1 pic_param;
     int err;
@@ -447,7 +447,7 @@  fail:
 static int vaapi_vc1_end_frame(AVCodecContext *avctx)
 {
     VC1Context *v = avctx->priv_data;
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
     int ret;
 
@@ -464,7 +464,7 @@  fail:
 static int vaapi_vc1_decode_slice(AVCodecContext *avctx, const uint8_t *buffer, uint32_t size)
 {
     const VC1Context *v = avctx->priv_data;
-    const MpegEncContext *s = &v->s;
+    const MPVDecContext *const s = &v->s;
     VAAPIDecodePicture *pic = s->current_picture_ptr->hwaccel_picture_private;
     VASliceParameterBufferVC1 slice_param;
     int mb_height;
diff --git a/libavcodec/vc1.h b/libavcodec/vc1.h
index 9b25f0872f..25a043af56 100644
--- a/libavcodec/vc1.h
+++ b/libavcodec/vc1.h
@@ -171,7 +171,7 @@  enum Imode {
  * Many members are only used for Advanced Profile
  */
 typedef struct VC1Context{
-    MpegEncContext s;
+    MPVMainDecContext s;
     IntraX8Context x8;
     H264ChromaContext h264chroma;
     VC1DSPContext vc1dsp;
diff --git a/libavcodec/vc1_block.c b/libavcodec/vc1_block.c
index c922efe4c8..61dc4a5a20 100644
--- a/libavcodec/vc1_block.c
+++ b/libavcodec/vc1_block.c
@@ -58,7 +58,7 @@  static const int block_map[6] = {0, 2, 1, 3, 4, 5};
 
 static inline void init_block_index(VC1Context *v)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     ff_init_block_index(s);
     if (v->field_mode && !(v->second_field ^ v->tff)) {
         s->dest[0] += s->current_picture_ptr->f->linesize[0];
@@ -71,7 +71,7 @@  static inline void init_block_index(VC1Context *v)
 
 static void vc1_put_blocks_clamped(VC1Context *v, int put_signed)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     uint8_t *dest;
     int block_count = CONFIG_GRAY && (s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 4 : 6;
     int fieldtx = 0;
@@ -329,14 +329,14 @@  static inline void vc1_b_mc(VC1Context *v, int dmv_x[2], int dmv_y[2],
 
 /** Get predicted DC value for I-frames only
  * prediction dir: left=0, top=1
- * @param s MpegEncContext
+ * @param s MPVDecContext
  * @param overlap flag indicating that overlap filtering is used
  * @param pq integer part of picture quantizer
  * @param[in] n block index in the current MB
  * @param dc_val_ptr Pointer to DC predictor
  * @param dir_ptr Prediction direction for use in AC prediction
  */
-static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
+static inline int vc1_i_pred_dc(MPVDecContext *s, int overlap, int pq, int n,
                                 int16_t **dc_val_ptr, int *dir_ptr)
 {
     int a, b, c, wrap, pred, scale;
@@ -392,7 +392,7 @@  static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
 
 /** Get predicted DC value
  * prediction dir: left=0, top=1
- * @param s MpegEncContext
+ * @param s MPVDecContext
  * @param overlap flag indicating that overlap filtering is used
  * @param pq integer part of picture quantizer
  * @param[in] n block index in the current MB
@@ -401,7 +401,7 @@  static inline int vc1_i_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
  * @param dc_val_ptr Pointer to DC predictor
  * @param dir_ptr Prediction direction for use in AC prediction
  */
-static inline int ff_vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
+static inline int ff_vc1_pred_dc(MPVDecContext *s, int overlap, int pq, int n,
                               int a_avail, int c_avail,
                               int16_t **dc_val_ptr, int *dir_ptr)
 {
@@ -472,7 +472,7 @@  static inline int ff_vc1_pred_dc(MpegEncContext *s, int overlap, int pq, int n,
  * @{
  */
 
-static inline int vc1_coded_block_pred(MpegEncContext * s, int n,
+static inline int vc1_coded_block_pred(MPVDecContext * s, int n,
                                        uint8_t **coded_block_ptr)
 {
     int xy, wrap, pred, a, b, c;
@@ -578,8 +578,8 @@  static int vc1_decode_ac_coeff(VC1Context *v, int *last, int *skip,
 static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
                               int coded, int codingset)
 {
+    MPVDecContext *const s = &v->s;
     GetBitContext *gb = &v->s.gb;
-    MpegEncContext *s = &v->s;
     int dc_pred_dir = 0; /* Direction of the DC prediction used */
     int i;
     int16_t *dc_val;
@@ -716,8 +716,8 @@  static int vc1_decode_i_block(VC1Context *v, int16_t block[64], int n,
 static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
                                   int coded, int codingset, int mquant)
 {
+    MPVDecContext *const s = &v->s;
     GetBitContext *gb = &v->s.gb;
-    MpegEncContext *s = &v->s;
     int dc_pred_dir = 0; /* Direction of the DC prediction used */
     int i;
     int16_t *dc_val = NULL;
@@ -907,8 +907,8 @@  static int vc1_decode_i_block_adv(VC1Context *v, int16_t block[64], int n,
 static int vc1_decode_intra_block(VC1Context *v, int16_t block[64], int n,
                                   int coded, int mquant, int codingset)
 {
+    MPVDecContext *const s = &v->s;
     GetBitContext *gb = &v->s.gb;
-    MpegEncContext *s = &v->s;
     int dc_pred_dir = 0; /* Direction of the DC prediction used */
     int i;
     int16_t *dc_val = NULL;
@@ -1116,7 +1116,7 @@  static int vc1_decode_p_block(VC1Context *v, int16_t block[64], int n,
                               uint8_t *dst, int linesize, int skip_block,
                               int *ttmb_out)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     GetBitContext *gb = &s->gb;
     int i, j;
     int subblkpat = 0;
@@ -1285,7 +1285,7 @@  static const uint8_t size_table[6] = { 0, 2, 3, 4,  5,  8 };
  */
 static int vc1_decode_p_mb(VC1Context *v)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     GetBitContext *gb = &s->gb;
     int i, j;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
@@ -1517,7 +1517,7 @@  end:
 
 static int vc1_decode_p_mb_intfr(VC1Context *v)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     GetBitContext *gb = &s->gb;
     int i;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
@@ -1727,7 +1727,7 @@  static int vc1_decode_p_mb_intfr(VC1Context *v)
 
 static int vc1_decode_p_mb_intfi(VC1Context *v)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     GetBitContext *gb = &s->gb;
     int i;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
@@ -1851,7 +1851,7 @@  static int vc1_decode_p_mb_intfi(VC1Context *v)
  */
 static int vc1_decode_b_mb(VC1Context *v)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     GetBitContext *gb = &s->gb;
     int i, j;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
@@ -2009,7 +2009,7 @@  static int vc1_decode_b_mb(VC1Context *v)
  */
 static int vc1_decode_b_mb_intfi(VC1Context *v)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     GetBitContext *gb = &s->gb;
     int i, j;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
@@ -2176,7 +2176,7 @@  static int vc1_decode_b_mb_intfi(VC1Context *v)
  */
 static int vc1_decode_b_mb_intfr(VC1Context *v)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     GetBitContext *gb = &s->gb;
     int i, j;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
@@ -2526,8 +2526,8 @@  static int vc1_decode_b_mb_intfr(VC1Context *v)
  */
 static void vc1_decode_i_blocks(VC1Context *v)
 {
+    MPVDecContext *const s = &v->s;
     int k, j;
-    MpegEncContext *s = &v->s;
     int cbp, val;
     uint8_t *coded_val;
     int mb_pos;
@@ -2651,8 +2651,8 @@  static void vc1_decode_i_blocks(VC1Context *v)
  */
 static int vc1_decode_i_blocks_adv(VC1Context *v)
 {
+    MPVDecContext *const s = &v->s;
     int k;
-    MpegEncContext *s = &v->s;
     int cbp, val;
     uint8_t *coded_val;
     int mb_pos;
@@ -2794,7 +2794,7 @@  static int vc1_decode_i_blocks_adv(VC1Context *v)
 
 static void vc1_decode_p_blocks(VC1Context *v)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     int apply_loop_filter;
 
     /* select coding mode used for VLC tables selection */
@@ -2886,7 +2886,7 @@  static void vc1_decode_p_blocks(VC1Context *v)
 
 static void vc1_decode_b_blocks(VC1Context *v)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
 
     /* select coding mode used for VLC tables selection */
     switch (v->c_ac_table_index) {
@@ -2970,7 +2970,7 @@  static void vc1_decode_b_blocks(VC1Context *v)
 
 static void vc1_decode_skip_blocks(VC1Context *v)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
 
     if (!v->s.last_picture.f->data[0])
         return;
diff --git a/libavcodec/vc1_loopfilter.c b/libavcodec/vc1_loopfilter.c
index 0f990cccef..e5819b1042 100644
--- a/libavcodec/vc1_loopfilter.c
+++ b/libavcodec/vc1_loopfilter.c
@@ -104,7 +104,7 @@  static av_always_inline void vc1_v_overlap_filter(VC1Context *v, int16_t (*top_b
 
 void ff_vc1_i_overlap_filter(VC1Context *v)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     int16_t (*topleft_blk)[64], (*top_blk)[64], (*left_blk)[64], (*cur_blk)[64];
     int block_count = CONFIG_GRAY && (s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 4 : 6;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
@@ -160,7 +160,7 @@  void ff_vc1_i_overlap_filter(VC1Context *v)
 
 void ff_vc1_p_overlap_filter(VC1Context *v)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     int16_t (*topleft_blk)[64], (*top_blk)[64], (*left_blk)[64], (*cur_blk)[64];
     int block_count = CONFIG_GRAY && (s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 4 : 6;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
@@ -206,7 +206,7 @@  void ff_vc1_p_overlap_filter(VC1Context *v)
 static av_always_inline void vc1_i_h_loop_filter(VC1Context *v, uint8_t *dest,
                                                  uint32_t flags, int block_num)
 {
-    MpegEncContext *s  = &v->s;
+    MPVDecContext *const s  = &v->s;
     int pq = v->pq;
     uint8_t *dst;
 
@@ -239,7 +239,7 @@  static av_always_inline void vc1_i_v_loop_filter(VC1Context *v, uint8_t *dest,
                                                  uint32_t flags, uint8_t fieldtx,
                                                  int block_num)
 {
-    MpegEncContext *s  = &v->s;
+    MPVDecContext *const s  = &v->s;
     int pq = v->pq;
     uint8_t *dst;
 
@@ -270,7 +270,7 @@  static av_always_inline void vc1_i_v_loop_filter(VC1Context *v, uint8_t *dest,
 
 void ff_vc1_i_loop_filter(VC1Context *v)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     int block_count = CONFIG_GRAY && (s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 4 : 6;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
     uint8_t *dest, fieldtx;
@@ -364,7 +364,7 @@  static av_always_inline void vc1_p_h_loop_filter(VC1Context *v, uint8_t *dest, u
                                                  uint8_t *is_intra, int16_t (*mv)[2], uint8_t *mv_f,
                                                  int *ttblk, uint32_t flags, int block_num)
 {
-    MpegEncContext *s  = &v->s;
+    MPVDecContext *const s  = &v->s;
     int pq = v->pq;
     uint32_t left_cbp = cbp[0] >> (block_num * 4), right_cbp;
     uint8_t left_is_intra, right_is_intra;
@@ -417,7 +417,7 @@  static av_always_inline void vc1_p_v_loop_filter(VC1Context *v, uint8_t *dest, u
                                                  uint8_t *is_intra, int16_t (*mv)[2], uint8_t *mv_f,
                                                  int *ttblk, uint32_t flags, int block_num)
 {
-    MpegEncContext *s  = &v->s;
+    MPVDecContext *const s  = &v->s;
     int pq = v->pq;
     uint32_t top_cbp = cbp[0] >> (block_num * 4), bottom_cbp;
     uint8_t top_is_intra, bottom_is_intra;
@@ -469,7 +469,7 @@  static av_always_inline void vc1_p_v_loop_filter(VC1Context *v, uint8_t *dest, u
 
 void ff_vc1_p_loop_filter(VC1Context *v)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     int block_count = CONFIG_GRAY && (s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 4 : 6;
     uint8_t *dest;
     uint32_t *cbp;
@@ -802,7 +802,7 @@  void ff_vc1_p_loop_filter(VC1Context *v)
 static av_always_inline void vc1_p_h_intfr_loop_filter(VC1Context *v, uint8_t *dest, int *ttblk,
                                                        uint32_t flags, uint8_t fieldtx, int block_num)
 {
-    MpegEncContext *s  = &v->s;
+    MPVDecContext *const s  = &v->s;
     int pq = v->pq;
     int tt;
     int linesize  = block_num > 3 ? s->uvlinesize : s->linesize;
@@ -852,7 +852,7 @@  static av_always_inline void vc1_p_h_intfr_loop_filter(VC1Context *v, uint8_t *d
 static av_always_inline void vc1_p_v_intfr_loop_filter(VC1Context *v, uint8_t *dest, int *ttblk,
                                                        uint32_t flags, uint8_t fieldtx, int block_num)
 {
-    MpegEncContext *s  = &v->s;
+    MPVDecContext *const s  = &v->s;
     int pq = v->pq;
     int tt;
     int linesize  = block_num > 3 ? s->uvlinesize : s->linesize;
@@ -908,7 +908,7 @@  static av_always_inline void vc1_p_v_intfr_loop_filter(VC1Context *v, uint8_t *d
 
 void ff_vc1_p_intfr_loop_filter(VC1Context *v)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     int block_count = CONFIG_GRAY && (s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 4 : 6;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
     uint8_t *dest;
@@ -1112,7 +1112,7 @@  void ff_vc1_p_intfr_loop_filter(VC1Context *v)
 static av_always_inline void vc1_b_h_intfi_loop_filter(VC1Context *v, uint8_t *dest, uint32_t *cbp,
                                                        int *ttblk, uint32_t flags, int block_num)
 {
-    MpegEncContext *s  = &v->s;
+    MPVDecContext *const s  = &v->s;
     int pq = v->pq;
     uint8_t *dst;
     uint32_t block_cbp = cbp[0] >> (block_num * 4);
@@ -1144,7 +1144,7 @@  static av_always_inline void vc1_b_h_intfi_loop_filter(VC1Context *v, uint8_t *d
 static av_always_inline void vc1_b_v_intfi_loop_filter(VC1Context *v, uint8_t *dest, uint32_t *cbp,
                                                        int *ttblk, uint32_t flags, int block_num)
 {
-    MpegEncContext *s  = &v->s;
+    MPVDecContext *const s  = &v->s;
     int pq = v->pq;
     uint8_t *dst;
     uint32_t block_cbp = cbp[0] >> (block_num * 4);
@@ -1171,7 +1171,7 @@  static av_always_inline void vc1_b_v_intfi_loop_filter(VC1Context *v, uint8_t *d
 
 void ff_vc1_b_intfi_loop_filter(VC1Context *v)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     int block_count = CONFIG_GRAY && (s->avctx->flags & AV_CODEC_FLAG_GRAY) ? 4 : 6;
     uint8_t *dest;
     uint32_t *cbp;
diff --git a/libavcodec/vc1_mc.c b/libavcodec/vc1_mc.c
index 1b8d8799b3..e9beb96e6d 100644
--- a/libavcodec/vc1_mc.c
+++ b/libavcodec/vc1_mc.c
@@ -107,7 +107,7 @@  static const uint8_t popcount4[16] = { 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3,
 
 static av_always_inline int get_luma_mv(VC1Context *v, int dir, int16_t *tx, int16_t *ty)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     int idx = v->mv_f[dir][s->block_index[0] + v->blocks_off] |
              (v->mv_f[dir][s->block_index[1] + v->blocks_off] << 1) |
              (v->mv_f[dir][s->block_index[2] + v->blocks_off] << 2) |
@@ -139,7 +139,7 @@  static av_always_inline int get_luma_mv(VC1Context *v, int dir, int16_t *tx, int
 
 static av_always_inline int get_chroma_mv(VC1Context *v, int dir, int16_t *tx, int16_t *ty)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     int idx = !v->mb_type[0][s->block_index[0]] |
              (!v->mb_type[0][s->block_index[1]] << 1) |
              (!v->mb_type[0][s->block_index[2]] << 2) |
@@ -171,7 +171,7 @@  static av_always_inline int get_chroma_mv(VC1Context *v, int dir, int16_t *tx, i
  */
 void ff_vc1_mc_1mv(VC1Context *v, int dir)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     H264ChromaContext *h264chroma = &v->h264chroma;
     uint8_t *srcY, *srcU, *srcV;
     int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
@@ -451,7 +451,7 @@  void ff_vc1_mc_1mv(VC1Context *v, int dir)
  */
 void ff_vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     uint8_t *srcY;
     int dxy, mx, my, src_x, src_y;
     int off;
@@ -633,7 +633,7 @@  void ff_vc1_mc_4mv_luma(VC1Context *v, int n, int dir, int avg)
  */
 void ff_vc1_mc_4mv_chroma(VC1Context *v, int dir)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     H264ChromaContext *h264chroma = &v->h264chroma;
     uint8_t *srcU, *srcV;
     int uvmx, uvmy, uvsrc_x, uvsrc_y;
@@ -838,7 +838,7 @@  void ff_vc1_mc_4mv_chroma(VC1Context *v, int dir)
  */
 void ff_vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     H264ChromaContext *h264chroma = &v->h264chroma;
     uint8_t *srcU, *srcV;
     int uvsrc_x, uvsrc_y;
@@ -1003,7 +1003,7 @@  void ff_vc1_mc_4mv_chroma4(VC1Context *v, int dir, int dir2, int avg)
  */
 void ff_vc1_interp_mc(VC1Context *v)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     H264ChromaContext *h264chroma = &v->h264chroma;
     uint8_t *srcY, *srcU, *srcV;
     int dxy, mx, my, uvmx, uvmy, src_x, src_y, uvsrc_x, uvsrc_y;
diff --git a/libavcodec/vc1_pred.c b/libavcodec/vc1_pred.c
index ad2caf6db2..812118c7eb 100644
--- a/libavcodec/vc1_pred.c
+++ b/libavcodec/vc1_pred.c
@@ -213,7 +213,7 @@  void ff_vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
                     int mv1, int r_x, int r_y, uint8_t* is_intra,
                     int pred_flag, int dir)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     int xy, wrap, off = 0;
     int16_t *A, *B, *C;
     int px, py;
@@ -470,7 +470,7 @@  void ff_vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
 void ff_vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
                           int mvn, int r_x, int r_y, int dir)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     int xy, wrap, off = 0;
     int A[2], B[2], C[2];
     int px = 0, py = 0;
@@ -691,7 +691,7 @@  void ff_vc1_pred_mv_intfr(VC1Context *v, int n, int dmv_x, int dmv_y,
 void ff_vc1_pred_b_mv(VC1Context *v, int dmv_x[2], int dmv_y[2],
                       int direct, int mvtype)
 {
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     int xy, wrap, off = 0;
     int16_t *A, *B, *C;
     int px, py;
@@ -892,7 +892,7 @@  void ff_vc1_pred_b_mv_intfi(VC1Context *v, int n, int *dmv_x, int *dmv_y,
                             int mv1, int *pred_flag)
 {
     int dir = (v->bmvtype == BMV_TYPE_BACKWARD) ? 1 : 0;
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
 
     if (v->bmvtype == BMV_TYPE_DIRECT) {
diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c
index 7ed5133cfa..78a3422d18 100644
--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@ -179,7 +179,7 @@  static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
     uint8_t* src_h[2][2];
     int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
     int ysub[2];
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
 
     for (i = 0; i <= v->two_sprites; i++) {
         xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
@@ -271,8 +271,8 @@  static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
 
 static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
 {
+    MPVDecContext *const s = &v->s;
     int ret;
-    MpegEncContext *s     = &v->s;
     AVCodecContext *avctx = s->avctx;
     SpriteData sd;
 
@@ -304,7 +304,7 @@  static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
 static void vc1_sprite_flush(AVCodecContext *avctx)
 {
     VC1Context *v     = avctx->priv_data;
-    MpegEncContext *s = &v->s;
+    MPVDecContext *const s = &v->s;
     AVFrame *f = s->current_picture.f;
     int plane, i;
 
@@ -323,7 +323,7 @@  static void vc1_sprite_flush(AVCodecContext *avctx)
 
 av_cold int ff_vc1_decode_init_alloc_tables(VC1Context *v)
 {
-    MpegEncContext *s = &v->s;
+    MPVMainDecContext *const s = &v->s;
     int i, ret = AVERROR(ENOMEM);
     int mb_height = FFALIGN(s->mb_height, 2);
 
@@ -422,7 +422,7 @@  av_cold void ff_vc1_init_transposed_scantables(VC1Context *v)
 static av_cold int vc1_decode_init(AVCodecContext *avctx)
 {
     VC1Context *v = avctx->priv_data;
-    MpegEncContext *s = &v->s;
+    MPVMainDecContext *const s = &v->s;
     GetBitContext gb;
     int ret;
 
@@ -584,7 +584,6 @@  static av_cold int vc1_decode_init(AVCodecContext *avctx)
 }
 
 /** Close a VC1/WMV3 decoder
- * @warning Initial try at using MpegEncContext stuff
  */
 av_cold int ff_vc1_decode_end(AVCodecContext *avctx)
 {
@@ -625,7 +624,7 @@  static int vc1_decode_frame(AVCodecContext *avctx, void *data,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size, n_slices = 0, i, ret;
     VC1Context *v = avctx->priv_data;
-    MpegEncContext *s = &v->s;
+    MPVMainDecContext *const s = &v->s;
     AVFrame *pict = data;
     uint8_t *buf2 = NULL;
     const uint8_t *buf_start = buf, *buf_start_second_field = NULL;
diff --git a/libavcodec/vdpau.c b/libavcodec/vdpau.c
index f96ac15e2a..95cd5c3bcd 100644
--- a/libavcodec/vdpau.c
+++ b/libavcodec/vdpau.c
@@ -366,7 +366,7 @@  int ff_vdpau_common_end_frame(AVCodecContext *avctx, AVFrame *frame,
     CONFIG_VC1_VDPAU_HWACCEL   || CONFIG_WMV3_VDPAU_HWACCEL
 int ff_vdpau_mpeg_end_frame(AVCodecContext *avctx)
 {
-    MpegEncContext *s = avctx->priv_data;
+    MPVDecContext *const s = avctx->priv_data;
     Picture *pic = s->current_picture_ptr;
     struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
     int val;
diff --git a/libavcodec/vdpau_mpeg12.c b/libavcodec/vdpau_mpeg12.c
index 72220ffb4e..eb1a364470 100644
--- a/libavcodec/vdpau_mpeg12.c
+++ b/libavcodec/vdpau_mpeg12.c
@@ -32,7 +32,7 @@ 
 static int vdpau_mpeg_start_frame(AVCodecContext *avctx,
                                   const uint8_t *buffer, uint32_t size)
 {
-    MpegEncContext * const s = avctx->priv_data;
+    MPVDecContext *const s = avctx->priv_data;
     Picture *pic             = s->current_picture_ptr;
     struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
     VdpPictureInfoMPEG1Or2 *info = &pic_ctx->info.mpeg;
@@ -83,7 +83,7 @@  static int vdpau_mpeg_start_frame(AVCodecContext *avctx,
 static int vdpau_mpeg_decode_slice(AVCodecContext *avctx,
                                    const uint8_t *buffer, uint32_t size)
 {
-    MpegEncContext * const s = avctx->priv_data;
+    MPVDecContext *const s = avctx->priv_data;
     Picture *pic             = s->current_picture_ptr;
     struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
     int val;
diff --git a/libavcodec/vdpau_mpeg4.c b/libavcodec/vdpau_mpeg4.c
index 93b25beb1f..51676fe065 100644
--- a/libavcodec/vdpau_mpeg4.c
+++ b/libavcodec/vdpau_mpeg4.c
@@ -33,7 +33,7 @@  static int vdpau_mpeg4_start_frame(AVCodecContext *avctx,
                                    const uint8_t *buffer, uint32_t size)
 {
     Mpeg4DecContext *ctx = avctx->priv_data;
-    MpegEncContext * const s = &ctx->m;
+    MPVDecContext *const s = &ctx->m;
     Picture *pic             = s->current_picture_ptr;
     struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
     VdpPictureInfoMPEG4Part2 *info = &pic_ctx->info.mpeg4;
diff --git a/libavcodec/vdpau_vc1.c b/libavcodec/vdpau_vc1.c
index 96c91b58be..e3aa36fbdd 100644
--- a/libavcodec/vdpau_vc1.c
+++ b/libavcodec/vdpau_vc1.c
@@ -33,7 +33,7 @@  static int vdpau_vc1_start_frame(AVCodecContext *avctx,
                                  const uint8_t *buffer, uint32_t size)
 {
     VC1Context * const v  = avctx->priv_data;
-    MpegEncContext * const s = &v->s;
+    MPVDecContext *const s = &v->s;
     Picture *pic          = s->current_picture_ptr;
     struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
     VdpPictureInfoVC1 *info = &pic_ctx->info.vc1;
@@ -101,7 +101,7 @@  static int vdpau_vc1_decode_slice(AVCodecContext *avctx,
                                   const uint8_t *buffer, uint32_t size)
 {
     VC1Context * const v  = avctx->priv_data;
-    MpegEncContext * const s = &v->s;
+    MPVDecContext *const s = &v->s;
     Picture *pic          = s->current_picture_ptr;
     struct vdpau_picture_context *pic_ctx = pic->hwaccel_picture_private;
     int val;
diff --git a/libavcodec/videotoolbox.c b/libavcodec/videotoolbox.c
index 51d4eacfd8..f6d2805dba 100644
--- a/libavcodec/videotoolbox.c
+++ b/libavcodec/videotoolbox.c
@@ -1091,7 +1091,7 @@  static int videotoolbox_mpeg_decode_slice(AVCodecContext *avctx,
 
 static int videotoolbox_mpeg_end_frame(AVCodecContext *avctx)
 {
-    MpegEncContext *s = avctx->priv_data;
+    MPVDecContext *const s = avctx->priv_data;
     AVFrame *frame = s->current_picture_ptr->f;
 
     return ff_videotoolbox_common_end_frame(avctx, frame);
diff --git a/libavcodec/wmv2.c b/libavcodec/wmv2.c
index 8d1d117dea..29ed71f8aa 100644
--- a/libavcodec/wmv2.c
+++ b/libavcodec/wmv2.c
@@ -25,7 +25,7 @@ 
 #include "wmv2.h"
 
 
-av_cold void ff_wmv2_common_init(MpegEncContext *s)
+av_cold void ff_wmv2_common_init(MPVMainContext *s)
 {
     WMV2Context *const w = s->private_ctx;
 
@@ -47,7 +47,7 @@  av_cold void ff_wmv2_common_init(MpegEncContext *s)
     s->idsp.idct     = NULL;
 }
 
-void ff_mspel_motion(MpegEncContext *s, uint8_t *dest_y,
+void ff_mspel_motion(MPVContext *s, uint8_t *dest_y,
                      uint8_t *dest_cb, uint8_t *dest_cr,
                      uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
                      int motion_x, int motion_y, int h)
diff --git a/libavcodec/wmv2.h b/libavcodec/wmv2.h
index 4001093881..07b22644fc 100644
--- a/libavcodec/wmv2.h
+++ b/libavcodec/wmv2.h
@@ -22,6 +22,7 @@ 
 #define AVCODEC_WMV2_H
 
 #include "mpegvideo.h"
+#include "mpegvideoenc.h"
 #include "wmv2dsp.h"
 
 #define SKIP_TYPE_NONE 0
@@ -35,24 +36,24 @@  typedef struct WMV2Context {
     int hshift;
 } WMV2Context;
 
-void ff_wmv2_common_init(MpegEncContext *s);
+void ff_wmv2_common_init(MPVMainContext *s);
 
-int ff_wmv2_decode_mb(MpegEncContext *s, int16_t block[6][64]);
-int ff_wmv2_encode_picture_header(MpegEncContext * s, int picture_number);
-void ff_wmv2_encode_mb(MpegEncContext * s, int16_t block[6][64],
+int ff_wmv2_decode_mb(MPVDecContext *s, int16_t block[6][64]);
+int ff_wmv2_encode_picture_header(MPVMainEncContext * s, int picture_number);
+void ff_wmv2_encode_mb(MPVEncContext * s, int16_t block[6][64],
                        int motion_x, int motion_y);
-int ff_wmv2_decode_picture_header(MpegEncContext * s);
-int ff_wmv2_decode_secondary_picture_header(MpegEncContext * s);
-void ff_wmv2_add_mb(MpegEncContext *s, int16_t block[6][64],
+int ff_wmv2_decode_picture_header(MPVMainDecContext * s);
+int ff_wmv2_decode_secondary_picture_header(MPVMainDecContext * s);
+void ff_wmv2_add_mb(MPVContext *s, int16_t block[6][64],
                     uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr);
 
-void ff_mspel_motion(MpegEncContext *s,
+void ff_mspel_motion(MPVContext *s,
                      uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr,
                      uint8_t **ref_picture, op_pixels_func (*pix_op)[4],
                      int motion_x, int motion_y, int h);
 
 
-static av_always_inline int wmv2_get_cbp_table_index(MpegEncContext *s, int cbp_index)
+static av_always_inline int wmv2_get_cbp_table_index(MPVContext *s, int cbp_index)
 {
     static const uint8_t map[3][3] = {
         { 0, 2, 1 },
diff --git a/libavcodec/wmv2dec.c b/libavcodec/wmv2dec.c
index f9d91f9571..00cb1cf6fc 100644
--- a/libavcodec/wmv2dec.c
+++ b/libavcodec/wmv2dec.c
@@ -34,7 +34,7 @@ 
 #include "wmv2data.h"
 
 typedef struct WMV2DecContext {
-    MpegEncContext s;
+    MPVMainDecContext s;
     WMV2Context common;
     IntraX8Context x8;
     int j_type_bit;
@@ -57,7 +57,7 @@  typedef struct WMV2DecContext {
 static void wmv2_add_block(WMV2DecContext *w, int16_t *block1,
                            uint8_t *dst, int stride, int n)
 {
-    MpegEncContext *const s = &w->s;
+    MPVDecContext *const s = &w->s;
 
     if (s->block_last_index[n] >= 0) {
         switch (w->abt_type_table[n]) {
@@ -80,7 +80,7 @@  static void wmv2_add_block(WMV2DecContext *w, int16_t *block1,
     }
 }
 
-void ff_wmv2_add_mb(MpegEncContext *s, int16_t block1[6][64],
+void ff_wmv2_add_mb(MPVDecContext *s, int16_t block1[6][64],
                     uint8_t *dest_y, uint8_t *dest_cb, uint8_t *dest_cr)
 {
     WMV2DecContext *const w = (WMV2DecContext *) s;
@@ -101,7 +101,7 @@  static int parse_mb_skip(WMV2DecContext *w)
 {
     int mb_x, mb_y;
     int coded_mb_count = 0;
-    MpegEncContext *const s = &w->s;
+    MPVDecContext *const s = &w->s;
     uint32_t *const mb_type = s->current_picture_ptr->mb_type;
 
     w->skip_type = get_bits(&s->gb, 2);
@@ -164,7 +164,7 @@  static int parse_mb_skip(WMV2DecContext *w)
 
 static int decode_ext_header(WMV2DecContext *w)
 {
-    MpegEncContext *const s = &w->s;
+    MPVDecContext *const s = &w->s;
     GetBitContext gb;
     int fps;
     int code;
@@ -200,7 +200,7 @@  static int decode_ext_header(WMV2DecContext *w)
     return 0;
 }
 
-int ff_wmv2_decode_picture_header(MpegEncContext *s)
+int ff_wmv2_decode_picture_header(MPVMainDecContext *s)
 {
     WMV2DecContext *const w = (WMV2DecContext *) s;
     int code;
@@ -235,7 +235,7 @@  int ff_wmv2_decode_picture_header(MpegEncContext *s)
     return 0;
 }
 
-int ff_wmv2_decode_secondary_picture_header(MpegEncContext *s)
+int ff_wmv2_decode_secondary_picture_header(MPVMainDecContext *s)
 {
     WMV2DecContext *const w = (WMV2DecContext *) s;
 
@@ -347,7 +347,7 @@  int ff_wmv2_decode_secondary_picture_header(MpegEncContext *s)
 
 static inline void wmv2_decode_motion(WMV2DecContext *w, int *mx_ptr, int *my_ptr)
 {
-    MpegEncContext *const s = &w->s;
+    MPVDecContext *const s = &w->s;
 
     ff_msmpeg4_decode_motion(s, mx_ptr, my_ptr);
 
@@ -359,7 +359,7 @@  static inline void wmv2_decode_motion(WMV2DecContext *w, int *mx_ptr, int *my_pt
 
 static int16_t *wmv2_pred_motion(WMV2DecContext *w, int *px, int *py)
 {
-    MpegEncContext *const s = &w->s;
+    MPVDecContext *const s = &w->s;
     int xy, wrap, diff, type;
     int16_t *A, *B, *C, *mot_val;
 
@@ -405,7 +405,7 @@  static int16_t *wmv2_pred_motion(WMV2DecContext *w, int *px, int *py)
 static inline int wmv2_decode_inter_block(WMV2DecContext *w, int16_t *block,
                                           int n, int cbp)
 {
-    MpegEncContext *const s = &w->s;
+    MPVDecContext *const s = &w->s;
     static const int sub_cbp_table[3] = { 2, 3, 1 };
     int sub_cbp, ret;
 
@@ -442,7 +442,7 @@  static inline int wmv2_decode_inter_block(WMV2DecContext *w, int16_t *block,
     }
 }
 
-int ff_wmv2_decode_mb(MpegEncContext *s, int16_t block[6][64])
+int ff_wmv2_decode_mb(MPVDecContext *s, int16_t block[6][64])
 {
     /* The following is only allowed because this encoder
      * does not use slice threading. */
@@ -562,7 +562,7 @@  int ff_wmv2_decode_mb(MpegEncContext *s, int16_t block[6][64])
 static av_cold int wmv2_decode_init(AVCodecContext *avctx)
 {
     WMV2DecContext *const w = avctx->priv_data;
-    MpegEncContext *const s = &w->s;
+    MPVMainDecContext *const s = &w->s;
     int ret;
 
     s->private_ctx = &w->common;
diff --git a/libavcodec/wmv2enc.c b/libavcodec/wmv2enc.c
index d8481301e1..14f817f2d6 100644
--- a/libavcodec/wmv2enc.c
+++ b/libavcodec/wmv2enc.c
@@ -43,7 +43,7 @@  typedef struct WMV2EncContext {
 
 static int encode_ext_header(WMV2EncContext *w)
 {
-    MpegEncContext *const s = &w->msmpeg4.s;
+    MPVMainEncContext *const s = &w->msmpeg4.s;
     PutBitContext pb;
     int code;
 
@@ -70,7 +70,7 @@  static int encode_ext_header(WMV2EncContext *w)
 static av_cold int wmv2_encode_init(AVCodecContext *avctx)
 {
     WMV2EncContext *const w = avctx->priv_data;
-    MpegEncContext *const s = &w->msmpeg4.s;
+    MPVMainEncContext *const s = &w->msmpeg4.s;
 
     s->private_ctx = &w->common;
     if (ff_mpv_encode_init(avctx) < 0)
@@ -88,7 +88,7 @@  static av_cold int wmv2_encode_init(AVCodecContext *avctx)
     return 0;
 }
 
-int ff_wmv2_encode_picture_header(MpegEncContext *s, int picture_number)
+int ff_wmv2_encode_picture_header(MPVMainEncContext *s, int picture_number)
 {
     WMV2EncContext *const w = (WMV2EncContext *) s;
 
@@ -161,7 +161,7 @@  int ff_wmv2_encode_picture_header(MpegEncContext *s, int picture_number)
 /* Nearly identical to wmv1 but that is just because we do not use the
  * useless M$ crap features. It is duplicated here in case someone wants
  * to add support for these crap features. */
-void ff_wmv2_encode_mb(MpegEncContext *s, int16_t block[6][64],
+void ff_wmv2_encode_mb(MPVEncContext *s, int16_t block[6][64],
                        int motion_x, int motion_y)
 {
     WMV2EncContext *const w = (WMV2EncContext *) s;
diff --git a/libavcodec/x86/me_cmp.asm b/libavcodec/x86/me_cmp.asm
index ad06d485ab..08ffc10f79 100644
--- a/libavcodec/x86/me_cmp.asm
+++ b/libavcodec/x86/me_cmp.asm
@@ -214,7 +214,7 @@  hadamard8x8_diff %+ SUFFIX:
 hadamard8_16_wrapper %1, 3
 %elif cpuflag(mmx)
 ALIGN 16
-; int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, uint8_t *src1,
+; int ff_hadamard8_diff_ ## cpu(MPVEncContext *s, uint8_t *src1,
 ;                               uint8_t *src2, ptrdiff_t stride, int h)
 ; r0 = void *s = unused, int h = unused (always 8)
 ; note how r1, r2 and r3 are not clobbered in this function, so 16x16
@@ -279,7 +279,7 @@  INIT_XMM ssse3
 %define ABS_SUM_8x8 ABS_SUM_8x8_64
 HADAMARD8_DIFF 9
 
-; int ff_sse*_*(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+; int ff_sse*_*(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
 ;               ptrdiff_t line_size, int h)
 
 %macro SUM_SQUARED_ERRORS 1
@@ -471,7 +471,7 @@  HF_NOISE 8
 HF_NOISE 16
 
 ;---------------------------------------------------------------------------------------
-;int ff_sad_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
+;int ff_sad_<opt>(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
 ;---------------------------------------------------------------------------------------
 ;%1 = 8/16
 %macro SAD 1
@@ -526,7 +526,7 @@  INIT_XMM sse2
 SAD 16
 
 ;------------------------------------------------------------------------------------------
-;int ff_sad_x2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
+;int ff_sad_x2_<opt>(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
 ;------------------------------------------------------------------------------------------
 ;%1 = 8/16
 %macro SAD_X2 1
@@ -603,7 +603,7 @@  INIT_XMM sse2
 SAD_X2 16
 
 ;------------------------------------------------------------------------------------------
-;int ff_sad_y2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
+;int ff_sad_y2_<opt>(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
 ;------------------------------------------------------------------------------------------
 ;%1 = 8/16
 %macro SAD_Y2 1
@@ -673,7 +673,7 @@  INIT_XMM sse2
 SAD_Y2 16
 
 ;-------------------------------------------------------------------------------------------
-;int ff_sad_approx_xy2_<opt>(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
+;int ff_sad_approx_xy2_<opt>(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2, ptrdiff_t stride, int h);
 ;-------------------------------------------------------------------------------------------
 ;%1 = 8/16
 %macro SAD_APPROX_XY2 1
@@ -774,7 +774,7 @@  INIT_XMM sse2
 SAD_APPROX_XY2 16
 
 ;--------------------------------------------------------------------
-;int ff_vsad_intra(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+;int ff_vsad_intra(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
 ;                  ptrdiff_t line_size, int h);
 ;--------------------------------------------------------------------
 ; %1 = 8/16
@@ -835,7 +835,7 @@  INIT_XMM sse2
 VSAD_INTRA 16
 
 ;---------------------------------------------------------------------
-;int ff_vsad_approx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+;int ff_vsad_approx(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
 ;                   ptrdiff_t line_size, int h);
 ;---------------------------------------------------------------------
 ; %1 = 8/16
diff --git a/libavcodec/x86/me_cmp_init.c b/libavcodec/x86/me_cmp_init.c
index 9af911bb88..f8709d5f57 100644
--- a/libavcodec/x86/me_cmp_init.c
+++ b/libavcodec/x86/me_cmp_init.c
@@ -28,61 +28,61 @@ 
 #include "libavutil/x86/asm.h"
 #include "libavutil/x86/cpu.h"
 #include "libavcodec/me_cmp.h"
-#include "libavcodec/mpegvideo.h"
+#include "libavcodec/mpegvideoenc.h"
 
 int ff_sum_abs_dctelem_mmx(int16_t *block);
 int ff_sum_abs_dctelem_mmxext(int16_t *block);
 int ff_sum_abs_dctelem_sse2(int16_t *block);
 int ff_sum_abs_dctelem_ssse3(int16_t *block);
-int ff_sse8_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_sse8_mmx(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                 ptrdiff_t stride, int h);
-int ff_sse16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_sse16_mmx(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                  ptrdiff_t stride, int h);
-int ff_sse16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_sse16_sse2(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                   ptrdiff_t stride, int h);
 int ff_hf_noise8_mmx(uint8_t *pix1, ptrdiff_t stride, int h);
 int ff_hf_noise16_mmx(uint8_t *pix1, ptrdiff_t stride, int h);
-int ff_sad8_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_sad8_mmxext(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                    ptrdiff_t stride, int h);
-int ff_sad16_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_sad16_mmxext(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                     ptrdiff_t stride, int h);
-int ff_sad16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_sad16_sse2(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                   ptrdiff_t stride, int h);
-int ff_sad8_x2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_sad8_x2_mmxext(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                       ptrdiff_t stride, int h);
-int ff_sad16_x2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_sad16_x2_mmxext(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                        ptrdiff_t stride, int h);
-int ff_sad16_x2_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_sad16_x2_sse2(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                      ptrdiff_t stride, int h);
-int ff_sad8_y2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_sad8_y2_mmxext(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                       ptrdiff_t stride, int h);
-int ff_sad16_y2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_sad16_y2_mmxext(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                        ptrdiff_t stride, int h);
-int ff_sad16_y2_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_sad16_y2_sse2(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                      ptrdiff_t stride, int h);
-int ff_sad8_approx_xy2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_sad8_approx_xy2_mmxext(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                               ptrdiff_t stride, int h);
-int ff_sad16_approx_xy2_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_sad16_approx_xy2_mmxext(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                                ptrdiff_t stride, int h);
-int ff_sad16_approx_xy2_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_sad16_approx_xy2_sse2(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                              ptrdiff_t stride, int h);
-int ff_vsad_intra8_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_vsad_intra8_mmxext(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                           ptrdiff_t stride, int h);
-int ff_vsad_intra16_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_vsad_intra16_mmxext(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                            ptrdiff_t stride, int h);
-int ff_vsad_intra16_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_vsad_intra16_sse2(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                          ptrdiff_t stride, int h);
-int ff_vsad8_approx_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_vsad8_approx_mmxext(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                     ptrdiff_t stride, int h);
-int ff_vsad16_approx_mmxext(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_vsad16_approx_mmxext(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                      ptrdiff_t stride, int h);
-int ff_vsad16_approx_sse2(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+int ff_vsad16_approx_sse2(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                    ptrdiff_t stride, int h);
 
 #define hadamard_func(cpu)                                                    \
-    int ff_hadamard8_diff_ ## cpu(MpegEncContext *s, uint8_t *src1,           \
+    int ff_hadamard8_diff_ ## cpu(MPVEncContext *s, uint8_t *src1,            \
                                   uint8_t *src2, ptrdiff_t stride, int h);    \
-    int ff_hadamard8_diff16_ ## cpu(MpegEncContext *s, uint8_t *src1,         \
+    int ff_hadamard8_diff16_ ## cpu(MPVEncContext *s, uint8_t *src1,          \
                                     uint8_t *src2, ptrdiff_t stride, int h);
 
 hadamard_func(mmx)
@@ -91,7 +91,7 @@  hadamard_func(sse2)
 hadamard_func(ssse3)
 
 #if HAVE_X86ASM
-static int nsse16_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
+static int nsse16_mmx(MPVEncContext *c, uint8_t *pix1, uint8_t *pix2,
                       ptrdiff_t stride, int h)
 {
     int score1, score2;
@@ -109,7 +109,7 @@  static int nsse16_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
         return score1 + FFABS(score2) * 8;
 }
 
-static int nsse8_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
+static int nsse8_mmx(MPVEncContext *c, uint8_t *pix1, uint8_t *pix2,
                      ptrdiff_t stride, int h)
 {
     int score1 = ff_sse8_mmx(c, pix1, pix2, stride, h);
@@ -126,7 +126,7 @@  static int nsse8_mmx(MpegEncContext *c, uint8_t *pix1, uint8_t *pix2,
 
 #if HAVE_INLINE_ASM
 
-static int vsad_intra16_mmx(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
+static int vsad_intra16_mmx(MPVEncContext *v, uint8_t *pix, uint8_t *dummy,
                             ptrdiff_t stride, int h)
 {
     int tmp;
@@ -190,7 +190,7 @@  static int vsad_intra16_mmx(MpegEncContext *v, uint8_t *pix, uint8_t *dummy,
 }
 #undef SUM
 
-static int vsad16_mmx(MpegEncContext *v, uint8_t *pix1, uint8_t *pix2,
+static int vsad16_mmx(MPVEncContext *v, uint8_t *pix1, uint8_t *pix2,
                       ptrdiff_t stride, int h)
 {
     int tmp;
@@ -434,7 +434,7 @@  static inline void sad8_y2a_mmx(uint8_t *blk1, uint8_t *blk2,
 }
 
 #define PIX_SAD(suf)                                                    \
-static int sad8_ ## suf(MpegEncContext *v, uint8_t *blk2,               \
+static int sad8_ ## suf(MPVEncContext *v, uint8_t *blk2,                \
                         uint8_t *blk1, ptrdiff_t stride, int h)         \
 {                                                                       \
     av_assert2(h == 8);                                                     \
@@ -448,7 +448,7 @@  static int sad8_ ## suf(MpegEncContext *v, uint8_t *blk2,               \
     return sum_ ## suf();                                               \
 }                                                                       \
                                                                         \
-static int sad8_x2_ ## suf(MpegEncContext *v, uint8_t *blk2,            \
+static int sad8_x2_ ## suf(MPVEncContext *v, uint8_t *blk2,             \
                            uint8_t *blk1, ptrdiff_t stride, int h)      \
 {                                                                       \
     av_assert2(h == 8);                                                     \
@@ -463,7 +463,7 @@  static int sad8_x2_ ## suf(MpegEncContext *v, uint8_t *blk2,            \
     return sum_ ## suf();                                               \
 }                                                                       \
                                                                         \
-static int sad8_y2_ ## suf(MpegEncContext *v, uint8_t *blk2,            \
+static int sad8_y2_ ## suf(MPVEncContext *v, uint8_t *blk2,             \
                            uint8_t *blk1, ptrdiff_t stride, int h)      \
 {                                                                       \
     av_assert2(h == 8);                                                     \
@@ -478,7 +478,7 @@  static int sad8_y2_ ## suf(MpegEncContext *v, uint8_t *blk2,            \
     return sum_ ## suf();                                               \
 }                                                                       \
                                                                         \
-static int sad8_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2,           \
+static int sad8_xy2_ ## suf(MPVEncContext *v, uint8_t *blk2,            \
                             uint8_t *blk1, ptrdiff_t stride, int h)     \
 {                                                                       \
     av_assert2(h == 8);                                                     \
@@ -492,7 +492,7 @@  static int sad8_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2,           \
     return sum_ ## suf();                                               \
 }                                                                       \
                                                                         \
-static int sad16_ ## suf(MpegEncContext *v, uint8_t *blk2,              \
+static int sad16_ ## suf(MPVEncContext *v, uint8_t *blk2,               \
                          uint8_t *blk1, ptrdiff_t stride, int h)        \
 {                                                                       \
     __asm__ volatile (                                                  \
@@ -506,7 +506,7 @@  static int sad16_ ## suf(MpegEncContext *v, uint8_t *blk2,              \
     return sum_ ## suf();                                               \
 }                                                                       \
                                                                         \
-static int sad16_x2_ ## suf(MpegEncContext *v, uint8_t *blk2,           \
+static int sad16_x2_ ## suf(MPVEncContext *v, uint8_t *blk2,            \
                             uint8_t *blk1, ptrdiff_t stride, int h)     \
 {                                                                       \
     __asm__ volatile (                                                  \
@@ -521,7 +521,7 @@  static int sad16_x2_ ## suf(MpegEncContext *v, uint8_t *blk2,           \
     return sum_ ## suf();                                               \
 }                                                                       \
                                                                         \
-static int sad16_y2_ ## suf(MpegEncContext *v, uint8_t *blk2,           \
+static int sad16_y2_ ## suf(MPVEncContext *v, uint8_t *blk2,            \
                             uint8_t *blk1, ptrdiff_t stride, int h)     \
 {                                                                       \
     __asm__ volatile (                                                  \
@@ -536,7 +536,7 @@  static int sad16_y2_ ## suf(MpegEncContext *v, uint8_t *blk2,           \
     return sum_ ## suf();                                               \
 }                                                                       \
                                                                         \
-static int sad16_xy2_ ## suf(MpegEncContext *v, uint8_t *blk2,          \
+static int sad16_xy2_ ## suf(MPVEncContext *v, uint8_t *blk2,           \
                              uint8_t *blk1, ptrdiff_t stride, int h)    \
 {                                                                       \
     __asm__ volatile (                                                  \
diff --git a/libavcodec/x86/mpegvideo.c b/libavcodec/x86/mpegvideo.c
index 73967cafda..bfd464704b 100644
--- a/libavcodec/x86/mpegvideo.c
+++ b/libavcodec/x86/mpegvideo.c
@@ -29,7 +29,7 @@ 
 
 #if HAVE_MMX_INLINE
 
-static void dct_unquantize_h263_intra_mmx(MpegEncContext *s,
+static void dct_unquantize_h263_intra_mmx(MPVContext *s,
                                   int16_t *block, int n, int qscale)
 {
     x86_reg level, qmul, qadd, nCoeffs;
@@ -104,7 +104,7 @@  __asm__ volatile(
 }
 
 
-static void dct_unquantize_h263_inter_mmx(MpegEncContext *s,
+static void dct_unquantize_h263_inter_mmx(MPVContext *s,
                                   int16_t *block, int n, int qscale)
 {
     x86_reg qmul, qadd, nCoeffs;
@@ -165,7 +165,7 @@  __asm__ volatile(
         );
 }
 
-static void dct_unquantize_mpeg1_intra_mmx(MpegEncContext *s,
+static void dct_unquantize_mpeg1_intra_mmx(MPVContext *s,
                                      int16_t *block, int n, int qscale)
 {
     x86_reg nCoeffs;
@@ -234,7 +234,7 @@  __asm__ volatile(
     block[0]= block0;
 }
 
-static void dct_unquantize_mpeg1_inter_mmx(MpegEncContext *s,
+static void dct_unquantize_mpeg1_inter_mmx(MPVContext *s,
                                      int16_t *block, int n, int qscale)
 {
     x86_reg nCoeffs;
@@ -300,7 +300,7 @@  __asm__ volatile(
         );
 }
 
-static void dct_unquantize_mpeg2_intra_mmx(MpegEncContext *s,
+static void dct_unquantize_mpeg2_intra_mmx(MPVContext *s,
                                      int16_t *block, int n, int qscale)
 {
     x86_reg nCoeffs;
@@ -369,7 +369,7 @@  __asm__ volatile(
         //Note, we do not do mismatch control for intra as errors cannot accumulate
 }
 
-static void dct_unquantize_mpeg2_inter_mmx(MpegEncContext *s,
+static void dct_unquantize_mpeg2_inter_mmx(MPVContext *s,
                                      int16_t *block, int n, int qscale)
 {
     x86_reg nCoeffs;
@@ -451,7 +451,7 @@  __asm__ volatile(
 
 #endif /* HAVE_MMX_INLINE */
 
-av_cold void ff_mpv_common_init_x86(MpegEncContext *s)
+av_cold void ff_mpv_common_init_x86(MPVMainContext *s)
 {
 #if HAVE_MMX_INLINE
     int cpu_flags = av_get_cpu_flags();
diff --git a/libavcodec/x86/mpegvideoenc.c b/libavcodec/x86/mpegvideoenc.c
index 3691cce26c..7c0cf27e30 100644
--- a/libavcodec/x86/mpegvideoenc.c
+++ b/libavcodec/x86/mpegvideoenc.c
@@ -97,7 +97,8 @@  DECLARE_ALIGNED(16, static const uint16_t, inv_zigzag_direct16)[64] = {
 
 #if HAVE_INLINE_ASM
 #if HAVE_MMX_INLINE
-static void  denoise_dct_mmx(MpegEncContext *s, int16_t *block){
+static void  denoise_dct_mmx(MPVEncContext *s, int16_t *block)
+{
     const int intra= s->mb_intra;
     int *sum= s->dct_error_sum[intra];
     uint16_t *offset= s->dct_offset[intra];
@@ -153,7 +154,8 @@  static void  denoise_dct_mmx(MpegEncContext *s, int16_t *block){
 #endif /* HAVE_MMX_INLINE */
 
 #if HAVE_SSE2_INLINE
-static void  denoise_dct_sse2(MpegEncContext *s, int16_t *block){
+static void  denoise_dct_sse2(MPVEncContext *s, int16_t *block)
+{
     const int intra= s->mb_intra;
     int *sum= s->dct_error_sum[intra];
     uint16_t *offset= s->dct_offset[intra];
@@ -211,7 +213,7 @@  static void  denoise_dct_sse2(MpegEncContext *s, int16_t *block){
 #endif /* HAVE_SSE2_INLINE */
 #endif /* HAVE_INLINE_ASM */
 
-av_cold void ff_dct_encode_init_x86(MpegEncContext *s)
+av_cold void ff_dct_encode_init_x86(MPVEncContext *s)
 {
     const int dct_algo = s->avctx->dct_algo;
 
diff --git a/libavcodec/x86/mpegvideoenc_template.c b/libavcodec/x86/mpegvideoenc_template.c
index 30c06a6b2c..19a0d3ac7e 100644
--- a/libavcodec/x86/mpegvideoenc_template.c
+++ b/libavcodec/x86/mpegvideoenc_template.c
@@ -100,7 +100,7 @@ 
             "psubw "a", "b"             \n\t" // out=((ABS(block[i])*qmat[0] - bias[0]*qmat[0])>>16)*sign(block[i])
 #endif
 
-static int RENAME(dct_quantize)(MpegEncContext *s,
+static int RENAME(dct_quantize)(MPVEncContext *s,
                             int16_t *block, int n,
                             int qscale, int *overflow)
 {
diff --git a/libavcodec/xvmc_internal.h b/libavcodec/xvmc_internal.h
index d365ef0266..ec9efdc471 100644
--- a/libavcodec/xvmc_internal.h
+++ b/libavcodec/xvmc_internal.h
@@ -21,11 +21,9 @@ 
 #ifndef AVCODEC_XVMC_INTERNAL_H
 #define AVCODEC_XVMC_INTERNAL_H
 
-#include "avcodec.h"
 #include "mpegvideo.h"
-#include "version.h"
 
-void ff_xvmc_init_block(MpegEncContext *s);
-void ff_xvmc_pack_pblocks(MpegEncContext *s, int cbp);
+void ff_xvmc_init_block(MPVDecContext *s);
+void ff_xvmc_pack_pblocks(MPVDecContext *s, int cbp);
 
 #endif /* AVCODEC_XVMC_INTERNAL_H */