[FFmpeg-devel,4/6] avcodec/mpegvideo: Split ff_mpv_reconstruct_mb() into de/encoder part

Message ID GV1P250MB07373B9A6B50FA885FD91EEC8F299@GV1P250MB0737.EURP250.PROD.OUTLOOK.COM
State Accepted
Series [FFmpeg-devel,1/6] avcodec/mpegvideo: Ignore skip_idct for encoders

Checks

Context                          Check    Description
yinshiyou/make_loongarch64       success  Make finished
yinshiyou/make_fate_loongarch64  success  Make fate finished
andriy/make_x86                  success  Make finished
andriy/make_fate_x86             success  Make fate finished

Commit Message

Andreas Rheinhardt Oct. 17, 2022, 1:34 a.m. UTC
This has the advantage of not having to check at runtime whether
a given MpegEncContext is actually a decoder or an encoder context.

To do so, mpv_reconstruct_mb_internal() is moved into a new
template file that is included by both mpegvideo_enc.c
and mpegvideo_dec.c; the decoder-only code (mainly lowres) is
also moved to mpegvideo_dec.c. The is_encoder checks are
changed to #if IS_ENCODER to avoid having to include
headers for decoder-only functions in mpegvideo_enc.c.

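As a rough sketch of the resulting include pattern (names as used in
this patch; the template body is abbreviated here):

    /* mpegvideo_dec.c: compile the template as decoder code */
    #define IS_ENCODER 0
    #include "mpv_reconstruct_mb_template.c"

    /* mpegvideo_enc.c: compile the same template as encoder code */
    #define IS_ENCODER 1
    #include "mpv_reconstruct_mb_template.c"

    /* inside the template, decoder-only paths (e.g. the mbskip
     * handling) are guarded so they compile away for the encoder: */
    #if !IS_ENCODER
        /* decoder-only work */
    #endif

Defining IS_ENCODER before the include lets the preprocessor drop the
decoder-only branches from the encoder build entirely, so neither a
runtime check nor a decoder-only header is needed in mpegvideo_enc.c.
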
This approach also has the advantage that it is easy to adapt
mpv_reconstruct_mb_internal() to use different structures
for decoders and encoders (e.g. the check for whether
a macroblock needs to be processed at all is encoder-only and
uses MpegEncContext elements that make no sense for decoders
and should not be part of their context).

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
---
 libavcodec/mpeg_er.c                     |   1 +
 libavcodec/mpegvideo.c                   | 737 -----------------------
 libavcodec/mpegvideo.h                   |   2 -
 libavcodec/mpegvideo_dec.c               | 467 ++++++++++++++
 libavcodec/mpegvideo_enc.c               |  26 +-
 libavcodec/mpegvideodec.h                |   1 +
 libavcodec/mpv_reconstruct_mb_template.c | 300 +++++++++
 7 files changed, 792 insertions(+), 742 deletions(-)
 create mode 100644 libavcodec/mpv_reconstruct_mb_template.c

Patch

diff --git a/libavcodec/mpeg_er.c b/libavcodec/mpeg_er.c
index 02f407d8ea..8034963253 100644
--- a/libavcodec/mpeg_er.c
+++ b/libavcodec/mpeg_er.c
@@ -18,6 +18,7 @@ 
 
 #include "error_resilience.h"
 #include "mpegvideo.h"
+#include "mpegvideodec.h"
 #include "mpeg_er.h"
 
 static void set_erpic(ERPicture *dst, Picture *src)
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index 43f3ec5a47..850d8f2e94 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -27,8 +27,6 @@ 
  * The simplest mpeg encoder (well, it was the simplest!).
  */
 
-#include "config_components.h"
-
 #include "libavutil/attributes.h"
 #include "libavutil/avassert.h"
 #include "libavutil/imgutils.h"
@@ -42,12 +40,7 @@ 
 #include "mpeg_er.h"
 #include "mpegutils.h"
 #include "mpegvideo.h"
-#include "mpeg4videodec.h"
 #include "mpegvideodata.h"
-#include "qpeldsp.h"
-#include "threadframe.h"
-#include "wmv2dec.h"
-#include <limits.h>
 
 static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s,
                                    int16_t *block, int n, int qscale)
@@ -811,456 +804,6 @@  void ff_mpv_common_end(MpegEncContext *s)
 }
 
 
-static inline int hpel_motion_lowres(MpegEncContext *s,
-                                     uint8_t *dest, const uint8_t *src,
-                                     int field_based, int field_select,
-                                     int src_x, int src_y,
-                                     int width, int height, ptrdiff_t stride,
-                                     int h_edge_pos, int v_edge_pos,
-                                     int w, int h, const h264_chroma_mc_func *pix_op,
-                                     int motion_x, int motion_y)
-{
-    const int lowres   = s->avctx->lowres;
-    const int op_index = FFMIN(lowres, 3);
-    const int s_mask   = (2 << lowres) - 1;
-    int emu = 0;
-    int sx, sy;
-
-    if (s->quarter_sample) {
-        motion_x /= 2;
-        motion_y /= 2;
-    }
-
-    sx = motion_x & s_mask;
-    sy = motion_y & s_mask;
-    src_x += motion_x >> lowres + 1;
-    src_y += motion_y >> lowres + 1;
-
-    src   += src_y * stride + src_x;
-
-    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w,                 0) ||
-        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
-        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
-                                 s->linesize, s->linesize,
-                                 w + 1, (h + 1) << field_based,
-                                 src_x, src_y * (1 << field_based),
-                                 h_edge_pos, v_edge_pos);
-        src = s->sc.edge_emu_buffer;
-        emu = 1;
-    }
-
-    sx = (sx << 2) >> lowres;
-    sy = (sy << 2) >> lowres;
-    if (field_select)
-        src += s->linesize;
-    pix_op[op_index](dest, src, stride, h, sx, sy);
-    return emu;
-}
-
-/* apply one mpeg motion vector to the three components */
-static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
-                                                uint8_t *dest_y,
-                                                uint8_t *dest_cb,
-                                                uint8_t *dest_cr,
-                                                int field_based,
-                                                int bottom_field,
-                                                int field_select,
-                                                uint8_t *const *ref_picture,
-                                                const h264_chroma_mc_func *pix_op,
-                                                int motion_x, int motion_y,
-                                                int h, int mb_y)
-{
-    const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
-    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
-    ptrdiff_t uvlinesize, linesize;
-    const int lowres     = s->avctx->lowres;
-    const int op_index   = FFMIN(lowres-1+s->chroma_x_shift, 3);
-    const int block_s    = 8>>lowres;
-    const int s_mask     = (2 << lowres) - 1;
-    const int h_edge_pos = s->h_edge_pos >> lowres;
-    const int v_edge_pos = s->v_edge_pos >> lowres;
-    linesize   = s->current_picture.f->linesize[0] << field_based;
-    uvlinesize = s->current_picture.f->linesize[1] << field_based;
-
-    // FIXME obviously not perfect but qpel will not work in lowres anyway
-    if (s->quarter_sample) {
-        motion_x /= 2;
-        motion_y /= 2;
-    }
-
-    if(field_based){
-        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
-    }
-
-    sx = motion_x & s_mask;
-    sy = motion_y & s_mask;
-    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
-    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
-
-    if (s->out_format == FMT_H263) {
-        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
-        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
-        uvsrc_x = src_x >> 1;
-        uvsrc_y = src_y >> 1;
-    } else if (s->out_format == FMT_H261) {
-        // even chroma mv's are full pel in H261
-        mx      = motion_x / 4;
-        my      = motion_y / 4;
-        uvsx    = (2 * mx) & s_mask;
-        uvsy    = (2 * my) & s_mask;
-        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
-        uvsrc_y =    mb_y * block_s + (my >> lowres);
-    } else {
-        if(s->chroma_y_shift){
-            mx      = motion_x / 2;
-            my      = motion_y / 2;
-            uvsx    = mx & s_mask;
-            uvsy    = my & s_mask;
-            uvsrc_x = s->mb_x * block_s                 + (mx >> lowres + 1);
-            uvsrc_y =   (mb_y * block_s >> field_based) + (my >> lowres + 1);
-        } else {
-            if(s->chroma_x_shift){
-            //Chroma422
-                mx = motion_x / 2;
-                uvsx = mx & s_mask;
-                uvsy = motion_y & s_mask;
-                uvsrc_y = src_y;
-                uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
-            } else {
-            //Chroma444
-                uvsx = motion_x & s_mask;
-                uvsy = motion_y & s_mask;
-                uvsrc_x = src_x;
-                uvsrc_y = src_y;
-            }
-        }
-    }
-
-    ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
-    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
-    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
-
-    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s,       0) || uvsrc_y<0 ||
-        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
-        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
-                                 linesize >> field_based, linesize >> field_based,
-                                 17, 17 + field_based,
-                                src_x, src_y * (1 << field_based), h_edge_pos,
-                                v_edge_pos);
-        ptr_y = s->sc.edge_emu_buffer;
-        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
-            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
-            uint8_t *vbuf =ubuf + 10 * s->uvlinesize;
-            if (s->workaround_bugs & FF_BUG_IEDGE)
-                vbuf -= s->uvlinesize;
-            s->vdsp.emulated_edge_mc(ubuf,  ptr_cb,
-                                     uvlinesize >> field_based, uvlinesize >> field_based,
-                                     9, 9 + field_based,
-                                    uvsrc_x, uvsrc_y * (1 << field_based),
-                                    h_edge_pos >> 1, v_edge_pos >> 1);
-            s->vdsp.emulated_edge_mc(vbuf,  ptr_cr,
-                                     uvlinesize >> field_based,uvlinesize >> field_based,
-                                     9, 9 + field_based,
-                                    uvsrc_x, uvsrc_y * (1 << field_based),
-                                    h_edge_pos >> 1, v_edge_pos >> 1);
-            ptr_cb = ubuf;
-            ptr_cr = vbuf;
-        }
-    }
-
-    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
-    if (bottom_field) {
-        dest_y  += s->linesize;
-        dest_cb += s->uvlinesize;
-        dest_cr += s->uvlinesize;
-    }
-
-    if (field_select) {
-        ptr_y   += s->linesize;
-        ptr_cb  += s->uvlinesize;
-        ptr_cr  += s->uvlinesize;
-    }
-
-    sx = (sx << 2) >> lowres;
-    sy = (sy << 2) >> lowres;
-    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
-
-    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
-        int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
-        uvsx = (uvsx << 2) >> lowres;
-        uvsy = (uvsy << 2) >> lowres;
-        if (hc) {
-            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
-            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
-        }
-    }
-    // FIXME h261 lowres loop filter
-}
-
-static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
-                                            uint8_t *dest_cb, uint8_t *dest_cr,
-                                            uint8_t *const *ref_picture,
-                                            const h264_chroma_mc_func * pix_op,
-                                            int mx, int my)
-{
-    const int lowres     = s->avctx->lowres;
-    const int op_index   = FFMIN(lowres, 3);
-    const int block_s    = 8 >> lowres;
-    const int s_mask     = (2 << lowres) - 1;
-    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
-    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
-    int emu = 0, src_x, src_y, sx, sy;
-    ptrdiff_t offset;
-    const uint8_t *ptr;
-
-    if (s->quarter_sample) {
-        mx /= 2;
-        my /= 2;
-    }
-
-    /* In case of 8X8, we construct a single chroma motion vector
-       with a special rounding */
-    mx = ff_h263_round_chroma(mx);
-    my = ff_h263_round_chroma(my);
-
-    sx = mx & s_mask;
-    sy = my & s_mask;
-    src_x = s->mb_x * block_s + (mx >> lowres + 1);
-    src_y = s->mb_y * block_s + (my >> lowres + 1);
-
-    offset = src_y * s->uvlinesize + src_x;
-    ptr = ref_picture[1] + offset;
-    if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
-        (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
-        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
-                                 s->uvlinesize, s->uvlinesize,
-                                 9, 9,
-                                 src_x, src_y, h_edge_pos, v_edge_pos);
-        ptr = s->sc.edge_emu_buffer;
-        emu = 1;
-    }
-    sx = (sx << 2) >> lowres;
-    sy = (sy << 2) >> lowres;
-    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
-
-    ptr = ref_picture[2] + offset;
-    if (emu) {
-        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
-                                 s->uvlinesize, s->uvlinesize,
-                                 9, 9,
-                                 src_x, src_y, h_edge_pos, v_edge_pos);
-        ptr = s->sc.edge_emu_buffer;
-    }
-    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
-}
-
-/**
- * motion compensation of a single macroblock
- * @param s context
- * @param dest_y luma destination pointer
- * @param dest_cb chroma cb/u destination pointer
- * @param dest_cr chroma cr/v destination pointer
- * @param dir direction (0->forward, 1->backward)
- * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
- * @param pix_op halfpel motion compensation function (average or put normally)
- * the motion vectors are taken from s->mv and the MV type from s->mv_type
- */
-static inline void MPV_motion_lowres(MpegEncContext *s,
-                                     uint8_t *dest_y, uint8_t *dest_cb,
-                                     uint8_t *dest_cr,
-                                     int dir, uint8_t *const *ref_picture,
-                                     const h264_chroma_mc_func *pix_op)
-{
-    int mx, my;
-    int mb_x, mb_y, i;
-    const int lowres  = s->avctx->lowres;
-    const int block_s = 8 >>lowres;
-
-    mb_x = s->mb_x;
-    mb_y = s->mb_y;
-
-    switch (s->mv_type) {
-    case MV_TYPE_16X16:
-        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
-                           0, 0, 0,
-                           ref_picture, pix_op,
-                           s->mv[dir][0][0], s->mv[dir][0][1],
-                           2 * block_s, mb_y);
-        break;
-    case MV_TYPE_8X8:
-        mx = 0;
-        my = 0;
-        for (i = 0; i < 4; i++) {
-            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
-                               s->linesize) * block_s,
-                               ref_picture[0], 0, 0,
-                               (2 * mb_x + (i & 1)) * block_s,
-                               (2 * mb_y + (i >> 1)) * block_s,
-                               s->width, s->height, s->linesize,
-                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
-                               block_s, block_s, pix_op,
-                               s->mv[dir][i][0], s->mv[dir][i][1]);
-
-            mx += s->mv[dir][i][0];
-            my += s->mv[dir][i][1];
-        }
-
-        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
-            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
-                                     pix_op, mx, my);
-        break;
-    case MV_TYPE_FIELD:
-        if (s->picture_structure == PICT_FRAME) {
-            /* top field */
-            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
-                               1, 0, s->field_select[dir][0],
-                               ref_picture, pix_op,
-                               s->mv[dir][0][0], s->mv[dir][0][1],
-                               block_s, mb_y);
-            /* bottom field */
-            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
-                               1, 1, s->field_select[dir][1],
-                               ref_picture, pix_op,
-                               s->mv[dir][1][0], s->mv[dir][1][1],
-                               block_s, mb_y);
-        } else {
-            if (s->picture_structure != s->field_select[dir][0] + 1 &&
-                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
-                ref_picture = s->current_picture_ptr->f->data;
-
-            }
-            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
-                               0, 0, s->field_select[dir][0],
-                               ref_picture, pix_op,
-                               s->mv[dir][0][0],
-                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
-            }
-        break;
-    case MV_TYPE_16X8:
-        for (i = 0; i < 2; i++) {
-            uint8_t *const *ref2picture;
-
-            if (s->picture_structure == s->field_select[dir][i] + 1 ||
-                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
-                ref2picture = ref_picture;
-            } else {
-                ref2picture = s->current_picture_ptr->f->data;
-            }
-
-            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
-                               0, 0, s->field_select[dir][i],
-                               ref2picture, pix_op,
-                               s->mv[dir][i][0], s->mv[dir][i][1] +
-                               2 * block_s * i, block_s, mb_y >> 1);
-
-            dest_y  +=  2 * block_s *  s->linesize;
-            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
-            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
-        }
-        break;
-    case MV_TYPE_DMV:
-        if (s->picture_structure == PICT_FRAME) {
-            for (i = 0; i < 2; i++) {
-                int j;
-                for (j = 0; j < 2; j++) {
-                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
-                                       1, j, j ^ i,
-                                       ref_picture, pix_op,
-                                       s->mv[dir][2 * i + j][0],
-                                       s->mv[dir][2 * i + j][1],
-                                       block_s, mb_y);
-                }
-                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
-            }
-        } else {
-            for (i = 0; i < 2; i++) {
-                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
-                                   0, 0, s->picture_structure != i + 1,
-                                   ref_picture, pix_op,
-                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
-                                   2 * block_s, mb_y >> 1);
-
-                // after put we make avg of the same block
-                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
-
-                // opposite parity is always in the same
-                // frame if this is second field
-                if (!s->first_field) {
-                    ref_picture = s->current_picture_ptr->f->data;
-                }
-            }
-        }
-        break;
-    default:
-        av_assert2(0);
-    }
-}
-
-/**
- * find the lowest MB row referenced in the MVs
- */
-static int lowest_referenced_row(MpegEncContext *s, int dir)
-{
-    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
-    int my, off, i, mvs;
-
-    if (s->picture_structure != PICT_FRAME || s->mcsel)
-        goto unhandled;
-
-    switch (s->mv_type) {
-        case MV_TYPE_16X16:
-            mvs = 1;
-            break;
-        case MV_TYPE_16X8:
-            mvs = 2;
-            break;
-        case MV_TYPE_8X8:
-            mvs = 4;
-            break;
-        default:
-            goto unhandled;
-    }
-
-    for (i = 0; i < mvs; i++) {
-        my = s->mv[dir][i][1];
-        my_max = FFMAX(my_max, my);
-        my_min = FFMIN(my_min, my);
-    }
-
-    off = ((FFMAX(-my_min, my_max)<<qpel_shift) + 63) >> 6;
-
-    return av_clip(s->mb_y + off, 0, s->mb_height - 1);
-unhandled:
-    return s->mb_height-1;
-}
-
-/* put block[] to dest[] */
-static inline void put_dct(MpegEncContext *s,
-                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
-{
-    s->dct_unquantize_intra(s, block, i, qscale);
-    s->idsp.idct_put(dest, line_size, block);
-}
-
-/* add block[] to dest[] */
-static inline void add_dct(MpegEncContext *s,
-                           int16_t *block, int i, uint8_t *dest, int line_size)
-{
-    if (s->block_last_index[i] >= 0) {
-        s->idsp.idct_add(dest, line_size, block);
-    }
-}
-
-static inline void add_dequant_dct(MpegEncContext *s,
-                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
-{
-    if (s->block_last_index[i] >= 0) {
-        s->dct_unquantize_inter(s, block, i, qscale);
-
-        s->idsp.idct_add(dest, line_size, block);
-    }
-}
-
 /**
  * Clean dc, ac, coded_block for the current non-intra MB.
  */
@@ -1294,286 +837,6 @@  void ff_clean_intra_table_entries(MpegEncContext *s)
     s->mbintra_table[xy]= 0;
 }
 
-#define NOT_MPEG12        0
-#define MAY_BE_MPEG12     1
-#define DEFINITELY_MPEG12 2
-
-/* generic function called after a macroblock has been parsed by the
-   decoder or after it has been encoded by the encoder.
-
-   Important variables used:
-   s->mb_intra : true if intra macroblock
-   s->mv_dir   : motion vector direction
-   s->mv_type  : motion vector type
-   s->mv       : motion vector
-   s->interlaced_dct : true if interlaced dct used (mpeg2)
- */
-static av_always_inline
-void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64],
-                                 int lowres_flag, int is_mpeg12, int is_encoder)
-{
-#define IS_MPEG12(s) (is_mpeg12 == MAY_BE_MPEG12 ? ((s)->out_format == FMT_MPEG1) : is_mpeg12)
-    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
-
-    s->current_picture.qscale_table[mb_xy] = s->qscale;
-
-    /* update DC predictors for P macroblocks */
-    if (!s->mb_intra) {
-        if (is_mpeg12 != DEFINITELY_MPEG12 && (s->h263_pred || s->h263_aic)) {
-            if(s->mbintra_table[mb_xy])
-                ff_clean_intra_table_entries(s);
-        } else {
-            s->last_dc[0] =
-            s->last_dc[1] =
-            s->last_dc[2] = 128 << s->intra_dc_precision;
-        }
-    } else if (is_mpeg12 != DEFINITELY_MPEG12 && (s->h263_pred || s->h263_aic))
-        s->mbintra_table[mb_xy]=1;
-
-    if (!is_encoder || (s->avctx->flags & AV_CODEC_FLAG_PSNR) || s->frame_skip_threshold || s->frame_skip_factor ||
-        !((s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
-          s->avctx->mb_decision != FF_MB_DECISION_RD)) { // FIXME precalc
-        uint8_t *dest_y, *dest_cb, *dest_cr;
-        int dct_linesize, dct_offset;
-        op_pixels_func (*op_pix)[4];
-        qpel_mc_func (*op_qpix)[16];
-        const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
-        const int uvlinesize = s->current_picture.f->linesize[1];
-        const int readable = s->pict_type != AV_PICTURE_TYPE_B || is_encoder || s->avctx->draw_horiz_band || lowres_flag;
-        const int block_size= lowres_flag ? 8>>s->avctx->lowres : 8;
-
-        /* avoid copy if macroblock skipped in last frame too */
-        /* skip only during decoding as we might trash the buffers during encoding a bit */
-        if (!is_encoder) {
-            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
-
-            if (s->mb_skipped) {
-                s->mb_skipped= 0;
-                av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
-                *mbskip_ptr = 1;
-            } else if(!s->current_picture.reference) {
-                *mbskip_ptr = 1;
-            } else{
-                *mbskip_ptr = 0; /* not skipped */
-            }
-        }
-
-        dct_linesize = linesize << s->interlaced_dct;
-        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;
-
-        if(readable){
-            dest_y=  s->dest[0];
-            dest_cb= s->dest[1];
-            dest_cr= s->dest[2];
-        }else{
-            dest_y = s->sc.b_scratchpad;
-            dest_cb= s->sc.b_scratchpad+16*linesize;
-            dest_cr= s->sc.b_scratchpad+32*linesize;
-        }
-
-        if (!s->mb_intra) {
-            /* motion handling */
-            /* decoding or more than one mb_type (MC was already done otherwise) */
-            if (!is_encoder) {
-
-                if(HAVE_THREADS && s->avctx->active_thread_type&FF_THREAD_FRAME) {
-                    if (s->mv_dir & MV_DIR_FORWARD) {
-                        ff_thread_await_progress(&s->last_picture_ptr->tf,
-                                                 lowest_referenced_row(s, 0),
-                                                 0);
-                    }
-                    if (s->mv_dir & MV_DIR_BACKWARD) {
-                        ff_thread_await_progress(&s->next_picture_ptr->tf,
-                                                 lowest_referenced_row(s, 1),
-                                                 0);
-                    }
-                }
-
-                if(lowres_flag){
-                    const h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
-
-                    if (s->mv_dir & MV_DIR_FORWARD) {
-                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
-                        op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
-                    }
-                    if (s->mv_dir & MV_DIR_BACKWARD) {
-                        MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
-                    }
-                }else{
-                    op_qpix = s->me.qpel_put;
-                    if ((is_mpeg12 == DEFINITELY_MPEG12 || !s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
-                        op_pix = s->hdsp.put_pixels_tab;
-                    }else{
-                        op_pix = s->hdsp.put_no_rnd_pixels_tab;
-                    }
-                    if (s->mv_dir & MV_DIR_FORWARD) {
-                        ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
-                        op_pix = s->hdsp.avg_pixels_tab;
-                        op_qpix= s->me.qpel_avg;
-                    }
-                    if (s->mv_dir & MV_DIR_BACKWARD) {
-                        ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
-                    }
-                }
-
-            /* skip dequant / idct if we are really late ;) */
-            if(s->avctx->skip_idct){
-                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
-                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
-                   || s->avctx->skip_idct >= AVDISCARD_ALL)
-                    goto skip_idct;
-            }
-            }
-
-            /* add dct residue */
-            if (is_encoder || !(IS_MPEG12(s) || s->msmpeg4_version
-                                || (s->codec_id==AV_CODEC_ID_MPEG4 && !s->mpeg_quant))){
-                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
-                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
-                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
-                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
-
-                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
-                    if (s->chroma_y_shift){
-                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
-                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
-                    }else{
-                        dct_linesize >>= 1;
-                        dct_offset >>=1;
-                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
-                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
-                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
-                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
-                    }
-                }
-            } else if (is_mpeg12 == DEFINITELY_MPEG12 || (s->codec_id != AV_CODEC_ID_WMV2)){
-                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
-                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
-                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
-                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
-
-                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
-                    if(s->chroma_y_shift){//Chroma420
-                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
-                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
-                    }else{
-                        //chroma422
-                        dct_linesize = uvlinesize << s->interlaced_dct;
-                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
-
-                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
-                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
-                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
-                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
-                        if(!s->chroma_x_shift){//Chroma444
-                            add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
-                            add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
-                            add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
-                            add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
-                        }
-                    }
-                }//fi gray
-            } else if (CONFIG_WMV2_DECODER) {
-                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
-            }
-        } else {
-            /* Only MPEG-4 Simple Studio Profile is supported in > 8-bit mode.
-               TODO: Integrate 10-bit properly into mpegvideo.c so that ER works properly */
-            if (!is_encoder && is_mpeg12 != DEFINITELY_MPEG12 && CONFIG_MPEG4_DECODER &&
-                /* s->codec_id == AV_CODEC_ID_MPEG4 && */
-                s->avctx->bits_per_raw_sample > 8) {
-                ff_mpeg4_decode_studio(s, dest_y, dest_cb, dest_cr, block_size,
-                                       uvlinesize, dct_linesize, dct_offset);
-            }
-            /* dct only in intra block */
-            else if (is_encoder || !IS_MPEG12(s)) {
-                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
-                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
-                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
-                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
-
-                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
-                    if(s->chroma_y_shift){
-                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
-                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
-                    }else{
-                        dct_offset >>=1;
-                        dct_linesize >>=1;
-                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
-                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
-                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
-                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
-                    }
-                }
-            }else{
-                s->idsp.idct_put(dest_y,                           dct_linesize, block[0]);
-                s->idsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
-                s->idsp.idct_put(dest_y + dct_offset,              dct_linesize, block[2]);
-                s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
-
-                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
-                    if(s->chroma_y_shift){
-                        s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
-                        s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
-                    }else{
-
-                        dct_linesize = uvlinesize << s->interlaced_dct;
-                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
-
-                        s->idsp.idct_put(dest_cb,              dct_linesize, block[4]);
-                        s->idsp.idct_put(dest_cr,              dct_linesize, block[5]);
-                        s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
-                        s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
-                        if(!s->chroma_x_shift){//Chroma444
-                            s->idsp.idct_put(dest_cb + block_size,              dct_linesize, block[8]);
-                            s->idsp.idct_put(dest_cr + block_size,              dct_linesize, block[9]);
-                            s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
-                            s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
-                        }
-                    }
-                }//gray
-            }
-        }
-skip_idct:
-        if(!readable){
-            s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y ,   linesize,16);
-            if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
-                s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize,16 >> s->chroma_y_shift);
-                s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize,16 >> s->chroma_y_shift);
-            }
-        }
-    }
-}
-
-void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
-{
-    if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
-       /* print DCT coefficients */
-       av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
-       for (int i = 0; i < 6; i++) {
-           for (int j = 0; j < 64; j++) {
-               av_log(s->avctx, AV_LOG_DEBUG, "%5d",
-                      block[i][s->idsp.idct_permutation[j]]);
-           }
-           av_log(s->avctx, AV_LOG_DEBUG, "\n");
-       }
-    }
-
-    if (s->encoding) {
-        mpv_reconstruct_mb_internal(s, block, 0, MAY_BE_MPEG12, 1);
-    } else if (!s->avctx->lowres) {
-#if !CONFIG_SMALL
-        if (s->out_format == FMT_MPEG1)
-            mpv_reconstruct_mb_internal(s, block, 0, DEFINITELY_MPEG12, 0);
-        else
-            mpv_reconstruct_mb_internal(s, block, 0, NOT_MPEG12, 0);
-#else
-        mpv_reconstruct_mb_internal(s, block, 0, MAY_BE_MPEG12, 0);
-#endif
-    } else
-        mpv_reconstruct_mb_internal(s, block, 1, MAY_BE_MPEG12, 0);
-}
-
 void ff_init_block_index(MpegEncContext *s){ //FIXME maybe rename
     const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
     const int uvlinesize = s->current_picture.f->linesize[1];
diff --git a/libavcodec/mpegvideo.h b/libavcodec/mpegvideo.h
index 1ddf8034aa..6e458b0379 100644
--- a/libavcodec/mpegvideo.h
+++ b/libavcodec/mpegvideo.h
@@ -582,8 +582,6 @@  void ff_mpv_free_context_frame(MpegEncContext *s);
 
 void ff_mpv_common_end(MpegEncContext *s);
 
-void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64]);
-
 void ff_clean_intra_table_entries(MpegEncContext *s);
 
 int ff_update_duplicate_context(MpegEncContext *dst, const MpegEncContext *src);
diff --git a/libavcodec/mpegvideo_dec.c b/libavcodec/mpegvideo_dec.c
index 6d1edc027a..c2d6d8bdd7 100644
--- a/libavcodec/mpegvideo_dec.c
+++ b/libavcodec/mpegvideo_dec.c
@@ -22,17 +22,22 @@ 
 
 #include <limits.h>
 
+#include "config_components.h"
+
 #include "libavutil/avassert.h"
 #include "libavutil/imgutils.h"
 #include "libavutil/internal.h"
 #include "libavutil/video_enc_params.h"
 
 #include "avcodec.h"
+#include "h264chroma.h"
 #include "internal.h"
 #include "mpegutils.h"
 #include "mpegvideo.h"
 #include "mpegvideodec.h"
+#include "mpeg4videodec.h"
 #include "threadframe.h"
+#include "wmv2dec.h"
 
 void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx)
 {
@@ -563,3 +568,465 @@  void ff_mpv_report_decode_progress(MpegEncContext *s)
     if (s->pict_type != AV_PICTURE_TYPE_B && !s->partitioned_frame && !s->er.error_occurred)
         ff_thread_report_progress(&s->current_picture_ptr->tf, s->mb_y, 0);
 }
+
+
+static inline int hpel_motion_lowres(MpegEncContext *s,
+                                     uint8_t *dest, const uint8_t *src,
+                                     int field_based, int field_select,
+                                     int src_x, int src_y,
+                                     int width, int height, ptrdiff_t stride,
+                                     int h_edge_pos, int v_edge_pos,
+                                     int w, int h, const h264_chroma_mc_func *pix_op,
+                                     int motion_x, int motion_y)
+{
+    const int lowres   = s->avctx->lowres;
+    const int op_index = FFMIN(lowres, 3);
+    const int s_mask   = (2 << lowres) - 1;
+    int emu = 0;
+    int sx, sy;
+
+    if (s->quarter_sample) {
+        motion_x /= 2;
+        motion_y /= 2;
+    }
+
+    sx = motion_x & s_mask;
+    sy = motion_y & s_mask;
+    src_x += motion_x >> lowres + 1;
+    src_y += motion_y >> lowres + 1;
+
+    src   += src_y * stride + src_x;
+
+    if ((unsigned)src_x > FFMAX( h_edge_pos - (!!sx) - w,                 0) ||
+        (unsigned)src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, src,
+                                 s->linesize, s->linesize,
+                                 w + 1, (h + 1) << field_based,
+                                 src_x, src_y * (1 << field_based),
+                                 h_edge_pos, v_edge_pos);
+        src = s->sc.edge_emu_buffer;
+        emu = 1;
+    }
+
+    sx = (sx << 2) >> lowres;
+    sy = (sy << 2) >> lowres;
+    if (field_select)
+        src += s->linesize;
+    pix_op[op_index](dest, src, stride, h, sx, sy);
+    return emu;
+}
+
+/* apply one mpeg motion vector to the three components */
+static av_always_inline void mpeg_motion_lowres(MpegEncContext *s,
+                                                uint8_t *dest_y,
+                                                uint8_t *dest_cb,
+                                                uint8_t *dest_cr,
+                                                int field_based,
+                                                int bottom_field,
+                                                int field_select,
+                                                uint8_t *const *ref_picture,
+                                                const h264_chroma_mc_func *pix_op,
+                                                int motion_x, int motion_y,
+                                                int h, int mb_y)
+{
+    const uint8_t *ptr_y, *ptr_cb, *ptr_cr;
+    int mx, my, src_x, src_y, uvsrc_x, uvsrc_y, sx, sy, uvsx, uvsy;
+    ptrdiff_t uvlinesize, linesize;
+    const int lowres     = s->avctx->lowres;
+    const int op_index   = FFMIN(lowres - 1 + s->chroma_x_shift, 3);
+    const int block_s    = 8 >> lowres;
+    const int s_mask     = (2 << lowres) - 1;
+    const int h_edge_pos = s->h_edge_pos >> lowres;
+    const int v_edge_pos = s->v_edge_pos >> lowres;
+    linesize   = s->current_picture.f->linesize[0] << field_based;
+    uvlinesize = s->current_picture.f->linesize[1] << field_based;
+
+    // FIXME obviously not perfect but qpel will not work in lowres anyway
+    if (s->quarter_sample) {
+        motion_x /= 2;
+        motion_y /= 2;
+    }
+
+    if (field_based) {
+        motion_y += (bottom_field - field_select)*((1 << lowres)-1);
+    }
+
+    sx = motion_x & s_mask;
+    sy = motion_y & s_mask;
+    src_x = s->mb_x * 2 * block_s + (motion_x >> lowres + 1);
+    src_y = (mb_y * 2 * block_s >> field_based) + (motion_y >> lowres + 1);
+
+    if (s->out_format == FMT_H263) {
+        uvsx    = ((motion_x >> 1) & s_mask) | (sx & 1);
+        uvsy    = ((motion_y >> 1) & s_mask) | (sy & 1);
+        uvsrc_x = src_x >> 1;
+        uvsrc_y = src_y >> 1;
+    } else if (s->out_format == FMT_H261) {
+        // even chroma mv's are full pel in H261
+        mx      = motion_x / 4;
+        my      = motion_y / 4;
+        uvsx    = (2 * mx) & s_mask;
+        uvsy    = (2 * my) & s_mask;
+        uvsrc_x = s->mb_x * block_s + (mx >> lowres);
+        uvsrc_y =    mb_y * block_s + (my >> lowres);
+    } else {
+        if (s->chroma_y_shift) {
+            mx      = motion_x / 2;
+            my      = motion_y / 2;
+            uvsx    = mx & s_mask;
+            uvsy    = my & s_mask;
+            uvsrc_x = s->mb_x * block_s                 + (mx >> lowres + 1);
+            uvsrc_y =   (mb_y * block_s >> field_based) + (my >> lowres + 1);
+        } else {
+            if (s->chroma_x_shift) {
+            //Chroma422
+                mx = motion_x / 2;
+                uvsx = mx & s_mask;
+                uvsy = motion_y & s_mask;
+                uvsrc_y = src_y;
+                uvsrc_x = s->mb_x*block_s               + (mx >> (lowres+1));
+            } else {
+            //Chroma444
+                uvsx = motion_x & s_mask;
+                uvsy = motion_y & s_mask;
+                uvsrc_x = src_x;
+                uvsrc_y = src_y;
+            }
+        }
+    }
+
+    ptr_y  = ref_picture[0] + src_y   * linesize   + src_x;
+    ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x;
+    ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x;
+
+    if ((unsigned) src_x > FFMAX( h_edge_pos - (!!sx) - 2 * block_s,       0) || uvsrc_y<0 ||
+        (unsigned) src_y > FFMAX((v_edge_pos >> field_based) - (!!sy) - h, 0)) {
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr_y,
+                                 linesize >> field_based, linesize >> field_based,
+                                 17, 17 + field_based,
+                                src_x, src_y * (1 << field_based), h_edge_pos,
+                                v_edge_pos);
+        ptr_y = s->sc.edge_emu_buffer;
+        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
+            uint8_t *ubuf = s->sc.edge_emu_buffer + 18 * s->linesize;
+            uint8_t *vbuf =ubuf + 10 * s->uvlinesize;
+            if (s->workaround_bugs & FF_BUG_IEDGE)
+                vbuf -= s->uvlinesize;
+            s->vdsp.emulated_edge_mc(ubuf,  ptr_cb,
+                                     uvlinesize >> field_based, uvlinesize >> field_based,
+                                     9, 9 + field_based,
+                                    uvsrc_x, uvsrc_y * (1 << field_based),
+                                    h_edge_pos >> 1, v_edge_pos >> 1);
+            s->vdsp.emulated_edge_mc(vbuf,  ptr_cr,
+                                     uvlinesize >> field_based,uvlinesize >> field_based,
+                                     9, 9 + field_based,
+                                    uvsrc_x, uvsrc_y * (1 << field_based),
+                                    h_edge_pos >> 1, v_edge_pos >> 1);
+            ptr_cb = ubuf;
+            ptr_cr = vbuf;
+        }
+    }
+
+    // FIXME use this for field pix too instead of the obnoxious hack which changes picture.f->data
+    if (bottom_field) {
+        dest_y  += s->linesize;
+        dest_cb += s->uvlinesize;
+        dest_cr += s->uvlinesize;
+    }
+
+    if (field_select) {
+        ptr_y   += s->linesize;
+        ptr_cb  += s->uvlinesize;
+        ptr_cr  += s->uvlinesize;
+    }
+
+    sx = (sx << 2) >> lowres;
+    sy = (sy << 2) >> lowres;
+    pix_op[lowres - 1](dest_y, ptr_y, linesize, h, sx, sy);
+
+    if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
+        int hc = s->chroma_y_shift ? (h+1-bottom_field)>>1 : h;
+        uvsx = (uvsx << 2) >> lowres;
+        uvsy = (uvsy << 2) >> lowres;
+        if (hc) {
+            pix_op[op_index](dest_cb, ptr_cb, uvlinesize, hc, uvsx, uvsy);
+            pix_op[op_index](dest_cr, ptr_cr, uvlinesize, hc, uvsx, uvsy);
+        }
+    }
+    // FIXME h261 lowres loop filter
+}
+
+static inline void chroma_4mv_motion_lowres(MpegEncContext *s,
+                                            uint8_t *dest_cb, uint8_t *dest_cr,
+                                            uint8_t *const *ref_picture,
+                                            const h264_chroma_mc_func * pix_op,
+                                            int mx, int my)
+{
+    const int lowres     = s->avctx->lowres;
+    const int op_index   = FFMIN(lowres, 3);
+    const int block_s    = 8 >> lowres;
+    const int s_mask     = (2 << lowres) - 1;
+    const int h_edge_pos = s->h_edge_pos >> lowres + 1;
+    const int v_edge_pos = s->v_edge_pos >> lowres + 1;
+    int emu = 0, src_x, src_y, sx, sy;
+    ptrdiff_t offset;
+    const uint8_t *ptr;
+
+    if (s->quarter_sample) {
+        mx /= 2;
+        my /= 2;
+    }
+
+    /* In case of 8X8, we construct a single chroma motion vector
+       with a special rounding */
+    mx = ff_h263_round_chroma(mx);
+    my = ff_h263_round_chroma(my);
+
+    sx = mx & s_mask;
+    sy = my & s_mask;
+    src_x = s->mb_x * block_s + (mx >> lowres + 1);
+    src_y = s->mb_y * block_s + (my >> lowres + 1);
+
+    offset = src_y * s->uvlinesize + src_x;
+    ptr = ref_picture[1] + offset;
+    if ((unsigned) src_x > FFMAX(h_edge_pos - (!!sx) - block_s, 0) ||
+        (unsigned) src_y > FFMAX(v_edge_pos - (!!sy) - block_s, 0)) {
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
+                                 s->uvlinesize, s->uvlinesize,
+                                 9, 9,
+                                 src_x, src_y, h_edge_pos, v_edge_pos);
+        ptr = s->sc.edge_emu_buffer;
+        emu = 1;
+    }
+    sx = (sx << 2) >> lowres;
+    sy = (sy << 2) >> lowres;
+    pix_op[op_index](dest_cb, ptr, s->uvlinesize, block_s, sx, sy);
+
+    ptr = ref_picture[2] + offset;
+    if (emu) {
+        s->vdsp.emulated_edge_mc(s->sc.edge_emu_buffer, ptr,
+                                 s->uvlinesize, s->uvlinesize,
+                                 9, 9,
+                                 src_x, src_y, h_edge_pos, v_edge_pos);
+        ptr = s->sc.edge_emu_buffer;
+    }
+    pix_op[op_index](dest_cr, ptr, s->uvlinesize, block_s, sx, sy);
+}
+
+/**
+ * motion compensation of a single macroblock
+ * @param s context
+ * @param dest_y luma destination pointer
+ * @param dest_cb chroma cb/u destination pointer
+ * @param dest_cr chroma cr/v destination pointer
+ * @param dir direction (0->forward, 1->backward)
+ * @param ref_picture array[3] of pointers to the 3 planes of the reference picture
+ * @param pix_op halfpel motion compensation function (average or put normally)
+ * the motion vectors are taken from s->mv and the MV type from s->mv_type
+ */
+static inline void MPV_motion_lowres(MpegEncContext *s,
+                                     uint8_t *dest_y, uint8_t *dest_cb,
+                                     uint8_t *dest_cr,
+                                     int dir, uint8_t *const *ref_picture,
+                                     const h264_chroma_mc_func *pix_op)
+{
+    int mx, my;
+    int mb_x, mb_y;
+    const int lowres  = s->avctx->lowres;
+    const int block_s = 8 >>lowres;
+
+    mb_x = s->mb_x;
+    mb_y = s->mb_y;
+
+    switch (s->mv_type) {
+    case MV_TYPE_16X16:
+        mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+                           0, 0, 0,
+                           ref_picture, pix_op,
+                           s->mv[dir][0][0], s->mv[dir][0][1],
+                           2 * block_s, mb_y);
+        break;
+    case MV_TYPE_8X8:
+        mx = 0;
+        my = 0;
+        for (int i = 0; i < 4; i++) {
+            hpel_motion_lowres(s, dest_y + ((i & 1) + (i >> 1) *
+                               s->linesize) * block_s,
+                               ref_picture[0], 0, 0,
+                               (2 * mb_x + (i & 1)) * block_s,
+                               (2 * mb_y + (i >> 1)) * block_s,
+                               s->width, s->height, s->linesize,
+                               s->h_edge_pos >> lowres, s->v_edge_pos >> lowres,
+                               block_s, block_s, pix_op,
+                               s->mv[dir][i][0], s->mv[dir][i][1]);
+
+            mx += s->mv[dir][i][0];
+            my += s->mv[dir][i][1];
+        }
+
+        if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY))
+            chroma_4mv_motion_lowres(s, dest_cb, dest_cr, ref_picture,
+                                     pix_op, mx, my);
+        break;
+    case MV_TYPE_FIELD:
+        if (s->picture_structure == PICT_FRAME) {
+            /* top field */
+            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+                               1, 0, s->field_select[dir][0],
+                               ref_picture, pix_op,
+                               s->mv[dir][0][0], s->mv[dir][0][1],
+                               block_s, mb_y);
+            /* bottom field */
+            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+                               1, 1, s->field_select[dir][1],
+                               ref_picture, pix_op,
+                               s->mv[dir][1][0], s->mv[dir][1][1],
+                               block_s, mb_y);
+        } else {
+            if (s->picture_structure != s->field_select[dir][0] + 1 &&
+                s->pict_type != AV_PICTURE_TYPE_B && !s->first_field) {
+                ref_picture = s->current_picture_ptr->f->data;
+
+            }
+            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+                               0, 0, s->field_select[dir][0],
+                               ref_picture, pix_op,
+                               s->mv[dir][0][0],
+                               s->mv[dir][0][1], 2 * block_s, mb_y >> 1);
+            }
+        break;
+    case MV_TYPE_16X8:
+        for (int i = 0; i < 2; i++) {
+            uint8_t *const *ref2picture;
+
+            if (s->picture_structure == s->field_select[dir][i] + 1 ||
+                s->pict_type == AV_PICTURE_TYPE_B || s->first_field) {
+                ref2picture = ref_picture;
+            } else {
+                ref2picture = s->current_picture_ptr->f->data;
+            }
+
+            mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+                               0, 0, s->field_select[dir][i],
+                               ref2picture, pix_op,
+                               s->mv[dir][i][0], s->mv[dir][i][1] +
+                               2 * block_s * i, block_s, mb_y >> 1);
+
+            dest_y  +=  2 * block_s *  s->linesize;
+            dest_cb += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
+            dest_cr += (2 * block_s >> s->chroma_y_shift) * s->uvlinesize;
+        }
+        break;
+    case MV_TYPE_DMV:
+        if (s->picture_structure == PICT_FRAME) {
+            for (int i = 0; i < 2; i++) {
+                for (int j = 0; j < 2; j++) {
+                    mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+                                       1, j, j ^ i,
+                                       ref_picture, pix_op,
+                                       s->mv[dir][2 * i + j][0],
+                                       s->mv[dir][2 * i + j][1],
+                                       block_s, mb_y);
+                }
+                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
+            }
+        } else {
+            for (int i = 0; i < 2; i++) {
+                mpeg_motion_lowres(s, dest_y, dest_cb, dest_cr,
+                                   0, 0, s->picture_structure != i + 1,
+                                   ref_picture, pix_op,
+                                   s->mv[dir][2 * i][0],s->mv[dir][2 * i][1],
+                                   2 * block_s, mb_y >> 1);
+
+                // after put we make avg of the same block
+                pix_op = s->h264chroma.avg_h264_chroma_pixels_tab;
+
+                // opposite parity is always in the same
+                // frame if this is second field
+                if (!s->first_field) {
+                    ref_picture = s->current_picture_ptr->f->data;
+                }
+            }
+        }
+        break;
+    default:
+        av_assert2(0);
+    }
+}
+
+/**
+ * find the lowest MB row referenced in the MVs
+ */
+static int lowest_referenced_row(MpegEncContext *s, int dir)
+{
+    int my_max = INT_MIN, my_min = INT_MAX, qpel_shift = !s->quarter_sample;
+    int off, mvs;
+
+    if (s->picture_structure != PICT_FRAME || s->mcsel)
+        goto unhandled;
+
+    switch (s->mv_type) {
+        case MV_TYPE_16X16:
+            mvs = 1;
+            break;
+        case MV_TYPE_16X8:
+            mvs = 2;
+            break;
+        case MV_TYPE_8X8:
+            mvs = 4;
+            break;
+        default:
+            goto unhandled;
+    }
+
+    for (int i = 0; i < mvs; i++) {
+        int my = s->mv[dir][i][1];
+        my_max = FFMAX(my_max, my);
+        my_min = FFMIN(my_min, my);
+    }
+
+    off = ((FFMAX(-my_min, my_max) << qpel_shift) + 63) >> 6;
+
+    return av_clip(s->mb_y + off, 0, s->mb_height - 1);
+unhandled:
+    return s->mb_height - 1;
+}
+
+/* add block[] to dest[] */
+static inline void add_dct(MpegEncContext *s,
+                           int16_t *block, int i, uint8_t *dest, int line_size)
+{
+    if (s->block_last_index[i] >= 0) {
+        s->idsp.idct_add(dest, line_size, block);
+    }
+}
+
+#define IS_ENCODER 0
+#include "mpv_reconstruct_mb_template.c"
+
+void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
+{
+    if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
+       /* print DCT coefficients */
+       av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
+       for (int i = 0; i < 6; i++) {
+           for (int j = 0; j < 64; j++) {
+               av_log(s->avctx, AV_LOG_DEBUG, "%5d",
+                      block[i][s->idsp.idct_permutation[j]]);
+           }
+           av_log(s->avctx, AV_LOG_DEBUG, "\n");
+       }
+    }
+
+    if (!s->avctx->lowres) {
+#if !CONFIG_SMALL
+        if (s->out_format == FMT_MPEG1)
+            mpv_reconstruct_mb_internal(s, block, 0, DEFINITELY_MPEG12);
+        else
+            mpv_reconstruct_mb_internal(s, block, 0, NOT_MPEG12);
+#else
+        mpv_reconstruct_mb_internal(s, block, 0, MAY_BE_MPEG12);
+#endif
+    } else
+        mpv_reconstruct_mb_internal(s, block, 1, MAY_BE_MPEG12);
+}
diff --git a/libavcodec/mpegvideo_enc.c b/libavcodec/mpegvideo_enc.c
index 2cbb856866..ce363a585d 100644
--- a/libavcodec/mpegvideo_enc.c
+++ b/libavcodec/mpegvideo_enc.c
@@ -1017,6 +1017,26 @@  av_cold int ff_mpv_encode_end(AVCodecContext *avctx)
     return 0;
 }
 
+#define IS_ENCODER 1
+#include "mpv_reconstruct_mb_template.c"
+
+static void mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64])
+{
+    if (s->avctx->debug & FF_DEBUG_DCT_COEFF) {
+       /* print DCT coefficients */
+       av_log(s->avctx, AV_LOG_DEBUG, "DCT coeffs of MB at %dx%d:\n", s->mb_x, s->mb_y);
+       for (int i = 0; i < 6; i++) {
+           for (int j = 0; j < 64; j++) {
+               av_log(s->avctx, AV_LOG_DEBUG, "%5d",
+                      block[i][s->idsp.idct_permutation[j]]);
+           }
+           av_log(s->avctx, AV_LOG_DEBUG, "\n");
+       }
+    }
+
+    mpv_reconstruct_mb_internal(s, block, 0, MAY_BE_MPEG12);
+}
+
 static int get_sae(const uint8_t *src, int ref, int stride)
 {
     int x,y;
@@ -2577,7 +2597,7 @@  static inline void encode_mb_hq(MpegEncContext *s, MpegEncContext *backup, MpegE
     }
 
     if(s->avctx->mb_decision == FF_MB_DECISION_RD){
-        ff_mpv_reconstruct_mb(s, s->block);
+        mpv_reconstruct_mb(s, s->block);
 
         score *= s->lambda2;
         score += sse_mb(s) << FF_LAMBDA_SHIFT;
@@ -3287,7 +3307,7 @@  static int encode_thread(AVCodecContext *c, void *arg){
                 }
 
                 if(s->avctx->mb_decision == FF_MB_DECISION_BITS)
-                    ff_mpv_reconstruct_mb(s, s->block);
+                    mpv_reconstruct_mb(s, s->block);
             } else {
                 int motion_x = 0, motion_y = 0;
                 s->mv_type=MV_TYPE_16X16;
@@ -3406,7 +3426,7 @@  static int encode_thread(AVCodecContext *c, void *arg){
                     s->out_format == FMT_H263 && s->pict_type!=AV_PICTURE_TYPE_B)
                     ff_h263_update_motion_val(s);
 
-                ff_mpv_reconstruct_mb(s, s->block);
+                mpv_reconstruct_mb(s, s->block);
             }
 
             /* clean the MV table in IPS frames for direct mode in B-frames */
diff --git a/libavcodec/mpegvideodec.h b/libavcodec/mpegvideodec.h
index 250034b486..0b841bc1a1 100644
--- a/libavcodec/mpegvideodec.h
+++ b/libavcodec/mpegvideodec.h
@@ -50,6 +50,7 @@  void ff_mpv_decode_init(MpegEncContext *s, AVCodecContext *avctx);
 int ff_mpv_common_frame_size_change(MpegEncContext *s);
 
 int ff_mpv_frame_start(MpegEncContext *s, AVCodecContext *avctx);
+void ff_mpv_reconstruct_mb(MpegEncContext *s, int16_t block[12][64]);
 void ff_mpv_report_decode_progress(MpegEncContext *s);
 void ff_mpv_frame_end(MpegEncContext *s);
 
diff --git a/libavcodec/mpv_reconstruct_mb_template.c b/libavcodec/mpv_reconstruct_mb_template.c
new file mode 100644
index 0000000000..7d74ec1f5c
--- /dev/null
+++ b/libavcodec/mpv_reconstruct_mb_template.c
@@ -0,0 +1,300 @@ 
+/*
+ * MPEG macroblock reconstruction
+ * Copyright (c) 2000,2001 Fabrice Bellard
+ * Copyright (c) 2002-2004 Michael Niedermayer <michaelni@gmx.at>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#define NOT_MPEG12        0
+#define MAY_BE_MPEG12     1
+#define DEFINITELY_MPEG12 2
+
+/* put block[] to dest[] */
+static inline void put_dct(MpegEncContext *s,
+                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
+{
+    s->dct_unquantize_intra(s, block, i, qscale);
+    s->idsp.idct_put(dest, line_size, block);
+}
+
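+/* dequantize block[] and add it to dest[], if any coefficient is coded */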
+static inline void add_dequant_dct(MpegEncContext *s,
+                           int16_t *block, int i, uint8_t *dest, int line_size, int qscale)
+{
+    if (s->block_last_index[i] >= 0) {
+        s->dct_unquantize_inter(s, block, i, qscale);
+
+        s->idsp.idct_add(dest, line_size, block);
+    }
+}
+
+/* generic function called after a macroblock has been parsed by the
+   decoder or after it has been encoded by the encoder.
+
+   Important variables used:
+   s->mb_intra : true if intra macroblock
+   s->mv_dir   : motion vector direction
+   s->mv_type  : motion vector type
+   s->mv       : motion vector
+   s->interlaced_dct : true if interlaced DCT is used (MPEG-2)
+ */
+static av_always_inline
+void mpv_reconstruct_mb_internal(MpegEncContext *s, int16_t block[12][64],
+                                 int lowres_flag, int is_mpeg12)
+{
+#define IS_MPEG12(s) (is_mpeg12 == MAY_BE_MPEG12 ? ((s)->out_format == FMT_MPEG1) : is_mpeg12)
+    const int mb_xy = s->mb_y * s->mb_stride + s->mb_x;
+
+    s->current_picture.qscale_table[mb_xy] = s->qscale;
+
+    /* update DC predictors for P macroblocks */
+    if (!s->mb_intra) {
+        if (is_mpeg12 != DEFINITELY_MPEG12 && (s->h263_pred || s->h263_aic)) {
+            if (s->mbintra_table[mb_xy])
+                ff_clean_intra_table_entries(s);
+        } else {
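+            /* reset the DC predictors to the mid-grey value
+             * (128 << intra_dc_precision) */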
+            s->last_dc[0] =
+            s->last_dc[1] =
+            s->last_dc[2] = 128 << s->intra_dc_precision;
+        }
+    } else if (is_mpeg12 != DEFINITELY_MPEG12 && (s->h263_pred || s->h263_aic))
+        s->mbintra_table[mb_xy] = 1;
+
+#if IS_ENCODER
+    if ((s->avctx->flags & AV_CODEC_FLAG_PSNR) || s->frame_skip_threshold || s->frame_skip_factor ||
+        !((s->intra_only || s->pict_type == AV_PICTURE_TYPE_B) &&
+          s->avctx->mb_decision != FF_MB_DECISION_RD))  // FIXME precalc
+#endif /* IS_ENCODER */
+    {
+        uint8_t *dest_y, *dest_cb, *dest_cr;
+        int dct_linesize, dct_offset;
+        const int linesize   = s->current_picture.f->linesize[0]; //not s->linesize as this would be wrong for field pics
+        const int uvlinesize = s->current_picture.f->linesize[1];
+        const int readable   = IS_ENCODER || lowres_flag || s->pict_type != AV_PICTURE_TYPE_B || s->avctx->draw_horiz_band;
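+        /* with lowres, each 8x8 block is reconstructed at (8 >> lowres) pixels per dimension */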
+        const int block_size = lowres_flag ? 8 >> s->avctx->lowres : 8;
+
+        /* avoid the copy if the macroblock was skipped in the last frame, too */
+        /* only take this shortcut while decoding, as encoding may slightly trash the buffers */
+        if (!IS_ENCODER) {
+            uint8_t *mbskip_ptr = &s->mbskip_table[mb_xy];
+
+            if (s->mb_skipped) {
+                s->mb_skipped = 0;
+                av_assert2(s->pict_type!=AV_PICTURE_TYPE_I);
+                *mbskip_ptr = 1;
+            } else if(!s->current_picture.reference) {
+                *mbskip_ptr = 1;
+            } else{
+                *mbskip_ptr = 0; /* not skipped */
+            }
+        }
+
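+        /* with interlaced DCT each block holds one field, so lines within a
+         * block are two frame lines apart and the second block row starts on
+         * the opposite field, one frame line down */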
+        dct_linesize = linesize << s->interlaced_dct;
+        dct_offset   = s->interlaced_dct ? linesize : linesize * block_size;
+
+        if (readable) {
+            dest_y  = s->dest[0];
+            dest_cb = s->dest[1];
+            dest_cr = s->dest[2];
+        } else {
+            dest_y  = s->sc.b_scratchpad;
+            dest_cb = s->sc.b_scratchpad + 16 * linesize;
+            dest_cr = s->sc.b_scratchpad + 32 * linesize;
+        }
+
+        if (!s->mb_intra) {
+            /* motion handling */
+            /* decoding or more than one mb_type (MC was already done otherwise) */
+
+#if !IS_ENCODER
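+            /* with frame threading, wait until the reference frames have been
+             * decoded at least up to the lowest row these MVs can reference */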
+            if (HAVE_THREADS && s->avctx->active_thread_type & FF_THREAD_FRAME) {
+                if (s->mv_dir & MV_DIR_FORWARD) {
+                    ff_thread_await_progress(&s->last_picture_ptr->tf,
+                                             lowest_referenced_row(s, 0), 0);
+                }
+                if (s->mv_dir & MV_DIR_BACKWARD) {
+                    ff_thread_await_progress(&s->next_picture_ptr->tf,
+                                             lowest_referenced_row(s, 1), 0);
+                }
+            }
+
+            if (lowres_flag) {
+                const h264_chroma_mc_func *op_pix = s->h264chroma.put_h264_chroma_pixels_tab;
+
+                if (s->mv_dir & MV_DIR_FORWARD) {
+                    MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix);
+                    op_pix = s->h264chroma.avg_h264_chroma_pixels_tab;
+                }
+                if (s->mv_dir & MV_DIR_BACKWARD) {
+                    MPV_motion_lowres(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix);
+                }
+            } else {
+                op_pixels_func (*op_pix)[4];
+                qpel_mc_func (*op_qpix)[16] = s->me.qpel_put;
+
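+                /* MPEG-1/2 and B-frames always use the rounding interpolation;
+                 * otherwise no_rounding selects the no-rounding variants */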
+                if ((is_mpeg12 == DEFINITELY_MPEG12 || !s->no_rounding) || s->pict_type == AV_PICTURE_TYPE_B) {
+                    op_pix = s->hdsp.put_pixels_tab;
+                } else {
+                    op_pix = s->hdsp.put_no_rnd_pixels_tab;
+                }
+                if (s->mv_dir & MV_DIR_FORWARD) {
+                    ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 0, s->last_picture.f->data, op_pix, op_qpix);
+                    op_pix  = s->hdsp.avg_pixels_tab;
+                    op_qpix = s->me.qpel_avg;
+                }
+                if (s->mv_dir & MV_DIR_BACKWARD) {
+                    ff_mpv_motion(s, dest_y, dest_cb, dest_cr, 1, s->next_picture.f->data, op_pix, op_qpix);
+                }
+            }
+
+            /* skip dequant / idct if we are really late ;) */
+            if (s->avctx->skip_idct) {
+                if(  (s->avctx->skip_idct >= AVDISCARD_NONREF && s->pict_type == AV_PICTURE_TYPE_B)
+                   ||(s->avctx->skip_idct >= AVDISCARD_NONKEY && s->pict_type != AV_PICTURE_TYPE_I)
+                   || s->avctx->skip_idct >= AVDISCARD_ALL)
+                    goto skip_idct;
+            }
+
+            /* add dct residue */
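+            /* MPEG-1/2, MSMPEG4 and MPEG-4 with H.263 quantization already
+             * dequantize while parsing, so they only need the plain IDCT-add
+             * path below; everything else is dequantized here */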
+            if (!(IS_MPEG12(s) || s->msmpeg4_version ||
+                  (s->codec_id == AV_CODEC_ID_MPEG4 && !s->mpeg_quant)))
+#endif /* !IS_ENCODER */
+            {
+                add_dequant_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
+                add_dequant_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
+                add_dequant_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
+                add_dequant_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
+
+                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
+                    if (s->chroma_y_shift) {
+                        add_dequant_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
+                        add_dequant_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
+                    } else {
+                        dct_linesize >>= 1;
+                        dct_offset   >>= 1;
+                        add_dequant_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
+                        add_dequant_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
+                        add_dequant_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
+                        add_dequant_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
+                    }
+                }
+            }
+#if !IS_ENCODER
+              else if (is_mpeg12 == DEFINITELY_MPEG12 || (s->codec_id != AV_CODEC_ID_WMV2)) {
+                add_dct(s, block[0], 0, dest_y                          , dct_linesize);
+                add_dct(s, block[1], 1, dest_y              + block_size, dct_linesize);
+                add_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize);
+                add_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize);
+
+                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
+                    if (s->chroma_y_shift) {//Chroma420
+                        add_dct(s, block[4], 4, dest_cb, uvlinesize);
+                        add_dct(s, block[5], 5, dest_cr, uvlinesize);
+                    } else {
+                        //chroma422
+                        dct_linesize = uvlinesize << s->interlaced_dct;
+                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
+
+                        add_dct(s, block[4], 4, dest_cb, dct_linesize);
+                        add_dct(s, block[5], 5, dest_cr, dct_linesize);
+                        add_dct(s, block[6], 6, dest_cb+dct_offset, dct_linesize);
+                        add_dct(s, block[7], 7, dest_cr+dct_offset, dct_linesize);
+                        if (!s->chroma_x_shift) {//Chroma444
+                            add_dct(s, block[8], 8, dest_cb+block_size, dct_linesize);
+                            add_dct(s, block[9], 9, dest_cr+block_size, dct_linesize);
+                            add_dct(s, block[10], 10, dest_cb+block_size+dct_offset, dct_linesize);
+                            add_dct(s, block[11], 11, dest_cr+block_size+dct_offset, dct_linesize);
+                        }
+                    }
+                } // end of gray check
+            } else if (CONFIG_WMV2_DECODER) {
+                ff_wmv2_add_mb(s, block, dest_y, dest_cb, dest_cr);
+            }
+#endif /* !IS_ENCODER */
+        } else {
+#if !IS_ENCODER
+            /* Only MPEG-4 Simple Studio Profile is supported in > 8-bit mode.
+               TODO: Integrate 10-bit properly into mpegvideo.c so that ER works properly */
+            if (is_mpeg12 != DEFINITELY_MPEG12 && CONFIG_MPEG4_DECODER &&
+                /* s->codec_id == AV_CODEC_ID_MPEG4 && */
+                s->avctx->bits_per_raw_sample > 8) {
+                ff_mpeg4_decode_studio(s, dest_y, dest_cb, dest_cr, block_size,
+                                       uvlinesize, dct_linesize, dct_offset);
+            } else if (!IS_MPEG12(s))
+#endif /* !IS_ENCODER */
+            {
+                /* dct only in intra block */
+                put_dct(s, block[0], 0, dest_y                          , dct_linesize, s->qscale);
+                put_dct(s, block[1], 1, dest_y              + block_size, dct_linesize, s->qscale);
+                put_dct(s, block[2], 2, dest_y + dct_offset             , dct_linesize, s->qscale);
+                put_dct(s, block[3], 3, dest_y + dct_offset + block_size, dct_linesize, s->qscale);
+
+                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
+                    if (s->chroma_y_shift) {
+                        put_dct(s, block[4], 4, dest_cb, uvlinesize, s->chroma_qscale);
+                        put_dct(s, block[5], 5, dest_cr, uvlinesize, s->chroma_qscale);
+                    } else {
+                        dct_offset >>=1;
+                        dct_linesize >>=1;
+                        put_dct(s, block[4], 4, dest_cb,              dct_linesize, s->chroma_qscale);
+                        put_dct(s, block[5], 5, dest_cr,              dct_linesize, s->chroma_qscale);
+                        put_dct(s, block[6], 6, dest_cb + dct_offset, dct_linesize, s->chroma_qscale);
+                        put_dct(s, block[7], 7, dest_cr + dct_offset, dct_linesize, s->chroma_qscale);
+                    }
+                }
+            }
+#if !IS_ENCODER
+              else {
+                s->idsp.idct_put(dest_y,                           dct_linesize, block[0]);
+                s->idsp.idct_put(dest_y              + block_size, dct_linesize, block[1]);
+                s->idsp.idct_put(dest_y + dct_offset,              dct_linesize, block[2]);
+                s->idsp.idct_put(dest_y + dct_offset + block_size, dct_linesize, block[3]);
+
+                if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
+                    if (s->chroma_y_shift) {
+                        s->idsp.idct_put(dest_cb, uvlinesize, block[4]);
+                        s->idsp.idct_put(dest_cr, uvlinesize, block[5]);
+                    } else {
+                        dct_linesize = uvlinesize << s->interlaced_dct;
+                        dct_offset   = s->interlaced_dct ? uvlinesize : uvlinesize*block_size;
+
+                        s->idsp.idct_put(dest_cb,              dct_linesize, block[4]);
+                        s->idsp.idct_put(dest_cr,              dct_linesize, block[5]);
+                        s->idsp.idct_put(dest_cb + dct_offset, dct_linesize, block[6]);
+                        s->idsp.idct_put(dest_cr + dct_offset, dct_linesize, block[7]);
+                        if (!s->chroma_x_shift) { //Chroma444
+                            s->idsp.idct_put(dest_cb + block_size,              dct_linesize, block[8]);
+                            s->idsp.idct_put(dest_cr + block_size,              dct_linesize, block[9]);
+                            s->idsp.idct_put(dest_cb + block_size + dct_offset, dct_linesize, block[10]);
+                            s->idsp.idct_put(dest_cr + block_size + dct_offset, dct_linesize, block[11]);
+                        }
+                    }
+                } // end of gray check
+            }
+        }
+skip_idct:
+        if (!readable) {
+            s->hdsp.put_pixels_tab[0][0](s->dest[0], dest_y, linesize, 16);
+            if (!CONFIG_GRAY || !(s->avctx->flags & AV_CODEC_FLAG_GRAY)) {
+                s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[1], dest_cb, uvlinesize, 16 >> s->chroma_y_shift);
+                s->hdsp.put_pixels_tab[s->chroma_x_shift][0](s->dest[2], dest_cr, uvlinesize, 16 >> s->chroma_y_shift);
+            }
+#endif /* !IS_ENCODER */
+        }
+    }
+}
+