mbox series

[FFmpeg-devel,v6,0/3] Implement SEI parsing for QSV decoders

Message ID pull.31.v6.ffstaging.FFmpeg.1666670616.ffmpegagent@gmail.com
Headers show
Series Implement SEI parsing for QSV decoders | expand

Message

Aman Karmani Oct. 25, 2022, 4:03 a.m. UTC
Missing SEI information has always been a major drawback when using the QSV
decoders. It turned out that there's a little-known API method that provides
access to all SEI (h264/hevc) or user data (mpeg2video).

This allows to get things like closed captions, frame packing, display
orientation, HDR data (mastering display, content light level, etc.) without
having to rely on those data being provided by the MSDK as extended buffers.

The commit "Implement SEI parsing for QSV decoders" includes some hard-coded
workarounds for MSDK bugs which I reported:
https://github.com/Intel-Media-SDK/MediaSDK/issues/2597#issuecomment-1072795311
If someone is interested in the details please contact me directly.

v6

 * Split out the first two commits as a separate patchset
   https://github.com/ffstaging/FFmpeg/pull/44
 * For mpeg12, parse A53 data in qsvdec directly
 * For h264 and hevc, factor out ff_hxxx_set_sei_to_frame functions to avoid
   being dependent on the full decoder contexts
 * Ensure sufficient padding for get_bits API
 * Addresses all points (1, 2, 3, 4) made by Andreas
   https://patchwork.ffmpeg.org/project/ffmpeg/cover/pull.31.v5.ffstaging.FFmpeg.1656708534.ffmpegagent@gmail.com/

v4

 * add new dependencies in makefile. Now, the build still works when someone
   uses configure --disable-decoder=h264 --disable-decoder=hevc
   --disable-decoder=mpegvideo --disable-decoder=mpeg1video
   --disable-decoder=mpeg2video --enable-libmfx

v3

 * frame.h: clarify doc text for av_frame_copy_side_data()

v2

 * qsvdec: make error handling consistent and clear
 * qsvdec: remove AV_CODEC_ID_MPEG1VIDEO constants
 * hevcdec: rename function to ff_hevc_set_side_data(), add doc text

v3

 * qsvdec: fix c/p error

softworkz (3):
  avcodec/hevcdec: factor out ff_hevc_set_sei_to_frame
  avcodec/h264dec: make h264_export_frame_props() accessible
  avcodec/qsvdec: Implement SEI parsing for QSV decoders

 libavcodec/Makefile     |   2 +-
 libavcodec/h264_sei.c   | 197 ++++++++++++++++++++++++
 libavcodec/h264_sei.h   |   2 +
 libavcodec/h264_slice.c | 190 +-----------------------
 libavcodec/hevc_sei.c   | 252 +++++++++++++++++++++++++++++++
 libavcodec/hevc_sei.h   |   3 +
 libavcodec/hevcdec.c    | 249 +------------------------------
 libavcodec/qsvdec.c     | 321 ++++++++++++++++++++++++++++++++++++++++
 8 files changed, 782 insertions(+), 434 deletions(-)


base-commit: 882a17068fd8e62c7d38c14e6fb160d7c9fc446a
Published-As: https://github.com/ffstaging/FFmpeg/releases/tag/pr-ffstaging-31%2Fsoftworkz%2Fsubmit_qsv_sei-v6
Fetch-It-Via: git fetch https://github.com/ffstaging/FFmpeg pr-ffstaging-31/softworkz/submit_qsv_sei-v6
Pull-Request: https://github.com/ffstaging/FFmpeg/pull/31

Range-diff vs v5:

 1:  7656477360 < -:  ---------- avutil/frame: Add av_frame_copy_side_data() and av_frame_remove_all_side_data()
 2:  06976606c5 < -:  ---------- avcodec/vpp_qsv: Copy side data from input to output frame
 3:  320a8a535c < -:  ---------- avcodec/mpeg12dec: make mpeg_decode_user_data() accessible
 4:  e58ad6564f ! 1:  4e9adcd90a avcodec/hevcdec: make set_side_data() accessible
     @@ Metadata
      Author: softworkz <softworkz@hotmail.com>
      
       ## Commit message ##
     -    avcodec/hevcdec: make set_side_data() accessible
      +    avcodec/hevcdec: factor out ff_hevc_set_sei_to_frame
      
          Signed-off-by: softworkz <softworkz@hotmail.com>
      
     - ## libavcodec/hevcdec.c ##
     -@@ libavcodec/hevcdec.c: error:
     -     return res;
     - }
     + ## libavcodec/hevc_sei.c ##
     +@@
     + #include "hevc_ps.h"
     + #include "hevc_sei.h"
       
     --static int set_side_data(HEVCContext *s)
     -+int ff_hevc_set_side_data(AVCodecContext *logctx, HEVCSEI *sei, HEVCContext *s, AVFrame *out)
     ++#include "libavutil/display.h"
     ++#include "libavutil/film_grain_params.h"
     ++#include "libavutil/mastering_display_metadata.h"
     ++#include "libavutil/stereo3d.h"
     ++#include "libavutil/timecode.h"
     ++
     + static int decode_nal_sei_decoded_picture_hash(HEVCSEIPictureHash *s,
     +                                                GetByteContext *gb)
       {
     --    AVFrame *out = s->ref->frame;
     --    int ret;
     -+    int ret = 0;
     - 
     --    if (s->sei.frame_packing.present &&
     --        s->sei.frame_packing.arrangement_type >= 3 &&
     --        s->sei.frame_packing.arrangement_type <= 5 &&
     --        s->sei.frame_packing.content_interpretation_type > 0 &&
     --        s->sei.frame_packing.content_interpretation_type < 3) {
     +@@ libavcodec/hevc_sei.c: void ff_hevc_reset_sei(HEVCSEI *s)
     +     av_buffer_unref(&s->dynamic_hdr_plus.info);
     +     av_buffer_unref(&s->dynamic_hdr_vivid.info);
     + }
     ++
     ++int ff_hevc_set_sei_to_frame(AVCodecContext *logctx, HEVCSEI *sei, AVFrame *out, AVRational framerate, uint64_t seed, const VUI *vui, int bit_depth_luma, int bit_depth_chroma)
     ++{
      +    if (sei->frame_packing.present &&
      +        sei->frame_packing.arrangement_type >= 3 &&
      +        sei->frame_packing.arrangement_type <= 5 &&
      +        sei->frame_packing.content_interpretation_type > 0 &&
      +        sei->frame_packing.content_interpretation_type < 3) {
     -         AVStereo3D *stereo = av_stereo3d_create_side_data(out);
     -         if (!stereo)
     -             return AVERROR(ENOMEM);
     - 
     --        switch (s->sei.frame_packing.arrangement_type) {
     ++        AVStereo3D *stereo = av_stereo3d_create_side_data(out);
     ++        if (!stereo)
     ++            return AVERROR(ENOMEM);
     ++
      +        switch (sei->frame_packing.arrangement_type) {
     -         case 3:
     --            if (s->sei.frame_packing.quincunx_subsampling)
     ++        case 3:
      +            if (sei->frame_packing.quincunx_subsampling)
     -                 stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
     -             else
     -                 stereo->type = AV_STEREO3D_SIDEBYSIDE;
     -@@ libavcodec/hevcdec.c: static int set_side_data(HEVCContext *s)
     -             break;
     -         }
     - 
     --        if (s->sei.frame_packing.content_interpretation_type == 2)
     ++                stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
     ++            else
     ++                stereo->type = AV_STEREO3D_SIDEBYSIDE;
     ++            break;
     ++        case 4:
     ++            stereo->type = AV_STEREO3D_TOPBOTTOM;
     ++            break;
     ++        case 5:
     ++            stereo->type = AV_STEREO3D_FRAMESEQUENCE;
     ++            break;
     ++        }
     ++
      +        if (sei->frame_packing.content_interpretation_type == 2)
     -             stereo->flags = AV_STEREO3D_FLAG_INVERT;
     - 
     --        if (s->sei.frame_packing.arrangement_type == 5) {
     --            if (s->sei.frame_packing.current_frame_is_frame0_flag)
     ++            stereo->flags = AV_STEREO3D_FLAG_INVERT;
     ++
      +        if (sei->frame_packing.arrangement_type == 5) {
      +            if (sei->frame_packing.current_frame_is_frame0_flag)
     -                 stereo->view = AV_STEREO3D_VIEW_LEFT;
     -             else
     -                 stereo->view = AV_STEREO3D_VIEW_RIGHT;
     -         }
     -     }
     - 
     --    if (s->sei.display_orientation.present &&
     --        (s->sei.display_orientation.anticlockwise_rotation ||
     --         s->sei.display_orientation.hflip || s->sei.display_orientation.vflip)) {
     --        double angle = s->sei.display_orientation.anticlockwise_rotation * 360 / (double) (1 << 16);
     ++                stereo->view = AV_STEREO3D_VIEW_LEFT;
     ++            else
     ++                stereo->view = AV_STEREO3D_VIEW_RIGHT;
     ++        }
     ++    }
     ++
      +    if (sei->display_orientation.present &&
      +        (sei->display_orientation.anticlockwise_rotation ||
      +         sei->display_orientation.hflip || sei->display_orientation.vflip)) {
      +        double angle = sei->display_orientation.anticlockwise_rotation * 360 / (double) (1 << 16);
     -         AVFrameSideData *rotation = av_frame_new_side_data(out,
     -                                                            AV_FRAME_DATA_DISPLAYMATRIX,
     -                                                            sizeof(int32_t) * 9);
     ++        AVFrameSideData *rotation = av_frame_new_side_data(out,
     ++                                                           AV_FRAME_DATA_DISPLAYMATRIX,
     ++                                                           sizeof(int32_t) * 9);
     ++        if (!rotation)
     ++            return AVERROR(ENOMEM);
     ++
     ++        /* av_display_rotation_set() expects the angle in the clockwise
     ++         * direction, hence the first minus.
     ++         * The below code applies the flips after the rotation, yet
     ++         * the H.2645 specs require flipping to be applied first.
     ++         * Because of R O(phi) = O(-phi) R (where R is flipping around
     ++         * an arbitatry axis and O(phi) is the proper rotation by phi)
     ++         * we can create display matrices as desired by negating
     ++         * the degree once for every flip applied. */
     ++        angle = -angle * (1 - 2 * !!sei->display_orientation.hflip)
     ++                       * (1 - 2 * !!sei->display_orientation.vflip);
     ++        av_display_rotation_set((int32_t *)rotation->data, angle);
     ++        av_display_matrix_flip((int32_t *)rotation->data,
     ++                               sei->display_orientation.hflip,
     ++                               sei->display_orientation.vflip);
     ++    }
     ++
     ++    if (sei->mastering_display.present) {
     ++        // HEVC uses a g,b,r ordering, which we convert to a more natural r,g,b
     ++        const int mapping[3] = {2, 0, 1};
     ++        const int chroma_den = 50000;
     ++        const int luma_den = 10000;
     ++        int i;
     ++        AVMasteringDisplayMetadata *metadata =
     ++            av_mastering_display_metadata_create_side_data(out);
     ++        if (!metadata)
     ++            return AVERROR(ENOMEM);
     ++
     ++        for (i = 0; i < 3; i++) {
     ++            const int j = mapping[i];
     ++            metadata->display_primaries[i][0].num = sei->mastering_display.display_primaries[j][0];
     ++            metadata->display_primaries[i][0].den = chroma_den;
     ++            metadata->display_primaries[i][1].num = sei->mastering_display.display_primaries[j][1];
     ++            metadata->display_primaries[i][1].den = chroma_den;
     ++        }
     ++        metadata->white_point[0].num = sei->mastering_display.white_point[0];
     ++        metadata->white_point[0].den = chroma_den;
     ++        metadata->white_point[1].num = sei->mastering_display.white_point[1];
     ++        metadata->white_point[1].den = chroma_den;
     ++
     ++        metadata->max_luminance.num = sei->mastering_display.max_luminance;
     ++        metadata->max_luminance.den = luma_den;
     ++        metadata->min_luminance.num = sei->mastering_display.min_luminance;
     ++        metadata->min_luminance.den = luma_den;
     ++        metadata->has_luminance = 1;
     ++        metadata->has_primaries = 1;
     ++
     ++        av_log(logctx, AV_LOG_DEBUG, "Mastering Display Metadata:\n");
     ++        av_log(logctx, AV_LOG_DEBUG,
     ++               "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n",
     ++               av_q2d(metadata->display_primaries[0][0]),
     ++               av_q2d(metadata->display_primaries[0][1]),
     ++               av_q2d(metadata->display_primaries[1][0]),
     ++               av_q2d(metadata->display_primaries[1][1]),
     ++               av_q2d(metadata->display_primaries[2][0]),
     ++               av_q2d(metadata->display_primaries[2][1]),
     ++               av_q2d(metadata->white_point[0]), av_q2d(metadata->white_point[1]));
     ++        av_log(logctx, AV_LOG_DEBUG,
     ++               "min_luminance=%f, max_luminance=%f\n",
     ++               av_q2d(metadata->min_luminance), av_q2d(metadata->max_luminance));
     ++    }
     ++    if (sei->content_light.present) {
     ++        AVContentLightMetadata *metadata =
     ++            av_content_light_metadata_create_side_data(out);
     ++        if (!metadata)
     ++            return AVERROR(ENOMEM);
     ++        metadata->MaxCLL  = sei->content_light.max_content_light_level;
     ++        metadata->MaxFALL = sei->content_light.max_pic_average_light_level;
     ++
     ++        av_log(logctx, AV_LOG_DEBUG, "Content Light Level Metadata:\n");
     ++        av_log(logctx, AV_LOG_DEBUG, "MaxCLL=%d, MaxFALL=%d\n",
     ++               metadata->MaxCLL, metadata->MaxFALL);
     ++    }
     ++
     ++    if (sei->a53_caption.buf_ref) {
     ++        HEVCSEIA53Caption *a53 = &sei->a53_caption;
     ++
     ++        AVFrameSideData *sd = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_A53_CC, a53->buf_ref);
     ++        if (!sd)
     ++            av_buffer_unref(&a53->buf_ref);
     ++        a53->buf_ref = NULL;
     ++    }
     ++
     ++    for (int i = 0; i < sei->unregistered.nb_buf_ref; i++) {
     ++        HEVCSEIUnregistered *unreg = &sei->unregistered;
     ++
     ++        if (unreg->buf_ref[i]) {
     ++            AVFrameSideData *sd = av_frame_new_side_data_from_buf(out,
     ++                    AV_FRAME_DATA_SEI_UNREGISTERED,
     ++                    unreg->buf_ref[i]);
     ++            if (!sd)
     ++                av_buffer_unref(&unreg->buf_ref[i]);
     ++            unreg->buf_ref[i] = NULL;
     ++        }
     ++    }
     ++    sei->unregistered.nb_buf_ref = 0;
     ++
     ++    if (sei->timecode.present) {
     ++        uint32_t *tc_sd;
     ++        char tcbuf[AV_TIMECODE_STR_SIZE];
     ++        AVFrameSideData *tcside = av_frame_new_side_data(out, AV_FRAME_DATA_S12M_TIMECODE,
     ++                                                         sizeof(uint32_t) * 4);
     ++        if (!tcside)
     ++            return AVERROR(ENOMEM);
     ++
     ++        tc_sd = (uint32_t*)tcside->data;
     ++        tc_sd[0] = sei->timecode.num_clock_ts;
     ++
     ++        for (int i = 0; i < tc_sd[0]; i++) {
     ++            int drop = sei->timecode.cnt_dropped_flag[i];
     ++            int   hh = sei->timecode.hours_value[i];
     ++            int   mm = sei->timecode.minutes_value[i];
     ++            int   ss = sei->timecode.seconds_value[i];
     ++            int   ff = sei->timecode.n_frames[i];
     ++
     ++            tc_sd[i + 1] = av_timecode_get_smpte(framerate, drop, hh, mm, ss, ff);
     ++            av_timecode_make_smpte_tc_string2(tcbuf, framerate, tc_sd[i + 1], 0, 0);
     ++            av_dict_set(&out->metadata, "timecode", tcbuf, 0);
     ++        }
     ++
     ++        sei->timecode.num_clock_ts = 0;
     ++    }
     ++
     ++    if (sei->film_grain_characteristics.present) {
     ++        HEVCSEIFilmGrainCharacteristics *fgc = &sei->film_grain_characteristics;
     ++        AVFilmGrainParams *fgp = av_film_grain_params_create_side_data(out);
     ++        if (!fgp)
     ++            return AVERROR(ENOMEM);
     ++
     ++        fgp->type = AV_FILM_GRAIN_PARAMS_H274;
     ++        fgp->seed = seed; /* no poc_offset in HEVC */
     ++        fgp->codec.h274.model_id = fgc->model_id;
     ++        if (fgc->separate_colour_description_present_flag) {
     ++            fgp->codec.h274.bit_depth_luma = fgc->bit_depth_luma;
     ++            fgp->codec.h274.bit_depth_chroma = fgc->bit_depth_chroma;
     ++            fgp->codec.h274.color_range = fgc->full_range + 1;
     ++            fgp->codec.h274.color_primaries = fgc->color_primaries;
     ++            fgp->codec.h274.color_trc = fgc->transfer_characteristics;
     ++            fgp->codec.h274.color_space = fgc->matrix_coeffs;
     ++        } else {
     ++            fgp->codec.h274.bit_depth_luma = bit_depth_luma;
     ++            fgp->codec.h274.bit_depth_chroma = bit_depth_chroma;
     ++            if (vui->video_signal_type_present_flag)
     ++                fgp->codec.h274.color_range = vui->video_full_range_flag + 1;
     ++            else
     ++                fgp->codec.h274.color_range = AVCOL_RANGE_UNSPECIFIED;
     ++            if (vui->colour_description_present_flag) {
     ++                fgp->codec.h274.color_primaries = vui->colour_primaries;
     ++                fgp->codec.h274.color_trc = vui->transfer_characteristic;
     ++                fgp->codec.h274.color_space = vui->matrix_coeffs;
     ++            } else {
     ++                fgp->codec.h274.color_primaries = AVCOL_PRI_UNSPECIFIED;
     ++                fgp->codec.h274.color_trc = AVCOL_TRC_UNSPECIFIED;
     ++                fgp->codec.h274.color_space = AVCOL_SPC_UNSPECIFIED;
     ++            }
     ++        }
     ++        fgp->codec.h274.blending_mode_id = fgc->blending_mode_id;
     ++        fgp->codec.h274.log2_scale_factor = fgc->log2_scale_factor;
     ++
     ++        memcpy(&fgp->codec.h274.component_model_present, &fgc->comp_model_present_flag,
     ++               sizeof(fgp->codec.h274.component_model_present));
     ++        memcpy(&fgp->codec.h274.num_intensity_intervals, &fgc->num_intensity_intervals,
     ++               sizeof(fgp->codec.h274.num_intensity_intervals));
     ++        memcpy(&fgp->codec.h274.num_model_values, &fgc->num_model_values,
     ++               sizeof(fgp->codec.h274.num_model_values));
     ++        memcpy(&fgp->codec.h274.intensity_interval_lower_bound, &fgc->intensity_interval_lower_bound,
     ++               sizeof(fgp->codec.h274.intensity_interval_lower_bound));
     ++        memcpy(&fgp->codec.h274.intensity_interval_upper_bound, &fgc->intensity_interval_upper_bound,
     ++               sizeof(fgp->codec.h274.intensity_interval_upper_bound));
     ++        memcpy(&fgp->codec.h274.comp_model_value, &fgc->comp_model_value,
     ++               sizeof(fgp->codec.h274.comp_model_value));
     ++
     ++        fgc->present = fgc->persistence_flag;
     ++    }
     ++
     ++    if (sei->dynamic_hdr_plus.info) {
     ++        AVBufferRef *info_ref = av_buffer_ref(sei->dynamic_hdr_plus.info);
     ++        if (!info_ref)
     ++            return AVERROR(ENOMEM);
     ++
     ++        if (!av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DYNAMIC_HDR_PLUS, info_ref)) {
     ++            av_buffer_unref(&info_ref);
     ++            return AVERROR(ENOMEM);
     ++        }
     ++    }
     ++
     ++    if (sei->dynamic_hdr_vivid.info) {
     ++        AVBufferRef *info_ref = av_buffer_ref(sei->dynamic_hdr_vivid.info);
     ++        if (!info_ref)
     ++            return AVERROR(ENOMEM);
     ++
     ++        if (!av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DYNAMIC_HDR_VIVID, info_ref)) {
     ++            av_buffer_unref(&info_ref);
     ++            return AVERROR(ENOMEM);
     ++        }
     ++    }
     ++
     ++    return 0;
     ++}
     +
     + ## libavcodec/hevc_sei.h ##
     +@@
     + 
     + #include "get_bits.h"
     + #include "hevc.h"
     ++#include "hevc_ps.h"
     + #include "sei.h"
     + 
     + 
     +@@ libavcodec/hevc_sei.h: int ff_hevc_decode_nal_sei(GetBitContext *gb, void *logctx, HEVCSEI *s,
     +  */
     + void ff_hevc_reset_sei(HEVCSEI *s);
     + 
     ++int ff_hevc_set_sei_to_frame(AVCodecContext *logctx, HEVCSEI *sei, AVFrame *out, AVRational framerate, uint64_t seed, const VUI *vui, int bit_depth_luma, int bit_depth_chroma);
     ++
     + #endif /* AVCODEC_HEVC_SEI_H */
     +
     + ## libavcodec/hevcdec.c ##
      @@ libavcodec/hevcdec.c: static int set_side_data(HEVCContext *s)
     -                        * (1 - 2 * !!s->sei.display_orientation.vflip);
     -         av_display_rotation_set((int32_t *)rotation->data, angle);
     -         av_display_matrix_flip((int32_t *)rotation->data,
     + {
     +     AVFrame *out = s->ref->frame;
     +     int ret;
     +-
     +-    if (s->sei.frame_packing.present &&
     +-        s->sei.frame_packing.arrangement_type >= 3 &&
     +-        s->sei.frame_packing.arrangement_type <= 5 &&
     +-        s->sei.frame_packing.content_interpretation_type > 0 &&
     +-        s->sei.frame_packing.content_interpretation_type < 3) {
     +-        AVStereo3D *stereo = av_stereo3d_create_side_data(out);
     +-        if (!stereo)
     +-            return AVERROR(ENOMEM);
     +-
     +-        switch (s->sei.frame_packing.arrangement_type) {
     +-        case 3:
     +-            if (s->sei.frame_packing.quincunx_subsampling)
     +-                stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
     +-            else
     +-                stereo->type = AV_STEREO3D_SIDEBYSIDE;
     +-            break;
     +-        case 4:
     +-            stereo->type = AV_STEREO3D_TOPBOTTOM;
     +-            break;
     +-        case 5:
     +-            stereo->type = AV_STEREO3D_FRAMESEQUENCE;
     +-            break;
     +-        }
     +-
     +-        if (s->sei.frame_packing.content_interpretation_type == 2)
     +-            stereo->flags = AV_STEREO3D_FLAG_INVERT;
     +-
     +-        if (s->sei.frame_packing.arrangement_type == 5) {
     +-            if (s->sei.frame_packing.current_frame_is_frame0_flag)
     +-                stereo->view = AV_STEREO3D_VIEW_LEFT;
     +-            else
     +-                stereo->view = AV_STEREO3D_VIEW_RIGHT;
     +-        }
     +-    }
     +-
     +-    if (s->sei.display_orientation.present &&
     +-        (s->sei.display_orientation.anticlockwise_rotation ||
     +-         s->sei.display_orientation.hflip || s->sei.display_orientation.vflip)) {
     +-        double angle = s->sei.display_orientation.anticlockwise_rotation * 360 / (double) (1 << 16);
     +-        AVFrameSideData *rotation = av_frame_new_side_data(out,
     +-                                                           AV_FRAME_DATA_DISPLAYMATRIX,
     +-                                                           sizeof(int32_t) * 9);
     +-        if (!rotation)
     +-            return AVERROR(ENOMEM);
     +-
     +-        /* av_display_rotation_set() expects the angle in the clockwise
     +-         * direction, hence the first minus.
     +-         * The below code applies the flips after the rotation, yet
     +-         * the H.2645 specs require flipping to be applied first.
     +-         * Because of R O(phi) = O(-phi) R (where R is flipping around
     +-         * an arbitatry axis and O(phi) is the proper rotation by phi)
     +-         * we can create display matrices as desired by negating
     +-         * the degree once for every flip applied. */
     +-        angle = -angle * (1 - 2 * !!s->sei.display_orientation.hflip)
     +-                       * (1 - 2 * !!s->sei.display_orientation.vflip);
     +-        av_display_rotation_set((int32_t *)rotation->data, angle);
     +-        av_display_matrix_flip((int32_t *)rotation->data,
      -                               s->sei.display_orientation.hflip,
      -                               s->sei.display_orientation.vflip);
     -+                               sei->display_orientation.hflip,
     -+                               sei->display_orientation.vflip);
     -     }
     +-    }
     ++    const HEVCSPS *sps = s->ps.sps;
       
           // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1
           // so the side data persists for the entire coded video sequence.
     --    if (s->sei.mastering_display.present > 0 &&
     -+    if (s && sei->mastering_display.present > 0 &&
     +@@ libavcodec/hevcdec.c: static int set_side_data(HEVCContext *s)
               IS_IRAP(s) && s->no_rasl_output_flag) {
     --        s->sei.mastering_display.present--;
     -+        sei->mastering_display.present--;
     +         s->sei.mastering_display.present--;
           }
      -    if (s->sei.mastering_display.present) {
     -+    if (sei->mastering_display.present) {
     -         // HEVC uses a g,b,r ordering, which we convert to a more natural r,g,b
     -         const int mapping[3] = {2, 0, 1};
     -         const int chroma_den = 50000;
     -@@ libavcodec/hevcdec.c: static int set_side_data(HEVCContext *s)
     - 
     -         for (i = 0; i < 3; i++) {
     -             const int j = mapping[i];
     +-        // HEVC uses a g,b,r ordering, which we convert to a more natural r,g,b
     +-        const int mapping[3] = {2, 0, 1};
     +-        const int chroma_den = 50000;
     +-        const int luma_den = 10000;
     +-        int i;
     +-        AVMasteringDisplayMetadata *metadata =
     +-            av_mastering_display_metadata_create_side_data(out);
     +-        if (!metadata)
     +-            return AVERROR(ENOMEM);
     +-
     +-        for (i = 0; i < 3; i++) {
     +-            const int j = mapping[i];
      -            metadata->display_primaries[i][0].num = s->sei.mastering_display.display_primaries[j][0];
     -+            metadata->display_primaries[i][0].num = sei->mastering_display.display_primaries[j][0];
     -             metadata->display_primaries[i][0].den = chroma_den;
     +-            metadata->display_primaries[i][0].den = chroma_den;
      -            metadata->display_primaries[i][1].num = s->sei.mastering_display.display_primaries[j][1];
     -+            metadata->display_primaries[i][1].num = sei->mastering_display.display_primaries[j][1];
     -             metadata->display_primaries[i][1].den = chroma_den;
     -         }
     +-            metadata->display_primaries[i][1].den = chroma_den;
     +-        }
      -        metadata->white_point[0].num = s->sei.mastering_display.white_point[0];
     -+        metadata->white_point[0].num = sei->mastering_display.white_point[0];
     -         metadata->white_point[0].den = chroma_den;
     +-        metadata->white_point[0].den = chroma_den;
      -        metadata->white_point[1].num = s->sei.mastering_display.white_point[1];
     -+        metadata->white_point[1].num = sei->mastering_display.white_point[1];
     -         metadata->white_point[1].den = chroma_den;
     - 
     +-        metadata->white_point[1].den = chroma_den;
     +-
      -        metadata->max_luminance.num = s->sei.mastering_display.max_luminance;
     -+        metadata->max_luminance.num = sei->mastering_display.max_luminance;
     -         metadata->max_luminance.den = luma_den;
     +-        metadata->max_luminance.den = luma_den;
      -        metadata->min_luminance.num = s->sei.mastering_display.min_luminance;
     -+        metadata->min_luminance.num = sei->mastering_display.min_luminance;
     -         metadata->min_luminance.den = luma_den;
     -         metadata->has_luminance = 1;
     -         metadata->has_primaries = 1;
     - 
     +-        metadata->min_luminance.den = luma_den;
     +-        metadata->has_luminance = 1;
     +-        metadata->has_primaries = 1;
     +-
      -        av_log(s->avctx, AV_LOG_DEBUG, "Mastering Display Metadata:\n");
      -        av_log(s->avctx, AV_LOG_DEBUG,
     -+        av_log(logctx, AV_LOG_DEBUG, "Mastering Display Metadata:\n");
     -+        av_log(logctx, AV_LOG_DEBUG,
     -                "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n",
     -                av_q2d(metadata->display_primaries[0][0]),
     -                av_q2d(metadata->display_primaries[0][1]),
     -@@ libavcodec/hevcdec.c: static int set_side_data(HEVCContext *s)
     -                av_q2d(metadata->display_primaries[2][0]),
     -                av_q2d(metadata->display_primaries[2][1]),
     -                av_q2d(metadata->white_point[0]), av_q2d(metadata->white_point[1]));
     +-               "r(%5.4f,%5.4f) g(%5.4f,%5.4f) b(%5.4f %5.4f) wp(%5.4f, %5.4f)\n",
     +-               av_q2d(metadata->display_primaries[0][0]),
     +-               av_q2d(metadata->display_primaries[0][1]),
     +-               av_q2d(metadata->display_primaries[1][0]),
     +-               av_q2d(metadata->display_primaries[1][1]),
     +-               av_q2d(metadata->display_primaries[2][0]),
     +-               av_q2d(metadata->display_primaries[2][1]),
     +-               av_q2d(metadata->white_point[0]), av_q2d(metadata->white_point[1]));
      -        av_log(s->avctx, AV_LOG_DEBUG,
     -+        av_log(logctx, AV_LOG_DEBUG,
     -                "min_luminance=%f, max_luminance=%f\n",
     -                av_q2d(metadata->min_luminance), av_q2d(metadata->max_luminance));
     -     }
     +-               "min_luminance=%f, max_luminance=%f\n",
     +-               av_q2d(metadata->min_luminance), av_q2d(metadata->max_luminance));
     +-    }
           // Decrement the mastering display flag when IRAP frame has no_rasl_output_flag=1
           // so the side data persists for the entire coded video sequence.
     --    if (s->sei.content_light.present > 0 &&
     -+    if (s && sei->content_light.present > 0 &&
     +     if (s->sei.content_light.present > 0 &&
               IS_IRAP(s) && s->no_rasl_output_flag) {
     --        s->sei.content_light.present--;
     -+        sei->content_light.present--;
     +         s->sei.content_light.present--;
           }
      -    if (s->sei.content_light.present) {
     -+    if (sei->content_light.present) {
     -         AVContentLightMetadata *metadata =
     -             av_content_light_metadata_create_side_data(out);
     -         if (!metadata)
     -             return AVERROR(ENOMEM);
     +-        AVContentLightMetadata *metadata =
     +-            av_content_light_metadata_create_side_data(out);
     +-        if (!metadata)
     +-            return AVERROR(ENOMEM);
      -        metadata->MaxCLL  = s->sei.content_light.max_content_light_level;
      -        metadata->MaxFALL = s->sei.content_light.max_pic_average_light_level;
     -+        metadata->MaxCLL  = sei->content_light.max_content_light_level;
     -+        metadata->MaxFALL = sei->content_light.max_pic_average_light_level;
     - 
     +-
      -        av_log(s->avctx, AV_LOG_DEBUG, "Content Light Level Metadata:\n");
      -        av_log(s->avctx, AV_LOG_DEBUG, "MaxCLL=%d, MaxFALL=%d\n",
     -+        av_log(logctx, AV_LOG_DEBUG, "Content Light Level Metadata:\n");
     -+        av_log(logctx, AV_LOG_DEBUG, "MaxCLL=%d, MaxFALL=%d\n",
     -                metadata->MaxCLL, metadata->MaxFALL);
     -     }
     - 
     +-               metadata->MaxCLL, metadata->MaxFALL);
     +-    }
     +-
      -    if (s->sei.a53_caption.buf_ref) {
      -        HEVCSEIA53Caption *a53 = &s->sei.a53_caption;
     -+    if (sei->a53_caption.buf_ref) {
     -+        HEVCSEIA53Caption *a53 = &sei->a53_caption;
     - 
     -         AVFrameSideData *sd = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_A53_CC, a53->buf_ref);
     -         if (!sd)
     -@@ libavcodec/hevcdec.c: static int set_side_data(HEVCContext *s)
     -         a53->buf_ref = NULL;
     -     }
     - 
     +-
     +-        AVFrameSideData *sd = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_A53_CC, a53->buf_ref);
     +-        if (!sd)
     +-            av_buffer_unref(&a53->buf_ref);
     +-        a53->buf_ref = NULL;
     +-    }
     +-
      -    for (int i = 0; i < s->sei.unregistered.nb_buf_ref; i++) {
      -        HEVCSEIUnregistered *unreg = &s->sei.unregistered;
     -+    for (int i = 0; i < sei->unregistered.nb_buf_ref; i++) {
     -+        HEVCSEIUnregistered *unreg = &sei->unregistered;
     - 
     -         if (unreg->buf_ref[i]) {
     -             AVFrameSideData *sd = av_frame_new_side_data_from_buf(out,
     -@@ libavcodec/hevcdec.c: static int set_side_data(HEVCContext *s)
     -             unreg->buf_ref[i] = NULL;
     -         }
     -     }
     +-
     +-        if (unreg->buf_ref[i]) {
     +-            AVFrameSideData *sd = av_frame_new_side_data_from_buf(out,
     +-                    AV_FRAME_DATA_SEI_UNREGISTERED,
     +-                    unreg->buf_ref[i]);
     +-            if (!sd)
     +-                av_buffer_unref(&unreg->buf_ref[i]);
     +-            unreg->buf_ref[i] = NULL;
     +-        }
     +-    }
      -    s->sei.unregistered.nb_buf_ref = 0;
     -+    sei->unregistered.nb_buf_ref = 0;
       
      -    if (s->sei.timecode.present) {
     -+    if (s && sei->timecode.present) {
     -         uint32_t *tc_sd;
     -         char tcbuf[AV_TIMECODE_STR_SIZE];
     -         AVFrameSideData *tcside = av_frame_new_side_data(out, AV_FRAME_DATA_S12M_TIMECODE,
     -@@ libavcodec/hevcdec.c: static int set_side_data(HEVCContext *s)
     -             return AVERROR(ENOMEM);
     - 
     -         tc_sd = (uint32_t*)tcside->data;
     +-        uint32_t *tc_sd;
     +-        char tcbuf[AV_TIMECODE_STR_SIZE];
     +-        AVFrameSideData *tcside = av_frame_new_side_data(out, AV_FRAME_DATA_S12M_TIMECODE,
     +-                                                         sizeof(uint32_t) * 4);
     +-        if (!tcside)
     +-            return AVERROR(ENOMEM);
     +-
     +-        tc_sd = (uint32_t*)tcside->data;
      -        tc_sd[0] = s->sei.timecode.num_clock_ts;
     -+        tc_sd[0] = sei->timecode.num_clock_ts;
     - 
     -         for (int i = 0; i < tc_sd[0]; i++) {
     +-
     +-        for (int i = 0; i < tc_sd[0]; i++) {
      -            int drop = s->sei.timecode.cnt_dropped_flag[i];
      -            int   hh = s->sei.timecode.hours_value[i];
      -            int   mm = s->sei.timecode.minutes_value[i];
      -            int   ss = s->sei.timecode.seconds_value[i];
      -            int   ff = s->sei.timecode.n_frames[i];
     -+            int drop = sei->timecode.cnt_dropped_flag[i];
     -+            int   hh = sei->timecode.hours_value[i];
     -+            int   mm = sei->timecode.minutes_value[i];
     -+            int   ss = sei->timecode.seconds_value[i];
     -+            int   ff = sei->timecode.n_frames[i];
     - 
     -             tc_sd[i + 1] = av_timecode_get_smpte(s->avctx->framerate, drop, hh, mm, ss, ff);
     -             av_timecode_make_smpte_tc_string2(tcbuf, s->avctx->framerate, tc_sd[i + 1], 0, 0);
     -             av_dict_set(&out->metadata, "timecode", tcbuf, 0);
     -         }
     - 
     +-
     +-            tc_sd[i + 1] = av_timecode_get_smpte(s->avctx->framerate, drop, hh, mm, ss, ff);
     +-            av_timecode_make_smpte_tc_string2(tcbuf, s->avctx->framerate, tc_sd[i + 1], 0, 0);
     +-            av_dict_set(&out->metadata, "timecode", tcbuf, 0);
     +-        }
     +-
      -        s->sei.timecode.num_clock_ts = 0;
     -+        sei->timecode.num_clock_ts = 0;
     -     }
     - 
     +-    }
     +-
      -    if (s->sei.film_grain_characteristics.present) {
      -        HEVCSEIFilmGrainCharacteristics *fgc = &s->sei.film_grain_characteristics;
     -+    if (s && sei->film_grain_characteristics.present) {
     -+        HEVCSEIFilmGrainCharacteristics *fgc = &sei->film_grain_characteristics;
     -         AVFilmGrainParams *fgp = av_film_grain_params_create_side_data(out);
     -         if (!fgp)
     -             return AVERROR(ENOMEM);
     -@@ libavcodec/hevcdec.c: static int set_side_data(HEVCContext *s)
     -         fgc->present = fgc->persistence_flag;
     -     }
     - 
     +-        AVFilmGrainParams *fgp = av_film_grain_params_create_side_data(out);
     +-        if (!fgp)
     +-            return AVERROR(ENOMEM);
     +-
     +-        fgp->type = AV_FILM_GRAIN_PARAMS_H274;
     +-        fgp->seed = s->ref->poc; /* no poc_offset in HEVC */
     +-
     +-        fgp->codec.h274.model_id = fgc->model_id;
     +-        if (fgc->separate_colour_description_present_flag) {
     +-            fgp->codec.h274.bit_depth_luma = fgc->bit_depth_luma;
     +-            fgp->codec.h274.bit_depth_chroma = fgc->bit_depth_chroma;
     +-            fgp->codec.h274.color_range = fgc->full_range + 1;
     +-            fgp->codec.h274.color_primaries = fgc->color_primaries;
     +-            fgp->codec.h274.color_trc = fgc->transfer_characteristics;
     +-            fgp->codec.h274.color_space = fgc->matrix_coeffs;
     +-        } else {
     +-            const HEVCSPS *sps = s->ps.sps;
     +-            const VUI *vui = &sps->vui;
     +-            fgp->codec.h274.bit_depth_luma = sps->bit_depth;
     +-            fgp->codec.h274.bit_depth_chroma = sps->bit_depth_chroma;
     +-            if (vui->video_signal_type_present_flag)
     +-                fgp->codec.h274.color_range = vui->video_full_range_flag + 1;
     +-            else
     +-                fgp->codec.h274.color_range = AVCOL_RANGE_UNSPECIFIED;
     +-            if (vui->colour_description_present_flag) {
     +-                fgp->codec.h274.color_primaries = vui->colour_primaries;
     +-                fgp->codec.h274.color_trc = vui->transfer_characteristic;
     +-                fgp->codec.h274.color_space = vui->matrix_coeffs;
     +-            } else {
     +-                fgp->codec.h274.color_primaries = AVCOL_PRI_UNSPECIFIED;
     +-                fgp->codec.h274.color_trc = AVCOL_TRC_UNSPECIFIED;
     +-                fgp->codec.h274.color_space = AVCOL_SPC_UNSPECIFIED;
     +-            }
     +-        }
     +-        fgp->codec.h274.blending_mode_id = fgc->blending_mode_id;
     +-        fgp->codec.h274.log2_scale_factor = fgc->log2_scale_factor;
     +-
     +-        memcpy(&fgp->codec.h274.component_model_present, &fgc->comp_model_present_flag,
     +-               sizeof(fgp->codec.h274.component_model_present));
     +-        memcpy(&fgp->codec.h274.num_intensity_intervals, &fgc->num_intensity_intervals,
     +-               sizeof(fgp->codec.h274.num_intensity_intervals));
     +-        memcpy(&fgp->codec.h274.num_model_values, &fgc->num_model_values,
     +-               sizeof(fgp->codec.h274.num_model_values));
     +-        memcpy(&fgp->codec.h274.intensity_interval_lower_bound, &fgc->intensity_interval_lower_bound,
     +-               sizeof(fgp->codec.h274.intensity_interval_lower_bound));
     +-        memcpy(&fgp->codec.h274.intensity_interval_upper_bound, &fgc->intensity_interval_upper_bound,
     +-               sizeof(fgp->codec.h274.intensity_interval_upper_bound));
     +-        memcpy(&fgp->codec.h274.comp_model_value, &fgc->comp_model_value,
     +-               sizeof(fgp->codec.h274.comp_model_value));
     +-
     +-        fgc->present = fgc->persistence_flag;
     +-    }
     +-
      -    if (s->sei.dynamic_hdr_plus.info) {
      -        AVBufferRef *info_ref = av_buffer_ref(s->sei.dynamic_hdr_plus.info);
     -+    if (sei->dynamic_hdr_plus.info) {
     -+        AVBufferRef *info_ref = av_buffer_ref(sei->dynamic_hdr_plus.info);
     -         if (!info_ref)
     -             return AVERROR(ENOMEM);
     - 
     -@@ libavcodec/hevcdec.c: static int set_side_data(HEVCContext *s)
     -         }
     -     }
     +-        if (!info_ref)
     +-            return AVERROR(ENOMEM);
     +-
     +-        if (!av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DYNAMIC_HDR_PLUS, info_ref)) {
     +-            av_buffer_unref(&info_ref);
     +-            return AVERROR(ENOMEM);
     +-        }
     +-    }
     ++    if ((ret = ff_hevc_set_sei_to_frame(s->avctx, &s->sei, out, s->avctx->framerate, s->ref->poc, &sps->vui, sps->bit_depth, sps->bit_depth_chroma) < 0))
     ++        return ret;
       
      -    if (s->rpu_buf) {
      +    if (s && s->rpu_buf) {
     @@ libavcodec/hevcdec.c: static int set_side_data(HEVCContext *s)
               return ret;
       
      -    if (s->sei.dynamic_hdr_vivid.info) {
     -+    if (s && s->sei.dynamic_hdr_vivid.info) {
     -         AVBufferRef *info_ref = av_buffer_ref(s->sei.dynamic_hdr_vivid.info);
     -         if (!info_ref)
     -             return AVERROR(ENOMEM);
     -@@ libavcodec/hevcdec.c: static int hevc_frame_start(HEVCContext *s)
     -             goto fail;
     -     }
     - 
     --    ret = set_side_data(s);
     -+    ret = ff_hevc_set_side_data(s->avctx, &s->sei, s, s->ref->frame);
     -     if (ret < 0)
     -         goto fail;
     - 
     -
     - ## libavcodec/hevcdec.h ##
     -@@ libavcodec/hevcdec.h: void ff_hevc_hls_residual_coding(HEVCContext *s, int x0, int y0,
     - 
     - void ff_hevc_hls_mvd_coding(HEVCContext *s, int x0, int y0, int log2_cb_size);
     +-        AVBufferRef *info_ref = av_buffer_ref(s->sei.dynamic_hdr_vivid.info);
     +-        if (!info_ref)
     +-            return AVERROR(ENOMEM);
     +-
     +-        if (!av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_DYNAMIC_HDR_VIVID, info_ref)) {
     +-            av_buffer_unref(&info_ref);
     +-            return AVERROR(ENOMEM);
     +-        }
     +-    }
     +-
     +     return 0;
     + }
       
     -+/**
     -+ * Set the decodec side data to an AVFrame.
     -+ * @logctx context for logging.
     -+ * @sei HEVCSEI decoding context, must not be NULL.
     -+ * @s HEVCContext, can be NULL.
     -+ * @return < 0 on error, 0 otherwise.
     -+ */
     -+int ff_hevc_set_side_data(AVCodecContext *logctx, HEVCSEI *sei, HEVCContext *s, AVFrame *out);
     -+
     - extern const uint8_t ff_hevc_qpel_extra_before[4];
     - extern const uint8_t ff_hevc_qpel_extra_after[4];
     - extern const uint8_t ff_hevc_qpel_extra[4];
 5:  4c0b6eb4cb ! 2:  51b234c8d0 avcodec/h264dec: make h264_export_frame_props() accessible
     @@ Commit message
      
          Signed-off-by: softworkz <softworkz@hotmail.com>
      
     - ## libavcodec/h264_slice.c ##
     -@@ libavcodec/h264_slice.c: static int h264_init_ps(H264Context *h, const H264SliceContext *sl, int first_sl
     -     return 0;
     - }
     + ## libavcodec/h264_sei.c ##
     +@@
     + #include "h264_ps.h"
     + #include "h264_sei.h"
     + #include "sei.h"
     ++#include "libavutil/display.h"
     ++#include "libavutil/film_grain_params.h"
     ++#include "libavutil/stereo3d.h"
     ++#include "libavutil/timecode.h"
       
     --static int h264_export_frame_props(H264Context *h)
     -+int ff_h264_export_frame_props(AVCodecContext *logctx, H264SEIContext *sei, H264Context *h, AVFrame *out)
     - {
     --    const SPS *sps = h->ps.sps;
     --    H264Picture *cur = h->cur_pic_ptr;
     --    AVFrame *out = cur->f;
     -+    const SPS *sps = h ? h->ps.sps : NULL;
     -+    H264Picture *cur = h ? h->cur_pic_ptr : NULL;
     - 
     -     out->interlaced_frame = 0;
     -     out->repeat_pict      = 0;
     -@@ libavcodec/h264_slice.c: static int h264_export_frame_props(H264Context *h)
     -     /* Signal interlacing information externally. */
     -     /* Prioritize picture timing SEI information over used
     -      * decoding process if it exists. */
     --    if (h->sei.picture_timing.present) {
     --        int ret = ff_h264_sei_process_picture_timing(&h->sei.picture_timing, sps,
     --                                                     h->avctx);
     -+    if (sps && sei->picture_timing.present) {
     -+        int ret = ff_h264_sei_process_picture_timing(&sei->picture_timing, sps,
     -+                                                     logctx);
     -         if (ret < 0) {
     --            av_log(h->avctx, AV_LOG_ERROR, "Error processing a picture timing SEI\n");
     --            if (h->avctx->err_recognition & AV_EF_EXPLODE)
     -+            av_log(logctx, AV_LOG_ERROR, "Error processing a picture timing SEI\n");
     -+            if (logctx->err_recognition & AV_EF_EXPLODE)
     -                 return ret;
     --            h->sei.picture_timing.present = 0;
     -+            sei->picture_timing.present = 0;
     -         }
     -     }
     + #define AVERROR_PS_NOT_FOUND      FFERRTAG(0xF8,'?','P','S')
       
     --    if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
     --        H264SEIPictureTiming *pt = &h->sei.picture_timing;
     -+    if (h && sps && sps->pic_struct_present_flag && sei->picture_timing.present) {
     -+        H264SEIPictureTiming *pt = &sei->picture_timing;
     -         switch (pt->pic_struct) {
     -         case H264_SEI_PIC_STRUCT_FRAME:
     -             break;
     -@@ libavcodec/h264_slice.c: static int h264_export_frame_props(H264Context *h)
     -         if ((pt->ct_type & 3) &&
     -             pt->pic_struct <= H264_SEI_PIC_STRUCT_BOTTOM_TOP)
     -             out->interlaced_frame = (pt->ct_type & (1 << 1)) != 0;
     --    } else {
     -+    } else if (h) {
     -         /* Derive interlacing flag from used decoding process. */
     -         out->interlaced_frame = FIELD_OR_MBAFF_PICTURE(h);
     +@@ libavcodec/h264_sei.c: const char *ff_h264_sei_stereo_mode(const H264SEIFramePacking *h)
     +         return NULL;
           }
     --    h->prev_interlaced_frame = out->interlaced_frame;
     + }
     ++
     ++int ff_h264_set_sei_to_frame(AVCodecContext *avctx, H264SEIContext *sei, AVFrame *out, const SPS *sps, uint64_t seed)
     ++{
     ++    if (sei->frame_packing.present &&
     ++        sei->frame_packing.arrangement_type <= 6 &&
     ++        sei->frame_packing.content_interpretation_type > 0 &&
     ++        sei->frame_packing.content_interpretation_type < 3) {
     ++        H264SEIFramePacking *fp = &sei->frame_packing;
     ++        AVStereo3D *stereo = av_stereo3d_create_side_data(out);
     ++        if (stereo) {
     ++        switch (fp->arrangement_type) {
     ++        case H264_SEI_FPA_TYPE_CHECKERBOARD:
     ++            stereo->type = AV_STEREO3D_CHECKERBOARD;
     ++            break;
     ++        case H264_SEI_FPA_TYPE_INTERLEAVE_COLUMN:
     ++            stereo->type = AV_STEREO3D_COLUMNS;
     ++            break;
     ++        case H264_SEI_FPA_TYPE_INTERLEAVE_ROW:
     ++            stereo->type = AV_STEREO3D_LINES;
     ++            break;
     ++        case H264_SEI_FPA_TYPE_SIDE_BY_SIDE:
     ++            if (fp->quincunx_sampling_flag)
     ++                stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
     ++            else
     ++                stereo->type = AV_STEREO3D_SIDEBYSIDE;
     ++            break;
     ++        case H264_SEI_FPA_TYPE_TOP_BOTTOM:
     ++            stereo->type = AV_STEREO3D_TOPBOTTOM;
     ++            break;
     ++        case H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL:
     ++            stereo->type = AV_STEREO3D_FRAMESEQUENCE;
     ++            break;
     ++        case H264_SEI_FPA_TYPE_2D:
     ++            stereo->type = AV_STEREO3D_2D;
     ++            break;
     ++        }
     ++
     ++        if (fp->content_interpretation_type == 2)
     ++            stereo->flags = AV_STEREO3D_FLAG_INVERT;
     ++
     ++        if (fp->arrangement_type == H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL) {
     ++            if (fp->current_frame_is_frame0_flag)
     ++                stereo->view = AV_STEREO3D_VIEW_LEFT;
     ++            else
     ++                stereo->view = AV_STEREO3D_VIEW_RIGHT;
     ++        }
     ++        }
     ++    }
     ++
     ++    if (sei->display_orientation.present &&
     ++        (sei->display_orientation.anticlockwise_rotation ||
     ++         sei->display_orientation.hflip ||
     ++         sei->display_orientation.vflip)) {
     ++        H264SEIDisplayOrientation *o = &sei->display_orientation;
     ++        double angle = o->anticlockwise_rotation * 360 / (double) (1 << 16);
     ++        AVFrameSideData *rotation = av_frame_new_side_data(out,
     ++                                                           AV_FRAME_DATA_DISPLAYMATRIX,
     ++                                                           sizeof(int32_t) * 9);
     ++        if (rotation) {
     ++            /* av_display_rotation_set() expects the angle in the clockwise
     ++             * direction, hence the first minus.
     ++             * The below code applies the flips after the rotation, yet
     ++             * the H.2645 specs require flipping to be applied first.
     ++             * Because of R O(phi) = O(-phi) R (where R is flipping around
     ++             * an arbitatry axis and O(phi) is the proper rotation by phi)
     ++             * we can create display matrices as desired by negating
     ++             * the degree once for every flip applied. */
     ++            angle = -angle * (1 - 2 * !!o->hflip) * (1 - 2 * !!o->vflip);
     ++            av_display_rotation_set((int32_t *)rotation->data, angle);
     ++            av_display_matrix_flip((int32_t *)rotation->data,
     ++                                   o->hflip, o->vflip);
     ++        }
     ++    }
     ++
     ++    if (sei->afd.present) {
     ++        AVFrameSideData *sd = av_frame_new_side_data(out, AV_FRAME_DATA_AFD,
     ++                                                     sizeof(uint8_t));
     ++
     ++        if (sd) {
     ++            *sd->data = sei->afd.active_format_description;
     ++            sei->afd.present = 0;
     ++        }
     ++    }
     ++
     ++    if (sei->a53_caption.buf_ref) {
     ++        H264SEIA53Caption *a53 = &sei->a53_caption;
     ++
     ++        AVFrameSideData *sd = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_A53_CC, a53->buf_ref);
     ++        if (!sd)
     ++            av_buffer_unref(&a53->buf_ref);
     ++        a53->buf_ref = NULL;
     ++
     ++        avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
     ++    }
     ++
     ++    for (int i = 0; i < sei->unregistered.nb_buf_ref; i++) {
     ++        H264SEIUnregistered *unreg = &sei->unregistered;
     ++
     ++        if (unreg->buf_ref[i]) {
     ++            AVFrameSideData *sd = av_frame_new_side_data_from_buf(out,
     ++                    AV_FRAME_DATA_SEI_UNREGISTERED,
     ++                    unreg->buf_ref[i]);
     ++            if (!sd)
     ++                av_buffer_unref(&unreg->buf_ref[i]);
     ++            unreg->buf_ref[i] = NULL;
     ++        }
     ++    }
     ++    sei->unregistered.nb_buf_ref = 0;
     ++
     ++    if (sps && sei->film_grain_characteristics.present) {
     ++        H264SEIFilmGrainCharacteristics *fgc = &sei->film_grain_characteristics;
     ++        AVFilmGrainParams *fgp = av_film_grain_params_create_side_data(out);
     ++        if (!fgp)
     ++            return AVERROR(ENOMEM);
     ++
     ++        fgp->type = AV_FILM_GRAIN_PARAMS_H274;
     ++        fgp->seed = seed;
     ++
     ++        fgp->codec.h274.model_id = fgc->model_id;
     ++        if (fgc->separate_colour_description_present_flag) {
     ++            fgp->codec.h274.bit_depth_luma = fgc->bit_depth_luma;
     ++            fgp->codec.h274.bit_depth_chroma = fgc->bit_depth_chroma;
     ++            fgp->codec.h274.color_range = fgc->full_range + 1;
     ++            fgp->codec.h274.color_primaries = fgc->color_primaries;
     ++            fgp->codec.h274.color_trc = fgc->transfer_characteristics;
     ++            fgp->codec.h274.color_space = fgc->matrix_coeffs;
     ++        } else {
     ++            fgp->codec.h274.bit_depth_luma = sps->bit_depth_luma;
     ++            fgp->codec.h274.bit_depth_chroma = sps->bit_depth_chroma;
     ++            if (sps->video_signal_type_present_flag)
     ++                fgp->codec.h274.color_range = sps->full_range + 1;
     ++            else
     ++                fgp->codec.h274.color_range = AVCOL_RANGE_UNSPECIFIED;
     ++            if (sps->colour_description_present_flag) {
     ++                fgp->codec.h274.color_primaries = sps->color_primaries;
     ++                fgp->codec.h274.color_trc = sps->color_trc;
     ++                fgp->codec.h274.color_space = sps->colorspace;
     ++            } else {
     ++                fgp->codec.h274.color_primaries = AVCOL_PRI_UNSPECIFIED;
     ++                fgp->codec.h274.color_trc = AVCOL_TRC_UNSPECIFIED;
     ++                fgp->codec.h274.color_space = AVCOL_SPC_UNSPECIFIED;
     ++            }
     ++        }
     ++        fgp->codec.h274.blending_mode_id = fgc->blending_mode_id;
     ++        fgp->codec.h274.log2_scale_factor = fgc->log2_scale_factor;
     ++
     ++        memcpy(&fgp->codec.h274.component_model_present, &fgc->comp_model_present_flag,
     ++               sizeof(fgp->codec.h274.component_model_present));
     ++        memcpy(&fgp->codec.h274.num_intensity_intervals, &fgc->num_intensity_intervals,
     ++               sizeof(fgp->codec.h274.num_intensity_intervals));
     ++        memcpy(&fgp->codec.h274.num_model_values, &fgc->num_model_values,
     ++               sizeof(fgp->codec.h274.num_model_values));
     ++        memcpy(&fgp->codec.h274.intensity_interval_lower_bound, &fgc->intensity_interval_lower_bound,
     ++               sizeof(fgp->codec.h274.intensity_interval_lower_bound));
     ++        memcpy(&fgp->codec.h274.intensity_interval_upper_bound, &fgc->intensity_interval_upper_bound,
     ++               sizeof(fgp->codec.h274.intensity_interval_upper_bound));
     ++        memcpy(&fgp->codec.h274.comp_model_value, &fgc->comp_model_value,
     ++               sizeof(fgp->codec.h274.comp_model_value));
     ++
     ++        fgc->present = !!fgc->repetition_period;
     ++
     ++        avctx->properties |= FF_CODEC_PROPERTY_FILM_GRAIN;
     ++    }
     ++
     ++    if (sei->picture_timing.timecode_cnt > 0) {
     ++        uint32_t *tc_sd;
     ++        char tcbuf[AV_TIMECODE_STR_SIZE];
     ++
     ++        AVFrameSideData *tcside = av_frame_new_side_data(out,
     ++                                                         AV_FRAME_DATA_S12M_TIMECODE,
     ++                                                         sizeof(uint32_t)*4);
     ++        if (!tcside)
     ++            return AVERROR(ENOMEM);
     ++
     ++        tc_sd = (uint32_t*)tcside->data;
     ++        tc_sd[0] = sei->picture_timing.timecode_cnt;
     ++
     ++        for (int i = 0; i < tc_sd[0]; i++) {
     ++            int drop = sei->picture_timing.timecode[i].dropframe;
     ++            int   hh = sei->picture_timing.timecode[i].hours;
     ++            int   mm = sei->picture_timing.timecode[i].minutes;
     ++            int   ss = sei->picture_timing.timecode[i].seconds;
     ++            int   ff = sei->picture_timing.timecode[i].frame;
     ++
     ++            tc_sd[i + 1] = av_timecode_get_smpte(avctx->framerate, drop, hh, mm, ss, ff);
     ++            av_timecode_make_smpte_tc_string2(tcbuf, avctx->framerate, tc_sd[i + 1], 0, 0);
     ++            av_dict_set(&out->metadata, "timecode", tcbuf, 0);
     ++        }
     ++        sei->picture_timing.timecode_cnt = 0;
     ++    }
     ++
     ++    return 0;
     ++}
     +
     + ## libavcodec/h264_sei.h ##
     +@@ libavcodec/h264_sei.h: const char *ff_h264_sei_stereo_mode(const H264SEIFramePacking *h);
     + int ff_h264_sei_process_picture_timing(H264SEIPictureTiming *h, const SPS *sps,
     +                                        void *logctx);
       
     --    if (cur->field_poc[0] != cur->field_poc[1]) {
     -+    if (h)
     -+        h->prev_interlaced_frame = out->interlaced_frame;
     -+
     -+    if (sps && cur->field_poc[0] != cur->field_poc[1]) {
     -         /* Derive top_field_first from field pocs. */
     -         out->top_field_first = cur->field_poc[0] < cur->field_poc[1];
     --    } else {
     --        if (sps->pic_struct_present_flag && h->sei.picture_timing.present) {
     -+    } else if (sps) {
     -+        if (sps->pic_struct_present_flag && sei->picture_timing.present) {
     -             /* Use picture timing SEI information. Even if it is a
     -              * information of a past frame, better than nothing. */
     --            if (h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM ||
     --                h->sei.picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
     -+            if (sei->picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM ||
     -+                sei->picture_timing.pic_struct == H264_SEI_PIC_STRUCT_TOP_BOTTOM_TOP)
     -                 out->top_field_first = 1;
     -             else
     -                 out->top_field_first = 0;
     ++int ff_h264_set_sei_to_frame(AVCodecContext *avctx, H264SEIContext *sei, AVFrame *out, const SPS *sps, uint64_t seed);
     ++
     + #endif /* AVCODEC_H264_SEI_H */
     +
     + ## libavcodec/h264_slice.c ##
      @@ libavcodec/h264_slice.c: static int h264_export_frame_props(H264Context *h)
               }
           }
     @@ libavcodec/h264_slice.c: static int h264_export_frame_props(H264Context *h)
      -        h->sei.frame_packing.content_interpretation_type > 0 &&
      -        h->sei.frame_packing.content_interpretation_type < 3) {
      -        H264SEIFramePacking *fp = &h->sei.frame_packing;
     -+    if (sei->frame_packing.present &&
     -+        sei->frame_packing.arrangement_type <= 6 &&
     -+        sei->frame_packing.content_interpretation_type > 0 &&
     -+        sei->frame_packing.content_interpretation_type < 3) {
     -+        H264SEIFramePacking *fp = &sei->frame_packing;
     -         AVStereo3D *stereo = av_stereo3d_create_side_data(out);
     -         if (stereo) {
     -         switch (fp->arrangement_type) {
     -@@ libavcodec/h264_slice.c: static int h264_export_frame_props(H264Context *h)
     -         }
     -     }
     - 
     +-        AVStereo3D *stereo = av_stereo3d_create_side_data(out);
     +-        if (stereo) {
     +-        switch (fp->arrangement_type) {
     +-        case H264_SEI_FPA_TYPE_CHECKERBOARD:
     +-            stereo->type = AV_STEREO3D_CHECKERBOARD;
     +-            break;
     +-        case H264_SEI_FPA_TYPE_INTERLEAVE_COLUMN:
     +-            stereo->type = AV_STEREO3D_COLUMNS;
     +-            break;
     +-        case H264_SEI_FPA_TYPE_INTERLEAVE_ROW:
     +-            stereo->type = AV_STEREO3D_LINES;
     +-            break;
     +-        case H264_SEI_FPA_TYPE_SIDE_BY_SIDE:
     +-            if (fp->quincunx_sampling_flag)
     +-                stereo->type = AV_STEREO3D_SIDEBYSIDE_QUINCUNX;
     +-            else
     +-                stereo->type = AV_STEREO3D_SIDEBYSIDE;
     +-            break;
     +-        case H264_SEI_FPA_TYPE_TOP_BOTTOM:
     +-            stereo->type = AV_STEREO3D_TOPBOTTOM;
     +-            break;
     +-        case H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL:
     +-            stereo->type = AV_STEREO3D_FRAMESEQUENCE;
     +-            break;
     +-        case H264_SEI_FPA_TYPE_2D:
     +-            stereo->type = AV_STEREO3D_2D;
     +-            break;
     +-        }
     +-
     +-        if (fp->content_interpretation_type == 2)
     +-            stereo->flags = AV_STEREO3D_FLAG_INVERT;
     +-
     +-        if (fp->arrangement_type == H264_SEI_FPA_TYPE_INTERLEAVE_TEMPORAL) {
     +-            if (fp->current_frame_is_frame0_flag)
     +-                stereo->view = AV_STEREO3D_VIEW_LEFT;
     +-            else
     +-                stereo->view = AV_STEREO3D_VIEW_RIGHT;
     +-        }
     +-        }
     +-    }
     +-
      -    if (h->sei.display_orientation.present &&
      -        (h->sei.display_orientation.anticlockwise_rotation ||
      -         h->sei.display_orientation.hflip ||
      -         h->sei.display_orientation.vflip)) {
      -        H264SEIDisplayOrientation *o = &h->sei.display_orientation;
     -+    if (sei->display_orientation.present &&
     -+        (sei->display_orientation.anticlockwise_rotation ||
     -+         sei->display_orientation.hflip ||
     -+         sei->display_orientation.vflip)) {
     -+        H264SEIDisplayOrientation *o = &sei->display_orientation;
     -         double angle = o->anticlockwise_rotation * 360 / (double) (1 << 16);
     -         AVFrameSideData *rotation = av_frame_new_side_data(out,
     -                                                            AV_FRAME_DATA_DISPLAYMATRIX,
     -@@ libavcodec/h264_slice.c: static int h264_export_frame_props(H264Context *h)
     -         }
     -     }
     - 
     +-        double angle = o->anticlockwise_rotation * 360 / (double) (1 << 16);
     +-        AVFrameSideData *rotation = av_frame_new_side_data(out,
     +-                                                           AV_FRAME_DATA_DISPLAYMATRIX,
     +-                                                           sizeof(int32_t) * 9);
     +-        if (rotation) {
     +-            /* av_display_rotation_set() expects the angle in the clockwise
     +-             * direction, hence the first minus.
     +-             * The below code applies the flips after the rotation, yet
     +-             * the H.2645 specs require flipping to be applied first.
     +-             * Because of R O(phi) = O(-phi) R (where R is flipping around
     +-             * an arbitatry axis and O(phi) is the proper rotation by phi)
     +-             * we can create display matrices as desired by negating
     +-             * the degree once for every flip applied. */
     +-            angle = -angle * (1 - 2 * !!o->hflip) * (1 - 2 * !!o->vflip);
     +-            av_display_rotation_set((int32_t *)rotation->data, angle);
     +-            av_display_matrix_flip((int32_t *)rotation->data,
     +-                                   o->hflip, o->vflip);
     +-        }
     +-    }
     +-
      -    if (h->sei.afd.present) {
     -+    if (sei->afd.present) {
     -         AVFrameSideData *sd = av_frame_new_side_data(out, AV_FRAME_DATA_AFD,
     -                                                      sizeof(uint8_t));
     - 
     -         if (sd) {
     +-        AVFrameSideData *sd = av_frame_new_side_data(out, AV_FRAME_DATA_AFD,
     +-                                                     sizeof(uint8_t));
     +-
     +-        if (sd) {
      -            *sd->data = h->sei.afd.active_format_description;
      -            h->sei.afd.present = 0;
     -+            *sd->data = sei->afd.active_format_description;
     -+            sei->afd.present = 0;
     -         }
     -     }
     - 
     +-        }
     +-    }
     +-
      -    if (h->sei.a53_caption.buf_ref) {
      -        H264SEIA53Caption *a53 = &h->sei.a53_caption;
     -+    if (sei->a53_caption.buf_ref) {
     -+        H264SEIA53Caption *a53 = &sei->a53_caption;
     - 
     -         AVFrameSideData *sd = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_A53_CC, a53->buf_ref);
     -         if (!sd)
     -             av_buffer_unref(&a53->buf_ref);
     -         a53->buf_ref = NULL;
     - 
     +-
     +-        AVFrameSideData *sd = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_A53_CC, a53->buf_ref);
     +-        if (!sd)
     +-            av_buffer_unref(&a53->buf_ref);
     +-        a53->buf_ref = NULL;
     +-
      -        h->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
     -+        if (h)
     -+            h->avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
     -     }
     - 
     +-    }
     +-
      -    for (int i = 0; i < h->sei.unregistered.nb_buf_ref; i++) {
      -        H264SEIUnregistered *unreg = &h->sei.unregistered;
     -+    for (int i = 0; i < sei->unregistered.nb_buf_ref; i++) {
     -+        H264SEIUnregistered *unreg = &sei->unregistered;
     - 
     -         if (unreg->buf_ref[i]) {
     -             AVFrameSideData *sd = av_frame_new_side_data_from_buf(out,
     -@@ libavcodec/h264_slice.c: static int h264_export_frame_props(H264Context *h)
     -             unreg->buf_ref[i] = NULL;
     -         }
     -     }
     +-
     +-        if (unreg->buf_ref[i]) {
     +-            AVFrameSideData *sd = av_frame_new_side_data_from_buf(out,
     +-                    AV_FRAME_DATA_SEI_UNREGISTERED,
     +-                    unreg->buf_ref[i]);
     +-            if (!sd)
     +-                av_buffer_unref(&unreg->buf_ref[i]);
     +-            unreg->buf_ref[i] = NULL;
     +-        }
     +-    }
      -    h->sei.unregistered.nb_buf_ref = 0;
     -+    sei->unregistered.nb_buf_ref = 0;
     - 
     +-
      -    if (h->sei.film_grain_characteristics.present) {
      -        H264SEIFilmGrainCharacteristics *fgc = &h->sei.film_grain_characteristics;
     -+    if (h && sps && sei->film_grain_characteristics.present) {
     -+        H264SEIFilmGrainCharacteristics *fgc = &sei->film_grain_characteristics;
     -         AVFilmGrainParams *fgp = av_film_grain_params_create_side_data(out);
     -         if (!fgp)
     -             return AVERROR(ENOMEM);
     -@@ libavcodec/h264_slice.c: static int h264_export_frame_props(H264Context *h)
     -         h->avctx->properties |= FF_CODEC_PROPERTY_FILM_GRAIN;
     -     }
     - 
     +-        AVFilmGrainParams *fgp = av_film_grain_params_create_side_data(out);
     +-        if (!fgp)
     +-            return AVERROR(ENOMEM);
     +-
     +-        fgp->type = AV_FILM_GRAIN_PARAMS_H274;
     +-        fgp->seed = cur->poc + (h->poc_offset << 5);
     +-
     +-        fgp->codec.h274.model_id = fgc->model_id;
     +-        if (fgc->separate_colour_description_present_flag) {
     +-            fgp->codec.h274.bit_depth_luma = fgc->bit_depth_luma;
     +-            fgp->codec.h274.bit_depth_chroma = fgc->bit_depth_chroma;
     +-            fgp->codec.h274.color_range = fgc->full_range + 1;
     +-            fgp->codec.h274.color_primaries = fgc->color_primaries;
     +-            fgp->codec.h274.color_trc = fgc->transfer_characteristics;
     +-            fgp->codec.h274.color_space = fgc->matrix_coeffs;
     +-        } else {
     +-            fgp->codec.h274.bit_depth_luma = sps->bit_depth_luma;
     +-            fgp->codec.h274.bit_depth_chroma = sps->bit_depth_chroma;
     +-            if (sps->video_signal_type_present_flag)
     +-                fgp->codec.h274.color_range = sps->full_range + 1;
     +-            else
     +-                fgp->codec.h274.color_range = AVCOL_RANGE_UNSPECIFIED;
     +-            if (sps->colour_description_present_flag) {
     +-                fgp->codec.h274.color_primaries = sps->color_primaries;
     +-                fgp->codec.h274.color_trc = sps->color_trc;
     +-                fgp->codec.h274.color_space = sps->colorspace;
     +-            } else {
     +-                fgp->codec.h274.color_primaries = AVCOL_PRI_UNSPECIFIED;
     +-                fgp->codec.h274.color_trc = AVCOL_TRC_UNSPECIFIED;
     +-                fgp->codec.h274.color_space = AVCOL_SPC_UNSPECIFIED;
     +-            }
     +-        }
     +-        fgp->codec.h274.blending_mode_id = fgc->blending_mode_id;
     +-        fgp->codec.h274.log2_scale_factor = fgc->log2_scale_factor;
     +-
     +-        memcpy(&fgp->codec.h274.component_model_present, &fgc->comp_model_present_flag,
     +-               sizeof(fgp->codec.h274.component_model_present));
     +-        memcpy(&fgp->codec.h274.num_intensity_intervals, &fgc->num_intensity_intervals,
     +-               sizeof(fgp->codec.h274.num_intensity_intervals));
     +-        memcpy(&fgp->codec.h274.num_model_values, &fgc->num_model_values,
     +-               sizeof(fgp->codec.h274.num_model_values));
     +-        memcpy(&fgp->codec.h274.intensity_interval_lower_bound, &fgc->intensity_interval_lower_bound,
     +-               sizeof(fgp->codec.h274.intensity_interval_lower_bound));
     +-        memcpy(&fgp->codec.h274.intensity_interval_upper_bound, &fgc->intensity_interval_upper_bound,
     +-               sizeof(fgp->codec.h274.intensity_interval_upper_bound));
     +-        memcpy(&fgp->codec.h274.comp_model_value, &fgc->comp_model_value,
     +-               sizeof(fgp->codec.h274.comp_model_value));
     +-
     +-        fgc->present = !!fgc->repetition_period;
     +-
     +-        h->avctx->properties |= FF_CODEC_PROPERTY_FILM_GRAIN;
     +-    }
     +-
      -    if (h->sei.picture_timing.timecode_cnt > 0) {
     -+    if (h && sei->picture_timing.timecode_cnt > 0) {
     -         uint32_t *tc_sd;
     -         char tcbuf[AV_TIMECODE_STR_SIZE];
     - 
     -@@ libavcodec/h264_slice.c: static int h264_export_frame_props(H264Context *h)
     -             return AVERROR(ENOMEM);
     - 
     -         tc_sd = (uint32_t*)tcside->data;
     +-        uint32_t *tc_sd;
     +-        char tcbuf[AV_TIMECODE_STR_SIZE];
     +-
     +-        AVFrameSideData *tcside = av_frame_new_side_data(out,
     +-                                                         AV_FRAME_DATA_S12M_TIMECODE,
     +-                                                         sizeof(uint32_t)*4);
     +-        if (!tcside)
     +-            return AVERROR(ENOMEM);
     +-
     +-        tc_sd = (uint32_t*)tcside->data;
      -        tc_sd[0] = h->sei.picture_timing.timecode_cnt;
     -+        tc_sd[0] = sei->picture_timing.timecode_cnt;
     - 
     -         for (int i = 0; i < tc_sd[0]; i++) {
     +-
     +-        for (int i = 0; i < tc_sd[0]; i++) {
      -            int drop = h->sei.picture_timing.timecode[i].dropframe;
      -            int   hh = h->sei.picture_timing.timecode[i].hours;
      -            int   mm = h->sei.picture_timing.timecode[i].minutes;
      -            int   ss = h->sei.picture_timing.timecode[i].seconds;
      -            int   ff = h->sei.picture_timing.timecode[i].frame;
     -+            int drop = sei->picture_timing.timecode[i].dropframe;
     -+            int   hh = sei->picture_timing.timecode[i].hours;
     -+            int   mm = sei->picture_timing.timecode[i].minutes;
     -+            int   ss = sei->picture_timing.timecode[i].seconds;
     -+            int   ff = sei->picture_timing.timecode[i].frame;
     - 
     -             tc_sd[i + 1] = av_timecode_get_smpte(h->avctx->framerate, drop, hh, mm, ss, ff);
     -             av_timecode_make_smpte_tc_string2(tcbuf, h->avctx->framerate, tc_sd[i + 1], 0, 0);
     -@@ libavcodec/h264_slice.c: static int h264_field_start(H264Context *h, const H264SliceContext *sl,
     -      * field coded frames, since some SEI information is present for each field
     -      * and is merged by the SEI parsing code. */
     -     if (!FIELD_PICTURE(h) || !h->first_field || h->missing_fields > 1) {
     --        ret = h264_export_frame_props(h);
     -+        ret = ff_h264_export_frame_props(h->avctx, &h->sei, h, h->cur_pic_ptr->f);
     -         if (ret < 0)
     -             return ret;
     - 
     -
     - ## libavcodec/h264dec.h ##
     -@@ libavcodec/h264dec.h: void ff_h264_free_tables(H264Context *h);
     - 
     - void ff_h264_set_erpic(ERPicture *dst, H264Picture *src);
     +-
     +-            tc_sd[i + 1] = av_timecode_get_smpte(h->avctx->framerate, drop, hh, mm, ss, ff);
     +-            av_timecode_make_smpte_tc_string2(tcbuf, h->avctx->framerate, tc_sd[i + 1], 0, 0);
     +-            av_dict_set(&out->metadata, "timecode", tcbuf, 0);
     +-        }
     +-        h->sei.picture_timing.timecode_cnt = 0;
     +-    }
     +-
     +-    return 0;
     ++    return ff_h264_set_sei_to_frame(h->avctx, &h->sei, out, sps, cur->poc + (h->poc_offset << 5));
     + }
       
     -+int ff_h264_export_frame_props(AVCodecContext *logctx, H264SEIContext *sei, H264Context *h, AVFrame *out);
     -+
     - #endif /* AVCODEC_H264DEC_H */
     + static int h264_select_output_frame(H264Context *h)
 6:  19bc00be4d ! 3:  61626ebb78 avcodec/qsvdec: Implement SEI parsing for QSV decoders
     @@ libavcodec/Makefile: OBJS-$(CONFIG_MSS34DSP)                += mss34dsp.o
       OBJS-$(CONFIG_QPELDSP)                 += qpeldsp.o
       OBJS-$(CONFIG_QSV)                     += qsv.o
      -OBJS-$(CONFIG_QSVDEC)                  += qsvdec.o
     -+OBJS-$(CONFIG_QSVDEC)                  += qsvdec.o h264_slice.o h264_cabac.o h264_cavlc.o \
     -+                                          h264_direct.o h264_mb.o h264_picture.o h264_loopfilter.o \
     -+                                          h264dec.o h264_refs.o cabac.o hevcdec.o hevc_refs.o \
     -+										  hevc_filter.o hevc_cabac.o hevc_mvs.o hevcpred.o hevcdsp.o \
     -+										  h274.o dovi_rpu.o mpeg12dec.o
     ++OBJS-$(CONFIG_QSVDEC)                  += qsvdec.o h264_sei.o hevc_sei.o
       OBJS-$(CONFIG_QSVENC)                  += qsvenc.o
       OBJS-$(CONFIG_RANGECODER)              += rangecoder.o
       OBJS-$(CONFIG_RDFT)                    += rdft.o
      
     - ## libavcodec/hevcdsp.c ##
     + ## libavcodec/qsvdec.c ##
      @@
     -  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
     -  */
     - 
     -+#include "config_components.h"
     -+
     - #include "hevcdsp.h"
     - 
     - static const int8_t transform[32][32] = {
     -@@ libavcodec/hevcdsp.c: int i = 0;
     -         break;
     -     }
     + #include "libavutil/time.h"
     + #include "libavutil/imgutils.h"
     + #include "libavutil/film_grain_params.h"
     ++#include <libavutil/reverse.h>
       
     -+#if CONFIG_HEVC_DECODER
     - #if ARCH_AARCH64
     -     ff_hevc_dsp_init_aarch64(hevcdsp, bit_depth);
     - #elif ARCH_ARM
     -@@ libavcodec/hevcdsp.c: int i = 0;
     - #elif ARCH_LOONGARCH
     -     ff_hevc_dsp_init_loongarch(hevcdsp, bit_depth);
     - #endif
     -+#endif
     - }
     -
     - ## libavcodec/qsvdec.c ##
     + #include "avcodec.h"
     + #include "codec_internal.h"
      @@
       #include "hwconfig.h"
       #include "qsv.h"
       #include "qsv_internal.h"
     -+#include "h264dec.h"
      +#include "h264_sei.h"
     -+#include "hevcdec.h"
      +#include "hevc_ps.h"
      +#include "hevc_sei.h"
     -+#include "mpeg12.h"
     - 
     - static const AVRational mfx_tb = { 1, 90000 };
       
     + #if QSV_ONEVPL
     + #include <mfxdispatcher.h>
      @@ libavcodec/qsvdec.c: static const AVRational mfx_tb = { 1, 90000 };
           AV_NOPTS_VALUE : pts_tb.num ? \
           av_rescale_q(mfx_pts, mfx_tb, pts_tb) : mfx_pts)
     @@ libavcodec/qsvdec.c: typedef struct QSVContext {
           int         nb_ext_buffers;
      +
      +    mfxU8 payload_buffer[PAYLOAD_BUFFER_SIZE];
     -+    Mpeg1Context mpeg_ctx;
     ++    AVBufferRef *a53_buf_ref;
       } QSVContext;
       
       static const AVCodecHWConfigInternal *const qsv_hw_configs[] = {
      @@ libavcodec/qsvdec.c: static int qsv_export_film_grain(AVCodecContext *avctx, mfxExtAV1FilmGrainParam
     -     return 0;
       }
       #endif
     + 
      +static int find_start_offset(mfxU8 data[4])
      +{
      +    if (data[0] == 0 && data[1] == 0 && data[2] == 1)
     @@ libavcodec/qsvdec.c: static int qsv_export_film_grain(AVCodecContext *avctx, mfx
      +{
      +    H264SEIContext sei = { 0 };
      +    GetBitContext gb = { 0 };
     -+    mfxPayload payload = { 0, .Data = &q->payload_buffer[0], .BufSize = sizeof(q->payload_buffer) };
     ++    mfxPayload payload = { 0, .Data = &q->payload_buffer[0], .BufSize = sizeof(q->payload_buffer) - AV_INPUT_BUFFER_PADDING_SIZE };
      +    mfxU64 ts;
      +    int ret;
      +
     @@ libavcodec/qsvdec.c: static int qsv_export_film_grain(AVCodecContext *avctx, mfx
      +    }
      +
      +    if (out)
     -+        return ff_h264_export_frame_props(avctx, &sei, NULL, out);
     ++        return ff_h264_set_sei_to_frame(avctx, &sei, out, NULL, 0);
      +
      +    return 0;
      +}
     @@ libavcodec/qsvdec.c: static int qsv_export_film_grain(AVCodecContext *avctx, mfx
      +    HEVCSEI sei = { 0 };
      +    HEVCParamSets ps = { 0 };
      +    GetBitContext gb = { 0 };
     -+    mfxPayload payload = { 0, .Data = &q->payload_buffer[0], .BufSize = sizeof(q->payload_buffer) };
     ++    mfxPayload payload = { 0, .Data = &q->payload_buffer[0], .BufSize = sizeof(q->payload_buffer) - AV_INPUT_BUFFER_PADDING_SIZE };
      +    mfxFrameSurface1 *surface = &out->surface;
      +    mfxU64 ts;
      +    int ret, has_logged = 0;
     @@ libavcodec/qsvdec.c: static int qsv_export_film_grain(AVCodecContext *avctx, mfx
      +    }
      +
      +    if (out && out->frame)
     -+        return ff_hevc_set_side_data(avctx, &sei, NULL, out->frame);
     ++        return ff_hevc_set_sei_to_frame(avctx, &sei, out->frame, avctx->framerate, 0, &ps.sps->vui, ps.sps->bit_depth, ps.sps->bit_depth_chroma);
     ++
     ++    return 0;
     ++}
     ++
     ++#define A53_MAX_CC_COUNT 2000
      +
     ++static int mpeg_decode_a53_cc(AVCodecContext *avctx, QSVContext *s,
     ++                              const uint8_t *p, int buf_size)
     ++{
     ++    if (buf_size >= 6 &&
     ++        p[0] == 'G' && p[1] == 'A' && p[2] == '9' && p[3] == '4' &&
     ++        p[4] == 3 && (p[5] & 0x40)) {
     ++        /* extract A53 Part 4 CC data */
     ++        unsigned cc_count = p[5] & 0x1f;
     ++        if (cc_count > 0 && buf_size >= 7 + cc_count * 3) {
     ++            const uint64_t old_size = s->a53_buf_ref ? s->a53_buf_ref->size : 0;
     ++            const uint64_t new_size = (old_size + cc_count
     ++                                            * UINT64_C(3));
     ++            int ret;
     ++
     ++            if (new_size > 3*A53_MAX_CC_COUNT)
     ++                return AVERROR(EINVAL);
     ++
     ++            ret = av_buffer_realloc(&s->a53_buf_ref, new_size);
     ++            if (ret >= 0)
     ++                memcpy(s->a53_buf_ref->data + old_size, p + 7, cc_count * UINT64_C(3));
     ++
     ++            avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
     ++        }
     ++        return 1;
     ++    } else if (buf_size >= 2 && p[0] == 0x03 && (p[1]&0x7f) == 0x01) {
     ++        /* extract SCTE-20 CC data */
     ++        GetBitContext gb;
     ++        unsigned cc_count = 0;
     ++        int ret;
     ++
     ++        init_get_bits8(&gb, p + 2, buf_size - 2);
     ++        cc_count = get_bits(&gb, 5);
     ++        if (cc_count > 0) {
     ++            uint64_t old_size = s->a53_buf_ref ? s->a53_buf_ref->size : 0;
     ++            uint64_t new_size = (old_size + cc_count * UINT64_C(3));
     ++            if (new_size > 3 * A53_MAX_CC_COUNT)
     ++                return AVERROR(EINVAL);
     ++
     ++            ret = av_buffer_realloc(&s->a53_buf_ref, new_size);
     ++            if (ret >= 0) {
     ++                uint8_t field, cc1, cc2;
     ++                uint8_t *cap = s->a53_buf_ref->data;
     ++
     ++                memset(s->a53_buf_ref->data + old_size, 0, cc_count * 3);
     ++                for (unsigned i = 0; i < cc_count && get_bits_left(&gb) >= 26; i++) {
     ++                    skip_bits(&gb, 2); // priority
     ++                    field = get_bits(&gb, 2);
     ++                    skip_bits(&gb, 5); // line_offset
     ++                    cc1 = get_bits(&gb, 8);
     ++                    cc2 = get_bits(&gb, 8);
     ++                    skip_bits(&gb, 1); // marker
     ++
     ++                    if (!field) { // forbidden
     ++                        cap[0] = cap[1] = cap[2] = 0x00;
     ++                    } else {
     ++                        field = (field == 2 ? 1 : 0);
     ++                        ////if (!s1->mpeg_enc_ctx.top_field_first) field = !field;
     ++                        cap[0] = 0x04 | field;
     ++                        cap[1] = ff_reverse[cc1];
     ++                        cap[2] = ff_reverse[cc2];
     ++                    }
     ++                    cap += 3;
     ++                }
     ++            }
     ++            avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
     ++        }
     ++        return 1;
     ++    } else if (buf_size >= 11 && p[0] == 'C' && p[1] == 'C' && p[2] == 0x01 && p[3] == 0xf8) {
     ++        int cc_count = 0;
     ++        int i, ret;
     ++        // There is a caption count field in the data, but it is often
     ++        // incorrect.  So count the number of captions present.
     ++        for (i = 5; i + 6 <= buf_size && ((p[i] & 0xfe) == 0xfe); i += 6)
     ++            cc_count++;
     ++        // Transform the DVD format into A53 Part 4 format
     ++        if (cc_count > 0) {
     ++            int old_size = s->a53_buf_ref ? s->a53_buf_ref->size : 0;
     ++            uint64_t new_size = (old_size + cc_count
     ++                                            * UINT64_C(6));
     ++            if (new_size > 3*A53_MAX_CC_COUNT)
     ++                return AVERROR(EINVAL);
     ++
     ++            ret = av_buffer_realloc(&s->a53_buf_ref, new_size);
     ++            if (ret >= 0) {
     ++                uint8_t field1 = !!(p[4] & 0x80);
     ++                uint8_t *cap = s->a53_buf_ref->data;
     ++                p += 5;
     ++                for (i = 0; i < cc_count; i++) {
     ++                    cap[0] = (p[0] == 0xff && field1) ? 0xfc : 0xfd;
     ++                    cap[1] = p[1];
     ++                    cap[2] = p[2];
     ++                    cap[3] = (p[3] == 0xff && !field1) ? 0xfc : 0xfd;
     ++                    cap[4] = p[4];
     ++                    cap[5] = p[5];
     ++                    cap += 6;
     ++                    p += 6;
     ++                }
     ++            }
     ++            avctx->properties |= FF_CODEC_PROPERTY_CLOSED_CAPTIONS;
     ++        }
     ++        return 1;
     ++    }
      +    return 0;
      +}
      +
      +static int parse_sei_mpeg12(AVCodecContext* avctx, QSVContext* q, AVFrame* out)
      +{
     -+    Mpeg1Context *mpeg_ctx = &q->mpeg_ctx;
     -+    mfxPayload payload = { 0, .Data = &q->payload_buffer[0], .BufSize = sizeof(q->payload_buffer) };
     ++    mfxPayload payload = { 0, .Data = &q->payload_buffer[0], .BufSize = sizeof(q->payload_buffer) - AV_INPUT_BUFFER_PADDING_SIZE };
      +    mfxU64 ts;
      +    int ret;
      +
     @@ libavcodec/qsvdec.c: static int qsv_export_film_grain(AVCodecContext *avctx, mfx
      +
      +        start++;
      +
     -+        ff_mpeg_decode_user_data(avctx, mpeg_ctx, &payload.Data[start], (int)((payload.NumBit + 7) / 8) - start);
     ++        mpeg_decode_a53_cc(avctx, q, &payload.Data[start], (int)((payload.NumBit + 7) / 8) - start);
      +
      +        av_log(avctx, AV_LOG_DEBUG, "mfxPayload Type: %d  Numbits %d start %d -> %.s\n", payload.Type, payload.NumBit, start, (char *)(&payload.Data[start]));
      +    }
     @@ libavcodec/qsvdec.c: static int qsv_export_film_grain(AVCodecContext *avctx, mfx
      +    if (!out)
      +        return 0;
      +
     -+    if (mpeg_ctx->a53_buf_ref) {
     -+
     -+        AVFrameSideData *sd = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_A53_CC, mpeg_ctx->a53_buf_ref);
     -+        if (!sd)
     -+            av_buffer_unref(&mpeg_ctx->a53_buf_ref);
     -+        mpeg_ctx->a53_buf_ref = NULL;
     -+    }
     -+
     -+    if (mpeg_ctx->has_stereo3d) {
     -+        AVStereo3D *stereo = av_stereo3d_create_side_data(out);
     -+        if (!stereo)
     -+            return AVERROR(ENOMEM);
     -+
     -+        *stereo = mpeg_ctx->stereo3d;
     -+        mpeg_ctx->has_stereo3d = 0;
     -+    }
     ++    if (q->a53_buf_ref) {
      +
     -+    if (mpeg_ctx->has_afd) {
     -+        AVFrameSideData *sd = av_frame_new_side_data(out, AV_FRAME_DATA_AFD, 1);
     ++        AVFrameSideData *sd = av_frame_new_side_data_from_buf(out, AV_FRAME_DATA_A53_CC, q->a53_buf_ref);
      +        if (!sd)
     -+            return AVERROR(ENOMEM);
     -+
     -+        *sd->data   = mpeg_ctx->afd;
     -+        mpeg_ctx->has_afd = 0;
     ++            av_buffer_unref(&q->a53_buf_ref);
     ++        q->a53_buf_ref = NULL;
      +    }
      +
      +    return 0;
      +}
     - 
     ++
       static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
                             AVFrame *frame, int *got_frame,
     +                       const AVPacket *avpkt)
      @@ libavcodec/qsvdec.c: static int qsv_decode(AVCodecContext *avctx, QSVContext *q,
                                                     insurf, &outsurf, sync);
               if (ret == MFX_WRN_DEVICE_BUSY)