[FFmpeg-devel,v2,1/2] libavcodec/qsvdec: Add more pixel format support to qsvdec

Message ID 20220402093553.965954-1-wenbin.chen@intel.com
State New
Series [FFmpeg-devel,v2,1/2] libavcodec/qsvdec: Add more pixel format support to qsvdec

Checks

Context Check Description
yinshiyou/make_loongarch64 success Make finished
yinshiyou/make_fate_loongarch64 success Make fate finished
andriy/make_x86 success Make finished
andriy/make_fate_x86 success Make fate finished

Commit Message

Chen, Wenbin April 2, 2022, 9:35 a.m. UTC
The QSV decoder can only output nv12 and p010 directly to system
memory. For other formats, frames currently have to be downloaded from
QSV video memory to system memory. Now add the other supported formats
to qsvdec.

Signed-off-by: Wenbin Chen <wenbin.chen@intel.com>
---
 libavcodec/qsv.c          | 36 ++++++++++++++++++++++++++++++++++++
 libavcodec/qsv_internal.h |  3 +++
 libavcodec/qsvdec.c       | 23 +++++++++++++++++------
 3 files changed, 56 insertions(+), 6 deletions(-)
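
For context, here is a minimal sketch (not part of the patch) of how an application can steer a QSV decoder towards one of these system-memory formats. The decoder offers AV_PIX_FMT_QSV plus a software format through the get_format callback; returning the software entry makes it deliver system-memory frames, which with this series may also be YUYV422 or Y210. The decoder name and the helper below are illustrative only.

#include <libavcodec/avcodec.h>

/* Prefer a system-memory format over the opaque AV_PIX_FMT_QSV surfaces. */
static enum AVPixelFormat prefer_system_memory(AVCodecContext *avctx,
                                               const enum AVPixelFormat *fmts)
{
    const enum AVPixelFormat *p;

    for (p = fmts; *p != AV_PIX_FMT_NONE; p++) {
        if (*p != AV_PIX_FMT_QSV)
            return *p; /* e.g. NV12, P010, or (with this patch) YUYV422/Y210 */
    }
    return fmts[0];
}

/* Open an hevc_qsv decoder that outputs to system memory; stream parameters
 * and most error handling are omitted for brevity. */
static AVCodecContext *open_qsv_decoder_sysmem(void)
{
    const AVCodec *dec = avcodec_find_decoder_by_name("hevc_qsv");
    AVCodecContext *ctx = dec ? avcodec_alloc_context3(dec) : NULL;

    if (!ctx)
        return NULL;
    ctx->get_format = prefer_system_memory;
    if (avcodec_open2(ctx, dec, NULL) < 0)
        avcodec_free_context(&ctx);
    return ctx;
}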

Comments

Xiang, Haihao April 6, 2022, 8:14 a.m. UTC | #1
On Sat, 2022-04-02 at 17:35 +0800, Wenbin Chen wrote:
> The QSV decoder can only output nv12 and p010 directly to system
> memory. For other formats, frames currently have to be downloaded from
> QSV video memory to system memory. Now add the other supported formats
> to qsvdec.
> 
> Signed-off-by: Wenbin Chen <wenbin.chen@intel.com>
> ---
>  libavcodec/qsv.c          | 36 ++++++++++++++++++++++++++++++++++++
>  libavcodec/qsv_internal.h |  3 +++
>  libavcodec/qsvdec.c       | 23 +++++++++++++++++------
>  3 files changed, 56 insertions(+), 6 deletions(-)
> 
> [...]

I failed to apply your patchset via git am. Could you rebase your patchset?

BRs
Haihao
Chen, Wenbin April 6, 2022, 8:23 a.m. UTC | #2
> On Sat, 2022-04-02 at 17:35 +0800, Wenbin Chen wrote:
> > [...]
> 
> I failed to apply your patchset via git am. Could you rebase your patchset?
> 
> BRs
> Haihao

Ok. I will rebase it and send again.

Thanks
Wenbin

Patch

diff --git a/libavcodec/qsv.c b/libavcodec/qsv.c
index b75877e698..cc1352aa2a 100644
--- a/libavcodec/qsv.c
+++ b/libavcodec/qsv.c
@@ -244,6 +244,42 @@  int ff_qsv_map_pixfmt(enum AVPixelFormat format, uint32_t *fourcc)
     }
 }
 
+int ff_qsv_map_frame_to_surface(const AVFrame *frame, mfxFrameSurface1 *surface)
+{
+    switch (frame->format) {
+    case AV_PIX_FMT_NV12:
+    case AV_PIX_FMT_P010:
+        surface->Data.Y  = frame->data[0];
+        surface->Data.UV = frame->data[1];
+        /* The SDK checks Data.V when using system memory for VP9 encoding */
+        surface->Data.V  = surface->Data.UV + 1;
+        break;
+    case AV_PIX_FMT_X2RGB10LE:
+    case AV_PIX_FMT_BGRA:
+        surface->Data.B = frame->data[0];
+        surface->Data.G = frame->data[0] + 1;
+        surface->Data.R = frame->data[0] + 2;
+        surface->Data.A = frame->data[0] + 3;
+        break;
+    case AV_PIX_FMT_YUYV422:
+        surface->Data.Y = frame->data[0];
+        surface->Data.U = frame->data[0] + 1;
+        surface->Data.V = frame->data[0] + 3;
+        break;
+
+    case AV_PIX_FMT_Y210:
+        surface->Data.Y16 = (mfxU16 *)frame->data[0];
+        surface->Data.U16 = (mfxU16 *)frame->data[0] + 1;
+        surface->Data.V16 = (mfxU16 *)frame->data[0] + 3;
+        break;
+    default:
+        return AVERROR(ENOSYS);
+    }
+    surface->Data.PitchLow  = frame->linesize[0];
+
+    return 0;
+}
+
 int ff_qsv_find_surface_idx(QSVFramesContext *ctx, QSVFrame *frame)
 {
     int i;
diff --git a/libavcodec/qsv_internal.h b/libavcodec/qsv_internal.h
index 58186ea7ca..e2aecdcbd6 100644
--- a/libavcodec/qsv_internal.h
+++ b/libavcodec/qsv_internal.h
@@ -147,4 +147,7 @@  int ff_qsv_find_surface_idx(QSVFramesContext *ctx, QSVFrame *frame);
 void ff_qsv_frame_add_ext_param(AVCodecContext *avctx, QSVFrame *frame,
                                 mfxExtBuffer *param);
 
+int ff_qsv_map_frame_to_surface(const AVFrame *frame, mfxFrameSurface1 *surface);
+
+
 #endif /* AVCODEC_QSV_INTERNAL_H */
diff --git a/libavcodec/qsvdec.c b/libavcodec/qsvdec.c
index 6236391357..f1d56b2af3 100644
--- a/libavcodec/qsvdec.c
+++ b/libavcodec/qsvdec.c
@@ -129,21 +129,28 @@  static int qsv_get_continuous_buffer(AVCodecContext *avctx, AVFrame *frame,
         frame->linesize[0] = FFALIGN(avctx->width, 128);
         break;
     case AV_PIX_FMT_P010:
+    case AV_PIX_FMT_YUYV422:
         frame->linesize[0] = 2 * FFALIGN(avctx->width, 128);
         break;
+    case AV_PIX_FMT_Y210:
+        frame->linesize[0] = 4 * FFALIGN(avctx->width, 128);
+        break;
     default:
         av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format.\n");
         return AVERROR(EINVAL);
     }
 
-    frame->linesize[1] = frame->linesize[0];
     frame->buf[0]      = av_buffer_pool_get(pool);
     if (!frame->buf[0])
         return AVERROR(ENOMEM);
 
     frame->data[0] = frame->buf[0]->data;
-    frame->data[1] = frame->data[0] +
-                            frame->linesize[0] * FFALIGN(avctx->height, 64);
+    if (avctx->pix_fmt == AV_PIX_FMT_NV12 ||
+        avctx->pix_fmt == AV_PIX_FMT_P010) {
+        frame->linesize[1] = frame->linesize[0];
+        frame->data[1] = frame->data[0] +
+            frame->linesize[0] * FFALIGN(avctx->height, 64);
+    }
 
     ret = ff_attach_decode_data(frame);
     if (ret < 0)
@@ -423,9 +430,11 @@  static int alloc_frame(AVCodecContext *avctx, QSVContext *q, QSVFrame *frame)
     if (frame->frame->format == AV_PIX_FMT_QSV) {
         frame->surface = *(mfxFrameSurface1*)frame->frame->data[3];
     } else {
-        frame->surface.Data.PitchLow = frame->frame->linesize[0];
-        frame->surface.Data.Y        = frame->frame->data[0];
-        frame->surface.Data.UV       = frame->frame->data[1];
+        ret = ff_qsv_map_frame_to_surface(frame->frame, &frame->surface);
+        if (ret < 0) {
+            av_log(avctx, AV_LOG_ERROR, "map frame to surface failed.\n");
+            return ret;
+        }
     }
 
     frame->surface.Info = q->frame_info;
@@ -990,6 +999,8 @@  const AVCodec ff_##x##_qsv_decoder = { \
     .priv_class     = &x##_qsv_class, \
     .pix_fmts       = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12, \
                                                     AV_PIX_FMT_P010, \
+                                                    AV_PIX_FMT_YUYV422, \
+                                                    AV_PIX_FMT_Y210, \
                                                     AV_PIX_FMT_QSV, \
                                                     AV_PIX_FMT_NONE }, \
     .hw_configs     = qsv_hw_configs, \
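
As a closing note, a small standalone sketch (not FFmpeg code) of the packed 4:2:2 layouts behind the offsets chosen in ff_qsv_map_frame_to_surface and the linesize factors in qsv_get_continuous_buffer: YUYV422 stores Y0 U Y1 V in 2 bytes per pixel, and Y210 stores the same pattern in 16-bit words, i.e. 4 bytes per pixel.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Two YUYV422 pixels share one U and one V sample: Y0 U Y1 V. */
    uint8_t yuyv[4] = { 0x10, 0x80, 0x20, 0x80 };

    const uint8_t *y = yuyv;     /* corresponds to Data.Y = data[0]     */
    const uint8_t *u = yuyv + 1; /* corresponds to Data.U = data[0] + 1 */
    const uint8_t *v = yuyv + 3; /* corresponds to Data.V = data[0] + 3 */

    printf("Y0=%d U=%d Y1=%d V=%d\n", y[0], u[0], y[2], v[0]);

    /* Row sizes before the 128-pixel width alignment applied by the decoder:
     * YUYV422 packs 2 bytes per pixel, Y210 packs the same samples into
     * 16-bit words, hence 4 bytes per pixel. */
    int width = 1920;
    printf("YUYV422 row: %d bytes, Y210 row: %d bytes\n", 2 * width, 4 * width);
    return 0;
}

With Data.PitchLow set to linesize[0], the SDK can walk each packed row directly, which is all the new mapping needs for these formats.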