
[FFmpeg-devel] lavu/hwcontext_qsv: Add support for AV_PIX_FMT_BGRA.

Message ID 1536225913-20147-1-git-send-email-zhong.li@intel.com
State Accepted
Commit a5e1cb9e96bca091ed7103d8be72a99e7dc31582

Commit Message

Zhong Li Sept. 6, 2018, 9:25 a.m. UTC
RGB32 (AV_PIX_FMT_BGRA on Intel platforms) may be used as an overlay with alpha blending,
so add support for the AV_PIX_FMT_BGRA format.

Rename RGB32 to BGRA to make it clearer, as suggested by Mark Thompson.

V2: Add P010 format support, otherwise an HEVC 10-bit encoding regression is introduced.
    Thanks to Linjie for the discovery.

Signed-off-by: Zhong Li <zhong.li@intel.com>
Verified-by: Fu, Linjie <linjie.fu@intel.com>
---
 libavfilter/qsvvpp.c      |  2 +-
 libavutil/hwcontext_qsv.c | 44 ++++++++++++++++++++++++++++++++++----------
 2 files changed, 35 insertions(+), 11 deletions(-)

Comments

Zhong Li Oct. 10, 2018, 5:20 a.m. UTC | #1
> From: Li, Zhong
> Sent: Thursday, September 6, 2018 5:25 PM
> To: ffmpeg-devel@ffmpeg.org
> Cc: Li, Zhong <zhong.li@intel.com>
> Subject: [PATCH] lavu/hwcontext_qsv: Add support for AV_PIX_FMT_BGRA.
> 
> RGB32 (AV_PIX_FMT_BGRA on Intel platforms) may be used as an overlay
> with alpha blending, so add support for the AV_PIX_FMT_BGRA format.
> 
> Rename RGB32 to BGRA to make it clearer, as suggested by Mark
> Thompson.
> 
> V2: Add P010 format support, otherwise an HEVC 10-bit encoding
> regression is introduced.
>     Thanks to Linjie for the discovery.
> 
> Signed-off-by: Zhong Li <zhong.li@intel.com>
> Verified-by: Fu, Linjie <linjie.fu@intel.com>
> ---
>  libavfilter/qsvvpp.c      |  2 +-
>  libavutil/hwcontext_qsv.c | 44 ++++++++++++++++++++++++++++++++++----------
>  2 files changed, 35 insertions(+), 11 deletions(-)
> 
> diff --git a/libavfilter/qsvvpp.c b/libavfilter/qsvvpp.c
> index 7ee1e56..06efdf5 100644
> --- a/libavfilter/qsvvpp.c
> +++ b/libavfilter/qsvvpp.c
> @@ -142,7 +142,7 @@ static int pix_fmt_to_mfx_fourcc(int format)
>          return MFX_FOURCC_NV12;
>      case AV_PIX_FMT_YUYV422:
>          return MFX_FOURCC_YUY2;
> -    case AV_PIX_FMT_RGB32:
> +    case AV_PIX_FMT_BGRA:
>          return MFX_FOURCC_RGB4;
>      }
> 
> diff --git a/libavutil/hwcontext_qsv.c b/libavutil/hwcontext_qsv.c
> index 250091c..fa7459c 100644
> --- a/libavutil/hwcontext_qsv.c
> +++ b/libavutil/hwcontext_qsv.c
> @@ -90,6 +90,7 @@ static const struct {
>      uint32_t           fourcc;
>  } supported_pixel_formats[] = {
>      { AV_PIX_FMT_NV12, MFX_FOURCC_NV12 },
> +    { AV_PIX_FMT_BGRA, MFX_FOURCC_RGB4 },
>      { AV_PIX_FMT_P010, MFX_FOURCC_P010 },
>      { AV_PIX_FMT_PAL8, MFX_FOURCC_P8   },
>  };
> @@ -731,6 +732,37 @@ static int qsv_transfer_data_child(AVHWFramesContext *ctx, AVFrame *dst,
>      return ret;
>  }
> 
> +static int map_frame_to_surface(const AVFrame *frame, mfxFrameSurface1 *surface)
> +{
> +    switch (frame->format) {
> +    case AV_PIX_FMT_NV12:
> +    case AV_PIX_FMT_P010:
> +        surface->Data.Y  = frame->data[0];
> +        surface->Data.UV = frame->data[1];
> +        break;
> +
> +    case AV_PIX_FMT_YUV420P:
> +        surface->Data.Y = frame->data[0];
> +        surface->Data.U = frame->data[1];
> +        surface->Data.V = frame->data[2];
> +        break;
> +
> +    case AV_PIX_FMT_BGRA:
> +        surface->Data.B = frame->data[0];
> +        surface->Data.G = frame->data[0] + 1;
> +        surface->Data.R = frame->data[0] + 2;
> +        surface->Data.A = frame->data[0] + 3;
> +        break;
> +
> +    default:
> +        return MFX_ERR_UNSUPPORTED;
> +    }
> +    surface->Data.Pitch     = frame->linesize[0];
> +    surface->Data.TimeStamp = frame->pts;
> +
> +    return 0;
> +}
> +
>  static int qsv_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
>                                    const AVFrame *src)
>  {
> @@ -750,11 +782,7 @@ static int qsv_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
>      }
> 
>      out.Info = in->Info;
> -    out.Data.PitchLow = dst->linesize[0];
> -    out.Data.Y        = dst->data[0];
> -    out.Data.U        = dst->data[1];
> -    out.Data.V        = dst->data[2];
> -    out.Data.A        = dst->data[3];
> +    map_frame_to_surface(dst, &out);
> 
>      do {
>          err = MFXVideoVPP_RunFrameVPPAsync(s->session_download, in, &out, NULL, &sync);
> @@ -797,11 +825,7 @@ static int qsv_transfer_data_to(AVHWFramesContext *ctx, AVFrame *dst,
>      }
> 
>      in.Info = out->Info;
> -    in.Data.PitchLow = src->linesize[0];
> -    in.Data.Y        = src->data[0];
> -    in.Data.U        = src->data[1];
> -    in.Data.V        = src->data[2];
> -    in.Data.A        = src->data[3];
> +    map_frame_to_surface(src, &in);
> 
>      do {
>          err = MFXVideoVPP_RunFrameVPPAsync(s->session_upload, &in, out, NULL, &sync);
> --
> 2.7.4

Ping? 
If nobody is against it, I'd prefer to push it with an alpha blending overlay command example in the commit message.
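
For reference, a hedged sketch of the kind of command this enables (the file names, sizes and filter parameters below are placeholders, not taken from the patch): decode the main stream with QSV, convert the overlay image to BGRA and upload it with hwupload (the path this patch adds), then blend it on the GPU with overlay_qsv:

    ffmpeg -init_hw_device qsv=hw -filter_hw_device hw \
        -hwaccel qsv -hwaccel_output_format qsv -c:v h264_qsv -i main.mp4 \
        -i overlay_with_alpha.png \
        -filter_complex "[1:v]format=bgra,hwupload=extra_hw_frames=16[ov];[0:v][ov]overlay_qsv=x=0:y=0" \
        -c:v h264_qsv output.mp4

The qsvvpp.c hunk covers the same format when the BGRA overlay input is handled inside the QSV filter itself.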

Patch

diff --git a/libavfilter/qsvvpp.c b/libavfilter/qsvvpp.c
index 7ee1e56..06efdf5 100644
--- a/libavfilter/qsvvpp.c
+++ b/libavfilter/qsvvpp.c
@@ -142,7 +142,7 @@  static int pix_fmt_to_mfx_fourcc(int format)
         return MFX_FOURCC_NV12;
     case AV_PIX_FMT_YUYV422:
         return MFX_FOURCC_YUY2;
-    case AV_PIX_FMT_RGB32:
+    case AV_PIX_FMT_BGRA:
         return MFX_FOURCC_RGB4;
     }
 
diff --git a/libavutil/hwcontext_qsv.c b/libavutil/hwcontext_qsv.c
index 250091c..fa7459c 100644
--- a/libavutil/hwcontext_qsv.c
+++ b/libavutil/hwcontext_qsv.c
@@ -90,6 +90,7 @@  static const struct {
     uint32_t           fourcc;
 } supported_pixel_formats[] = {
     { AV_PIX_FMT_NV12, MFX_FOURCC_NV12 },
+    { AV_PIX_FMT_BGRA, MFX_FOURCC_RGB4 },
     { AV_PIX_FMT_P010, MFX_FOURCC_P010 },
     { AV_PIX_FMT_PAL8, MFX_FOURCC_P8   },
 };
@@ -731,6 +732,37 @@  static int qsv_transfer_data_child(AVHWFramesContext *ctx, AVFrame *dst,
     return ret;
 }
 
+static int map_frame_to_surface(const AVFrame *frame, mfxFrameSurface1 *surface)
+{
+    switch (frame->format) {
+    case AV_PIX_FMT_NV12:
+    case AV_PIX_FMT_P010:
+        surface->Data.Y  = frame->data[0];
+        surface->Data.UV = frame->data[1];
+        break;
+
+    case AV_PIX_FMT_YUV420P:
+        surface->Data.Y = frame->data[0];
+        surface->Data.U = frame->data[1];
+        surface->Data.V = frame->data[2];
+        break;
+
+    case AV_PIX_FMT_BGRA:
+        surface->Data.B = frame->data[0];
+        surface->Data.G = frame->data[0] + 1;
+        surface->Data.R = frame->data[0] + 2;
+        surface->Data.A = frame->data[0] + 3;
+        break;
+
+    default:
+        return MFX_ERR_UNSUPPORTED;
+    }
+    surface->Data.Pitch     = frame->linesize[0];
+    surface->Data.TimeStamp = frame->pts;
+
+    return 0;
+}
+
 static int qsv_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
                                   const AVFrame *src)
 {
@@ -750,11 +782,7 @@  static int qsv_transfer_data_from(AVHWFramesContext *ctx, AVFrame *dst,
     }
 
     out.Info = in->Info;
-    out.Data.PitchLow = dst->linesize[0];
-    out.Data.Y        = dst->data[0];
-    out.Data.U        = dst->data[1];
-    out.Data.V        = dst->data[2];
-    out.Data.A        = dst->data[3];
+    map_frame_to_surface(dst, &out);
 
     do {
         err = MFXVideoVPP_RunFrameVPPAsync(s->session_download, in, &out, NULL, &sync);
@@ -797,11 +825,7 @@  static int qsv_transfer_data_to(AVHWFramesContext *ctx, AVFrame *dst,
     }
 
     in.Info = out->Info;
-    in.Data.PitchLow = src->linesize[0];
-    in.Data.Y        = src->data[0];
-    in.Data.U        = src->data[1];
-    in.Data.V        = src->data[2];
-    in.Data.A        = src->data[3];
+    map_frame_to_surface(src, &in);
 
     do {
         err = MFXVideoVPP_RunFrameVPPAsync(s->session_upload, &in, out, NULL, &sync);
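
Beyond the command line, the hwcontext change is also reachable through the public hwframe API. Below is a minimal, untested sketch of that upload path; the upload_bgra_frame() helper name, the 1920x1080 frame size and the pool size are arbitrary, and most error handling is omitted. av_hwframe_transfer_data() is what eventually lands in qsv_transfer_data_to() and the new map_frame_to_surface() above.

#include <libavutil/buffer.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>
#include <libavutil/hwcontext.h>
#include <libavutil/pixfmt.h>

static int upload_bgra_frame(void)
{
    AVBufferRef *device_ref = NULL, *frames_ref = NULL;
    AVFrame *sw = NULL, *hw = NULL;
    int ret;

    /* Open a QSV device (the child device is auto-detected). */
    ret = av_hwdevice_ctx_create(&device_ref, AV_HWDEVICE_TYPE_QSV, NULL, NULL, 0);
    if (ret < 0)
        return ret;

    /* Describe a pool of QSV surfaces whose software format is BGRA,
     * i.e. MFX_FOURCC_RGB4 after this patch. */
    frames_ref = av_hwframe_ctx_alloc(device_ref);
    if (!frames_ref) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    {
        AVHWFramesContext *fc = (AVHWFramesContext *)frames_ref->data;
        fc->format            = AV_PIX_FMT_QSV;
        fc->sw_format         = AV_PIX_FMT_BGRA;
        fc->width             = 1920;
        fc->height            = 1080;
        fc->initial_pool_size = 4;
    }
    ret = av_hwframe_ctx_init(frames_ref);
    if (ret < 0)
        goto end;

    /* A system-memory BGRA frame to upload (pixel data left uninitialized). */
    sw = av_frame_alloc();
    hw = av_frame_alloc();
    if (!sw || !hw) {
        ret = AVERROR(ENOMEM);
        goto end;
    }
    sw->format = AV_PIX_FMT_BGRA;
    sw->width  = 1920;
    sw->height = 1080;
    ret = av_frame_get_buffer(sw, 0);
    if (ret < 0)
        goto end;

    /* Take a surface from the pool and upload; this is where
     * map_frame_to_surface() fills in the B/G/R/A channel pointers. */
    ret = av_hwframe_get_buffer(frames_ref, hw, 0);
    if (ret < 0)
        goto end;
    ret = av_hwframe_transfer_data(hw, sw, 0);

end:
    av_frame_free(&sw);
    av_frame_free(&hw);
    av_buffer_unref(&frames_ref);
    av_buffer_unref(&device_ref);
    return ret;
}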