[FFmpeg-devel,V3] avfilter: Add tonemap vaapi filter

Submitted by Zachary Zhou on Dec. 29, 2018, 7:02 a.m.

Details

Message ID 20181229070230.14883-1-zachary.zhou@intel.com
State New
Headers show

Commit Message

Zachary Zhou Dec. 29, 2018, 7:02 a.m.
It supports ICL platform.
H2H (HDR to HDR): P010 -> RGB10
H2S (HDR to SDR): P010 -> RGB8

libva commit for HDR10
https://github.com/intel/libva/commit/cf11abe5e1b9c93ee75cf974076957162c1605b9

Signed-off-by: Zachary Zhou <zachary.zhou@intel.com>
---
 configure                      |   2 +
 doc/filters.texi               |  63 ++++
 libavfilter/Makefile           |   1 +
 libavfilter/allfilters.c       |   1 +
 libavfilter/vaapi_vpp.c        |  29 ++
 libavfilter/vaapi_vpp.h        |   8 +
 libavfilter/vf_tonemap_vaapi.c | 542 +++++++++++++++++++++++++++++++++
 libavutil/hwcontext_vaapi.c    |   3 +
 8 files changed, 649 insertions(+)
 create mode 100644 libavfilter/vf_tonemap_vaapi.c

Comments

Mark Thompson Dec. 30, 2018, 7:38 p.m.
On 29/12/2018 07:02, Zachary Zhou wrote:
> It supports ICL platform.

When will this hardware be available?

Is this a new fixed-function feature, or will it be available on current hardware as well?

> H2H (HDR to HDR): P010 -> RGB10
> H2S (HDR to SDR): P010 -> RGB8
> 
> libva commit for HDR10
> https://github.com/intel/libva/commit/cf11abe5e1b9c93ee75cf974076957162c1605b9
> 
> Signed-off-by: Zachary Zhou <zachary.zhou@intel.com>
> ---
>  configure                      |   2 +
>  doc/filters.texi               |  63 ++++
>  libavfilter/Makefile           |   1 +
>  libavfilter/allfilters.c       |   1 +
>  libavfilter/vaapi_vpp.c        |  29 ++
>  libavfilter/vaapi_vpp.h        |   8 +
>  libavfilter/vf_tonemap_vaapi.c | 542 +++++++++++++++++++++++++++++++++
>  libavutil/hwcontext_vaapi.c    |   3 +
>  8 files changed, 649 insertions(+)
>  create mode 100644 libavfilter/vf_tonemap_vaapi.c
> 
> diff --git a/configure b/configure
> index be49c19b88..baf70d03fc 100755
> --- a/configure
> +++ b/configure
> @@ -3480,6 +3480,7 @@ tinterlace_merge_test_deps="tinterlace_filter"
>  tinterlace_pad_test_deps="tinterlace_filter"
>  tonemap_filter_deps="const_nan"
>  tonemap_opencl_filter_deps="opencl const_nan"
> +tonemap_vaapi_filter_deps="vaapi VAProcFilterParameterBufferHDRToneMapping"
>  transpose_opencl_filter_deps="opencl"
>  unsharp_opencl_filter_deps="opencl"
>  uspp_filter_deps="gpl avcodec"
> @@ -5984,6 +5985,7 @@ check_type "d3d9.h dxva2api.h" DXVA2_ConfigPictureDecode -D_WIN32_WINNT=0x0602
>  
>  check_type "va/va.h va/va_dec_hevc.h" "VAPictureParameterBufferHEVC"
>  check_struct "va/va.h" "VADecPictureParameterBufferVP9" bit_depth
> +check_type "va/va.h va/va_vpp.h" "VAProcFilterParameterBufferHDRToneMapping"
>  check_type "va/va.h va/va_enc_hevc.h" "VAEncPictureParameterBufferHEVC"
>  check_type "va/va.h va/va_enc_jpeg.h" "VAEncPictureParameterBufferJPEG"
>  check_type "va/va.h va/va_enc_vp8.h"  "VAEncPictureParameterBufferVP8"
> diff --git a/doc/filters.texi b/doc/filters.texi
> index ac4c9b44d8..9ed53a1008 100644
> --- a/doc/filters.texi
> +++ b/doc/filters.texi
> @@ -19130,6 +19130,69 @@ Convert HDR(PQ/HLG) video to bt2020-transfer-characteristic p010 format using li
>  @end example
>  @end itemize
>  
> +@section tonemap_vaapi
> +
> +Perform HDR(High Dynamic Range) to HDR and HDR to SDR conversion with tone-mapping.
> +
> +It accepts the following parameters:
> +
> +@table @option
> +@item type
> +Specify the tone-mapping operator to be used.
> +
> +Possible values are:
> +@table @var
> +@item h2h
> +Perform H2H(HDR to HDR), convert from p010 to rgb10
> +@item h2s
> +Perform H2S(HDR to SDR), convert from p010 to rgb8
> +@end table

What are RGB10 and RGB8?

> +
> +@item display
> +Set mastering display metadata for H2H
> +
> +Can assume the following values:
> +@table @var
> +@item G
> +Green primary G(x|y)
> +@item B
> +Blue primary B(x|y)
> +@item R
> +Red primary R(x|y)
> +@item WP
> +White point WP(x|y)
> +@item L
> +Display mastering luminance L(max|min)
> +@end table
> +
> +@item light
> +Set content light level for H2H
> +
> +Can assume the following values:
> +@table @var
> +@item CLL
> +Max content light level
> +@item FALL
> +Max average light level per frame
> +@end table

What units are all of these values in?

> +
> +@end table
> +
> +@subsection Example
> +
> +@itemize
> +@item
> +Convert HDR video to HDR video from p010 format to rgb10 format.
> +@example
> +-i INPUT -vf "tonemap_vaapi=h2h:display=G(13250|34500)B(7500|3000)R(34000|16000)WP(15635|16450)L(2000|2000):light=CLL(10000)FALL(1000)" OUTPUT 
> +@end example
> +@item
> +Convert HDR video to SDR video from p010 format to rgb8 format.
> +@example
> +-i INPUT -vf "tonemap_vaapi=h2s" OUTPUT
> +@end example
> +@end itemize
> +
>  @section unsharp_opencl
>  
>  Sharpen or blur the input video.
> diff --git a/libavfilter/Makefile b/libavfilter/Makefile
> index 6e2658186d..f6894209d1 100644
> --- a/libavfilter/Makefile
> +++ b/libavfilter/Makefile
> @@ -390,6 +390,7 @@ OBJS-$(CONFIG_TMIX_FILTER)                   += vf_mix.o framesync.o
>  OBJS-$(CONFIG_TONEMAP_FILTER)                += vf_tonemap.o colorspace.o
>  OBJS-$(CONFIG_TONEMAP_OPENCL_FILTER)         += vf_tonemap_opencl.o colorspace.o opencl.o \
>                                                  opencl/tonemap.o opencl/colorspace_common.o
> +OBJS-$(CONFIG_TONEMAP_VAAPI_FILTER)          += vf_tonemap_vaapi.o vaapi_vpp.o
>  OBJS-$(CONFIG_TPAD_FILTER)                   += vf_tpad.o
>  OBJS-$(CONFIG_TRANSPOSE_FILTER)              += vf_transpose.o
>  OBJS-$(CONFIG_TRANSPOSE_NPP_FILTER)          += vf_transpose_npp.o cuda_check.o
> diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
> index a600069500..754b84819d 100644
> --- a/libavfilter/allfilters.c
> +++ b/libavfilter/allfilters.c
> @@ -369,6 +369,7 @@ extern AVFilter ff_vf_tlut2;
>  extern AVFilter ff_vf_tmix;
>  extern AVFilter ff_vf_tonemap;
>  extern AVFilter ff_vf_tonemap_opencl;
> +extern AVFilter ff_vf_tonemap_vaapi;
>  extern AVFilter ff_vf_tpad;
>  extern AVFilter ff_vf_transpose;
>  extern AVFilter ff_vf_transpose_npp;
> diff --git a/libavfilter/vaapi_vpp.c b/libavfilter/vaapi_vpp.c
> index c5bbc3b85b..7389ebd9d5 100644
> --- a/libavfilter/vaapi_vpp.c
> +++ b/libavfilter/vaapi_vpp.c
> @@ -276,6 +276,35 @@ int ff_vaapi_vpp_make_param_buffers(AVFilterContext *avctx,
>      return 0;
>  }
>  
> +int ff_vaapi_vpp_make_param_buffers2(AVFilterContext *avctx,
> +                                     int type,
> +                                     const void *data,
> +                                     size_t size,
> +                                     int count,
> +                                     VABufferID *buffer_id)
> +{
> +    VAStatus vas;
> +    VABufferID buffer;
> +    VAAPIVPPContext *ctx   = avctx->priv;
> +
> +    av_assert0(ctx->nb_filter_buffers + 1 <= VAProcFilterCount);
> +
> +    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
> +                         type, size, count, (void*)data, &buffer);
> +    if (vas != VA_STATUS_SUCCESS) {
> +        av_log(avctx, AV_LOG_ERROR, "Failed to create parameter "
> +               "buffer (type %d): %d (%s).\n",
> +               type, vas, vaErrorStr(vas));
> +        return AVERROR(EIO);
> +    }
> +
> +    ctx->filter_buffers[ctx->nb_filter_buffers++] = buffer;
> +    *buffer_id = buffer;
> +
> +    av_log(avctx, AV_LOG_DEBUG, "Param buffer (type %d, %zu bytes, count %d) "
> +           "is %#x.\n", type, size, count, buffer);
> +    return 0;
> +}

I don't see the point of this function.  The only change is that the created buffer ID is additionally passed back in the argument pointer as well as in the existing place in ctx->filter_buffers.

>  
>  int ff_vaapi_vpp_render_picture(AVFilterContext *avctx,
>                                  VAProcPipelineParameterBuffer *params,
> diff --git a/libavfilter/vaapi_vpp.h b/libavfilter/vaapi_vpp.h
> index 0bc31018d4..e920c10591 100644
> --- a/libavfilter/vaapi_vpp.h
> +++ b/libavfilter/vaapi_vpp.h
> @@ -72,6 +72,14 @@ int ff_vaapi_vpp_make_param_buffers(AVFilterContext *avctx,
>                                      size_t size,
>                                      int count);
>  
> +int ff_vaapi_vpp_make_param_buffers2(AVFilterContext *avctx,
> +                                     int type,
> +                                     const void *data,
> +                                     size_t size,
> +                                     int count,
> +                                     VABufferID *buffer_id);
> +
> +
>  int ff_vaapi_vpp_render_picture(AVFilterContext *avctx,
>                                  VAProcPipelineParameterBuffer *params,
>                                  VASurfaceID output_surface);
> diff --git a/libavfilter/vf_tonemap_vaapi.c b/libavfilter/vf_tonemap_vaapi.c
> new file mode 100644
> index 0000000000..d77cd574b3
> --- /dev/null
> +++ b/libavfilter/vf_tonemap_vaapi.c
> @@ -0,0 +1,542 @@
> +/*
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +#include <string.h>
> +
> +#include "libavutil/avassert.h"
> +#include "libavutil/mem.h"
> +#include "libavutil/opt.h"
> +#include "libavutil/pixdesc.h"
> +#include "libavutil/mastering_display_metadata.h"

Headers should be in alphabetical order.

> +
> +#include "avfilter.h"
> +#include "formats.h"
> +#include "internal.h"
> +#include "vaapi_vpp.h"
> +
> +typedef enum {
> +    HDR_VAAPI_H2H,
> +    HDR_VAAPI_H2S,
> +} HDRType;
> +
> +typedef struct HDRVAAPIContext {
> +    VAAPIVPPContext vpp_ctx; // must be the first field
> +
> +    int hdr_type;
> +
> +    char *master_display;
> +    char *content_light;
> +
> +    VABufferID          buffer;
> +    VAHdrMetaDataHDR10  out_metadata;
> +
> +    AVFrameSideData    *src_display;
> +    AVFrameSideData    *src_light;
> +} HDRVAAPIContext;
> +
> +static int tonemap_vaapi_set_filter_params(AVFilterContext *avctx, AVFrame *input_frame)
> +{
> +    VAAPIVPPContext *vpp_ctx   = avctx->priv;
> +    HDRVAAPIContext *ctx       = avctx->priv;
> +    VAStatus vas;
> +    VAHdrMetaDataHDR10  *in_metadata;
> +    VAProcFilterParameterBufferHDRToneMapping *hdrtm_param;
> +
> +    vas = vaMapBuffer(vpp_ctx->hwctx->display, ctx->buffer, (void**)&hdrtm_param);

It's generally easier to make the object and then use vaCreateBuffer() rather than doing this mapping dance.

> +    if (vas != VA_STATUS_SUCCESS) {
> +        av_log(avctx, AV_LOG_ERROR, "Failed to map "
> +               "buffer (%d): %d (%s).\n",
> +               ctx->buffer, vas, vaErrorStr(vas));
> +        return AVERROR(EIO);
> +    }
> +
> +    in_metadata = hdrtm_param->data.metadata;
> +    ctx->src_display = av_frame_get_side_data(input_frame,
> +                                              AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);
> +    if (ctx->src_display) {
> +        const AVMasteringDisplayMetadata *hdr_meta =
> +            (const AVMasteringDisplayMetadata *)ctx->src_display->data;
> +        if (hdr_meta->has_luminance) {
> +#define LAV_UINT32(a) (a.num)
> +            in_metadata->max_display_mastering_luminance =
> +                LAV_UINT32(hdr_meta->max_luminance);
> +            in_metadata->min_display_mastering_luminance =
> +                LAV_UINT32(hdr_meta->min_luminance);
> +#undef LAV_UINT32
> +            av_log(avctx, AV_LOG_DEBUG,
> +                   "Mastering Display Metadata(in luminance):\n");
> +            av_log(avctx, AV_LOG_DEBUG,
> +                   "min_luminance=%d, max_luminance=%d\n",
> +                   in_metadata->min_display_mastering_luminance,
> +                   in_metadata->max_display_mastering_luminance);
> +        }
> +
> +        if (hdr_meta->has_primaries) {
> +#define LAV_RED    0
> +#define LAV_GREEN  1
> +#define LAV_BLUE   2
> +#define LAV_UINT16(a) (a.num)
> +            in_metadata->display_primaries_x[0] =
> +                LAV_UINT16(hdr_meta->display_primaries[LAV_GREEN][0]);
> +            in_metadata->display_primaries_y[0] =
> +                LAV_UINT16(hdr_meta->display_primaries[LAV_GREEN][1]);
> +            in_metadata->display_primaries_x[1] =
> +                LAV_UINT16(hdr_meta->display_primaries[LAV_BLUE][0]);
> +            in_metadata->display_primaries_y[1] =
> +                LAV_UINT16(hdr_meta->display_primaries[LAV_BLUE][1]);
> +            in_metadata->display_primaries_x[2] =
> +                LAV_UINT16(hdr_meta->display_primaries[LAV_RED][0]);
> +            in_metadata->display_primaries_y[2] =
> +                LAV_UINT16(hdr_meta->display_primaries[LAV_RED][1]);
> +            in_metadata->white_point_x =
> +                LAV_UINT16(hdr_meta->white_point[0]);
> +            in_metadata->white_point_y =
> +                LAV_UINT16(hdr_meta->white_point[1]);
> +#undef LAV_RED
> +#undef LAV_GREEN
> +#undef LAV_BLUE
> +#undef LAV_UINT16
> +            av_log(avctx, AV_LOG_DEBUG,
> +                   "Mastering Display Metadata(in primaries):\n");
> +            av_log(avctx, AV_LOG_DEBUG,
> +                   "G(%u,%u) B(%u,%u) R(%u,%u) WP(%u,%u)\n",
> +                   in_metadata->display_primaries_x[0],
> +                   in_metadata->display_primaries_y[0],
> +                   in_metadata->display_primaries_x[1],
> +                   in_metadata->display_primaries_y[1],
> +                   in_metadata->display_primaries_x[2],
> +                   in_metadata->display_primaries_y[2],
> +                   in_metadata->white_point_x,
> +                   in_metadata->white_point_y);
> +        }
> +    }

I don't believe this code at all.  You've taken the numerator only of rational numbers, and the units for most of the values are totally different.

(Relatedly - how can the correctness of the output be verified?)

> +
> +    ctx->src_light = av_frame_get_side_data(input_frame,
> +                                            AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
> +    if (ctx->src_light) {
> +        const AVContentLightMetadata *light_meta =
> +            (const AVContentLightMetadata *) ctx->src_light->data;
> +        in_metadata->max_content_light_level = light_meta->MaxCLL;
> +        in_metadata->max_pic_average_light_level = light_meta->MaxFALL;
> +
> +        av_log(avctx, AV_LOG_DEBUG,
> +               "Mastering Content Light Level (in):\n");
> +        av_log(avctx, AV_LOG_DEBUG,
> +               "MaxCLL(%u) MaxFALL(%u)\n",
> +               in_metadata->max_content_light_level,
> +               in_metadata->max_pic_average_light_level);
> +    }

This doesn't make sense either - the units don't match.

> +
> +    vas = vaUnmapBuffer(vpp_ctx->hwctx->display, ctx->buffer);
> +    if (vas != VA_STATUS_SUCCESS) {
> +        av_log(avctx, AV_LOG_ERROR, "Failed to unmap output buffers: "
> +               "%d (%s).\n", vas, vaErrorStr(vas));
> +        return AVERROR(EIO);
> +    }

What should the behaviour be for the unfilled fields?

> +
> +    return 0;
> +}
> +
> +static int tonemap_vaapi_build_filter_params(AVFilterContext *avctx)
> +{
> +    VAAPIVPPContext *vpp_ctx   = avctx->priv;
> +    HDRVAAPIContext *ctx       = avctx->priv;
> +    VAStatus vas;
> +    VAProcFilterCapHighDynamicRange hdr_cap;
> +    int num_query_caps;
> +    VAProcFilterParameterBufferHDRToneMapping hdrtm_param;
> +    VAHdrMetaDataHDR10  in_metadata;
> +
> +    vas = vaQueryVideoProcFilterCaps(vpp_ctx->hwctx->display,
> +                                     vpp_ctx->va_context,
> +                                     VAProcFilterHighDynamicRangeToneMapping,
> +                                     &hdr_cap, &num_query_caps);

There can be multiple caps here - you need to grab the whole list and search it for the one you want.

Query behaviour of the iHD driver seems a bit broken on current platforms - it declares that VAProcFilterHighDynamicRangeToneMapping is supported in vaQueryVideoProcFilters(), but then this query fails with invalid argument.

> +    if (vas != VA_STATUS_SUCCESS) {
> +        av_log(avctx, AV_LOG_ERROR, "Failed to query HDR caps "
> +               "context: %d (%s).\n", vas, vaErrorStr(vas));
> +        return AVERROR(EIO);
> +    }
> +
> +    if (hdr_cap.metadata_type == VAProcHighDynamicRangeMetadataNone) {
> +        av_log(avctx, AV_LOG_ERROR, "VAAPI driver doesn't support HDR\n");
> +        return AVERROR(EINVAL);
> +    }
> +
> +    if (! (VA_TONE_MAPPING_HDR_TO_HDR & hdr_cap.caps_flag) &&
> +        ! (VA_TONE_MAPPING_HDR_TO_SDR & hdr_cap.caps_flag)) {
> +        av_log(avctx, AV_LOG_ERROR,
> +               "VAAPI driver doesn't support H2H or H2S\n");

You know which of these you want, so you should check for specifically that one only.

> +        return AVERROR(EINVAL);
> +    }
> +
> +    memset(&hdrtm_param, 0, sizeof(hdrtm_param));
> +    memset(&in_metadata, 0, sizeof(in_metadata));
> +    hdrtm_param.type = VAProcFilterHighDynamicRangeToneMapping;
> +    hdrtm_param.data.metadata_type = VAProcHighDynamicRangeMetadataHDR10;
> +    hdrtm_param.data.metadata      = &in_metadata;
> +    hdrtm_param.data.metadata_size = sizeof(VAHdrMetaDataHDR10);
> +
> +    ff_vaapi_vpp_make_param_buffers2(avctx,
> +                                     VAProcFilterParameterBufferType,
> +                                     &hdrtm_param, sizeof(hdrtm_param), 1, &ctx->buffer);
> +
> +    return 0;
> +}
> +
> +static int tonemap_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame)
> +{
> +    AVFilterContext *avctx     = inlink->dst;
> +    AVFilterLink *outlink      = avctx->outputs[0];
> +    VAAPIVPPContext *vpp_ctx   = avctx->priv;
> +    HDRVAAPIContext *ctx       = avctx->priv;
> +    AVFrame *output_frame      = NULL;
> +    VASurfaceID input_surface, output_surface;
> +    VARectangle input_region,  output_region;
> +
> +    VAProcPipelineParameterBuffer params;
> +    int err;
> +
> +    VAHdrMetaData              out_hdr_metadata;
> +
> +    av_log(avctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n",
> +           av_get_pix_fmt_name(input_frame->format),
> +           input_frame->width, input_frame->height, input_frame->pts);
> +
> +    if (vpp_ctx->va_context == VA_INVALID_ID)
> +        return AVERROR(EINVAL);

Probably not?  EINVAL indicates that a user-supplied argument is invalid.

> +
> +    err = tonemap_vaapi_set_filter_params(avctx, input_frame);
> +    if (err < 0)
> +        goto fail;
> +
> +    input_surface = (VASurfaceID)(uintptr_t)input_frame->data[3];
> +    av_log(avctx, AV_LOG_DEBUG, "Using surface %#x for transpose vpp input.\n",

"transpose"?

> +           input_surface);
> +
> +    output_frame = ff_get_video_buffer(outlink, vpp_ctx->output_width,
> +                                       vpp_ctx->output_height);
> +    if (!output_frame) {
> +        err = AVERROR(ENOMEM);
> +        goto fail;
> +    }
> +
> +    output_surface = (VASurfaceID)(uintptr_t)output_frame->data[3];
> +    av_log(avctx, AV_LOG_DEBUG, "Using surface %#x for transpose vpp output.\n",

"transpose"?

> +           output_surface);
> +    memset(&params, 0, sizeof(params));
> +    input_region = (VARectangle) {
> +        .x      = 0,
> +        .y      = 0,
> +        .width  = input_frame->width,
> +        .height = input_frame->height,
> +    };
> +
> +    output_region = (VARectangle) {
> +        .x      = 0,
> +        .y      = 0,
> +        .width  = output_frame->width,
> +        .height = output_frame->height,
> +    };

You don't need these rectangles.  (va_vpp.h: "If NULL, surface_region implies the whole surface.")

> +
> +    if (vpp_ctx->nb_filter_buffers) {

Condition is always true.

> +        params.filters     = &vpp_ctx->filter_buffers[0];
> +        params.num_filters = vpp_ctx->nb_filter_buffers;
> +    }
> +
> +    params.surface = input_surface;
> +    params.surface_region = &input_region;
> +    params.surface_color_standard =
> +        ff_vaapi_vpp_colour_standard(input_frame->colorspace);
> +
> +    params.output_region = &output_region;
> +    params.output_background_color = 0xff000000;
> +    params.output_color_standard = params.surface_color_standard;
You need to do more with the colour properties of the input and output here.  Check that they actually match what you want - a tonemap treating input as BT.2020 with PQ transfer function is going to make no sense if the input is actually BT.709.  The output will need its properties setting to whatever they actually get mapped to, as well.

(Related: <https://lists.ffmpeg.org/pipermail/ffmpeg-devel/2018-December/238312.html>.)

> +
> +    params.pipeline_flags = 0;
> +    params.filter_flags = VA_FRAME_PICTURE;

Does this flag do anything?

> +
> +    switch (ctx->hdr_type)
> +    {
> +    case HDR_VAAPI_H2H:
> +        params.surface_color_standard = VAProcColorStandardExplicit;
> +        params.output_color_standard = VAProcColorStandardExplicit;
> +        params.input_color_properties.colour_primaries = 9;
> +        params.output_color_properties.colour_primaries = 9;
> +        params.input_color_properties.transfer_characteristics = 16;
> +        params.output_color_properties.transfer_characteristics = 16;
> +        break;
> +    case HDR_VAAPI_H2S:
> +        params.surface_color_standard = VAProcColorStandardExplicit;
> +        params.input_color_properties.colour_primaries = 9;
> +        params.input_color_properties.transfer_characteristics = 16;
> +        break;
> +    default:

When is this case hit?

> +        break;
> +    }
> +
> +    if (ctx->hdr_type == HDR_VAAPI_H2H) {

Can you explain the use-case you are expecting for the HDR-to-HDR mapping?  It's not obvious to me what people are meant to do with the explicit settings.

(From the name I was expecting something like HDR10 to HLG, but that clearly isn't what it's actually doing.)

> +        memset(&out_hdr_metadata, 0, sizeof(out_hdr_metadata));
> +        if (ctx->master_display) {
> +            if (10 != sscanf(ctx->master_display,
> +                             "G(%hu|%hu)B(%hu|%hu)R(%hu|%hu)WP(%hu|%hu)L(%u|%u)",
> +                             &ctx->out_metadata.display_primaries_x[0],
> +                             &ctx->out_metadata.display_primaries_y[0],
> +                             &ctx->out_metadata.display_primaries_x[1],
> +                             &ctx->out_metadata.display_primaries_y[1],
> +                             &ctx->out_metadata.display_primaries_x[2],
> +                             &ctx->out_metadata.display_primaries_y[2],
> +                             &ctx->out_metadata.white_point_x,
> +                             &ctx->out_metadata.white_point_y,
> +                             &ctx->out_metadata.max_display_mastering_luminance,
> +                             &ctx->out_metadata.min_display_mastering_luminance)) {
> +                av_log(avctx, AV_LOG_ERROR,
> +                       "Option mastering-display input invalid\n");
> +                return AVERROR(EINVAL);
> +            } else {
> +                AVFrameSideData *metadata;
> +                AVMasteringDisplayMetadata *hdr_meta;
> +
> +                metadata = av_frame_new_side_data(output_frame,
> +                                                  AV_FRAME_DATA_MASTERING_DISPLAY_METADATA,
> +                                                  sizeof(AVMasteringDisplayMetadata));
> +                if (!metadata) {
> +                    av_log(avctx, AV_LOG_ERROR, "Cannot create new side data for output\n");
> +                    return AVERROR(ENOMEM);
> +                }
> +
> +                if (ctx->src_display) {
> +                    memcpy(metadata->data,
> +                           ctx->src_display->data,
> +                           sizeof(AVMasteringDisplayMetadata));
> +                }
> +
> +                hdr_meta = (AVMasteringDisplayMetadata *)metadata->data;
> +#define LAV_RED    0
> +#define LAV_GREEN  1
> +#define LAV_BLUE   2
> +                hdr_meta->display_primaries[LAV_GREEN][0].num = ctx->out_metadata.display_primaries_x[0];
> +                hdr_meta->display_primaries[LAV_GREEN][1].num = ctx->out_metadata.display_primaries_y[0];
> +                hdr_meta->display_primaries[LAV_BLUE][0].num  = ctx->out_metadata.display_primaries_x[1];
> +                hdr_meta->display_primaries[LAV_BLUE][1].num  = ctx->out_metadata.display_primaries_y[1];
> +                hdr_meta->display_primaries[LAV_RED][0].num   = ctx->out_metadata.display_primaries_x[2];
> +                hdr_meta->display_primaries[LAV_RED][1].num   = ctx->out_metadata.display_primaries_y[2];
> +                hdr_meta->white_point[0].num = ctx->out_metadata.white_point_x;
> +                hdr_meta->white_point[1].num = ctx->out_metadata.white_point_y;
> +#undef LAV_RED
> +#undef LAV_GREEN
> +#undef LAV_BLUE
> +                hdr_meta->has_primaries = 1;
> +
> +                hdr_meta->max_luminance.num = ctx->out_metadata.max_display_mastering_luminance;
> +                hdr_meta->min_luminance.num = ctx->out_metadata.min_display_mastering_luminance;
> +                hdr_meta->has_luminance = 1;

You've set all of these fields to values N/0, so undefined.

> +
> +                av_log(avctx, AV_LOG_DEBUG,
> +                       "Mastering Display Metadata(out luminance):\n");
> +                av_log(avctx, AV_LOG_DEBUG,
> +                       "min_luminance=%u, max_luminance=%u\n",
> +                       ctx->out_metadata.min_display_mastering_luminance,
> +                       ctx->out_metadata.max_display_mastering_luminance);
> +
> +                av_log(avctx, AV_LOG_DEBUG,
> +                       "Mastering Display Metadata(out primaries):\n");
> +                av_log(avctx, AV_LOG_DEBUG,
> +                       "G(%u,%u) B(%u,%u) R(%u,%u) WP(%u,%u)\n",
> +                       ctx->out_metadata.display_primaries_x[0],
> +                       ctx->out_metadata.display_primaries_y[0],
> +                       ctx->out_metadata.display_primaries_x[1],
> +                       ctx->out_metadata.display_primaries_y[1],
> +                       ctx->out_metadata.display_primaries_x[2],
> +                       ctx->out_metadata.display_primaries_y[2],
> +                       ctx->out_metadata.white_point_x,
> +                       ctx->out_metadata.white_point_y);
> +            }
> +        }
> +
> +        if (ctx->content_light) {
> +            if (2 != sscanf(ctx->content_light,
> +                            "CLL(%hu)FALL(%hu)",
> +                            &ctx->out_metadata.max_content_light_level,
> +                            &ctx->out_metadata.max_pic_average_light_level)) {
> +                av_log(avctx, AV_LOG_ERROR,
> +                       "Option content-light input invalid\n");
> +                return AVERROR(EINVAL);
> +            } else {
> +                AVFrameSideData *metadata_lt;
> +                AVContentLightMetadata *hdr_meta_lt;
> +
> +                metadata_lt = av_frame_new_side_data(output_frame,
> +                                                     AV_FRAME_DATA_CONTENT_LIGHT_LEVEL,
> +                                                     sizeof(AVContentLightMetadata));
> +                if (!metadata_lt) {
> +                    av_log(avctx, AV_LOG_ERROR, "Cannot create new side data for output\n");
> +                    return AVERROR(ENOMEM);
> +                }
> +
> +                if (ctx->src_light) {
> +                    memcpy(metadata_lt->data,
> +                           ctx->src_light->data,
> +                           sizeof(AVContentLightMetadata));
> +                }
> +
> +                hdr_meta_lt = (AVContentLightMetadata *)metadata_lt->data;
> +                hdr_meta_lt->MaxCLL = ctx->out_metadata.max_content_light_level;
> +                hdr_meta_lt->MaxFALL = ctx->out_metadata.max_pic_average_light_level;
> +
> +                av_log(avctx, AV_LOG_DEBUG,
> +                       "Mastering Content Light Level (out):\n");
> +                av_log(avctx, AV_LOG_DEBUG,
> +                       "MaxCLL(%u) MaxFALL(%u)\n",
> +                       ctx->out_metadata.max_content_light_level,
> +                       ctx->out_metadata.max_pic_average_light_level);
> +            }
> +        }
> +
> +        out_hdr_metadata.metadata_type = VAProcHighDynamicRangeMetadataHDR10;
> +        out_hdr_metadata.metadata      = &ctx->out_metadata;
> +        out_hdr_metadata.metadata_size = sizeof(VAHdrMetaDataHDR10);
> +
> +        params.output_hdr_metadata = &out_hdr_metadata;
> +    }
> +
> +    err = ff_vaapi_vpp_render_picture(avctx, &params, output_surface);
> +    if (err < 0)
> +        goto fail;
> +
> +    err = av_frame_copy_props(output_frame, input_frame);

This copies all of the input frame properties, including the side-data.

> +    if (err < 0)
> +        goto fail;
> +    av_frame_free(&input_frame);
> +
> +    av_log(avctx, AV_LOG_DEBUG, "Filter output: %s, %ux%u (%"PRId64").\n",
> +           av_get_pix_fmt_name(output_frame->format),
> +           output_frame->width, output_frame->height, output_frame->pts);
> +
> +    return ff_filter_frame(outlink, output_frame);
> +
> +fail:
> +    av_frame_free(&input_frame);
> +    av_frame_free(&output_frame);
> +    return err;
> +}
> +
> +static av_cold int tonemap_vaapi_init(AVFilterContext *avctx)
> +{
> +    VAAPIVPPContext *vpp_ctx = avctx->priv;
> +    HDRVAAPIContext *ctx     = avctx->priv;
> +
> +    ff_vaapi_vpp_ctx_init(avctx);
> +    vpp_ctx->build_filter_params = tonemap_vaapi_build_filter_params;
> +    vpp_ctx->pipeline_uninit = ff_vaapi_vpp_pipeline_uninit;
> +
> +    if (ctx->hdr_type == HDR_VAAPI_H2H) {
> +        vpp_ctx->output_format = AV_PIX_FMT_GBRP10;
> +    } else if (ctx->hdr_type == HDR_VAAPI_H2S) {
> +        vpp_ctx->output_format = AV_PIX_FMT_ARGB;
> +    } else {
> +        vpp_ctx->output_format = AV_PIX_FMT_NONE;
> +    }
> +
> +    return 0;
> +}
> +
> +static void tonemap_vaapi_vpp_ctx_uninit(AVFilterContext *avctx)
> +{
> +    return ff_vaapi_vpp_ctx_uninit(avctx);
> +}

This function is unnecessary indirection.

> +
> +static int tonemap_vaapi_vpp_query_formats(AVFilterContext *avctx)
> +{
> +    int err;
> +
> +    enum AVPixelFormat pix_in_fmts[] = {
> +        AV_PIX_FMT_P010,     //Input
> +    };
> +
> +    enum AVPixelFormat pix_out_fmts[] = {
> +        AV_PIX_FMT_GBRP10,   //H2H RGB10
> +        AV_PIX_FMT_ARGB,     //H2S RGB8
> +    };
> +
> +    err = ff_formats_ref(ff_make_format_list(pix_in_fmts),
> +                         &avctx->inputs[0]->out_formats);
> +    if (err < 0)
> +        return err;
> +
> +    err = ff_formats_ref(ff_make_format_list(pix_out_fmts),
> +                         &avctx->outputs[0]->in_formats);
> +    if (err < 0)
> +        return err;

This doesn't do anything - the below function immediately overwrites them.

> +
> +    return ff_vaapi_vpp_query_formats(avctx);
> +}
> +
> +static int tonemap_vaapi_vpp_config_input(AVFilterLink *inlink)
> +{
> +    return ff_vaapi_vpp_config_input(inlink);
> +}
> +
> +static int tonemap_vaapi_vpp_config_output(AVFilterLink *outlink)
> +{
> +    return ff_vaapi_vpp_config_output(outlink);
> +}

These functions aren't needed.

> +
> +#define OFFSET(x) offsetof(HDRVAAPIContext, x)
> +#define FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM)
> +static const AVOption tonemap_vaapi_options[] = {
> +    { "type",    "hdr type",            OFFSET(hdr_type), AV_OPT_TYPE_INT, { .i64 = HDR_VAAPI_H2H }, 0, 2, FLAGS, "type" },
> +        { "h2h", "vaapi P010 to RGB10", 0, AV_OPT_TYPE_CONST, {.i64=HDR_VAAPI_H2H}, INT_MIN, INT_MAX, FLAGS, "type" },
> +        { "h2s", "vaapi P010 to RGB8",  0, AV_OPT_TYPE_CONST, {.i64=HDR_VAAPI_H2S}, INT_MIN, INT_MAX, FLAGS, "type" },

I think this would work better by defining the output format.  (Like "format" on tonemap_opencl.)

> +    { "display", "set master display",  OFFSET(master_display), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
> +    { "light",   "set content light",   OFFSET(content_light),  AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
> +    { NULL }
> +};
> +
> +
> +AVFILTER_DEFINE_CLASS(tonemap_vaapi);
> +
> +static const AVFilterPad tonemap_vaapi_inputs[] = {
> +    {
> +        .name         = "default",
> +        .type         = AVMEDIA_TYPE_VIDEO,
> +        .filter_frame = &tonemap_vaapi_filter_frame,
> +        .config_props = &tonemap_vaapi_vpp_config_input,
> +    },
> +    { NULL }
> +};
> +
> +static const AVFilterPad tonemap_vaapi_outputs[] = {
> +    {
> +        .name = "default",
> +        .type = AVMEDIA_TYPE_VIDEO,
> +        .config_props = &tonemap_vaapi_vpp_config_output,
> +    },
> +    { NULL }
> +};
> +
> +AVFilter ff_vf_tonemap_vaapi = {
> +    .name           = "tonemap_vaapi",
> +    .description    = NULL_IF_CONFIG_SMALL("VAAPI VPP for tonemap"),
> +    .priv_size      = sizeof(HDRVAAPIContext),
> +    .init           = &tonemap_vaapi_init,
> +    .uninit         = &tonemap_vaapi_vpp_ctx_uninit,
> +    .query_formats  = &tonemap_vaapi_vpp_query_formats,
> +    .inputs         = tonemap_vaapi_inputs,
> +    .outputs        = tonemap_vaapi_outputs,
> +    .priv_class     = &tonemap_vaapi_class,
> +    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
> +};
> diff --git a/libavutil/hwcontext_vaapi.c b/libavutil/hwcontext_vaapi.c
> index 8624369bb9..f18de1d778 100644
> --- a/libavutil/hwcontext_vaapi.c
> +++ b/libavutil/hwcontext_vaapi.c
> @@ -124,6 +124,9 @@ static const VAAPIFormatDescriptor vaapi_format_map[] = {
>  #endif
>      MAP(BGRA, RGB32,   BGRA, 0),
>      MAP(BGRX, RGB32,   BGR0, 0),
> +#ifdef VA_RT_FORMAT_RGB32_10
> +    MAP(RGBA, RGB32_10, GBRP10, 0),

These three fields don't match at all:

* VA_FOURCC_RGBA is a specific format with four 8-bit sample values packed into 32 bits per pixel.
* VA_RT_FORMAT_RGB32_10 indicates a format with three 10-bit sample values packed into 32 bits per pixel.
* AV_PIX_FMT_GBRP10 is a specific format with three planes of 16-bit values padded to 16 bits.

In any case, changes to different libraries should be in different patches.

> +#endif
>      MAP(RGBA, RGB32,   RGBA, 0),
>      MAP(RGBX, RGB32,   RGB0, 0),
>  #ifdef VA_FOURCC_ABGR
> 

- Mark
Zachary Zhou Jan. 3, 2019, 8:16 a.m.
> -----Original Message-----

> From: ffmpeg-devel [mailto:ffmpeg-devel-bounces@ffmpeg.org] On Behalf Of

> Mark Thompson

> Sent: Monday, December 31, 2018 3:39 AM

> To: ffmpeg-devel@ffmpeg.org

> Subject: Re: [FFmpeg-devel] [PATCH V3] avfilter: Add tonemap vaapi filter

> 

> On 29/12/2018 07:02, Zachary Zhou wrote:

> > It supports ICL platform.

> 

> When will this hardware be available?


Currently I am not sure about the availability, but I will keep watching it.

> 

> Is this a new fixed-function feature, or will it be available on current hardware

> as well?


As far as I know, different platforms may use different HW features: some platforms use the EUs, some use the VEBOX.
The driver team may make this filter work on other platforms in the future, such as KBL, CFL, etc.

> 

> > H2H (HDR to HDR): P010 -> RGB10

> > H2S (HDR to SDR): P010 -> RGB8

> >

> > libva commit for HDR10

> >

> https://github.com/intel/libva/commit/cf11abe5e1b9c93ee75cf97407695716

> > 2c1605b9

> >

> > Signed-off-by: Zachary Zhou <zachary.zhou@intel.com>

> > ---

> >  configure                      |   2 +

> >  doc/filters.texi               |  63 ++++

> >  libavfilter/Makefile           |   1 +

> >  libavfilter/allfilters.c       |   1 +

> >  libavfilter/vaapi_vpp.c        |  29 ++

> >  libavfilter/vaapi_vpp.h        |   8 +

> >  libavfilter/vf_tonemap_vaapi.c | 542

> +++++++++++++++++++++++++++++++++

> >  libavutil/hwcontext_vaapi.c    |   3 +

> >  8 files changed, 649 insertions(+)

> >  create mode 100644 libavfilter/vf_tonemap_vaapi.c

> >

> > diff --git a/configure b/configure

> > index be49c19b88..baf70d03fc 100755

> > --- a/configure

> > +++ b/configure

> > @@ -3480,6 +3480,7 @@ tinterlace_merge_test_deps="tinterlace_filter"

> >  tinterlace_pad_test_deps="tinterlace_filter"

> >  tonemap_filter_deps="const_nan"

> >  tonemap_opencl_filter_deps="opencl const_nan"

> > +tonemap_vaapi_filter_deps="vaapi

> VAProcFilterParameterBufferHDRToneMapping"

> >  transpose_opencl_filter_deps="opencl"

> >  unsharp_opencl_filter_deps="opencl"

> >  uspp_filter_deps="gpl avcodec"

> > @@ -5984,6 +5985,7 @@ check_type "d3d9.h dxva2api.h"

> > DXVA2_ConfigPictureDecode -D_WIN32_WINNT=0x0602

> >

> >  check_type "va/va.h va/va_dec_hevc.h" "VAPictureParameterBufferHEVC"

> >  check_struct "va/va.h" "VADecPictureParameterBufferVP9" bit_depth

> > +check_type "va/va.h va/va_vpp.h"

> "VAProcFilterParameterBufferHDRToneMapping"

> >  check_type "va/va.h va/va_enc_hevc.h"

> "VAEncPictureParameterBufferHEVC"

> >  check_type "va/va.h va/va_enc_jpeg.h"

> "VAEncPictureParameterBufferJPEG"

> >  check_type "va/va.h va/va_enc_vp8.h"  "VAEncPictureParameterBufferVP8"

> > diff --git a/doc/filters.texi b/doc/filters.texi index

> > ac4c9b44d8..9ed53a1008 100644

> > --- a/doc/filters.texi

> > +++ b/doc/filters.texi

> > @@ -19130,6 +19130,69 @@ Convert HDR(PQ/HLG) video to

> > bt2020-transfer-characteristic p010 format using li  @end example

> > @end itemize

> >

> > +@section tonemap_vaapi

> > +

> > +Perform HDR(High Dynamic Range) to HDR and HDR to SDR conversion

> with tone-mapping.

> > +

> > +It accepts the following parameters:

> > +

> > +@table @option

> > +@item type

> > +Specify the tone-mapping operator to be used.

> > +

> > +Possible values are:

> > +@table @var

> > +@item h2h

> > +Perform H2H(HDR to HDR), convert from p010 to rgb10 @item h2s Perform

> > +H2S(HDR to SDR), convert from p010 to rgb8 @end table

> 

> What are RGB10 and RGB8?


RGB10 is R10G10B10A2
RGB8 is ARGB

I will add more to the document.

> 

> > +

> > +@item display

> > +Set mastering display metadata for H2H

> > +

> > +Can assume the following values:

> > +@table @var

> > +@item G

> > +Green primary G(x|y)

> > +@item B

> > +Blue primary B(x|y)

> > +@item R

> > +Red primary R(x|y)

> > +@item WP

> > +White point WP(x|y)

> > +@item L

> > +Display mastering luminance L(max|min) @end table

> > +

> > +@item light

> > +Set content light level for H2H

> > +

> > +Can assume the following values:

> > +@table @var

> > +@item CLL

> > +Max content light level

> > +@item FALL

> > +Max average light level per frame

> > +@end table

> 

> What units are all of these values in?


I will add the units to the document.

> 

> > +

> > +@end table

> > +

> > +@subsection Example

> > +

> > +@itemize

> > +@item

> > +Convert HDR video to HDR video from p010 format to rgb10 format.

> > +@example

> > +-i INPUT -vf

> >

> +"tonemap_vaapi=h2h:display=G(13250|34500)B(7500|3000)R(34000|16000)

> WP

> > +(15635|16450)L(2000|2000):light=CLL(10000)FALL(1000)" OUTPUT @end

> > +example @item Convert HDR video to SDR video from p010 format to rgb8

> format.

> > +@example

> > +-i INPUT -vf "tonemap_vaapi=h2s" OUTPUT @end example @end itemize

> > +

> >  @section unsharp_opencl

> >

> >  Sharpen or blur the input video.

> > diff --git a/libavfilter/Makefile b/libavfilter/Makefile index

> > 6e2658186d..f6894209d1 100644

> > --- a/libavfilter/Makefile

> > +++ b/libavfilter/Makefile

> > @@ -390,6 +390,7 @@ OBJS-$(CONFIG_TMIX_FILTER)                   += vf_mix.o

> framesync.o

> >  OBJS-$(CONFIG_TONEMAP_FILTER)                += vf_tonemap.o colorspace.o

> >  OBJS-$(CONFIG_TONEMAP_OPENCL_FILTER)         += vf_tonemap_opencl.o

> colorspace.o opencl.o \

> >                                                  opencl/tonemap.o

> > opencl/colorspace_common.o

> > +OBJS-$(CONFIG_TONEMAP_VAAPI_FILTER)          += vf_tonemap_vaapi.o

> vaapi_vpp.o

> >  OBJS-$(CONFIG_TPAD_FILTER)                   += vf_tpad.o

> >  OBJS-$(CONFIG_TRANSPOSE_FILTER)              += vf_transpose.o

> >  OBJS-$(CONFIG_TRANSPOSE_NPP_FILTER)          += vf_transpose_npp.o

> cuda_check.o

> > diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c index

> > a600069500..754b84819d 100644

> > --- a/libavfilter/allfilters.c

> > +++ b/libavfilter/allfilters.c

> > @@ -369,6 +369,7 @@ extern AVFilter ff_vf_tlut2;  extern AVFilter

> > ff_vf_tmix;  extern AVFilter ff_vf_tonemap;  extern AVFilter

> > ff_vf_tonemap_opencl;

> > +extern AVFilter ff_vf_tonemap_vaapi;

> >  extern AVFilter ff_vf_tpad;

> >  extern AVFilter ff_vf_transpose;

> >  extern AVFilter ff_vf_transpose_npp;

> > diff --git a/libavfilter/vaapi_vpp.c b/libavfilter/vaapi_vpp.c index

> > c5bbc3b85b..7389ebd9d5 100644

> > --- a/libavfilter/vaapi_vpp.c

> > +++ b/libavfilter/vaapi_vpp.c

> > @@ -276,6 +276,35 @@ int

> ff_vaapi_vpp_make_param_buffers(AVFilterContext *avctx,

> >      return 0;

> >  }

> >

> > +int ff_vaapi_vpp_make_param_buffers2(AVFilterContext *avctx,

> > +                                     int type,

> > +                                     const void *data,

> > +                                     size_t size,

> > +                                     int count,

> > +                                     VABufferID *buffer_id) {

> > +    VAStatus vas;

> > +    VABufferID buffer;

> > +    VAAPIVPPContext *ctx   = avctx->priv;

> > +

> > +    av_assert0(ctx->nb_filter_buffers + 1 <= VAProcFilterCount);

> > +

> > +    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,

> > +                         type, size, count, (void*)data, &buffer);

> > +    if (vas != VA_STATUS_SUCCESS) {

> > +        av_log(avctx, AV_LOG_ERROR, "Failed to create parameter "

> > +               "buffer (type %d): %d (%s).\n",

> > +               type, vas, vaErrorStr(vas));

> > +        return AVERROR(EIO);

> > +    }

> > +

> > +    ctx->filter_buffers[ctx->nb_filter_buffers++] = buffer;

> > +    *buffer_id = buffer;

> > +

> > +    av_log(avctx, AV_LOG_DEBUG, "Param buffer (type %d, %zu bytes,

> count %d) "

> > +           "is %#x.\n", type, size, count, buffer);

> > +    return 0;

> > +}

> 

> I don't see the point of this function.  The only change is that the created

> buffer ID is additionally passed back in the argument pointer as well as in the

> existing place in ctx->filter_buffers.


Yes, this function creates a buffer ID, and we can use that buffer ID to change the data in the buffer; each AVFrame carries metadata that needs to be written into the filter buffer. I am not sure, at the design level, whether ctx->filter_buffers will hold many other buffers, and I think not every filter buffer needs to be updated based on the AVFrames.

I can try to remove this function if there is only one filter buffer in ctx->filter_buffers.

> 

> >

> >  int ff_vaapi_vpp_render_picture(AVFilterContext *avctx,

> >                                  VAProcPipelineParameterBuffer

> > *params, diff --git a/libavfilter/vaapi_vpp.h

> > b/libavfilter/vaapi_vpp.h index 0bc31018d4..e920c10591 100644

> > --- a/libavfilter/vaapi_vpp.h

> > +++ b/libavfilter/vaapi_vpp.h

> > @@ -72,6 +72,14 @@ int

> ff_vaapi_vpp_make_param_buffers(AVFilterContext *avctx,

> >                                      size_t size,

> >                                      int count);

> >

> > +int ff_vaapi_vpp_make_param_buffers2(AVFilterContext *avctx,

> > +                                     int type,

> > +                                     const void *data,

> > +                                     size_t size,

> > +                                     int count,

> > +                                     VABufferID *buffer_id);

> > +

> > +

> >  int ff_vaapi_vpp_render_picture(AVFilterContext *avctx,

> >                                  VAProcPipelineParameterBuffer *params,

> >                                  VASurfaceID output_surface); diff

> > --git a/libavfilter/vf_tonemap_vaapi.c

> > b/libavfilter/vf_tonemap_vaapi.c new file mode 100644 index

> > 0000000000..d77cd574b3

> > --- /dev/null

> > +++ b/libavfilter/vf_tonemap_vaapi.c

> > @@ -0,0 +1,542 @@

> > +/*

> > + * This file is part of FFmpeg.

> > + *

> > + * FFmpeg is free software; you can redistribute it and/or

> > + * modify it under the terms of the GNU Lesser General Public

> > + * License as published by the Free Software Foundation; either

> > + * version 2.1 of the License, or (at your option) any later version.

> > + *

> > + * FFmpeg is distributed in the hope that it will be useful,

> > + * but WITHOUT ANY WARRANTY; without even the implied warranty of

> > + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the

> GNU

> > + * Lesser General Public License for more details.

> > + *

> > + * You should have received a copy of the GNU Lesser General Public

> > + * License along with FFmpeg; if not, write to the Free Software

> > + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA

> > +02110-1301 USA  */ #include <string.h>

> > +

> > +#include "libavutil/avassert.h"

> > +#include "libavutil/mem.h"

> > +#include "libavutil/opt.h"

> > +#include "libavutil/pixdesc.h"

> > +#include "libavutil/mastering_display_metadata.h"

> 

> Headers should be in alphabetical order.


Got it, I will change it.

> 

> > +

> > +#include "avfilter.h"

> > +#include "formats.h"

> > +#include "internal.h"

> > +#include "vaapi_vpp.h"

> > +

> > +typedef enum {

> > +    HDR_VAAPI_H2H,

> > +    HDR_VAAPI_H2S,

> > +} HDRType;

> > +

> > +typedef struct HDRVAAPIContext {

> > +    VAAPIVPPContext vpp_ctx; // must be the first field

> > +

> > +    int hdr_type;

> > +

> > +    char *master_display;

> > +    char *content_light;

> > +

> > +    VABufferID          buffer;

> > +    VAHdrMetaDataHDR10  out_metadata;

> > +

> > +    AVFrameSideData    *src_display;

> > +    AVFrameSideData    *src_light;

> > +} HDRVAAPIContext;

> > +

> > +static int tonemap_vaapi_set_filter_params(AVFilterContext *avctx,

> > +AVFrame *input_frame) {

> > +    VAAPIVPPContext *vpp_ctx   = avctx->priv;

> > +    HDRVAAPIContext *ctx       = avctx->priv;

> > +    VAStatus vas;

> > +    VAHdrMetaDataHDR10  *in_metadata;

> > +    VAProcFilterParameterBufferHDRToneMapping *hdrtm_param;

> > +

> > +    vas = vaMapBuffer(vpp_ctx->hwctx->display, ctx->buffer,

> > + (void**)&hdrtm_param);

> 

> It's generally easier to make the object and then use vaCreateBuffer() rather

> than doing this mapping dance.


The reason I use the mapping functions is that I want to update the filter buffer's data.
It looks like the vaCreateBuffer call happens outside the vaBeginPicture/vaEndPicture range.

> 

> > +    if (vas != VA_STATUS_SUCCESS) {

> > +        av_log(avctx, AV_LOG_ERROR, "Failed to map "

> > +               "buffer (%d): %d (%s).\n",

> > +               ctx->buffer, vas, vaErrorStr(vas));

> > +        return AVERROR(EIO);

> > +    }

> > +

> > +    in_metadata = hdrtm_param->data.metadata;

> > +    ctx->src_display = av_frame_get_side_data(input_frame,

> > +

> AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);

> > +    if (ctx->src_display) {

> > +        const AVMasteringDisplayMetadata *hdr_meta =

> > +            (const AVMasteringDisplayMetadata *)ctx->src_display->data;

> > +        if (hdr_meta->has_luminance) { #define LAV_UINT32(a) (a.num)

> > +            in_metadata->max_display_mastering_luminance =

> > +                LAV_UINT32(hdr_meta->max_luminance);

> > +            in_metadata->min_display_mastering_luminance =

> > +                LAV_UINT32(hdr_meta->min_luminance);

> > +#undef LAV_UINT32

> > +            av_log(avctx, AV_LOG_DEBUG,

> > +                   "Mastering Display Metadata(in luminance):\n");

> > +            av_log(avctx, AV_LOG_DEBUG,

> > +                   "min_luminance=%d, max_luminance=%d\n",

> > +                   in_metadata->min_display_mastering_luminance,

> > +                   in_metadata->max_display_mastering_luminance);

> > +        }

> > +

> > +        if (hdr_meta->has_primaries) {

> > +#define LAV_RED    0

> > +#define LAV_GREEN  1

> > +#define LAV_BLUE   2

> > +#define LAV_UINT16(a) (a.num)

> > +            in_metadata->display_primaries_x[0] =

> > +                LAV_UINT16(hdr_meta->display_primaries[LAV_GREEN][0]);

> > +            in_metadata->display_primaries_y[0] =

> > +                LAV_UINT16(hdr_meta->display_primaries[LAV_GREEN][1]);

> > +            in_metadata->display_primaries_x[1] =

> > +                LAV_UINT16(hdr_meta->display_primaries[LAV_BLUE][0]);

> > +            in_metadata->display_primaries_y[1] =

> > +                LAV_UINT16(hdr_meta->display_primaries[LAV_BLUE][1]);

> > +            in_metadata->display_primaries_x[2] =

> > +                LAV_UINT16(hdr_meta->display_primaries[LAV_RED][0]);

> > +            in_metadata->display_primaries_y[2] =

> > +                LAV_UINT16(hdr_meta->display_primaries[LAV_RED][1]);

> > +            in_metadata->white_point_x =

> > +                LAV_UINT16(hdr_meta->white_point[0]);

> > +            in_metadata->white_point_y =

> > +                LAV_UINT16(hdr_meta->white_point[1]);

> > +#undef LAV_RED

> > +#undef LAV_GREEN

> > +#undef LAV_BLUE

> > +#undef LAV_UINT16

> > +            av_log(avctx, AV_LOG_DEBUG,

> > +                   "Mastering Display Metadata(in primaries):\n");

> > +            av_log(avctx, AV_LOG_DEBUG,

> > +                   "G(%u,%u) B(%u,%u) R(%u,%u) WP(%u,%u)\n",

> > +                   in_metadata->display_primaries_x[0],

> > +                   in_metadata->display_primaries_y[0],

> > +                   in_metadata->display_primaries_x[1],

> > +                   in_metadata->display_primaries_y[1],

> > +                   in_metadata->display_primaries_x[2],

> > +                   in_metadata->display_primaries_y[2],

> > +                   in_metadata->white_point_x,

> > +                   in_metadata->white_point_y);

> > +        }

> > +    }

> 

> I don't believe this code at all.  You've taken the numerator only of rational

> numbers, and the units for most of the values are totally different.


This also confused me; I referred to the code in the following:
https://github.com/FFmpeg/FFmpeg/blob/master/libavcodec/hevcdec.c#L2692-L2720

The metadata in the VAAPI and HEVC streams uses uint16/uint32 types, but in FFmpeg it is AVRational.
https://github.com/FFmpeg/FFmpeg/blob/master/libavcodec/hevc_sei.h#L89-L95
https://github.com/intel/libva/blob/master/va/va_vpp.h#L723-L780

Is there any other way to use the AVRational?

> 

> (Relatedly - how can the correctness of the output be verified?)

It looks like FFmpeg currently does not support dumping VAAPI surfaces well; I am considering adding other patches for that.

> 

> > +

> > +    ctx->src_light = av_frame_get_side_data(input_frame,

> > +                                            AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);

> > +    if (ctx->src_light) {

> > +        const AVContentLightMetadata *light_meta =

> > +            (const AVContentLightMetadata *) ctx->src_light->data;

> > +        in_metadata->max_content_light_level = light_meta->MaxCLL;

> > +        in_metadata->max_pic_average_light_level =

> > + light_meta->MaxFALL;

> > +

> > +        av_log(avctx, AV_LOG_DEBUG,

> > +               "Mastering Content Light Level (in):\n");

> > +        av_log(avctx, AV_LOG_DEBUG,

> > +               "MaxCLL(%u) MaxFALL(%u)\n",

> > +               in_metadata->max_content_light_level,

> > +               in_metadata->max_pic_average_light_level);

> > +    }

> 

> This doesn't make sense either - the units don't match.


I am checking this.

> 

> > +

> > +    vas = vaUnmapBuffer(vpp_ctx->hwctx->display, ctx->buffer);

> > +    if (vas != VA_STATUS_SUCCESS) {

> > +        av_log(avctx, AV_LOG_ERROR, "Failed to unmap output buffers: "

> > +               "%d (%s).\n", vas, vaErrorStr(vas));

> > +        return AVERROR(EIO);

> > +    }

> 

> What should the behaviour be for the unfilled fields?


Does "unfilled fields" mean that some AVFrames carry no metadata?
I will consider it if so.

> 

> > +

> > +    return 0;

> > +}

> > +

> > +static int tonemap_vaapi_build_filter_params(AVFilterContext *avctx)

> > +{

> > +    VAAPIVPPContext *vpp_ctx   = avctx->priv;

> > +    HDRVAAPIContext *ctx       = avctx->priv;

> > +    VAStatus vas;

> > +    VAProcFilterCapHighDynamicRange hdr_cap;

> > +    int num_query_caps;

> > +    VAProcFilterParameterBufferHDRToneMapping hdrtm_param;

> > +    VAHdrMetaDataHDR10  in_metadata;

> > +

> > +    vas = vaQueryVideoProcFilterCaps(vpp_ctx->hwctx->display,

> > +                                     vpp_ctx->va_context,

> > +                                     VAProcFilterHighDynamicRangeToneMapping,

> > +                                     &hdr_cap, &num_query_caps);

> 

> There can be multiple caps here - you need to grab the whole list and search it

> for the one you want.


Got it, I will change the code.

> 

> Query behaviour of the iHD driver seems a bit broken on current platforms - it

> declares that VAProcFilterHighDynamicRangeToneMapping is supported in

> vaQueryVideoProcFilters(), but then this query fails with invalid argument.


I think the query may have failed because of the following code -
https://github.com/intel/media-driver/blob/master/media_driver/linux/common/vp/ddi/media_libva_vp.c#L3931

From the code, it only works on the ICL platform.

> 

> > +    if (vas != VA_STATUS_SUCCESS) {

> > +        av_log(avctx, AV_LOG_ERROR, "Failed to query HDR caps "

> > +               "context: %d (%s).\n", vas, vaErrorStr(vas));

> > +        return AVERROR(EIO);

> > +    }

> > +

> > +    if (hdr_cap.metadata_type == VAProcHighDynamicRangeMetadataNone)

> {

> > +        av_log(avctx, AV_LOG_ERROR, "VAAPI driver doesn't support HDR\n");

> > +        return AVERROR(EINVAL);

> > +    }

> > +

> > +    if (! (VA_TONE_MAPPING_HDR_TO_HDR & hdr_cap.caps_flag) &&

> > +        ! (VA_TONE_MAPPING_HDR_TO_SDR & hdr_cap.caps_flag)) {

> > +        av_log(avctx, AV_LOG_ERROR,

> > +               "VAAPI driver doesn't support H2H or H2S\n");

> 

> You know which of these you want, so you should check specifically for that

> one only.


Got it, I will do the change.

> 

> > +        return AVERROR(EINVAL);

> > +    }

> > +

> > +    memset(&hdrtm_param, 0, sizeof(hdrtm_param));

> > +    memset(&in_metadata, 0, sizeof(in_metadata));

> > +    hdrtm_param.type = VAProcFilterHighDynamicRangeToneMapping;

> > +    hdrtm_param.data.metadata_type =

> VAProcHighDynamicRangeMetadataHDR10;

> > +    hdrtm_param.data.metadata      = &in_metadata;

> > +    hdrtm_param.data.metadata_size = sizeof(VAHdrMetaDataHDR10);

> > +

> > +    ff_vaapi_vpp_make_param_buffers2(avctx,

> > +                                     VAProcFilterParameterBufferType,

> > +                                     &hdrtm_param,

> > + sizeof(hdrtm_param), 1, &ctx->buffer);

> > +

> > +    return 0;

> > +}

> > +

> > +static int tonemap_vaapi_filter_frame(AVFilterLink *inlink, AVFrame

> > +*input_frame) {

> > +    AVFilterContext *avctx     = inlink->dst;

> > +    AVFilterLink *outlink      = avctx->outputs[0];

> > +    VAAPIVPPContext *vpp_ctx   = avctx->priv;

> > +    HDRVAAPIContext *ctx       = avctx->priv;

> > +    AVFrame *output_frame      = NULL;

> > +    VASurfaceID input_surface, output_surface;

> > +    VARectangle input_region,  output_region;

> > +

> > +    VAProcPipelineParameterBuffer params;

> > +    int err;

> > +

> > +    VAHdrMetaData              out_hdr_metadata;

> > +

> > +    av_log(avctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n",

> > +           av_get_pix_fmt_name(input_frame->format),

> > +           input_frame->width, input_frame->height,

> > + input_frame->pts);

> > +

> > +    if (vpp_ctx->va_context == VA_INVALID_ID)

> > +        return AVERROR(EINVAL);

> 

> Probably not?  EINVAL indicates that a user-supplied argument is invalid.


Got it — this was copied from another VAAPI filter (vf_misc_vaapi.c); I will reconsider it.

> 

> > +

> > +    err = tonemap_vaapi_set_filter_params(avctx, input_frame);

> > +    if (err < 0)

> > +        goto fail;

> > +

> > +    input_surface = (VASurfaceID)(uintptr_t)input_frame->data[3];

> > +    av_log(avctx, AV_LOG_DEBUG, "Using surface %#x for transpose vpp

> > + input.\n",

> 

> "transpose"?


I will change it.

> 

> > +           input_surface);

> > +

> > +    output_frame = ff_get_video_buffer(outlink, vpp_ctx->output_width,

> > +                                       vpp_ctx->output_height);

> > +    if (!output_frame) {

> > +        err = AVERROR(ENOMEM);

> > +        goto fail;

> > +    }

> > +

> > +    output_surface = (VASurfaceID)(uintptr_t)output_frame->data[3];

> > +    av_log(avctx, AV_LOG_DEBUG, "Using surface %#x for transpose vpp

> > + output.\n",

> 

> "transpose"?


I will change it.

> 

> > +           output_surface);

> > +    memset(&params, 0, sizeof(params));

> > +    input_region = (VARectangle) {

> > +        .x      = 0,

> > +        .y      = 0,

> > +        .width  = input_frame->width,

> > +        .height = input_frame->height,

> > +    };

> > +

> > +    output_region = (VARectangle) {

> > +        .x      = 0,

> > +        .y      = 0,

> > +        .width  = output_frame->width,

> > +        .height = output_frame->height,

> > +    };

> 

> You don't need these rectangles.  (va_vpp.h: "If NULL, surface_region implies

> the whole surface.")


Got it, will change it.

> 

> > +

> > +    if (vpp_ctx->nb_filter_buffers) {

> 

> Condition is always true.


Got it, will change it.

> 

> > +        params.filters     = &vpp_ctx->filter_buffers[0];

> > +        params.num_filters = vpp_ctx->nb_filter_buffers;

> > +    }

> > +

> > +    params.surface = input_surface;

> > +    params.surface_region = &input_region;

> > +    params.surface_color_standard =

> > +        ff_vaapi_vpp_colour_standard(input_frame->colorspace);

> > +

> > +    params.output_region = &output_region;

> > +    params.output_background_color = 0xff000000;

> > +    params.output_color_standard = params.surface_color_standard;

> You need to do more with the colour properties of the input and output here.

> Check that they actually match what you want - a tonemap treating input as

> BT.2020 with PQ transfer function is going to make no sense if the input is

> actually BT.709.  The output will need its properties setting to whatever they

> actually get mapped to, as well.

> 

> (Related: <https://lists.ffmpeg.org/pipermail/ffmpeg-devel/2018-

> December/238312.html>.)


Actually, I did that in the following code; I will consider making the change.

> 

> > +

> > +    params.pipeline_flags = 0;

> > +    params.filter_flags = VA_FRAME_PICTURE;

> 

> Does this flag do anything?


I will clean the code.

> 

> > +

> > +    switch (ctx->hdr_type)

> > +    {

> > +    case HDR_VAAPI_H2H:

> > +        params.surface_color_standard = VAProcColorStandardExplicit;

> > +        params.output_color_standard = VAProcColorStandardExplicit;

> > +        params.input_color_properties.colour_primaries = 9;

> > +        params.output_color_properties.colour_primaries = 9;

> > +        params.input_color_properties.transfer_characteristics = 16;

> > +        params.output_color_properties.transfer_characteristics = 16;

> > +        break;

> > +    case HDR_VAAPI_H2S:

> > +        params.surface_color_standard = VAProcColorStandardExplicit;

> > +        params.input_color_properties.colour_primaries = 9;

> > +        params.input_color_properties.transfer_characteristics = 16;

> > +        break;

> > +    default:

> 

> When is this case hit?


I am considering adding an assert here.

> 

> > +        break;

> > +    }

> > +

> > +    if (ctx->hdr_type == HDR_VAAPI_H2H) {

> 

> Can you explain the use-case you are expecting for the HDR-to-HDR mapping?

> It's not obvious to me what people are meant to do with the explicit settings.

> 

> (From the name I was expecting something like HDR10 to HLG, but that clearly

> isn't what it's actually doing.)


Currently H2H is P010 to R10G10B10A2; it may support P010 to P010 in the future.


> 

> > +        memset(&out_hdr_metadata, 0, sizeof(out_hdr_metadata));

> > +        if (ctx->master_display) {

> > +            if (10 != sscanf(ctx->master_display,

> > +

> "G(%hu|%hu)B(%hu|%hu)R(%hu|%hu)WP(%hu|%hu)L(%u|%u)",

> > +                             &ctx->out_metadata.display_primaries_x[0],

> > +                             &ctx->out_metadata.display_primaries_y[0],

> > +                             &ctx->out_metadata.display_primaries_x[1],

> > +                             &ctx->out_metadata.display_primaries_y[1],

> > +                             &ctx->out_metadata.display_primaries_x[2],

> > +                             &ctx->out_metadata.display_primaries_y[2],

> > +                             &ctx->out_metadata.white_point_x,

> > +                             &ctx->out_metadata.white_point_y,

> > +                             &ctx->out_metadata.max_display_mastering_luminance,

> > +                             &ctx->out_metadata.min_display_mastering_luminance)) {

> > +                av_log(avctx, AV_LOG_ERROR,

> > +                       "Option mastering-display input invalid\n");

> > +                return AVERROR(EINVAL);

> > +            } else {

> > +                AVFrameSideData *metadata;

> > +                AVMasteringDisplayMetadata *hdr_meta;

> > +

> > +                metadata = av_frame_new_side_data(output_frame,

> > +

> AV_FRAME_DATA_MASTERING_DISPLAY_METADATA,

> > +                                                  sizeof(AVMasteringDisplayMetadata));

> > +                if (!metadata) {

> > +                    av_log(avctx, AV_LOG_ERROR, "Cannot create new side data for

> output\n");

> > +                    return AVERROR(ENOMEM);

> > +                }

> > +

> > +                if (ctx->src_display) {

> > +                    memcpy(metadata->data,

> > +                           ctx->src_display->data,

> > +                           sizeof(AVMasteringDisplayMetadata));

> > +                }

> > +

> > +                hdr_meta = (AVMasteringDisplayMetadata *)metadata->data;

> > +#define LAV_RED    0

> > +#define LAV_GREEN  1

> > +#define LAV_BLUE   2

> > +                hdr_meta->display_primaries[LAV_GREEN][0].num = ctx-

> >out_metadata.display_primaries_x[0];

> > +                hdr_meta->display_primaries[LAV_GREEN][1].num = ctx-

> >out_metadata.display_primaries_y[0];

> > +                hdr_meta->display_primaries[LAV_BLUE][0].num  = ctx-

> >out_metadata.display_primaries_x[1];

> > +                hdr_meta->display_primaries[LAV_BLUE][1].num  = ctx-

> >out_metadata.display_primaries_y[1];

> > +                hdr_meta->display_primaries[LAV_RED][0].num   = ctx-

> >out_metadata.display_primaries_x[2];

> > +                hdr_meta->display_primaries[LAV_RED][1].num   = ctx-

> >out_metadata.display_primaries_y[2];

> > +                hdr_meta->white_point[0].num = ctx-

> >out_metadata.white_point_x;

> > +                hdr_meta->white_point[1].num =

> > +ctx->out_metadata.white_point_y; #undef LAV_RED #undef LAV_GREEN

> > +#undef LAV_BLUE

> > +                hdr_meta->has_primaries = 1;

> > +

> > +                hdr_meta->max_luminance.num = ctx-

> >out_metadata.max_display_mastering_luminance;

> > +                hdr_meta->min_luminance.num = ctx-

> >out_metadata.min_display_mastering_luminance;

> > +                hdr_meta->has_luminance = 1;

> 

> You've set all of these fields to values N/0, so undefined.


I tried copy other fields with memcpy in above code.
I will double check it.

> 

> > +

> > +                av_log(avctx, AV_LOG_DEBUG,

> > +                       "Mastering Display Metadata(out luminance):\n");

> > +                av_log(avctx, AV_LOG_DEBUG,

> > +                       "min_luminance=%u, max_luminance=%u\n",

> > +                       ctx->out_metadata.min_display_mastering_luminance,

> > +

> > + ctx->out_metadata.max_display_mastering_luminance);

> > +

> > +                av_log(avctx, AV_LOG_DEBUG,

> > +                       "Mastering Display Metadata(out primaries):\n");

> > +                av_log(avctx, AV_LOG_DEBUG,

> > +                       "G(%u,%u) B(%u,%u) R(%u,%u) WP(%u,%u)\n",

> > +                       ctx->out_metadata.display_primaries_x[0],

> > +                       ctx->out_metadata.display_primaries_y[0],

> > +                       ctx->out_metadata.display_primaries_x[1],

> > +                       ctx->out_metadata.display_primaries_y[1],

> > +                       ctx->out_metadata.display_primaries_x[2],

> > +                       ctx->out_metadata.display_primaries_y[2],

> > +                       ctx->out_metadata.white_point_x,

> > +                       ctx->out_metadata.white_point_y);

> > +            }

> > +        }

> > +

> > +        if (ctx->content_light) {

> > +            if (2 != sscanf(ctx->content_light,

> > +                            "CLL(%hu)FALL(%hu)",

> > +                            &ctx->out_metadata.max_content_light_level,

> > +                            &ctx->out_metadata.max_pic_average_light_level)) {

> > +                av_log(avctx, AV_LOG_ERROR,

> > +                       "Option content-light input invalid\n");

> > +                return AVERROR(EINVAL);

> > +            } else {

> > +                AVFrameSideData *metadata_lt;

> > +                AVContentLightMetadata *hdr_meta_lt;

> > +

> > +                metadata_lt = av_frame_new_side_data(output_frame,

> > +                                                     AV_FRAME_DATA_CONTENT_LIGHT_LEVEL,

> > +                                                     sizeof(AVContentLightMetadata));

> > +                if (!metadata_lt) {

> > +                    av_log(avctx, AV_LOG_ERROR, "Cannot create new side data for

> output\n");

> > +                    return AVERROR(ENOMEM);

> > +                }

> > +

> > +                if (ctx->src_light) {

> > +                    memcpy(metadata_lt->data,

> > +                           ctx->src_light->data,

> > +                           sizeof(AVContentLightMetadata));

> > +                }

> > +

> > +                hdr_meta_lt = (AVContentLightMetadata *)metadata_lt->data;

> > +                hdr_meta_lt->MaxCLL = ctx-

> >out_metadata.max_content_light_level;

> > +                hdr_meta_lt->MaxFALL =

> > + ctx->out_metadata.max_pic_average_light_level;

> > +

> > +                av_log(avctx, AV_LOG_DEBUG,

> > +                       "Mastering Content Light Level (out):\n");

> > +                av_log(avctx, AV_LOG_DEBUG,

> > +                       "MaxCLL(%u) MaxFALL(%u)\n",

> > +                       ctx->out_metadata.max_content_light_level,

> > +                       ctx->out_metadata.max_pic_average_light_level);

> > +            }

> > +        }

> > +

> > +        out_hdr_metadata.metadata_type =

> VAProcHighDynamicRangeMetadataHDR10;

> > +        out_hdr_metadata.metadata      = &ctx->out_metadata;

> > +        out_hdr_metadata.metadata_size = sizeof(VAHdrMetaDataHDR10);

> > +

> > +        params.output_hdr_metadata = &out_hdr_metadata;

> > +    }

> > +

> > +    err = ff_vaapi_vpp_render_picture(avctx, &params, output_surface);

> > +    if (err < 0)

> > +        goto fail;

> > +

> > +    err = av_frame_copy_props(output_frame, input_frame);

> 

> This copies all of the input frame properties, including the side-data.


Got it, I will reconsider it.

> 

> > +    if (err < 0)

> > +        goto fail;

> > +    av_frame_free(&input_frame);

> > +

> > +    av_log(avctx, AV_LOG_DEBUG, "Filter output: %s, %ux%u

> (%"PRId64").\n",

> > +           av_get_pix_fmt_name(output_frame->format),

> > +           output_frame->width, output_frame->height,

> > + output_frame->pts);

> > +

> > +    return ff_filter_frame(outlink, output_frame);

> > +

> > +fail:

> > +    av_frame_free(&input_frame);

> > +    av_frame_free(&output_frame);

> > +    return err;

> > +}

> > +

> > +static av_cold int tonemap_vaapi_init(AVFilterContext *avctx) {

> > +    VAAPIVPPContext *vpp_ctx = avctx->priv;

> > +    HDRVAAPIContext *ctx     = avctx->priv;

> > +

> > +    ff_vaapi_vpp_ctx_init(avctx);

> > +    vpp_ctx->build_filter_params = tonemap_vaapi_build_filter_params;

> > +    vpp_ctx->pipeline_uninit = ff_vaapi_vpp_pipeline_uninit;

> > +

> > +    if (ctx->hdr_type == HDR_VAAPI_H2H) {

> > +        vpp_ctx->output_format = AV_PIX_FMT_GBRP10;

> > +    } else if (ctx->hdr_type == HDR_VAAPI_H2S) {

> > +        vpp_ctx->output_format = AV_PIX_FMT_ARGB;

> > +    } else {

> > +        vpp_ctx->output_format = AV_PIX_FMT_NONE;

> > +    }

> > +

> > +    return 0;

> > +}

> > +

> > +static void tonemap_vaapi_vpp_ctx_uninit(AVFilterContext *avctx) {

> > +    return ff_vaapi_vpp_ctx_uninit(avctx); }

> 

> This function is unnecessary indirection.


I will clean the code.

> 

> > +

> > +static int tonemap_vaapi_vpp_query_formats(AVFilterContext *avctx) {

> > +    int err;

> > +

> > +    enum AVPixelFormat pix_in_fmts[] = {

> > +        AV_PIX_FMT_P010,     //Input

> > +    };

> > +

> > +    enum AVPixelFormat pix_out_fmts[] = {

> > +        AV_PIX_FMT_GBRP10,   //H2H RGB10

> > +        AV_PIX_FMT_ARGB,     //H2S RGB8

> > +    };

> > +

> > +    err = ff_formats_ref(ff_make_format_list(pix_in_fmts),

> > +                         &avctx->inputs[0]->out_formats);

> > +    if (err < 0)

> > +        return err;

> > +

> > +    err = ff_formats_ref(ff_make_format_list(pix_out_fmts),

> > +                         &avctx->outputs[0]->in_formats);

> > +    if (err < 0)

> > +        return err;

> 

> This doesn't do anything - the below function immediately overwrites them.


Got it, I will try remove the calling ff_vaapi_vpp_query_formats(avctx);

> 

> > +

> > +    return ff_vaapi_vpp_query_formats(avctx);

> > +}

> > +

> > +static int tonemap_vaapi_vpp_config_input(AVFilterLink *inlink) {

> > +    return ff_vaapi_vpp_config_input(inlink);

> > +}

> > +

> > +static int tonemap_vaapi_vpp_config_output(AVFilterLink *outlink) {

> > +    return ff_vaapi_vpp_config_output(outlink);

> > +}

> 

> These functions aren't needed.


Got it, will clean them.

> 

> > +

> > +#define OFFSET(x) offsetof(HDRVAAPIContext, x) #define FLAGS

> > +(AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM) static

> const

> > +AVOption tonemap_vaapi_options[] = {

> > +    { "type",    "hdr type",            OFFSET(hdr_type), AV_OPT_TYPE_INT, { .i64

> = HDR_VAAPI_H2H }, 0, 2, FLAGS, "type" },

> > +        { "h2h", "vaapi P010 to RGB10", 0, AV_OPT_TYPE_CONST,

> {.i64=HDR_VAAPI_H2H}, INT_MIN, INT_MAX, FLAGS, "type" },

> > +        { "h2s", "vaapi P010 to RGB8",  0, AV_OPT_TYPE_CONST,

> > +{.i64=HDR_VAAPI_H2S}, INT_MIN, INT_MAX, FLAGS, "type" },

> 

> I think this would work better by defining the output format.  (Like "format"

> on tonemap_opencl.)


Got it, considering on it.

> 

> > +    { "display", "set master display",  OFFSET(master_display),

> AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },

> > +    { "light",   "set content light",   OFFSET(content_light),

> AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },

> > +    { NULL }

> > +};

> > +

> > +

> > +AVFILTER_DEFINE_CLASS(tonemap_vaapi);

> > +

> > +static const AVFilterPad tonemap_vaapi_inputs[] = {

> > +    {

> > +        .name         = "default",

> > +        .type         = AVMEDIA_TYPE_VIDEO,

> > +        .filter_frame = &tonemap_vaapi_filter_frame,

> > +        .config_props = &tonemap_vaapi_vpp_config_input,

> > +    },

> > +    { NULL }

> > +};

> > +

> > +static const AVFilterPad tonemap_vaapi_outputs[] = {

> > +    {

> > +        .name = "default",

> > +        .type = AVMEDIA_TYPE_VIDEO,

> > +        .config_props = &tonemap_vaapi_vpp_config_output,

> > +    },

> > +    { NULL }

> > +};

> > +

> > +AVFilter ff_vf_tonemap_vaapi = {

> > +    .name           = "tonemap_vaapi",

> > +    .description    = NULL_IF_CONFIG_SMALL("VAAPI VPP for tonemap"),

> > +    .priv_size      = sizeof(HDRVAAPIContext),

> > +    .init           = &tonemap_vaapi_init,

> > +    .uninit         = &tonemap_vaapi_vpp_ctx_uninit,

> > +    .query_formats  = &tonemap_vaapi_vpp_query_formats,

> > +    .inputs         = tonemap_vaapi_inputs,

> > +    .outputs        = tonemap_vaapi_outputs,

> > +    .priv_class     = &tonemap_vaapi_class,

> > +    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE, };

> > diff --git a/libavutil/hwcontext_vaapi.c b/libavutil/hwcontext_vaapi.c

> > index 8624369bb9..f18de1d778 100644

> > --- a/libavutil/hwcontext_vaapi.c

> > +++ b/libavutil/hwcontext_vaapi.c

> > @@ -124,6 +124,9 @@ static const VAAPIFormatDescriptor

> > vaapi_format_map[] = {  #endif

> >      MAP(BGRA, RGB32,   BGRA, 0),

> >      MAP(BGRX, RGB32,   BGR0, 0),

> > +#ifdef VA_RT_FORMAT_RGB32_10

> > +    MAP(RGBA, RGB32_10, GBRP10, 0),

> 

> These three fields don't match at all:

> 

> * VA_FOURCC_RGBA is a specific format with four 8-bit sample values packed

> into 32 bits per pixel.

> * VA_RT_FORMAT_RGB32_10 indicates a format with three 10-bit sample

> values packed into 32 bits per pixel.

> * AV_PIX_FMT_GBRP10 is a specific format with three planes of 16-bit values

> padded to 16 bits.

> 


currently the R10G10B10A2 surface is created by using VA_RT_FORMAT_RGB32_10 and VA_FOURCC_RGBA together.

I am suggesting that libva add a new fourcc for R10G10B10A2
https://github.com/intel/libva/issues/263

> In any case, changes to different libraries should be in different patches.


Got it, will try to create different patches.

> 

> > +#endif

> >      MAP(RGBA, RGB32,   RGBA, 0),

> >      MAP(RGBX, RGB32,   RGB0, 0),

> >  #ifdef VA_FOURCC_ABGR

> >

> 

> - Mark


Thank you Mark for the review.


> _______________________________________________

> ffmpeg-devel mailing list

> ffmpeg-devel@ffmpeg.org

> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel

Patch hide | download patch | download mbox

diff --git a/configure b/configure
index be49c19b88..baf70d03fc 100755
--- a/configure
+++ b/configure
@@ -3480,6 +3480,7 @@  tinterlace_merge_test_deps="tinterlace_filter"
 tinterlace_pad_test_deps="tinterlace_filter"
 tonemap_filter_deps="const_nan"
 tonemap_opencl_filter_deps="opencl const_nan"
+tonemap_vaapi_filter_deps="vaapi VAProcFilterParameterBufferHDRToneMapping"
 transpose_opencl_filter_deps="opencl"
 unsharp_opencl_filter_deps="opencl"
 uspp_filter_deps="gpl avcodec"
@@ -5984,6 +5985,7 @@  check_type "d3d9.h dxva2api.h" DXVA2_ConfigPictureDecode -D_WIN32_WINNT=0x0602
 
 check_type "va/va.h va/va_dec_hevc.h" "VAPictureParameterBufferHEVC"
 check_struct "va/va.h" "VADecPictureParameterBufferVP9" bit_depth
+check_type "va/va.h va/va_vpp.h" "VAProcFilterParameterBufferHDRToneMapping"
 check_type "va/va.h va/va_enc_hevc.h" "VAEncPictureParameterBufferHEVC"
 check_type "va/va.h va/va_enc_jpeg.h" "VAEncPictureParameterBufferJPEG"
 check_type "va/va.h va/va_enc_vp8.h"  "VAEncPictureParameterBufferVP8"
diff --git a/doc/filters.texi b/doc/filters.texi
index ac4c9b44d8..9ed53a1008 100644
--- a/doc/filters.texi
+++ b/doc/filters.texi
@@ -19130,6 +19130,69 @@  Convert HDR(PQ/HLG) video to bt2020-transfer-characteristic p010 format using li
 @end example
 @end itemize
 
+@section tonemap_vaapi
+
+Perform HDR (High Dynamic Range) to HDR and HDR to SDR conversion with tone-mapping.
+
+It accepts the following parameters:
+
+@table @option
+@item type
+Specify the tone-mapping operator to be used.
+
+Possible values are:
+@table @var
+@item h2h
+Perform H2H(HDR to HDR), convert from p010 to rgb10
+@item h2s
+Perform H2S(HDR to SDR), convert from p010 to rgb8
+@end table
+
+@item display
+Set mastering display metadata for H2H
+
+Can assume the following values:
+@table @var
+@item G
+Green primary G(x|y)
+@item B
+Blue primary B(x|y)
+@item R
+Red primary R(x|y)
+@item WP
+White point WP(x|y)
+@item L
+Display mastering luminance L(max|min)
+@end table
+
+@item light
+Set content light level for H2H
+
+Can assume the following values:
+@table @var
+@item CLL
+Max content light level
+@item FALL
+Max average light level per frame
+@end table
+
+@end table
+
+@subsection Example
+
+@itemize
+@item
+Convert HDR video to HDR video from p010 format to rgb10 format.
+@example
+-i INPUT -vf "tonemap_vaapi=h2h:display=G(13250|34500)B(7500|3000)R(34000|16000)WP(15635|16450)L(2000|2000):light=CLL(10000)FALL(1000)" OUTPUT 
+@end example
+@item
+Convert HDR video to SDR video from p010 format to rgb8 format.
+@example
+-i INPUT -vf "tonemap_vaapi=h2s" OUTPUT
+@end example
+@end itemize
+
 @section unsharp_opencl
 
 Sharpen or blur the input video.
diff --git a/libavfilter/Makefile b/libavfilter/Makefile
index 6e2658186d..f6894209d1 100644
--- a/libavfilter/Makefile
+++ b/libavfilter/Makefile
@@ -390,6 +390,7 @@  OBJS-$(CONFIG_TMIX_FILTER)                   += vf_mix.o framesync.o
 OBJS-$(CONFIG_TONEMAP_FILTER)                += vf_tonemap.o colorspace.o
 OBJS-$(CONFIG_TONEMAP_OPENCL_FILTER)         += vf_tonemap_opencl.o colorspace.o opencl.o \
                                                 opencl/tonemap.o opencl/colorspace_common.o
+OBJS-$(CONFIG_TONEMAP_VAAPI_FILTER)          += vf_tonemap_vaapi.o vaapi_vpp.o
 OBJS-$(CONFIG_TPAD_FILTER)                   += vf_tpad.o
 OBJS-$(CONFIG_TRANSPOSE_FILTER)              += vf_transpose.o
 OBJS-$(CONFIG_TRANSPOSE_NPP_FILTER)          += vf_transpose_npp.o cuda_check.o
diff --git a/libavfilter/allfilters.c b/libavfilter/allfilters.c
index a600069500..754b84819d 100644
--- a/libavfilter/allfilters.c
+++ b/libavfilter/allfilters.c
@@ -369,6 +369,7 @@  extern AVFilter ff_vf_tlut2;
 extern AVFilter ff_vf_tmix;
 extern AVFilter ff_vf_tonemap;
 extern AVFilter ff_vf_tonemap_opencl;
+extern AVFilter ff_vf_tonemap_vaapi;
 extern AVFilter ff_vf_tpad;
 extern AVFilter ff_vf_transpose;
 extern AVFilter ff_vf_transpose_npp;
diff --git a/libavfilter/vaapi_vpp.c b/libavfilter/vaapi_vpp.c
index c5bbc3b85b..7389ebd9d5 100644
--- a/libavfilter/vaapi_vpp.c
+++ b/libavfilter/vaapi_vpp.c
@@ -276,6 +276,35 @@  int ff_vaapi_vpp_make_param_buffers(AVFilterContext *avctx,
     return 0;
 }
 
+/*
+ * Variant of ff_vaapi_vpp_make_param_buffers() that additionally returns
+ * the ID of the newly created buffer through *buffer_id, so the caller
+ * can map and update the buffer contents later (used by the tonemap
+ * filter to refresh per-frame HDR metadata).
+ *
+ * Returns 0 on success, AVERROR(EIO) if vaCreateBuffer() fails.
+ */
+int ff_vaapi_vpp_make_param_buffers2(AVFilterContext *avctx,
+                                     int type,
+                                     const void *data,
+                                     size_t size,
+                                     int count,
+                                     VABufferID *buffer_id)
+{
+    VAStatus vas;
+    VABufferID buffer;
+    VAAPIVPPContext *ctx   = avctx->priv;
+
+    /* Guard against overflowing the filter_buffers array (presumably
+     * sized VAProcFilterCount entries — verify in vaapi_vpp.h). */
+    av_assert0(ctx->nb_filter_buffers + 1 <= VAProcFilterCount);
+
+    vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
+                         type, size, count, (void*)data, &buffer);
+    if (vas != VA_STATUS_SUCCESS) {
+        av_log(avctx, AV_LOG_ERROR, "Failed to create parameter "
+               "buffer (type %d): %d (%s).\n",
+               type, vas, vaErrorStr(vas));
+        return AVERROR(EIO);
+    }
+
+    /* Track the buffer for pipeline submission and later cleanup, and
+     * hand its ID back to the caller. */
+    ctx->filter_buffers[ctx->nb_filter_buffers++] = buffer;
+    *buffer_id = buffer;
+
+    av_log(avctx, AV_LOG_DEBUG, "Param buffer (type %d, %zu bytes, count %d) "
+           "is %#x.\n", type, size, count, buffer);
+    return 0;
+}
 
 int ff_vaapi_vpp_render_picture(AVFilterContext *avctx,
                                 VAProcPipelineParameterBuffer *params,
diff --git a/libavfilter/vaapi_vpp.h b/libavfilter/vaapi_vpp.h
index 0bc31018d4..e920c10591 100644
--- a/libavfilter/vaapi_vpp.h
+++ b/libavfilter/vaapi_vpp.h
@@ -72,6 +72,14 @@  int ff_vaapi_vpp_make_param_buffers(AVFilterContext *avctx,
                                     size_t size,
                                     int count);
 
+int ff_vaapi_vpp_make_param_buffers2(AVFilterContext *avctx,
+                                     int type,
+                                     const void *data,
+                                     size_t size,
+                                     int count,
+                                     VABufferID *buffer_id);
+
+
 int ff_vaapi_vpp_render_picture(AVFilterContext *avctx,
                                 VAProcPipelineParameterBuffer *params,
                                 VASurfaceID output_surface);
diff --git a/libavfilter/vf_tonemap_vaapi.c b/libavfilter/vf_tonemap_vaapi.c
new file mode 100644
index 0000000000..d77cd574b3
--- /dev/null
+++ b/libavfilter/vf_tonemap_vaapi.c
@@ -0,0 +1,542 @@ 
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include <string.h>
+
+#include "libavutil/avassert.h"
+#include "libavutil/mem.h"
+#include "libavutil/opt.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/mastering_display_metadata.h"
+
+#include "avfilter.h"
+#include "formats.h"
+#include "internal.h"
+#include "vaapi_vpp.h"
+
+/* Tone-mapping modes selectable through the "type" option. */
+typedef enum {
+    HDR_VAAPI_H2H,   // HDR to HDR: P010 -> RGB10
+    HDR_VAAPI_H2S,   // HDR to SDR: P010 -> RGB8
+} HDRType;
+
+typedef struct HDRVAAPIContext {
+    VAAPIVPPContext vpp_ctx; // must be the first field
+
+    int hdr_type;            // HDRType value chosen via the "type" option
+
+    char *master_display;    // "display" option: H2H output mastering display string
+    char *content_light;     // "light" option: H2H output content light level string
+
+    VABufferID          buffer;       // HDR tone-mapping filter parameter buffer
+    VAHdrMetaDataHDR10  out_metadata; // output HDR10 metadata parsed from options
+
+    AVFrameSideData    *src_display;  // input frame mastering-display side data
+    AVFrameSideData    *src_light;    // input frame content-light side data
+} HDRVAAPIContext;
+
+/*
+ * Fill the tone-mapping filter's input HDR10 metadata from the frame's
+ * mastering-display and content-light side data.
+ *
+ * Maps the parameter buffer created in tonemap_vaapi_build_filter_params()
+ * and writes through the VAHdrMetaDataHDR10 pointer stored inside it.
+ * Also caches the side-data pointers in the context for later use.
+ *
+ * Returns 0 on success, AVERROR(EIO) if the VA buffer cannot be mapped
+ * or unmapped.
+ */
+static int tonemap_vaapi_set_filter_params(AVFilterContext *avctx, AVFrame *input_frame)
+{
+    VAAPIVPPContext *vpp_ctx   = avctx->priv;
+    HDRVAAPIContext *ctx       = avctx->priv;
+    VAStatus vas;
+    VAHdrMetaDataHDR10  *in_metadata;
+    VAProcFilterParameterBufferHDRToneMapping *hdrtm_param;
+
+    vas = vaMapBuffer(vpp_ctx->hwctx->display, ctx->buffer, (void**)&hdrtm_param);
+    if (vas != VA_STATUS_SUCCESS) {
+        av_log(avctx, AV_LOG_ERROR, "Failed to map "
+               "buffer (%d): %d (%s).\n",
+               ctx->buffer, vas, vaErrorStr(vas));
+        return AVERROR(EIO);
+    }
+
+    /* NOTE(review): data.metadata was set to the address of a stack-local
+     * variable in tonemap_vaapi_build_filter_params(); dereferencing it
+     * here, after that function returned, may touch dead storage — the
+     * metadata should probably live in HDRVAAPIContext. Confirm. */
+    in_metadata = hdrtm_param->data.metadata;
+    ctx->src_display = av_frame_get_side_data(input_frame,
+                                              AV_FRAME_DATA_MASTERING_DISPLAY_METADATA);
+    if (ctx->src_display) {
+        const AVMasteringDisplayMetadata *hdr_meta =
+            (const AVMasteringDisplayMetadata *)ctx->src_display->data;
+        if (hdr_meta->has_luminance) {
+            /* NOTE(review): only the AVRational numerator is copied; the
+             * denominator is ignored — assumes den == 1, confirm upstream. */
+#define LAV_UINT32(a) (a.num)
+            in_metadata->max_display_mastering_luminance =
+                LAV_UINT32(hdr_meta->max_luminance);
+            in_metadata->min_display_mastering_luminance =
+                LAV_UINT32(hdr_meta->min_luminance);
+#undef LAV_UINT32
+            av_log(avctx, AV_LOG_DEBUG,
+                   "Mastering Display Metadata(in luminance):\n");
+            av_log(avctx, AV_LOG_DEBUG,
+                   "min_luminance=%d, max_luminance=%d\n",
+                   in_metadata->min_display_mastering_luminance,
+                   in_metadata->max_display_mastering_luminance);
+        }
+
+        if (hdr_meta->has_primaries) {
+            /* VAHdrMetaDataHDR10 stores primaries in G, B, R order while
+             * AVMasteringDisplayMetadata indexes them R, G, B; hence the
+             * index remapping below. */
+#define LAV_RED    0
+#define LAV_GREEN  1
+#define LAV_BLUE   2
+#define LAV_UINT16(a) (a.num)
+            in_metadata->display_primaries_x[0] =
+                LAV_UINT16(hdr_meta->display_primaries[LAV_GREEN][0]);
+            in_metadata->display_primaries_y[0] =
+                LAV_UINT16(hdr_meta->display_primaries[LAV_GREEN][1]);
+            in_metadata->display_primaries_x[1] =
+                LAV_UINT16(hdr_meta->display_primaries[LAV_BLUE][0]);
+            in_metadata->display_primaries_y[1] =
+                LAV_UINT16(hdr_meta->display_primaries[LAV_BLUE][1]);
+            in_metadata->display_primaries_x[2] =
+                LAV_UINT16(hdr_meta->display_primaries[LAV_RED][0]);
+            in_metadata->display_primaries_y[2] =
+                LAV_UINT16(hdr_meta->display_primaries[LAV_RED][1]);
+            in_metadata->white_point_x =
+                LAV_UINT16(hdr_meta->white_point[0]);
+            in_metadata->white_point_y =
+                LAV_UINT16(hdr_meta->white_point[1]);
+#undef LAV_RED
+#undef LAV_GREEN
+#undef LAV_BLUE
+#undef LAV_UINT16
+            av_log(avctx, AV_LOG_DEBUG,
+                   "Mastering Display Metadata(in primaries):\n");
+            av_log(avctx, AV_LOG_DEBUG,
+                   "G(%u,%u) B(%u,%u) R(%u,%u) WP(%u,%u)\n",
+                   in_metadata->display_primaries_x[0],
+                   in_metadata->display_primaries_y[0],
+                   in_metadata->display_primaries_x[1],
+                   in_metadata->display_primaries_y[1],
+                   in_metadata->display_primaries_x[2],
+                   in_metadata->display_primaries_y[2],
+                   in_metadata->white_point_x,
+                   in_metadata->white_point_y);
+        }
+    }
+
+    /* Content light level (MaxCLL/MaxFALL) is copied verbatim when the
+     * frame carries the corresponding side data. */
+    ctx->src_light = av_frame_get_side_data(input_frame,
+                                            AV_FRAME_DATA_CONTENT_LIGHT_LEVEL);
+    if (ctx->src_light) {
+        const AVContentLightMetadata *light_meta =
+            (const AVContentLightMetadata *) ctx->src_light->data;
+        in_metadata->max_content_light_level = light_meta->MaxCLL;
+        in_metadata->max_pic_average_light_level = light_meta->MaxFALL;
+
+        av_log(avctx, AV_LOG_DEBUG,
+               "Mastering Content Light Level (in):\n");
+        av_log(avctx, AV_LOG_DEBUG,
+               "MaxCLL(%u) MaxFALL(%u)\n",
+               in_metadata->max_content_light_level,
+               in_metadata->max_pic_average_light_level);
+    }
+
+    vas = vaUnmapBuffer(vpp_ctx->hwctx->display, ctx->buffer);
+    if (vas != VA_STATUS_SUCCESS) {
+        av_log(avctx, AV_LOG_ERROR, "Failed to unmap output buffers: "
+               "%d (%s).\n", vas, vaErrorStr(vas));
+        return AVERROR(EIO);
+    }
+
+    return 0;
+}
+
+/*
+ * Query the driver's HDR tone-mapping capabilities and create the
+ * VAProcFilterParameterBufferHDRToneMapping filter buffer.  The HDR10
+ * input metadata itself is filled per-frame by
+ * tonemap_vaapi_set_filter_params().
+ *
+ * Returns 0 on success, AVERROR(EIO) on VA call failure, or
+ * AVERROR(EINVAL) when the driver lacks HDR tone-mapping support.
+ */
+static int tonemap_vaapi_build_filter_params(AVFilterContext *avctx)
+{
+    VAAPIVPPContext *vpp_ctx   = avctx->priv;
+    HDRVAAPIContext *ctx       = avctx->priv;
+    VAStatus vas;
+    VAProcFilterCapHighDynamicRange hdr_cap;
+    /* vaQueryVideoProcFilterCaps() reads this as the number of cap
+     * entries provided and writes back the number returned, so it must
+     * be initialized before the call. */
+    int num_query_caps = 1;
+    VAProcFilterParameterBufferHDRToneMapping hdrtm_param;
+    VAHdrMetaDataHDR10  in_metadata;
+    int err;
+
+    vas = vaQueryVideoProcFilterCaps(vpp_ctx->hwctx->display,
+                                     vpp_ctx->va_context,
+                                     VAProcFilterHighDynamicRangeToneMapping,
+                                     &hdr_cap, &num_query_caps);
+    if (vas != VA_STATUS_SUCCESS) {
+        av_log(avctx, AV_LOG_ERROR, "Failed to query HDR caps "
+               "context: %d (%s).\n", vas, vaErrorStr(vas));
+        return AVERROR(EIO);
+    }
+
+    if (hdr_cap.metadata_type == VAProcHighDynamicRangeMetadataNone) {
+        av_log(avctx, AV_LOG_ERROR, "VAAPI driver doesn't support HDR\n");
+        return AVERROR(EINVAL);
+    }
+
+    if (! (VA_TONE_MAPPING_HDR_TO_HDR & hdr_cap.caps_flag) &&
+        ! (VA_TONE_MAPPING_HDR_TO_SDR & hdr_cap.caps_flag)) {
+        av_log(avctx, AV_LOG_ERROR,
+               "VAAPI driver doesn't support H2H or H2S\n");
+        return AVERROR(EINVAL);
+    }
+
+    memset(&hdrtm_param, 0, sizeof(hdrtm_param));
+    memset(&in_metadata, 0, sizeof(in_metadata));
+    hdrtm_param.type = VAProcFilterHighDynamicRangeToneMapping;
+    hdrtm_param.data.metadata_type = VAProcHighDynamicRangeMetadataHDR10;
+    /* NOTE(review): this stores the address of a stack-local structure in
+     * the VA buffer; tonemap_vaapi_set_filter_params() dereferences it
+     * after this function has returned.  The metadata storage should
+     * live in HDRVAAPIContext instead — confirm and fix the lifetime. */
+    hdrtm_param.data.metadata      = &in_metadata;
+    hdrtm_param.data.metadata_size = sizeof(VAHdrMetaDataHDR10);
+
+    /* Propagate buffer-creation failures instead of silently ignoring
+     * the return value. */
+    err = ff_vaapi_vpp_make_param_buffers2(avctx,
+                                           VAProcFilterParameterBufferType,
+                                           &hdrtm_param, sizeof(hdrtm_param),
+                                           1, &ctx->buffer);
+    if (err < 0)
+        return err;
+
+    return 0;
+}
+
+static int tonemap_vaapi_filter_frame(AVFilterLink *inlink, AVFrame *input_frame)
+{
+    AVFilterContext *avctx     = inlink->dst;
+    AVFilterLink *outlink      = avctx->outputs[0];
+    VAAPIVPPContext *vpp_ctx   = avctx->priv;
+    HDRVAAPIContext *ctx       = avctx->priv;
+    AVFrame *output_frame      = NULL;
+    VASurfaceID input_surface, output_surface;
+    VARectangle input_region,  output_region;
+
+    VAProcPipelineParameterBuffer params;
+    int err;
+
+    VAHdrMetaData              out_hdr_metadata;
+
+    av_log(avctx, AV_LOG_DEBUG, "Filter input: %s, %ux%u (%"PRId64").\n",
+           av_get_pix_fmt_name(input_frame->format),
+           input_frame->width, input_frame->height, input_frame->pts);
+
+    if (vpp_ctx->va_context == VA_INVALID_ID)
+        return AVERROR(EINVAL);
+
+    err = tonemap_vaapi_set_filter_params(avctx, input_frame);
+    if (err < 0)
+        goto fail;
+
+    input_surface = (VASurfaceID)(uintptr_t)input_frame->data[3];
+    av_log(avctx, AV_LOG_DEBUG, "Using surface %#x for transpose vpp input.\n",
+           input_surface);
+
+    output_frame = ff_get_video_buffer(outlink, vpp_ctx->output_width,
+                                       vpp_ctx->output_height);
+    if (!output_frame) {
+        err = AVERROR(ENOMEM);
+        goto fail;
+    }
+
+    output_surface = (VASurfaceID)(uintptr_t)output_frame->data[3];
+    av_log(avctx, AV_LOG_DEBUG, "Using surface %#x for transpose vpp output.\n",
+           output_surface);
+    memset(&params, 0, sizeof(params));
+    input_region = (VARectangle) {
+        .x      = 0,
+        .y      = 0,
+        .width  = input_frame->width,
+        .height = input_frame->height,
+    };
+
+    output_region = (VARectangle) {
+        .x      = 0,
+        .y      = 0,
+        .width  = output_frame->width,
+        .height = output_frame->height,
+    };
+
+    if (vpp_ctx->nb_filter_buffers) {
+        params.filters     = &vpp_ctx->filter_buffers[0];
+        params.num_filters = vpp_ctx->nb_filter_buffers;
+    }
+
+    params.surface = input_surface;
+    params.surface_region = &input_region;
+    params.surface_color_standard =
+        ff_vaapi_vpp_colour_standard(input_frame->colorspace);
+
+    params.output_region = &output_region;
+    params.output_background_color = 0xff000000;
+    params.output_color_standard = params.surface_color_standard;
+
+    params.pipeline_flags = 0;
+    params.filter_flags = VA_FRAME_PICTURE;
+
+    switch (ctx->hdr_type)
+    {
+    case HDR_VAAPI_H2H:
+        params.surface_color_standard = VAProcColorStandardExplicit;
+        params.output_color_standard = VAProcColorStandardExplicit;
+        params.input_color_properties.colour_primaries = 9;
+        params.output_color_properties.colour_primaries = 9;
+        params.input_color_properties.transfer_characteristics = 16;
+        params.output_color_properties.transfer_characteristics = 16;
+        break;
+    case HDR_VAAPI_H2S:
+        params.surface_color_standard = VAProcColorStandardExplicit;
+        params.input_color_properties.colour_primaries = 9;
+        params.input_color_properties.transfer_characteristics = 16;
+        break;
+    default:
+        break;
+    }
+
+    if (ctx->hdr_type == HDR_VAAPI_H2H) {
+        memset(&out_hdr_metadata, 0, sizeof(out_hdr_metadata));
+        if (ctx->master_display) {
+            if (10 != sscanf(ctx->master_display,
+                             "G(%hu|%hu)B(%hu|%hu)R(%hu|%hu)WP(%hu|%hu)L(%u|%u)",
+                             &ctx->out_metadata.display_primaries_x[0],
+                             &ctx->out_metadata.display_primaries_y[0],
+                             &ctx->out_metadata.display_primaries_x[1],
+                             &ctx->out_metadata.display_primaries_y[1],
+                             &ctx->out_metadata.display_primaries_x[2],
+                             &ctx->out_metadata.display_primaries_y[2],
+                             &ctx->out_metadata.white_point_x,
+                             &ctx->out_metadata.white_point_y,
+                             &ctx->out_metadata.max_display_mastering_luminance,
+                             &ctx->out_metadata.min_display_mastering_luminance)) {
+                av_log(avctx, AV_LOG_ERROR,
+                       "Option mastering-display input invalid\n");
+                return AVERROR(EINVAL);
+            } else {
+                AVFrameSideData *metadata;
+                AVMasteringDisplayMetadata *hdr_meta;
+
+                metadata = av_frame_new_side_data(output_frame,
+                                                  AV_FRAME_DATA_MASTERING_DISPLAY_METADATA,
+                                                  sizeof(AVMasteringDisplayMetadata));
+                if (!metadata) {
+                    av_log(avctx, AV_LOG_ERROR, "Cannot create new side data for output\n");
+                    return AVERROR(ENOMEM);
+                }
+
+                if (ctx->src_display) {
+                    memcpy(metadata->data,
+                           ctx->src_display->data,
+                           sizeof(AVMasteringDisplayMetadata));
+                }
+
+                hdr_meta = (AVMasteringDisplayMetadata *)metadata->data;
+#define LAV_RED    0
+#define LAV_GREEN  1
+#define LAV_BLUE   2
+                hdr_meta->display_primaries[LAV_GREEN][0].num = ctx->out_metadata.display_primaries_x[0];
+                hdr_meta->display_primaries[LAV_GREEN][1].num = ctx->out_metadata.display_primaries_y[0];
+                hdr_meta->display_primaries[LAV_BLUE][0].num  = ctx->out_metadata.display_primaries_x[1];
+                hdr_meta->display_primaries[LAV_BLUE][1].num  = ctx->out_metadata.display_primaries_y[1];
+                hdr_meta->display_primaries[LAV_RED][0].num   = ctx->out_metadata.display_primaries_x[2];
+                hdr_meta->display_primaries[LAV_RED][1].num   = ctx->out_metadata.display_primaries_y[2];
+                hdr_meta->white_point[0].num = ctx->out_metadata.white_point_x;
+                hdr_meta->white_point[1].num = ctx->out_metadata.white_point_y;
+#undef LAV_RED
+#undef LAV_GREEN
+#undef LAV_BLUE
+                hdr_meta->has_primaries = 1;
+
+                hdr_meta->max_luminance.num = ctx->out_metadata.max_display_mastering_luminance;
+                hdr_meta->min_luminance.num = ctx->out_metadata.min_display_mastering_luminance;
+                hdr_meta->has_luminance = 1;
+
+                av_log(avctx, AV_LOG_DEBUG,
+                       "Mastering Display Metadata(out luminance):\n");
+                av_log(avctx, AV_LOG_DEBUG,
+                       "min_luminance=%u, max_luminance=%u\n",
+                       ctx->out_metadata.min_display_mastering_luminance,
+                       ctx->out_metadata.max_display_mastering_luminance);
+
+                av_log(avctx, AV_LOG_DEBUG,
+                       "Mastering Display Metadata(out primaries):\n");
+                av_log(avctx, AV_LOG_DEBUG,
+                       "G(%u,%u) B(%u,%u) R(%u,%u) WP(%u,%u)\n",
+                       ctx->out_metadata.display_primaries_x[0],
+                       ctx->out_metadata.display_primaries_y[0],
+                       ctx->out_metadata.display_primaries_x[1],
+                       ctx->out_metadata.display_primaries_y[1],
+                       ctx->out_metadata.display_primaries_x[2],
+                       ctx->out_metadata.display_primaries_y[2],
+                       ctx->out_metadata.white_point_x,
+                       ctx->out_metadata.white_point_y);
+            }
+        }
+
+        if (ctx->content_light) {
+            if (2 != sscanf(ctx->content_light,
+                            "CLL(%hu)FALL(%hu)",
+                            &ctx->out_metadata.max_content_light_level,
+                            &ctx->out_metadata.max_pic_average_light_level)) {
+                av_log(avctx, AV_LOG_ERROR,
+                       "Option content-light input invalid\n");
+                return AVERROR(EINVAL);
+            } else {
+                AVFrameSideData *metadata_lt;
+                AVContentLightMetadata *hdr_meta_lt;
+
+                metadata_lt = av_frame_new_side_data(output_frame,
+                                                     AV_FRAME_DATA_CONTENT_LIGHT_LEVEL,
+                                                     sizeof(AVContentLightMetadata));
+                if (!metadata_lt) {
+                    av_log(avctx, AV_LOG_ERROR, "Cannot create new side data for output\n");
+                    return AVERROR(ENOMEM);
+                }
+
+                if (ctx->src_light) {
+                    memcpy(metadata_lt->data,
+                           ctx->src_light->data,
+                           sizeof(AVContentLightMetadata));
+                }
+
+                hdr_meta_lt = (AVContentLightMetadata *)metadata_lt->data;
+                hdr_meta_lt->MaxCLL = ctx->out_metadata.max_content_light_level;
+                hdr_meta_lt->MaxFALL = ctx->out_metadata.max_pic_average_light_level;
+
+                av_log(avctx, AV_LOG_DEBUG,
+                       "Mastering Content Light Level (out):\n");
+                av_log(avctx, AV_LOG_DEBUG,
+                       "MaxCLL(%u) MaxFALL(%u)\n",
+                       ctx->out_metadata.max_content_light_level,
+                       ctx->out_metadata.max_pic_average_light_level);
+            }
+        }
+
+        out_hdr_metadata.metadata_type = VAProcHighDynamicRangeMetadataHDR10;
+        out_hdr_metadata.metadata      = &ctx->out_metadata;
+        out_hdr_metadata.metadata_size = sizeof(VAHdrMetaDataHDR10);
+
+        params.output_hdr_metadata = &out_hdr_metadata;
+    }
+
+    err = ff_vaapi_vpp_render_picture(avctx, &params, output_surface);
+    if (err < 0)
+        goto fail;
+
+    err = av_frame_copy_props(output_frame, input_frame);
+    if (err < 0)
+        goto fail;
+    av_frame_free(&input_frame);
+
+    av_log(avctx, AV_LOG_DEBUG, "Filter output: %s, %ux%u (%"PRId64").\n",
+           av_get_pix_fmt_name(output_frame->format),
+           output_frame->width, output_frame->height, output_frame->pts);
+
+    return ff_filter_frame(outlink, output_frame);
+
+fail:
+    av_frame_free(&input_frame);
+    av_frame_free(&output_frame);
+    return err;
+}
+
+/**
+ * Filter init: set up the shared VAAPI VPP context, install the
+ * tonemap-specific filter-parameter builder, and choose the output pixel
+ * format from the requested tonemap type.
+ */
+static av_cold int tonemap_vaapi_init(AVFilterContext *avctx)
+{
+    VAAPIVPPContext *vpp_ctx = avctx->priv;
+    HDRVAAPIContext *ctx     = avctx->priv;
+
+    ff_vaapi_vpp_ctx_init(avctx);
+    vpp_ctx->build_filter_params = tonemap_vaapi_build_filter_params;
+    vpp_ctx->pipeline_uninit = ff_vaapi_vpp_pipeline_uninit;
+
+    switch (ctx->hdr_type) {
+    case HDR_VAAPI_H2H:
+        /* HDR to HDR: 10-bit RGB output. */
+        vpp_ctx->output_format = AV_PIX_FMT_GBRP10;
+        break;
+    case HDR_VAAPI_H2S:
+        /* HDR to SDR: 8-bit RGB output. */
+        vpp_ctx->output_format = AV_PIX_FMT_ARGB;
+        break;
+    default:
+        vpp_ctx->output_format = AV_PIX_FMT_NONE;
+        break;
+    }
+
+    return 0;
+}
+
+/**
+ * Filter uninit: release the shared VAAPI VPP context state.
+ */
+static void tonemap_vaapi_vpp_ctx_uninit(AVFilterContext *avctx)
+{
+    /* "return expr;" in a void function is a constraint violation in
+     * ISO C (C11 6.8.6.4); call the helper and fall off the end instead. */
+    ff_vaapi_vpp_ctx_uninit(avctx);
+}
+
+/**
+ * Negotiate software pixel formats: P010 input only; RGB10 (H2H) or
+ * RGB8 (H2S) output, then defer to the common VAAPI VPP negotiation.
+ */
+static int tonemap_vaapi_vpp_query_formats(AVFilterContext *avctx)
+{
+    /* ff_make_format_list() walks the array until it hits AV_PIX_FMT_NONE,
+     * so every list must carry an explicit terminator; without it the
+     * iteration reads past the end of the array. */
+    static const enum AVPixelFormat pix_in_fmts[] = {
+        AV_PIX_FMT_P010,     // Input
+        AV_PIX_FMT_NONE,
+    };
+
+    static const enum AVPixelFormat pix_out_fmts[] = {
+        AV_PIX_FMT_GBRP10,   // H2H RGB10
+        AV_PIX_FMT_ARGB,     // H2S RGB8
+        AV_PIX_FMT_NONE,
+    };
+
+    int err;
+
+    err = ff_formats_ref(ff_make_format_list(pix_in_fmts),
+                         &avctx->inputs[0]->out_formats);
+    if (err < 0)
+        return err;
+
+    err = ff_formats_ref(ff_make_format_list(pix_out_fmts),
+                         &avctx->outputs[0]->in_formats);
+    if (err < 0)
+        return err;
+
+    return ff_vaapi_vpp_query_formats(avctx);
+}
+
+/* Thin pass-through to the common VAAPI VPP input-link configuration. */
+static int tonemap_vaapi_vpp_config_input(AVFilterLink *inlink)
+{
+    int ret = ff_vaapi_vpp_config_input(inlink);
+    return ret;
+}
+
+/* Thin pass-through to the common VAAPI VPP output-link configuration. */
+static int tonemap_vaapi_vpp_config_output(AVFilterLink *outlink)
+{
+    int ret = ff_vaapi_vpp_config_output(outlink);
+    return ret;
+}
+
+#define OFFSET(x) offsetof(HDRVAAPIContext, x)
+#define FLAGS (AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM)
+/* Filter options:
+ *   type    - tonemap direction: h2h (HDR->HDR, RGB10 out) or
+ *             h2s (HDR->SDR, RGB8 out)
+ *   display - mastering display metadata string for the output stream;
+ *             format presumably "G(x,y)B(x,y)R(x,y)WP(x,y)L(max,min)"
+ *             going by the debug output in the frame handler -- TODO confirm
+ *             against the sscanf in the parsing code
+ *   light   - content light level for the output, parsed as "CLL(x)FALL(y)"
+ *             by the frame handler
+ * NOTE(review): "type" allows values 0..2 but only 0 (h2h) and 1 (h2s) have
+ * named constants here -- confirm whether a third mode exists or the max
+ * should be HDR_VAAPI_H2S. */
+static const AVOption tonemap_vaapi_options[] = {
+    { "type",    "hdr type",            OFFSET(hdr_type), AV_OPT_TYPE_INT, { .i64 = HDR_VAAPI_H2H }, 0, 2, FLAGS, "type" },
+        { "h2h", "vaapi P010 to RGB10", 0, AV_OPT_TYPE_CONST, {.i64=HDR_VAAPI_H2H}, INT_MIN, INT_MAX, FLAGS, "type" },
+        { "h2s", "vaapi P010 to RGB8",  0, AV_OPT_TYPE_CONST, {.i64=HDR_VAAPI_H2S}, INT_MIN, INT_MAX, FLAGS, "type" },
+    { "display", "set master display",  OFFSET(master_display), AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+    { "light",   "set content light",   OFFSET(content_light),  AV_OPT_TYPE_STRING, {.str=NULL}, CHAR_MIN, CHAR_MAX, FLAGS },
+    { NULL }
+};
+
+
+AVFILTER_DEFINE_CLASS(tonemap_vaapi);
+
+/* Single video input pad: frames are processed by
+ * tonemap_vaapi_filter_frame(); input-link setup is delegated to the
+ * common VAAPI VPP code via tonemap_vaapi_vpp_config_input(). */
+static const AVFilterPad tonemap_vaapi_inputs[] = {
+    {
+        .name         = "default",
+        .type         = AVMEDIA_TYPE_VIDEO,
+        .filter_frame = &tonemap_vaapi_filter_frame,
+        .config_props = &tonemap_vaapi_vpp_config_input,
+    },
+    { NULL }
+};
+
+/* Single video output pad; output-link setup is delegated to the common
+ * VAAPI VPP code via tonemap_vaapi_vpp_config_output(). */
+static const AVFilterPad tonemap_vaapi_outputs[] = {
+    {
+        .name = "default",
+        .type = AVMEDIA_TYPE_VIDEO,
+        .config_props = &tonemap_vaapi_vpp_config_output,
+    },
+    { NULL }
+};
+
+/* VAAPI HDR tonemapping filter: maps P010 HDR input to RGB10 (H2H) or
+ * RGB8 (H2S) output using the VA-API video-processing pipeline.
+ * Marked HWFRAME_AWARE since it negotiates hardware frames contexts. */
+AVFilter ff_vf_tonemap_vaapi = {
+    .name           = "tonemap_vaapi",
+    .description    = NULL_IF_CONFIG_SMALL("VAAPI VPP for tonemap"),
+    .priv_size      = sizeof(HDRVAAPIContext),
+    .init           = &tonemap_vaapi_init,
+    .uninit         = &tonemap_vaapi_vpp_ctx_uninit,
+    .query_formats  = &tonemap_vaapi_vpp_query_formats,
+    .inputs         = tonemap_vaapi_inputs,
+    .outputs        = tonemap_vaapi_outputs,
+    .priv_class     = &tonemap_vaapi_class,
+    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
+};
diff --git a/libavutil/hwcontext_vaapi.c b/libavutil/hwcontext_vaapi.c
index 8624369bb9..f18de1d778 100644
--- a/libavutil/hwcontext_vaapi.c
+++ b/libavutil/hwcontext_vaapi.c
@@ -124,6 +124,9 @@  static const VAAPIFormatDescriptor vaapi_format_map[] = {
 #endif
     MAP(BGRA, RGB32,   BGRA, 0),
     MAP(BGRX, RGB32,   BGR0, 0),
+#ifdef VA_RT_FORMAT_RGB32_10
+    MAP(RGBA, RGB32_10, GBRP10, 0),
+#endif
     MAP(RGBA, RGB32,   RGBA, 0),
     MAP(RGBX, RGB32,   RGB0, 0),
 #ifdef VA_FOURCC_ABGR