
[FFmpeg-devel] Added - HW accelerated H.264 and HEVC encoding for AMD GPUs based on AMF SDK

Message ID 0658a935-9d75-4f36-9677-d78f1023bf12@mmironov-dev.local
State Superseded

Commit Message

mmironov Oct. 27, 2017, 6:09 p.m. UTC
From b1b697aed459947cfa04bccdca0f7cfb5c8be72c Mon Sep 17 00:00:00 2001
From: mmironov <mikhail.mironov@amd.com>
Date: Fri, 27 Oct 2017 13:03:15 -0400
Subject: [PATCH] Added: HW accelerated H.264 and HEVC encoding for AMD GPUs
 based on AMF SDK

Signed-off-by: mmironov <mikhail.mironov@amd.com>
---
 Changelog                |    3 +-
 compat/amd/amfsdkenc.h   | 1750 ++++++++++++++++++++++++++++++++++++++++++++++
 configure                |   28 +
 libavcodec/Makefile      |    4 +
 libavcodec/allcodecs.c   |    4 +
 libavcodec/amfenc.c      |  463 ++++++++++++
 libavcodec/amfenc.h      |  131 ++++
 libavcodec/amfenc_h264.c |  467 +++++++++++++
 libavcodec/amfenc_hevc.c |  354 ++++++++++
 libavcodec/version.h     |    4 +-
 10 files changed, 3205 insertions(+), 3 deletions(-)
 create mode 100644 compat/amd/amfsdkenc.h
 create mode 100644 libavcodec/amfenc.c
 create mode 100644 libavcodec/amfenc.h
 create mode 100644 libavcodec/amfenc_h264.c
 create mode 100644 libavcodec/amfenc_hevc.c

Comments

Carl Eugen Hoyos Oct. 28, 2017, 7:18 p.m. UTC | #1
2017-10-27 20:09 GMT+02:00 mmironov <mikhail.mironov@amd.com>:
> +const enum AVPixelFormat ff_amf_pix_fmts[] = {
> +    AV_PIX_FMT_NV12,

> +    AV_PIX_FMT_BGRA,
> +    AV_PIX_FMT_ARGB,
> +    AV_PIX_FMT_RGBA,

This is wrong: your encoders do not convert transparency
information, and there are 32-bit RGB formats without an alpha channel.
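
For illustration, a minimal sketch of the list being suggested, assuming the
encoder really does just ignore the fourth byte (the alpha-less variants
already exist as FFmpeg pixel formats):

    const enum AVPixelFormat ff_amf_pix_fmts[] = {
        AV_PIX_FMT_NV12,
        AV_PIX_FMT_BGR0,    // instead of AV_PIX_FMT_BGRA
        AV_PIX_FMT_0RGB,    // instead of AV_PIX_FMT_ARGB
        AV_PIX_FMT_RGB0,    // instead of AV_PIX_FMT_RGBA
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUYV422,
        AV_PIX_FMT_D3D11,
        AV_PIX_FMT_NONE
    };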

> +    { AV_PIX_FMT_NV12,       AMF_SURFACE_NV12 },
> +    { AV_PIX_FMT_D3D11,      AMF_SURFACE_NV12 },

Just curious: Can you explain this in simple words?

I will try not to comment on the headers you sent,

Carl Eugen
Mark Thompson Oct. 28, 2017, 9:28 p.m. UTC | #2
On 27/10/17 19:09, mmironov wrote:
> From b1b697aed459947cfa04bccdca0f7cfb5c8be72c Mon Sep 17 00:00:00 2001
> From: mmironov <mikhail.mironov@amd.com>
> Date: Fri, 27 Oct 2017 13:03:15 -0400
> Subject: [PATCH] Added: HW accelerated H.264 and HEVC encoding for AMD GPUs
>  based on AMF SDK

There isn't any sense in which this is "accelerated" is there?  Just say it's a hardware encoder (as you already do in the changelog).

> 
> Signed-off-by: mmironov <mikhail.mironov@amd.com>
> ---
>  Changelog                |    3 +-
>  compat/amd/amfsdkenc.h   | 1750 ++++++++++++++++++++++++++++++++++++++++++++++
>  configure                |   28 +
>  libavcodec/Makefile      |    4 +
>  libavcodec/allcodecs.c   |    4 +
>  libavcodec/amfenc.c      |  463 ++++++++++++
>  libavcodec/amfenc.h      |  131 ++++
>  libavcodec/amfenc_h264.c |  467 +++++++++++++
>  libavcodec/amfenc_hevc.c |  354 ++++++++++
>  libavcodec/version.h     |    4 +-
>  10 files changed, 3205 insertions(+), 3 deletions(-)
>  create mode 100644 compat/amd/amfsdkenc.h
>  create mode 100644 libavcodec/amfenc.c
>  create mode 100644 libavcodec/amfenc.h
>  create mode 100644 libavcodec/amfenc_h264.c
>  create mode 100644 libavcodec/amfenc_hevc.c
> 
> diff --git a/Changelog b/Changelog
> index 6592d86..f0d22fa 100644
> --- a/Changelog
> +++ b/Changelog
> @@ -6,7 +6,8 @@ version <next>:
>  - Dropped support for OpenJPEG versions 2.0 and below. Using OpenJPEG now
>    requires 2.1 (or later) and pkg-config.
>  - VDA dropped (use VideoToolbox instead)
> -
> +- AMF H.264 encoder
> +- AMF HEVC encoder
>  
>  version 3.4:
>  - deflicker video filter
> diff --git a/compat/amd/amfsdkenc.h b/compat/amd/amfsdkenc.h
> new file mode 100644
> index 0000000..a640c17
> --- /dev/null
> +++ b/compat/amd/amfsdkenc.h

(Ignoring the header, will consider this separately.)

> diff --git a/configure b/configure
> index 0e1ccaa..229443f 100755
> --- a/configure
> +++ b/configure
> @@ -304,6 +304,7 @@ External library support:
>  
>    The following libraries provide various hardware acceleration features:
>    --disable-audiotoolbox   disable Apple AudioToolbox code [autodetect]
> +  --disable-amf            disable AMF video encoding code [autodetect]
>    --disable-cuda           disable dynamically linked Nvidia CUDA code [autodetect]
>    --enable-cuda-sdk        enable CUDA features that require the CUDA SDK [no]
>    --disable-cuvid          disable Nvidia CUVID support [autodetect]
> @@ -1643,6 +1644,7 @@ EXTERNAL_LIBRARY_LIST="
>  HWACCEL_AUTODETECT_LIBRARY_LIST="
>      audiotoolbox
>      crystalhd
> +	amf
>      cuda
>      cuvid
>      d3d11va

Lists in configure should be kept in alphabetical order.

> @@ -2785,12 +2787,16 @@ scale_npp_filter_deps="cuda libnpp"
>  scale_cuda_filter_deps="cuda_sdk"
>  thumbnail_cuda_filter_deps="cuda_sdk"
>  
> +amf_deps_any="dlopen LoadLibrary"
> +amf_encoder_deps="amf"
> +
>  nvenc_deps="cuda"
>  nvenc_deps_any="libdl LoadLibrary"
>  nvenc_encoder_deps="nvenc"
>  
>  h263_v4l2m2m_decoder_deps="v4l2_m2m h263_v4l2_m2m"
>  h263_v4l2m2m_encoder_deps="v4l2_m2m h263_v4l2_m2m"
> +h264_amf_encoder_deps="amf"
>  h264_crystalhd_decoder_select="crystalhd h264_mp4toannexb_bsf h264_parser"
>  h264_cuvid_decoder_deps="cuda cuvid"
>  h264_cuvid_decoder_select="h264_mp4toannexb_bsf"
> @@ -2809,6 +2815,7 @@ h264_vaapi_encoder_deps="VAEncPictureParameterBufferH264"
>  h264_vaapi_encoder_select="cbs_h264 vaapi_encode"
>  h264_v4l2m2m_decoder_deps="v4l2_m2m h264_v4l2_m2m"
>  h264_v4l2m2m_encoder_deps="v4l2_m2m h264_v4l2_m2m"
> +hevc_amf_encoder_deps="amf"
>  hevc_cuvid_decoder_deps="cuda cuvid"
>  hevc_cuvid_decoder_select="hevc_mp4toannexb_bsf"
>  hevc_mediacodec_decoder_deps="mediacodec"
> @@ -2830,6 +2837,8 @@ mjpeg_vaapi_encoder_select="vaapi_encode jpegtables"
>  mpeg1_cuvid_decoder_deps="cuda cuvid"
>  mpeg1_v4l2m2m_decoder_deps="v4l2_m2m mpeg1_v4l2_m2m"
>  mpeg2_crystalhd_decoder_select="crystalhd"
> +amf_h264_encoder_select="h264_amf_encoder"
> +amf_hevc_encoder_select="hevc_amf_encoder"

These names aren't mentioned anywhere else.

>  mpeg2_cuvid_decoder_deps="cuda cuvid"
>  mpeg2_mmal_decoder_deps="mmal"
>  mpeg2_mediacodec_decoder_deps="mediacodec"
> @@ -6305,6 +6314,18 @@ else
>      disable cuda cuvid nvenc
>  fi
>  
> +if enabled x86; then
> +    case $target_os in
> +        mingw32*|mingw64*|win32|win64|cygwin*)
> +            ;;
> +        *)
> +            disable  amf
> +            ;;
> +    esac
> +else
> +    disable amf
> +fi

Why this OS test?  It should just be going by whether the relevant build packages are present.

> +
>  enabled nvenc &&
>      check_cc -I$source_path <<EOF || disable nvenc
>  #include "compat/nvenc/nvEncodeAPI.h"
> @@ -6313,6 +6334,13 @@ void f(void) { struct { const GUID guid; } s[] = { { NV_ENC_PRESET_HQ_GUID } };
>  int main(void) { return 0; }
>  EOF
>  
> +enabled amf &&
> +    check_cc -I$source_path <<EOF || disable amf
> +#include "compat/amd/amfsdkenc.h"
> +AMFFactory *factory;
> +int main(void) { return 0; }
> +EOF
> +
>  # Funny iconv installations are not unusual, so check it after all flags have been set
>  if enabled libc_iconv; then
>      check_func_headers iconv.h iconv
> diff --git a/libavcodec/Makefile b/libavcodec/Makefile
> index bc4d7da..cbf45ac 100644
> --- a/libavcodec/Makefile
> +++ b/libavcodec/Makefile
> @@ -50,6 +50,7 @@ OBJS = allcodecs.o                                                      \
>  # subsystems
>  OBJS-$(CONFIG_AANDCTTABLES)            += aandcttab.o
>  OBJS-$(CONFIG_AC3DSP)                  += ac3dsp.o ac3.o ac3tab.o
> +OBJS-$(CONFIG_AMF)                     += amfenc.o
>  OBJS-$(CONFIG_AUDIO_FRAME_QUEUE)       += audio_frame_queue.o
>  OBJS-$(CONFIG_AUDIODSP)                += audiodsp.o
>  OBJS-$(CONFIG_BLOCKDSP)                += blockdsp.o
> @@ -334,6 +335,7 @@ OBJS-$(CONFIG_H264_DECODER)            += h264dec.o h264_cabac.o h264_cavlc.o \
>  OBJS-$(CONFIG_H264_CUVID_DECODER)      += cuvid.o
>  OBJS-$(CONFIG_H264_MEDIACODEC_DECODER) += mediacodecdec.o
>  OBJS-$(CONFIG_H264_MMAL_DECODER)       += mmaldec.o
> +OBJS-$(CONFIG_H264_AMF_ENCODER)        += amfenc_h264.o
>  OBJS-$(CONFIG_H264_NVENC_ENCODER)      += nvenc_h264.o
>  OBJS-$(CONFIG_NVENC_ENCODER)           += nvenc_h264.o
>  OBJS-$(CONFIG_NVENC_H264_ENCODER)      += nvenc_h264.o
> @@ -352,6 +354,7 @@ OBJS-$(CONFIG_HEVC_DECODER)            += hevcdec.o hevc_mvs.o \
>                                            hevcdsp.o hevc_filter.o hevc_data.o
>  OBJS-$(CONFIG_HEVC_CUVID_DECODER)      += cuvid.o
>  OBJS-$(CONFIG_HEVC_MEDIACODEC_DECODER) += mediacodecdec.o
> +OBJS-$(CONFIG_HEVC_AMF_ENCODER)        += amfenc_hevc.o
>  OBJS-$(CONFIG_HEVC_NVENC_ENCODER)      += nvenc_hevc.o
>  OBJS-$(CONFIG_NVENC_HEVC_ENCODER)      += nvenc_hevc.o
>  OBJS-$(CONFIG_HEVC_QSV_DECODER)        += qsvdec_h2645.o
> @@ -1056,6 +1059,7 @@ SKIPHEADERS-$(CONFIG_JNI)              += ffjni.h
>  SKIPHEADERS-$(CONFIG_LIBVPX)           += libvpx.h
>  SKIPHEADERS-$(CONFIG_LIBWEBP_ENCODER)  += libwebpenc_common.h
>  SKIPHEADERS-$(CONFIG_MEDIACODEC)       += mediacodecdec_common.h mediacodec_surface.h mediacodec_wrapper.h mediacodec_sw_buffer.h
> +SKIPHEADERS-$(CONFIG_AMF)              += amfenc.h
>  SKIPHEADERS-$(CONFIG_NVENC)            += nvenc.h
>  SKIPHEADERS-$(CONFIG_QSV)              += qsv.h qsv_internal.h
>  SKIPHEADERS-$(CONFIG_QSVDEC)           += qsvdec.h
> diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
> index 8369126..d597540 100644
> --- a/libavcodec/allcodecs.c
> +++ b/libavcodec/allcodecs.c
> @@ -649,6 +649,8 @@ static void register_all(void)
>       * above is available */
>      REGISTER_ENCODER(H263_V4L2M2M,      h263_v4l2m2m);
>      REGISTER_ENCDEC (LIBOPENH264,       libopenh264);
> +    REGISTER_ENCODER(H264_AMF,          h264_amf);
> +	REGISTER_ENCODER(H264_AMF,          h264_amf_d3d11va);

No tabs.  Why is the d3d11 version separate?  The encoder should be able to accept multiple pixfmts.

>      REGISTER_DECODER(H264_CUVID,        h264_cuvid);
>      REGISTER_ENCODER(H264_NVENC,        h264_nvenc);
>      REGISTER_ENCODER(H264_OMX,          h264_omx);
> @@ -661,6 +663,8 @@ static void register_all(void)
>      REGISTER_ENCODER(NVENC_H264,        nvenc_h264);
>      REGISTER_ENCODER(NVENC_HEVC,        nvenc_hevc);
>  #endif
> +    REGISTER_ENCODER(HEVC_AMF,          hevc_amf);
> +	REGISTER_ENCODER(HEVC_AMF,          hevc_amf_d3d11va);

Tab.

>      REGISTER_DECODER(HEVC_CUVID,        hevc_cuvid);
>      REGISTER_DECODER(HEVC_MEDIACODEC,   hevc_mediacodec);
>      REGISTER_ENCODER(HEVC_NVENC,        hevc_nvenc);
> diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c
> new file mode 100644
> index 0000000..8717928
> --- /dev/null
> +++ b/libavcodec/amfenc.c
> @@ -0,0 +1,463 @@
> +/*
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +#include "amfenc.h"
> +
> +//#include "compat/amd/amf/public/include/components/VideoEncoderVCE.h"
> +//#include "compat/amd/amf/public/include/components/VideoEncoderHEVC.h"

Why are these commented out?

> +
> +#include "libavutil/time.h"
> +#include "libavutil/imgutils.h"
> +#include "libavutil/hwcontext.h"
> +#include "libavutil/hwcontext_d3d11va.h"
> +#include "libavutil/avassert.h"
> +#include "libavutil/mem.h"
> +#include "libavutil/pixdesc.h"
> +#include "libavutil/hwcontext.h"

Headers should be in alphabetical order, and not include the same thing multiple times.
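
For reference, the same includes in alphabetical order with the duplicate
dropped (no other change):

    #include "libavutil/avassert.h"
    #include "libavutil/hwcontext.h"
    #include "libavutil/hwcontext_d3d11va.h"
    #include "libavutil/imgutils.h"
    #include "libavutil/mem.h"
    #include "libavutil/pixdesc.h"
    #include "libavutil/time.h"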

> +#include "internal.h"
> +
> +#include <d3d11.h>
> +
> +#ifdef _WIN32
> +#include "compat/w32dlfcn.h"
> +#else
> +#include <dlfcn.h>
> +#endif
> +
> +#define FFMPEG_AMF_WRITER_ID L"ffmpeg_amf"
> +#define AMF_DEBUG_TRACE 0
> +
> +const enum AVPixelFormat ff_amf_pix_fmts[] = {
> +    AV_PIX_FMT_NV12,
> +    AV_PIX_FMT_BGRA,
> +    AV_PIX_FMT_ARGB,
> +    AV_PIX_FMT_RGBA,
> +    AV_PIX_FMT_YUV420P,
> +    AV_PIX_FMT_YUYV422,
> +    AV_PIX_FMT_D3D11,
> +    AV_PIX_FMT_NONE
> +};
> +
> +typedef struct FormatMap {
> +    enum AVPixelFormat       av_format;
> +    enum AMF_SURFACE_FORMAT  amf_format;
> +} FormatMap;
> +
> +static const FormatMap format_map[] =
> +{
> +    { AV_PIX_FMT_NONE,       AMF_SURFACE_UNKNOWN },

Seems a bit pointless to include NONE in this list explicitly.

> +    { AV_PIX_FMT_NV12,       AMF_SURFACE_NV12 },
> +    { AV_PIX_FMT_BGRA,       AMF_SURFACE_BGRA },
> +    { AV_PIX_FMT_ARGB,       AMF_SURFACE_ARGB },
> +    { AV_PIX_FMT_RGBA,       AMF_SURFACE_RGBA },
> +    { AV_PIX_FMT_GRAY8,      AMF_SURFACE_GRAY8 },
> +    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YUV420P },
> +    { AV_PIX_FMT_BGR0,       AMF_SURFACE_BGRA },
> +    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YV12 },
> +    { AV_PIX_FMT_YUYV422,    AMF_SURFACE_YUY2 },

Do all of these formats actually work?

> +    { AV_PIX_FMT_D3D11,      AMF_SURFACE_NV12 },

D3D11 surfaces need not be NV12.  The actual format is in AVHWFramesContext.sw_format - if you only support 8-bit then something nasty probably happens if you give it P010 surfaces.
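
A rough sketch of what using the frames context could look like here
(untested, and assuming the hw_frames_ctx picked up during init is the one
the frames actually come from):

    // derive the AMF surface format from the software format backing the
    // D3D11 frames instead of hardcoding NV12
    if (avctx->pix_fmt == AV_PIX_FMT_D3D11 && avctx->hw_frames_ctx) {
        AVHWFramesContext *frames_ctx =
            (AVHWFramesContext *)avctx->hw_frames_ctx->data;
        ctx->format = amf_av_to_amf_format(frames_ctx->sw_format);
    } else {
        ctx->format = amf_av_to_amf_format(avctx->pix_fmt);
    }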

> +};
> +
> +static enum AMF_SURFACE_FORMAT amf_av_to_amf_format(enum AVPixelFormat fmt) 
> +{
> +    for (int i = 0; i < amf_countof(format_map); i++) {
> +        if (format_map[i].av_format == fmt) {
> +            return format_map[i].amf_format;
> +        }
> +    }
> +    return AMF_SURFACE_UNKNOWN;
> +}
> +
> +// virtual functions decalred

What does this comment mean?

> +static void AMF_CDECL_CALL AMFTraceWriter_Write(AMFTraceWriter* pThis,
> +    const wchar_t* scope, const wchar_t* message) 

"type *variable" is preferred to "type* variable".

Also, trailing space.

> +{
> +    AmfTraceWriter *tracer = (AmfTraceWriter*)pThis;
> +#if AMF_DEBUG_TRACE
> +    av_log(tracer->avctx, AV_LOG_INFO, "%ls: %ls", scope, message);
> +#else
> +    av_log(tracer->avctx, AV_LOG_TRACE, "%ls: %ls", scope, message);
> +#endif

What sort of messages actually come out of this trace function?

If this is intended for debugging (and should never be seen by the user), just make them AV_LOG_DEBUG and drop the AMF_DEBUG_TRACE define.
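
i.e. something like:

    static void AMF_CDECL_CALL AMFTraceWriter_Write(AMFTraceWriter *pThis,
                                                    const wchar_t *scope,
                                                    const wchar_t *message)
    {
        AmfTraceWriter *tracer = (AmfTraceWriter *)pThis;
        // debug-only tracing, never shown to users at default log levels
        av_log(tracer->avctx, AV_LOG_DEBUG, "%ls: %ls", scope, message);
    }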

> +}
> +
> +static void AMF_CDECL_CALL AMFTraceWriter_Flush(AMFTraceWriter* pThis) 
> +{
> +}
> +
> +static AMFTraceWriterVtbl tracer_vtbl =
> +{
> +    .Write = AMFTraceWriter_Write,
> +    .Flush = AMFTraceWriter_Flush,
> +};
> +
> +static int amf_load_library(AVCodecContext *avctx) 
> +{
> +    AmfContext             *ctx = avctx->priv_data;
> +    AMFInit_Fn              init_fun = 0;

NULL

> +    AMFQueryVersion_Fn      version_fun = 0;

NULL

> +
> +    ctx->library = dlopen(AMF_DLL_NAMEA, RTLD_NOW | RTLD_LOCAL);
> +    AMF_RETURN_IF_FALSE(ctx, ctx->library != 0, 

Just ctx->library.

Also trailing space.

> +        AVERROR_UNKNOWN, "DLL %s failed to open. \n", AMF_DLL_NAMEA);
> +
> +    init_fun = (AMFInit_Fn)dlsym(ctx->library, AMF_INIT_FUNCTION_NAME);
> +    AMF_RETURN_IF_FALSE(ctx, init_fun != 0, AVERROR_UNKNOWN, "DLL %s failed to find function %s. \n", AMF_DLL_NAMEA, AMF_INIT_FUNCTION_NAME);
> +
> +    version_fun = (AMFQueryVersion_Fn)dlsym(ctx->library, AMF_QUERY_VERSION_FUNCTION_NAME);
> +    AMF_RETURN_IF_FALSE(ctx, init_fun != 0, AVERROR_UNKNOWN, "DLL %s failed to find function %s. \n", AMF_DLL_NAMEA, AMF_QUERY_VERSION_FUNCTION_NAME);
> +
> +    version_fun(&ctx->version);
> +    init_fun(AMF_FULL_VERSION, &ctx->factory);
> +    ctx->factory->pVtbl->GetTrace(ctx->factory, &ctx->trace);
> +    ctx->factory->pVtbl->GetDebug(ctx->factory, &ctx->debug);

Do all of these functions necessarily succeed?
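
If they can fail, a sketch of the missing checks (assuming these entry points
all return an AMF_RESULT like the rest of the AMF calls in this file):

    AMF_RESULT res;

    res = version_fun(&ctx->version);
    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN,
                        "%s failed with error %d\n",
                        AMF_QUERY_VERSION_FUNCTION_NAME, res);
    res = init_fun(AMF_FULL_VERSION, &ctx->factory);
    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN,
                        "%s failed with error %d\n",
                        AMF_INIT_FUNCTION_NAME, res);
    res = ctx->factory->pVtbl->GetTrace(ctx->factory, &ctx->trace);
    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN,
                        "GetTrace() failed with error %d\n", res);
    res = ctx->factory->pVtbl->GetDebug(ctx->factory, &ctx->debug);
    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN,
                        "GetDebug() failed with error %d\n", res);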

> +    return 0;
> +}
> +
> +
> +static int amf_init_context(AVCodecContext *avctx) 
> +{
> +    AmfContext         *ctx = avctx->priv_data;
> +    AMF_RESULT          res = AMF_OK;
> +
> +    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_CONSOLE, false);

Using false probably wants <stdbool.h> and checking compiler support for it?  Since compilers without stdbool are supported, we generally use 0 and 1 instead.

> +
> +#if AMF_DEBUG_TRACE
> +    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, true);
> +    ctx->trace->pVtbl->SetWriterLevel(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, AMF_TRACE_TRACE);
> +    ctx->trace->pVtbl->SetGlobalLevel(ctx->trace, AMF_TRACE_TRACE);
> +#else
> +    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, false);
> +#endif
> +    ctx->tracer.vtbl = &tracer_vtbl;
> +    ctx->tracer.avctx = avctx;
> +    ctx->trace->pVtbl->RegisterWriter(ctx->trace, FFMPEG_AMF_WRITER_ID, 
> +        (AMFTraceWriter*)&ctx->tracer, true);

Can any of these functions fail?

> +
> +    res = ctx->factory->pVtbl->CreateContext(ctx->factory, &ctx->context);
> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "CreateContext() failed with error %d", res);

Newline.

> +
> +    // try to reuse existing DX device
> +
> +    if (avctx->hw_frames_ctx) {
> +        AVHWFramesContext *device_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
> +        if (device_ctx->device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA){
> +            if (device_ctx->device_ctx->hwctx) {
> +                AVD3D11VADeviceContext *device_d3d11 = (AVD3D11VADeviceContext *)device_ctx->device_ctx->hwctx;
> +                res = ctx->context->pVtbl->InitDX11(ctx->context, device_d3d11->device, AMF_DX11_1);
> +                if (res == AMF_OK) {
> +                    ctx->hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
> +                } else {
> +                    av_log(avctx, AV_LOG_INFO, "amf_shared: avctx->hw_frames_ctx has non-AMD device, switching to default");

I'm not sure this is going to act sensibly - if the user has D3D11 frames input on another device, does it work?

Also newline.

> +                }
> +
> +            }
> +        }
> +    } else if (avctx->hw_device_ctx) {
> +        AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)(avctx->hw_device_ctx->data);
> +        if (device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
> +            if (device_ctx->hwctx) {
> +                AVD3D11VADeviceContext *device_d3d11 = (AVD3D11VADeviceContext *)device_ctx->hwctx;
> +                res = ctx->context->pVtbl->InitDX11(ctx->context, device_d3d11->device, AMF_DX11_1);
> +                if (res == AMF_OK) {
> +                    ctx->hw_device_ctx = av_buffer_ref(avctx->hw_device_ctx);
> +                } else {
> +                    av_log(avctx, AV_LOG_INFO, "amf_shared: avctx->hw_device_ctx has non-AMD device, switching to default");

Newline.

> +                }
> +            }
> +        }
> +    }
> +
> +    if (!ctx->hw_frames_ctx) {
> +        res = ctx->context->pVtbl->InitDX11(ctx->context, 0, AMF_DX11_1);
> +        if (res != AMF_OK) {
> +            res = ctx->context->pVtbl->InitDX9(ctx->context, 0);
> +            AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "InitDX9() failed with error %d", res);

Newline, and also in more messages below which I won't point out individually.

> +        }
> +    }
> +    return 0;
> +}
> +
> +static int amf_init_encoder(AVCodecContext *avctx) 
> +{
> +    AmfContext          *ctx = avctx->priv_data;
> +    const wchar_t       *codec_id = 0;

NULL.

> +    AMF_RESULT          res = AMF_OK;
> +
> +    switch (avctx->codec->id) {
> +        case AV_CODEC_ID_H264:
> +            codec_id = AMFVideoEncoderVCE_AVC;
> +            break;
> +        case AV_CODEC_ID_HEVC:
> +            codec_id = AMFVideoEncoder_HEVC;
> +            break;
> +        default:
> +            break;
> +    }
> +    AMF_RETURN_IF_FALSE(ctx, codec_id != 0, AVERROR(EINVAL), "Codec %d is not supported", avctx->codec->id);
> +
> +    ctx->format = amf_av_to_amf_format(avctx->pix_fmt);
> +    AMF_RETURN_IF_FALSE(ctx, ctx->format != AMF_SURFACE_UNKNOWN, AVERROR(EINVAL), "Format %d is not supported", avctx->pix_fmt);
> +
> +    res = ctx->factory->pVtbl->CreateComponent(ctx->factory, ctx->context, codec_id, &ctx->encoder);
> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_ENCODER_NOT_FOUND, "CreateComponent(%S) failed with error %d", codec_id, res);

"%S" is not standard, please use %ls.

> +
> +    ctx->eof = false;
> +    return 0;
> +}
> +
> +static int amf_terminate(AVCodecContext *avctx) 
> +{
> +    AmfContext*          ctx = avctx->priv_data;

* next to variable.

> +
> +    if (ctx->encoder) {
> +        ctx->encoder->pVtbl->Terminate(ctx->encoder);
> +        ctx->encoder->pVtbl->Release(ctx->encoder);
> +        ctx->encoder = 0;

NULL.

> +    }
> +
> +    if (ctx->context) {
> +        ctx->context->pVtbl->Terminate(ctx->context);
> +        ctx->context->pVtbl->Release(ctx->context);
> +        ctx->context = 0;

NULL.

> +    }
> +    if (ctx->hw_device_ctx){
> +        av_buffer_unref(&ctx->hw_device_ctx);
> +        ctx->hw_device_ctx = 0;
> +    }
> +    if (ctx->hw_frames_ctx) {
> +        av_buffer_unref(&ctx->hw_frames_ctx);
> +        ctx->hw_frames_ctx = 0;
> +    }

Just use av_buffer_unref() without the checks or setting to null.
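
i.e. just:

    av_buffer_unref(&ctx->hw_device_ctx);
    av_buffer_unref(&ctx->hw_frames_ctx);

av_buffer_unref() is a no-op on NULL and resets the pointer itself.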

> +    
> +    if (ctx->trace) {
> +        ctx->trace->pVtbl->UnregisterWriter(ctx->trace, FFMPEG_AMF_WRITER_ID);
> +    }
> +
> +    if (ctx->library) {
> +        dlclose(ctx->library);
> +        ctx->library = 0;
> +    }
> +    ctx->trace = 0;
> +    ctx->debug = 0;
> +    ctx->factory = 0;
> +    ctx->version = 0;
> +
> +    return 0;
> +}
> +
> +static int amf_copy_surface(AVCodecContext *avctx, const AVFrame *frame, 
> +                            AMFSurface* surface) 
> +{
> +    AmfContext     *ctx = avctx->priv_data;
> +    AMFPlane       *plane = 0;
> +    uint8_t        *dst_data[4];
> +    int             dst_linesize[4];
> +
> +    int planes = (int)surface->pVtbl->GetPlanesCount(surface);
> +    AMF_RETURN_IF_FALSE(ctx, planes <= amf_countof(dst_data), AVERROR(EINVAL), "Invalid number of planes %d in surface", planes);
> +
> +    for (int i = 0; i < planes; i++) {

Declare variables at the start of the block.

> +        plane = surface->pVtbl->GetPlaneAt(surface, i);
> +        dst_data[i] = plane->pVtbl->GetNative(plane);
> +        dst_linesize[i] = plane->pVtbl->GetHPitch(plane);
> +    }
> +    av_image_copy(dst_data, dst_linesize,
> +        (const uint8_t**)frame->data, frame->linesize, frame->format,
> +        avctx->width, avctx->height);
> +
> +    surface->pVtbl->SetPts(surface, frame->pts);

Does this accept the same range as frame->pts, including AV_NOPTS_VALUE?

> +
> +    return 0;
> +}
> +
> +static int amf_copy_buffer(AVCodecContext *avctx, AVPacket *pkt, AMFBuffer *buffer)
> +{
> +    int                 ret = 0;

Pointless initialisation?

> +    AMFVariantStruct    var;
> +    int                 size = (int)buffer->pVtbl->GetSize(buffer);
> +
> +    if (ret = ff_alloc_packet2(avctx, pkt, size, 0)) {

Check for ret negative.
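
i.e.:

    if ((ret = ff_alloc_packet2(avctx, pkt, size, 0)) < 0)
        return ret;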

> +        return ret;
> +    }
> +    memcpy(pkt->data, buffer->pVtbl->GetNative(buffer), size);
> +
> +    switch (avctx->codec->id) {
> +        case AV_CODEC_ID_H264:
> +            buffer->pVtbl->GetProperty(buffer, AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE, &var);
> +            switch (var.int64Value) {
> +                case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_IDR:
> +                    pkt->flags = AV_PICTURE_TYPE_I | AV_PKT_FLAG_KEY;

The AV_PICTURE_TYPE does not go in AVPacket.flags.
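
A sketch of this block with only packet flags (assuming nothing beyond the
keyframe flag needs to be carried here):

    buffer->pVtbl->GetProperty(buffer, AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE, &var);
    switch (var.int64Value) {
    case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_IDR:
        pkt->flags |= AV_PKT_FLAG_KEY; // only AV_PKT_FLAG_* values belong in flags
        break;
    case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_I:
    case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_P:
    case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_B:
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unknown picture type encountered, "
               "expect the output to be broken.\n");
        break;
    }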

> +                    break;
> +                case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_I:
> +                    pkt->flags = AV_PICTURE_TYPE_I;
> +                    break;
> +                case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_P:
> +                    pkt->flags = AV_PICTURE_TYPE_P;
> +                    break;
> +                case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_B:
> +                    pkt->flags = AV_PICTURE_TYPE_B;
> +                    break;
> +                default:
> +                    av_log(avctx, AV_LOG_ERROR, "Unknown picture type encountered, expect the output to be broken.\n");

When can this happen?

> +                    break;
> +            }
> +            break;
> +        case AV_CODEC_ID_HEVC:
> +            buffer->pVtbl->GetProperty(buffer, AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE, &var);
> +            switch (var.int64Value) {
> +                case AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_IDR:
> +                    pkt->flags = AV_PICTURE_TYPE_I | AV_PKT_FLAG_KEY;
> +                    break;
> +                case AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_I:
> +                    pkt->flags = AV_PICTURE_TYPE_I | AV_PKT_FLAG_KEY;

All intra pictures generated by this encoder are necessarily IRAP?

> +                    break;
> +                case AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_P:
> +                    pkt->flags = AV_PICTURE_TYPE_P;
> +                    break;
> +                default:
> +                    av_log(avctx, AV_LOG_ERROR, "Unknown picture type encountered, expect the output to be broken.\n");
> +                    break;
> +            }
> +            break;
> +        default:
> +            break;
> +    }
> +    pkt->pts = buffer->pVtbl->GetPts(buffer);
> +    pkt->dts = pkt->pts;
> +    return 0;
> +}
> +
> +// amfenc API implmentation
> +int ff_amf_encode_init(AVCodecContext *avctx) 
> +{
> +    AmfContext     *ctx = avctx->priv_data;
> +    int             ret = 0;

Pointless initialisation.

> +
> +    ctx->factory = 0;
> +    ctx->debug = 0;
> +    ctx->trace = 0;
> +    ctx->context = 0;
> +    ctx->encoder = 0;
> +    ctx->library = 0;
> +    ctx->version = 0;
> +    ctx->eof = 0;
> +    ctx->format = 0;
> +    ctx->tracer.vtbl = 0;
> +    ctx->tracer.avctx = 0;

Some of these should probably be NULL.

> +
> +    if ((ret = amf_load_library(avctx)) == 0) {
> +        if ((ret = amf_init_context(avctx)) == 0) {
> +            if ((ret = amf_init_encoder(avctx)) == 0) {
> +                return 0;
> +            }
> +        }
> +    }
> +    amf_terminate(avctx);
> +    return ret;
> +}
> +
> +int av_cold ff_amf_encode_close(AVCodecContext *avctx)
> +{
> +    int ret = 0;

Pointless initialisation.

> +    ret = amf_terminate(avctx);
> +    return ret;
> +}
> +
> +static GUID  AMFTextureArrayIndexGUID = 
> +{ 0x28115527, 0xe7c3, 0x4b66, { 0x99, 0xd3, 0x4f, 0x2a, 0xe6, 0xb4, 0x7f, 0xaf } };

This seems like something that really shouldn't be hardcoded like this here.

> +
> +int ff_amf_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
> +                        const AVFrame *frame, int *got_packet) 
> +{
> +    int             ret = 0;
> +    AMF_RESULT      res = AMF_OK;
> +    AmfContext     *ctx = avctx->priv_data;
> +    AMFSurface     *surface = 0;
> +    AMFData        *data = 0;
> +    amf_bool       submitted = false;
> +
> +    while (!submitted) {
> +        if (!frame) { // submit drain
> +            if (!ctx->eof) { // submit drain onre time only
> +                res = ctx->encoder->pVtbl->Drain(ctx->encoder);
> +                if (res == AMF_INPUT_FULL) {
> +                    av_usleep(1000); // input queue is full: wait, poll and submit Drain again
> +                                     // need to get some output and try again
> +                } else if (res == AMF_OK) {
> +                    ctx->eof = true; // drain started
> +                    submitted = true;
> +                }
> +            }
> +        } else { // submit frame
> +            if (surface == 0) { // prepare surface from frame one time only
> +                if (frame->hw_frames_ctx && ( // HW frame detected
> +                                              // check if the same hw_frames_ctx as used in initialization
> +                    (ctx->hw_frames_ctx && frame->hw_frames_ctx->data == ctx->hw_frames_ctx->data) ||
> +                    // check if the same hw_device_ctx as used in initialization
> +                    (ctx->hw_device_ctx && ((AVHWFramesContext*)frame->hw_frames_ctx->data)->device_ctx ==
> +                    (AVHWDeviceContext*)ctx->hw_device_ctx->data)
> +                )) {
> +                    ID3D11Texture2D* texture = (ID3D11Texture2D*)frame->data[0]; // actual texture
> +                    int index = (int)(size_t)frame->data[1]; // index is a slice in texture array is - set to tell AMF which slice to use
> +                    texture->lpVtbl->SetPrivateData(texture, &AMFTextureArrayIndexGUID, sizeof(index), &index);
> +
> +                    res = ctx->context->pVtbl->CreateSurfaceFromDX11Native(ctx->context, texture, &surface, NULL); // wrap to AMF surface
> +                    surface->pVtbl->SetCrop(surface, 0, 0, frame->width, frame->height); // decode surfaces are vertically aligned by 16 tell AMF real size
> +                    surface->pVtbl->SetPts(surface, frame->pts);
> +                } else {
> +                    res = ctx->context->pVtbl->AllocSurface(ctx->context, AMF_MEMORY_HOST, ctx->format, avctx->width, avctx->height, &surface);
> +                    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "AllocSurface() failed  with error %d", res);
> +                    amf_copy_surface(avctx, frame, surface);
> +                }
> +            }
> +            // encode
> +            res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)surface);
> +            if (res == AMF_INPUT_FULL) { // handle full queue
> +                av_usleep(1000); // input queue is full: wait, poll and submit surface again

Is there really no way in the API to wait for this properly?

> +            } else {
> +                surface->pVtbl->Release(surface);
> +                surface = NULL;
> +                AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "SubmitInput() failed with error %d", res);
> +                submitted = 1;
> +            }
> +        }
> +        // poll results
> +        if (!data) {
> +            res = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data);
> +            if (data) {
> +                AMFBuffer* buffer;
> +                AMFGuid guid = IID_AMFBuffer();
> +                data->pVtbl->QueryInterface(data, &guid, (void**)&buffer); // query for buffer interface
> +                ret = amf_copy_buffer(avctx, pkt, buffer);
> +                if (!ret)
> +                    *got_packet = 1;
> +                buffer->pVtbl->Release(buffer);
> +                data->pVtbl->Release(data);
> +                if (ctx->eof) {
> +                    submitted = true; // we are in the drain state - no submissions
> +                }
> +            } else if (res == AMF_EOF) {
> +                submitted = true; // drain complete
> +            } else {
> +                if (!submitted) {
> +                    av_usleep(1000); // wait and poll again
> +                }
> +            }
> +        }
> +    }

I suspect this setup is not actually going to follow the constraints of the deprecated encode2().  Given the API here, I think you would be much better off writing with send_frame()/receive_packet().
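
The shape of that would be roughly (skeleton only, not a working
implementation):

    static int amf_send_frame(AVCodecContext *avctx, const AVFrame *frame)
    {
        // wrap or copy the frame into an AMFSurface and SubmitInput() it;
        // return AVERROR(EAGAIN) while the input queue is full
        return 0;
    }

    static int amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
    {
        // QueryOutput() and copy the resulting AMFBuffer into avpkt;
        // return AVERROR(EAGAIN) if nothing is ready yet and
        // AVERROR_EOF once draining has completed
        return 0;
    }

with .send_frame/.receive_packet set in the AVCodec instead of .encode2, so
the sleep-and-retry loop disappears.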

> +    return ret;
> +}
> diff --git a/libavcodec/amfenc.h b/libavcodec/amfenc.h
> new file mode 100644
> index 0000000..6b0135a
> --- /dev/null
> +++ b/libavcodec/amfenc.h
> @@ -0,0 +1,131 @@
> +/*
> +* This file is part of FFmpeg.
> +*
> +* FFmpeg is free software; you can redistribute it and/or
> +* modify it under the terms of the GNU Lesser General Public
> +* License as published by the Free Software Foundation; either
> +* version 2.1 of the License, or (at your option) any later version.
> +*
> +* FFmpeg is distributed in the hope that it will be useful,
> +* but WITHOUT ANY WARRANTY; without even the implied warranty of
> +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +* Lesser General Public License for more details.
> +*
> +* You should have received a copy of the GNU Lesser General Public
> +* License along with FFmpeg; if not, write to the Free Software
> +* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> +*/
> +
> +#ifndef AVCODEC_AMFENC_H
> +#define AVCODEC_AMFENC_H
> +
> +#include "config.h"
> +#include "avcodec.h"
> +//#include "compat/amd/amf/public/include/core/Factory.h"

Commented out?

> +#include "compat/amd/amfsdkenc.h"
> +
> +
> +/**
> +* AMF trace writer callback class
> +* Used to capture all AMF logging
> +*/
> +
> +typedef struct AmfTraceWriter {
> +	AMFTraceWriterVtbl* vtbl;
> +	AVCodecContext      *avctx;
> +} AmfTraceWriter;
> +
> +/**
> +* AMF encoder context
> +*/
> +
> +typedef struct AmfContext {
> +	AVClass*            avclass;
> +	/** access to AMF runtime */
> +	amf_handle          library; ///< handle to DLL library
> +	AMFFactory*         factory; ///< pointer to AMF factory 
> +	AMFDebug*           debug;   ///< pointer to AMF debug interface 
> +	AMFTrace*           trace;   ///< pointer to AMF trace interface 
> +
> +	amf_uint64          version; ///< version of AMF runtime
> +	AmfTraceWriter      tracer;  ///< AMF writer registered with AMF 
> +	AMFContext*         context; ///< AMF context
> +	//encoder
> +	AMFComponent*       encoder; ///< AMF encoder object
> +	amf_bool            eof;     ///< flag indicating EOF happened
> +	AMF_SURFACE_FORMAT  format;  ///< AMF surface format
> +
> +	AVBufferRef        *hw_device_ctx; ///< pointer to HW accelerator (decoder)
> +	AVBufferRef        *hw_frames_ctx; ///< pointer to HW accelerator (frame allocator)
> +
> +	/** common encoder option options */
> +
> +    /** Static options, have to be set before Init() call */
> +    int                 usage;
> +    int                 profile;
> +    int                 level;
> +    int                 preanalysis;
> +    int                 quality;
> +    int					b_frame_delta_qp;
> +    int					ref_b_frame_delta_qp;
> +
> +    /** Dynamic options, can be set after Init() call */
> +
> +    int                 rate_control_mode;
> +	int					enforce_hrd;
> +	int					filler_data;
> +    int					enable_vbaq;
> +    int					skip_frame;
> +    int					qp_i;
> +	int					qp_p;
> +    int					qp_b;
> +    int					max_au_size;
> +	int					header_spacing;
> +	int					b_frame_ref;
> +	int					intra_refresh_mb;
> +    int                 slices;
> +	int					coding_mode;
> +	int					me_half_pel;
> +	int					me_quater_pel;
> +
> +    /** HEVC - specific options */
> +
> +    int					gops_per_idr;
> +    int                 header_insertion_mode;
> +    int                 min_qp_i;
> +    int                 max_qp_i;
> +    int                 min_qp_p;
> +    int                 max_qp_p;
> +	int                 tier;

There are lots of tabs and strange indentation here.

> +} AmfContext;
> +
> +/** 
> +* Common encoder initization code 
> +*/
> +int ff_amf_encode_init(AVCodecContext *avctx);
> +/**
> +* Common encoder termination code
> +*/
> +int ff_amf_encode_close(AVCodecContext *avctx);
> +
> +/**
> +* Ecoding one frame - common for all AMF encoders
> +*/
> +int ff_amf_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
> +	const AVFrame *frame, int *got_packet);
> +
> +/**
> +* Supported formats
> +*/
> +extern const enum AVPixelFormat ff_amf_pix_fmts[];
> +
> +/**
> +* Error handling helper
> +*/
> +#define AMF_RETURN_IF_FALSE(avctx, exp, ret_value, /*optional message,*/ ...) \
> +    if (!(exp)) { \
> +        av_log(avctx, AV_LOG_ERROR, __VA_ARGS__); \
> +        return AVERROR(ret_value); \
> +    }
> +
> +#endif //AVCODEC_AMFENC_H
> \ No newline at end of file

git has a review comment for you too.

> diff --git a/libavcodec/amfenc_h264.c b/libavcodec/amfenc_h264.c
> new file mode 100644
> index 0000000..a6e0f3c
> --- /dev/null
> +++ b/libavcodec/amfenc_h264.c
> @@ -0,0 +1,467 @@
> +/*
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#include "amfenc.h"
> +//#include "compat/amd/amf/public/include/components/VideoEncoderVCE.h"
> +#include "libavutil/opt.h"
> +#include "libavutil/internal.h"
> +#include "internal.h"
> +
> +#define OFFSET(x) offsetof(AmfContext, x)
> +#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
> +
> +static const AVOption options[] = {
> +    // Static
> +    /// Usage
> +    { "usage",          "Encoder Usage",        OFFSET(usage),  AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_USAGE_TRANSCONDING      }, AMF_VIDEO_ENCODER_USAGE_TRANSCONDING, AMF_VIDEO_ENCODER_USAGE_WEBCAM, VE, "usage" },
> +    { "transcoding",    "Generic Transcoding",  0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_USAGE_TRANSCONDING      }, 0, 0, VE, "usage" },
> +    { "ultralowlatency","",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_USAGE_ULTRA_LOW_LATENCY }, 0, 0, VE, "usage" },
> +    { "lowlatency",     "",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_USAGE_LOW_LATENCY       }, 0, 0, VE, "usage" },
> +    { "webcam",         "Webcam",               0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_USAGE_WEBCAM            }, 0, 0, VE, "usage" },
> +
> +    /// Profile,
> +    { "profile",        "Profile",              OFFSET(profile),AV_OPT_TYPE_INT, { .i64 = AMF_VIDEO_ENCODER_PROFILE_MAIN       }, AMF_VIDEO_ENCODER_PROFILE_BASELINE, AMF_VIDEO_ENCODER_PROFILE_HIGH, VE, "profile" },
> +    { "baseline",       "",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_PROFILE_BASELINE }, 0, 0, VE, "profile" },

Do you really support baseline profile H.264?  You probably mean constrained baseline.

> +    { "main",           "",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_PROFILE_MAIN     }, 0, 0, VE, "profile" },
> +    { "high",           "",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_PROFILE_HIGH     }, 0, 0, VE, "profile" },
> +
> +    /// Profile Level
> +    { "level",          "Profile Level",        OFFSET(level),  AV_OPT_TYPE_INT,   { .i64 = 0  }, 0, 62, VE, "level" },
> +    { "auto",           "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 0  }, 0, 0,  VE, "level" },
> +    { "1.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 10 }, 0, 0,  VE, "level" },
> +    { "1.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 11 }, 0, 0,  VE, "level" },
> +    { "1.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 12 }, 0, 0,  VE, "level" },
> +    { "1.3",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 13 }, 0, 0,  VE, "level" },
> +    { "2.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 20 }, 0, 0,  VE, "level" },
> +    { "2.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 21 }, 0, 0,  VE, "level" },
> +    { "2.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 22 }, 0, 0,  VE, "level" },
> +    { "3.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 30 }, 0, 0,  VE, "level" },
> +    { "3.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 31 }, 0, 0,  VE, "level" },
> +    { "3.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 32 }, 0, 0,  VE, "level" },
> +    { "4.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 40 }, 0, 0,  VE, "level" },
> +    { "4.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 41 }, 0, 0,  VE, "level" },
> +    { "4.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 42 }, 0, 0,  VE, "level" },
> +    { "5.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 50 }, 0, 0,  VE, "level" },
> +    { "5.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 51 }, 0, 0,  VE, "level" },
> +    { "5.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 52 }, 0, 0,  VE, "level" },
> +    { "6.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 60 }, 0, 0,  VE, "level" },
> +    { "6.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 61 }, 0, 0,  VE, "level" },
> +    { "6.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 62 }, 0, 0,  VE, "level" },

These private options for profile and level are fine, but you should read AVCodecContext.(profile|level) first.

> +
> +    /// Quality Preset
> +    { "quality",        "Quality Preference",                   OFFSET(quality),    AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_QUALITY_PRESET_SPEED    }, AMF_VIDEO_ENCODER_QUALITY_PRESET_BALANCED, AMF_VIDEO_ENCODER_QUALITY_PRESET_QUALITY, VE, "quality" },
> +    { "speed",          "Prefer Speed",                         0,                  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_QUALITY_PRESET_SPEED    },       0, 0, VE, "quality" },
> +    { "balanced",       "Balanced",                             0,                  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_QUALITY_PRESET_BALANCED },    0, 0, VE, "quality" },
> +    { "quality",        "Prefer Quality",                       0,                  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_QUALITY_PRESET_QUALITY  },     0, 0, VE, "quality" },
> +
> +    // Dynamic
> +    /// Rate Control Method
> +    { "rc",             "Rate Control Method",                  OFFSET(rate_control_mode),  AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR    }, AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP, AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_LATENCY_CONSTRAINED_VBR, VE, "rc" },
> +    { "cqp",            "Constant Quantization Parameter",      0,                          AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP             }, 0, 0, VE, "rc" },
> +    { "cbr",            "Constant Bitrate",                     0,                          AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CBR                     }, 0, 0, VE, "rc" },
> +    { "vbr_peak",       "Peak Contrained Variable Bitrate",     0,                          AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR    }, 0, 0, VE, "rc" },
> +    { "vbr_latency",    "Latency Constrained Variable Bitrate", 0,                          AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_LATENCY_CONSTRAINED_VBR }, 0, 0, VE, "rc" },
> +
> +    /// Enforce HRD, Filler Data, VBAQ, Frame Skipping
> +    { "enforce_hrd",    "Enforce HRD",                          OFFSET(enforce_hrd),        AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
> +    { "filler_data",    "Filler Data Enable",                   OFFSET(filler_data),        AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
> +    { "vbaq",           "Enable VBAQ",                          OFFSET(enable_vbaq),        AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
> +    { "frame_skipping", "Rate Control Based Frame Skip",        OFFSET(skip_frame),         AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE, NULL },
> +
> +    /// QP Values
> +    { "qp_i",           "Quantization Parameter for I-Frame",   OFFSET(qp_i),               AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 51, VE },
> +    { "qp_p",           "Quantization Parameter for P-Frame",   OFFSET(qp_p),               AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 51, VE },
> +    { "qp_b",           "Quantization Parameter for B-Frame",   OFFSET(qp_b),               AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 51, VE },
> +
> +    /// Pre-Pass, Pre-Analysis, Two-Pass
> +    { "preanalysis",    "Pre-Analysis Mode",                    OFFSET(preanalysis),        AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
> +
> +    /// Maximum Access Unit Size
> +    { "max_au_size",    "Maximum Access Unit Size (in bits)",   OFFSET(max_au_size),        AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE, NULL },

Maximum access unit size seems like a slightly strange thing to set - the HRD parameters and rate control should define how this works.

Is it actually maximum NAL unit size?  (For RFC 6184 single NAL unit packets.)

> +
> +    /// Header Insertion Spacing
> +    { "header_spacing", "Header Insertion Spacing",             OFFSET(header_spacing),     AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1000, VE },
> +
> +    /// B-Frames
> +    // BPicturesPattern=bf
> +    { "bf_delta_qp",    "B-Picture Delta QP",                   OFFSET(b_frame_delta_qp),   AV_OPT_TYPE_INT,  { .i64 = 4 }, -10, 10, VE, NULL },
> +    { "bf_ref",         "Enable Reference to B-Frames",         OFFSET(b_frame_ref),        AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE, NULL },
> +    { "bf_ref_delta_qp","Reference B-Picture Delta QP",         OFFSET(ref_b_frame_delta_qp), AV_OPT_TYPE_INT,  { .i64 = 4 }, -10, 10, VE, NULL },
> +
> +    /// Intra-Refresh
> +    { "intra_refresh_mb","Intra Refresh MBs Number Per Slot in Macroblocks",       OFFSET(intra_refresh_mb),    AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
> +    { "slices",         "Number of Slices per Frame",           OFFSET(slices),        AV_OPT_TYPE_INT,   { .i64 = 1 } , 1, 8160, VE, NULL },

Use AVCodecContext.slices rather than a private option.
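
i.e. drop the private "slices" option and use:

    if (avctx->slices > 1)
        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
                                  AMF_VIDEO_ENCODER_SLICES_PER_FRAME,
                                  avctx->slices);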

> +
> +    /// coder
> +    { "coding",         "Coding Type",                          OFFSET(coding_mode),   AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_UNDEFINED }, AMF_VIDEO_ENCODER_UNDEFINED, AMF_VIDEO_ENCODER_CALV, VE, "coding" },
> +    { "auto",           "Automatic",                            0,                     AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_UNDEFINED }, 0, 0, VE, "coding" },
> +    { "cavlc",          "Context Adaptive Variable-Length Coding", 0,                  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_CALV },      0, 0, VE, "coding" },
> +    { "cabac",          "Context Adaptive Binary Arithmetic Coding", 0,                AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_CABAC },     0, 0, VE, "coding" },

Other encoders have the same option named "coder", it might be nice to be consistent.

> +
> +    { "me_half_pel",    "Enable ME Half Pixel",                 OFFSET(me_half_pel),   AV_OPT_TYPE_BOOL,  { .i64 = 1 }, 0, 1, VE, NULL },
> +    { "me_quater_pel",  "Enable ME Quarter Pixel ",             OFFSET(me_quater_pel), AV_OPT_TYPE_BOOL,  { .i64 = 1 }, 0, 1, VE, NULL },
> +
> +    { NULL }
> +};

I think these options could do with some better documentation (maybe in the texinfo, though, and it can be a later patch).

> +
> +static av_cold int amf_encode_init_h264(AVCodecContext *avctx)
> +{
> +    int                 ret = 0;
> +    AMF_RESULT          res = AMF_OK;
> +    AmfContext         *ctx = avctx->priv_data;
> +    AMFVariantStruct    var = {0};
> +    amf_int64           profile_level = 0;
> +
> +    AMFSize             framesize = AMFConstructSize(avctx->width, avctx->height);
> +    AMFRate             framerate = AMFConstructRate(avctx->time_base.den, avctx->time_base.num * avctx->ticks_per_frame);

Is VFR encoding not supported?

> +
> +    int                 deblocking_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
> +
> +    if ((ret = ff_amf_encode_init(avctx)) != 0)
> +        return ret;
> +
> +    // Static parameters
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_USAGE, ctx->usage);
> +
> +    AMF_ASSIGN_PROPERTY_SIZE(res, ctx->encoder, AMF_VIDEO_ENCODER_FRAMESIZE, framesize);
> +
> +    AMF_ASSIGN_PROPERTY_RATE(res, ctx->encoder, AMF_VIDEO_ENCODER_FRAMERATE, framerate);
> +
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PROFILE, ctx->profile);

Check avctx->profile as well.

> +
> +    profile_level = ctx->level;

Check avctx->level as well.
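
Covering both of the above, a rough sketch of honouring the generic options
first (the mapping to the AMF profile constants is an assumption, not checked
against the SDK):

    if (avctx->profile != FF_PROFILE_UNKNOWN) {
        switch (avctx->profile) {
        case FF_PROFILE_H264_BASELINE: ctx->profile = AMF_VIDEO_ENCODER_PROFILE_BASELINE; break;
        case FF_PROFILE_H264_MAIN:     ctx->profile = AMF_VIDEO_ENCODER_PROFILE_MAIN;     break;
        case FF_PROFILE_H264_HIGH:     ctx->profile = AMF_VIDEO_ENCODER_PROFILE_HIGH;     break;
        }
    }
    profile_level = avctx->level != FF_LEVEL_UNKNOWN ? avctx->level : ctx->level;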

> +    if (profile_level == 0) {
> +        // Automatic detection of correct profile level.
> +        struct {
> +            uint64_t  max_samples;
> +            uint64_t max_samples_per_sec;
> +            int level;
> +        } restrictions[] = {
> +            { 25344,    380160, 10 },
> +            { 101376,   768000, 11 },
> +            { 101376,   1536000, 12 },
> +            //{   101376,    3041280, 13 }, // Backwards compatible 2.0
> +            { 101376,   3041280, 20 },
> +            { 202752,   5068800, 21 },
> +            { 414720,   5184000, 22 },
> +            { 414720,   10368000, 30 },
> +            { 921600,   27648000, 31 },
> +            { 1310720,  55296000, 32 },
> +            //{  2097152,   62914560, 40 }, // Backwards compatible 4.1
> +            { 2097152,  62914560, 41 },
> +            { 2228224,  133693440, 42 },
> +            { 5652480,  150994994, 50 },
> +            { 9437184,  251658240, 51 },
> +            { 9437184,  530841600, 52 },
> +            { 35651584, 1069547520, 60 },
> +            { 35651584, 2139095040, 61 },
> +            { 35651584, 4278190080, 62 },
> +            { 0, 0, -1 }
> +        };
> +        uint64_t samples = framesize.width * framesize.height;
> +        uint64_t samples_per_sec = (samples * framerate.num) / framerate.den;
> +        profile_level = 52; // Default to 5.2 for now.
> +        for (unsigned int index = 0; restrictions[index].level != -1; index++) {
> +            if ((samples < restrictions[index].max_samples)
> +                && (samples_per_sec < restrictions[index].max_samples_per_sec)) {
> +                profile_level = restrictions[index].level;
> +                break;
> +            }
> +        }
> +    }
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PROFILE_LEVEL, profile_level);

Um, does this really have to be done outside the encoder?

> +
> +    // Maximum Reference Frames
> +    if (avctx->refs != -1) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MAX_NUM_REFRAMES, avctx->refs);
> +    }
> +    if (avctx->sample_aspect_ratio.den && avctx->sample_aspect_ratio.num) {
> +        AMFRatio ratio = AMFConstructRatio(avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
> +        AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder, AMF_VIDEO_ENCODER_ASPECT_RATIO, ratio);
> +    }
> +
> +    /// Color Range (Partial/TV/MPEG or Full/PC/JPEG)
> +    if (avctx->color_range == AVCOL_RANGE_JPEG) {
> +        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_FULL_RANGE_COLOR, 1);
> +    }
> +
> +    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_RATE_CONTROL_PREANALYSIS_ENABLE, false);
> +        if (ctx->preanalysis)
> +            av_log(ctx, AV_LOG_WARNING, "Pre-Analysis is not supported by cqp Rate Control Method, automatically disabled.");

(More newlines missing.)

> +    } else {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_RATE_CONTROL_PREANALYSIS_ENABLE, ctx->preanalysis);
> +    }
> +
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_QUALITY_PRESET, ctx->quality);
> +
> +    // Initialize Encoder
> +    res = ctx->encoder->pVtbl->Init(ctx->encoder, ctx->format, avctx->width, avctx->height);
> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "encoder->Init() failed with error %d", res);
> +
> +    // Dynamic parmaters 
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD, ctx->rate_control_mode);
> +
> +    /// VBV Buffer
> +    if (avctx->rc_buffer_size != 0)
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_VBV_BUFFER_SIZE, avctx->rc_buffer_size);
> +    if (avctx->rc_initial_buffer_occupancy != 0) {
> +        int percent = avctx->rc_buffer_size * 64 / avctx->rc_initial_buffer_occupancy;
> +        if (percent > 64)
> +            percent = 64;

???
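
If the intent is the initial fullness as a fraction of the VBV buffer, it
presumably wants to be something like this (an assumption about what the AMF
property expects, not verified against the SDK):

    if (avctx->rc_buffer_size && avctx->rc_initial_buffer_occupancy) {
        // initial fullness expressed in 64ths of the VBV buffer size
        int64_t amf_fullness = (int64_t)avctx->rc_initial_buffer_occupancy * 64 /
                               avctx->rc_buffer_size;
        if (amf_fullness > 64)
            amf_fullness = 64;
        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
                                  AMF_VIDEO_ENCODER_INITIAL_VBV_BUFFER_FULLNESS,
                                  amf_fullness);
    }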

> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_INITIAL_VBV_BUFFER_FULLNESS, percent);
> +    }
> +    /// Maximum Access Unit Size
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MAX_AU_SIZE, ctx->max_au_size);
> +
> +    
> +    // QP Minimum / Maximum
> +    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MIN_QP, 0);
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MAX_QP, 51);
> +    } else {
> +        if (avctx->qmin != -1) {
> +            int qval = avctx->qmin > 51 ? 51 : avctx->qmin;
> +            AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MIN_QP, qval);
> +        }            

Trailing spaces.

> +        if (avctx->qmax != -1) {
> +            int qval = avctx->qmax > 51 ? 51 : avctx->qmax;
> +            AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MAX_QP, qval);
> +        }
> +    }
> +    // QP Values
> +    if (ctx->qp_i != -1)
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_QP_I, ctx->qp_i);
> +    if (ctx->qp_p != -1)
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_QP_P, ctx->qp_p);
> +    if (ctx->qp_b != -1)
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_QP_B, ctx->qp_b);
> +
> +    // Bitrate
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_TARGET_BITRATE, avctx->bit_rate);
> +    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CBR) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PEAK_BITRATE, avctx->bit_rate);
> +    } else {
> +        int rc_max_rate = avctx->rc_max_rate >= avctx->bit_rate ? avctx->rc_max_rate : avctx->bit_rate * 13 / 10;

Where does 13/10 come from?

> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PEAK_BITRATE, rc_max_rate);
> +    }
> +    // Enforce HRD, Filler Data, VBAQ, Frame Skipping, Deblocking Filter
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_ENFORCE_HRD, !!ctx->enforce_hrd);
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_FILLER_DATA_ENABLE, !!ctx->filler_data);
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_RATE_CONTROL_SKIP_FRAME_ENABLE, !!ctx->skip_frame);
> +    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {
> +        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_ENABLE_VBAQ, false);
> +        if (ctx->enable_vbaq)
> +            av_log(ctx, AV_LOG_WARNING, "VBAQ is not supported by cqp Rate Control Method, automatically disabled.");
> +    } else {
> +        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_ENABLE_VBAQ, !!ctx->enable_vbaq);
> +    }
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_DE_BLOCKING_FILTER, !!deblocking_filter);
> +
> +    // B-Frames
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_B_PIC_PATTERN, avctx->max_b_frames);
> +    if (avctx->max_b_frames && res == AMF_OK) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_B_PIC_DELTA_QP, ctx->b_frame_delta_qp);
> +        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_B_REFERENCE_ENABLE, !!ctx->b_frame_ref);
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_REF_B_PIC_DELTA_QP, ctx->ref_b_frame_delta_qp);
> +    }
> +
> +    // Keyframe Interval
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_IDR_PERIOD, avctx->gop_size);
> +
> +    // Header Insertion Spacing
> +    if (ctx->header_spacing >= 0)
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEADER_INSERTION_SPACING, ctx->header_spacing);
> +
> +    // Intra-Refresh, Slicing
> +    if (ctx->intra_refresh_mb > 0)
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_INTRA_REFRESH_NUM_MBS_PER_SLOT, ctx->intra_refresh_mb);
> +    if (ctx->slices > 1)
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_SLICES_PER_FRAME, ctx->slices);
> +    
> +    // Coding
> +    if (ctx->coding_mode != 0)
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_CABAC_ENABLE, ctx->coding_mode);
> +
> +    // Motion Estimation
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_MOTION_HALF_PIXEL, !!ctx->me_half_pel);
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_MOTION_QUARTERPIXEL, !!ctx->me_quater_pel);
> +
> +    // fill extradata
> +    AMFVariantInit(&var);

Can this fail?
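
For reference, if it can, the file's existing pattern would cover it (a sketch; this assumes AMFVariantInit() returns an AMF_RESULT like the other AMF calls used here):

    res = AMFVariantInit(&var);
    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "AMFVariantInit() failed with error %d", res);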

> +    res = ctx->encoder->pVtbl->GetProperty(ctx->encoder, AMF_VIDEO_ENCODER_EXTRADATA, &var);
> +    if (res == AMF_OK && var.pInterface) {
> +        AMFBuffer* buffer;
> +        AMFGuid guid = IID_AMFBuffer();
> +
> +        var.pInterface->pVtbl->QueryInterface(var.pInterface, &guid, (void**)&buffer); // query for buffer interface
> +
> +        avctx->extradata_size = (int)buffer->pVtbl->GetSize(buffer);
> +        avctx->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
> +        if (!avctx->extradata) {
> +            buffer->pVtbl->Release(buffer);
> +            var.pInterface->pVtbl->Release(var.pInterface);
> +            return AVERROR(ENOMEM);
> +        }
> +        memcpy(avctx->extradata, buffer->pVtbl->GetNative(buffer), avctx->extradata_size);
> +
> +        buffer->pVtbl->Release(buffer);
> +        var.pInterface->pVtbl->Release(var.pInterface);
> +    }
> +    return 0;
> +}
> +
> +
> +
> +static const AVCodecDefault defaults[] = {
> +    { "refs",       "-1" },
> +    { "aspect",     "0" },
> +    { "sar",        "0" },
> +    { "qmin",       "-1" },
> +    { "qmax",       "-1" },
> +    { "b",          "2M" },
> +    { "maxrate",    "3M" },
> +    { "g",          "250" },
> +    { "keyint_min", "0" },
> +    { "bf",         "0" },
> +    { "slices",     "1" },
> +    { NULL },
> +};
> +
> +static const AVClass h264_amf_class = {
> +    .class_name = "h264_amf",
> +    .item_name = av_default_item_name,
> +    .option = options,
> +    .version = LIBAVUTIL_VERSION_INT,
> +};
> +static const AVClass h264_amf_d3d11va_class = {
> +    .class_name = "h264_amf_d3d11va",
> +    .item_name = av_default_item_name,
> +    .option = options,
> +    .version = LIBAVUTIL_VERSION_INT,
> +};
> +// regular encoder
> +AVCodec ff_h264_amf_encoder = {
> +    .name = "h264_amf",
> +    .long_name = NULL_IF_CONFIG_SMALL("AMD AMF H.264 Encoder"),
> +    .type = AVMEDIA_TYPE_VIDEO,
> +    .id = AV_CODEC_ID_H264,
> +    .init = amf_encode_init_h264,
> +    .encode2 = ff_amf_encode_frame,
> +    .close = ff_amf_encode_close,
> +    .priv_data_size = sizeof(AmfContext),
> +    .priv_class = &h264_amf_class,
> +    .defaults = defaults,
> +    .capabilities = AV_CODEC_CAP_DELAY,
> +    .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
> +    .pix_fmts = ff_amf_pix_fmts,
> +};
> +// encoder connected with D3D11 HW accelerator
> +AVCodec ff_h264_amf_d3d11va_encoder = {
> +    .name = "h264_amf_d3d11va",
> +    .long_name = NULL_IF_CONFIG_SMALL("AMD AMF H.264 Encoder with d3d11va"),
> +    .type = AVMEDIA_TYPE_VIDEO,
> +    .id = AV_CODEC_ID_H264,
> +    .init = amf_encode_init_h264,
> +    .encode2 = ff_amf_encode_frame,
> +    .close = ff_amf_encode_close,
> +    .priv_data_size = sizeof(AmfContext),
> +    .priv_class = &h264_amf_d3d11va_class,
> +    .defaults = defaults,
> +    .capabilities = AV_CODEC_CAP_DELAY,
> +    .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
> +    .pix_fmts = ff_amf_pix_fmts,
> +};

As above, why does this separate (identical) instance exist?

> +
> +/**
> +* Basic test BAT file:
> +echo off
> +if "%~1"=="" (
> +echo input file name is empty. Use basic_transcode_amf_h264.bat video.mp4
> +goto error
> +)
> +
> +SET "CWD=%~dp0"
> +SET bitrate=5M
> +SET maxbitrate=6M
> +SET bufsize=2M
> +SET x264_preset=veryfast
> +SET amf_quality=speed
> +
> +
> +rem veryfast and zerolatency options make x264 comparable with VCE
> +
> +rem change path to ffmpeg.exe if needed
> +
> +"%CWD%\..\bin\ffmpeg.exe" -y -t 100 -threads 0 -i "%~1" -c:v h264_amf  -b:v %bitrate% -maxrate %maxbitrate% -bufsize %bufsize% -rc vbr_peak -quality %amf_quality% out_amf_h264.mp4
> +"%CWD%\..\bin\ffmpeg.exe" -y -t 100 -threads 0 -i "%~1" -c:v libx264   -b:v %bitrate% -maxrate %maxbitrate% -bufsize %bufsize% -preset %x264_preset% -nal-hrd vbr -tune zerolatency out_x264_h264.mp4
> +
> +echo PSNR > result.txt
> +
> +"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -threads 0 -i "%~1" -i out_amf_h264.mp4  -lavfi psnr="stats_file='amf_h264.psnr.log'"  -f null - > "trace.txt" 2>&1
> +for /f "tokens=*" %%A in ('type "trace.txt"') do @echo amf_h264 : %%A >end.txt
> +type end.txt >> result.txt
> +
> +
> +"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -threads 0 -i "%~1" -i out_x264_h264.mp4 -lavfi psnr="stats_file='x264_h264.psnr.log'" -f null - > "trace.txt" 2>&1
> +for /f "tokens=*" %%A in ('type "trace.txt"') do @echo x264_h264: %%A >end.txt
> +type end.txt >> result.txt
> +
> +
> +echo SSIM >> result.txt
> +
> +"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -threads 0 -i "%~1" -i out_amf_h264.mp4  -lavfi ssim="stats_file='amf_h264_ssim.log'" -f null - > "trace.txt"  2>&1
> +for /f "tokens=*" %%A in ('type "trace.txt"') do @echo amf_h264 : %%A >end.txt
> +type end.txt >> result.txt
> +
> +"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -threads 0 -i "%~1" -i out_x264_h264.mp4  -lavfi ssim="stats_file=x264_h264_ssim.log'" -f null - > "trace.txt"  2>&1
> +for /f "tokens=*" %%A in ('type "trace.txt"') do @echo x264_h264: %%A >end.txt
> +type end.txt >> result.txt
> +
> +del trace.txt
> +del end.txt
> +
> +echo
> +type result.txt
> +
> +
> +:error
> +*/
> +
> +
> +/**
> +* d3d11va integration test bat file
> +rem echo off
> +if "%~1"=="" (
> +echo input file name is empty. Use dx11_transcode_amf_h264.bat video.mp4
> +goto error
> +)
> +
> +SET "CWD=%~dp0"
> +SET bitrate=5M
> +SET maxbitrate=6M
> +SET bufsize=2M
> +
> +rem change path to ffmpeg.exe if needed
> +
> +"%CWD%\..\bin\ffmpeg.exe" -y -t 100 -hwaccel d3d11va -hwaccel_output_format d3d11 -threads 1 -i "%~1" -c:v h264_amf          -b:v %bitrate% -maxrate %maxbitrate% -bufsize %bufsize% -rc vbr_peak shared_dx11_amf_h264.mp4
> +"%CWD%\..\bin\ffmpeg.exe" -y -t 100 -hwaccel d3d11va                              -threads 1 -i "%~1" -c:v h264_amf_d3d11va  -b:v %bitrate% -maxrate %maxbitrate% -bufsize %bufsize% -rc vbr_peak custom_dx11_amf_h264.mp4
> +
> +*/

Don't include any of this stuff.

> +
> diff --git a/libavcodec/amfenc_hevc.c b/libavcodec/amfenc_hevc.c
> new file mode 100644
> index 0000000..4d3c7d4
> --- /dev/null
> +++ b/libavcodec/amfenc_hevc.c
> @@ -0,0 +1,354 @@
> +/*
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#include "amfenc.h"
> +//#include "compat/amd/amf/public/include/components/VideoEncoderHEVC.h"
> +#include "libavutil/opt.h"
> +#include "libavutil/internal.h"
> +#include "internal.h"
> +
> +#define OFFSET(x) offsetof(AmfContext, x)
> +#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
> +static const AVOption options[] = {
> +    { "usage",          "Set the encoding usage",             OFFSET(usage),          AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING }, AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING, AMF_VIDEO_ENCODER_HEVC_USAGE_WEBCAM, VE, "usage" },
> +    { "transcoding",    "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING },         0, 0, VE, "usage" },
> +    { "ultralowlatency","", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_ULTRA_LOW_LATENCY },    0, 0, VE, "usage" },
> +    { "lowlatency",     "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_LOW_LATENCY },          0, 0, VE, "usage" },
> +    { "webcam",         "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_WEBCAM },               0, 0, VE, "usage" },

Could some of this be in common with the H.264 encoder?  (Maybe in the header?)
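
For illustration, the codec-agnostic entries could live in amfenc.h as a macro that both option tables expand (a sketch; the macro name is made up, and only options backed by AmfContext fields common to both encoders would qualify):

    #define AMF_COMMON_OPTIONS \
        { "preanalysis", "Enable preanalysis", OFFSET(preanalysis), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
        { "enforce_hrd", "Enforce HRD",        OFFSET(enforce_hrd), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }, \
        { "filler_data", "Filler Data Enable", OFFSET(filler_data), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE }

Each encoder already defines OFFSET and VE before its options[] array, so the macro would expand correctly in both files.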

> +
> +    { "profile",        "Set the profile (default main)",           OFFSET(profile),   AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN }, AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN, AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN, VE, "profile" },
> +    { "main",           "", 0,                      AV_OPT_TYPE_CONST,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN }, 0, 0, VE, "profile" },
> +
> +    { "profile_tier",   "Set the profile tier (default main)",      OFFSET(tier), AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_TIER_MAIN }, AMF_VIDEO_ENCODER_HEVC_TIER_MAIN, AMF_VIDEO_ENCODER_HEVC_TIER_HIGH, VE, "tier" },
> +    { "main",           "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_TIER_MAIN }, 0, 0, VE, "tier" },
> +    { "high",           "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_TIER_HIGH }, 0, 0, VE, "tier" },
> +
> +    { "level",          "Set the encoding level (default auto)",    OFFSET(level), AV_OPT_TYPE_INT,{ .i64 = 0 }, 0, AMF_LEVEL_6_2, VE, "level" },
> +    { "auto",           "", 0, AV_OPT_TYPE_CONST, { .i64 = 0             }, 0, 0, VE, "level" },
> +    { "1.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_1   }, 0, 0, VE, "level" },
> +    { "2.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_2   }, 0, 0, VE, "level" },
> +    { "2.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_2_1 }, 0, 0, VE, "level" },
> +    { "3.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_3   }, 0, 0, VE, "level" },
> +    { "3.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_3_1 }, 0, 0, VE, "level" },
> +    { "4.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_4   }, 0, 0, VE, "level" },
> +    { "4.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_4_1 }, 0, 0, VE, "level" },
> +    { "5.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_5   }, 0, 0, VE, "level" },
> +    { "5.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_5_1 }, 0, 0, VE, "level" },
> +    { "5.2",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_5_2 }, 0, 0, VE, "level" },
> +    { "6.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_6   }, 0, 0, VE, "level" },
> +    { "6.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_6_1 }, 0, 0, VE, "level" },
> +    { "6.2",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_6_2 }, 0, 0, VE, "level" },
> +    
> +    { "quality",        "Set the encoding quality",                 OFFSET(quality),      AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_SPEED }, AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_QUALITY, AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_SPEED, VE, "quality" },
> +    { "balanced",       "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_BALANCED }, 0, 0, VE, "quality" },
> +    { "speed",          "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_SPEED    }, 0, 0, VE, "quality" },
> +    { "quality",        "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_QUALITY  }, 0, 0, VE, "quality" },
> +
> +    { "rc",             "Set the rate control mode",                OFFSET(rate_control_mode),   AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR }, AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CONSTANT_QP, AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CBR, VE, "rc" },
> +    { "cqp",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CONSTANT_QP             }, 0, 0, VE, "rc" },
> +    { "cbr",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CBR                     }, 0, 0, VE, "rc" },
> +    { "vbr_peak",       "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR    }, 0, 0, VE, "rc" },
> +    { "vbr_latency",    "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_LATENCY_CONSTRAINED_VBR }, 0, 0, VE, "rc" },
> +    

Trailing spaces.

> +
> +    { "header_insertion_mode",        "Set header insertion mode",  OFFSET(header_insertion_mode),      AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_NONE }, AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_NONE, AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_IDR_ALIGNED, VE, "hdrmode" },
> +    { "balanced",       "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_NONE        }, 0, 0, VE, "hdrmode" },
> +    { "speed",          "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_GOP_ALIGNED }, 0, 0, VE, "hdrmode" },
> +    { "quality",        "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_IDR_ALIGNED }, 0, 0, VE, "hdrmode" },

Names look suspicious... these read like a copy/paste of the quality-preset entries; going by the values they should presumably be named after the insertion modes (none / gop_aligned / idr_aligned).

> +
> +    { "gops_per_idr",    "GOPs per IDR 0-no IDR will be inserted",  OFFSET(gops_per_idr),  AV_OPT_TYPE_INT,{ .i64 = 60 }, 0, INT_MAX, VE },
> +    { "preanalysis",    "Enable preanalysis",                       OFFSET(preanalysis),   AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
> +    { "vbaq",           "Enable VBAQ",                              OFFSET(enable_vbaq),   AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
> +    { "enforce_hrd",    "Enforce HRD",                              OFFSET(enforce_hrd),   AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
> +    { "filler_data",    "Filler Data Enable",                       OFFSET(filler_data),   AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
> +    { "max_au_size",    "Max AU Size in bits",                      OFFSET(max_au_size),   AV_OPT_TYPE_INT,{ .i64 = 0 }, 0, INT_MAX, VE, NULL },
> +    { "min_qp_i",       "min quantization parameter for I-frame",   OFFSET(min_qp_i),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
> +    { "max_qp_i",       "max quantization parameter for I-frame",   OFFSET(max_qp_i),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
> +    { "min_qp_p",       "min quantization parameter for P-frame",   OFFSET(min_qp_p),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
> +    { "max_qp_p",       "max quantization parameter for P-frame",   OFFSET(max_qp_p),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
> +    { "qp_p",           "quantization parameter for P-frame",       OFFSET(qp_p),          AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
> +    { "qp_i",           "quantization parameter for I-frame",       OFFSET(qp_i),          AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
> +    { "skip_frame",     "Rate Control Based Frame Skip",            OFFSET(skip_frame),    AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
> +    { "me_half_pel",    "Enable ME Half Pixel",                     OFFSET(me_half_pel),   AV_OPT_TYPE_BOOL,{ .i64 = 1 }, 0, 1, VE, NULL },
> +    { "me_quater_pel",  "Enable ME Quarter Pixel ",                 OFFSET(me_quater_pel), AV_OPT_TYPE_BOOL,{ .i64 = 1 }, 0, 1, VE, NULL },
> +
> +    { NULL }
> +};
> +
> +static av_cold int amf_encode_init_hevc(AVCodecContext *avctx)
> +{
> +    int                 ret = 0;
> +    AMF_RESULT          res = AMF_OK;
> +    AmfContext         *ctx = avctx->priv_data;
> +    AMFVariantStruct    var = {0};
> +
> +    AMFSize             framesize = AMFConstructSize(avctx->width, avctx->height);
> +    AMFRate             framerate = AMFConstructRate(avctx->time_base.den, avctx->time_base.num * avctx->ticks_per_frame);
> +
> +    int                 deblocking_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
> +
> +    if ((ret = ff_amf_encode_init(avctx)) < 0)
> +        return ret;
> +    

Trailing spaces.

> +    // init static parameters
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_USAGE, ctx->usage);
> +
> +    AMF_ASSIGN_PROPERTY_SIZE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_FRAMESIZE, framesize);
> +
> +    AMF_ASSIGN_PROPERTY_RATE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_FRAMERATE, framerate);
> +
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PROFILE, ctx->profile);
> +
> +    switch (ctx->profile) {
> +    case AMF_VIDEO_ENCODER_HEVC_TIER_HIGH:
> +        avctx->profile = FF_PROFILE_HEVC_REXT;
> +        break;
> +    case AMF_VIDEO_ENCODER_HEVC_TIER_MAIN:
> +        avctx->profile = FF_PROFILE_HEVC_MAIN;
> +        break;

It's set by the user - you should be using it, not writing it.

> +    default:
> +        break;
> +    }
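
Incidentally, the switch above compares ctx->profile against the _TIER_ constants, which looks like a mix-up. Going by the comment, the mapping should run the other way, roughly like this (a sketch, not a drop-in fix):

    if (avctx->profile == FF_PROFILE_HEVC_MAIN)
        ctx->profile = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN;
    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PROFILE, ctx->profile);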
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_TIER, ctx->tier);
> +
> +    if (ctx->level != 0) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PROFILE_LEVEL, ctx->level);
> +        avctx->level = ctx->level;

As profile, you should be using it, not writing it.

> +    }
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET, ctx->quality);
> +    // Maximum Reference Frames
> +    if (avctx->refs != 0) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MAX_NUM_REFRAMES, avctx->refs);
> +    }
> +    // Aspect Ratio
> +    if (avctx->sample_aspect_ratio.den && avctx->sample_aspect_ratio.num) {
> +        AMFRatio ratio = AMFConstructRatio(avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
> +        AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_ASPECT_RATIO, ratio);
> +    }
> +
> +    // Picture control properties
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_NUM_GOPS_PER_IDR, ctx->gops_per_idr);
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_GOP_SIZE, avctx->gop_size);
> +    if (avctx->slices > 1) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_SLICES_PER_FRAME, avctx->slices);
> +    }
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_DE_BLOCKING_FILTER_DISABLE, deblocking_filter);
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE, ctx->header_insertion_mode);
> +
> +    // Rate control
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD, ctx->rate_control_mode);
> +    if (avctx->rc_buffer_size)
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_VBV_BUFFER_SIZE, avctx->rc_buffer_size);
> +
> +    if (avctx->rc_initial_buffer_occupancy != 0) {
> +        int percent = avctx->rc_buffer_size * 64 / avctx->rc_initial_buffer_occupancy;
> +        if (percent > 64)
> +            percent = 64;

As H.264; what is this trying to do?

> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_INITIAL_VBV_BUFFER_FULLNESS, percent);
> +    }
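
For what it's worth, if INITIAL_VBV_BUFFER_FULLNESS is a 0-64 fraction of the VBV size (an assumption, though the clamp to 64 in the patch suggests it), expressing the user's initial occupancy would look more like:

    if (avctx->rc_buffer_size && avctx->rc_initial_buffer_occupancy) {
        int64_t fullness = avctx->rc_initial_buffer_occupancy * 64LL / avctx->rc_buffer_size;
        if (fullness > 64)
            fullness = 64;
        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_INITIAL_VBV_BUFFER_FULLNESS, fullness);
    }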
> +    // Pre-Pass, Pre-Analysis, Two-Pass
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_PREANALYSIS_ENABLE, ctx->preanalysis);
> +
> +    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CONSTANT_QP) {
> +        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_ENABLE_VBAQ, false);
> +        if (ctx->enable_vbaq)
> +            av_log(ctx, AV_LOG_WARNING, "VBAQ is not supported by cqp Rate Control Method, automatically disabled.");
> +    } else {
> +        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_ENABLE_VBAQ, !!ctx->enable_vbaq);
> +    }
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MOTION_HALF_PIXEL, ctx->me_half_pel);
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MOTION_QUARTERPIXEL, ctx->me_quater_pel);
> +
> +    // init encoder
> +    res = ctx->encoder->pVtbl->Init(ctx->encoder, ctx->format, avctx->width, avctx->height);
> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "encoder->Init() failed with error %d", res);
> +
> +    // init dynamic rate control params
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_ENFORCE_HRD, ctx->enforce_hrd);
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_FILLER_DATA_ENABLE, ctx->filler_data);
> +
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_TARGET_BITRATE, avctx->bit_rate);
> +    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CBR) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PEAK_BITRATE, avctx->bit_rate);
> +    } else {
> +        int rc_max_rate = avctx->rc_max_rate >= avctx->bit_rate ? avctx->rc_max_rate : avctx->bit_rate * 13 / 10;

Why 13/10?

> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PEAK_BITRATE, rc_max_rate);
> +    }
> +
> +    // init dynamic picture control params
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MAX_AU_SIZE, ctx->max_au_size);
> +
> +
> +    if (ctx->min_qp_i != -1) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MIN_QP_I, ctx->min_qp_i);
> +    }
> +    if (ctx->max_qp_i != -1) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MAX_QP_I, ctx->max_qp_i);
> +    }
> +    if (ctx->min_qp_p != -1) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MIN_QP_P, ctx->min_qp_p);
> +    }
> +    if (ctx->max_qp_p != -1) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MAX_QP_P, ctx->max_qp_p);
> +    }
> +
> +    if (ctx->qp_p != -1) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_QP_I, ctx->qp_p);
> +    }
> +    if (ctx->qp_i != -1) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_QP_P, ctx->qp_i);
> +    }
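
As a side note, the two assignments just above look swapped: qp_p is written to ..._QP_I and qp_i to ..._QP_P. Presumably the intent was:

    if (ctx->qp_i != -1)
        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_QP_I, ctx->qp_i);
    if (ctx->qp_p != -1)
        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_QP_P, ctx->qp_p);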
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_SKIP_FRAME_ENABLE, ctx->skip_frame);
> +
> +
> +    // fill extradata
> +    AMFVariantInit(&var);

Check return value?

> +    res = ctx->encoder->pVtbl->GetProperty(ctx->encoder, AMF_VIDEO_ENCODER_HEVC_EXTRADATA, &var);
> +    if(res == AMF_OK && var.pInterface){ 

Formatting.

> +        AMFBuffer* buffer;
> +        AMFGuid guid = IID_AMFBuffer();
> +
> +        var.pInterface->pVtbl->QueryInterface(var.pInterface, &guid, (void**)&buffer); // query for buffer interface
> +
> +        avctx->extradata_size = (int)buffer->pVtbl->GetSize(buffer);
> +        avctx->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
> +        if (!avctx->extradata) {
> +            buffer->pVtbl->Release(buffer);
> +            var.pInterface->pVtbl->Release(var.pInterface);
> +            return AVERROR(ENOMEM);
> +        }
> +        memcpy(avctx->extradata, buffer->pVtbl->GetNative(buffer), avctx->extradata_size);
> +
> +        buffer->pVtbl->Release(buffer);
> +        var.pInterface->pVtbl->Release(var.pInterface);
> +    }

Should that fail if res is not OK?
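
If it should, the file's existing macro covers it (a sketch, assuming a missing EXTRADATA property is worth treating as fatal):

    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK && var.pInterface, AVERROR_BUG,
                        "GetProperty(AMF_VIDEO_ENCODER_HEVC_EXTRADATA) failed with error %d", res);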

> +    return 0;
> +}
> +static const AVCodecDefault defaults[] = {
> +    { "b",       "2M" },
> +    { "maxrate", "3M" },
> +    { "qmin",   "-1" },
> +    { "qmax",   "-1" },
> +    { "qdiff",  "-1" },
> +    { "qblur",  "-1" },
> +    { "qcomp",  "-1" },

Why set these?  You don't appear to be using them.

> +    { "g",      "250" },
> +    { "bf",     "0" },
> +    { NULL },
> +};
> +static const AVClass hevc_amf_class = {
> +    .class_name = "hevc_amf",
> +    .item_name = av_default_item_name,
> +    .option = options,
> +    .version = LIBAVUTIL_VERSION_INT,
> +};
> +static const AVClass hevc_amf_amf_d3d11va_class = {
> +    .class_name = "hevc_amf_amf_d3d11va",
> +    .item_name = av_default_item_name,
> +    .option = options,
> +    .version = LIBAVUTIL_VERSION_INT,
> +};
> +// regular encoder
> +AVCodec ff_hevc_amf_encoder = {
> +    .name           = "hevc_amf",
> +    .long_name      = NULL_IF_CONFIG_SMALL("AMD AMF HEVC encoder"),
> +    .type           = AVMEDIA_TYPE_VIDEO,
> +    .id             = AV_CODEC_ID_HEVC,
> +    .init           = amf_encode_init_hevc,
> +    .encode2        = ff_amf_encode_frame,
> +    .close          = ff_amf_encode_close,
> +    .priv_data_size = sizeof(AmfContext),
> +    .priv_class     = &hevc_amf_class,
> +    .defaults       = defaults,
> +    .capabilities   = AV_CODEC_CAP_DELAY,
> +    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
> +    .pix_fmts       = ff_amf_pix_fmts,
> +};
> +// encoder connected with D3D11 HW accelerator
> +AVCodec ff_hevc_amf_d3d11va_encoder = {
> +.name = "hevc_amf_d3d11va",
> +.long_name = NULL_IF_CONFIG_SMALL("AMD AMF HEVC encoder with d3d11va"),
> +.type = AVMEDIA_TYPE_VIDEO,
> +.id = AV_CODEC_ID_HEVC,
> +.init = amf_encode_init_hevc,
> +.encode2 = ff_amf_encode_frame,
> +.close = ff_amf_encode_close,
> +.priv_data_size = sizeof(AmfContext),
> +.priv_class = &hevc_amf_amf_d3d11va_class,
> +.defaults = defaults,
> +.capabilities = AV_CODEC_CAP_DELAY,
> +.caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
> +.pix_fmts = ff_amf_pix_fmts,
> +};
> +
> +/**
> +* Basic test BAT file:
> +echo off
> +if "%~1"=="" (
> +echo input file name is empty. Use basic_transcode_amf_hevc.bat video.mp4
> +goto error
> +)
> +
> +SET "CWD=%~dp0"
> +SET bitrate=5M
> +SET maxbitrate=6M
> +SET bufsize=2M
> +SET x265_preset=veryfast
> +SET amf_quality=speed
> +
> +
> +rem veryfast and zerolatency options make x265 comparable with VCE
> +
> +rem change path to ffmpeg.exe if needed
> +
> +"%CWD%\..\bin\ffmpeg.exe" -y -t 100 -threads 0 -i "%~1" -c:v hevc_amf  -b:v %bitrate% -maxrate %maxbitrate% -bufsize %bufsize% -rc vbr_peak -quality %amf_quality% out_amf_hevc.mp4
> +"%CWD%\..\bin\ffmpeg.exe" -y -t 100 -threads 0 -i "%~1" -c:v libx265   -b:v %bitrate% -maxrate %maxbitrate% -bufsize %bufsize% -preset %x265_preset% -x265-params vbv-maxrate=6000:vbv-bufsize=2000 -tune zerolatency out_x265_hevc.mp4
> +
> +echo PSNR > result.txt
> +
> +"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -threads 0 -i "%~1" -i out_amf_hevc.mp4  -lavfi psnr="stats_file='amf_hevc.psnr.log'"  -f null - > "trace.txt" 2>&1
> +for /f "tokens=*" %%A in ('type "trace.txt"') do @echo amf_hevc : %%A >end.txt
> +type end.txt >> result.txt
> +
> +
> +"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -threads 0 -i "%~1" -i out_x265_hevc.mp4 -lavfi psnr="stats_file='x265_hevc.psnr.log'" -f null - > "trace.txt" 2>&1
> +for /f "tokens=*" %%A in ('type "trace.txt"') do @echo x265_hevc: %%A >end.txt
> +type end.txt >> result.txt
> +
> +
> +echo SSIM >> result.txt
> +
> +"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -threads 0 -i "%~1" -i out_amf_hevc.mp4  -lavfi ssim="stats_file='amf_hevc_ssim.log'" -f null - > "trace.txt"  2>&1
> +for /f "tokens=*" %%A in ('type "trace.txt"') do @echo amf_hevc : %%A >end.txt
> +type end.txt >> result.txt
> +
> +"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -threads 0 -i "%~1" -i out_x265_hevc.mp4  -lavfi ssim="stats_file=x265_hevc_ssim.log'" -f null - > "trace.txt"  2>&1
> +for /f "tokens=*" %%A in ('type "trace.txt"') do @echo x265_hevc: %%A >end.txt
> +type end.txt >> result.txt
> +
> +del trace.txt
> +del end.txt
> +
> +echo
> +type result.txt
> +
> +
> +:error
> +
> +
> +*/

Don't include this stuff.

> \ No newline at end of file

git has another review comment for you.

> diff --git a/libavcodec/version.h b/libavcodec/version.h
> index 226da19..6c0d7a8 100644
> --- a/libavcodec/version.h
> +++ b/libavcodec/version.h
> @@ -28,8 +28,8 @@
>  #include "libavutil/version.h"
>  
>  #define LIBAVCODEC_VERSION_MAJOR  58
> -#define LIBAVCODEC_VERSION_MINOR   0
> -#define LIBAVCODEC_VERSION_MICRO 101
> +#define LIBAVCODEC_VERSION_MINOR   1
> +#define LIBAVCODEC_VERSION_MICRO 100
>  
>  #define LIBAVCODEC_VERSION_INT  AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
>                                                 LIBAVCODEC_VERSION_MINOR, \
> 

Thanks,

- Mark
mmironov Oct. 29, 2017, 2:54 p.m. UTC | #3
> -----Original Message-----

> From: ffmpeg-devel [mailto:ffmpeg-devel-bounces@ffmpeg.org] On Behalf

> Of Carl Eugen Hoyos

> Sent: October 28, 2017 3:19 PM

> To: FFmpeg development discussions and patches <ffmpeg-

> devel@ffmpeg.org>

> Subject: Re: [FFmpeg-devel] Added - HW accelerated H.264 and HEVC

> encoding for AMD GPUs based on AMF SDK

> 

> 2017-10-27 20:09 GMT+02:00 mmironov <mikhail.mironov@amd.com>:

> > +const enum AVPixelFormat ff_amf_pix_fmts[] = {

> > +    AV_PIX_FMT_NV12,

> 

> > +    AV_PIX_FMT_BGRA,

> > +    AV_PIX_FMT_ARGB,

> > +    AV_PIX_FMT_RGBA,

> 

> This is wrong, your encoders do not convert transparency information, there

> are 32 rgb formats without alpha channel.

> 

> > +    { AV_PIX_FMT_NV12,       AMF_SURFACE_NV12 },

> > +    { AV_PIX_FMT_D3D11,      AMF_SURFACE_NV12 },

> 

> Just curious: Can you explain this in simple words?

> 

> I will try not to comment on the headers you sent, Carl Eugen

> _______________________________________________

> ffmpeg-devel mailing list

> ffmpeg-devel@ffmpeg.org

> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel

Sure: the native format for the encoder is NV12. It can work via D3D11, accepting a DXGI_FORMAT_NV12 texture, or via D3D9, accepting a (D3DFORMAT)MAKEFOURCC('N', 'V', '1', '2') surface. AMF surfaces are wrappers around these objects and converters from system memory. In addition, the encoder has a built-in shader-based color converter. It can accept BGRA and RGBA formats and convert them into NV12, dropping the alpha channel. This is why I added these entries. Granted, the conversion lacks parameters; to support more options AMF has a separate color space component, also shader-based. Once the encoder is integrated I planned to ask the FFmpeg team's opinion on whether it is needed or not.
I made the FFmpeg encoder accept DX objects from other FFmpeg components where available.
Mikhail
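
On the pixel-format point above: if the intent is simply not to advertise alpha support while still accepting packed 32-bit RGB input, the alpha-less pixfmts would presumably be the ones to list (a sketch, not from the patch, and it assumes AMF_SURFACE_BGRA/RGBA are acceptable targets for the X-padded formats):

    const enum AVPixelFormat ff_amf_pix_fmts[] = {
        AV_PIX_FMT_NV12,
        AV_PIX_FMT_BGR0,    /* packed 32-bit RGB, no alpha */
        AV_PIX_FMT_RGB0,
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUYV422,
        AV_PIX_FMT_D3D11,
        AV_PIX_FMT_NONE
    };

The format_map in the patch already maps AV_PIX_FMT_BGR0 to AMF_SURFACE_BGRA; AV_PIX_FMT_RGB0 would need an equivalent AMF_SURFACE_RGBA entry.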
mmironov Oct. 29, 2017, 2:57 p.m. UTC | #4
> -----Original Message-----

> From: ffmpeg-devel [mailto:ffmpeg-devel-bounces@ffmpeg.org] On Behalf

> Of Mark Thompson

> Sent: October 28, 2017 5:29 PM

> To: ffmpeg-devel@ffmpeg.org

> Subject: Re: [FFmpeg-devel] Added - HW accelerated H.264 and HEVC

> encoding for AMD GPUs based on AMF SDK

> 

> On 27/10/17 19:09, mmironov wrote:

> > From b1b697aed459947cfa04bccdca0f7cfb5c8be72c Mon Sep 17 00:00:00

> 2001

> > From: mmironov <mikhail.mironov@amd.com>

> > Date: Fri, 27 Oct 2017 13:03:15 -0400

> > Subject: [PATCH] Added: HW accelerated H.264 and HEVC encoding for

> AMD GPUs

> >  based on AMF SDK

> 

> There isn't any sense in which this is "accelerated" is there?  Just say it's a

> hardware encoder (as you already do in the changelog).


OK, will do next time. 
Mikhail

> >

> > Signed-off-by: mmironov <mikhail.mironov@amd.com>

> > ---

> >  Changelog                |    3 +-

> >  compat/amd/amfsdkenc.h   | 1750

> ++++++++++++++++++++++++++++++++++++++++++++++

> >  configure                |   28 +

> >  libavcodec/Makefile      |    4 +

> >  libavcodec/allcodecs.c   |    4 +

> >  libavcodec/amfenc.c      |  463 ++++++++++++

> >  libavcodec/amfenc.h      |  131 ++++

> >  libavcodec/amfenc_h264.c |  467 +++++++++++++

> >  libavcodec/amfenc_hevc.c |  354 ++++++++++

> >  libavcodec/version.h     |    4 +-

> >  10 files changed, 3205 insertions(+), 3 deletions(-)

> >  create mode 100644 compat/amd/amfsdkenc.h

> >  create mode 100644 libavcodec/amfenc.c

> >  create mode 100644 libavcodec/amfenc.h

> >  create mode 100644 libavcodec/amfenc_h264.c

> >  create mode 100644 libavcodec/amfenc_hevc.c

> >

> > diff --git a/Changelog b/Changelog

> > index 6592d86..f0d22fa 100644

> > --- a/Changelog

> > +++ b/Changelog

> > @@ -6,7 +6,8 @@ version <next>:

> >  - Dropped support for OpenJPEG versions 2.0 and below. Using OpenJPEG

> now

> >    requires 2.1 (or later) and pkg-config.

> >  - VDA dropped (use VideoToolbox instead)

> > -

> > +- AMF H.264 encoder

> > +- AMF HEVC encoder

> >

> >  version 3.4:

> >  - deflicker video filter

> > diff --git a/compat/amd/amfsdkenc.h b/compat/amd/amfsdkenc.h

> > new file mode 100644

> > index 0000000..a640c17

> > --- /dev/null

> > +++ b/compat/amd/amfsdkenc.h

> 

> (Ignoring the header, will consider this separately.)

> 

> > diff --git a/configure b/configure

> > index 0e1ccaa..229443f 100755

> > --- a/configure

> > +++ b/configure

> > @@ -304,6 +304,7 @@ External library support:

> >

> >    The following libraries provide various hardware acceleration features:

> >    --disable-audiotoolbox   disable Apple AudioToolbox code [autodetect]

> > +  --disable-amf            disable AMF video encoding code [autodetect]

> >    --disable-cuda           disable dynamically linked Nvidia CUDA code

> [autodetect]

> >    --enable-cuda-sdk        enable CUDA features that require the CUDA SDK

> [no]

> >    --disable-cuvid          disable Nvidia CUVID support [autodetect]

> > @@ -1643,6 +1644,7 @@ EXTERNAL_LIBRARY_LIST="

> >  HWACCEL_AUTODETECT_LIBRARY_LIST="

> >      audiotoolbox

> >      crystalhd

> > +	amf

> >      cuda

> >      cuvid

> >      d3d11va

> 

> Lists in configure should be kept in alphabetical order.

> 

> > @@ -2785,12 +2787,16 @@ scale_npp_filter_deps="cuda libnpp"

> >  scale_cuda_filter_deps="cuda_sdk"

> >  thumbnail_cuda_filter_deps="cuda_sdk"

> >

> > +amf_deps_any="dlopen LoadLibrary"

> > +amf_encoder_deps="amf"

> > +

> >  nvenc_deps="cuda"

> >  nvenc_deps_any="libdl LoadLibrary"

> >  nvenc_encoder_deps="nvenc"

> >

> >  h263_v4l2m2m_decoder_deps="v4l2_m2m h263_v4l2_m2m"

> >  h263_v4l2m2m_encoder_deps="v4l2_m2m h263_v4l2_m2m"

> > +h264_amf_encoder_deps="amf"

> >  h264_crystalhd_decoder_select="crystalhd h264_mp4toannexb_bsf

> h264_parser"

> >  h264_cuvid_decoder_deps="cuda cuvid"

> >  h264_cuvid_decoder_select="h264_mp4toannexb_bsf"

> > @@ -2809,6 +2815,7 @@

> h264_vaapi_encoder_deps="VAEncPictureParameterBufferH264"

> >  h264_vaapi_encoder_select="cbs_h264 vaapi_encode"

> >  h264_v4l2m2m_decoder_deps="v4l2_m2m h264_v4l2_m2m"

> >  h264_v4l2m2m_encoder_deps="v4l2_m2m h264_v4l2_m2m"

> > +hevc_amf_encoder_deps="amf"

> >  hevc_cuvid_decoder_deps="cuda cuvid"

> >  hevc_cuvid_decoder_select="hevc_mp4toannexb_bsf"

> >  hevc_mediacodec_decoder_deps="mediacodec"

> > @@ -2830,6 +2837,8 @@ mjpeg_vaapi_encoder_select="vaapi_encode

> jpegtables"

> >  mpeg1_cuvid_decoder_deps="cuda cuvid"

> >  mpeg1_v4l2m2m_decoder_deps="v4l2_m2m mpeg1_v4l2_m2m"

> >  mpeg2_crystalhd_decoder_select="crystalhd"

> > +amf_h264_encoder_select="h264_amf_encoder"

> > +amf_hevc_encoder_select="hevc_amf_encoder"

> 

> These names aren't mentioned anywhere else.

> 

> >  mpeg2_cuvid_decoder_deps="cuda cuvid"

> >  mpeg2_mmal_decoder_deps="mmal"

> >  mpeg2_mediacodec_decoder_deps="mediacodec"

> > @@ -6305,6 +6314,18 @@ else

> >      disable cuda cuvid nvenc

> >  fi

> >

> > +if enabled x86; then

> > +    case $target_os in

> > +        mingw32*|mingw64*|win32|win64|cygwin*)

> > +            ;;

> > +        *)

> > +            disable  amf

> > +            ;;

> > +    esac

> > +else

> > +    disable amf

> > +fi

> 

> Why this OS test?  It should just be going by whether the relevant build

> packages are present.

> 

> > +

> >  enabled nvenc &&

> >      check_cc -I$source_path <<EOF || disable nvenc

> >  #include "compat/nvenc/nvEncodeAPI.h"

> > @@ -6313,6 +6334,13 @@ void f(void) { struct { const GUID guid; } s[] = { {

> NV_ENC_PRESET_HQ_GUID } };

> >  int main(void) { return 0; }

> >  EOF

> >

> > +enabled amf &&

> > +    check_cc -I$source_path <<EOF || disable amf

> > +#include "compat/amd/amfsdkenc.h"

> > +AMFFactory *factory;

> > +int main(void) { return 0; }

> > +EOF

> > +

> >  # Funny iconv installations are not unusual, so check it after all flags have

> been set

> >  if enabled libc_iconv; then

> >      check_func_headers iconv.h iconv

> > diff --git a/libavcodec/Makefile b/libavcodec/Makefile

> > index bc4d7da..cbf45ac 100644

> > --- a/libavcodec/Makefile

> > +++ b/libavcodec/Makefile

> > @@ -50,6 +50,7 @@ OBJS = allcodecs.o                                                      \

> >  # subsystems

> >  OBJS-$(CONFIG_AANDCTTABLES)            += aandcttab.o

> >  OBJS-$(CONFIG_AC3DSP)                  += ac3dsp.o ac3.o ac3tab.o

> > +OBJS-$(CONFIG_AMF)                     += amfenc.o

> >  OBJS-$(CONFIG_AUDIO_FRAME_QUEUE)       += audio_frame_queue.o

> >  OBJS-$(CONFIG_AUDIODSP)                += audiodsp.o

> >  OBJS-$(CONFIG_BLOCKDSP)                += blockdsp.o

> > @@ -334,6 +335,7 @@ OBJS-$(CONFIG_H264_DECODER)            +=

> h264dec.o h264_cabac.o h264_cavlc.o \

> >  OBJS-$(CONFIG_H264_CUVID_DECODER)      += cuvid.o

> >  OBJS-$(CONFIG_H264_MEDIACODEC_DECODER) += mediacodecdec.o

> >  OBJS-$(CONFIG_H264_MMAL_DECODER)       += mmaldec.o

> > +OBJS-$(CONFIG_H264_AMF_ENCODER)        += amfenc_h264.o

> >  OBJS-$(CONFIG_H264_NVENC_ENCODER)      += nvenc_h264.o

> >  OBJS-$(CONFIG_NVENC_ENCODER)           += nvenc_h264.o

> >  OBJS-$(CONFIG_NVENC_H264_ENCODER)      += nvenc_h264.o

> > @@ -352,6 +354,7 @@ OBJS-$(CONFIG_HEVC_DECODER)            +=

> hevcdec.o hevc_mvs.o \

> >                                            hevcdsp.o hevc_filter.o hevc_data.o

> >  OBJS-$(CONFIG_HEVC_CUVID_DECODER)      += cuvid.o

> >  OBJS-$(CONFIG_HEVC_MEDIACODEC_DECODER) += mediacodecdec.o

> > +OBJS-$(CONFIG_HEVC_AMF_ENCODER)        += amfenc_hevc.o

> >  OBJS-$(CONFIG_HEVC_NVENC_ENCODER)      += nvenc_hevc.o

> >  OBJS-$(CONFIG_NVENC_HEVC_ENCODER)      += nvenc_hevc.o

> >  OBJS-$(CONFIG_HEVC_QSV_DECODER)        += qsvdec_h2645.o

> > @@ -1056,6 +1059,7 @@ SKIPHEADERS-$(CONFIG_JNI)              += ffjni.h

> >  SKIPHEADERS-$(CONFIG_LIBVPX)           += libvpx.h

> >  SKIPHEADERS-$(CONFIG_LIBWEBP_ENCODER)  += libwebpenc_common.h

> >  SKIPHEADERS-$(CONFIG_MEDIACODEC)       += mediacodecdec_common.h

> mediacodec_surface.h mediacodec_wrapper.h mediacodec_sw_buffer.h

> > +SKIPHEADERS-$(CONFIG_AMF)              += amfenc.h

> >  SKIPHEADERS-$(CONFIG_NVENC)            += nvenc.h

> >  SKIPHEADERS-$(CONFIG_QSV)              += qsv.h qsv_internal.h

> >  SKIPHEADERS-$(CONFIG_QSVDEC)           += qsvdec.h

> > diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c

> > index 8369126..d597540 100644

> > --- a/libavcodec/allcodecs.c

> > +++ b/libavcodec/allcodecs.c

> > @@ -649,6 +649,8 @@ static void register_all(void)

> >       * above is available */

> >      REGISTER_ENCODER(H263_V4L2M2M,      h263_v4l2m2m);

> >      REGISTER_ENCDEC (LIBOPENH264,       libopenh264);

> > +    REGISTER_ENCODER(H264_AMF,          h264_amf);

> > +	REGISTER_ENCODER(H264_AMF,          h264_amf_d3d11va);

> 

> No tabs.  Why is the d3d11 version separate?  The encoder should be able to

> accept multiple pixfmts.

> 

> >      REGISTER_DECODER(H264_CUVID,        h264_cuvid);

> >      REGISTER_ENCODER(H264_NVENC,        h264_nvenc);

> >      REGISTER_ENCODER(H264_OMX,          h264_omx);

> > @@ -661,6 +663,8 @@ static void register_all(void)

> >      REGISTER_ENCODER(NVENC_H264,        nvenc_h264);

> >      REGISTER_ENCODER(NVENC_HEVC,        nvenc_hevc);

> >  #endif

> > +    REGISTER_ENCODER(HEVC_AMF,          hevc_amf);

> > +	REGISTER_ENCODER(HEVC_AMF,          hevc_amf_d3d11va);

> 

> Tab.

> 

> >      REGISTER_DECODER(HEVC_CUVID,        hevc_cuvid);

> >      REGISTER_DECODER(HEVC_MEDIACODEC,   hevc_mediacodec);

> >      REGISTER_ENCODER(HEVC_NVENC,        hevc_nvenc);

> > diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c

> > new file mode 100644

> > index 0000000..8717928

> > --- /dev/null

> > +++ b/libavcodec/amfenc.c

> > @@ -0,0 +1,463 @@

> > +/*

> > + * This file is part of FFmpeg.

> > + *

> > + * FFmpeg is free software; you can redistribute it and/or

> > + * modify it under the terms of the GNU Lesser General Public

> > + * License as published by the Free Software Foundation; either

> > + * version 2.1 of the License, or (at your option) any later version.

> > + *

> > + * FFmpeg is distributed in the hope that it will be useful,

> > + * but WITHOUT ANY WARRANTY; without even the implied warranty of

> > + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the

> GNU

> > + * Lesser General Public License for more details.

> > + *

> > + * You should have received a copy of the GNU Lesser General Public

> > + * License along with FFmpeg; if not, write to the Free Software

> > + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301

> USA

> > + */

> > +#include "amfenc.h"

> > +

> > +//#include

> "compat/amd/amf/public/include/components/VideoEncoderVCE.h"

> > +//#include

> "compat/amd/amf/public/include/components/VideoEncoderHEVC.h"

> 

> Why are these commented out?

> 

> > +

> > +#include "libavutil/time.h"

> > +#include "libavutil/imgutils.h"

> > +#include "libavutil/hwcontext.h"

> > +#include "libavutil/hwcontext_d3d11va.h"

> > +#include "libavutil/avassert.h"

> > +#include "libavutil/mem.h"

> > +#include "libavutil/pixdesc.h"

> > +#include "libavutil/hwcontext.h"

> 

> Headers should be in alphabetical order, and not include the same thing

> multiple times.

> 

> > +#include "internal.h"

> > +

> > +#include <d3d11.h>

> > +

> > +#ifdef _WIN32

> > +#include "compat/w32dlfcn.h"

> > +#else

> > +#include <dlfcn.h>

> > +#endif

> > +

> > +#define FFMPEG_AMF_WRITER_ID L"ffmpeg_amf"

> > +#define AMF_DEBUG_TRACE 0

> > +

> > +const enum AVPixelFormat ff_amf_pix_fmts[] = {

> > +    AV_PIX_FMT_NV12,

> > +    AV_PIX_FMT_BGRA,

> > +    AV_PIX_FMT_ARGB,

> > +    AV_PIX_FMT_RGBA,

> > +    AV_PIX_FMT_YUV420P,

> > +    AV_PIX_FMT_YUYV422,

> > +    AV_PIX_FMT_D3D11,

> > +    AV_PIX_FMT_NONE

> > +};

> > +

> > +typedef struct FormatMap {

> > +    enum AVPixelFormat       av_format;

> > +    enum AMF_SURFACE_FORMAT  amf_format;

> > +} FormatMap;

> > +

> > +static const FormatMap format_map[] =

> > +{

> > +    { AV_PIX_FMT_NONE,       AMF_SURFACE_UNKNOWN },

> 

> Seems a bit pointless to include NONE in this list explicitly.

> 

> > +    { AV_PIX_FMT_NV12,       AMF_SURFACE_NV12 },

> > +    { AV_PIX_FMT_BGRA,       AMF_SURFACE_BGRA },

> > +    { AV_PIX_FMT_ARGB,       AMF_SURFACE_ARGB },

> > +    { AV_PIX_FMT_RGBA,       AMF_SURFACE_RGBA },

> > +    { AV_PIX_FMT_GRAY8,      AMF_SURFACE_GRAY8 },

> > +    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YUV420P },

> > +    { AV_PIX_FMT_BGR0,       AMF_SURFACE_BGRA },

> > +    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YV12 },

> > +    { AV_PIX_FMT_YUYV422,    AMF_SURFACE_YUY2 },

> 

> Do all of these formats actually work?

> 

> > +    { AV_PIX_FMT_D3D11,      AMF_SURFACE_NV12 },

> 

> D3D11 surfaces need not be NV12.  The actual format is in

> AVHWFramesContext.sw_format - if you only support 8-bit then something

> nasty probably happens if you give it P010 surfaces.

> 

> > +};

> > +

> > +static enum AMF_SURFACE_FORMAT amf_av_to_amf_format(enum

> AVPixelFormat fmt)

> > +{

> > +    for (int i = 0; i < amf_countof(format_map); i++) {

> > +        if (format_map[i].av_format == fmt) {

> > +            return format_map[i].amf_format;

> > +        }

> > +    }

> > +    return AMF_SURFACE_UNKNOWN;

> > +}

> > +

> > +// virtual functions decalred

> 

> What does this comment mean?

> 

> > +static void AMF_CDECL_CALL AMFTraceWriter_Write(AMFTraceWriter*

> pThis,

> > +    const wchar_t* scope, const wchar_t* message)

> 

> "type *variable" is preferred to "type* variable".

> 

> Also, trailing space.

> 

> > +{

> > +    AmfTraceWriter *tracer = (AmfTraceWriter*)pThis;

> > +#if AMF_DEBUG_TRACE

> > +    av_log(tracer->avctx, AV_LOG_INFO, "%ls: %ls", scope, message);

> > +#else

> > +    av_log(tracer->avctx, AV_LOG_TRACE, "%ls: %ls", scope, message);

> > +#endif

> 

> What sort of messages actually come out of this trace function?

> 

> If this is intended for debugging (and should never be seen by the user), just

> make them AV_LOG_DEBUG and drop the AMF_DEBUG_TRACE define.

> 

> > +}

> > +

> > +static void AMF_CDECL_CALL AMFTraceWriter_Flush(AMFTraceWriter*

> pThis)

> > +{

> > +}

> > +

> > +static AMFTraceWriterVtbl tracer_vtbl =

> > +{

> > +    .Write = AMFTraceWriter_Write,

> > +    .Flush = AMFTraceWriter_Flush,

> > +};

> > +

> > +static int amf_load_library(AVCodecContext *avctx)

> > +{

> > +    AmfContext             *ctx = avctx->priv_data;

> > +    AMFInit_Fn              init_fun = 0;

> 

> NULL

> 

> > +    AMFQueryVersion_Fn      version_fun = 0;

> 

> NULL

> 

> > +

> > +    ctx->library = dlopen(AMF_DLL_NAMEA, RTLD_NOW | RTLD_LOCAL);

> > +    AMF_RETURN_IF_FALSE(ctx, ctx->library != 0,

> 

> Just ctx->library.

> 

> Also trailing space.

> 

> > +        AVERROR_UNKNOWN, "DLL %s failed to open. \n",

> AMF_DLL_NAMEA);

> > +

> > +    init_fun = (AMFInit_Fn)dlsym(ctx->library,

> AMF_INIT_FUNCTION_NAME);

> > +    AMF_RETURN_IF_FALSE(ctx, init_fun != 0, AVERROR_UNKNOWN, "DLL

> %s failed to find function %s. \n", AMF_DLL_NAMEA,

> AMF_INIT_FUNCTION_NAME);

> > +

> > +    version_fun = (AMFQueryVersion_Fn)dlsym(ctx->library,

> AMF_QUERY_VERSION_FUNCTION_NAME);

> > +    AMF_RETURN_IF_FALSE(ctx, init_fun != 0, AVERROR_UNKNOWN, "DLL

> %s failed to find function %s. \n", AMF_DLL_NAMEA,

> AMF_QUERY_VERSION_FUNCTION_NAME);

> > +

> > +    version_fun(&ctx->version);

> > +    init_fun(AMF_FULL_VERSION, &ctx->factory);

> > +    ctx->factory->pVtbl->GetTrace(ctx->factory, &ctx->trace);

> > +    ctx->factory->pVtbl->GetDebug(ctx->factory, &ctx->debug);

> 

> Do all of these functions necessarily succeed?

> 

> > +    return 0;

> > +}

> > +

> > +

> > +static int amf_init_context(AVCodecContext *avctx)

> > +{

> > +    AmfContext         *ctx = avctx->priv_data;

> > +    AMF_RESULT          res = AMF_OK;

> > +

> > +    ctx->trace->pVtbl->EnableWriter(ctx->trace,

> AMF_TRACE_WRITER_CONSOLE, false);

> 

> Using false probably wants <stdbool.h> and checking compiler support for it?

> Since compilers without stdbool are supported, we generally use 0 and 1

> instead.

> 

> > +

> > +#if AMF_DEBUG_TRACE

> > +    ctx->trace->pVtbl->EnableWriter(ctx->trace,

> AMF_TRACE_WRITER_DEBUG_OUTPUT, true);

> > +    ctx->trace->pVtbl->SetWriterLevel(ctx->trace,

> AMF_TRACE_WRITER_DEBUG_OUTPUT, AMF_TRACE_TRACE);

> > +    ctx->trace->pVtbl->SetGlobalLevel(ctx->trace, AMF_TRACE_TRACE);

> > +#else

> > +    ctx->trace->pVtbl->EnableWriter(ctx->trace,

> AMF_TRACE_WRITER_DEBUG_OUTPUT, false);

> > +#endif

> > +    ctx->tracer.vtbl = &tracer_vtbl;

> > +    ctx->tracer.avctx = avctx;

> > +    ctx->trace->pVtbl->RegisterWriter(ctx->trace,

> FFMPEG_AMF_WRITER_ID,

> > +        (AMFTraceWriter*)&ctx->tracer, true);

> 

> Can any of these functions fail?

> 

> > +

> > +    res = ctx->factory->pVtbl->CreateContext(ctx->factory, &ctx->context);

> > +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN,

> "CreateContext() failed with error %d", res);

> 

> Newline.

> 

> > +

> > +    // try to reuse existing DX device

> > +

> > +    if (avctx->hw_frames_ctx) {

> > +        AVHWFramesContext *device_ctx = (AVHWFramesContext*)avctx-

> >hw_frames_ctx->data;

> > +        if (device_ctx->device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA){

> > +            if (device_ctx->device_ctx->hwctx) {

> > +                AVD3D11VADeviceContext *device_d3d11 =

> (AVD3D11VADeviceContext *)device_ctx->device_ctx->hwctx;

> > +                res = ctx->context->pVtbl->InitDX11(ctx->context, device_d3d11-

> >device, AMF_DX11_1);

> > +                if (res == AMF_OK) {

> > +                    ctx->hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);

> > +                } else {

> > +                    av_log(avctx, AV_LOG_INFO, "amf_shared: avctx-

> >hw_frames_ctx has non-AMD device, switching to default");

> 

> I'm not sure this is going to act sensibly - if the user has D3D11 frames input

> on another device, does it work?

> 

> Also newline.

> 

> > +                }

> > +

> > +            }

> > +        }

> > +    } else if (avctx->hw_device_ctx) {

> > +        AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)(avctx-

> >hw_device_ctx->data);

> > +        if (device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {

> > +            if (device_ctx->hwctx) {

> > +                AVD3D11VADeviceContext *device_d3d11 =

> (AVD3D11VADeviceContext *)device_ctx->hwctx;

> > +                res = ctx->context->pVtbl->InitDX11(ctx->context, device_d3d11-

> >device, AMF_DX11_1);

> > +                if (res == AMF_OK) {

> > +                    ctx->hw_device_ctx = av_buffer_ref(avctx->hw_device_ctx);

> > +                } else {

> > +                    av_log(avctx, AV_LOG_INFO, "amf_shared: avctx-

> >hw_device_ctx has non-AMD device, switching to default");

> 

> Newline.

> 

> > +                }

> > +            }

> > +        }

> > +    }

> > +

> > +    if (!ctx->hw_frames_ctx) {

> > +        res = ctx->context->pVtbl->InitDX11(ctx->context, 0, AMF_DX11_1);

> > +        if (res != AMF_OK) {

> > +            res = ctx->context->pVtbl->InitDX9(ctx->context, 0);

> > +            AMF_RETURN_IF_FALSE(ctx, res == AMF_OK,

> AVERROR_UNKNOWN, "InitDX9() failed with error %d", res);

> 

> Newline, and also in more messages below which I won't point out

> individually.

> 

> > +        }

> > +    }

> > +    return 0;

> > +}

> > +

> > +static int amf_init_encoder(AVCodecContext *avctx)

> > +{

> > +    AmfContext          *ctx = avctx->priv_data;

> > +    const wchar_t       *codec_id = 0;

> 

> NULL.

> 

> > +    AMF_RESULT          res = AMF_OK;

> > +

> > +    switch (avctx->codec->id) {

> > +        case AV_CODEC_ID_H264:

> > +            codec_id = AMFVideoEncoderVCE_AVC;

> > +            break;

> > +        case AV_CODEC_ID_HEVC:

> > +            codec_id = AMFVideoEncoder_HEVC;

> > +            break;

> > +        default:

> > +            break;

> > +    }

> > +    AMF_RETURN_IF_FALSE(ctx, codec_id != 0, AVERROR(EINVAL), "Codec

> %d is not supported", avctx->codec->id);

> > +

> > +    ctx->format = amf_av_to_amf_format(avctx->pix_fmt);

> > +    AMF_RETURN_IF_FALSE(ctx, ctx->format != AMF_SURFACE_UNKNOWN,

> AVERROR(EINVAL), "Format %d is not supported", avctx->pix_fmt);

> > +

> > +    res = ctx->factory->pVtbl->CreateComponent(ctx->factory, ctx->context,

> codec_id, &ctx->encoder);

> > +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK,

> AVERROR_ENCODER_NOT_FOUND, "CreateComponent(%S) failed with error

> %d", codec_id, res);

> 

> "%S" is not standard, please use %ls.

> 

> > +

> > +    ctx->eof = false;

> > +    return 0;

> > +}

> > +

> > +static int amf_terminate(AVCodecContext *avctx)

> > +{

> > +    AmfContext*          ctx = avctx->priv_data;

> 

> * next to variable.

> 

> > +

> > +    if (ctx->encoder) {

> > +        ctx->encoder->pVtbl->Terminate(ctx->encoder);

> > +        ctx->encoder->pVtbl->Release(ctx->encoder);

> > +        ctx->encoder = 0;

> 

> NULL.

> 

> > +    }

> > +

> > +    if (ctx->context) {

> > +        ctx->context->pVtbl->Terminate(ctx->context);

> > +        ctx->context->pVtbl->Release(ctx->context);

> > +        ctx->context = 0;

> 

> NULL.

> 

> > +    }

> > +    if (ctx->hw_device_ctx){

> > +        av_buffer_unref(&ctx->hw_device_ctx);

> > +        ctx->hw_device_ctx = 0;

> > +    }

> > +    if (ctx->hw_frames_ctx) {

> > +        av_buffer_unref(&ctx->hw_frames_ctx);

> > +        ctx->hw_frames_ctx = 0;

> > +    }

> 

> Just use av_buffer_unref() without the checks or setting to null.

> 

> > +

> > +    if (ctx->trace) {

> > +        ctx->trace->pVtbl->UnregisterWriter(ctx->trace,

> FFMPEG_AMF_WRITER_ID);

> > +    }

> > +

> > +    if (ctx->library) {

> > +        dlclose(ctx->library);

> > +        ctx->library = 0;

> > +    }

> > +    ctx->trace = 0;

> > +    ctx->debug = 0;

> > +    ctx->factory = 0;

> > +    ctx->version = 0;

> > +

> > +    return 0;

> > +}

> > +

> > +static int amf_copy_surface(AVCodecContext *avctx, const AVFrame

> *frame,

> > +                            AMFSurface* surface)

> > +{

> > +    AmfContext     *ctx = avctx->priv_data;

> > +    AMFPlane       *plane = 0;

> > +    uint8_t        *dst_data[4];

> > +    int             dst_linesize[4];

> > +

> > +    int planes = (int)surface->pVtbl->GetPlanesCount(surface);

> > +    AMF_RETURN_IF_FALSE(ctx, planes <= amf_countof(dst_data),

> AVERROR(EINVAL), "Invalid number of planes %d in surface", planes);

> > +

> > +    for (int i = 0; i < planes; i++) {

> 

> Declare variables at the start of the block.

> 

> > +        plane = surface->pVtbl->GetPlaneAt(surface, i);

> > +        dst_data[i] = plane->pVtbl->GetNative(plane);

> > +        dst_linesize[i] = plane->pVtbl->GetHPitch(plane);

> > +    }

> > +    av_image_copy(dst_data, dst_linesize,

> > +        (const uint8_t**)frame->data, frame->linesize, frame->format,

> > +        avctx->width, avctx->height);

> > +

> > +    surface->pVtbl->SetPts(surface, frame->pts);

> 

> Does this accept the same range as frame->pts, including AV_NOPTS_VALUE?

> 

> > +

> > +    return 0;

> > +}

> > +

> > +static int amf_copy_buffer(AVCodecContext *avctx, AVPacket *pkt,

> AMFBuffer *buffer)

> > +{

> > +    int                 ret = 0;

> 

> Pointless initialisation?

> 

> > +    AMFVariantStruct    var;

> > +    int                 size = (int)buffer->pVtbl->GetSize(buffer);

> > +

> > +    if (ret = ff_alloc_packet2(avctx, pkt, size, 0)) {

> 

> Check for ret negative.

> 

> > +        return ret;

> > +    }

> > +    memcpy(pkt->data, buffer->pVtbl->GetNative(buffer), size);

> > +

> > +    switch (avctx->codec->id) {

> > +        case AV_CODEC_ID_H264:

> > +            buffer->pVtbl->GetProperty(buffer,

> AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE, &var);

> > +            switch (var.int64Value) {

> > +                case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_IDR:

> > +                    pkt->flags = AV_PICTURE_TYPE_I | AV_PKT_FLAG_KEY;

> 

> The AV_PICTURE_TYPE does not go in AVPacket.flags.

> 

> > +                    break;

> > +                case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_I:

> > +                    pkt->flags = AV_PICTURE_TYPE_I;

> > +                    break;

> > +                case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_P:

> > +                    pkt->flags = AV_PICTURE_TYPE_P;

> > +                    break;

> > +                case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_B:

> > +                    pkt->flags = AV_PICTURE_TYPE_B;

> > +                    break;

> > +                default:

> > +                    av_log(avctx, AV_LOG_ERROR, "Unknown picture type

> encountered, expect the output to be broken.\n");

> 

> When can this happen?

> 

> > +                    break;

> > +            }

> > +            break;

> > +        case AV_CODEC_ID_HEVC:

> > +            buffer->pVtbl->GetProperty(buffer,

> AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE, &var);

> > +            switch (var.int64Value) {

> > +                case AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_IDR:

> > +                    pkt->flags = AV_PICTURE_TYPE_I | AV_PKT_FLAG_KEY;

> > +                    break;

> > +                case AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_I:

> > +                    pkt->flags = AV_PICTURE_TYPE_I | AV_PKT_FLAG_KEY;

> 

> All intra picture generated by this encoder are necessarily IRAP?

> 

> > +                    break;

> > +                case AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_P:

> > +                    pkt->flags = AV_PICTURE_TYPE_P;

> > +                    break;

> > +                default:

> > +                    av_log(avctx, AV_LOG_ERROR, "Unknown picture type

> encountered, expect the output to be broken.\n");

> > +                    break;

> > +            }

> > +            break;

> > +        default:

> > +            break;

> > +    }

> > +    pkt->pts = buffer->pVtbl->GetPts(buffer);

> > +    pkt->dts = pkt->pts;

> > +    return 0;

> > +}

> > +

> > +// amfenc API implementation

> > +int ff_amf_encode_init(AVCodecContext *avctx)

> > +{

> > +    AmfContext     *ctx = avctx->priv_data;

> > +    int             ret = 0;

> 

> Pointless initialisation.

> 

> > +

> > +    ctx->factory = 0;

> > +    ctx->debug = 0;

> > +    ctx->trace = 0;

> > +    ctx->context = 0;

> > +    ctx->encoder = 0;

> > +    ctx->library = 0;

> > +    ctx->version = 0;

> > +    ctx->eof = 0;

> > +    ctx->format = 0;

> > +    ctx->tracer.vtbl = 0;

> > +    ctx->tracer.avctx = 0;

> 

> Some of these should probably be NULL.

> 
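
That is, 0 for the scalars and NULL for the pointers (sketch):

    ctx->factory      = NULL;
    ctx->debug        = NULL;
    ctx->trace        = NULL;
    ctx->context      = NULL;
    ctx->encoder      = NULL;
    ctx->library      = NULL;
    ctx->version      = 0;
    ctx->eof          = 0;
    ctx->format       = 0;
    ctx->tracer.vtbl  = NULL;
    ctx->tracer.avctx = NULL;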

> > +

> > +    if ((ret = amf_load_library(avctx)) == 0) {

> > +        if ((ret = amf_init_context(avctx)) == 0) {

> > +            if ((ret = amf_init_encoder(avctx)) == 0) {

> > +                return 0;

> > +            }

> > +        }

> > +    }

> > +    amf_terminate(avctx);

> > +    return ret;

> > +}

> > +

> > +int av_cold ff_amf_encode_close(AVCodecContext *avctx)

> > +{

> > +    int ret = 0;

> 

> Pointless initialisation.

> 

> > +    ret = amf_terminate(avctx);

> > +    return ret;

> > +}

> > +

> > +static GUID  AMFTextureArrayIndexGUID =

> > +{ 0x28115527, 0xe7c3, 0x4b66, { 0x99, 0xd3, 0x4f, 0x2a, 0xe6, 0xb4, 0x7f, 0xaf } };

> 

> This seems like something that really shouldn't be hardcoded like this here.

> 

> > +

> > +int ff_amf_encode_frame(AVCodecContext *avctx, AVPacket *pkt,

> > +                        const AVFrame *frame, int *got_packet)

> > +{

> > +    int             ret = 0;

> > +    AMF_RESULT      res = AMF_OK;

> > +    AmfContext     *ctx = avctx->priv_data;

> > +    AMFSurface     *surface = 0;

> > +    AMFData        *data = 0;

> > +    amf_bool       submitted = false;

> > +

> > +    while (!submitted) {

> > +        if (!frame) { // submit drain

> > +            if (!ctx->eof) { // submit drain one time only

> > +                res = ctx->encoder->pVtbl->Drain(ctx->encoder);

> > +                if (res == AMF_INPUT_FULL) {

> > +                    av_usleep(1000); // input queue is full: wait, poll and submit Drain again

> > +                                     // need to get some output and try again

> > +                } else if (res == AMF_OK) {

> > +                    ctx->eof = true; // drain started

> > +                    submitted = true;

> > +                }

> > +            }

> > +        } else { // submit frame

> > +            if (surface == 0) { // prepare surface from frame one time only

> > +                if (frame->hw_frames_ctx && ( // HW frame detected
> > +                                              // check if the same hw_frames_ctx as used in initialization
> > +                    (ctx->hw_frames_ctx && frame->hw_frames_ctx->data == ctx->hw_frames_ctx->data) ||
> > +                    // check if the same hw_device_ctx as used in initialization
> > +                    (ctx->hw_device_ctx && ((AVHWFramesContext*)frame->hw_frames_ctx->data)->device_ctx ==
> > +                    (AVHWDeviceContext*)ctx->hw_device_ctx->data)
> > +                )) {
> > +                    ID3D11Texture2D* texture = (ID3D11Texture2D*)frame->data[0]; // actual texture
> > +                    int index = (int)(size_t)frame->data[1]; // index is a slice in texture array is - set to tell AMF which slice to use
> > +                    texture->lpVtbl->SetPrivateData(texture, &AMFTextureArrayIndexGUID, sizeof(index), &index);
> > +
> > +                    res = ctx->context->pVtbl->CreateSurfaceFromDX11Native(ctx->context, texture, &surface, NULL); // wrap to AMF surface
> > +                    surface->pVtbl->SetCrop(surface, 0, 0, frame->width, frame->height); // decode surfaces are vertically aligned by 16 tell AMF real size
> > +                    surface->pVtbl->SetPts(surface, frame->pts);
> > +                } else {
> > +                    res = ctx->context->pVtbl->AllocSurface(ctx->context, AMF_MEMORY_HOST, ctx->format, avctx->width, avctx->height, &surface);
> > +                    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "AllocSurface() failed  with error %d", res);
> > +                    amf_copy_surface(avctx, frame, surface);
> > +                }

> > +            }

> > +            // encode

> > +            res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)surface);
> > +            if (res == AMF_INPUT_FULL) { // handle full queue
> > +                av_usleep(1000); // input queue is full: wait, poll and submit surface again

> 

> Is there really no way in the API to wait for this properly?

> 

> > +            } else {

> > +                surface->pVtbl->Release(surface);

> > +                surface = NULL;

> > +                AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "SubmitInput() failed with error %d", res);

> > +                submitted = 1;

> > +            }

> > +        }

> > +        // poll results

> > +        if (!data) {

> > +            res = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data);

> > +            if (data) {

> > +                AMFBuffer* buffer;

> > +                AMFGuid guid = IID_AMFBuffer();

> > +                data->pVtbl->QueryInterface(data, &guid, (void**)&buffer); // query for buffer interface

> > +                ret = amf_copy_buffer(avctx, pkt, buffer);

> > +                if (!ret)

> > +                    *got_packet = 1;

> > +                buffer->pVtbl->Release(buffer);

> > +                data->pVtbl->Release(data);

> > +                if (ctx->eof) {

> > +                    submitted = true; // we are in the drain state - no submissions

> > +                }

> > +            } else if (res == AMF_EOF) {

> > +                submitted = true; // drain complete

> > +            } else {

> > +                if (!submitted) {

> > +                    av_usleep(1000); // wait and poll again

> > +                }

> > +            }

> > +        }

> > +    }

> 

> I suspect this setup is not actually going to follow the constraints of the

> deprecated encode2().  Given the API here, I think you would be much better

> off writing with send_frame()/receive_packet().

> 
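
For reference, a very rough sketch of the decoupled shape (the function
names amf_send_frame()/amf_receive_packet() are made up here; the surface
and packet helpers stay as above):

    static int amf_send_frame(AVCodecContext *avctx, const AVFrame *frame)
    {
        AmfContext *ctx     = avctx->priv_data;
        AMFSurface *surface = NULL;
        AMF_RESULT  res;

        if (!frame) // start draining once, then only poll in receive_packet()
            return ctx->encoder->pVtbl->Drain(ctx->encoder) == AMF_INPUT_FULL ?
                   AVERROR(EAGAIN) : 0;

        // ... wrap or copy the frame into an AMFSurface exactly as above ...

        res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)surface);
        if (res == AMF_INPUT_FULL)
            return AVERROR(EAGAIN); // caller pulls a packet and retries
        return res == AMF_OK ? 0 : AVERROR_UNKNOWN;
    }

    static int amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
    {
        AmfContext *ctx  = avctx->priv_data;
        AMFData    *data = NULL;
        AMF_RESULT  res  = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data);

        if (res == AMF_EOF)
            return AVERROR_EOF;
        if (!data)
            return AVERROR(EAGAIN);
        // ... QueryInterface() for AMFBuffer and amf_copy_buffer() as above ...
        data->pVtbl->Release(data);
        return 0;
    }

That would remove the av_usleep() polling from this loop entirely.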

> > +    return ret;

> > +}

> > diff --git a/libavcodec/amfenc.h b/libavcodec/amfenc.h

> > new file mode 100644

> > index 0000000..6b0135a

> > --- /dev/null

> > +++ b/libavcodec/amfenc.h

> > @@ -0,0 +1,131 @@

> > +/*

> > +* This file is part of FFmpeg.

> > +*

> > +* FFmpeg is free software; you can redistribute it and/or

> > +* modify it under the terms of the GNU Lesser General Public

> > +* License as published by the Free Software Foundation; either

> > +* version 2.1 of the License, or (at your option) any later version.

> > +*

> > +* FFmpeg is distributed in the hope that it will be useful,

> > +* but WITHOUT ANY WARRANTY; without even the implied warranty of

> > +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU

> > +* Lesser General Public License for more details.

> > +*

> > +* You should have received a copy of the GNU Lesser General Public

> > +* License along with FFmpeg; if not, write to the Free Software

> > +* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

> > +*/

> > +

> > +#ifndef AVCODEC_AMFENC_H

> > +#define AVCODEC_AMFENC_H

> > +

> > +#include "config.h"

> > +#include "avcodec.h"

> > +//#include "compat/amd/amf/public/include/core/Factory.h"

> 

> Commented out?

> 

> > +#include "compat/amd/amfsdkenc.h"

> > +

> > +

> > +/**

> > +* AMF trace writer callback class

> > +* Used to capture all AMF logging

> > +*/

> > +

> > +typedef struct AmfTraceWriter {

> > +	AMFTraceWriterVtbl* vtbl;

> > +	AVCodecContext      *avctx;

> > +} AmfTraceWriter;

> > +

> > +/**

> > +* AMF encoder context

> > +*/

> > +

> > +typedef struct AmfContext {

> > +	AVClass*            avclass;

> > +	/** access to AMF runtime */

> > +	amf_handle          library; ///< handle to DLL library

> > +	AMFFactory*         factory; ///< pointer to AMF factory

> > +	AMFDebug*           debug;   ///< pointer to AMF debug interface

> > +	AMFTrace*           trace;   ///< pointer to AMF trace interface

> > +

> > +	amf_uint64          version; ///< version of AMF runtime

> > +	AmfTraceWriter      tracer;  ///< AMF writer registered with AMF

> > +	AMFContext*         context; ///< AMF context

> > +	//encoder

> > +	AMFComponent*       encoder; ///< AMF encoder object

> > +	amf_bool            eof;     ///< flag indicating EOF happened

> > +	AMF_SURFACE_FORMAT  format;  ///< AMF surface format

> > +

> > +	AVBufferRef        *hw_device_ctx; ///< pointer to HW accelerator (decoder)
> > +	AVBufferRef        *hw_frames_ctx; ///< pointer to HW accelerator (frame allocator)

> > +

> > +	/** common encoder option options */

> > +

> > +    /** Static options, have to be set before Init() call */

> > +    int                 usage;

> > +    int                 profile;

> > +    int                 level;

> > +    int                 preanalysis;

> > +    int                 quality;

> > +    int					b_frame_delta_qp;

> > +    int					ref_b_frame_delta_qp;

> > +

> > +    /** Dynamic options, can be set after Init() call */

> > +

> > +    int                 rate_control_mode;

> > +	int					enforce_hrd;

> > +	int					filler_data;

> > +    int					enable_vbaq;

> > +    int					skip_frame;

> > +    int					qp_i;

> > +	int					qp_p;

> > +    int					qp_b;

> > +    int					max_au_size;

> > +	int					header_spacing;

> > +	int					b_frame_ref;

> > +	int					intra_refresh_mb;

> > +    int                 slices;

> > +	int					coding_mode;

> > +	int					me_half_pel;

> > +	int					me_quater_pel;

> > +

> > +    /** HEVC - specific options */

> > +

> > +    int					gops_per_idr;

> > +    int                 header_insertion_mode;

> > +    int                 min_qp_i;

> > +    int                 max_qp_i;

> > +    int                 min_qp_p;

> > +    int                 max_qp_p;

> > +	int                 tier;

> 

> There are lots of tabs and strange indentation here.

> 

> > +} AmfContext;

> > +

> > +/**

> > +* Common encoder initialization code

> > +*/

> > +int ff_amf_encode_init(AVCodecContext *avctx);

> > +/**

> > +* Common encoder termination code

> > +*/

> > +int ff_amf_encode_close(AVCodecContext *avctx);

> > +

> > +/**

> > +* Encoding one frame - common for all AMF encoders

> > +*/

> > +int ff_amf_encode_frame(AVCodecContext *avctx, AVPacket *pkt,

> > +	const AVFrame *frame, int *got_packet);

> > +

> > +/**

> > +* Supported formats

> > +*/

> > +extern const enum AVPixelFormat ff_amf_pix_fmts[];

> > +

> > +/**

> > +* Error handling helper

> > +*/

> > +#define AMF_RETURN_IF_FALSE(avctx, exp, ret_value, /*optional message,*/ ...) \

> > +    if (!(exp)) { \

> > +        av_log(avctx, AV_LOG_ERROR, __VA_ARGS__); \

> > +        return AVERROR(ret_value); \

> > +    }

> > +

> > +#endif //AVCODEC_AMFENC_H

> > \ No newline at end of file

> 

> git has a review comment for you too.

> 

> > diff --git a/libavcodec/amfenc_h264.c b/libavcodec/amfenc_h264.c

> > new file mode 100644

> > index 0000000..a6e0f3c

> > --- /dev/null

> > +++ b/libavcodec/amfenc_h264.c

> > @@ -0,0 +1,467 @@

> > +/*

> > + * This file is part of FFmpeg.

> > + *

> > + * FFmpeg is free software; you can redistribute it and/or

> > + * modify it under the terms of the GNU Lesser General Public

> > + * License as published by the Free Software Foundation; either

> > + * version 2.1 of the License, or (at your option) any later version.

> > + *

> > + * FFmpeg is distributed in the hope that it will be useful,

> > + * but WITHOUT ANY WARRANTY; without even the implied warranty of

> > + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU

> > + * Lesser General Public License for more details.

> > + *

> > + * You should have received a copy of the GNU Lesser General Public

> > + * License along with FFmpeg; if not, write to the Free Software

> > + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA

> > + */

> > +

> > +#include "amfenc.h"

> > +//#include "compat/amd/amf/public/include/components/VideoEncoderVCE.h"

> > +#include "libavutil/opt.h"

> > +#include "libavutil/internal.h"

> > +#include "internal.h"

> > +

> > +#define OFFSET(x) offsetof(AmfContext, x)

> > +#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM

> > +

> > +static const AVOption options[] = {

> > +    // Static

> > +    /// Usage

> > +    { "usage",          "Encoder Usage",        OFFSET(usage),

> AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_USAGE_TRANSCONDING

> }, AMF_VIDEO_ENCODER_USAGE_TRANSCONDING,

> AMF_VIDEO_ENCODER_USAGE_WEBCAM, VE, "usage" },

> > +    { "transcoding",    "Generic Transcoding",  0,

> AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_USAGE_TRANSCONDING      }, 0, 0, VE, "usage" },

> > +    { "ultralowlatency","",                     0,              AV_OPT_TYPE_CONST, { .i64

> = AMF_VIDEO_ENCODER_USAGE_ULTRA_LOW_LATENCY }, 0, 0, VE, "usage"

> },

> > +    { "lowlatency",     "",                     0,              AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_USAGE_LOW_LATENCY       }, 0, 0, VE, "usage" },

> > +    { "webcam",         "Webcam",               0,              AV_OPT_TYPE_CONST, {

> .i64 = AMF_VIDEO_ENCODER_USAGE_WEBCAM            }, 0, 0, VE, "usage" },

> > +

> > +    /// Profile,

> > +    { "profile",        "Profile",              OFFSET(profile),AV_OPT_TYPE_INT, {

> .i64 = AMF_VIDEO_ENCODER_PROFILE_MAIN       },

> AMF_VIDEO_ENCODER_PROFILE_BASELINE,

> AMF_VIDEO_ENCODER_PROFILE_HIGH, VE, "profile" },

> > +    { "baseline",       "",                     0,              AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_PROFILE_BASELINE }, 0, 0, VE, "profile" },

> 

> Do you really support baseline profile H.264?  You probably mean

> constrained baseline.

> 
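
If the hardware really produces constrained baseline, the option could at
least be exposed under that name while keeping the same AMF constant
(sketch):

    { "constrained_baseline", "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_PROFILE_BASELINE }, 0, 0, VE, "profile" },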

> > +    { "main",           "",                     0,              AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_PROFILE_MAIN     }, 0, 0, VE, "profile" },

> > +    { "high",           "",                     0,              AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_PROFILE_HIGH     }, 0, 0, VE, "profile" },

> > +

> > +    /// Profile Level

> > +    { "level",          "Profile Level",        OFFSET(level),  AV_OPT_TYPE_INT,   {

> .i64 = 0  }, 0, 62, VE, "level" },

> > +    { "auto",           "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 0

> }, 0, 0,  VE, "level" },

> > +    { "1.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 10

> }, 0, 0,  VE, "level" },

> > +    { "1.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 11

> }, 0, 0,  VE, "level" },

> > +    { "1.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 12

> }, 0, 0,  VE, "level" },

> > +    { "1.3",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 13

> }, 0, 0,  VE, "level" },

> > +    { "2.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 20

> }, 0, 0,  VE, "level" },

> > +    { "2.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 21

> }, 0, 0,  VE, "level" },

> > +    { "2.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 22

> }, 0, 0,  VE, "level" },

> > +    { "3.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 30

> }, 0, 0,  VE, "level" },

> > +    { "3.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 31

> }, 0, 0,  VE, "level" },

> > +    { "3.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 32

> }, 0, 0,  VE, "level" },

> > +    { "4.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 40

> }, 0, 0,  VE, "level" },

> > +    { "4.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 41

> }, 0, 0,  VE, "level" },

> > +    { "4.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 42

> }, 0, 0,  VE, "level" },

> > +    { "5.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 50

> }, 0, 0,  VE, "level" },

> > +    { "5.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 51

> }, 0, 0,  VE, "level" },

> > +    { "5.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 52

> }, 0, 0,  VE, "level" },

> > +    { "6.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 60

> }, 0, 0,  VE, "level" },

> > +    { "6.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 61

> }, 0, 0,  VE, "level" },

> > +    { "6.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 62

> }, 0, 0,  VE, "level" },

> 

> These private options for profile and level are fine, but you should read

> AVCodecContext.(profile|level) first.

> 
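
Something like this before applying the private options, then (sketch,
using a local "profile" variable for illustration):

    profile = ctx->profile;
    switch (avctx->profile) {
    case FF_PROFILE_H264_BASELINE:
        profile = AMF_VIDEO_ENCODER_PROFILE_BASELINE;
        break;
    case FF_PROFILE_H264_MAIN:
        profile = AMF_VIDEO_ENCODER_PROFILE_MAIN;
        break;
    case FF_PROFILE_H264_HIGH:
        profile = AMF_VIDEO_ENCODER_PROFILE_HIGH;
        break;
    }
    if (avctx->level != FF_LEVEL_UNKNOWN)
        profile_level = avctx->level;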

> > +

> > +    /// Quality Preset

> > +    { "quality",        "Quality Preference",                   OFFSET(quality),

> AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_QUALITY_PRESET_SPEED

> }, AMF_VIDEO_ENCODER_QUALITY_PRESET_BALANCED,

> AMF_VIDEO_ENCODER_QUALITY_PRESET_QUALITY, VE, "quality" },

> > +    { "speed",          "Prefer Speed",                         0,

> AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_QUALITY_PRESET_SPEED    },       0, 0, VE, "quality" },

> > +    { "balanced",       "Balanced",                             0,

> AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_QUALITY_PRESET_BALANCED },    0, 0, VE, "quality" },

> > +    { "quality",        "Prefer Quality",                       0,

> AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_QUALITY_PRESET_QUALITY  },     0, 0, VE, "quality" },

> > +

> > +    // Dynamic

> > +    /// Rate Control Method

> > +    { "rc",             "Rate Control Method",

> OFFSET(rate_control_mode),  AV_OPT_TYPE_INT,   { .i64 =

> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VB

> R    }, AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP,

> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_LATENCY_CONSTRAINED

> _VBR, VE, "rc" },

> > +    { "cqp",            "Constant Quantization Parameter",      0,

> AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP             },

> 0, 0, VE, "rc" },

> > +    { "cbr",            "Constant Bitrate",                     0,

> AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CBR                     }, 0, 0,

> VE, "rc" },

> > +    { "vbr_peak",       "Peak Contrained Variable Bitrate",     0,

> AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VB

> R    }, 0, 0, VE, "rc" },

> > +    { "vbr_latency",    "Latency Constrained Variable Bitrate", 0,

> AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_LATENCY_CONSTRAINED

> _VBR }, 0, 0, VE, "rc" },

> > +

> > +    /// Enforce HRD, Filler Data, VBAQ, Frame Skipping

> > +    { "enforce_hrd",    "Enforce HRD",                          OFFSET(enforce_hrd),

> AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },

> > +    { "filler_data",    "Filler Data Enable",                   OFFSET(filler_data),

> AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },

> > +    { "vbaq",           "Enable VBAQ",                          OFFSET(enable_vbaq),

> AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },

> > +    { "frame_skipping", "Rate Control Based Frame Skip",

> OFFSET(skip_frame),         AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE, NULL },

> > +

> > +    /// QP Values

> > +    { "qp_i",           "Quantization Parameter for I-Frame",   OFFSET(qp_i),

> AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 51, VE },

> > +    { "qp_p",           "Quantization Parameter for P-Frame",   OFFSET(qp_p),

> AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 51, VE },

> > +    { "qp_b",           "Quantization Parameter for B-Frame",   OFFSET(qp_b),

> AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 51, VE },

> > +

> > +    /// Pre-Pass, Pre-Analysis, Two-Pass

> > +    { "preanalysis",    "Pre-Analysis Mode",                    OFFSET(preanalysis),

> AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },

> > +

> > +    /// Maximum Access Unit Size

> > +    { "max_au_size",    "Maximum Access Unit Size (in bits)",

> OFFSET(max_au_size),        AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE,

> NULL },

> 

> Maximum access unit size seems like a slightly strange thing to set - the HRD

> parameters and rate control should define how this works.

> 

> Is it actually maximum NAL unit size?  (For RFC 6184 single NAL unit packets.)

> 

> > +

> > +    /// Header Insertion Spacing

> > +    { "header_spacing", "Header Insertion Spacing",

> OFFSET(header_spacing),     AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1000, VE },

> > +

> > +    /// B-Frames

> > +    // BPicturesPattern=bf

> > +    { "bf_delta_qp",    "B-Picture Delta QP",

> OFFSET(b_frame_delta_qp),   AV_OPT_TYPE_INT,  { .i64 = 4 }, -10, 10, VE,

> NULL },

> > +    { "bf_ref",         "Enable Reference to B-Frames",

> OFFSET(b_frame_ref),        AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE, NULL },

> > +    { "bf_ref_delta_qp","Reference B-Picture Delta QP",

> OFFSET(ref_b_frame_delta_qp), AV_OPT_TYPE_INT,  { .i64 = 4 }, -10, 10, VE,

> NULL },

> > +

> > +    /// Intra-Refresh

> > +    { "intra_refresh_mb","Intra Refresh MBs Number Per Slot in

> Macroblocks",       OFFSET(intra_refresh_mb),    AV_OPT_TYPE_INT, { .i64 = 0

> }, 0, INT_MAX, VE },

> > +    { "slices",         "Number of Slices per Frame",           OFFSET(slices),

> AV_OPT_TYPE_INT,   { .i64 = 1 } , 1, 8160, VE, NULL },

> 

> Use AVCodecContext.slices rather than a private option.

> 
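
That is (sketch), mirroring what the HEVC side already does:

    if (avctx->slices > 1)
        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_SLICES_PER_FRAME, avctx->slices);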

> > +

> > +    /// coder

> > +    { "coding",         "Coding Type",                          OFFSET(coding_mode),

> AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_UNDEFINED },

> AMF_VIDEO_ENCODER_UNDEFINED, AMF_VIDEO_ENCODER_CALV, VE,

> "coding" },

> > +    { "auto",           "Automatic",                            0,

> AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_UNDEFINED }, 0, 0,

> VE, "coding" },

> > +    { "cavlc",          "Context Adaptive Variable-Length Coding", 0,

> AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_CALV },      0, 0, VE,

> "coding" },

> > +    { "cabac",          "Context Adaptive Binary Arithmetic Coding", 0,

> AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_CABAC },     0, 0, VE,

> "coding" },

> 

> Other encoders have the same option named "coder", it might be nice to be

> consistent.

> 

> > +

> > +    { "me_half_pel",    "Enable ME Half Pixel",

> OFFSET(me_half_pel),   AV_OPT_TYPE_BOOL,  { .i64 = 1 }, 0, 1, VE, NULL },

> > +    { "me_quater_pel",  "Enable ME Quarter Pixel ",

> OFFSET(me_quater_pel), AV_OPT_TYPE_BOOL,  { .i64 = 1 }, 0, 1, VE, NULL },

> > +

> > +    { NULL }

> > +};

> 

> I think these options could do with some better documentation (maybe in

> the texinfo, though, and it can be a later patch).

> 

> > +

> > +static av_cold int amf_encode_init_h264(AVCodecContext *avctx)

> > +{

> > +    int                 ret = 0;

> > +    AMF_RESULT          res = AMF_OK;

> > +    AmfContext         *ctx = avctx->priv_data;

> > +    AMFVariantStruct    var = {0};

> > +    amf_int64           profile_level = 0;

> > +

> > +    AMFSize             framesize = AMFConstructSize(avctx->width, avctx->height);
> > +    AMFRate             framerate = AMFConstructRate(avctx->time_base.den, avctx->time_base.num * avctx->ticks_per_frame);

> 

> Is VFR encoding not supported?

> 

> > +

> > +    int                 deblocking_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;

> > +

> > +    if ((ret = ff_amf_encode_init(avctx)) != 0)

> > +        return ret;

> > +

> > +    // Static parameters

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_USAGE, ctx->usage);
> > +
> > +    AMF_ASSIGN_PROPERTY_SIZE(res, ctx->encoder, AMF_VIDEO_ENCODER_FRAMESIZE, framesize);
> > +
> > +    AMF_ASSIGN_PROPERTY_RATE(res, ctx->encoder, AMF_VIDEO_ENCODER_FRAMERATE, framerate);
> > +
> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PROFILE, ctx->profile);

> 

> Check avctx->profile as well.

> 

> > +

> > +    profile_level = ctx->level;

> 

> Check avctx->level as well.

> 

> > +    if (profile_level == 0) {

> > +        // Automatic detection of correct profile level.

> > +        struct {

> > +            uint64_t  max_samples;

> > +            uint64_t max_samples_per_sec;

> > +            int level;

> > +        } restrictions[] = {

> > +            { 25344,    380160, 10 },

> > +            { 101376,   768000, 11 },

> > +            { 101376,   1536000, 12 },

> > +            //{   101376,    3041280, 13 }, // Backwards compatible 2.0

> > +            { 101376,   3041280, 20 },

> > +            { 202752,   5068800, 21 },

> > +            { 414720,   5184000, 22 },

> > +            { 414720,   10368000, 30 },

> > +            { 921600,   27648000, 31 },

> > +            { 1310720,  55296000, 32 },

> > +            //{  2097152,   62914560, 40 }, // Backwards compatible 4.1

> > +            { 2097152,  62914560, 41 },

> > +            { 2228224,  133693440, 42 },

> > +            { 5652480,  150994994, 50 },

> > +            { 9437184,  251658240, 51 },

> > +            { 9437184,  530841600, 52 },

> > +            { 35651584, 1069547520, 60 },

> > +            { 35651584, 2139095040, 61 },

> > +            { 35651584, 4278190080, 62 },

> > +            { 0, 0, -1 }

> > +        };

> > +        uint64_t samples = framesize.width * framesize.height;

> > +        uint64_t samples_per_sec = (samples * framerate.num) /

> framerate.den;

> > +        profile_level = 52; // Default to 5.2 for now.

> > +        for (unsigned int index = 0; restrictions[index].level != -1; index++) {

> > +            if ((samples < restrictions[index].max_samples)

> > +                && (samples_per_sec <

> restrictions[index].max_samples_per_sec)) {

> > +                profile_level = restrictions[index].level;

> > +                break;

> > +            }

> > +        }

> > +    }

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PROFILE_LEVEL, profile_level);

> 

> Um, does this really have to be done outside the encoder?

> 

> > +

> > +    // Maximum Reference Frames

> > +    if (avctx->refs != -1) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_MAX_NUM_REFRAMES, avctx->refs);

> > +    }

> > +    if (avctx->sample_aspect_ratio.den && avctx-

> >sample_aspect_ratio.num) {

> > +        AMFRatio ratio = AMFConstructRatio(avctx-

> >sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);

> > +        AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder,

> AMF_VIDEO_ENCODER_ASPECT_RATIO, ratio);

> > +    }

> > +

> > +    /// Color Range (Partial/TV/MPEG or Full/PC/JPEG)

> > +    if (avctx->color_range == AVCOL_RANGE_JPEG) {

> > +        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> AMF_VIDEO_ENCODER_FULL_RANGE_COLOR, 1);

> > +    }

> > +

> > +    if (ctx->rate_control_mode ==

> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_RATE_CONTROL_PREANALYSIS_ENABLE, false);

> > +        if (ctx->preanalysis)

> > +            av_log(ctx, AV_LOG_WARNING, "Pre-Analysis is not supported by

> cqp Rate Control Method, automatically disabled.");

> 

> (More newlines missing.)

> 

> > +    } else {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_RATE_CONTROL_PREANALYSIS_ENABLE, ctx-

> >preanalysis);

> > +    }

> > +

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_QUALITY_PRESET, ctx->quality);

> > +

> > +    // Initialize Encoder

> > +    res = ctx->encoder->pVtbl->Init(ctx->encoder, ctx->format, avctx-

> >width, avctx->height);

> > +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "encoder-

> >Init() failed with error %d", res);

> > +

> > +    // Dynamic parameters

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD, ctx-

> >rate_control_mode);

> > +

> > +    /// VBV Buffer

> > +    if (avctx->rc_buffer_size != 0)

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_VBV_BUFFER_SIZE, avctx->rc_buffer_size);

> > +    if (avctx->rc_initial_buffer_occupancy != 0) {

> > +        int percent = avctx->rc_buffer_size * 64 / avctx->rc_initial_buffer_occupancy;

> > +        if (percent > 64)

> > +            percent = 64;

> 

> ???

> 
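
If the property is the initial fullness expressed in 64ths of the VBV
buffer, the calculation is presumably meant the other way around (sketch,
with a guard against a zero buffer size):

    if (avctx->rc_buffer_size != 0 && avctx->rc_initial_buffer_occupancy != 0) {
        int percent = avctx->rc_initial_buffer_occupancy * 64 / avctx->rc_buffer_size;
        if (percent > 64)
            percent = 64;
        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_INITIAL_VBV_BUFFER_FULLNESS, percent);
    }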

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_INITIAL_VBV_BUFFER_FULLNESS, percent);

> > +    }

> > +    /// Maximum Access Unit Size

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_MAX_AU_SIZE, ctx->max_au_size);

> > +

> > +

> > +    // QP Minimum / Maximum

> > +    if (ctx->rate_control_mode ==

> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_MIN_QP, 0);

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_MAX_QP, 51);

> > +    } else {

> > +        if (avctx->qmin != -1) {

> > +            int qval = avctx->qmin > 51 ? 51 : avctx->qmin;

> > +            AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_MIN_QP, qval);

> > +        }

> 

> Trailing spaces.

> 

> > +        if (avctx->qmax != -1) {

> > +            int qval = avctx->qmax > 51 ? 51 : avctx->qmax;

> > +            AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_MAX_QP, qval);

> > +        }

> > +    }

> > +    // QP Values

> > +    if (ctx->qp_i != -1)

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_QP_I, ctx->qp_i);

> > +    if (ctx->qp_p != -1)

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_QP_P, ctx->qp_p);

> > +    if (ctx->qp_b != -1)

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_QP_B, ctx->qp_b);

> > +

> > +    // Bitrate

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_TARGET_BITRATE, avctx->bit_rate);

> > +    if (ctx->rate_control_mode ==

> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CBR) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_PEAK_BITRATE, avctx->bit_rate);

> > +    } else {

> > +        int rc_max_rate = avctx->rc_max_rate >= avctx->bit_rate ? avctx-

> >rc_max_rate : avctx->bit_rate * 13 / 10;

> 

> Where does 13/10 come from?

> 

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_PEAK_BITRATE, rc_max_rate);

> > +    }

> > +    // Enforce HRD, Filler Data, VBAQ, Frame Skipping, Deblocking Filter

> > +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> AMF_VIDEO_ENCODER_ENFORCE_HRD, !!ctx->enforce_hrd);

> > +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> AMF_VIDEO_ENCODER_FILLER_DATA_ENABLE, !!ctx->filler_data);

> > +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> AMF_VIDEO_ENCODER_RATE_CONTROL_SKIP_FRAME_ENABLE, !!ctx-

> >skip_frame);

> > +    if (ctx->rate_control_mode ==

> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {

> > +        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> AMF_VIDEO_ENCODER_ENABLE_VBAQ, false);

> > +        if (ctx->enable_vbaq)

> > +            av_log(ctx, AV_LOG_WARNING, "VBAQ is not supported by cqp

> Rate Control Method, automatically disabled.");

> > +    } else {

> > +        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> AMF_VIDEO_ENCODER_ENABLE_VBAQ, !!ctx->enable_vbaq);

> > +    }

> > +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> AMF_VIDEO_ENCODER_DE_BLOCKING_FILTER, !!deblocking_filter);

> > +

> > +    // B-Frames

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_B_PIC_PATTERN, avctx->max_b_frames);

> > +    if (avctx->max_b_frames && res == AMF_OK) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_B_PIC_DELTA_QP, ctx->b_frame_delta_qp);

> > +        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> AMF_VIDEO_ENCODER_B_REFERENCE_ENABLE, !!ctx->b_frame_ref);

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_REF_B_PIC_DELTA_QP, ctx->ref_b_frame_delta_qp);

> > +    }

> > +

> > +    // Keyframe Interval

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_IDR_PERIOD, avctx->gop_size);

> > +

> > +    // Header Insertion Spacing

> > +    if (ctx->header_spacing >= 0)

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEADER_INSERTION_SPACING, ctx-

> >header_spacing);

> > +

> > +    // Intra-Refresh, Slicing

> > +    if (ctx->intra_refresh_mb > 0)

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_INTRA_REFRESH_NUM_MBS_PER_SLOT, ctx-

> >intra_refresh_mb);

> > +    if (ctx->slices > 1)

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_SLICES_PER_FRAME, ctx->slices);
> > +

> > +    // Coding

> > +    if (ctx->coding_mode != 0)

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_CABAC_ENABLE, ctx->coding_mode);

> > +

> > +    // Motion Estimation

> > +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> AMF_VIDEO_ENCODER_MOTION_HALF_PIXEL, !!ctx->me_half_pel);

> > +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> AMF_VIDEO_ENCODER_MOTION_QUARTERPIXEL, !!ctx->me_quater_pel);

> > +

> > +    // fill extradata

> > +    AMFVariantInit(&var);

> 

> Can this fail?

> 
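
If AMFVariantInit() returns an AMF_RESULT like the other variant helpers,
it could at least be checked (sketch):

    res = AMFVariantInit(&var);
    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "AMFVariantInit() failed with error %d", res);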

> > +    res = ctx->encoder->pVtbl->GetProperty(ctx->encoder,

> AMF_VIDEO_ENCODER_EXTRADATA, &var);

> > +    if (res == AMF_OK && var.pInterface) {

> > +        AMFBuffer* buffer;

> > +        AMFGuid guid = IID_AMFBuffer();

> > +

> > +        var.pInterface->pVtbl->QueryInterface(var.pInterface, &guid,

> (void**)&buffer); // query for buffer interface

> > +

> > +        avctx->extradata_size = (int)buffer->pVtbl->GetSize(buffer);

> > +        avctx->extradata = av_mallocz(avctx->extradata_size +

> AV_INPUT_BUFFER_PADDING_SIZE);

> > +        if (!avctx->extradata) {

> > +            buffer->pVtbl->Release(buffer);

> > +            var.pInterface->pVtbl->Release(var.pInterface);

> > +            return AVERROR(ENOMEM);

> > +        }

> > +        memcpy(avctx->extradata, buffer->pVtbl->GetNative(buffer), avctx-

> >extradata_size);

> > +

> > +        buffer->pVtbl->Release(buffer);

> > +        var.pInterface->pVtbl->Release(var.pInterface);

> > +    }

> > +    return 0;

> > +}

> > +

> > +

> > +

> > +static const AVCodecDefault defaults[] = {

> > +    { "refs",       "-1" },

> > +    { "aspect",     "0" },

> > +    { "sar",        "0" },

> > +    { "qmin",       "-1" },

> > +    { "qmax",       "-1" },

> > +    { "b",          "2M" },

> > +    { "maxrate",    "3M" },

> > +    { "g",          "250" },

> > +    { "keyint_min", "0" },

> > +    { "bf",         "0" },

> > +    { "slices",     "1" },

> > +    { NULL },

> > +};

> > +

> > +static const AVClass h264_amf_class = {

> > +    .class_name = "h264_amf",

> > +    .item_name = av_default_item_name,

> > +    .option = options,

> > +    .version = LIBAVUTIL_VERSION_INT,

> > +};

> > +static const AVClass h264_amf_d3d11va_class = {

> > +    .class_name = "h264_amf_d3d11va",

> > +    .item_name = av_default_item_name,

> > +    .option = options,

> > +    .version = LIBAVUTIL_VERSION_INT,

> > +};

> > +// regular encoder

> > +AVCodec ff_h264_amf_encoder = {

> > +    .name = "h264_amf",

> > +    .long_name = NULL_IF_CONFIG_SMALL("AMD AMF H.264 Encoder"),

> > +    .type = AVMEDIA_TYPE_VIDEO,

> > +    .id = AV_CODEC_ID_H264,

> > +    .init = amf_encode_init_h264,

> > +    .encode2 = ff_amf_encode_frame,

> > +    .close = ff_amf_encode_close,

> > +    .priv_data_size = sizeof(AmfContext),

> > +    .priv_class = &h264_amf_class,

> > +    .defaults = defaults,

> > +    .capabilities = AV_CODEC_CAP_DELAY,

> > +    .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,

> > +    .pix_fmts = ff_amf_pix_fmts,

> > +};

> > +// encoder connected with D3D11 HW accelerator

> > +AVCodec ff_h264_amf_d3d11va_encoder = {

> > +    .name = "h264_amf_d3d11va",

> > +    .long_name = NULL_IF_CONFIG_SMALL("AMD AMF H.264 Encoder with d3d11va"),

> > +    .type = AVMEDIA_TYPE_VIDEO,

> > +    .id = AV_CODEC_ID_H264,

> > +    .init = amf_encode_init_h264,

> > +    .encode2 = ff_amf_encode_frame,

> > +    .close = ff_amf_encode_close,

> > +    .priv_data_size = sizeof(AmfContext),

> > +    .priv_class = &h264_amf_d3d11va_class,

> > +    .defaults = defaults,

> > +    .capabilities = AV_CODEC_CAP_DELAY,

> > +    .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,

> > +    .pix_fmts = ff_amf_pix_fmts,

> > +};

> 

> As above, why does this separate (identical) instance exist?

> 

> > +

> > +/**

> > +* Basic test BAT file:

> > +echo off

> > +if "%~1"=="" (

> > +echo input file name is empty. Use basic_transcode_amf_h264.bat

> video.mp4

> > +goto error

> > +)

> > +

> > +SET "CWD=%~dp0"

> > +SET bitrate=5M

> > +SET maxbitrate=6M

> > +SET bufsize=2M

> > +SET x264_preset=veryfast

> > +SET amf_quality=speed

> > +

> > +

> > +rem veryfast and zerolatency options make x264 comparable with VCE

> > +

> > +rem change path to ffmpeg.exe if needed

> > +

> > +"%CWD%\..\bin\ffmpeg.exe" -y -t 100 -threads 0 -i "%~1" -c:v h264_amf  -

> b:v %bitrate% -maxrate %maxbitrate% -bufsize %bufsize% -rc vbr_peak -

> quality %amf_quality% out_amf_h264.mp4

> > +"%CWD%\..\bin\ffmpeg.exe" -y -t 100 -threads 0 -i "%~1" -c:v libx264   -

> b:v %bitrate% -maxrate %maxbitrate% -bufsize %bufsize% -preset

> %x264_preset% -nal-hrd vbr -tune zerolatency out_x264_h264.mp4

> > +

> > +echo PSNR > result.txt

> > +

> > +"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -

> threads 0 -i "%~1" -i out_amf_h264.mp4  -lavfi

> psnr="stats_file='amf_h264.psnr.log'"  -f null - > "trace.txt" 2>&1

> > +for /f "tokens=*" %%A in ('type "trace.txt"') do @echo amf_h264 : %%A

> >end.txt

> > +type end.txt >> result.txt

> > +

> > +

> > +"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -

> threads 0 -i "%~1" -i out_x264_h264.mp4 -lavfi

> psnr="stats_file='x264_h264.psnr.log'" -f null - > "trace.txt" 2>&1

> > +for /f "tokens=*" %%A in ('type "trace.txt"') do @echo x264_h264: %%A

> >end.txt

> > +type end.txt >> result.txt

> > +

> > +

> > +echo SSIM >> result.txt

> > +

> > +"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -

> threads 0 -i "%~1" -i out_amf_h264.mp4  -lavfi

> ssim="stats_file='amf_h264_ssim.log'" -f null - > "trace.txt"  2>&1

> > +for /f "tokens=*" %%A in ('type "trace.txt"') do @echo amf_h264 : %%A

> >end.txt

> > +type end.txt >> result.txt

> > +

> > +"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -

> threads 0 -i "%~1" -i out_x264_h264.mp4  -lavfi

> ssim="stats_file=x264_h264_ssim.log'" -f null - > "trace.txt"  2>&1

> > +for /f "tokens=*" %%A in ('type "trace.txt"') do @echo x264_h264: %%A

> >end.txt

> > +type end.txt >> result.txt

> > +

> > +del trace.txt

> > +del end.txt

> > +

> > +echo

> > +type result.txt

> > +

> > +

> > +:error

> > +*/

> > +

> > +

> > +/**

> > +* d3d11va integration test bat file

> > +rem echo off

> > +if "%~1"=="" (

> > +echo input file name is empty. Use dx11_transcode_amf_h264.bat

> video.mp4

> > +goto error

> > +)

> > +

> > +SET "CWD=%~dp0"

> > +SET bitrate=5M

> > +SET maxbitrate=6M

> > +SET bufsize=2M

> > +

> > +rem change path to ffmpeg.exe if needed

> > +

> > +"%CWD%\..\bin\ffmpeg.exe" -y -t 100 -hwaccel d3d11va -

> hwaccel_output_format d3d11 -threads 1 -i "%~1" -c:v h264_amf          -b:v

> %bitrate% -maxrate %maxbitrate% -bufsize %bufsize% -rc vbr_peak

> shared_dx11_amf_h264.mp4

> > +"%CWD%\..\bin\ffmpeg.exe" -y -t 100 -hwaccel d3d11va                              -

> threads 1 -i "%~1" -c:v h264_amf_d3d11va  -b:v %bitrate% -maxrate

> %maxbitrate% -bufsize %bufsize% -rc vbr_peak

> custom_dx11_amf_h264.mp4

> > +

> > +*/

> 

> Don't include any of this stuff.

> 

> > +

> > diff --git a/libavcodec/amfenc_hevc.c b/libavcodec/amfenc_hevc.c

> > new file mode 100644

> > index 0000000..4d3c7d4

> > --- /dev/null

> > +++ b/libavcodec/amfenc_hevc.c

> > @@ -0,0 +1,354 @@

> > +/*

> > + * This file is part of FFmpeg.

> > + *

> > + * FFmpeg is free software; you can redistribute it and/or

> > + * modify it under the terms of the GNU Lesser General Public

> > + * License as published by the Free Software Foundation; either

> > + * version 2.1 of the License, or (at your option) any later version.

> > + *

> > + * FFmpeg is distributed in the hope that it will be useful,

> > + * but WITHOUT ANY WARRANTY; without even the implied warranty of

> > + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the

> GNU

> > + * Lesser General Public License for more details.

> > + *

> > + * You should have received a copy of the GNU Lesser General Public

> > + * License along with FFmpeg; if not, write to the Free Software

> > + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301

> USA

> > + */

> > +

> > +#include "amfenc.h"

> > +//#include

> "compat/amd/amf/public/include/components/VideoEncoderHEVC.h"

> > +#include "libavutil/opt.h"

> > +#include "libavutil/internal.h"

> > +#include "internal.h"

> > +

> > +#define OFFSET(x) offsetof(AmfContext, x)

> > +#define VE AV_OPT_FLAG_VIDEO_PARAM |

> AV_OPT_FLAG_ENCODING_PARAM

> > +static const AVOption options[] = {

> > +    { "usage",          "Set the encoding usage",             OFFSET(usage),

> AV_OPT_TYPE_INT,   { .i64 =

> AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING },

> AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING,

> AMF_VIDEO_ENCODER_HEVC_USAGE_WEBCAM, VE, "usage" },

> > +    { "transcoding",    "", 0, AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING },         0, 0, VE,

> "usage" },

> > +    { "ultralowlatency","", 0, AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_HEVC_USAGE_ULTRA_LOW_LATENCY },    0, 0, VE,

> "usage" },

> > +    { "lowlatency",     "", 0, AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_HEVC_USAGE_LOW_LATENCY },          0, 0, VE,

> "usage" },

> > +    { "webcam",         "", 0, AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_HEVC_USAGE_WEBCAM },               0, 0, VE, "usage" },

> 

> Could some of this be in common with the H.264 encoder?  (Maybe in the

> header?)

> 

> > +

> > +    { "profile",        "Set the profile (default main)",           OFFSET(profile),

> AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN },

> AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN,

> AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN, VE, "profile" },

> > +    { "main",           "", 0,                      AV_OPT_TYPE_CONST,{ .i64 =

> AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN }, 0, 0, VE, "profile" },

> > +

> > +    { "profile_tier",   "Set the profile tier (default main)",      OFFSET(tier),

> AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_TIER_MAIN },

> AMF_VIDEO_ENCODER_HEVC_TIER_MAIN,

> AMF_VIDEO_ENCODER_HEVC_TIER_HIGH, VE, "tier" },

> > +    { "main",           "", 0, AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_HEVC_TIER_MAIN }, 0, 0, VE, "tier" },

> > +    { "high",           "", 0, AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_HEVC_TIER_HIGH }, 0, 0, VE, "tier" },

> > +

> > +    { "level",          "Set the encoding level (default auto)",    OFFSET(level),

> AV_OPT_TYPE_INT,{ .i64 = 0 }, 0, AMF_LEVEL_6_2, VE, "level" },

> > +    { "auto",           "", 0, AV_OPT_TYPE_CONST, { .i64 = 0             }, 0, 0, VE,

> "level" },

> > +    { "1.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_1   }, 0, 0,

> VE, "level" },

> > +    { "2.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_2   }, 0, 0,

> VE, "level" },

> > +    { "2.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_2_1 }, 0,

> 0, VE, "level" },

> > +    { "3.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_3   }, 0, 0,

> VE, "level" },

> > +    { "3.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_3_1 }, 0,

> 0, VE, "level" },

> > +    { "4.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_4   }, 0, 0,

> VE, "level" },

> > +    { "4.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_4_1 }, 0,

> 0, VE, "level" },

> > +    { "5.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_5   }, 0, 0,

> VE, "level" },

> > +    { "5.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_5_1 }, 0,

> 0, VE, "level" },

> > +    { "5.2",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_5_2 }, 0,

> 0, VE, "level" },

> > +    { "6.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_6   }, 0, 0,

> VE, "level" },

> > +    { "6.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_6_1 }, 0,

> 0, VE, "level" },

> > +    { "6.2",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_6_2 }, 0,

> 0, VE, "level" },

> > +

> > +    { "quality",        "Set the encoding quality",                 OFFSET(quality),

> AV_OPT_TYPE_INT,   { .i64 =

> AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_SPEED },

> AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_QUALITY,

> AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_SPEED, VE, "quality" },

> > +    { "balanced",       "", 0, AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_BALANCED }, 0, 0, VE,

> "quality" },

> > +    { "speed",          "", 0, AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_SPEED    }, 0, 0, VE, "quality"

> },

> > +    { "quality",        "", 0, AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_QUALITY  }, 0, 0, VE,

> "quality" },

> > +

> > +    { "rc",             "Set the rate control mode",

> OFFSET(rate_control_mode),   AV_OPT_TYPE_INT,   { .i64 =

> AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_PEAK_CONSTRAIN

> ED_VBR },

> AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CONSTANT_QP,

> AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CBR, VE, "rc" },

> > +    { "cqp",            "", 0, AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CONSTANT_QP

> }, 0, 0, VE, "rc" },

> > +    { "cbr",            "", 0, AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CBR                     },

> 0, 0, VE, "rc" },

> > +    { "vbr_peak",       "", 0, AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_PEAK_CONSTRAIN

> ED_VBR    }, 0, 0, VE, "rc" },

> > +    { "vbr_latency",    "", 0, AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_LATENCY_CONSTR

> AINED_VBR }, 0, 0, VE, "rc" },

> > +

> 

> Trailing spaces.

> 

> > +

> > +    { "header_insertion_mode",        "Set header insertion mode",

> OFFSET(header_insertion_mode),      AV_OPT_TYPE_INT,{ .i64 =

> AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_NONE },

> AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_NONE,

> AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_IDR_ALIGNED,

> VE, "hdrmode" },

> > +    { "balanced",       "", 0, AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_NONE        }, 0, 0,

> VE, "hdrmode" },

> > +    { "speed",          "", 0, AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_GOP_ALIGNED },

> 0, 0, VE, "hdrmode" },

> > +    { "quality",        "", 0, AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_IDR_ALIGNED },

> 0, 0, VE, "hdrmode" },

> 

> Names look suspicious...

> 

> > +

> > +    { "gops_per_idr",    "GOPs per IDR 0-no IDR will be inserted",

> OFFSET(gops_per_idr),  AV_OPT_TYPE_INT,{ .i64 = 60 }, 0, INT_MAX, VE },

> > +    { "preanalysis",    "Enable preanalysis",                       OFFSET(preanalysis),

> AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },

> > +    { "vbaq",           "Enable VBAQ",                              OFFSET(enable_vbaq),

> AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },

> > +    { "enforce_hrd",    "Enforce HRD",                              OFFSET(enforce_hrd),

> AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },

> > +    { "filler_data",    "Filler Data Enable",                       OFFSET(filler_data),

> AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },

> > +    { "max_au_size",    "Max AU Size in bits",

> OFFSET(max_au_size),   AV_OPT_TYPE_INT,{ .i64 = 0 }, 0, INT_MAX, VE, NULL

> },

> > +    { "min_qp_i",       "min quantization parameter for I-frame",

> OFFSET(min_qp_i),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },

> > +    { "max_qp_i",       "max quantization parameter for I-frame",

> OFFSET(max_qp_i),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },

> > +    { "min_qp_p",       "min quantization parameter for P-frame",

> OFFSET(min_qp_p),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },

> > +    { "max_qp_p",       "max quantization parameter for P-frame",

> OFFSET(max_qp_p),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },

> > +    { "qp_p",           "quantization parameter for P-frame",       OFFSET(qp_p),

> AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },

> > +    { "qp_i",           "quantization parameter for I-frame",       OFFSET(qp_i),

> AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },

> > +    { "skip_frame",     "Rate Control Based Frame Skip",

> OFFSET(skip_frame),    AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },

> > +    { "me_half_pel",    "Enable ME Half Pixel",

> OFFSET(me_half_pel),   AV_OPT_TYPE_BOOL,{ .i64 = 1 }, 0, 1, VE, NULL },

> > +    { "me_quater_pel",  "Enable ME Quarter Pixel ",

> OFFSET(me_quater_pel), AV_OPT_TYPE_BOOL,{ .i64 = 1 }, 0, 1, VE, NULL },

> > +

> > +    { NULL }

> > +};

> > +

> > +static av_cold int amf_encode_init_hevc(AVCodecContext *avctx)

> > +{

> > +    int                 ret = 0;

> > +    AMF_RESULT          res = AMF_OK;

> > +    AmfContext         *ctx = avctx->priv_data;

> > +    AMFVariantStruct    var = {0};

> > +

> > +    AMFSize             framesize = AMFConstructSize(avctx->width, avctx-

> >height);

> > +    AMFRate             framerate = AMFConstructRate(avctx->time_base.den,

> avctx->time_base.num * avctx->ticks_per_frame);

> > +

> > +    int                 deblocking_filter = (avctx->flags &

> AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;

> > +

> > +    if ((ret = ff_amf_encode_init(avctx)) < 0)

> > +        return ret;

> > +

> 

> Trailing spaces.

> 

> > +    // init static parameters

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_USAGE, ctx->usage);

> > +

> > +    AMF_ASSIGN_PROPERTY_SIZE(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_FRAMESIZE, framesize);

> > +

> > +    AMF_ASSIGN_PROPERTY_RATE(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_FRAMERATE, framerate);

> > +

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_PROFILE, ctx->profile);

> > +

> > +    switch (ctx->profile) {

> > +    case AMF_VIDEO_ENCODER_HEVC_TIER_HIGH:

> > +        avctx->profile = FF_PROFILE_HEVC_REXT;

> > +        break;

> > +    case AMF_VIDEO_ENCODER_HEVC_TIER_MAIN:

> > +        avctx->profile = FF_PROFILE_HEVC_MAIN;

> > +        break;

> 

> It's set by the user - you should be using it, not writing it.

> 
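
That is, read it and map it back to the AMF constant (sketch, with a local
"profile" variable; only main is exposed anyway):

    profile = ctx->profile;
    if (avctx->profile == FF_PROFILE_HEVC_MAIN)
        profile = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN;
    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PROFILE, profile);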

> > +    default:

> > +        break;

> > +    }

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_TIER, ctx->tier);

> > +

> > +    if (ctx->level != 0) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_PROFILE_LEVEL, ctx->level);

> > +        avctx->level = ctx->level;

> 

> As profile, you should be using it, not writing it.

> 

> > +    }

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET, ctx->quality);

> > +    // Maximum Reference Frames

> > +    if (avctx->refs != 0) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_MAX_NUM_REFRAMES, avctx->refs);

> > +    }

> > +    // Aspect Ratio

> > +    if (avctx->sample_aspect_ratio.den && avctx-

> >sample_aspect_ratio.num) {

> > +        AMFRatio ratio = AMFConstructRatio(avctx-

> >sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);

> > +        AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_ASPECT_RATIO, ratio);

> > +    }

> > +

> > +    // Picture control properties

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_NUM_GOPS_PER_IDR, ctx->gops_per_idr);

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_GOP_SIZE, avctx->gop_size);

> > +    if (avctx->slices > 1) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_SLICES_PER_FRAME, avctx->slices);

> > +    }

> > +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_DE_BLOCKING_FILTER_DISABLE,

> deblocking_filter);

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE, ctx-

> >header_insertion_mode);

> > +

> > +    // Rate control

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD, ctx-

> >rate_control_mode);

> > +    if (avctx->rc_buffer_size)

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_VBV_BUFFER_SIZE, avctx->rc_buffer_size);

> > +

> > +    if (avctx->rc_initial_buffer_occupancy != 0) {

> > +        int percent = avctx->rc_buffer_size * 64 / avctx-

> >rc_initial_buffer_occupancy;

> > +        if (percent > 64)

> > +            percent = 64;

> 

> As H.264; what is this trying to do?

> 

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_INITIAL_VBV_BUFFER_FULLNESS, percent);

> > +    }

> > +    // Pre-Pass, Pre-Analysis, Two-Pass

> > +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_PREANALYSIS_ENABLE, ctx-

> >preanalysis);

> > +

> > +    if (ctx->rate_control_mode ==

> AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CONSTANT_QP) {

> > +        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_ENABLE_VBAQ, false);

> > +        if (ctx->enable_vbaq)

> > +            av_log(ctx, AV_LOG_WARNING, "VBAQ is not supported by cqp

> Rate Control Method, automatically disabled.");

> > +    } else {

> > +        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_ENABLE_VBAQ, !!ctx->enable_vbaq);

> > +    }

> > +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_MOTION_HALF_PIXEL, ctx->me_half_pel);

> > +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_MOTION_QUARTERPIXEL, ctx-

> >me_quater_pel);

> > +

> > +    // init encoder

> > +    res = ctx->encoder->pVtbl->Init(ctx->encoder, ctx->format, avctx-

> >width, avctx->height);

> > +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "encoder-

> >Init() failed with error %d", res);

> > +

> > +    // init dynamic rate control params

> > +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_ENFORCE_HRD, ctx->enforce_hrd);

> > +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_FILLER_DATA_ENABLE, ctx->filler_data);

> > +

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_TARGET_BITRATE, avctx->bit_rate);

> > +    if (ctx->rate_control_mode ==

> AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CBR) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_PEAK_BITRATE, avctx->bit_rate);

> > +    } else {

> > +        int rc_max_rate = avctx->rc_max_rate >= avctx->bit_rate ? avctx-

> >rc_max_rate : avctx->bit_rate * 13 / 10;

> 

> Why 13/10?

> 

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_PEAK_BITRATE, rc_max_rate);

> > +    }

> > +

> > +    // init dynamic picture control params

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_MAX_AU_SIZE, ctx->max_au_size);

> > +

> > +

> > +    if (ctx->min_qp_i != -1) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_MIN_QP_I, ctx->min_qp_i);

> > +    }

> > +    if (ctx->max_qp_i != -1) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_MAX_QP_I, ctx->max_qp_i);

> > +    }

> > +    if (ctx->min_qp_p != -1) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_MIN_QP_P, ctx->min_qp_p);

> > +    }

> > +    if (ctx->max_qp_p != -1) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_MAX_QP_P, ctx->max_qp_p);

> > +    }

> > +

> > +    if (ctx->qp_p != -1) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_QP_I, ctx->qp_p);

> > +    }

> > +    if (ctx->qp_i != -1) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_QP_P, ctx->qp_i);

> > +    }

> > +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_SKIP_FRAME_ENABLE, ctx-

> >skip_frame);

> > +

> > +

> > +    // fill extradata

> > +    AMFVariantInit(&var);

> 

> Check return value?

> 

> > +    res = ctx->encoder->pVtbl->GetProperty(ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_EXTRADATA, &var);

> > +    if(res == AMF_OK && var.pInterface){

> 

> Formatting.

> 

> > +        AMFBuffer* buffer;

> > +        AMFGuid guid = IID_AMFBuffer();

> > +

> > +        var.pInterface->pVtbl->QueryInterface(var.pInterface, &guid,

> (void**)&buffer); // query for buffer interface

> > +

> > +        avctx->extradata_size = (int)buffer->pVtbl->GetSize(buffer);

> > +        avctx->extradata = av_mallocz(avctx->extradata_size +

> AV_INPUT_BUFFER_PADDING_SIZE);

> > +        if (!avctx->extradata) {

> > +            buffer->pVtbl->Release(buffer);

> > +            var.pInterface->pVtbl->Release(var.pInterface);

> > +            return AVERROR(ENOMEM);

> > +        }

> > +        memcpy(avctx->extradata, buffer->pVtbl->GetNative(buffer), avctx-

> >extradata_size);

> > +

> > +        buffer->pVtbl->Release(buffer);

> > +        var.pInterface->pVtbl->Release(var.pInterface);

> > +    }

> 

> Should that fail if res is not OK?

> 

> > +    return 0;

> > +}

> > +static const AVCodecDefault defaults[] = {

> > +    { "b",       "2M" },

> > +    { "maxrate", "3M" },

> > +    { "qmin",   "-1" },

> > +    { "qmax",   "-1" },

> > +    { "qdiff",  "-1" },

> > +    { "qblur",  "-1" },

> > +    { "qcomp",  "-1" },

> 

> Why set these?  You don't appear to be using them.

> 

> > +    { "g",      "250" },

> > +    { "bf",     "0" },

> > +    { NULL },

> > +};

> > +static const AVClass hevc_amf_class = {

> > +    .class_name = "hevc_amf",

> > +    .item_name = av_default_item_name,

> > +    .option = options,

> > +    .version = LIBAVUTIL_VERSION_INT,

> > +};

> > +static const AVClass hevc_amf_amf_d3d11va_class = {

> > +    .class_name = "hevc_amf_amf_d3d11va",

> > +    .item_name = av_default_item_name,

> > +    .option = options,

> > +    .version = LIBAVUTIL_VERSION_INT,

> > +};

> > +// regular encoder

> > +AVCodec ff_hevc_amf_encoder = {

> > +    .name           = "hevc_amf",

> > +    .long_name      = NULL_IF_CONFIG_SMALL("AMD AMF HEVC encoder"),

> > +    .type           = AVMEDIA_TYPE_VIDEO,

> > +    .id             = AV_CODEC_ID_HEVC,

> > +    .init           = amf_encode_init_hevc,

> > +    .encode2        = ff_amf_encode_frame,

> > +    .close          = ff_amf_encode_close,

> > +    .priv_data_size = sizeof(AmfContext),

> > +    .priv_class     = &hevc_amf_class,

> > +    .defaults       = defaults,

> > +    .capabilities   = AV_CODEC_CAP_DELAY,

> > +    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,

> > +    .pix_fmts       = ff_amf_pix_fmts,

> > +};

> > +// encoder connected with D3D11 HW accelerator

> > +AVCodec ff_hevc_amf_d3d11va_encoder = {

> > +.name = "hevc_amf_d3d11va",

> > +.long_name = NULL_IF_CONFIG_SMALL("AMD AMF HEVC encoder with

> d3d11va"),

> > +.type = AVMEDIA_TYPE_VIDEO,

> > +.id = AV_CODEC_ID_HEVC,

> > +.init = amf_encode_init_hevc,

> > +.encode2 = ff_amf_encode_frame,

> > +.close = ff_amf_encode_close,

> > +.priv_data_size = sizeof(AmfContext),

> > +.priv_class = &hevc_amf_amf_d3d11va_class,

> > +.defaults = defaults,

> > +.capabilities = AV_CODEC_CAP_DELAY,

> > +.caps_internal = FF_CODEC_CAP_INIT_CLEANUP,

> > +.pix_fmts = ff_amf_pix_fmts,

> > +};

> > +

> > +/**

> > +* Basic test BAT file:

> > +echo off

> > +if "%~1"=="" (

> > +echo input file name is empty. Use basic_transcode_amf_hevc.bat

> video.mp4

> > +goto error

> > +)

> > +

> > +SET "CWD=%~dp0"

> > +SET bitrate=5M

> > +SET maxbitrate=6M

> > +SET bufsize=2M

> > +SET x265_preset=veryfast

> > +SET amf_quality=speed

> > +

> > +

> > +rem veryfast and zerolatency options make x265 comparable with VCE

> > +

> > +rem change path to ffmpeg.exe if needed

> > +

> > +"%CWD%\..\bin\ffmpeg.exe" -y -t 100 -threads 0 -i "%~1" -c:v hevc_amf  -

> b:v %bitrate% -maxrate %maxbitrate% -bufsize %bufsize% -rc vbr_peak -

> quality %amf_quality% out_amf_hevc.mp4

> > +"%CWD%\..\bin\ffmpeg.exe" -y -t 100 -threads 0 -i "%~1" -c:v libx265   -

> b:v %bitrate% -maxrate %maxbitrate% -bufsize %bufsize% -preset

> %x265_preset% -x265-params vbv-maxrate=6000:vbv-bufsize=2000 -tune

> zerolatency out_x265_hevc.mp4

> > +

> > +echo PSNR > result.txt

> > +

> > +"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -

> threads 0 -i "%~1" -i out_amf_hevc.mp4  -lavfi

> psnr="stats_file='amf_hevc.psnr.log'"  -f null - > "trace.txt" 2>&1

> > +for /f "tokens=*" %%A in ('type "trace.txt"') do @echo amf_hevc : %%A

> >end.txt

> > +type end.txt >> result.txt

> > +

> > +

> > +"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -

> threads 0 -i "%~1" -i out_x265_hevc.mp4 -lavfi

> psnr="stats_file='x265_hevc.psnr.log'" -f null - > "trace.txt" 2>&1

> > +for /f "tokens=*" %%A in ('type "trace.txt"') do @echo x265_hevc: %%A

> >end.txt

> > +type end.txt >> result.txt

> > +

> > +

> > +echo SSIM >> result.txt

> > +

> > +"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -

> threads 0 -i "%~1" -i out_amf_hevc.mp4  -lavfi

> ssim="stats_file='amf_hevc_ssim.log'" -f null - > "trace.txt"  2>&1

> > +for /f "tokens=*" %%A in ('type "trace.txt"') do @echo amf_hevc : %%A

> >end.txt

> > +type end.txt >> result.txt

> > +

> > +"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -

> threads 0 -i "%~1" -i out_x265_hevc.mp4  -lavfi

> ssim="stats_file=x265_hevc_ssim.log'" -f null - > "trace.txt"  2>&1

> > +for /f "tokens=*" %%A in ('type "trace.txt"') do @echo x265_hevc: %%A

> >end.txt

> > +type end.txt >> result.txt

> > +

> > +del trace.txt

> > +del end.txt

> > +

> > +echo

> > +type result.txt

> > +

> > +

> > +:error

> > +

> > +

> > +*/

> 

> Don't include this stuff.

> 

> > \ No newline at end of file

> 

> git has another a review comment for you.

> 

> > diff --git a/libavcodec/version.h b/libavcodec/version.h

> > index 226da19..6c0d7a8 100644

> > --- a/libavcodec/version.h

> > +++ b/libavcodec/version.h

> > @@ -28,8 +28,8 @@

> >  #include "libavutil/version.h"

> >

> >  #define LIBAVCODEC_VERSION_MAJOR  58

> > -#define LIBAVCODEC_VERSION_MINOR   0

> > -#define LIBAVCODEC_VERSION_MICRO 101

> > +#define LIBAVCODEC_VERSION_MINOR   1

> > +#define LIBAVCODEC_VERSION_MICRO 100

> >

> >  #define LIBAVCODEC_VERSION_INT

> AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \

> >                                                 LIBAVCODEC_VERSION_MINOR, \

> >

> 

> Thanks,

> 

> - Mark

Carl Eugen Hoyos Oct. 29, 2017, 2:59 p.m. UTC | #5
2017-10-29 15:54 GMT+01:00 Mironov, Mikhail <Mikhail.Mironov@amd.com>:
>> -----Original Message-----
>> From: ffmpeg-devel [mailto:ffmpeg-devel-bounces@ffmpeg.org] On Behalf
>> Of Carl Eugen Hoyos
>> Sent: October 28, 2017 3:19 PM
>> To: FFmpeg development discussions and patches <ffmpeg-
>> devel@ffmpeg.org>
>> Subject: Re: [FFmpeg-devel] Added - HW accelerated H.264 and HEVC
>> encoding for AMD GPUs based on AMF SDK
>>
>> 2017-10-27 20:09 GMT+02:00 mmironov <mikhail.mironov@amd.com>:
>> > +const enum AVPixelFormat ff_amf_pix_fmts[] = {
>> > +    AV_PIX_FMT_NV12,
>>
>> > +    AV_PIX_FMT_BGRA,
>> > +    AV_PIX_FMT_ARGB,
>> > +    AV_PIX_FMT_RGBA,
>>
>> This is wrong, your encoders do not convert transparency information, there
>> are 32 rgb formats without alpha channel.

> In addition, the encoder has built-in shader-based color converter. It can
> accept BGRA and RGBA formats and convert them into NV12

Yes, I understood.

> dropping alpha channel.

Which is why using AV_PIX_FMT_BGRA is wrong, use AV_PIX_FMT_BGR0
(and friends).

Carl Eugen
mmironov Oct. 29, 2017, 3:43 p.m. UTC | #6
> -----Original Message-----

> From: ffmpeg-devel [mailto:ffmpeg-devel-bounces@ffmpeg.org] On Behalf

> Of Carl Eugen Hoyos

> Sent: October 29, 2017 11:00 AM

> To: FFmpeg development discussions and patches <ffmpeg-

> devel@ffmpeg.org>

> Subject: Re: [FFmpeg-devel] Added - HW accelerated H.264 and HEVC

> encoding for AMD GPUs based on AMF SDK

> 

> 2017-10-29 15:54 GMT+01:00 Mironov, Mikhail

> <Mikhail.Mironov@amd.com>:

> >> -----Original Message-----

> >> From: ffmpeg-devel [mailto:ffmpeg-devel-bounces@ffmpeg.org] On

> Behalf

> >> Of Carl Eugen Hoyos

> >> Sent: October 28, 2017 3:19 PM

> >> To: FFmpeg development discussions and patches <ffmpeg-

> >> devel@ffmpeg.org>

> >> Subject: Re: [FFmpeg-devel] Added - HW accelerated H.264 and HEVC

> >> encoding for AMD GPUs based on AMF SDK

> >>

> >> 2017-10-27 20:09 GMT+02:00 mmironov <mikhail.mironov@amd.com>:

> >> > +const enum AVPixelFormat ff_amf_pix_fmts[] = {

> >> > +    AV_PIX_FMT_NV12,

> >>

> >> > +    AV_PIX_FMT_BGRA,

> >> > +    AV_PIX_FMT_ARGB,

> >> > +    AV_PIX_FMT_RGBA,

> >>

> >> This is wrong, your encoders do not convert transparency information,

> >> there are 32 rgb formats without alpha channel.

> 

> > In addition, the encoder has built-in shader-based color converter. It

> > can accept BGRA and RGBA formats and convert them into NV12

> 

> Yes, I understood.

> 

> > dropping alpha channel.

> 

> Which is why using AV_PIX_FMT_BGRA is wrong, use AV_PIX_FMT_BGR0

> (and friends).

> 

> Carl Eugen



Got it, will do. This is my first contribution to FFmpeg. Should I resubmit with this change now, or wait a while for more comments and resubmit everything together?
Mikhail
Carl Eugen Hoyos Oct. 29, 2017, 3:51 p.m. UTC | #7
2017-10-29 16:43 GMT+01:00 Mironov, Mikhail <Mikhail.Mironov@amd.com>:

>> >> > +const enum AVPixelFormat ff_amf_pix_fmts[] = {
>> >> > +    AV_PIX_FMT_NV12,
>> >>
>> >> > +    AV_PIX_FMT_BGRA,
>> >> > +    AV_PIX_FMT_ARGB,
>> >> > +    AV_PIX_FMT_RGBA,
>> >>
>> >> This is wrong, your encoders do not convert transparency information,
>> >> there are 32 rgb formats without alpha channel.
>>
>> > In addition, the encoder has built-in shader-based color converter. It
>> > can accept BGRA and RGBA formats and convert them into NV12
>>
>> Yes, I understood.
>>
>> > dropping alpha channel.
>>
>> Which is why using AV_PIX_FMT_BGRA is wrong, use AV_PIX_FMT_BGR0
>> (and friends).

> Got it, will do. This is my first contribution to FFmpeg. Should I resubmit with
> this change or wait for some time for more comments and resubmit all-together?

While my comment does not justify a new submission,
Mark's clearly does.
(There was more than one sentence in his review.)

Please cut your quotes, Carl Eugen
mmironov Oct. 29, 2017, 6:39 p.m. UTC | #8
> >      REGISTER_ENCODER(H263_V4L2M2M,      h263_v4l2m2m);

> >      REGISTER_ENCDEC (LIBOPENH264,       libopenh264);

> > +    REGISTER_ENCODER(H264_AMF,          h264_amf);

> > +	REGISTER_ENCODER(H264_AMF,          h264_amf_d3d11va);

> 

> No tabs.  Why is the d3d11 version separate?  The encoder should be able to

> accept multiple pixfmts.

> 


It does accept multiple formats, but there is code that searches for the
accelerator name in the encoder name and, unless it is present, disables
passing the accelerator to the encoder. See hw_device_setup_for_encode().

> > +    { AV_PIX_FMT_NV12,       AMF_SURFACE_NV12 },

> > +    { AV_PIX_FMT_BGRA,       AMF_SURFACE_BGRA },

> > +    { AV_PIX_FMT_ARGB,       AMF_SURFACE_ARGB },

> > +    { AV_PIX_FMT_RGBA,       AMF_SURFACE_RGBA },

> > +    { AV_PIX_FMT_GRAY8,      AMF_SURFACE_GRAY8 },

> > +    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YUV420P },

> > +    { AV_PIX_FMT_BGR0,       AMF_SURFACE_BGRA },

> > +    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YV12 },

> > +    { AV_PIX_FMT_YUYV422,    AMF_SURFACE_YUY2 },

> 

> Do all of these formats actually work?


This is just a translation table. Actual support is in AVCodec::pix_fmts.

> 

> > +    { AV_PIX_FMT_D3D11,      AMF_SURFACE_NV12 },

> 

> D3D11 surfaces need not be NV12.  The actual format is in

> AVHWFramesContext.sw_format - if you only support 8-bit then something

> nasty probably happens if you give it P010 surfaces.

> 


Agreed, but how should I define D3D11 NV12 as an input format if I only have AV_PIX_FMT_D3D11?

> > +};

> > +

> > +static enum AMF_SURFACE_FORMAT amf_av_to_amf_format(enum

> AVPixelFormat fmt)

> > +{

> > +    for (int i = 0; i < amf_countof(format_map); i++) {

> > +        if (format_map[i].av_format == fmt) {

> > +            return format_map[i].amf_format;

> > +        }

> > +    }

> > +    return AMF_SURFACE_UNKNOWN;

> > +}

> > +

> > +// virtual functions decalred

> 

> What does this comment mean?

> 


These functions are virtual functions placed in a real virtual table.

> > +

> > +    version_fun(&ctx->version);

> > +    init_fun(AMF_FULL_VERSION, &ctx->factory);

> > +    ctx->factory->pVtbl->GetTrace(ctx->factory, &ctx->trace);

> > +    ctx->factory->pVtbl->GetDebug(ctx->factory, &ctx->debug);

> 

> Do all of these functions necessarily succeed?

> 


Yes.

 
> > +

> > +    // try to reuse existing DX device

> > +

> > +    if (avctx->hw_frames_ctx) {

> > +        AVHWFramesContext *device_ctx = (AVHWFramesContext*)avctx-

> >hw_frames_ctx->data;

> > +        if (device_ctx->device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA){

> > +            if (device_ctx->device_ctx->hwctx) {

> > +                AVD3D11VADeviceContext *device_d3d11 =

> (AVD3D11VADeviceContext *)device_ctx->device_ctx->hwctx;

> > +                res = ctx->context->pVtbl->InitDX11(ctx->context, device_d3d11-

> >device, AMF_DX11_1);

> > +                if (res == AMF_OK) {

> > +                    ctx->hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);

> > +                } else {

> > +                    av_log(avctx, AV_LOG_INFO, "amf_shared: avctx-

> >hw_frames_ctx has non-AMD device, switching to default");

> 

> I'm not sure this is going to act sensibly - if the user has D3D11 frames input

> on another device, does it work?

> 


If the device is not AMD's, the code tries to create another, compatible device.
In that case the submission will go via system memory.

> > +    surface->pVtbl->SetPts(surface, frame->pts);

> 

> Does this accept the same range as frame->pts, including AV_NOPTS_VALUE?

> 


Yes, the encoder doesn't use pts; it just passes the value through for convenience.

> > +

> > +int ff_amf_encode_frame(AVCodecContext *avctx, AVPacket *pkt,

> > +                        const AVFrame *frame, int *got_packet)

> > +{

> > +    int             ret = 0;

> > +    AMF_RESULT      res = AMF_OK;

> > +    AmfContext     *ctx = avctx->priv_data;

> > +    AMFSurface     *surface = 0;

> > +    AMFData        *data = 0;

> > +    amf_bool       submitted = false;

> > +

> > +    while (!submitted) {

> > +        if (!frame) { // submit drain

> > +            if (!ctx->eof) { // submit drain onre time only

> > +                res = ctx->encoder->pVtbl->Drain(ctx->encoder);

> > +                if (res == AMF_INPUT_FULL) {

> > +                    av_usleep(1000); // input queue is full: wait, poll and submit

> Drain again

> > +                                     // need to get some output and try again

> > +                } else if (res == AMF_OK) {

> > +                    ctx->eof = true; // drain started

> > +                    submitted = true;

> > +                }

> > +            }

> > +        } else { // submit frame

> > +            if (surface == 0) { // prepare surface from frame one time only

> > +                if (frame->hw_frames_ctx && ( // HW frame detected

> > +                                              // check if the same hw_frames_ctx as used in

> initialization

> > +                    (ctx->hw_frames_ctx && frame->hw_frames_ctx->data == ctx-

> >hw_frames_ctx->data) ||

> > +                    // check if the same hw_device_ctx as used in initialization

> > +                    (ctx->hw_device_ctx && ((AVHWFramesContext*)frame-

> >hw_frames_ctx->data)->device_ctx ==

> > +                    (AVHWDeviceContext*)ctx->hw_device_ctx->data)

> > +                )) {

> > +                    ID3D11Texture2D* texture = (ID3D11Texture2D*)frame-

> >data[0]; // actual texture

> > +                    int index = (int)(size_t)frame->data[1]; // index is a slice in

> texture array is - set to tell AMF which slice to use

> > +                    texture->lpVtbl->SetPrivateData(texture,

> &AMFTextureArrayIndexGUID, sizeof(index), &index);

> > +

> > +                    res = ctx->context->pVtbl->CreateSurfaceFromDX11Native(ctx-

> >context, texture, &surface, NULL); // wrap to AMF surface

> > +                    surface->pVtbl->SetCrop(surface, 0, 0, frame->width, frame-

> >height); // decode surfaces are vertically aligned by 16 tell AMF real size

> > +                    surface->pVtbl->SetPts(surface, frame->pts);

> > +                } else {

> > +                    res = ctx->context->pVtbl->AllocSurface(ctx->context,

> AMF_MEMORY_HOST, ctx->format, avctx->width, avctx->height, &surface);

> > +                    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG,

> "AllocSurface() failed  with error %d", res);

> > +                    amf_copy_surface(avctx, frame, surface);

> > +                }

> > +            }

> > +            // encode

> > +            res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder,

> (AMFData*)surface);

> > +            if (res == AMF_INPUT_FULL) { // handle full queue

> > +                av_usleep(1000); // input queue is full: wait, poll and submit

> surface again

> 

> Is there really no way in the API to wait for this properly?

> 


The AMF runtime is designed without internal threads or sleeps. It is up to the
application to poll the output and in this way make space in the input HW queue.
But if the input queue is really full it does make sense to sleep and
continue polling to avoid unnecessary CPU burn.
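
For illustration, the pattern I mean is roughly this (a sketch only, reusing
the names already in the patch; amf_poll_output_once() is just an illustrative
helper name, not new API):

    // Poll the encoder output once; back off briefly only when nothing is
    // ready, so the input HW queue can drain before SubmitInput() is retried.
    static AMF_RESULT amf_poll_output_once(AmfContext *ctx, AMFData **data)
    {
        AMF_RESULT res = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, data);
        if (!*data && res != AMF_EOF)
            av_usleep(1000); // nothing ready yet - avoid a busy spin
        return res;
    }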


> > +            } else {

> > +                surface->pVtbl->Release(surface);

> > +                surface = NULL;

> > +                AMF_RETURN_IF_FALSE(ctx, res == AMF_OK,

> AVERROR_UNKNOWN, "SubmitInput() failed with error %d", res);

> > +                submitted = 1;

> > +            }

> > +        }

> > +        // poll results

> > +        if (!data) {

> > +            res = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data);

> > +            if (data) {

> > +                AMFBuffer* buffer;

> > +                AMFGuid guid = IID_AMFBuffer();

> > +                data->pVtbl->QueryInterface(data, &guid, (void**)&buffer); //

> query for buffer interface

> > +                ret = amf_copy_buffer(avctx, pkt, buffer);

> > +                if (!ret)

> > +                    *got_packet = 1;

> > +                buffer->pVtbl->Release(buffer);

> > +                data->pVtbl->Release(data);

> > +                if (ctx->eof) {

> > +                    submitted = true; // we are in the drain state - no submissions

> > +                }

> > +            } else if (res == AMF_EOF) {

> > +                submitted = true; // drain complete

> > +            } else {

> > +                if (!submitted) {

> > +                    av_usleep(1000); // wait and poll again

> > +                }

> > +            }

> > +        }

> > +    }

> 

> I suspect this setup is not actually going to follow the constraints of the

> deprecated encode2().  Given the API here, I think you would be much better

> off writing with send_frame()/receive_packet().


I considered this, but without a thread calling receive_packet() the implementation
falls into the same pattern as now, just with an additional queue of ready outputs.

> > +    AMFSize             framesize = AMFConstructSize(avctx->width, avctx-

> >height);

> > +    AMFRate             framerate = AMFConstructRate(avctx->time_base.den,

> avctx->time_base.num * avctx->ticks_per_frame);

> 

> Is VFR encoding not supported?

> 


The encoder uses the frame rate only for rate control. It does not take
frame duration into account in the VFR case.

> > +    if (avctx->rc_initial_buffer_occupancy != 0) {

> > +        int percent = avctx->rc_buffer_size * 64 / avctx-

> >rc_initial_buffer_occupancy;

> > +        if (percent > 64)

> > +            percent = 64;

> 

> ???

> 


This is an attempt to translate it to the 1-64 range exposed by the encoder.

> > +    // Bitrate

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_TARGET_BITRATE, avctx->bit_rate);

> > +    if (ctx->rate_control_mode ==

> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CBR) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_PEAK_BITRATE, avctx->bit_rate);

> > +    } else {

> > +        int rc_max_rate = avctx->rc_max_rate >= avctx->bit_rate ? avctx-

> >rc_max_rate : avctx->bit_rate * 13 / 10;

> 

> Where does 13/10 come from?

> 


For best results rc_max_rate should be bigger than bit_rate for VBR; for CBR it is ignored.
What is the better way to correct an unset parameter?

> > +

> > +    // fill extradata

> > +    AMFVariantInit(&var);

> 

> Can this fail?

> 

Not if var is cleared with 0.

> > +// encoder connected with D3D11 HW accelerator

> > +AVCodec ff_h264_amf_d3d11va_encoder = {

> > +    .name = "h264_amf_d3d11va",

> > +    .long_name = NULL_IF_CONFIG_SMALL("AMD AMF H.264 Encoder with

> d3d11va"),

> > +    .type = AVMEDIA_TYPE_VIDEO,

> > +    .id = AV_CODEC_ID_H264,

> > +    .init = amf_encode_init_h264,

> > +    .encode2 = ff_amf_encode_frame,

> > +    .close = ff_amf_encode_close,

> > +    .priv_data_size = sizeof(AmfContext),

> > +    .priv_class = &h264_amf_d3d11va_class,

> > +    .defaults = defaults,

> > +    .capabilities = AV_CODEC_CAP_DELAY,

> > +    .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,

> > +    .pix_fmts = ff_amf_pix_fmts,

> > +};

> 

> As above, why does this separate (identical) instance exist?


See explanation about accelerator handling for encoders above.

> > +static const AVOption options[] = {

> > +    { "usage",          "Set the encoding usage",             OFFSET(usage),

> AV_OPT_TYPE_INT,   { .i64 =

> AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING },

> AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING,

> AMF_VIDEO_ENCODER_HEVC_USAGE_WEBCAM, VE, "usage" },

> > +    { "transcoding",    "", 0, AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING },         0, 0, VE,

> "usage" },

> > +    { "ultralowlatency","", 0, AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_HEVC_USAGE_ULTRA_LOW_LATENCY },    0, 0, VE,

> "usage" },

> > +    { "lowlatency",     "", 0, AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_HEVC_USAGE_LOW_LATENCY },          0, 0, VE,

> "usage" },

> > +    { "webcam",         "", 0, AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_HEVC_USAGE_WEBCAM },               0, 0, VE, "usage" },

> 

> Could some of this be in common with the H.264 encoder?  (Maybe in the

> header?)

> 


I tried to keep the H.264 and HEVC parameters completely separate. I was asked by the codec
team to do so in the AMF API and did the same here.


> >

> 

> Thanks,

> 

> - Mark



I tried to answer all questions. Sorry if I missed something. The rest is clear.
Thanks, Mikhail
Mark Thompson Oct. 29, 2017, 7:36 p.m. UTC | #9
On 29/10/17 18:39, Mironov, Mikhail wrote:
>>>      REGISTER_ENCODER(H263_V4L2M2M,      h263_v4l2m2m);
>>>      REGISTER_ENCDEC (LIBOPENH264,       libopenh264);
>>> +    REGISTER_ENCODER(H264_AMF,          h264_amf);
>>> +	REGISTER_ENCODER(H264_AMF,          h264_amf_d3d11va);
>>
>> No tabs.  Why is the d3d11 version separate?  The encoder should be able to
>> accept multiple pixfmts.
>>
> 
> It does accept multiple formants but there is a code that searches for 
> accelerator name in the encoder name and unless it is present disables 
> passing accelerator to encoder. See hw_device_setup_for_encode().

That code is a temporary hack, please don't assume it.  A setup something like <https://lists.libav.org/pipermail/libav-devel/2017-October/085223.html> is intended to replace it (not yet done for encode), which will not require that sort of nastiness.  (Feel free to comment on that.)

I'm not sure how much you want an implicit device here anyway - you get a device from hw_frames_ctx for D3D11 input, and otherwise it doesn't matter?

>>> +    { AV_PIX_FMT_NV12,       AMF_SURFACE_NV12 },
>>> +    { AV_PIX_FMT_BGRA,       AMF_SURFACE_BGRA },
>>> +    { AV_PIX_FMT_ARGB,       AMF_SURFACE_ARGB },
>>> +    { AV_PIX_FMT_RGBA,       AMF_SURFACE_RGBA },
>>> +    { AV_PIX_FMT_GRAY8,      AMF_SURFACE_GRAY8 },
>>> +    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YUV420P },
>>> +    { AV_PIX_FMT_BGR0,       AMF_SURFACE_BGRA },
>>> +    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YV12 },
>>> +    { AV_PIX_FMT_YUYV422,    AMF_SURFACE_YUY2 },
>>
>> Do all of these formats actually work?
> 
> This is just a translation table. Actual support is in AVCodec::rix_fmts 

You answered some of the actual question responding to Carl, I'll reply there.

>>
>>> +    { AV_PIX_FMT_D3D11,      AMF_SURFACE_NV12 },
>>
>> D3D11 surfaces need not be NV12.  The actual format is in
>> AVHWFramesContext.sw_format - if you only support 8-bit then something
>> nasty probably happens if you give it P010 surfaces.
>>
> 
> Agreed, but how should I define D3D11 NV12 as input format if I only have AV_PIX_FMT_D3D11?

Check sw_format afterwards.
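
Something along these lines, reusing the names already in your patch (a sketch;
error handling abbreviated):

    enum AVPixelFormat pix_fmt = avctx->pix_fmt;
    if (pix_fmt == AV_PIX_FMT_D3D11 && avctx->hw_frames_ctx) {
        // the real surface format lives in the frames context, not in pix_fmt
        AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
        pix_fmt = frames_ctx->sw_format;
    }
    ctx->format = amf_av_to_amf_format(pix_fmt);
    if (ctx->format == AMF_SURFACE_UNKNOWN) { // e.g. P010 on an 8-bit-only encoder
        av_log(avctx, AV_LOG_ERROR, "Unsupported input format %s\n",
               av_get_pix_fmt_name(pix_fmt));
        return AVERROR(EINVAL);
    }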

>>> +};
>>> +
>>> +static enum AMF_SURFACE_FORMAT amf_av_to_amf_format(enum
>> AVPixelFormat fmt)
>>> +{
>>> +    for (int i = 0; i < amf_countof(format_map); i++) {
>>> +        if (format_map[i].av_format == fmt) {
>>> +            return format_map[i].amf_format;
>>> +        }
>>> +    }
>>> +    return AMF_SURFACE_UNKNOWN;
>>> +}
>>> +
>>> +// virtual functions decalred
>>
>> What does this comment mean?
>>
> 
> These functions are virtual functions put in real virtual table.  

IMO the comment is useless.  If you want to keep it, at least fix the typo.

>>> +
>>> +    version_fun(&ctx->version);
>>> +    init_fun(AMF_FULL_VERSION, &ctx->factory);
>>> +    ctx->factory->pVtbl->GetTrace(ctx->factory, &ctx->trace);
>>> +    ctx->factory->pVtbl->GetDebug(ctx->factory, &ctx->debug);
>>
>> Do all of these functions necessarily succeed?
>>
> 
> Yes.

Even in possible future API versions?  Why aren't they void, then?

>>> +
>>> +    // try to reuse existing DX device
>>> +
>>> +    if (avctx->hw_frames_ctx) {
>>> +        AVHWFramesContext *device_ctx = (AVHWFramesContext*)avctx-
>>> hw_frames_ctx->data;
>>> +        if (device_ctx->device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA){
>>> +            if (device_ctx->device_ctx->hwctx) {
>>> +                AVD3D11VADeviceContext *device_d3d11 =
>> (AVD3D11VADeviceContext *)device_ctx->device_ctx->hwctx;
>>> +                res = ctx->context->pVtbl->InitDX11(ctx->context, device_d3d11-
>>> device, AMF_DX11_1);
>>> +                if (res == AMF_OK) {
>>> +                    ctx->hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
>>> +                } else {
>>> +                    av_log(avctx, AV_LOG_INFO, "amf_shared: avctx-
>>> hw_frames_ctx has non-AMD device, switching to default");
>>
>> I'm not sure this is going to act sensibly - if the user has D3D11 frames input
>> on another device, does it work?
>>
> 
> If device is not AMD's the code is trying to create another device  - the compatible one . 
> In these cases the submission will be via system memory.

And that works with D3D11 frames as hw_frames_ctx on another device?

>>> +    surface->pVtbl->SetPts(surface, frame->pts);
>>
>> Does this accept the same range as frame->pts, including AV_NOPTS_VALUE?
>>
> 
> Yes, encoder doesn’t use pts, just passes the value through for convenience.

What does it do with them, then?

>>> +
>>> +int ff_amf_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
>>> +                        const AVFrame *frame, int *got_packet)
>>> +{
>>> +    int             ret = 0;
>>> +    AMF_RESULT      res = AMF_OK;
>>> +    AmfContext     *ctx = avctx->priv_data;
>>> +    AMFSurface     *surface = 0;
>>> +    AMFData        *data = 0;
>>> +    amf_bool       submitted = false;
>>> +
>>> +    while (!submitted) {
>>> +        if (!frame) { // submit drain
>>> +            if (!ctx->eof) { // submit drain onre time only
>>> +                res = ctx->encoder->pVtbl->Drain(ctx->encoder);
>>> +                if (res == AMF_INPUT_FULL) {
>>> +                    av_usleep(1000); // input queue is full: wait, poll and submit
>> Drain again
>>> +                                     // need to get some output and try again
>>> +                } else if (res == AMF_OK) {
>>> +                    ctx->eof = true; // drain started
>>> +                    submitted = true;
>>> +                }
>>> +            }
>>> +        } else { // submit frame
>>> +            if (surface == 0) { // prepare surface from frame one time only
>>> +                if (frame->hw_frames_ctx && ( // HW frame detected
>>> +                                              // check if the same hw_frames_ctx as used in
>> initialization
>>> +                    (ctx->hw_frames_ctx && frame->hw_frames_ctx->data == ctx-
>>> hw_frames_ctx->data) ||
>>> +                    // check if the same hw_device_ctx as used in initialization
>>> +                    (ctx->hw_device_ctx && ((AVHWFramesContext*)frame-
>>> hw_frames_ctx->data)->device_ctx ==
>>> +                    (AVHWDeviceContext*)ctx->hw_device_ctx->data)
>>> +                )) {
>>> +                    ID3D11Texture2D* texture = (ID3D11Texture2D*)frame-
>>> data[0]; // actual texture
>>> +                    int index = (int)(size_t)frame->data[1]; // index is a slice in
>> texture array is - set to tell AMF which slice to use
>>> +                    texture->lpVtbl->SetPrivateData(texture,
>> &AMFTextureArrayIndexGUID, sizeof(index), &index);
>>> +
>>> +                    res = ctx->context->pVtbl->CreateSurfaceFromDX11Native(ctx-
>>> context, texture, &surface, NULL); // wrap to AMF surface
>>> +                    surface->pVtbl->SetCrop(surface, 0, 0, frame->width, frame-
>>> height); // decode surfaces are vertically aligned by 16 tell AMF real size
>>> +                    surface->pVtbl->SetPts(surface, frame->pts);
>>> +                } else {
>>> +                    res = ctx->context->pVtbl->AllocSurface(ctx->context,
>> AMF_MEMORY_HOST, ctx->format, avctx->width, avctx->height, &surface);
>>> +                    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG,
>> "AllocSurface() failed  with error %d", res);
>>> +                    amf_copy_surface(avctx, frame, surface);
>>> +                }
>>> +            }
>>> +            // encode
>>> +            res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder,
>> (AMFData*)surface);
>>> +            if (res == AMF_INPUT_FULL) { // handle full queue
>>> +                av_usleep(1000); // input queue is full: wait, poll and submit
>> surface again
>>
>> Is there really no way in the API to wait for this properly?
>>
> 
> The AMF runtime is designed without threads and sleeps inside. It is up to
>  application to poll output and this way make space in the input HW queue. 
> But if input queue is really full it does make sense to sleep and 
> continue polling to avoid unnecessary CPU burn. 

Some APIs have a way to wait for an event for this rather than polling.  Polling like this will wake the CPU pointlessly and waste power.  (And yes, I know lots of other places do it, but if you have a proper API for this then please use it.)

>>> +            } else {
>>> +                surface->pVtbl->Release(surface);
>>> +                surface = NULL;
>>> +                AMF_RETURN_IF_FALSE(ctx, res == AMF_OK,
>> AVERROR_UNKNOWN, "SubmitInput() failed with error %d", res);
>>> +                submitted = 1;
>>> +            }
>>> +        }
>>> +        // poll results
>>> +        if (!data) {
>>> +            res = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data);
>>> +            if (data) {
>>> +                AMFBuffer* buffer;
>>> +                AMFGuid guid = IID_AMFBuffer();
>>> +                data->pVtbl->QueryInterface(data, &guid, (void**)&buffer); //
>> query for buffer interface
>>> +                ret = amf_copy_buffer(avctx, pkt, buffer);
>>> +                if (!ret)
>>> +                    *got_packet = 1;
>>> +                buffer->pVtbl->Release(buffer);
>>> +                data->pVtbl->Release(data);
>>> +                if (ctx->eof) {
>>> +                    submitted = true; // we are in the drain state - no submissions
>>> +                }
>>> +            } else if (res == AMF_EOF) {
>>> +                submitted = true; // drain complete
>>> +            } else {
>>> +                if (!submitted) {
>>> +                    av_usleep(1000); // wait and poll again
>>> +                }
>>> +            }
>>> +        }
>>> +    }
>>
>> I suspect this setup is not actually going to follow the constraints of the
>> deprecated encode2().  Given the API here, I think you would be much better
>> off writing with send_frame()/receive_packet().
> 
> I considered this, but without a thread that would call receive_packet() the implementation 
> will fall into the same pattern as it is now but with an additional queue of ready outputs.

See the documentation - send_frame() can return EAGAIN while a packet is waiting to be fetched, no external queue is required.
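
Roughly this shape (only a sketch of the control flow; amf_prepare_surface() is a
hypothetical helper standing in for the surface wrapping/copying already done in
ff_amf_encode_frame(), and the callback signatures are whatever the internal
send_frame/receive_packet hooks look like when this lands):

    static int amf_send_frame(AVCodecContext *avctx, const AVFrame *frame)
    {
        AmfContext *ctx     = avctx->priv_data;
        AMFSurface *surface = NULL;
        AMF_RESULT  res;
        int         ret;

        if (!frame) { // flush request
            res = ctx->encoder->pVtbl->Drain(ctx->encoder);
            if (res == AMF_INPUT_FULL)
                return AVERROR(EAGAIN); // fetch output first, then flush again
            return res == AMF_OK ? 0 : AVERROR_UNKNOWN;
        }

        ret = amf_prepare_surface(avctx, frame, &surface); // hypothetical helper
        if (ret < 0)
            return ret;

        res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)surface);
        surface->pVtbl->Release(surface);
        if (res == AMF_INPUT_FULL)
            return AVERROR(EAGAIN); // caller drains output, then resends this frame
        return res == AMF_OK ? 0 : AVERROR_UNKNOWN;
    }

    static int amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
    {
        AmfContext *ctx  = avctx->priv_data;
        AMFData    *data = NULL;
        AMFBuffer  *buffer;
        AMFGuid     guid;
        int         ret;
        AMF_RESULT  res = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data);

        if (res == AMF_EOF)
            return AVERROR_EOF;     // drain finished
        if (!data)
            return AVERROR(EAGAIN); // nothing ready yet

        guid = IID_AMFBuffer();
        data->pVtbl->QueryInterface(data, &guid, (void**)&buffer);
        ret = amf_copy_buffer(avctx, avpkt, buffer);
        buffer->pVtbl->Release(buffer);
        data->pVtbl->Release(data);
        return ret;
    }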

>>> +    AMFSize             framesize = AMFConstructSize(avctx->width, avctx-
>>> height);
>>> +    AMFRate             framerate = AMFConstructRate(avctx->time_base.den,
>> avctx->time_base.num * avctx->ticks_per_frame);
>>
>> Is VFR encoding not supported?
>>
> 
> The encoder uses frame rate only for rate control. It does not take in account 
> frame duration in case of VFR.

Sure.  (That should probably be documented somewhere.)

>>> +    if (avctx->rc_initial_buffer_occupancy != 0) {
>>> +        int percent = avctx->rc_buffer_size * 64 / avctx-
>>> rc_initial_buffer_occupancy;
>>> +        if (percent > 64)
>>> +            percent = 64;
>>
>> ???
>>
> 
> This is an attempt to translate to 1-64 range which is exposed by the encoder.

1-64, yet the variable is called percent?  Sounds very suspicious.  If that is indeed the behaviour, please add a comment explaining it.

>>> +    // Bitrate
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_TARGET_BITRATE, avctx->bit_rate);
>>> +    if (ctx->rate_control_mode ==
>> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CBR) {
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_PEAK_BITRATE, avctx->bit_rate);
>>> +    } else {
>>> +        int rc_max_rate = avctx->rc_max_rate >= avctx->bit_rate ? avctx-
>>> rc_max_rate : avctx->bit_rate * 13 / 10;
>>
>> Where does 13/10 come from?
>>
> 
> For best results rc_max_rate should be bigger then bit_rate  for VBR. For CBR it is ignored. 
> What is the better way to correct an unset parameter?

Max rate should only be constrained by the HRD buffering in this case.  Are you sure this isn't handled internally if you don't supply the parameter at all?  If not, maybe supply some sort of infinity to avoid it constraining anything appropriately.

>>> +
>>> +    // fill extradata
>>> +    AMFVariantInit(&var);
>>
>> Can this fail?
>>
> Not if var cleared with 0.

I would prefer that return values are checked for all functions which return them, in case of future API changes.  If the function can't return an error, why isn't it void?

>>> +// encoder connected with D3D11 HW accelerator
>>> +AVCodec ff_h264_amf_d3d11va_encoder = {
>>> +    .name = "h264_amf_d3d11va",
>>> +    .long_name = NULL_IF_CONFIG_SMALL("AMD AMF H.264 Encoder with
>> d3d11va"),
>>> +    .type = AVMEDIA_TYPE_VIDEO,
>>> +    .id = AV_CODEC_ID_H264,
>>> +    .init = amf_encode_init_h264,
>>> +    .encode2 = ff_amf_encode_frame,
>>> +    .close = ff_amf_encode_close,
>>> +    .priv_data_size = sizeof(AmfContext),
>>> +    .priv_class = &h264_amf_d3d11va_class,
>>> +    .defaults = defaults,
>>> +    .capabilities = AV_CODEC_CAP_DELAY,
>>> +    .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
>>> +    .pix_fmts = ff_amf_pix_fmts,
>>> +};
>>
>> As above, why does this separate (identical) instance exist?
> 
> See explanation about accelerator handling for encoders above.
> 
>>> +static const AVOption options[] = {
>>> +    { "usage",          "Set the encoding usage",             OFFSET(usage),
>> AV_OPT_TYPE_INT,   { .i64 =
>> AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING },
>> AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING,
>> AMF_VIDEO_ENCODER_HEVC_USAGE_WEBCAM, VE, "usage" },
>>> +    { "transcoding",    "", 0, AV_OPT_TYPE_CONST, { .i64 =
>> AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING },         0, 0, VE,
>> "usage" },
>>> +    { "ultralowlatency","", 0, AV_OPT_TYPE_CONST, { .i64 =
>> AMF_VIDEO_ENCODER_HEVC_USAGE_ULTRA_LOW_LATENCY },    0, 0, VE,
>> "usage" },
>>> +    { "lowlatency",     "", 0, AV_OPT_TYPE_CONST, { .i64 =
>> AMF_VIDEO_ENCODER_HEVC_USAGE_LOW_LATENCY },          0, 0, VE,
>> "usage" },
>>> +    { "webcam",         "", 0, AV_OPT_TYPE_CONST, { .i64 =
>> AMF_VIDEO_ENCODER_HEVC_USAGE_WEBCAM },               0, 0, VE, "usage" },
>>
>> Could some of this be in common with the H.264 encoder?  (Maybe in the
>> header?)
>>
> 
> I tried to keep H264 and HAVC parameters completely separate. I was asked by codec 
> team to do so in AMF API  and did the same here.

Ok, sure.


Some more random questions:

* How can we supply colour information to the codecs?  (VUI colour_primaries/transfer_characteristics/matrix_coefficients/chroma_sample_loc_type.)

* Does timing SEI (buffering_period/pic_timing) get generated for bitrate-targeted modes?  Is there any way to control that?

* When you say this will be used on Linux, is that going to be in Mesa (integrated with VAAPI as the Windows code is with D3D?) or will it be something in the proprietary drivers?

Thanks,

- Mark
Mark Thompson Oct. 29, 2017, 7:49 p.m. UTC | #10
On 29/10/17 14:54, Mironov, Mikhail wrote:
>> -----Original Message-----
>> From: ffmpeg-devel [mailto:ffmpeg-devel-bounces@ffmpeg.org] On Behalf
>> Of Carl Eugen Hoyos
>> Sent: October 28, 2017 3:19 PM
>> To: FFmpeg development discussions and patches <ffmpeg-
>> devel@ffmpeg.org>
>> Subject: Re: [FFmpeg-devel] Added - HW accelerated H.264 and HEVC
>> encoding for AMD GPUs based on AMF SDK
>>
>> 2017-10-27 20:09 GMT+02:00 mmironov <mikhail.mironov@amd.com>:
>>> +const enum AVPixelFormat ff_amf_pix_fmts[] = {
>>> +    AV_PIX_FMT_NV12,
>>
>>> +    AV_PIX_FMT_BGRA,
>>> +    AV_PIX_FMT_ARGB,
>>> +    AV_PIX_FMT_RGBA,
>>
>> This is wrong, your encoders do not convert transparency information, there
>> are 32 rgb formats without alpha channel.
>>
>>> +    { AV_PIX_FMT_NV12,       AMF_SURFACE_NV12 },
>>> +    { AV_PIX_FMT_D3D11,      AMF_SURFACE_NV12 },
>>
>> Just curious: Can you explain this in simple words?
>>
>> I will try not to comment on the headers you sent, Carl Eugen
> Sure: the native format for the encoder is NV12. It can work via D3D11 and accept DXGI_FORMAT_NV12 texture or via D3D9  accepting (D3DFORMAT)MAKEFOURCC('N', 'V', '1', '2') surface. AMF surfaces are wrappers around these objects and converters from system memory. In addition, the encoder has built-in shader-based color converter. It can accept BGRA and RGBA formats and convert them into NV12 dropping alpha channel. This is why I added these entries. Granted, the conversion lacks parameters and to support more options AMF has a separate color space component, also shader-based. Once encoder is integrated I planned to ask FFmpeg team opinion if it is needed or not. 
> I made the FFmpeg encoder accepting DX objects from other FFmpeg components if available.

If you're going to do the colour conversion here then please pass the necessary colour information (AVFrame.color_primaries|color_trc|colorspace) so that non-BT.709 (I'm guessing...) has some chance of being correct.

(Though I'd say in general it's preferable not to have the encoder do this, because it becomes confused with actual encoding of RGB.  A separate D3D11 shader implementation to do it as a filter would definitely be welcome...)

Thanks,

- Mark
mmironov Oct. 29, 2017, 8:48 p.m. UTC | #11
> -----Original Message-----

> From: ffmpeg-devel [mailto:ffmpeg-devel-bounces@ffmpeg.org] On Behalf

> Of Mark Thompson

> Sent: October 29, 2017 3:36 PM

> To: ffmpeg-devel@ffmpeg.org

> Subject: Re: [FFmpeg-devel] Added - HW accelerated H.264 and HEVC

> encoding for AMD GPUs based on AMF SDK

> 

> On 29/10/17 18:39, Mironov, Mikhail wrote:

> >>>      REGISTER_ENCODER(H263_V4L2M2M,      h263_v4l2m2m);

> >>>      REGISTER_ENCDEC (LIBOPENH264,       libopenh264);

> >>> +    REGISTER_ENCODER(H264_AMF,          h264_amf);

> >>> +	REGISTER_ENCODER(H264_AMF,          h264_amf_d3d11va);

> >>

> >> No tabs.  Why is the d3d11 version separate?  The encoder should be

> >> able to accept multiple pixfmts.

> >>

> >

> > It does accept multiple formants but there is a code that searches for

> > accelerator name in the encoder name and unless it is present disables

> > passing accelerator to encoder. See hw_device_setup_for_encode().

> 

> That code is a temporary hack, please don't assume it.  A setup something

> like <https://lists.libav.org/pipermail/libav-devel/2017-

> October/085223.html> is intended to replace it (not yet done for encode),

> which will not require that sort of nastiness.  (Feel free to comment on that.)


OK, this would be way better. I also didn’t like the codec duplication. 
So for now I will remove it and mark the codec as HW when available.

> 

> I'm not sure how much you want an implicit device here anyway - you get a

> device from hw_frames_ctx for D3D11 input, and otherwise it doesn't

> matter?

> 

> >>> +    { AV_PIX_FMT_NV12,       AMF_SURFACE_NV12 },

> >>> +    { AV_PIX_FMT_BGRA,       AMF_SURFACE_BGRA },

> >>> +    { AV_PIX_FMT_ARGB,       AMF_SURFACE_ARGB },

> >>> +    { AV_PIX_FMT_RGBA,       AMF_SURFACE_RGBA },

> >>> +    { AV_PIX_FMT_GRAY8,      AMF_SURFACE_GRAY8 },

> >>> +    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YUV420P },

> >>> +    { AV_PIX_FMT_BGR0,       AMF_SURFACE_BGRA },

> >>> +    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YV12 },

> >>> +    { AV_PIX_FMT_YUYV422,    AMF_SURFACE_YUY2 },

> >>

> >> Do all of these formats actually work?

> >

> > This is just a translation table. Actual support is in

> > AVCodec::rix_fmts

> 

> You answered some of the actual question responding to Carl, I'll reply there.

> 

> >>

> >>> +    { AV_PIX_FMT_D3D11,      AMF_SURFACE_NV12 },

> >>

> >> D3D11 surfaces need not be NV12.  The actual format is in

> >> AVHWFramesContext.sw_format - if you only support 8-bit then

> >> something nasty probably happens if you give it P010 surfaces.

> >>

> >

> > Agreed, but how should I define D3D11 NV12 as input format if I only have

> AV_PIX_FMT_D3D11?

> 

> Check sw_format afterwards.

> 


sw_format is part of AVHWFramesContext in hwcontext.h.
But how should I define the pix_fmts array in AVCodec? For example, in nvenc.c
it is done via AV_PIX_FMT_CUDA. Is that wrong?

> >>> +};

> >>> +

> >>> +static enum AMF_SURFACE_FORMAT amf_av_to_amf_format(enum

> >> AVPixelFormat fmt)

> >>> +{

> >>> +    for (int i = 0; i < amf_countof(format_map); i++) {

> >>> +        if (format_map[i].av_format == fmt) {

> >>> +            return format_map[i].amf_format;

> >>> +        }

> >>> +    }

> >>> +    return AMF_SURFACE_UNKNOWN;

> >>> +}

> >>> +

> >>> +// virtual functions decalred

> >>

> >> What does this comment mean?

> >>

> >

> > These functions are virtual functions put in real virtual table.

> 

> IMO the comment is useless.  If you want to keep it, at least fix the typo.


I can remove it, no problem. Just wanted to explain.
 
> 

> >>> +

> >>> +    version_fun(&ctx->version);

> >>> +    init_fun(AMF_FULL_VERSION, &ctx->factory);

> >>> +    ctx->factory->pVtbl->GetTrace(ctx->factory, &ctx->trace);

> >>> +    ctx->factory->pVtbl->GetDebug(ctx->factory, &ctx->debug);

> >>

> >> Do all of these functions necessarily succeed?

> >>

> >

> > Yes.

> 

> Even in possible future API versions?  Why aren't they void, then?


Good question; I guess for consistency of the API. I will add error checking.

> 

> >>> +

> >>> +    // try to reuse existing DX device

> >>> +

> >>> +    if (avctx->hw_frames_ctx) {

> >>> +        AVHWFramesContext *device_ctx = (AVHWFramesContext*)avctx-

> >>> hw_frames_ctx->data;

> >>> +        if (device_ctx->device_ctx->type ==

> AV_HWDEVICE_TYPE_D3D11VA){

> >>> +            if (device_ctx->device_ctx->hwctx) {

> >>> +                AVD3D11VADeviceContext *device_d3d11 =

> >> (AVD3D11VADeviceContext *)device_ctx->device_ctx->hwctx;

> >>> +                res = ctx->context->pVtbl->InitDX11(ctx->context,

> >>> + device_d3d11-

> >>> device, AMF_DX11_1);

> >>> +                if (res == AMF_OK) {

> >>> +                    ctx->hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);

> >>> +                } else {

> >>> +                    av_log(avctx, AV_LOG_INFO, "amf_shared: avctx-

> >>> hw_frames_ctx has non-AMD device, switching to default");

> >>

> >> I'm not sure this is going to act sensibly - if the user has D3D11

> >> frames input on another device, does it work?

> >>

> >

> > If device is not AMD's the code is trying to create another device  - the

> compatible one .

> > In these cases the submission will be via system memory.

> 

> And that works with D3D11 frames as hw_frames_ctx on another device?


Why not? AMF will be initialized with a different device;
in the submission code this is detected, a surface with system (host) memory is allocated,
the data is copied using av_image_copy(), and the host memory is submitted to AMF.
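
For the record, the host path amounts to something like this (a sketch only; the
function name is illustrative, and the AMFSurface/AMFPlane accessors are from the
AMF C headers as I recall them, not copied from the patch):

    // copy an AVFrame into a host-memory AMFSurface, plane by plane
    // (needs libavutil/imgutils.h for av_image_copy())
    static void amf_copy_frame_to_surface(AVCodecContext *avctx,
                                          const AVFrame *frame,
                                          AMFSurface *surface)
    {
        uint8_t *dst_data[4]     = { NULL };
        int      dst_linesize[4] = { 0 };
        int      planes = (int)surface->pVtbl->GetPlanesCount(surface);

        for (int i = 0; i < planes && i < 4; i++) {
            AMFPlane *plane = surface->pVtbl->GetPlaneAt(surface, i);
            dst_data[i]     = plane->pVtbl->GetNative(plane);
            dst_linesize[i] = plane->pVtbl->GetHPitch(plane);
        }
        av_image_copy(dst_data, dst_linesize,
                      (const uint8_t**)frame->data, frame->linesize,
                      (enum AVPixelFormat)frame->format,
                      avctx->width, avctx->height);
    }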

> 

> >>> +    surface->pVtbl->SetPts(surface, frame->pts);

> >>

> >> Does this accept the same range as frame->pts, including

> AV_NOPTS_VALUE?

> >>

> >

> > Yes, encoder doesn’t use pts, just passes the value through for

> convenience.

> 

> What does it do with them, then?


If the caller submits a frame for encoding it should be time-stamped on the output.
The encoder passes pts through with one caveat:
if B-frames are used it will reorder the pts into monotonic order.
If the original or other data are needed they can be passed through via a custom property.

> 

> >>> +

> >>> +int ff_amf_encode_frame(AVCodecContext *avctx, AVPacket *pkt,

> >>> +                        const AVFrame *frame, int *got_packet) {

> >>> +    int             ret = 0;

> >>> +    AMF_RESULT      res = AMF_OK;

> >>> +    AmfContext     *ctx = avctx->priv_data;

> >>> +    AMFSurface     *surface = 0;

> >>> +    AMFData        *data = 0;

> >>> +    amf_bool       submitted = false;

> >>> +

> >>> +    while (!submitted) {

> >>> +        if (!frame) { // submit drain

> >>> +            if (!ctx->eof) { // submit drain onre time only

> >>> +                res = ctx->encoder->pVtbl->Drain(ctx->encoder);

> >>> +                if (res == AMF_INPUT_FULL) {

> >>> +                    av_usleep(1000); // input queue is full: wait,

> >>> + poll and submit

> >> Drain again

> >>> +                                     // need to get some output and try again

> >>> +                } else if (res == AMF_OK) {

> >>> +                    ctx->eof = true; // drain started

> >>> +                    submitted = true;

> >>> +                }

> >>> +            }

> >>> +        } else { // submit frame

> >>> +            if (surface == 0) { // prepare surface from frame one time only

> >>> +                if (frame->hw_frames_ctx && ( // HW frame detected

> >>> +                                              // check if the same

> >>> + hw_frames_ctx as used in

> >> initialization

> >>> +                    (ctx->hw_frames_ctx &&

> >>> + frame->hw_frames_ctx->data == ctx-

> >>> hw_frames_ctx->data) ||

> >>> +                    // check if the same hw_device_ctx as used in initialization

> >>> +                    (ctx->hw_device_ctx &&

> >>> + ((AVHWFramesContext*)frame-

> >>> hw_frames_ctx->data)->device_ctx ==

> >>> +                    (AVHWDeviceContext*)ctx->hw_device_ctx->data)

> >>> +                )) {

> >>> +                    ID3D11Texture2D* texture =

> >>> + (ID3D11Texture2D*)frame-

> >>> data[0]; // actual texture

> >>> +                    int index = (int)(size_t)frame->data[1]; //

> >>> + index is a slice in

> >> texture array is - set to tell AMF which slice to use

> >>> +                    texture->lpVtbl->SetPrivateData(texture,

> >> &AMFTextureArrayIndexGUID, sizeof(index), &index);

> >>> +

> >>> +                    res =

> >>> + ctx->context->pVtbl->CreateSurfaceFromDX11Native(ctx-

> >>> context, texture, &surface, NULL); // wrap to AMF surface

> >>> +                    surface->pVtbl->SetCrop(surface, 0, 0,

> >>> + frame->width, frame-

> >>> height); // decode surfaces are vertically aligned by 16 tell AMF

> >>> real size

> >>> +                    surface->pVtbl->SetPts(surface, frame->pts);

> >>> +                } else {

> >>> +                    res =

> >>> + ctx->context->pVtbl->AllocSurface(ctx->context,

> >> AMF_MEMORY_HOST, ctx->format, avctx->width, avctx->height,

> &surface);

> >>> +                    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK,

> >>> + AVERROR_BUG,

> >> "AllocSurface() failed  with error %d", res);

> >>> +                    amf_copy_surface(avctx, frame, surface);

> >>> +                }

> >>> +            }

> >>> +            // encode

> >>> +            res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder,

> >> (AMFData*)surface);

> >>> +            if (res == AMF_INPUT_FULL) { // handle full queue

> >>> +                av_usleep(1000); // input queue is full: wait, poll

> >>> + and submit

> >> surface again

> >>

> >> Is there really no way in the API to wait for this properly?

> >>

> >

> > The AMF runtime is designed without threads and sleeps inside. It is

> > up to  application to poll output and this way make space in the input HW

> queue.

> > But if input queue is really full it does make sense to sleep and

> > continue polling to avoid unnecessary CPU burn.

> 

> Some APIs have a way to wait for an event for this rather than polling.

> Polling like this will wake the CPU pointlessly and waste power.  (And yes, I

> know lots of other places do it, but if you have a proper API for this then

> please use it.)


At this point there is no way to wait on completion. It may be available in the future.

> 

> >>> +            } else {

> >>> +                surface->pVtbl->Release(surface);

> >>> +                surface = NULL;

> >>> +                AMF_RETURN_IF_FALSE(ctx, res == AMF_OK,

> >> AVERROR_UNKNOWN, "SubmitInput() failed with error %d", res);

> >>> +                submitted = 1;

> >>> +            }

> >>> +        }

> >>> +        // poll results

> >>> +        if (!data) {

> >>> +            res = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data);

> >>> +            if (data) {

> >>> +                AMFBuffer* buffer;

> >>> +                AMFGuid guid = IID_AMFBuffer();

> >>> +                data->pVtbl->QueryInterface(data, &guid,

> >>> + (void**)&buffer); //

> >> query for buffer interface

> >>> +                ret = amf_copy_buffer(avctx, pkt, buffer);

> >>> +                if (!ret)

> >>> +                    *got_packet = 1;

> >>> +                buffer->pVtbl->Release(buffer);

> >>> +                data->pVtbl->Release(data);

> >>> +                if (ctx->eof) {

> >>> +                    submitted = true; // we are in the drain state - no

> submissions

> >>> +                }

> >>> +            } else if (res == AMF_EOF) {

> >>> +                submitted = true; // drain complete

> >>> +            } else {

> >>> +                if (!submitted) {

> >>> +                    av_usleep(1000); // wait and poll again

> >>> +                }

> >>> +            }

> >>> +        }

> >>> +    }

> >>

> >> I suspect this setup is not actually going to follow the constraints

> >> of the deprecated encode2().  Given the API here, I think you would

> >> be much better off writing with send_frame()/receive_packet().

> >

> > I considered this, but without a thread that would call

> > receive_packet() the implementation will fall into the same pattern as it is

> now but with an additional queue of ready outputs.

> 

> See the documentation - you can return EAGAIN to send_packet() when a

> frame is available, no external queue is required.


I didn't debug it, but from visual code inspection this logic is available for decoders only.
For encoders, the avcodec_send_frame() call inside do_video_out() doesn't check for EAGAIN,
and inside avcodec_send_frame() there is no such check.

> 

> >>> +    AMFSize             framesize = AMFConstructSize(avctx->width, avctx-

> >>> height);

> >>> +    AMFRate             framerate = AMFConstructRate(avctx-

> >time_base.den,

> >> avctx->time_base.num * avctx->ticks_per_frame);

> >>

> >> Is VFR encoding not supported?

> >>

> >

> > The encoder uses frame rate only for rate control. It does not take in

> > account frame duration in case of VFR.

> 

> Sure.  (That should probably be documented somewhere.)

> 

> >>> +    if (avctx->rc_initial_buffer_occupancy != 0) {

> >>> +        int percent = avctx->rc_buffer_size * 64 / avctx-

> >>> rc_initial_buffer_occupancy;

> >>> +        if (percent > 64)

> >>> +            percent = 64;

> >>

> >> ???

> >>

> >

> > This is an attempt to translate to 1-64 range which is exposed by the

> encoder.

> 

> 1-64, yet the variable is called percent?  Sounds very suspicious.  If that is

> indeed the behaviour, please add a comment explaining it.


Sure, the AMF parameter ranges from 1 to 64; "percent" is not a good name for this variable.
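
One way to express it as a fraction of the buffer size on that 1-64 scale
(a sketch, not what the patch currently does; HEVC property shown, the H.264
one would be analogous):

    if (avctx->rc_buffer_size && avctx->rc_initial_buffer_occupancy) {
        // how full the VBV starts, expressed in AMF's 1..64 units
        int initial_fullness = avctx->rc_initial_buffer_occupancy * 64 /
                               avctx->rc_buffer_size;
        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
            AMF_VIDEO_ENCODER_HEVC_INITIAL_VBV_BUFFER_FULLNESS,
            av_clip(initial_fullness, 1, 64));
    }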

> 

> >>> +    // Bitrate

> >>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_TARGET_BITRATE, avctx->bit_rate);

> >>> +    if (ctx->rate_control_mode ==

> >> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CBR) {

> >>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_PEAK_BITRATE, avctx->bit_rate);

> >>> +    } else {

> >>> +        int rc_max_rate = avctx->rc_max_rate >= avctx->bit_rate ?

> >>> + avctx-

> >>> rc_max_rate : avctx->bit_rate * 13 / 10;

> >>

> >> Where does 13/10 come from?

> >>

> >

> > For best results rc_max_rate should be bigger then bit_rate  for VBR. For

> CBR it is ignored.

> > What is the better way to correct an unset parameter?

> 

> Max rate should only be constrained by the HRD buffering in this case.  Are

> you sure this isn't handled internally if you don't supply the paramater at all?

> If not, maybe supply some sort of infinity to avoid it constraining anything

> appropriately.


My concern is about VBR with peaks. For this mode the max rate defines the height of the peaks.
If it is not defined the encoder will pick something valid, but the idea was to control this explicitly
from FFmpeg.

> 

> >>> +

> >>> +    // fill extradata

> >>> +    AMFVariantInit(&var);

> >>

> >> Can this fail?

> >>

> > Not if var cleared with 0.

> 

> I would prefer that return values are checked for all functions which return

> them, in case of future API changes.  If the function can't return an error,

> why isn't it void?


I guess I will just add error checking everywhere to avoid confusion.
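For instance, treating it like any other AMF call and using the same macro as elsewhere in the patch
(a sketch):

res = AMFVariantInit(&var);
AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN,
                    "AMFVariantInit() failed with error %d", res);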

> 

> >>> +// encoder connected with D3D11 HW accelerator AVCodec

> >>> +ff_h264_amf_d3d11va_encoder = {

> >>> +    .name = "h264_amf_d3d11va",

> >>> +    .long_name = NULL_IF_CONFIG_SMALL("AMD AMF H.264 Encoder

> with

> >> d3d11va"),

> >>> +    .type = AVMEDIA_TYPE_VIDEO,

> >>> +    .id = AV_CODEC_ID_H264,

> >>> +    .init = amf_encode_init_h264,

> >>> +    .encode2 = ff_amf_encode_frame,

> >>> +    .close = ff_amf_encode_close,

> >>> +    .priv_data_size = sizeof(AmfContext),

> >>> +    .priv_class = &h264_amf_d3d11va_class,

> >>> +    .defaults = defaults,

> >>> +    .capabilities = AV_CODEC_CAP_DELAY,

> >>> +    .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,

> >>> +    .pix_fmts = ff_amf_pix_fmts,

> >>> +};

> >>

> >> As above, why does this separate (identical) instance exist?

> >

> > See explanation about accelerator handling for encoders above.

> >

> >>> +static const AVOption options[] = {

> >>> +    { "usage",          "Set the encoding usage",             OFFSET(usage),

> >> AV_OPT_TYPE_INT,   { .i64 =

> >> AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING },

> >> AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING,

> >> AMF_VIDEO_ENCODER_HEVC_USAGE_WEBCAM, VE, "usage" },

> >>> +    { "transcoding",    "", 0, AV_OPT_TYPE_CONST, { .i64 =

> >> AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING },         0, 0, VE,

> >> "usage" },

> >>> +    { "ultralowlatency","", 0, AV_OPT_TYPE_CONST, { .i64 =

> >> AMF_VIDEO_ENCODER_HEVC_USAGE_ULTRA_LOW_LATENCY },    0, 0, VE,

> >> "usage" },

> >>> +    { "lowlatency",     "", 0, AV_OPT_TYPE_CONST, { .i64 =

> >> AMF_VIDEO_ENCODER_HEVC_USAGE_LOW_LATENCY },          0, 0, VE,

> >> "usage" },

> >>> +    { "webcam",         "", 0, AV_OPT_TYPE_CONST, { .i64 =

> >> AMF_VIDEO_ENCODER_HEVC_USAGE_WEBCAM },               0, 0, VE,

> "usage" },

> >>

> >> Could some of this be in common with the H.264 encoder?  (Maybe in

> >> the

> >> header?)

> >>

> >

> > I tried to keep H264 and HAVC parameters completely separate. I was

> > asked by codec team to do so in AMF API  and did the same here.

> 

> Ok, sure.

> 

> 

> Some more random questions:

> 

> * How can we supply colour information to the codecs?  (VUI

> colour_primaries/transfer_characteristics/matrix_coefficients/chroma_samp

> le_loc_type.)


There is a very limited set of VUI parameters available: timing (shared with rate control via the frame rate),
aspect ratio, video_full_range_flag, the bitstream restriction fields and a few others related to reordering.

> 

> * Does timing SEI (buffering_period/pic_timing) get generated for bitrate-

> targetted modes?  Is there any way to control that?


I will check tomorrow whether they are generated, but there is no control over it.

> 

> * When you say this will be used on Linux, is that going to be in Mesa

> (integrated with VAAPI as the Windows code is with D3D?) or will it be

> something in the proprietary drivers?


VAAPI on Linux is available in the open source driver. The AMF version will be implemented via
Vulkan and will follow the Vulkan implementation in the driver and its open source policy.
I cannot say more right now.

> 

> Thanks,

> 

> - Mark

> _______________________________________________

> ffmpeg-devel mailing list

> ffmpeg-devel@ffmpeg.org

> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Thanks, Mikhail
mmironov Oct. 29, 2017, 8:57 p.m. UTC | #12
> -----Original Message-----

> From: ffmpeg-devel [mailto:ffmpeg-devel-bounces@ffmpeg.org] On Behalf

> Of Mark Thompson

> Sent: October 29, 2017 3:49 PM

> To: ffmpeg-devel@ffmpeg.org

> Subject: Re: [FFmpeg-devel] Added - HW accelerated H.264 and HEVC

> encoding for AMD GPUs based on AMF SDK

> 

> On 29/10/17 14:54, Mironov, Mikhail wrote:

> >> -----Original Message-----

> >> From: ffmpeg-devel [mailto:ffmpeg-devel-bounces@ffmpeg.org] On

> Behalf

> >> Of Carl Eugen Hoyos

> >> Sent: October 28, 2017 3:19 PM

> >> To: FFmpeg development discussions and patches <ffmpeg-

> >> devel@ffmpeg.org>

> >> Subject: Re: [FFmpeg-devel] Added - HW accelerated H.264 and HEVC

> >> encoding for AMD GPUs based on AMF SDK

> >>

> >> 2017-10-27 20:09 GMT+02:00 mmironov <mikhail.mironov@amd.com>:

> >>> +const enum AVPixelFormat ff_amf_pix_fmts[] = {

> >>> +    AV_PIX_FMT_NV12,

> >>

> >>> +    AV_PIX_FMT_BGRA,

> >>> +    AV_PIX_FMT_ARGB,

> >>> +    AV_PIX_FMT_RGBA,

> >>

> >> This is wrong, your encoders do not convert transparency information,

> >> there are 32 rgb formats without alpha channel.

> >>

> >>> +    { AV_PIX_FMT_NV12,       AMF_SURFACE_NV12 },

> >>> +    { AV_PIX_FMT_D3D11,      AMF_SURFACE_NV12 },

> >>

> >> Just curious: Can you explain this in simple words?

> >>

> >> I will try not to comment on the headers you sent, Carl Eugen

> >> _______________________________________________

> >> ffmpeg-devel mailing list

> >> ffmpeg-devel@ffmpeg.org

> >> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel

> > Sure: the native format for the encoder is NV12. It can work via D3D11 and

> accept DXGI_FORMAT_NV12 texture or via D3D9  accepting

> (D3DFORMAT)MAKEFOURCC('N', 'V', '1', '2') surface. AMF surfaces are

> wrappers around these objects and converters from system memory. In

> addition, the encoder has built-in shader-based color converter. It can accept

> BGRA and RGBA formats and convert them into NV12 dropping alpha

> channel. This is why I added these entries. Granted, the conversion lacks

> parameters and to support more options AMF has a separate color space

> component, also shader-based. Once encoder is integrated I planned to ask

> FFmpeg team opinion if it is needed or not.

> > I made the FFmpeg encoder accepting DX objects from other FFmpeg

> components if available.

> 

> If you're going to do the colour conversion here then please pass the

> necessary colour information

> (AVFrame.color_primaries|color_trc|colorspace) so that non-BT.709 (I'm

> guessing...) has some chance of being correct.


Currently 709 or 601 is decided based on the frame size. It is crude, but it works for folks
who don’t want to deal with a separate AMF component. I am thinking of adding these
parameters to the encoder in the driver and to the AMF API, but so far this has been low priority
because of the separate component which already has these parameters.
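For reference, the usual size-based guess looks roughly like this - just a sketch, the exact cutoff
the driver uses is an assumption on my part and the helper name is made up:

#include <libavutil/pixfmt.h>

/* Hypothetical illustration of a size-based default, not the actual driver
 * logic: SD-sized content gets BT.601, anything larger gets BT.709. */
static enum AVColorSpace guess_colorspace(int width, int height)
{
    if (width > 1024 || height > 576)
        return AVCOL_SPC_BT709;      /* HD */
    return AVCOL_SPC_SMPTE170M;      /* SD / BT.601 */
}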

> 

> (Though I'd say in general it's preferable not to have the encoder do this,

> because it becomes confused with actual encoding of RGB.  A separate

> D3D11 shader implementation to do it as a filter would definitely be

> welcome...)


As I said, I would be glad to work on this or ask someone in my team to do it 
once the encoder is in place. 

> 

> Thanks,

> 

> - Mark

> _______________________________________________

> ffmpeg-devel mailing list

> ffmpeg-devel@ffmpeg.org

> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Thanks, Mikhail
Carl Eugen Hoyos Oct. 29, 2017, 9:01 p.m. UTC | #13
2017-10-29 21:48 GMT+01:00 Mironov, Mikhail <Mikhail.Mironov@amd.com>:

> VAAPI on Linux is available in open source driver. AMF version will be implemented via
> Vulkan and will follow Vulkan implementation  in the driver and in open source policy.

Does that mean that at least in theory, the driver may also work on big-endian?

If yes, you have to be extra-careful about how to define the RGB colour-spaces
(and how to map them to pix_fmts).

Carl Eugen
mmironov Oct. 29, 2017, 9:05 p.m. UTC | #14
> -----Original Message-----

> From: ffmpeg-devel [mailto:ffmpeg-devel-bounces@ffmpeg.org] On Behalf

> Of Carl Eugen Hoyos

> Sent: October 29, 2017 5:01 PM

> To: FFmpeg development discussions and patches <ffmpeg-

> devel@ffmpeg.org>

> Subject: Re: [FFmpeg-devel] Added - HW accelerated H.264 and HEVC

> encoding for AMD GPUs based on AMF SDK

> 

> 2017-10-29 21:48 GMT+01:00 Mironov, Mikhail

> <Mikhail.Mironov@amd.com>:

> 

> > VAAPI on Linux is available in open source driver. AMF version will be

> > implemented via Vulkan and will follow Vulkan implementation  in the

> driver and in open source policy.

> 

> Does that mean that at least in theory, the driver may also work on big-

> endian?

> 

> If yes, you have to be extra-careful about how to define the RGB colour-

> spaces (and how to map them to pix_fmts).

> 

OK, will keep it in mind.

> Carl Eugen

> _______________________________________________

> ffmpeg-devel mailing list

> ffmpeg-devel@ffmpeg.org

> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Thanks, Mikhail
Mark Thompson Oct. 29, 2017, 9:51 p.m. UTC | #15
On 29/10/17 20:48, Mironov, Mikhail wrote:
>> -----Original Message-----
>> From: ffmpeg-devel [mailto:ffmpeg-devel-bounces@ffmpeg.org] On Behalf
>> Of Mark Thompson
>> Sent: October 29, 2017 3:36 PM
>> To: ffmpeg-devel@ffmpeg.org
>> Subject: Re: [FFmpeg-devel] Added - HW accelerated H.264 and HEVC
>> encoding for AMD GPUs based on AMF SDK
>>
>> On 29/10/17 18:39, Mironov, Mikhail wrote:
>>>>
>>>>> +    { AV_PIX_FMT_D3D11,      AMF_SURFACE_NV12 },
>>>>
>>>> D3D11 surfaces need not be NV12.  The actual format is in
>>>> AVHWFramesContext.sw_format - if you only support 8-bit then
>>>> something nasty probably happens if you give it P010 surfaces.
>>>>
>>>
>>> Agreed, but how should I define D3D11 NV12 as input format if I only have
>> AV_PIX_FMT_D3D11?
>>
>> Check sw_format afterwards.
>>
> 
> sw_format is part of AVHWFramesContext in hwcodec.h 
> But how should I define pix_fmt array in AVCodec? For example, In nvenc.c 
> is done via AV_PIX_FMT_CUDA. Is it wrong?

NVENC checks sw_format at init time - see <http://git.videolan.org/?p=ffmpeg.git;a=blob;f=libavcodec/nvenc.c;h=e1d3316de39cfefa41432105aed91f0d4e93a154;hb=HEAD#l1408>.  I think you want to do the same thing.

(I agree this result isn't ideal with just the pix_fmt array, but there isn't currently a way to check sw_format as well from the outside.  Checking it at init time and failing if it isn't usable is the best we can do.)
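(As a sketch, roughly this in the init function - NV12-only is an assumption based on the current
format table:)

if (avctx->hw_frames_ctx) {
    AVHWFramesContext *frames_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
    if (frames_ctx->format == AV_PIX_FMT_D3D11 &&
        frames_ctx->sw_format != AV_PIX_FMT_NV12) {
        av_log(avctx, AV_LOG_ERROR, "Only NV12 is supported as the sw_format "
               "of D3D11 frames, got %s.\n",
               av_get_pix_fmt_name(frames_ctx->sw_format));
        return AVERROR(EINVAL);
    }
}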

>>>>> +
>>>>> +    // try to reuse existing DX device
>>>>> +
>>>>> +    if (avctx->hw_frames_ctx) {
>>>>> +        AVHWFramesContext *device_ctx = (AVHWFramesContext*)avctx-
>>>>> hw_frames_ctx->data;
>>>>> +        if (device_ctx->device_ctx->type ==
>> AV_HWDEVICE_TYPE_D3D11VA){
>>>>> +            if (device_ctx->device_ctx->hwctx) {
>>>>> +                AVD3D11VADeviceContext *device_d3d11 =
>>>> (AVD3D11VADeviceContext *)device_ctx->device_ctx->hwctx;
>>>>> +                res = ctx->context->pVtbl->InitDX11(ctx->context,
>>>>> + device_d3d11-
>>>>> device, AMF_DX11_1);
>>>>> +                if (res == AMF_OK) {
>>>>> +                    ctx->hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
>>>>> +                } else {
>>>>> +                    av_log(avctx, AV_LOG_INFO, "amf_shared: avctx-
>>>>> hw_frames_ctx has non-AMD device, switching to default");
>>>>
>>>> I'm not sure this is going to act sensibly - if the user has D3D11
>>>> frames input on another device, does it work?
>>>>
>>>
>>> If device is not AMD's the code is trying to create another device  - the
>> compatible one .
>>> In these cases the submission will be via system memory.
>>
>> And that works with D3D11 frames as hw_frames_ctx on another device?
> 
> Why not? AMF will be initialized with a different device, 
> in submission code it is detected, surface with system (host) memory is allocated,  
> data will be copied using av_image_copy() and host memory will be submitted to AMF.

It won't be copied externally if it's already in a D3D11 surface for the other device.  At the moment it looks like it silently encodes an empty frame because the conditions in ff_amf_encode_frame() make it call amf_copy_surface(), but av_image_copy() doesn't support copying from hardware surfaces and has no way to indicate that failure.

I think the easiest way to solve this is to fail at init time if the given hw_frames_ctx is on an unusable device.  The hw_device_ctx case matters less, because the user input is not going to be in D3D11 frames in that case.
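(i.e. roughly this at init, as a sketch, instead of the current fall-back-to-another-device path:)

res = ctx->context->pVtbl->InitDX11(ctx->context, device_d3d11->device, AMF_DX11_1);
if (res != AMF_OK) {
    av_log(avctx, AV_LOG_ERROR, "The D3D11 device supplied via hw_frames_ctx "
           "is not usable by AMF; refusing to initialise.\n");
    return AVERROR(EINVAL);
}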

>>>>> +            } else {
>>>>> +                surface->pVtbl->Release(surface);
>>>>> +                surface = NULL;
>>>>> +                AMF_RETURN_IF_FALSE(ctx, res == AMF_OK,
>>>> AVERROR_UNKNOWN, "SubmitInput() failed with error %d", res);
>>>>> +                submitted = 1;
>>>>> +            }
>>>>> +        }
>>>>> +        // poll results
>>>>> +        if (!data) {
>>>>> +            res = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data);
>>>>> +            if (data) {
>>>>> +                AMFBuffer* buffer;
>>>>> +                AMFGuid guid = IID_AMFBuffer();
>>>>> +                data->pVtbl->QueryInterface(data, &guid,
>>>>> + (void**)&buffer); //
>>>> query for buffer interface
>>>>> +                ret = amf_copy_buffer(avctx, pkt, buffer);
>>>>> +                if (!ret)
>>>>> +                    *got_packet = 1;
>>>>> +                buffer->pVtbl->Release(buffer);
>>>>> +                data->pVtbl->Release(data);
>>>>> +                if (ctx->eof) {
>>>>> +                    submitted = true; // we are in the drain state - no
>> submissions
>>>>> +                }
>>>>> +            } else if (res == AMF_EOF) {
>>>>> +                submitted = true; // drain complete
>>>>> +            } else {
>>>>> +                if (!submitted) {
>>>>> +                    av_usleep(1000); // wait and poll again
>>>>> +                }
>>>>> +            }
>>>>> +        }
>>>>> +    }
>>>>
>>>> I suspect this setup is not actually going to follow the constraints
>>>> of the deprecated encode2().  Given the API here, I think you would
>>>> be much better off writing with send_frame()/receive_packet().
>>>
>>> I considered this, but without a thread that would call
>>> receive_packet() the implementation will fall into the same pattern as it is
>> now but with an additional queue of ready outputs.
>>
>> See the documentation - you can return EAGAIN to send_packet() when a
>> frame is available, no external queue is required.
> 
> I didn’t debug it but from visual code inspection this logic is available for decoders only. 
> For encoder call avcodec_send_frame() inside do_video_out() doesn’t check for EAGAIN 
> and inside avcodec_send_frame() there is no such checking.

Right, sorry, I mean return EAGAIN to receive_packet().

This does need to have some way to tell whether the SubmitInput() call will return AMF_INPUT_FULL.  If you have that, then you can block in receive_packet() iff that is true - if not, just return EAGAIN and get another frame.  This also maximises the number of frames in-flight for the asynchronous encode.
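(As a sketch, with ctx->input_full as an assumed new flag remembered from the last SubmitInput()
attempt - not a complete implementation:)

static int amf_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    AmfContext *ctx  = avctx->priv_data;
    AMFData    *data = NULL;
    AMF_RESULT  res;
    int         ret;

    while (1) {
        res = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data);
        if (data) {
            AMFBuffer *buffer;
            AMFGuid    guid = IID_AMFBuffer();
            data->pVtbl->QueryInterface(data, &guid, (void**)&buffer);
            ret = amf_copy_buffer(avctx, pkt, buffer);
            buffer->pVtbl->Release(buffer);
            data->pVtbl->Release(data);
            return ret;
        }
        if (res == AMF_EOF)
            return AVERROR_EOF;          /* drain complete */
        if (!ctx->input_full && !ctx->eof)
            return AVERROR(EAGAIN);      /* let the caller feed another frame */
        av_usleep(1000);                 /* input full or draining: wait and poll
                                            (a real implementation would also retry
                                            the pending SubmitInput() here) */
    }
}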

>>>>> +    // Bitrate
>>>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>>>> AMF_VIDEO_ENCODER_TARGET_BITRATE, avctx->bit_rate);
>>>>> +    if (ctx->rate_control_mode ==
>>>> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CBR) {
>>>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>>>> AMF_VIDEO_ENCODER_PEAK_BITRATE, avctx->bit_rate);
>>>>> +    } else {
>>>>> +        int rc_max_rate = avctx->rc_max_rate >= avctx->bit_rate ?
>>>>> + avctx-
>>>>> rc_max_rate : avctx->bit_rate * 13 / 10;
>>>>
>>>> Where does 13/10 come from?
>>>>
>>>
>>> For best results rc_max_rate should be bigger then bit_rate  for VBR. For
>> CBR it is ignored.
>>> What is the better way to correct an unset parameter?
>>
>> Max rate should only be constrained by the HRD buffering in this case.  Are
>> you sure this isn't handled internally if you don't supply the paramater at all?
>> If not, maybe supply some sort of infinity to avoid it constraining anything
>> appropriately.
> 
> I concern about VBR with peaks. For this mode max rate defines height of the peaks. 
> If not defined the encoder will put something valid, but idea was to control such thing explicitly 
> from FFmpeg.

If it is set by the user then certainly use that value, but if not then it's probably better to make up the answer inside the driver rather than here (the driver will have better knowledge of how the hardware works, after all).
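(i.e. roughly: use the user's value when present, otherwise leave the peak to the driver - a sketch:)

if (avctx->rc_max_rate) {
    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
                              AMF_VIDEO_ENCODER_PEAK_BITRATE, avctx->rc_max_rate);
} /* else: don't set AMF_VIDEO_ENCODER_PEAK_BITRATE at all and let the driver
     pick its own default for VBR-with-peaks */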

>> Some more random questions:
>>
>> * How can we supply colour information to the codecs?  (VUI
>> colour_primaries/transfer_characteristics/matrix_coefficients/chroma_samp
>> le_loc_type.)
> 
> There is very limited set of VUI parameters available: timing (shared with rate control via frame rate), 
> aspect ratio, and video_full_range_flag, bit stream restriction and few other related to reordering .

Inability to set the colour information will do nasty things to the output video in some cases.  Input coming from JPEG or similar with centred chroma samples is visibly weird viewed with the default middle-left, and HDR colours will come out incorrectly.  I recommend you add a way to do this, though it won't block the rest of the patch.

Thanks,

- Mark
mmironov Oct. 29, 2017, 10:57 p.m. UTC | #16
> -----Original Message-----

> From: ffmpeg-devel [mailto:ffmpeg-devel-bounces@ffmpeg.org] On Behalf

> Of Mark Thompson

> Sent: October 29, 2017 5:51 PM

> To: ffmpeg-devel@ffmpeg.org

> Subject: Re: [FFmpeg-devel] Added - HW accelerated H.264 and HEVC

> encoding for AMD GPUs based on AMF SDK

> 

> On 29/10/17 20:48, Mironov, Mikhail wrote:

> >> -----Original Message-----

> >> From: ffmpeg-devel [mailto:ffmpeg-devel-bounces@ffmpeg.org] On

> Behalf

> >> Of Mark Thompson

> >> Sent: October 29, 2017 3:36 PM

> >> To: ffmpeg-devel@ffmpeg.org

> >> Subject: Re: [FFmpeg-devel] Added - HW accelerated H.264 and HEVC

> >> encoding for AMD GPUs based on AMF SDK

> >>

> >> On 29/10/17 18:39, Mironov, Mikhail wrote:

> >>>>

> >>>>> +    { AV_PIX_FMT_D3D11,      AMF_SURFACE_NV12 },

> >>>>

> >>>> D3D11 surfaces need not be NV12.  The actual format is in

> >>>> AVHWFramesContext.sw_format - if you only support 8-bit then

> >>>> something nasty probably happens if you give it P010 surfaces.

> >>>>

> >>>

> >>> Agreed, but how should I define D3D11 NV12 as input format if I only

> >>> have

> >> AV_PIX_FMT_D3D11?

> >>

> >> Check sw_format afterwards.

> >>

> >

> > sw_format is part of AVHWFramesContext in hwcodec.h But how should I

> > define pix_fmt array in AVCodec? For example, In nvenc.c is done via

> > AV_PIX_FMT_CUDA. Is it wrong?

> 

> NVENC checks sw_format at init time - see

> <http://git.videolan.org/?p=ffmpeg.git;a=blob;f=libavcodec/nvenc.c;h=e1d33

> 16de39cfefa41432105aed91f0d4e93a154;hb=HEAD#l1408>.  I think you want

> to do the same thing.

> 

> (I agree this result isn't ideal with just the pix_fmt array, but there isn't

> currently a way to check sw_format as well from the outside.  Checking it at

> init time and failing if it isn't usable is the best we can do.)

> 


Sure, will add checking at initialization.

> >>>>> +

> >>>>> +    // try to reuse existing DX device

> >>>>> +

> >>>>> +    if (avctx->hw_frames_ctx) {

> >>>>> +        AVHWFramesContext *device_ctx =

> >>>>> + (AVHWFramesContext*)avctx-

> >>>>> hw_frames_ctx->data;

> >>>>> +        if (device_ctx->device_ctx->type ==

> >> AV_HWDEVICE_TYPE_D3D11VA){

> >>>>> +            if (device_ctx->device_ctx->hwctx) {

> >>>>> +                AVD3D11VADeviceContext *device_d3d11 =

> >>>> (AVD3D11VADeviceContext *)device_ctx->device_ctx->hwctx;

> >>>>> +                res = ctx->context->pVtbl->InitDX11(ctx->context,

> >>>>> + device_d3d11-

> >>>>> device, AMF_DX11_1);

> >>>>> +                if (res == AMF_OK) {

> >>>>> +                    ctx->hw_frames_ctx = av_buffer_ref(avctx-

> >hw_frames_ctx);

> >>>>> +                } else {

> >>>>> +                    av_log(avctx, AV_LOG_INFO, "amf_shared:

> >>>>> + avctx-

> >>>>> hw_frames_ctx has non-AMD device, switching to default");

> >>>>

> >>>> I'm not sure this is going to act sensibly - if the user has D3D11

> >>>> frames input on another device, does it work?

> >>>>

> >>>

> >>> If device is not AMD's the code is trying to create another device

> >>> - the

> >> compatible one .

> >>> In these cases the submission will be via system memory.

> >>

> >> And that works with D3D11 frames as hw_frames_ctx on another device?

> >

> > Why not? AMF will be initialized with a different device, in

> > submission code it is detected, surface with system (host) memory is

> > allocated, data will be copied using av_image_copy() and host memory will

> be submitted to AMF.

> 

> It won't be copied externally if it's already in a D3D11 surface for the other

> device.  At the moment it looks like it silently encodes an empty frame

> because the conditions in ff_amf_encode_frame() make it call

> amf_copy_surface(), but av_image_copy() doesn't support copying from

> hardware surfaces and has no way to indicate that failure.

> 

> I think the easiest way to solve this is to fail at init time if the given

> hw_frames_ctx is on an unusable device.  The hw_device_ctx case matters

> less, because the user input is not going to be in D3D11 frames in that case.


I thought that in a multi-GPU situation it is still good to support encoding.
I could force the HW frame to be copied into system memory and consume it in the encoder via
transfer_data_to(). Would that be better than failing at initialization?
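Something along these lines - a sketch only, written against the public av_hwframe_transfer_data()
helper; the function name is made up, and whether it beats failing at init is exactly the question:

/* Sketch: download a frame that lives on a foreign D3D11 device into system
 * memory before handing it to AMF via the existing system-memory path. */
static int amf_download_frame(AVCodecContext *avctx, const AVFrame *hw_frame,
                              AVFrame **out)
{
    AVFrame *sw_frame = av_frame_alloc();
    int ret;

    if (!sw_frame)
        return AVERROR(ENOMEM);
    ret = av_hwframe_transfer_data(sw_frame, hw_frame, 0);  /* GPU -> host copy */
    if (ret < 0) {
        av_frame_free(&sw_frame);
        return ret;
    }
    ret = av_frame_copy_props(sw_frame, hw_frame);
    if (ret < 0) {
        av_frame_free(&sw_frame);
        return ret;
    }
    *out = sw_frame;
    return 0;
}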

> 

> >>>>> +            } else {

> >>>>> +                surface->pVtbl->Release(surface);

> >>>>> +                surface = NULL;

> >>>>> +                AMF_RETURN_IF_FALSE(ctx, res == AMF_OK,

> >>>> AVERROR_UNKNOWN, "SubmitInput() failed with error %d", res);

> >>>>> +                submitted = 1;

> >>>>> +            }

> >>>>> +        }

> >>>>> +        // poll results

> >>>>> +        if (!data) {

> >>>>> +            res = ctx->encoder->pVtbl->QueryOutput(ctx->encoder,

> &data);

> >>>>> +            if (data) {

> >>>>> +                AMFBuffer* buffer;

> >>>>> +                AMFGuid guid = IID_AMFBuffer();

> >>>>> +                data->pVtbl->QueryInterface(data, &guid,

> >>>>> + (void**)&buffer); //

> >>>> query for buffer interface

> >>>>> +                ret = amf_copy_buffer(avctx, pkt, buffer);

> >>>>> +                if (!ret)

> >>>>> +                    *got_packet = 1;

> >>>>> +                buffer->pVtbl->Release(buffer);

> >>>>> +                data->pVtbl->Release(data);

> >>>>> +                if (ctx->eof) {

> >>>>> +                    submitted = true; // we are in the drain

> >>>>> + state - no

> >> submissions

> >>>>> +                }

> >>>>> +            } else if (res == AMF_EOF) {

> >>>>> +                submitted = true; // drain complete

> >>>>> +            } else {

> >>>>> +                if (!submitted) {

> >>>>> +                    av_usleep(1000); // wait and poll again

> >>>>> +                }

> >>>>> +            }

> >>>>> +        }

> >>>>> +    }

> >>>>

> >>>> I suspect this setup is not actually going to follow the

> >>>> constraints of the deprecated encode2().  Given the API here, I

> >>>> think you would be much better off writing with

> send_frame()/receive_packet().

> >>>

> >>> I considered this, but without a thread that would call

> >>> receive_packet() the implementation will fall into the same pattern

> >>> as it is

> >> now but with an additional queue of ready outputs.

> >>

> >> See the documentation - you can return EAGAIN to send_packet() when a

> >> frame is available, no external queue is required.

> >

> > I didn’t debug it but from visual code inspection this logic is available for

> decoders only.

> > For encoder call avcodec_send_frame() inside do_video_out() doesn’t

> > check for EAGAIN and inside avcodec_send_frame() there is no such

> checking.

> 

> Right, sorry, I mean return EAGAIN to receive_packet().

> 

> This does need to have some way to tell whether the SubmitInput() call will

> return AMF_INPUT_FULL.  If you have that, then you can block in

> receive_packet() iff that is true - if not, just return EAGAIN and get another

> frame.  This also maximises the number of frames in-flight for the

> asynchronous encode.


If SubmitInput() returns AMF_INPUT_FULL inside send_frame(), there is no way I can trick
ffmpeg.exe into resubmitting this frame. If I block there, then receive_packet() will not be called.
It is impossible to tell ahead of time that SubmitInput() will return AMF_INPUT_FULL
because it is HW-specific. IMHO the encoder submission should be organized the
same way as decoder submission. Until then I don’t see how to implement this. Keep in mind that the AMF API
is designed exactly the same way as the send_frame()/receive_packet() pair of functions.

> 

> >>>>> +    // Bitrate

> >>>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >>>> AMF_VIDEO_ENCODER_TARGET_BITRATE, avctx->bit_rate);

> >>>>> +    if (ctx->rate_control_mode ==

> >>>> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CBR) {

> >>>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >>>> AMF_VIDEO_ENCODER_PEAK_BITRATE, avctx->bit_rate);

> >>>>> +    } else {

> >>>>> +        int rc_max_rate = avctx->rc_max_rate >= avctx->bit_rate ?

> >>>>> + avctx-

> >>>>> rc_max_rate : avctx->bit_rate * 13 / 10;

> >>>>

> >>>> Where does 13/10 come from?

> >>>>

> >>>

> >>> For best results rc_max_rate should be bigger then bit_rate  for

> >>> VBR. For

> >> CBR it is ignored.

> >>> What is the better way to correct an unset parameter?

> >>

> >> Max rate should only be constrained by the HRD buffering in this

> >> case.  Are you sure this isn't handled internally if you don't supply the

> paramater at all?

> >> If not, maybe supply some sort of infinity to avoid it constraining

> >> anything appropriately.

> >

> > I concern about VBR with peaks. For this mode max rate defines height of

> the peaks.

> > If not defined the encoder will put something valid, but idea was to

> > control such thing explicitly from FFmpeg.

> 

> If it is set by the user then certainly use that value, but if not then it's

> probably better to make up the answer inside the driver rather than here

> (the driver will have better knowledge of how the hardware works, after all).

> 

> >> Some more random questions:

> >>

> >> * How can we supply colour information to the codecs?  (VUI

> >> colour_primaries/transfer_characteristics/matrix_coefficients/chroma_

> >> samp

> >> le_loc_type.)

> >

> > There is very limited set of VUI parameters available: timing (shared

> > with rate control via frame rate), aspect ratio, and video_full_range_flag,

> bit stream restriction and few other related to reordering .

> 

> Inability to set the colour information will do nasty things to the output video

> in some cases.  Input coming from JPEG or similar with centred chroma

> samples is visibly weird viewed with the default middle-left, and HDR colours

> will come out incorrectly.  I recommend you add a way to do this, though it

> won't block the rest of the patch.


The VUI is generated deep in the driver or in firmware, so you are asking for a driver feature. I will
definitely bring this to the driver team and make sure they implement it at some point.
Such input is truly appreciated.

> 

> Thanks,

> 

> - Mark

> _______________________________________________

> ffmpeg-devel mailing list

> ffmpeg-devel@ffmpeg.org

> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Thanks, Mikhail
diff mbox

Patch

diff --git a/Changelog b/Changelog
index 6592d86..f0d22fa 100644
--- a/Changelog
+++ b/Changelog
@@ -6,7 +6,8 @@  version <next>:
 - Dropped support for OpenJPEG versions 2.0 and below. Using OpenJPEG now
   requires 2.1 (or later) and pkg-config.
 - VDA dropped (use VideoToolbox instead)
-
+- AMF H.264 encoder
+- AMF HEVC encoder
 
 version 3.4:
 - deflicker video filter
diff --git a/compat/amd/amfsdkenc.h b/compat/amd/amfsdkenc.h
new file mode 100644
index 0000000..a640c17
--- /dev/null
+++ b/compat/amd/amfsdkenc.h
@@ -0,0 +1,1750 @@ 
+// 
+// MIT license 
+// 
+// Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
+// Reduced AMF API
+//
+// Full version of AMF SDK and the latest version of this file 
+// can be found at https://github.com/GPUOpen-LibrariesAndSDKs/AMF
+
+#ifndef __AMF_SDK_Enc_h__
+#define __AMF_SDK_Enc_h__
+#pragma once
+
+//-----------------------------------------------------------------------------
+// Platform.h
+//-----------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------
+// export declaration
+//----------------------------------------------------------------------------------------------
+#ifdef _WIN32
+#if defined(AMF_CORE_STATIC)
+#define AMF_CORE_LINK
+#else
+#if defined(AMF_CORE_EXPORTS)
+#define AMF_CORE_LINK __declspec(dllexport)
+#else
+#define AMF_CORE_LINK __declspec(dllimport)
+#endif
+#endif
+#else // #ifdef _WIN32
+#define AMF_CORE_LINK
+#endif // #ifdef _WIN32
+
+#define AMF_MACRO_STRING2(x) #x
+#define AMF_MACRO_STRING(x) AMF_MACRO_STRING2(x)
+
+#define AMF_TODO(_todo) (__FILE__ "(" AMF_MACRO_STRING(__LINE__) "): TODO: "_todo)
+
+
+#if defined(__GNUC__) || defined(__clang__)
+#define AMF_ALIGN(n) __attribute__((aligned(n)))
+#elif defined(_MSC_VER) || defined(__INTEL_COMPILER)
+#define AMF_ALIGN(n) __declspec(align(n))
+#else
+#define AMF_ALIGN(n)
+//     #error Need to define AMF_ALIGN
+#endif
+
+#include <stdio.h>
+#include <stdint.h>
+
+#if defined(_WIN32)
+
+
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+#define AMF_STD_CALL            __stdcall
+#define AMF_CDECL_CALL          __cdecl
+#define AMF_FAST_CALL           __fastcall
+#if defined(__GNUC__) || defined(__clang__)
+#define AMF_INLINE              inline
+#define AMF_FORCEINLINE         inline
+#else
+#define AMF_INLINE              __inline
+#define AMF_FORCEINLINE         __forceinline
+#endif
+#define AMF_NO_VTABLE           __declspec(novtable)
+
+#define AMFPRId64   "I64d"
+#define LPRId64    L"I64d"
+
+#define AMFPRIud64   "Iu64d"
+#define LPRIud64    L"Iu64d"
+
+#define AMFPRIx64   "I64x"
+#define LPRIx64    L"I64x"
+
+#else // !WIN32 - Linux and Mac
+
+#define AMF_STD_CALL
+#define AMF_CDECL_CALL
+#define AMF_FAST_CALL
+#if defined(__GNUC__) || defined(__clang__)
+#define AMF_INLINE              inline
+#define AMF_FORCEINLINE         inline
+#else
+#define AMF_INLINE              __inline__
+#define AMF_FORCEINLINE         __inline__
+#endif
+#define AMF_NO_VTABLE           
+
+#if !defined(AMFPRId64)
+#define AMFPRId64    "lld"
+#define LPRId64     L"lld"
+
+#define AMFPRIud64    "ulld"
+#define LPRIud64     L"ulld"
+
+#define AMFPRIx64    "llx"
+#define LPRIx64     L"llx"
+#endif
+
+#endif // WIN32
+
+
+#if defined(_MSC_VER)
+#define AMF_WEAK __declspec( selectany ) 
+#elif defined (__GCC__) || defined(__clang__)//GCC or CLANG
+#define AMF_WEAK __attribute__((weak))
+#endif
+
+#define amf_countof(x) (sizeof(x) / sizeof(x[0]))
+
+//-------------------------------------------------------------------------------------------------
+// basic data types
+//-------------------------------------------------------------------------------------------------
+typedef     int64_t             amf_int64;
+typedef     int32_t             amf_int32;
+typedef     int16_t             amf_int16;
+typedef     int8_t              amf_int8;
+
+typedef     uint64_t            amf_uint64;
+typedef     uint32_t            amf_uint32;
+typedef     uint16_t            amf_uint16;
+typedef     uint8_t             amf_uint8;
+typedef     size_t              amf_size;
+
+typedef     void*               amf_handle;
+typedef     double              amf_double;
+typedef     float               amf_float;
+
+typedef     void                amf_void;
+
+#if defined(__cplusplus)
+typedef     bool                amf_bool;
+#else
+typedef     amf_uint16          amf_bool;
+#define     true                1 
+#define     false               0 
+#endif
+
+typedef     long                amf_long;
+typedef     int                 amf_int;
+typedef     unsigned long       amf_ulong;
+typedef     unsigned int        amf_uint;
+
+typedef     amf_int64           amf_pts;     // in 100 nanosecs
+
+#define AMF_SECOND          10000000L    // 1 second in 100 nanoseconds
+
+#define AMF_MIN(a, b) ((a) < (b) ? (a) : (b))
+#define AMF_MAX(a, b) ((a) > (b) ? (a) : (b))
+
+#if defined(_WIN32)
+#define PATH_SEPARATOR_WSTR         L"\\"
+#define PATH_SEPARATOR_WCHAR        L'\\'
+#elif defined(__linux) // Linux
+#define PATH_SEPARATOR_WSTR          L"/"
+#define PATH_SEPARATOR_WCHAR         L'/'
+#endif
+
+typedef struct AMFRect
+{
+    amf_int32 left;
+    amf_int32 top;
+    amf_int32 right;
+    amf_int32 bottom;
+} AMFRect;
+
+AMF_INLINE struct AMFRect AMFConstructRect(amf_int32 left, amf_int32 top, amf_int32 right, amf_int32 bottom)
+{
+    struct AMFRect object = { left, top, right, bottom };
+    return object;
+}
+
+typedef struct AMFSize
+{
+    amf_int32 width;
+    amf_int32 height;
+} AMFSize;
+
+AMF_INLINE struct AMFSize AMFConstructSize(amf_int32 width, amf_int32 height)
+{
+    struct AMFSize object = { width, height };
+    return object;
+}
+
+typedef struct AMFPoint
+{
+    amf_int32 x;
+    amf_int32 y;
+} AMFPoint;
+
+AMF_INLINE struct AMFPoint AMFConstructPoint(amf_int32 x, amf_int32 y)
+{
+    struct AMFPoint object = { x, y };
+    return object;
+}
+
+typedef struct AMFRate
+{
+    amf_uint32 num;
+    amf_uint32 den;
+} AMFRate;
+
+AMF_INLINE struct AMFRate AMFConstructRate(amf_uint32 num, amf_uint32 den)
+{
+    struct AMFRate object = { num, den };
+    return object;
+}
+
+typedef struct AMFRatio
+{
+    amf_uint32 num;
+    amf_uint32 den;
+} AMFRatio;
+
+AMF_INLINE struct AMFRatio AMFConstructRatio(amf_uint32 num, amf_uint32 den)
+{
+    struct AMFRatio object = { num, den };
+    return object;
+}
+
+#pragma pack(push, 1)
+#if defined(_MSC_VER)
+#pragma warning( push )
+#endif
+#if defined(WIN32)
+#if defined(_MSC_VER)
+#pragma warning(disable : 4200)
+#pragma warning(disable : 4201)
+#endif
+#endif
+typedef struct AMFColor
+{
+    union
+    {
+        struct
+        {
+            amf_uint8 r;
+            amf_uint8 g;
+            amf_uint8 b;
+            amf_uint8 a;
+        };
+        amf_uint32 rgba;
+    };
+} AMFColor;
+#if defined(_MSC_VER)
+#pragma warning( pop )
+#endif
+#pragma pack(pop)
+
+
+AMF_INLINE struct AMFColor AMFConstructColor(amf_uint8 r, amf_uint8 g, amf_uint8 b, amf_uint8 a)
+{
+    struct AMFColor object;
+    object.r = r;
+    object.g = g;
+    object.b = b;
+    object.a = a;
+    return object;
+}
+
+#if defined(_WIN32)
+#include <combaseapi.h>
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+    // allocator
+    AMF_INLINE void* AMF_CDECL_CALL amf_variant_alloc(amf_size count)
+    {
+        return CoTaskMemAlloc(count);
+    }
+    AMF_INLINE void AMF_CDECL_CALL amf_variant_free(void* ptr)
+    {
+        CoTaskMemFree(ptr);
+    }
+#if defined(__cplusplus)
+}
+#endif
+
+#else // defined(_WIN32)
+#include <stdlib.h>
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+    // allocator
+    AMF_INLINE void* AMF_CDECL_CALL amf_variant_alloc(amf_size count)
+    {
+        return malloc(count);
+    }
+    AMF_INLINE void AMF_CDECL_CALL amf_variant_free(void* ptr)
+    {
+        free(ptr);
+    }
+#if defined(__cplusplus)
+}
+#endif
+#endif // defined(_WIN32)
+
+
+typedef struct AMFGuid
+{
+    amf_uint32 data1;
+    amf_uint16 data2;
+    amf_uint16 data3;
+    amf_uint8 data41;
+    amf_uint8 data42;
+    amf_uint8 data43;
+    amf_uint8 data44;
+    amf_uint8 data45;
+    amf_uint8 data46;
+    amf_uint8 data47;
+    amf_uint8 data48;
+} AMFGuid;
+
+//-----------------------------------------------------------------------------
+// Version.h
+//-----------------------------------------------------------------------------
+#define AMF_MAKE_FULL_VERSION(VERSION_MAJOR, VERSION_MINOR, VERSION_RELEASE, VERSION_BUILD_NUM)    ( ((amf_uint64)(VERSION_MAJOR) << 48ull) | ((amf_uint64)(VERSION_MINOR) << 32ull) | ((amf_uint64)(VERSION_RELEASE) << 16ull)  | (amf_uint64)(VERSION_BUILD_NUM))
+
+#define AMF_GET_MAJOR_VERSION(x)      ((x >> 48ull) & 0xFFFF)
+#define AMF_GET_MINOR_VERSION(x)      ((x >> 32ull) & 0xFFFF)
+#define AMF_GET_SUBMINOR_VERSION(x)   ((x >> 16ull) & 0xFFFF)
+#define AMF_GET_BUILD_VERSION(x)      ((x >>  0ull) & 0xFFFF)
+
+#define AMF_VERSION_MAJOR       1
+#define AMF_VERSION_MINOR       4
+#define AMF_VERSION_RELEASE     4
+#define AMF_VERSION_BUILD_NUM   0
+
+#define AMF_FULL_VERSION AMF_MAKE_FULL_VERSION(AMF_VERSION_MAJOR, AMF_VERSION_MINOR, AMF_VERSION_RELEASE, AMF_VERSION_BUILD_NUM)
+
+//-----------------------------------------------------------------------------
+// Result.h
+//-----------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------
+// result codes
+//----------------------------------------------------------------------------------------------
+
+typedef enum AMF_RESULT
+{
+    AMF_OK                                   = 0,
+    AMF_FAIL                                    ,
+
+// common errors
+    AMF_UNEXPECTED                              ,
+
+    AMF_ACCESS_DENIED                           ,
+    AMF_INVALID_ARG                             ,
+    AMF_OUT_OF_RANGE                            ,
+
+    AMF_OUT_OF_MEMORY                           ,
+    AMF_INVALID_POINTER                         ,
+
+    AMF_NO_INTERFACE                            ,
+    AMF_NOT_IMPLEMENTED                         ,
+    AMF_NOT_SUPPORTED                           ,
+    AMF_NOT_FOUND                               ,
+
+    AMF_ALREADY_INITIALIZED                     ,
+    AMF_NOT_INITIALIZED                         ,
+
+    AMF_INVALID_FORMAT                          ,// invalid data format
+
+    AMF_WRONG_STATE                             ,
+    AMF_FILE_NOT_OPEN                           ,// cannot open file
+
+// device common codes
+    AMF_NO_DEVICE                               ,
+
+// device directx
+    AMF_DIRECTX_FAILED                          ,
+// device opencl 
+    AMF_OPENCL_FAILED                           ,
+// device opengl 
+    AMF_GLX_FAILED                              ,//failed to use GLX
+// device XV 
+    AMF_XV_FAILED                               , //failed to use Xv extension
+// device alsa
+    AMF_ALSA_FAILED                             ,//failed to use ALSA
+
+// component common codes
+
+    //result codes
+    AMF_EOF                                     ,
+    AMF_REPEAT                                  ,
+    AMF_INPUT_FULL                              ,//returned by AMFComponent::SubmitInput if input queue is full
+    AMF_RESOLUTION_CHANGED                      ,//resolution changed client needs to Drain/Terminate/Init
+    AMF_RESOLUTION_UPDATED                      ,//resolution changed in adaptive mode. New ROI will be set on output on newly decoded frames
+
+    //error codes
+    AMF_INVALID_DATA_TYPE                       ,//invalid data type
+    AMF_INVALID_RESOLUTION                      ,//invalid resolution (width or height)
+    AMF_CODEC_NOT_SUPPORTED                     ,//codec not supported
+    AMF_SURFACE_FORMAT_NOT_SUPPORTED            ,//surface format not supported
+    AMF_SURFACE_MUST_BE_SHARED                  ,//surface should be shared (DX11: (MiscFlags & D3D11_RESOURCE_MISC_SHARED) == 0, DX9: No shared handle found)
+
+// component video decoder
+    AMF_DECODER_NOT_PRESENT                     ,//failed to create the decoder
+    AMF_DECODER_SURFACE_ALLOCATION_FAILED       ,//failed to create the surface for decoding
+    AMF_DECODER_NO_FREE_SURFACES                ,
+
+// component video encoder
+    AMF_ENCODER_NOT_PRESENT                     ,//failed to create the encoder
+
+// component video processor
+
+// component video converter
+
+// component dem
+    AMF_DEM_ERROR                               ,
+    AMF_DEM_PROPERTY_READONLY                   ,
+    AMF_DEM_REMOTE_DISPLAY_CREATE_FAILED        ,
+    AMF_DEM_START_ENCODING_FAILED               ,
+    AMF_DEM_QUERY_OUTPUT_FAILED                 ,
+
+// component TAN
+    AMF_TAN_CLIPPING_WAS_REQUIRED               , // Resulting data was truncated to meet output type's value limits.
+    AMF_TAN_UNSUPPORTED_VERSION                 , // Not supported version requested, solely for TANCreateContext().
+
+    AMF_NEED_MORE_INPUT                         ,//returned by AMFComponent::SubmitInput did not produce buffer
+} AMF_RESULT;
+
+
+//-----------------------------------------------------------------------------
+// Interface.h
+//-----------------------------------------------------------------------------
+#define AMF_DECLARE_IID(name, _data1, _data2, _data3, _data41, _data42, _data43, _data44, _data45, _data46, _data47, _data48) \
+        AMF_INLINE static const AMFGuid IID_##name(void) \
+        { \
+            AMFGuid uid = {_data1, _data2, _data3, _data41, _data42, _data43, _data44, _data45, _data46, _data47, _data48}; \
+            return uid; \
+        }
+AMF_DECLARE_IID(AMFInterface, 0x9d872f34, 0x90dc, 0x4b93, 0xb6, 0xb2, 0x6c, 0xa3, 0x7c, 0x85, 0x25, 0xdb)
+typedef struct AMFInterface AMFInterface;
+
+typedef struct AMFInterfaceVtbl
+{
+    // AMFInterface interface
+    amf_long(AMF_STD_CALL *Acquire)(AMFInterface* pThis);
+    amf_long(AMF_STD_CALL *Release)(AMFInterface* pThis);
+    enum AMF_RESULT(AMF_STD_CALL *QueryInterface)(AMFInterface* pThis, const struct AMFGuid *interfaceID, void** ppInterface);
+} AMFInterfaceVtbl;
+
+struct AMFInterface
+{
+    const AMFInterfaceVtbl *pVtbl;
+};
+
+//-----------------------------------------------------------------------------
+// Variant.h
+//-----------------------------------------------------------------------------
+
+//----------------------------------------------------------------------------------------------
+// variant types
+//----------------------------------------------------------------------------------------------
+typedef enum AMF_VARIANT_TYPE
+{
+    AMF_VARIANT_EMPTY = 0,
+
+    AMF_VARIANT_BOOL = 1,
+    AMF_VARIANT_INT64 = 2,
+    AMF_VARIANT_DOUBLE = 3,
+
+    AMF_VARIANT_RECT = 4,
+    AMF_VARIANT_SIZE = 5,
+    AMF_VARIANT_POINT = 6,
+    AMF_VARIANT_RATE = 7,
+    AMF_VARIANT_RATIO = 8,
+    AMF_VARIANT_COLOR = 9,
+
+    AMF_VARIANT_STRING = 10,  // value is char*
+    AMF_VARIANT_WSTRING = 11,  // value is wchar_t*
+    AMF_VARIANT_INTERFACE = 12,  // value is AMFInterface*
+} AMF_VARIANT_TYPE;
+//----------------------------------------------------------------------------------------------
+// variant struct
+//----------------------------------------------------------------------------------------------
+typedef struct AMFVariantStruct
+{
+    AMF_VARIANT_TYPE            type;
+    union
+    {
+        amf_bool                boolValue;
+        amf_int64               int64Value;
+        amf_double              doubleValue;
+        char*                   stringValue;
+        wchar_t*                wstringValue;
+        AMFInterface*           pInterface;
+        struct AMFRect          rectValue;
+        struct AMFSize          sizeValue;
+        struct AMFPoint         pointValue;
+        struct AMFRate          rateValue;
+        struct AMFRatio         ratioValue;
+        struct AMFColor         colorValue;
+    };
+} AMFVariantStruct;
+
+#define AMF_VARIANT_RETURN_IF_INVALID_POINTER(p) \
+       { \
+            if(p == NULL) \
+                    { \
+                 return AMF_INVALID_POINTER; \
+            } \
+       }
+
+static AMF_INLINE AMF_RESULT AMF_CDECL_CALL AMFVariantInit(AMFVariantStruct* pVariant)
+{
+    AMF_VARIANT_RETURN_IF_INVALID_POINTER(pVariant);
+    pVariant->type = AMF_VARIANT_EMPTY;
+    return AMF_OK;
+}
+
+static AMF_INLINE AMF_RESULT AMF_CDECL_CALL AMFVariantClear(AMFVariantStruct* pVariant)
+{
+    AMF_RESULT errRet = AMF_OK;
+    AMF_VARIANT_RETURN_IF_INVALID_POINTER(pVariant);
+
+    switch (pVariant->type)
+    {
+    case AMF_VARIANT_STRING:
+        amf_variant_free(pVariant->stringValue);
+        pVariant->type = AMF_VARIANT_EMPTY;
+        break;
+
+    case AMF_VARIANT_WSTRING:
+        amf_variant_free(pVariant->wstringValue);
+        pVariant->type = AMF_VARIANT_EMPTY;
+        break;
+
+    case AMF_VARIANT_INTERFACE:
+        if (pVariant->pInterface != NULL)
+        {
+#if defined(__cplusplus)
+            pVariant->pInterface->Release();
+#else
+            pVariant->pInterface->pVtbl->Release(pVariant->pInterface);
+#endif
+            pVariant->pInterface = NULL;
+        }
+        pVariant->type = AMF_VARIANT_EMPTY;
+        break;
+
+    default:
+        pVariant->type = AMF_VARIANT_EMPTY;
+        break;
+    }
+    return errRet;
+}
+//-------------------------------------------------------------------------------------------------
+static AMF_INLINE AMF_RESULT AMF_CDECL_CALL AMFVariantAssignBool(AMFVariantStruct* pDest, amf_bool value)
+{
+    AMF_RESULT errRet = AMF_OK;
+    AMF_VARIANT_RETURN_IF_INVALID_POINTER(pDest);
+
+    errRet = AMFVariantClear(pDest);
+    if (errRet == AMF_OK)
+    {
+        pDest->type = AMF_VARIANT_BOOL;
+        pDest->boolValue = value;
+    }
+    return errRet;
+}
+//-------------------------------------------------------------------------------------------------
+static AMF_INLINE AMF_RESULT AMF_CDECL_CALL AMFVariantAssignInt64(AMFVariantStruct* pDest, amf_int64 value)
+{
+    AMF_RESULT errRet = AMF_OK;
+    AMF_VARIANT_RETURN_IF_INVALID_POINTER(pDest);
+
+    errRet = AMFVariantClear(pDest);
+    if (errRet == AMF_OK)
+    {
+        pDest->type = AMF_VARIANT_INT64;
+        pDest->int64Value = value;
+    }
+    return errRet;
+}
+//-------------------------------------------------------------------------------------------------
+static AMF_INLINE AMF_RESULT AMF_CDECL_CALL AMFVariantAssignDouble(AMFVariantStruct* pDest, amf_double value)
+{
+    AMF_RESULT errRet = AMF_OK;
+    AMF_VARIANT_RETURN_IF_INVALID_POINTER(pDest);
+
+    errRet = AMFVariantClear(pDest);
+    if (errRet == AMF_OK)
+    {
+        pDest->type = AMF_VARIANT_DOUBLE;
+        pDest->doubleValue = value;
+    }
+    return errRet;
+}
+//-------------------------------------------------------------------------------------------------
+static AMF_INLINE AMF_RESULT AMF_CDECL_CALL AMFVariantAssignSize(AMFVariantStruct* pDest, const AMFSize* value)
+{
+    AMF_RESULT errRet = AMF_OK;
+    AMF_VARIANT_RETURN_IF_INVALID_POINTER(pDest);
+
+    errRet = AMFVariantClear(pDest);
+    if (errRet == AMF_OK)
+    {
+        pDest->type = AMF_VARIANT_SIZE;
+        pDest->sizeValue = *value;
+    }
+    return errRet;
+}
+//-------------------------------------------------------------------------------------------------
+static AMF_INLINE AMF_RESULT AMF_CDECL_CALL AMFVariantAssignPoint(AMFVariantStruct* pDest, const AMFPoint* value)
+{
+    AMF_RESULT errRet = AMF_OK;
+    AMF_VARIANT_RETURN_IF_INVALID_POINTER(pDest);
+
+    errRet = AMFVariantClear(pDest);
+    if (errRet == AMF_OK)
+    {
+        pDest->type = AMF_VARIANT_POINT;
+        pDest->pointValue = *value;
+    }
+    return errRet;
+}
+//-------------------------------------------------------------------------------------------------
+static AMF_INLINE AMF_RESULT AMF_CDECL_CALL AMFVariantAssignRate(AMFVariantStruct* pDest, const AMFRate* value)
+{
+    AMF_RESULT errRet = AMF_OK;
+    AMF_VARIANT_RETURN_IF_INVALID_POINTER(pDest);
+
+    errRet = AMFVariantClear(pDest);
+    if (errRet == AMF_OK)
+    {
+        pDest->type = AMF_VARIANT_RATE;
+        pDest->rateValue = *value;
+    }
+    return errRet;
+}
+//-------------------------------------------------------------------------------------------------
+static AMF_INLINE AMF_RESULT AMF_CDECL_CALL AMFVariantAssignRatio(AMFVariantStruct* pDest, const AMFRatio* value)
+{
+    AMF_RESULT errRet = AMF_OK;
+    AMF_VARIANT_RETURN_IF_INVALID_POINTER(pDest);
+
+    errRet = AMFVariantClear(pDest);
+    if (errRet == AMF_OK)
+    {
+        pDest->type = AMF_VARIANT_RATIO;
+        pDest->ratioValue = *value;
+    }
+    return errRet;
+}
+//-------------------------------------------------------------------------------------------------
+static AMF_INLINE AMF_RESULT AMF_CDECL_CALL AMFVariantAssignColor(AMFVariantStruct* pDest, const AMFColor* value)
+{
+    AMF_RESULT errRet = AMF_OK;
+    AMF_VARIANT_RETURN_IF_INVALID_POINTER(pDest);
+
+    errRet = AMFVariantClear(pDest);
+    if (errRet == AMF_OK)
+    {
+        pDest->type = AMF_VARIANT_COLOR;
+        pDest->colorValue = *value;
+    }
+    return errRet;
+}
+
+//-----------------------------------------------------------------------------
+// PropertyStorage.h
+//-----------------------------------------------------------------------------
+typedef struct AMFPropertyStorageObserver AMFPropertyStorageObserver;
+typedef struct AMFPropertyStorage AMFPropertyStorage;
+
+#define AMF_ASSIGN_PROPERTY_DATA(res, varType, pThis, name, val ) \
+    { \
+        AMFVariantStruct var = {0}; \
+        AMFVariantAssign##varType(&var, val); \
+        res = pThis->pVtbl->SetProperty(pThis, name, var ); \
+    }
+#define AMF_ASSIGN_PROPERTY_TYPE(res, varType, dataType , pThis, name, val )  AMF_ASSIGN_PROPERTY_DATA(res, varType, pThis, name, (dataType)val)
+
+#define AMF_ASSIGN_PROPERTY_INT64(res, pThis, name, val ) AMF_ASSIGN_PROPERTY_TYPE(res, Int64, amf_int64, pThis, name, val)
+#define AMF_ASSIGN_PROPERTY_DOUBLE(res, pThis, name, val ) AMF_ASSIGN_PROPERTY_TYPE(res, Double, amf_double, pThis, name, val)
+#define AMF_ASSIGN_PROPERTY_BOOL(res, pThis, name, val ) AMF_ASSIGN_PROPERTY_TYPE(res, Bool, amf_bool, pThis, name, val)
+#define AMF_ASSIGN_PROPERTY_RECT(res, pThis, name, val ) AMF_ASSIGN_PROPERTY_DATA(res, Rect, pThis, name, &val)
+#define AMF_ASSIGN_PROPERTY_SIZE(res, pThis, name, val ) AMF_ASSIGN_PROPERTY_DATA(res, Size, pThis, name, &val)
+#define AMF_ASSIGN_PROPERTY_POINT(res, pThis, name, val ) AMF_ASSIGN_PROPERTY_DATA(res, Point, pThis, name, &val)
+#define AMF_ASSIGN_PROPERTY_RATE(res, pThis, name, val ) AMF_ASSIGN_PROPERTY_DATA(res, Rate, pThis, name, &val)
+#define AMF_ASSIGN_PROPERTY_RATIO(res, pThis, name, val ) AMF_ASSIGN_PROPERTY_DATA(res, Ratio, pThis, name, &val)
+#define AMF_ASSIGN_PROPERTY_COLOR(res, pThis, name, val ) AMF_ASSIGN_PROPERTY_DATA(res, Color, pThis, name, &val)
+
+//-----------------------------------------------------------------------------
+// PropertyStorageEx.h
+//-----------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------
+typedef enum AMF_PROPERTY_CONTENT_ENUM
+{
+    AMF_PROPERTY_CONTENT_DEFAULT = 0,
+    AMF_PROPERTY_CONTENT_XML,               // m_eType is AMF_VARIANT_STRING
+
+    AMF_PROPERTY_CONTENT_FILE_OPEN_PATH,    // m_eType AMF_VARIANT_WSTRING
+    AMF_PROPERTY_CONTENT_FILE_SAVE_PATH     // m_eType AMF_VARIANT_WSTRING
+} AMF_PROPERTY_CONTENT_ENUM;
+//----------------------------------------------------------------------------------------------
+typedef enum AMF_PROPERTY_ACCESS_TYPE
+{
+    AMF_PROPERTY_ACCESS_PRIVATE = 0,
+    AMF_PROPERTY_ACCESS_READ = 0x1,
+    AMF_PROPERTY_ACCESS_WRITE = 0x2,
+    AMF_PROPERTY_ACCESS_READ_WRITE = (AMF_PROPERTY_ACCESS_READ | AMF_PROPERTY_ACCESS_WRITE),
+    AMF_PROPERTY_ACCESS_WRITE_RUNTIME = 0x4,
+    AMF_PROPERTY_ACCESS_FULL = 0xFF,
+} AMF_PROPERTY_ACCESS_TYPE;
+//----------------------------------------------------------------------------------------------
+typedef struct AMFEnumDescriptionEntry
+{
+    amf_int             value;
+    const wchar_t*      name;
+} AMFEnumDescriptionEntry;
+//----------------------------------------------------------------------------------------------
+typedef amf_uint32 AMF_PROPERTY_CONTENT_TYPE;
+
+typedef struct AMFPropertyInfo
+{
+    const wchar_t*                  name;
+    const wchar_t*                  desc;
+    AMF_VARIANT_TYPE                type;
+    AMF_PROPERTY_CONTENT_TYPE       contentType;
+
+    AMFVariantStruct                defaultValue;
+    AMFVariantStruct                minValue;
+    AMFVariantStruct                maxValue;
+    AMF_PROPERTY_ACCESS_TYPE        accessType;
+    const AMFEnumDescriptionEntry*  pEnumDescription;
+} AMFPropertyInfo;
+//-----------------------------------------------------------------------------
+// Data.h
+//-----------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------
+typedef enum AMF_DATA_TYPE
+{
+    AMF_DATA_BUFFER = 0,
+    AMF_DATA_SURFACE = 1,
+    AMF_DATA_AUDIO_BUFFER = 2,
+    AMF_DATA_USER = 1000,
+    // all extensions will be AMF_DATA_USER+i
+} AMF_DATA_TYPE;
+//----------------------------------------------------------------------------------------------
+typedef enum AMF_MEMORY_TYPE
+{
+    AMF_MEMORY_UNKNOWN = 0,
+    AMF_MEMORY_HOST = 1,
+    AMF_MEMORY_DX9 = 2,
+    AMF_MEMORY_DX11 = 3,
+    AMF_MEMORY_OPENCL = 4,
+    AMF_MEMORY_OPENGL = 5,
+    AMF_MEMORY_XV = 6,
+    AMF_MEMORY_GRALLOC = 7,
+    AMF_MEMORY_COMPUTE_FOR_DX9 = 8,
+    AMF_MEMORY_COMPUTE_FOR_DX11 = 9,
+} AMF_MEMORY_TYPE;
+
+//----------------------------------------------------------------------------------------------
+typedef enum AMF_DX_VERSION
+{
+    AMF_DX9 = 90,
+    AMF_DX9_EX = 91,
+    AMF_DX11_0 = 110,
+    AMF_DX11_1 = 111
+} AMF_DX_VERSION;
+
+typedef struct AMFData AMFData;
+AMF_DECLARE_IID(AMFData, 0xa1159bf6, 0x9104, 0x4107, 0x8e, 0xaa, 0xc5, 0x3d, 0x5d, 0xba, 0xc5, 0x11)
+
+typedef struct AMFDataVtbl
+{
+    // AMFInterface interface
+    amf_long(AMF_STD_CALL *Acquire)(AMFData* pThis);
+    amf_long(AMF_STD_CALL *Release)(AMFData* pThis);
+    enum AMF_RESULT(AMF_STD_CALL *QueryInterface)(AMFData* pThis, const struct AMFGuid *interfaceID, void** ppInterface);
+
+    // AMFPropertyStorage interface
+    AMF_RESULT(AMF_STD_CALL *SetProperty)(AMFData* pThis, const wchar_t* name, AMFVariantStruct value);
+    AMF_RESULT(AMF_STD_CALL *GetProperty)(AMFData* pThis, const wchar_t* name, AMFVariantStruct* pValue);
+    amf_bool(AMF_STD_CALL *HasProperty)(AMFData* pThis, const wchar_t* name);
+    amf_size(AMF_STD_CALL *GetPropertyCount)(AMFData* pThis);
+    AMF_RESULT(AMF_STD_CALL *GetPropertyAt)(AMFData* pThis, amf_size index, wchar_t* name, amf_size nameSize, AMFVariantStruct* pValue);
+    AMF_RESULT(AMF_STD_CALL *Clear)(AMFData* pThis);
+    AMF_RESULT(AMF_STD_CALL *AddTo)(AMFData* pThis, AMFPropertyStorage* pDest, amf_bool overwrite, amf_bool deep);
+    AMF_RESULT(AMF_STD_CALL *CopyTo)(AMFData* pThis, AMFPropertyStorage* pDest, amf_bool deep);
+    void                (AMF_STD_CALL *AddObserver)(AMFData* pThis, AMFPropertyStorageObserver* pObserver);
+    void                (AMF_STD_CALL *RemoveObserver)(AMFData* pThis, AMFPropertyStorageObserver* pObserver);
+
+    // AMFData interface
+
+    AMF_MEMORY_TYPE(AMF_STD_CALL *GetMemoryType)(AMFData* pThis);
+
+    AMF_RESULT(AMF_STD_CALL *Duplicate)(AMFData* pThis, AMF_MEMORY_TYPE type, AMFData** ppData);
+    AMF_RESULT(AMF_STD_CALL *Convert)(AMFData* pThis, AMF_MEMORY_TYPE type); // optimal interop if possible. Copy through host memory if needed
+    AMF_RESULT(AMF_STD_CALL *Interop)(AMFData* pThis, AMF_MEMORY_TYPE type); // only optimal interop if possible. No copy through host memory for GPU objects
+
+    AMF_DATA_TYPE(AMF_STD_CALL *GetDataType)(AMFData* pThis);
+
+    amf_bool(AMF_STD_CALL *IsReusable)(AMFData* pThis);
+
+    void                (AMF_STD_CALL *SetPts)(AMFData* pThis, amf_pts pts);
+    amf_pts(AMF_STD_CALL *GetPts)(AMFData* pThis);
+    void                (AMF_STD_CALL *SetDuration)(AMFData* pThis, amf_pts duration);
+    amf_pts(AMF_STD_CALL *GetDuration)(AMFData* pThis);
+
+} AMFDataVtbl;
+
+struct AMFData
+{
+    const AMFDataVtbl *pVtbl;
+};
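+/*
+ * Illustrative usage sketch (not part of the AMF SDK headers): with the plain C
+ * API every call goes through the explicit vtable pointer. For an AMFData*
+ * obtained from some other AMF call:
+ *
+ *     amf_pts pts = data->pVtbl->GetPts(data);
+ *     data->pVtbl->SetDuration(data, duration);
+ *     data->pVtbl->Release(data);              // drop our reference
+ */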
+//-----------------------------------------------------------------------------
+// Plane.h
+//-----------------------------------------------------------------------------
+//---------------------------------------------------------------------------------------------
+typedef enum AMF_PLANE_TYPE
+{
+    AMF_PLANE_UNKNOWN = 0,
+    AMF_PLANE_PACKED = 1,             // for all packed formats: BGRA, YUY2, etc
+    AMF_PLANE_Y = 2,
+    AMF_PLANE_UV = 3,
+    AMF_PLANE_U = 4,
+    AMF_PLANE_V = 5,
+} AMF_PLANE_TYPE;
+
+//---------------------------------------------------------------------------------------------
+// AMFPlane interface
+//---------------------------------------------------------------------------------------------
+AMF_DECLARE_IID(AMFPlane, 0xbede1aa6, 0xd8fa, 0x4625, 0x94, 0x65, 0x6c, 0x82, 0xc4, 0x37, 0x71, 0x2e)
+typedef struct AMFPlane AMFPlane;
+typedef struct AMFPlaneVtbl
+{
+    // AMFInterface interface
+    amf_long(AMF_STD_CALL *Acquire)(AMFPlane* pThis);
+    amf_long(AMF_STD_CALL *Release)(AMFPlane* pThis);
+    enum AMF_RESULT(AMF_STD_CALL *QueryInterface)(AMFPlane* pThis, const struct AMFGuid *interfaceID, void** ppInterface);
+
+    // AMFPlane interface
+    AMF_PLANE_TYPE(AMF_STD_CALL *GetType)(AMFPlane* pThis);
+    void*               (AMF_STD_CALL *GetNative)(AMFPlane* pThis);
+    amf_int32(AMF_STD_CALL *GetPixelSizeInBytes)(AMFPlane* pThis);
+    amf_int32(AMF_STD_CALL *GetOffsetX)(AMFPlane* pThis);
+    amf_int32(AMF_STD_CALL *GetOffsetY)(AMFPlane* pThis);
+    amf_int32(AMF_STD_CALL *GetWidth)(AMFPlane* pThis);
+    amf_int32(AMF_STD_CALL *GetHeight)(AMFPlane* pThis);
+    amf_int32(AMF_STD_CALL *GetHPitch)(AMFPlane* pThis);
+    amf_int32(AMF_STD_CALL *GetVPitch)(AMFPlane* pThis);
+    amf_bool(AMF_STD_CALL *IsTiled)(AMFPlane* pThis);
+
+} AMFPlaneVtbl;
+
+struct AMFPlane
+{
+    const AMFPlaneVtbl *pVtbl;
+};
+//-----------------------------------------------------------------------------
+// Buffer.h
+//-----------------------------------------------------------------------------
+#if defined(_MSC_VER)
+#pragma warning( push )
+#pragma warning(disable : 4263)
+#pragma warning(disable : 4264)
+#endif
+
+typedef struct AMFBuffer AMFBuffer;
+typedef struct AMFBufferObserver AMFBufferObserver;
+
+AMF_DECLARE_IID(AMFBuffer, 0xb04b7248, 0xb6f0, 0x4321, 0xb6, 0x91, 0xba, 0xa4, 0x74, 0xf, 0x9f, 0xcb)
+
+typedef struct AMFBufferVtbl
+{
+    // AMFInterface interface
+    amf_long(AMF_STD_CALL *Acquire)(AMFBuffer* pThis);
+    amf_long(AMF_STD_CALL *Release)(AMFBuffer* pThis);
+    enum AMF_RESULT(AMF_STD_CALL *QueryInterface)(AMFBuffer* pThis, const struct AMFGuid *interfaceID, void** ppInterface);
+
+    // AMFPropertyStorage interface
+    AMF_RESULT(AMF_STD_CALL *SetProperty)(AMFBuffer* pThis, const wchar_t* name, AMFVariantStruct value);
+    AMF_RESULT(AMF_STD_CALL *GetProperty)(AMFBuffer* pThis, const wchar_t* name, AMFVariantStruct* pValue);
+    amf_bool(AMF_STD_CALL *HasProperty)(AMFBuffer* pThis, const wchar_t* name);
+    amf_size(AMF_STD_CALL *GetPropertyCount)(AMFBuffer* pThis);
+    AMF_RESULT(AMF_STD_CALL *GetPropertyAt)(AMFBuffer* pThis, amf_size index, wchar_t* name, amf_size nameSize, AMFVariantStruct* pValue);
+    AMF_RESULT(AMF_STD_CALL *Clear)(AMFBuffer* pThis);
+    AMF_RESULT(AMF_STD_CALL *AddTo)(AMFBuffer* pThis, AMFPropertyStorage* pDest, amf_bool overwrite, amf_bool deep);
+    AMF_RESULT(AMF_STD_CALL *CopyTo)(AMFBuffer* pThis, AMFPropertyStorage* pDest, amf_bool deep);
+    void                (AMF_STD_CALL *AddObserver)(AMFBuffer* pThis, AMFPropertyStorageObserver* pObserver);
+    void                (AMF_STD_CALL *RemoveObserver)(AMFBuffer* pThis, AMFPropertyStorageObserver* pObserver);
+
+    // AMFData interface
+
+    AMF_MEMORY_TYPE(AMF_STD_CALL *GetMemoryType)(AMFBuffer* pThis);
+
+    AMF_RESULT(AMF_STD_CALL *Duplicate)(AMFBuffer* pThis, AMF_MEMORY_TYPE type, AMFData** ppData);
+    AMF_RESULT(AMF_STD_CALL *Convert)(AMFBuffer* pThis, AMF_MEMORY_TYPE type); // optimal interop if possible. Copy through host memory if needed
+    AMF_RESULT(AMF_STD_CALL *Interop)(AMFBuffer* pThis, AMF_MEMORY_TYPE type); // only optimal interop if possible. No copy through host memory for GPU objects
+
+    AMF_DATA_TYPE(AMF_STD_CALL *GetDataType)(AMFBuffer* pThis);
+
+    amf_bool(AMF_STD_CALL *IsReusable)(AMFBuffer* pThis);
+
+    void                (AMF_STD_CALL *SetPts)(AMFBuffer* pThis, amf_pts pts);
+    amf_pts(AMF_STD_CALL *GetPts)(AMFBuffer* pThis);
+    void                (AMF_STD_CALL *SetDuration)(AMFBuffer* pThis, amf_pts duration);
+    amf_pts(AMF_STD_CALL *GetDuration)(AMFBuffer* pThis);
+
+    // AMFBuffer interface
+
+    AMF_RESULT(AMF_STD_CALL *SetSize)(AMFBuffer* pThis, amf_size newSize);
+    amf_size(AMF_STD_CALL *GetSize)(AMFBuffer* pThis);
+    void*               (AMF_STD_CALL *GetNative)(AMFBuffer* pThis);
+
+    // Observer management
+    void                (AMF_STD_CALL *AddObserver_Buffer)(AMFBuffer* pThis, AMFBufferObserver* pObserver);
+    void                (AMF_STD_CALL *RemoveObserver_Buffer)(AMFBuffer* pThis, AMFBufferObserver* pObserver);
+
+} AMFBufferVtbl;
+
+struct AMFBuffer
+{
+    const AMFBufferVtbl *pVtbl;
+};
+
+//-----------------------------------------------------------------------------
+// AudioBuffer.h
+//-----------------------------------------------------------------------------
+typedef enum AMF_AUDIO_FORMAT
+{
+    AMFAF_UNKNOWN = -1,
+    AMFAF_U8 = 0,               // amf_uint8
+    AMFAF_S16 = 1,               // amf_int16
+    AMFAF_S32 = 2,               // amf_int32
+    AMFAF_FLT = 3,               // amf_float
+    AMFAF_DBL = 4,               // amf_double
+
+    AMFAF_U8P = 5,               // amf_uint8
+    AMFAF_S16P = 6,               // amf_int16
+    AMFAF_S32P = 7,               // amf_int32
+    AMFAF_FLTP = 8,               // amf_float
+    AMFAF_DBLP = 9,               // amf_double
+    AMFAF_FIRST = AMFAF_U8,
+    AMFAF_LAST = AMFAF_DBLP,
+} AMF_AUDIO_FORMAT;
+
+typedef struct AMFAudioBuffer AMFAudioBuffer;
+typedef struct AMFAudioBufferObserver AMFAudioBufferObserver;
+//-----------------------------------------------------------------------------
+// Surface.h
+//-----------------------------------------------------------------------------
+
+typedef enum AMF_SURFACE_FORMAT
+{
+    AMF_SURFACE_UNKNOWN = 0,
+    AMF_SURFACE_NV12,               ///< 1 - planar Y width x height + packed UV width/2 x height/2 - 8 bit per component
+    AMF_SURFACE_YV12,               ///< 2 - planar Y width x height + V width/2 x height/2 + U width/2 x height/2 - 8 bit per component
+    AMF_SURFACE_BGRA,               ///< 3 - packed - 8 bit per component
+    AMF_SURFACE_ARGB,               ///< 4 - packed - 8 bit per component
+    AMF_SURFACE_RGBA,               ///< 5 - packed - 8 bit per component
+    AMF_SURFACE_GRAY8,              ///< 6 - single component - 8 bit
+    AMF_SURFACE_YUV420P,            ///< 7 - planar Y width x height + U width/2 x height/2 + V width/2 x height/2 - 8 bit per component
+    AMF_SURFACE_U8V8,               ///< 8 - double component - 8 bit per component
+    AMF_SURFACE_YUY2,               ///< 9 - YUY2: Byte 0=8-bit Y'0; Byte 1=8-bit Cb; Byte 2=8-bit Y'1; Byte 3=8-bit Cr
+    AMF_SURFACE_P010,               ///< 10- planar Y width x height + packed UV width/2 x height/2 - 10 bit per component (16 allocated, upper 10 bits are used)
+    AMF_SURFACE_RGBA_F16,           ///< 11 - packed - 16 bit per component float
+
+    AMF_SURFACE_FIRST = AMF_SURFACE_NV12,
+    AMF_SURFACE_LAST = AMF_SURFACE_RGBA_F16
+} AMF_SURFACE_FORMAT;
+
+//----------------------------------------------------------------------------------------------
+// frame type
+//----------------------------------------------------------------------------------------------
+typedef enum AMF_FRAME_TYPE
+{
+    // flags
+    AMF_FRAME_STEREO_FLAG = 0x10000000,
+    AMF_FRAME_LEFT_FLAG = AMF_FRAME_STEREO_FLAG | 0x20000000,
+    AMF_FRAME_RIGHT_FLAG = AMF_FRAME_STEREO_FLAG | 0x40000000,
+    AMF_FRAME_BOTH_FLAG = AMF_FRAME_LEFT_FLAG | AMF_FRAME_RIGHT_FLAG,
+    AMF_FRAME_INTERLEAVED_FLAG = 0x01000000,
+    AMF_FRAME_FIELD_FLAG = 0x02000000,
+    AMF_FRAME_EVEN_FLAG = 0x04000000,
+    AMF_FRAME_ODD_FLAG = 0x08000000,
+
+    // values
+    AMF_FRAME_UNKNOWN = -1,
+    AMF_FRAME_PROGRESSIVE = 0,
+
+    AMF_FRAME_INTERLEAVED_EVEN_FIRST = AMF_FRAME_INTERLEAVED_FLAG | AMF_FRAME_EVEN_FLAG,
+    AMF_FRAME_INTERLEAVED_ODD_FIRST = AMF_FRAME_INTERLEAVED_FLAG | AMF_FRAME_ODD_FLAG,
+    AMF_FRAME_FIELD_SINGLE_EVEN = AMF_FRAME_FIELD_FLAG | AMF_FRAME_EVEN_FLAG,
+    AMF_FRAME_FIELD_SINGLE_ODD = AMF_FRAME_FIELD_FLAG | AMF_FRAME_ODD_FLAG,
+
+    AMF_FRAME_STEREO_LEFT = AMF_FRAME_LEFT_FLAG,
+    AMF_FRAME_STEREO_RIGHT = AMF_FRAME_RIGHT_FLAG,
+    AMF_FRAME_STEREO_BOTH = AMF_FRAME_BOTH_FLAG,
+
+    AMF_FRAME_INTERLEAVED_EVEN_FIRST_STEREO_LEFT = AMF_FRAME_INTERLEAVED_FLAG | AMF_FRAME_EVEN_FLAG | AMF_FRAME_LEFT_FLAG,
+    AMF_FRAME_INTERLEAVED_EVEN_FIRST_STEREO_RIGHT = AMF_FRAME_INTERLEAVED_FLAG | AMF_FRAME_EVEN_FLAG | AMF_FRAME_RIGHT_FLAG,
+    AMF_FRAME_INTERLEAVED_EVEN_FIRST_STEREO_BOTH = AMF_FRAME_INTERLEAVED_FLAG | AMF_FRAME_EVEN_FLAG | AMF_FRAME_BOTH_FLAG,
+
+    AMF_FRAME_INTERLEAVED_ODD_FIRST_STEREO_LEFT = AMF_FRAME_INTERLEAVED_FLAG | AMF_FRAME_ODD_FLAG | AMF_FRAME_LEFT_FLAG,
+    AMF_FRAME_INTERLEAVED_ODD_FIRST_STEREO_RIGHT = AMF_FRAME_INTERLEAVED_FLAG | AMF_FRAME_ODD_FLAG | AMF_FRAME_RIGHT_FLAG,
+    AMF_FRAME_INTERLEAVED_ODD_FIRST_STEREO_BOTH = AMF_FRAME_INTERLEAVED_FLAG | AMF_FRAME_ODD_FLAG | AMF_FRAME_BOTH_FLAG,
+} AMF_FRAME_TYPE;
+
+typedef struct AMFSurface AMFSurface;
+typedef struct AMFSurfaceObserver AMFSurfaceObserver;
+
+typedef struct AMFSurfaceObserverVtbl
+{
+    void                (AMF_STD_CALL *OnSurfaceDataRelease)(AMFSurfaceObserver* pThis, AMFSurface* pSurface);
+} AMFSurfaceObserverVtbl;
+
+struct AMFSurfaceObserver
+{
+    const AMFSurfaceObserverVtbl *pVtbl;
+};
+
+AMF_DECLARE_IID(AMFSurface, 0x3075dbe3, 0x8718, 0x4cfa, 0x86, 0xfb, 0x21, 0x14, 0xc0, 0xa5, 0xa4, 0x51)
+typedef struct AMFSurfaceVtbl
+{
+    // AMFInterface interface
+    amf_long(AMF_STD_CALL *Acquire)(AMFSurface* pThis);
+    amf_long(AMF_STD_CALL *Release)(AMFSurface* pThis);
+    enum AMF_RESULT(AMF_STD_CALL *QueryInterface)(AMFSurface* pThis, const struct AMFGuid *interfaceID, void** ppInterface);
+
+    // AMFPropertyStorage interface
+    AMF_RESULT(AMF_STD_CALL *SetProperty)(AMFSurface* pThis, const wchar_t* name, AMFVariantStruct value);
+    AMF_RESULT(AMF_STD_CALL *GetProperty)(AMFSurface* pThis, const wchar_t* name, AMFVariantStruct* pValue);
+    amf_bool(AMF_STD_CALL *HasProperty)(AMFSurface* pThis, const wchar_t* name);
+    amf_size(AMF_STD_CALL *GetPropertyCount)(AMFSurface* pThis);
+    AMF_RESULT(AMF_STD_CALL *GetPropertyAt)(AMFSurface* pThis, amf_size index, wchar_t* name, amf_size nameSize, AMFVariantStruct* pValue);
+    AMF_RESULT(AMF_STD_CALL *Clear)(AMFSurface* pThis);
+    AMF_RESULT(AMF_STD_CALL *AddTo)(AMFSurface* pThis, AMFPropertyStorage* pDest, amf_bool overwrite, amf_bool deep);
+    AMF_RESULT(AMF_STD_CALL *CopyTo)(AMFSurface* pThis, AMFPropertyStorage* pDest, amf_bool deep);
+    void                (AMF_STD_CALL *AddObserver)(AMFSurface* pThis, AMFPropertyStorageObserver* pObserver);
+    void                (AMF_STD_CALL *RemoveObserver)(AMFSurface* pThis, AMFPropertyStorageObserver* pObserver);
+
+    // AMFData interface
+
+    AMF_MEMORY_TYPE(AMF_STD_CALL *GetMemoryType)(AMFSurface* pThis);
+
+    AMF_RESULT(AMF_STD_CALL *Duplicate)(AMFSurface* pThis, AMF_MEMORY_TYPE type, AMFData** ppData);
+    AMF_RESULT(AMF_STD_CALL *Convert)(AMFSurface* pThis, AMF_MEMORY_TYPE type); // optimal interop if possible. Copy through host memory if needed
+    AMF_RESULT(AMF_STD_CALL *Interop)(AMFSurface* pThis, AMF_MEMORY_TYPE type); // only optimal interop if possible. No copy through host memory for GPU objects
+
+    AMF_DATA_TYPE(AMF_STD_CALL *GetDataType)(AMFSurface* pThis);
+
+    amf_bool(AMF_STD_CALL *IsReusable)(AMFSurface* pThis);
+
+    void                (AMF_STD_CALL *SetPts)(AMFSurface* pThis, amf_pts pts);
+    amf_pts(AMF_STD_CALL *GetPts)(AMFSurface* pThis);
+    void                (AMF_STD_CALL *SetDuration)(AMFSurface* pThis, amf_pts duration);
+    amf_pts(AMF_STD_CALL *GetDuration)(AMFSurface* pThis);
+
+    // AMFSurface interface
+
+    AMF_SURFACE_FORMAT(AMF_STD_CALL *GetFormat)(AMFSurface* pThis);
+
+    // do not store planes outside. should be used together with Surface
+    amf_size(AMF_STD_CALL *GetPlanesCount)(AMFSurface* pThis);
+    AMFPlane*           (AMF_STD_CALL *GetPlaneAt)(AMFSurface* pThis, amf_size index);
+    AMFPlane*           (AMF_STD_CALL *GetPlane)(AMFSurface* pThis, AMF_PLANE_TYPE type);
+
+    AMF_FRAME_TYPE(AMF_STD_CALL *GetFrameType)(AMFSurface* pThis);
+    void                (AMF_STD_CALL *SetFrameType)(AMFSurface* pThis, AMF_FRAME_TYPE type);
+
+    AMF_RESULT(AMF_STD_CALL *SetCrop)(AMFSurface* pThis, amf_int32 x, amf_int32 y, amf_int32 width, amf_int32 height);
+    AMF_RESULT(AMF_STD_CALL *CopySurfaceRegion)(AMFSurface* pThis, AMFSurface* pDest, amf_int32 dstX, amf_int32 dstY, amf_int32 srcX, amf_int32 srcY, amf_int32 width, amf_int32 height);
+
+
+    // Observer management
+    void                (AMF_STD_CALL *AddObserver_Surface)(AMFSurface* pThis, AMFSurfaceObserver* pObserver);
+    void                (AMF_STD_CALL *RemoveObserver_Surface)(AMFSurface* pThis, AMFSurfaceObserver* pObserver);
+
+} AMFSurfaceVtbl;
+
+struct AMFSurface
+{
+    const AMFSurfaceVtbl *pVtbl;
+};
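+/*
+ * Illustrative sketch (not part of the AMF SDK headers): reading the planes of
+ * a host-memory NV12 surface. As noted above, plane pointers must not outlive
+ * the surface:
+ *
+ *     AMFPlane *y  = surface->pVtbl->GetPlane(surface, AMF_PLANE_Y);
+ *     AMFPlane *uv = surface->pVtbl->GetPlane(surface, AMF_PLANE_UV);
+ *     amf_uint8 *y_data  = (amf_uint8 *)y->pVtbl->GetNative(y);
+ *     amf_int32  y_pitch = y->pVtbl->GetHPitch(y);
+ *     // copy GetWidth() x GetHeight() bytes row by row using y_pitch,
+ *     // then handle the interleaved UV plane the same way
+ */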
+//-----------------------------------------------------------------------------
+// Component.h
+//-----------------------------------------------------------------------------
+AMF_DECLARE_IID(AMFComponent, 0x8b51e5e4, 0x455d, 0x4034, 0xa7, 0x46, 0xde, 0x1b, 0xed, 0xc3, 0xc4, 0x6)
+typedef struct AMFComponent AMFComponent;
+typedef struct AMFContext AMFContext;
+typedef struct AMFIOCaps AMFIOCaps;
+typedef struct AMFCaps AMFCaps;
+
+typedef struct AMFDataAllocatorCB AMFDataAllocatorCB;
+typedef struct AMFComponentOptimizationCallback AMFComponentOptimizationCallback;
+
+typedef struct AMFComponentVtbl
+{
+    // AMFInterface interface
+    amf_long(AMF_STD_CALL *Acquire)(AMFComponent* pThis);
+    amf_long(AMF_STD_CALL *Release)(AMFComponent* pThis);
+    enum AMF_RESULT(AMF_STD_CALL *QueryInterface)(AMFComponent* pThis, const struct AMFGuid *interfaceID, void** ppInterface);
+
+    // AMFPropertyStorage interface
+    AMF_RESULT(AMF_STD_CALL *SetProperty)(AMFComponent* pThis, const wchar_t* name, AMFVariantStruct value);
+    AMF_RESULT(AMF_STD_CALL *GetProperty)(AMFComponent* pThis, const wchar_t* name, AMFVariantStruct* pValue);
+    amf_bool(AMF_STD_CALL *HasProperty)(AMFComponent* pThis, const wchar_t* name);
+    amf_size(AMF_STD_CALL *GetPropertyCount)(AMFComponent* pThis);
+    AMF_RESULT(AMF_STD_CALL *GetPropertyAt)(AMFComponent* pThis, amf_size index, wchar_t* name, amf_size nameSize, AMFVariantStruct* pValue);
+    AMF_RESULT(AMF_STD_CALL *Clear)(AMFComponent* pThis);
+    AMF_RESULT(AMF_STD_CALL *AddTo)(AMFComponent* pThis, AMFPropertyStorage* pDest, amf_bool overwrite, amf_bool deep);
+    AMF_RESULT(AMF_STD_CALL *CopyTo)(AMFComponent* pThis, AMFPropertyStorage* pDest, amf_bool deep);
+    void                (AMF_STD_CALL *AddObserver)(AMFComponent* pThis, AMFPropertyStorageObserver* pObserver);
+    void                (AMF_STD_CALL *RemoveObserver)(AMFComponent* pThis, AMFPropertyStorageObserver* pObserver);
+
+    // AMFPropertyStorageEx interface
+
+    amf_size(AMF_STD_CALL *GetPropertiesInfoCount)(AMFComponent* pThis);
+    AMF_RESULT(AMF_STD_CALL *GetPropertyInfoAt)(AMFComponent* pThis, amf_size index, const AMFPropertyInfo** ppInfo);
+    AMF_RESULT(AMF_STD_CALL *GetPropertyInfo)(AMFComponent* pThis, const wchar_t* name, const AMFPropertyInfo** ppInfo);
+    AMF_RESULT(AMF_STD_CALL *ValidateProperty)(AMFComponent* pThis, const wchar_t* name, AMFVariantStruct value, AMFVariantStruct* pOutValidated);
+
+    // AMFComponent interface
+
+    AMF_RESULT(AMF_STD_CALL *Init)(AMFComponent* pThis, AMF_SURFACE_FORMAT format, amf_int32 width, amf_int32 height);
+    AMF_RESULT(AMF_STD_CALL *ReInit)(AMFComponent* pThis, amf_int32 width, amf_int32 height);
+    AMF_RESULT(AMF_STD_CALL *Terminate)(AMFComponent* pThis);
+    AMF_RESULT(AMF_STD_CALL *Drain)(AMFComponent* pThis);
+    AMF_RESULT(AMF_STD_CALL *Flush)(AMFComponent* pThis);
+
+    AMF_RESULT(AMF_STD_CALL *SubmitInput)(AMFComponent* pThis, AMFData* pData);
+    AMF_RESULT(AMF_STD_CALL *QueryOutput)(AMFComponent* pThis, AMFData** ppData);
+    AMFContext* (AMF_STD_CALL *GetContext)(AMFComponent* pThis);
+    AMF_RESULT(AMF_STD_CALL *SetOutputDataAllocatorCB)(AMFComponent* pThis, AMFDataAllocatorCB* callback);
+
+    AMF_RESULT(AMF_STD_CALL *GetCaps)(AMFComponent* pThis, AMFCaps** ppCaps);
+    AMF_RESULT(AMF_STD_CALL *Optimize)(AMFComponent* pThis, AMFComponentOptimizationCallback* pCallback);
+} AMFComponentVtbl;
+
+struct AMFComponent
+{
+    const AMFComponentVtbl *pVtbl;
+};
+
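+/*
+ * Illustrative encode-loop sketch (not part of the AMF SDK headers). The
+ * result codes (AMF_OK, AMF_REPEAT, ...) are assumed from the AMF_RESULT
+ * definitions earlier in this header:
+ *
+ *     encoder->pVtbl->Init(encoder, AMF_SURFACE_NV12, width, height);
+ *     // per frame:
+ *     res = encoder->pVtbl->SubmitInput(encoder, (AMFData *)surface);
+ *     res = encoder->pVtbl->QueryOutput(encoder, &data);  // AMF_REPEAT = no output yet
+ *     // at end of stream:
+ *     encoder->pVtbl->Drain(encoder);   // then keep calling QueryOutput()
+ *     encoder->pVtbl->Terminate(encoder);
+ */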
+//-----------------------------------------------------------------------------
+// Context.h
+//-----------------------------------------------------------------------------
+typedef struct AMFCompute AMFCompute;
+typedef struct AMFComputeFactory AMFComputeFactory;
+typedef struct AMFComputeDevice AMFComputeDevice;
+typedef struct AMFContext AMFContext;
+AMF_DECLARE_IID(AMFContext, 0xa76a13f0, 0xd80e, 0x4fcc, 0xb5, 0x8, 0x65, 0xd0, 0xb5, 0x2e, 0xd9, 0xee)
+
+typedef struct AMFContextVtbl
+{
+    // AMFInterface interface
+    amf_long(AMF_STD_CALL *Acquire)(AMFContext* pThis);
+    amf_long(AMF_STD_CALL *Release)(AMFContext* pThis);
+    enum AMF_RESULT(AMF_STD_CALL *QueryInterface)(AMFContext* pThis, const struct AMFGuid *interfaceID, void** ppInterface);
+
+    // AMFInterface AMFPropertyStorage
+
+    AMF_RESULT(AMF_STD_CALL *SetProperty)(AMFContext* pThis, const wchar_t* name, AMFVariantStruct value);
+    AMF_RESULT(AMF_STD_CALL *GetProperty)(AMFContext* pThis, const wchar_t* name, AMFVariantStruct* pValue);
+    amf_bool(AMF_STD_CALL *HasProperty)(AMFContext* pThis, const wchar_t* name);
+    amf_size(AMF_STD_CALL *GetPropertyCount)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *GetPropertyAt)(AMFContext* pThis, amf_size index, wchar_t* name, amf_size nameSize, AMFVariantStruct* pValue);
+    AMF_RESULT(AMF_STD_CALL *Clear)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *AddTo)(AMFContext* pThis, AMFPropertyStorage* pDest, amf_bool overwrite, amf_bool deep);
+    AMF_RESULT(AMF_STD_CALL *CopyTo)(AMFContext* pThis, AMFPropertyStorage* pDest, amf_bool deep);
+    void                (AMF_STD_CALL *AddObserver)(AMFContext* pThis, AMFPropertyStorageObserver* pObserver);
+    void                (AMF_STD_CALL *RemoveObserver)(AMFContext* pThis, AMFPropertyStorageObserver* pObserver);
+
+    // AMFContext interface
+
+    // Cleanup
+    AMF_RESULT(AMF_STD_CALL *Terminate)(AMFContext* pThis);
+
+    // DX9
+    AMF_RESULT(AMF_STD_CALL *InitDX9)(AMFContext* pThis, void* pDX9Device);
+    void*               (AMF_STD_CALL *GetDX9Device)(AMFContext* pThis, AMF_DX_VERSION dxVersionRequired);
+    AMF_RESULT(AMF_STD_CALL *LockDX9)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *UnlockDX9)(AMFContext* pThis);
+    // DX11
+    AMF_RESULT(AMF_STD_CALL *InitDX11)(AMFContext* pThis, void* pDX11Device, AMF_DX_VERSION dxVersionRequired);
+    void*               (AMF_STD_CALL *GetDX11Device)(AMFContext* pThis, AMF_DX_VERSION dxVersionRequired);
+    AMF_RESULT(AMF_STD_CALL *LockDX11)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *UnlockDX11)(AMFContext* pThis);
+
+    // OpenCL
+    AMF_RESULT(AMF_STD_CALL *InitOpenCL)(AMFContext* pThis, void* pCommandQueue);
+    void*               (AMF_STD_CALL *GetOpenCLContext)(AMFContext* pThis);
+    void*               (AMF_STD_CALL *GetOpenCLCommandQueue)(AMFContext* pThis);
+    void*               (AMF_STD_CALL *GetOpenCLDeviceID)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *GetOpenCLComputeFactory)(AMFContext* pThis, AMFComputeFactory **ppFactory); // advanced compute - multiple queries
+    AMF_RESULT(AMF_STD_CALL *InitOpenCLEx)(AMFContext* pThis, AMFComputeDevice *pDevice);
+    AMF_RESULT(AMF_STD_CALL *LockOpenCL)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *UnlockOpenCL)(AMFContext* pThis);
+
+    // OpenGL
+    AMF_RESULT(AMF_STD_CALL *InitOpenGL)(AMFContext* pThis, amf_handle hOpenGLContext, amf_handle hWindow, amf_handle hDC);
+    amf_handle(AMF_STD_CALL *GetOpenGLContext)(AMFContext* pThis);
+    amf_handle(AMF_STD_CALL *GetOpenGLDrawable)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *LockOpenGL)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *UnlockOpenGL)(AMFContext* pThis);
+    // XV - Linux
+    AMF_RESULT(AMF_STD_CALL *InitXV)(AMFContext* pThis, void* pXVDevice);
+    void*               (AMF_STD_CALL *GetXVDevice)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *LockXV)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *UnlockXV)(AMFContext* pThis);
+
+    // Gralloc - Android
+    AMF_RESULT(AMF_STD_CALL *InitGralloc)(AMFContext* pThis, void* pGrallocDevice);
+    void*               (AMF_STD_CALL *GetGrallocDevice)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *LockGralloc)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *UnlockGralloc)(AMFContext* pThis);
+    // Allocation
+    AMF_RESULT(AMF_STD_CALL *AllocBuffer)(AMFContext* pThis, AMF_MEMORY_TYPE type, amf_size size, AMFBuffer** ppBuffer);
+    AMF_RESULT(AMF_STD_CALL *AllocSurface)(AMFContext* pThis, AMF_MEMORY_TYPE type, AMF_SURFACE_FORMAT format, amf_int32 width, amf_int32 height, AMFSurface** ppSurface);
+    AMF_RESULT(AMF_STD_CALL *AllocAudioBuffer)(AMFContext* pThis, AMF_MEMORY_TYPE type, AMF_AUDIO_FORMAT format, amf_int32 samples, amf_int32 sampleRate, amf_int32 channels,
+        AMFAudioBuffer** ppAudioBuffer);
+
+    // Wrap existing objects
+    AMF_RESULT(AMF_STD_CALL *CreateBufferFromHostNative)(AMFContext* pThis, void* pHostBuffer, amf_size size, AMFBuffer** ppBuffer, AMFBufferObserver* pObserver);
+    AMF_RESULT(AMF_STD_CALL *CreateSurfaceFromHostNative)(AMFContext* pThis, AMF_SURFACE_FORMAT format, amf_int32 width, amf_int32 height, amf_int32 hPitch, amf_int32 vPitch, void* pData,
+        AMFSurface** ppSurface, AMFSurfaceObserver* pObserver);
+    AMF_RESULT(AMF_STD_CALL *CreateSurfaceFromDX9Native)(AMFContext* pThis, void* pDX9Surface, AMFSurface** ppSurface, AMFSurfaceObserver* pObserver);
+    AMF_RESULT(AMF_STD_CALL *CreateSurfaceFromDX11Native)(AMFContext* pThis, void* pDX11Surface, AMFSurface** ppSurface, AMFSurfaceObserver* pObserver);
+    AMF_RESULT(AMF_STD_CALL *CreateSurfaceFromOpenGLNative)(AMFContext* pThis, AMF_SURFACE_FORMAT format, amf_handle hGLTextureID, AMFSurface** ppSurface, AMFSurfaceObserver* pObserver);
+    AMF_RESULT(AMF_STD_CALL *CreateSurfaceFromGrallocNative)(AMFContext* pThis, amf_handle hGrallocSurface, AMFSurface** ppSurface, AMFSurfaceObserver* pObserver);
+    AMF_RESULT(AMF_STD_CALL *CreateSurfaceFromOpenCLNative)(AMFContext* pThis, AMF_SURFACE_FORMAT format, amf_int32 width, amf_int32 height, void** pClPlanes,
+        AMFSurface** ppSurface, AMFSurfaceObserver* pObserver);
+    AMF_RESULT(AMF_STD_CALL *CreateBufferFromOpenCLNative)(AMFContext* pThis, void* pCLBuffer, amf_size size, AMFBuffer** ppBuffer);
+
+    // Access to AMFCompute interface - AMF_MEMORY_OPENCL, AMF_MEMORY_COMPUTE_FOR_DX9, AMF_MEMORY_COMPUTE_FOR_DX11 are currently supported
+    AMF_RESULT(AMF_STD_CALL *GetCompute)(AMFContext* pThis, AMF_MEMORY_TYPE eMemType, AMFCompute** ppCompute);
+
+} AMFContextVtbl;
+
+struct AMFContext
+{
+    const AMFContextVtbl *pVtbl;
+};
+
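+/*
+ * Illustrative sketch (not part of the AMF SDK headers): a context is bound to
+ * a device with one of the Init* calls above before buffers or surfaces are
+ * allocated. Passing NULL for the device is assumed here to let the runtime
+ * create one internally:
+ *
+ *     context->pVtbl->InitDX11(context, NULL, AMF_DX11_0);
+ *     AMFSurface *surface = NULL;
+ *     context->pVtbl->AllocSurface(context, AMF_MEMORY_HOST, AMF_SURFACE_NV12,
+ *                                  width, height, &surface);
+ *     // ... use the surface ...
+ *     context->pVtbl->Terminate(context);
+ */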
+//-----------------------------------------------------------------------------
+// Debug.h 
+//-----------------------------------------------------------------------------
+
+typedef struct AMFDebug AMFDebug;
+typedef struct AMFDebugVtbl
+{
+    // AMFDebug interface
+    void               (AMF_STD_CALL *EnablePerformanceMonitor)(AMFDebug* pThis, amf_bool enable);
+    amf_bool(AMF_STD_CALL *PerformanceMonitorEnabled)(AMFDebug* pThis);
+    void               (AMF_STD_CALL *AssertsEnable)(AMFDebug* pThis, amf_bool enable);
+    amf_bool(AMF_STD_CALL *AssertsEnabled)(AMFDebug* pThis);
+} AMFDebugVtbl;
+
+struct AMFDebug
+{
+    const AMFDebugVtbl *pVtbl;
+};
+
+//-----------------------------------------------------------------------------
+// Trace.h 
+//-----------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------
+// trace levels
+//----------------------------------------------------------------------------------------------
+#define AMF_TRACE_ERROR     0
+#define AMF_TRACE_WARNING   1
+#define AMF_TRACE_INFO      2 // default in sdk
+#define AMF_TRACE_DEBUG     3
+#define AMF_TRACE_TRACE     4
+
+#define AMF_TRACE_TEST      5
+#define AMF_TRACE_NOLOG     100
+
+//----------------------------------------------------------------------------------------------
+// available trace writers
+//----------------------------------------------------------------------------------------------
+#define AMF_TRACE_WRITER_CONSOLE            L"Console"
+#define AMF_TRACE_WRITER_DEBUG_OUTPUT       L"DebugOutput"
+#define AMF_TRACE_WRITER_FILE               L"File"
+
+
+typedef struct AMFTraceWriter AMFTraceWriter;
+
+typedef struct AMFTraceWriterVtbl
+{
+    // AMFTraceWriter interface
+    void (AMF_CDECL_CALL *Write)(AMFTraceWriter* pThis, const wchar_t* scope, const wchar_t* message);
+    void (AMF_CDECL_CALL *Flush)(AMFTraceWriter* pThis);
+} AMFTraceWriterVtbl;
+
+struct AMFTraceWriter
+{
+    const AMFTraceWriterVtbl *pVtbl;
+};
+typedef struct AMFTrace AMFTrace;
+
+typedef struct AMFTraceVtbl
+{
+    // AMFTrace interface
+    void               (AMF_STD_CALL *TraceW)(AMFTrace* pThis, const wchar_t* src_path, amf_int32 line, amf_int32 level, const wchar_t* scope, amf_int32 countArgs, const wchar_t* format, ...);
+    void               (AMF_STD_CALL *Trace)(AMFTrace* pThis, const wchar_t* src_path, amf_int32 line, amf_int32 level, const wchar_t* scope, const wchar_t* message, va_list* pArglist);
+
+    amf_int32(AMF_STD_CALL *SetGlobalLevel)(AMFTrace* pThis, amf_int32 level);
+    amf_int32(AMF_STD_CALL *GetGlobalLevel)(AMFTrace* pThis);
+
+    amf_bool(AMF_STD_CALL *EnableWriter)(AMFTrace* pThis, const wchar_t* writerID, amf_bool enable);
+    amf_bool(AMF_STD_CALL *WriterEnabled)(AMFTrace* pThis, const wchar_t* writerID);
+    AMF_RESULT(AMF_STD_CALL *TraceEnableAsync)(AMFTrace* pThis, amf_bool enable);
+    AMF_RESULT(AMF_STD_CALL *TraceFlush)(AMFTrace* pThis);
+    AMF_RESULT(AMF_STD_CALL *SetPath)(AMFTrace* pThis, const wchar_t* path);
+    AMF_RESULT(AMF_STD_CALL *GetPath)(AMFTrace* pThis, wchar_t* path, amf_size* pSize);
+    amf_int32(AMF_STD_CALL *SetWriterLevel)(AMFTrace* pThis, const wchar_t* writerID, amf_int32 level);
+    amf_int32(AMF_STD_CALL *GetWriterLevel)(AMFTrace* pThis, const wchar_t* writerID);
+    amf_int32(AMF_STD_CALL *SetWriterLevelForScope)(AMFTrace* pThis, const wchar_t* writerID, const wchar_t* scope, amf_int32 level);
+    amf_int32(AMF_STD_CALL *GetWriterLevelForScope)(AMFTrace* pThis, const wchar_t* writerID, const wchar_t* scope);
+
+    amf_int32(AMF_STD_CALL *GetIndentation)(AMFTrace* pThis);
+    void                (AMF_STD_CALL *Indent)(AMFTrace* pThis, amf_int32 addIndent);
+
+    void                (AMF_STD_CALL *RegisterWriter)(AMFTrace* pThis, const wchar_t* writerID, AMFTraceWriter* pWriter, amf_bool enable);
+    void                (AMF_STD_CALL *UnregisterWriter)(AMFTrace* pThis, const wchar_t* writerID);
+
+    const wchar_t*      (AMF_STD_CALL *GetResultText)(AMFTrace* pThis, AMF_RESULT res);
+    const wchar_t*      (AMF_STD_CALL *SurfaceGetFormatName)(AMFTrace* pThis, const AMF_SURFACE_FORMAT eSurfaceFormat);
+    AMF_SURFACE_FORMAT(AMF_STD_CALL *SurfaceGetFormatByName)(AMFTrace* pThis, const wchar_t* name);
+
+    const wchar_t* const (AMF_STD_CALL *GetMemoryTypeName)(AMFTrace* pThis, const AMF_MEMORY_TYPE memoryType);
+    AMF_MEMORY_TYPE(AMF_STD_CALL *GetMemoryTypeByName)(AMFTrace* pThis, const wchar_t* name);
+
+    const wchar_t* const (AMF_STD_CALL *GetSampleFormatName)(AMFTrace* pThis, const AMF_AUDIO_FORMAT eFormat);
+    AMF_AUDIO_FORMAT(AMF_STD_CALL *GetSampleFormatByName)(AMFTrace* pThis, const wchar_t* name);
+} AMFTraceVtbl;
+
+struct AMFTrace
+{
+    const AMFTraceVtbl *pVtbl;
+};
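+/*
+ * Illustrative sketch (not part of the AMF SDK headers): AMFTraceWriter is an
+ * interface the application implements itself and hands to RegisterWriter(),
+ * e.g. to forward AMF runtime messages to its own logger:
+ *
+ *     static void AMF_CDECL_CALL my_write(AMFTraceWriter *w,
+ *                                         const wchar_t *scope,
+ *                                         const wchar_t *msg)
+ *     {
+ *         fwprintf(stderr, L"[%ls] %ls", scope, msg);
+ *     }
+ *     static void AMF_CDECL_CALL my_flush(AMFTraceWriter *w) { }
+ *
+ *     static const AMFTraceWriterVtbl my_vtbl = { my_write, my_flush };
+ *     static AMFTraceWriter my_writer = { &my_vtbl };
+ *
+ *     trace->pVtbl->RegisterWriter(trace, L"MyWriter", &my_writer, 1);
+ */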
+//-----------------------------------------------------------------------------
+// Factory.h
+//-----------------------------------------------------------------------------
+typedef struct AMFPrograms AMFPrograms;
+
+typedef struct AMFFactory AMFFactory;
+
+typedef struct AMFFactoryVtbl
+{
+    AMF_RESULT(AMF_STD_CALL *CreateContext)(AMFFactory* pThis, AMFContext** ppContext);
+    AMF_RESULT(AMF_STD_CALL *CreateComponent)(AMFFactory* pThis, AMFContext* pContext, const wchar_t* id, AMFComponent** ppComponent);
+    AMF_RESULT(AMF_STD_CALL *SetCacheFolder)(AMFFactory* pThis, const wchar_t* path);
+    const wchar_t*      (AMF_STD_CALL *GetCacheFolder)(AMFFactory* pThis);
+    AMF_RESULT(AMF_STD_CALL *GetDebug)(AMFFactory* pThis, AMFDebug** ppDebug);
+    AMF_RESULT(AMF_STD_CALL *GetTrace)(AMFFactory* pThis, AMFTrace** ppTrace);
+    AMF_RESULT(AMF_STD_CALL *GetPrograms)(AMFFactory* pThis, AMFPrograms** ppPrograms);
+} AMFFactoryVtbl;
+
+struct AMFFactory
+{
+    const AMFFactoryVtbl *pVtbl;
+};
+
+#define AMF_INIT_FUNCTION_NAME             "AMFInit"
+#define AMF_QUERY_VERSION_FUNCTION_NAME    "AMFQueryVersion"
+
+typedef AMF_RESULT(AMF_CDECL_CALL *AMFInit_Fn)(amf_uint64 version, AMFFactory **ppFactory);
+typedef AMF_RESULT(AMF_CDECL_CALL *AMFQueryVersion_Fn)(amf_uint64 *pVersion);
+
+#if defined(_M_AMD64)
+#define AMF_DLL_NAME    L"amfrt64.dll"
+#define AMF_DLL_NAMEA   "amfrt64.dll"
+#else
+#define AMF_DLL_NAME    L"amfrt32.dll"
+#define AMF_DLL_NAMEA   "amfrt32.dll"
+#endif
+
+
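+/*
+ * Illustrative loading sketch (not part of the AMF SDK headers): the runtime
+ * is loaded dynamically rather than linked. On Windows this is roughly (error
+ * checking omitted; passing the queried runtime version straight to AMFInit is
+ * a simplification of the real version negotiation):
+ *
+ *     HMODULE lib = LoadLibraryW(AMF_DLL_NAME);
+ *     AMFQueryVersion_Fn query_version = (AMFQueryVersion_Fn)
+ *         GetProcAddress(lib, AMF_QUERY_VERSION_FUNCTION_NAME);
+ *     AMFInit_Fn init = (AMFInit_Fn)
+ *         GetProcAddress(lib, AMF_INIT_FUNCTION_NAME);
+ *
+ *     amf_uint64 version = 0;
+ *     query_version(&version);
+ *     AMFFactory *factory = NULL;
+ *     init(version, &factory);
+ *     AMFContext *context = NULL;
+ *     factory->pVtbl->CreateContext(factory, &context);
+ */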
+//-----------------------------------------------------------------------------
+// VideoEncoderVCE.h
+//-----------------------------------------------------------------------------
+#define AMFVideoEncoderVCE_AVC L"AMFVideoEncoderVCE_AVC"
+#define AMFVideoEncoderVCE_SVC L"AMFVideoEncoderVCE_SVC"
+
+enum AMF_VIDEO_ENCODER_USAGE_ENUM
+{
+    AMF_VIDEO_ENCODER_USAGE_TRANSCONDING = 0,
+    AMF_VIDEO_ENCODER_USAGE_ULTRA_LOW_LATENCY,
+    AMF_VIDEO_ENCODER_USAGE_LOW_LATENCY,
+    AMF_VIDEO_ENCODER_USAGE_WEBCAM
+};
+
+enum AMF_VIDEO_ENCODER_PROFILE_ENUM
+{
+    AMF_VIDEO_ENCODER_PROFILE_BASELINE = 66,
+    AMF_VIDEO_ENCODER_PROFILE_MAIN = 77,
+    AMF_VIDEO_ENCODER_PROFILE_HIGH = 100
+};
+
+enum AMF_VIDEO_ENCODER_SCANTYPE_ENUM
+{
+    AMF_VIDEO_ENCODER_SCANTYPE_PROGRESSIVE = 0,
+    AMF_VIDEO_ENCODER_SCANTYPE_INTERLACED
+};
+
+enum AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_ENUM
+{
+    AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP = 0,
+    AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CBR,
+    AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR,
+    AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_LATENCY_CONSTRAINED_VBR
+};
+
+enum AMF_VIDEO_ENCODER_QUALITY_PRESET_ENUM
+{
+    AMF_VIDEO_ENCODER_QUALITY_PRESET_BALANCED = 0,
+    AMF_VIDEO_ENCODER_QUALITY_PRESET_SPEED,
+    AMF_VIDEO_ENCODER_QUALITY_PRESET_QUALITY
+};
+
+enum AMF_VIDEO_ENCODER_PICTURE_STRUCTURE_ENUM
+{
+    AMF_VIDEO_ENCODER_PICTURE_STRUCTURE_NONE = 0,
+    AMF_VIDEO_ENCODER_PICTURE_STRUCTURE_FRAME,
+    AMF_VIDEO_ENCODER_PICTURE_STRUCTURE_TOP_FIELD,
+    AMF_VIDEO_ENCODER_PICTURE_STRUCTURE_BOTTOM_FIELD
+};
+
+enum AMF_VIDEO_ENCODER_PICTURE_TYPE_ENUM
+{
+    AMF_VIDEO_ENCODER_PICTURE_TYPE_NONE = 0,
+    AMF_VIDEO_ENCODER_PICTURE_TYPE_SKIP,
+    AMF_VIDEO_ENCODER_PICTURE_TYPE_IDR,
+    AMF_VIDEO_ENCODER_PICTURE_TYPE_I,
+    AMF_VIDEO_ENCODER_PICTURE_TYPE_P,
+    AMF_VIDEO_ENCODER_PICTURE_TYPE_B
+};
+
+enum AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_ENUM
+{
+    AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_IDR,
+    AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_I,
+    AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_P,
+    AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_B
+};
+
+enum AMF_VIDEO_ENCODER_PREENCODE_MODE_ENUM
+{
+    AMF_VIDEO_ENCODER_PREENCODE_DISABLED = 0,
+    AMF_VIDEO_ENCODER_PREENCODE_ENABLED = 1,
+};
+
+enum AMF_VIDEO_ENCODER_CODING_ENUM
+{
+    AMF_VIDEO_ENCODER_UNDEFINED = 0, // BASELINE = CALV; MAIN, HIGH = CABAC
+    AMF_VIDEO_ENCODER_CABAC,
+    AMF_VIDEO_ENCODER_CALV,
+
+};
+
+
+// Static properties - can be set before Init()
+
+#define AMF_VIDEO_ENCODER_FRAMESIZE                             L"FrameSize"                // AMFSize; default = 0,0; Frame size
+#define AMF_VIDEO_ENCODER_FRAMERATE                             L"FrameRate"                // AMFRate; default = depends on usage; Frame Rate 
+
+#define AMF_VIDEO_ENCODER_EXTRADATA                             L"ExtraData"                // AMFInterface* - > AMFBuffer*; SPS/PPS buffer in Annex B format - read-only
+#define AMF_VIDEO_ENCODER_USAGE                                 L"Usage"                    // amf_int64(AMF_VIDEO_ENCODER_USAGE_ENUM); default = N/A; Encoder usage type. fully configures parameter set. 
+#define AMF_VIDEO_ENCODER_PROFILE                               L"Profile"                  // amf_int64(AMF_VIDEO_ENCODER_PROFILE_ENUM) ; default = AMF_VIDEO_ENCODER_PROFILE_MAIN;  H264 profile
+#define AMF_VIDEO_ENCODER_PROFILE_LEVEL                         L"ProfileLevel"             // amf_int64; default = 42; H264 profile level
+#define AMF_VIDEO_ENCODER_MAX_LTR_FRAMES                        L"MaxOfLTRFrames"           // amf_int64; default = 0; Max number of LTR frames
+#define AMF_VIDEO_ENCODER_SCANTYPE                              L"ScanType"                 // amf_int64(AMF_VIDEO_ENCODER_SCANTYPE_ENUM); default = AMF_VIDEO_ENCODER_SCANTYPE_PROGRESSIVE; indicates input stream type
+#define AMF_VIDEO_ENCODER_MAX_NUM_REFRAMES                      L"MaxNumRefFrames"          // amf_int64; Maximum number of reference frames
+#define AMF_VIDEO_ENCODER_ASPECT_RATIO                          L"AspectRatio"              // AMFRatio; default = 1, 1
+#define AMF_VIDEO_ENCODER_FULL_RANGE_COLOR                      L"FullRangeColor"           // bool; default = false; indicates that YUV input is (0,255)
+#define AMF_VIDEO_ENCODER_RATE_CONTROL_PREANALYSIS_ENABLE       L"RateControlPreanalysisEnable"     // amf_int64(AMF_VIDEO_ENCODER_PREENCODE_MODE_ENUM); default =  AMF_VIDEO_ENCODER_PREENCODE_DISABLED; controls Pre-analysis assisted rate control 
+
+// Quality preset property
+#define AMF_VIDEO_ENCODER_QUALITY_PRESET                        L"QualityPreset"            // amf_int64(AMF_VIDEO_ENCODER_QUALITY_PRESET_ENUM); default = depends on USAGE; Quality Preset 
+
+
+// Dynamic properties - can be set at any time
+
+// Rate control properties
+#define AMF_VIDEO_ENCODER_B_PIC_DELTA_QP                        L"BPicturesDeltaQP"         // amf_int64; default = depends on USAGE; B-picture Delta
+#define AMF_VIDEO_ENCODER_REF_B_PIC_DELTA_QP                    L"ReferenceBPicturesDeltaQP"// amf_int64; default = depends on USAGE; Reference B-picture Delta
+
+#define AMF_VIDEO_ENCODER_ENFORCE_HRD                           L"EnforceHRD"               // bool; default = depends on USAGE; Enforce HRD
+#define AMF_VIDEO_ENCODER_FILLER_DATA_ENABLE                    L"FillerDataEnable"         // bool; default = false; Filler Data Enable
+#define AMF_VIDEO_ENCODER_ENABLE_VBAQ                           L"EnableVBAQ"               // bool; default = depends on USAGE; Enable VBAQ
+
+
+#define AMF_VIDEO_ENCODER_VBV_BUFFER_SIZE                       L"VBVBufferSize"            // amf_int64; default = depends on USAGE; VBV Buffer Size in bits
+#define AMF_VIDEO_ENCODER_INITIAL_VBV_BUFFER_FULLNESS           L"InitialVBVBufferFullness" // amf_int64; default =  64; Initial VBV Buffer Fullness 0=0% 64=100%
+
+#define AMF_VIDEO_ENCODER_MAX_AU_SIZE                           L"MaxAUSize"                // amf_int64; default = 60; Max AU Size in bits
+
+#define AMF_VIDEO_ENCODER_MIN_QP                                L"MinQP"                    // amf_int64; default = depends on USAGE; Min QP; range = 0-51
+#define AMF_VIDEO_ENCODER_MAX_QP                                L"MaxQP"                    // amf_int64; default = depends on USAGE; Max QP; range = 0-51
+#define AMF_VIDEO_ENCODER_QP_I                                  L"QPI"                      // amf_int64; default = 22; I-frame QP; range = 0-51
+#define AMF_VIDEO_ENCODER_QP_P                                  L"QPP"                      // amf_int64; default = 22; P-frame QP; range = 0-51
+#define AMF_VIDEO_ENCODER_QP_B                                  L"QPB"                      // amf_int64; default = 22; B-frame QP; range = 0-51
+#define AMF_VIDEO_ENCODER_TARGET_BITRATE                        L"TargetBitrate"            // amf_int64; default = depends on USAGE; Target bit rate in bits
+#define AMF_VIDEO_ENCODER_PEAK_BITRATE                          L"PeakBitrate"              // amf_int64; default = depends on USAGE; Peak bit rate in bits
+#define AMF_VIDEO_ENCODER_RATE_CONTROL_SKIP_FRAME_ENABLE        L"RateControlSkipFrameEnable"   // bool; default =  depends on USAGE; Rate Control Based Frame Skip 
+#define AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD                   L"RateControlMethod"        // amf_int64(AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_ENUM); default = depends on USAGE; Rate Control Method 
+
+// Picture control properties
+#define AMF_VIDEO_ENCODER_HEADER_INSERTION_SPACING              L"HeaderInsertionSpacing"   // amf_int64; default = depends on USAGE; Header Insertion Spacing; range 0-1000
+#define AMF_VIDEO_ENCODER_B_PIC_PATTERN                         L"BPicturesPattern"         // amf_int64; default = 3; B-picture Pattern (number of B-Frames)
+#define AMF_VIDEO_ENCODER_DE_BLOCKING_FILTER                    L"DeBlockingFilter"         // bool; default = depends on USAGE; De-blocking Filter
+#define AMF_VIDEO_ENCODER_B_REFERENCE_ENABLE                    L"BReferenceEnable"         // bool; default = true; Enable Reference to B-frames
+#define AMF_VIDEO_ENCODER_IDR_PERIOD                            L"IDRPeriod"                // amf_int64; default = depends on USAGE; IDR Period in frames
+#define AMF_VIDEO_ENCODER_INTRA_REFRESH_NUM_MBS_PER_SLOT        L"IntraRefreshMBsNumberPerSlot" // amf_int64; default = depends on USAGE; Intra Refresh MBs Number Per Slot in Macroblocks
+#define AMF_VIDEO_ENCODER_SLICES_PER_FRAME                      L"SlicesPerFrame"           // amf_int64; default = 1; Number of slices Per Frame 
+#define AMF_VIDEO_ENCODER_CABAC_ENABLE                          L"CABACEnable"              // amf_int64(AMF_VIDEO_ENCODER_CODING_ENUM) default = AMF_VIDEO_ENCODER_UNDEFINED
+
+// Motion estimation
+#define AMF_VIDEO_ENCODER_MOTION_HALF_PIXEL                     L"HalfPixel"                // bool; default= true; Half Pixel 
+#define AMF_VIDEO_ENCODER_MOTION_QUARTERPIXEL                   L"QuarterPixel"             // bool; default= true; Quarter Pixel
+
+// SVC
+#define AMF_VIDEO_ENCODER_NUM_TEMPORAL_ENHANCMENT_LAYERS        L"NumOfTemporalEnhancmentLayers" // amf_int64; default = 0; range = 0, min(2, caps->GetMaxNumOfTemporalLayers()) number of temporal enhancement layers (SVC)
+
+// Per-submission properties - can be set on input surface interface
+#define AMF_VIDEO_ENCODER_END_OF_SEQUENCE                       L"EndOfSequence"            // bool; default = false; generate end of sequence
+#define AMF_VIDEO_ENCODER_END_OF_STREAM                         L"EndOfStream"              // bool; default = false; generate end of stream
+#define AMF_VIDEO_ENCODER_FORCE_PICTURE_TYPE                    L"ForcePictureType"         // amf_int64(AMF_VIDEO_ENCODER_PICTURE_TYPE_ENUM); default = AMF_VIDEO_ENCODER_PICTURE_TYPE_NONE; generate particular picture type
+#define AMF_VIDEO_ENCODER_INSERT_AUD                            L"InsertAUD"                // bool; default = false; insert AUD
+#define AMF_VIDEO_ENCODER_INSERT_SPS                            L"InsertSPS"                // bool; default = false; insert SPS
+#define AMF_VIDEO_ENCODER_INSERT_PPS                            L"InsertPPS"                // bool; default = false; insert PPS
+#define AMF_VIDEO_ENCODER_PICTURE_STRUCTURE                     L"PictureStructure"         // amf_int64(AMF_VIDEO_ENCODER_PICTURE_STRUCTURE_ENUM); default = AMF_VIDEO_ENCODER_PICTURE_STRUCTURE_FRAME; indicate picture type
+#define AMF_VIDEO_ENCODER_MARK_CURRENT_WITH_LTR_INDEX           L"MarkCurrentWithLTRIndex"  // amf_int64; default = N/A; Mark current frame with LTR index
+#define AMF_VIDEO_ENCODER_FORCE_LTR_REFERENCE_BITFIELD          L"ForceLTRReferenceBitfield"// amf_int64; default = 0; force LTR bit-field 
+
+// properties set by encoder on output buffer interface
+#define AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE                      L"OutputDataType"           // amf_int64(AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_ENUM); default = N/A
+#define AMF_VIDEO_ENCODER_OUTPUT_MARKED_LTR_INDEX               L"MarkedLTRIndex"           //amf_int64; default = -1; Marked LTR index
+#define AMF_VIDEO_ENCODER_OUTPUT_REFERENCED_LTR_INDEX_BITFIELD  L"ReferencedLTRIndexBitfield" // amf_int64; default = 0; referenced LTR bit-field 
+
+
+#define AMF_VIDEO_ENCODER_HDCP_COUNTER                          L"HDCPCounter"              //  const void*
+
+// Properties for multi-instance cloud gaming
+#define AMF_VIDEO_ENCODER_MAX_INSTANCES                         L"EncoderMaxInstances"      //  amf_uint32; default = 1; max number of encoder instances
+#define AMF_VIDEO_ENCODER_MULTI_INSTANCE_MODE                   L"MultiInstanceMode"        //  bool; default = false;
+#define AMF_VIDEO_ENCODER_CURRENT_QUEUE                         L"MultiInstanceCurrentQueue"//  amf_uint32; default = 0;
+
+// VCE Encoder capabilities - exposed in AMFCaps interface
+#define AMF_VIDEO_ENCODER_CAP_MAX_BITRATE                       L"MaxBitrate"               // amf_int64; Maximum bit rate in bits
+#define AMF_VIDEO_ENCODER_CAP_NUM_OF_STREAMS                    L"NumOfStreams"             // amf_int64; maximum number of encode streams supported 
+#define AMF_VIDEO_ENCODER_CAP_MAX_PROFILE                       L"MaxProfile"               // AMF_VIDEO_ENCODER_PROFILE_ENUM
+#define AMF_VIDEO_ENCODER_CAP_MAX_LEVEL                         L"MaxLevel"                 // amf_int64 maximum profile level
+#define AMF_VIDEO_ENCODER_CAP_BFRAMES                           L"BFrames"                  // bool  is B-Frames supported
+#define AMF_VIDEO_ENCODER_CAP_MIN_REFERENCE_FRAMES              L"MinReferenceFrames"       // amf_int64 minimum number of reference frames
+#define AMF_VIDEO_ENCODER_CAP_MAX_REFERENCE_FRAMES              L"MaxReferenceFrames"       // amf_int64 maximum number of reference frames
+#define AMF_VIDEO_ENCODER_CAP_MAX_TEMPORAL_LAYERS               L"MaxTemporalLayers"        // amf_int64 maximum number of temporal layers
+#define AMF_VIDEO_ENCODER_CAP_FIXED_SLICE_MODE                  L"FixedSliceMode"           // bool  is fixed slice mode supported
+#define AMF_VIDEO_ENCODER_CAP_NUM_OF_HW_INSTANCES               L"NumOfHwInstances"         // amf_int64 number of HW encoder instances
+
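+/*
+ * Illustrative configuration sketch (not part of the AMF SDK headers):
+ * creating the AVC encoder component and setting a few of the static
+ * properties above. The AMF_ASSIGN_PROPERTY_INT64() helper is assumed from the
+ * variant section earlier in this header:
+ *
+ *     AMFComponent *encoder = NULL;
+ *     factory->pVtbl->CreateComponent(factory, context,
+ *                                     AMFVideoEncoderVCE_AVC, &encoder);
+ *     AMF_ASSIGN_PROPERTY_INT64(res, encoder, AMF_VIDEO_ENCODER_USAGE,
+ *                               AMF_VIDEO_ENCODER_USAGE_TRANSCONDING);
+ *     AMF_ASSIGN_PROPERTY_INT64(res, encoder, AMF_VIDEO_ENCODER_TARGET_BITRATE,
+ *                               4000000);
+ *     encoder->pVtbl->Init(encoder, AMF_SURFACE_NV12, width, height);
+ */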
+//-----------------------------------------------------------------------------
+// VideoEncoderHEVC.h
+//-----------------------------------------------------------------------------
+#define AMFVideoEncoder_HEVC L"AMFVideoEncoderHW_HEVC"
+
+enum AMF_VIDEO_ENCODER_HEVC_USAGE_ENUM
+{
+    AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING = 0,
+    AMF_VIDEO_ENCODER_HEVC_USAGE_ULTRA_LOW_LATENCY,
+    AMF_VIDEO_ENCODER_HEVC_USAGE_LOW_LATENCY,
+    AMF_VIDEO_ENCODER_HEVC_USAGE_WEBCAM
+};
+
+enum AMF_VIDEO_ENCODER_HEVC_PROFILE_ENUM
+{
+    AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN = 1
+};
+
+enum AMF_VIDEO_ENCODER_HEVC_TIER_ENUM
+{
+    AMF_VIDEO_ENCODER_HEVC_TIER_MAIN = 0,
+    AMF_VIDEO_ENCODER_HEVC_TIER_HIGH = 1
+};
+
+enum AMF_VIDEO_ENCODER_LEVEL_ENUM
+{
+    AMF_LEVEL_1 = 30,
+    AMF_LEVEL_2 = 60,
+    AMF_LEVEL_2_1 = 63,
+    AMF_LEVEL_3 = 90,
+    AMF_LEVEL_3_1 = 93,
+    AMF_LEVEL_4 = 120,
+    AMF_LEVEL_4_1 = 123,
+    AMF_LEVEL_5 = 150,
+    AMF_LEVEL_5_1 = 153,
+    AMF_LEVEL_5_2 = 156,
+    AMF_LEVEL_6 = 180,
+    AMF_LEVEL_6_1 = 183,
+    AMF_LEVEL_6_2 = 186
+};
+
+enum AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_ENUM
+{
+    AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CONSTANT_QP = 0,
+    AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_LATENCY_CONSTRAINED_VBR,
+    AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR,
+    AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CBR
+};
+
+enum AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_ENUM
+{
+    AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_NONE = 0,
+    AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_SKIP,
+    AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_IDR,
+    AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_I,
+    AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_P
+};
+
+enum AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_ENUM
+{
+    AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_IDR,
+    AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_I,
+    AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_P
+};
+
+enum AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_ENUM
+{
+    AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_QUALITY = 0,
+    AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_BALANCED = 5,
+    AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_SPEED = 10
+};
+
+enum AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_ENUM
+{
+    AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_NONE = 0,
+    AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_GOP_ALIGNED,
+    AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_IDR_ALIGNED
+};
+
+
+
+// Static properties - can be set before Init()
+#define AMF_VIDEO_ENCODER_HEVC_FRAMESIZE                            L"HevcFrameSize"                // AMFSize; default = 0,0; Frame size
+
+#define AMF_VIDEO_ENCODER_HEVC_USAGE                                L"HevcUsage"                    // amf_int64(AMF_VIDEO_ENCODER_HEVC_USAGE_ENUM); default = N/A; Encoder usage type. fully configures parameter set. 
+#define AMF_VIDEO_ENCODER_HEVC_PROFILE                              L"HevcProfile"                  // amf_int64(AMF_VIDEO_ENCODER_HEVC_PROFILE_ENUM) ; default = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN;
+#define AMF_VIDEO_ENCODER_HEVC_TIER                                 L"HevcTier"                     // amf_int64(AMF_VIDEO_ENCODER_HEVC_TIER_ENUM) ; default = AMF_VIDEO_ENCODER_HEVC_TIER_MAIN;
+#define AMF_VIDEO_ENCODER_HEVC_PROFILE_LEVEL                        L"HevcProfileLevel"             // amf_int64 (AMF_VIDEO_ENCODER_LEVEL_ENUM, default depends on HW capabilities); 
+#define AMF_VIDEO_ENCODER_HEVC_MAX_LTR_FRAMES                       L"HevcMaxOfLTRFrames"           // amf_int64; default = 0; Max number of LTR frames
+#define AMF_VIDEO_ENCODER_HEVC_MAX_NUM_REFRAMES                     L"HevcMaxNumRefFrames"          // amf_int64; default = 1; Maximum number of reference frames
+#define AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET                       L"HevcQualityPreset"            // amf_int64(AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_ENUM); default = depends on USAGE; Quality Preset 
+#define AMF_VIDEO_ENCODER_HEVC_EXTRADATA                            L"HevcExtraData"                // AMFInterface* - > AMFBuffer*; SPS/PPS buffer - read-only
+#define AMF_VIDEO_ENCODER_HEVC_ASPECT_RATIO                         L"HevcAspectRatio"              // AMFRatio; default = 1, 1
+
+// Picture control properties
+#define AMF_VIDEO_ENCODER_HEVC_NUM_GOPS_PER_IDR                     L"HevcGOPSPerIDR"               // amf_int64; default = 60; The frequency to insert IDR as start of a GOP. 0 means no IDR will be inserted.
+#define AMF_VIDEO_ENCODER_HEVC_GOP_SIZE                             L"HevcGOPSize"                  // amf_int64; default = 60; GOP Size, in frames
+#define AMF_VIDEO_ENCODER_HEVC_DE_BLOCKING_FILTER_DISABLE           L"HevcDeBlockingFilter"         // bool; default = depends on USAGE; De-blocking Filter
+#define AMF_VIDEO_ENCODER_HEVC_SLICES_PER_FRAME                     L"HevcSlicesPerFrame"           // amf_int64; default = 1; Number of slices Per Frame 
+#define AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE                L"HevcHeaderInsertionMode"      // amf_int64(AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_ENUM); default = NONE
+
+// Rate control properties
+#define AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD                  L"HevcRateControlMethod"        // amf_int64(AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_ENUM); default = depends on USAGE; Rate Control Method
+#define AMF_VIDEO_ENCODER_HEVC_FRAMERATE                            L"HevcFrameRate"                // AMFRate; default = depends on usage; Frame Rate 
+#define AMF_VIDEO_ENCODER_HEVC_VBV_BUFFER_SIZE                      L"HevcVBVBufferSize"            // amf_int64; default = depends on USAGE; VBV Buffer Size in bits
+#define AMF_VIDEO_ENCODER_HEVC_INITIAL_VBV_BUFFER_FULLNESS          L"HevcInitialVBVBufferFullness" // amf_int64; default =  64; Initial VBV Buffer Fullness 0=0% 64=100%
+#define AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_PREANALYSIS_ENABLE      L"HevcRateControlPreAnalysisEnable"  // bool; default =  depends on USAGE; enable Pre-analysis assisted rate control 
+#define AMF_VIDEO_ENCODER_HEVC_ENABLE_VBAQ                          L"HevcEnableVBAQ"               // bool; default = depends on USAGE; Enable auto VBAQ
+
+// Motion estimation
+#define AMF_VIDEO_ENCODER_HEVC_MOTION_HALF_PIXEL                    L"HevcHalfPixel"                // bool; default= true; Half Pixel 
+#define AMF_VIDEO_ENCODER_HEVC_MOTION_QUARTERPIXEL                  L"HevcQuarterPixel"             // bool; default= true; Quarter Pixel
+
+// Dynamic properties - can be set at any time
+
+// Rate control properties
+#define AMF_VIDEO_ENCODER_HEVC_ENFORCE_HRD                          L"HevcEnforceHRD"               // bool; default = depends on USAGE; Enforce HRD
+#define AMF_VIDEO_ENCODER_HEVC_FILLER_DATA_ENABLE                   L"HevcFillerDataEnable"         // bool; default = depends on USAGE; Filler Data Enable
+#define AMF_VIDEO_ENCODER_HEVC_TARGET_BITRATE                       L"HevcTargetBitrate"            // amf_int64; default = depends on USAGE; Target bit rate in bits
+#define AMF_VIDEO_ENCODER_HEVC_PEAK_BITRATE                         L"HevcPeakBitrate"              // amf_int64; default = depends on USAGE; Peak bit rate in bits
+
+#define AMF_VIDEO_ENCODER_HEVC_MAX_AU_SIZE                          L"HevcMaxAUSize"                // amf_int64; default = 60; Max AU Size in bits
+
+#define AMF_VIDEO_ENCODER_HEVC_MIN_QP_I                             L"HevcMinQP_I"                  // amf_int64; default = depends on USAGE; Min QP; range = 
+#define AMF_VIDEO_ENCODER_HEVC_MAX_QP_I                             L"HevcMaxQP_I"                  // amf_int64; default = depends on USAGE; Max QP; range = 
+#define AMF_VIDEO_ENCODER_HEVC_MIN_QP_P                             L"HevcMinQP_P"                  // amf_int64; default = depends on USAGE; Min QP; range = 
+#define AMF_VIDEO_ENCODER_HEVC_MAX_QP_P                             L"HevcMaxQP_P"                  // amf_int64; default = depends on USAGE; Max QP; range = 
+
+#define AMF_VIDEO_ENCODER_HEVC_QP_I                                 L"HevcQP_I"                     // amf_int64; default = 26; I-frame QP; range = 0-51
+#define AMF_VIDEO_ENCODER_HEVC_QP_P                                 L"HevcQP_P"                     // amf_int64; default = 26; P-frame QP; range = 0-51
+
+#define AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_SKIP_FRAME_ENABLE       L"HevcRateControlSkipFrameEnable" // bool; default =  depends on USAGE; Rate Control Based Frame Skip 
+
+
+
+// Per-submission properties - can be set on input surface interface
+#define AMF_VIDEO_ENCODER_HEVC_END_OF_SEQUENCE                      L"HevcEndOfSequence"            // bool; default = false; generate end of sequence
+#define AMF_VIDEO_ENCODER_HEVC_FORCE_PICTURE_TYPE                   L"HevcForcePictureType"         // amf_int64(AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_ENUM); default = AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_NONE; generate particular picture type
+#define AMF_VIDEO_ENCODER_HEVC_INSERT_AUD                           L"HevcInsertAUD"                // bool; default = false; insert AUD
+#define AMF_VIDEO_ENCODER_HEVC_INSERT_HEADER                        L"HevcInsertHeader"             // bool; default = false; insert header(SPS, PPS, VPS)
+
+#define AMF_VIDEO_ENCODER_HEVC_MARK_CURRENT_WITH_LTR_INDEX          L"HevcMarkCurrentWithLTRIndex"  // amf_int64; default = N/A; Mark current frame with LTR index
+#define AMF_VIDEO_ENCODER_HEVC_FORCE_LTR_REFERENCE_BITFIELD         L"HevcForceLTRReferenceBitfield"// amf_int64; default = 0; force LTR bit-field 
+
+// Properties set by encoder on output buffer interface
+#define AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE                     L"HevcOutputDataType"           // amf_int64(AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_ENUM); default = N/A
+#define AMF_VIDEO_ENCODER_HEVC_OUTPUT_MARKED_LTR_INDEX              L"HevcMarkedLTRIndex"           // amf_int64; default = -1; Marked LTR index
+#define AMF_VIDEO_ENCODER_HEVC_OUTPUT_REFERENCED_LTR_INDEX_BITFIELD L"HevcReferencedLTRIndexBitfield"// amf_int64; default = 0; referenced LTR bit-field 
+
+// HEVC Encoder capabilities - exposed in AMFCaps interface
+#define AMF_VIDEO_ENCODER_HEVC_CAP_MAX_BITRATE                      L"HevcMaxBitrate"               // amf_int64; Maximum bit rate in bits
+#define AMF_VIDEO_ENCODER_HEVC_CAP_NUM_OF_STREAMS                   L"HevcNumOfStreams"             // amf_int64; maximum number of encode streams supported 
+#define AMF_VIDEO_ENCODER_HEVC_CAP_MAX_PROFILE                      L"HevcMaxProfile"               // amf_int64(AMF_VIDEO_ENCODER_HEVC_PROFILE_ENUM)
+#define AMF_VIDEO_ENCODER_HEVC_CAP_MAX_TIER                         L"HevcMaxTier"                  // amf_int64(AMF_VIDEO_ENCODER_HEVC_TIER_ENUM) maximum profile tier 
+#define AMF_VIDEO_ENCODER_HEVC_CAP_MAX_LEVEL                        L"HevcMaxLevel"                 // amf_int64 maximum profile level
+#define AMF_VIDEO_ENCODER_HEVC_CAP_MIN_REFERENCE_FRAMES             L"HevcMinReferenceFrames"       // amf_int64 minimum number of reference frames
+#define AMF_VIDEO_ENCODER_HEVC_CAP_MAX_REFERENCE_FRAMES             L"HevcMaxReferenceFrames"       // amf_int64 maximum number of reference frames
+
+
+//-----------------------------------------------------------------------------
+//-----------------------------------------------------------------------------
+
+#endif // __AMF_SDK_Enc_h__
+
+
+
diff --git a/configure b/configure
index 0e1ccaa..229443f 100755
--- a/configure
+++ b/configure
@@ -304,6 +304,7 @@  External library support:
 
   The following libraries provide various hardware acceleration features:
+  --disable-amf            disable AMF video encoding code [autodetect]
   --disable-audiotoolbox   disable Apple AudioToolbox code [autodetect]
   --disable-cuda           disable dynamically linked Nvidia CUDA code [autodetect]
   --enable-cuda-sdk        enable CUDA features that require the CUDA SDK [no]
   --disable-cuvid          disable Nvidia CUVID support [autodetect]
@@ -1643,6 +1644,7 @@  EXTERNAL_LIBRARY_LIST="
 HWACCEL_AUTODETECT_LIBRARY_LIST="
+    amf
     audiotoolbox
     crystalhd
     cuda
     cuvid
     d3d11va
@@ -2785,12 +2787,16 @@  scale_npp_filter_deps="cuda libnpp"
 scale_cuda_filter_deps="cuda_sdk"
 thumbnail_cuda_filter_deps="cuda_sdk"
 
+amf_deps_any="dlopen LoadLibrary"
+amf_encoder_deps="amf"
+
 nvenc_deps="cuda"
 nvenc_deps_any="libdl LoadLibrary"
 nvenc_encoder_deps="nvenc"
 
 h263_v4l2m2m_decoder_deps="v4l2_m2m h263_v4l2_m2m"
 h263_v4l2m2m_encoder_deps="v4l2_m2m h263_v4l2_m2m"
+h264_amf_encoder_deps="amf"
 h264_crystalhd_decoder_select="crystalhd h264_mp4toannexb_bsf h264_parser"
 h264_cuvid_decoder_deps="cuda cuvid"
 h264_cuvid_decoder_select="h264_mp4toannexb_bsf"
@@ -2809,6 +2815,7 @@  h264_vaapi_encoder_deps="VAEncPictureParameterBufferH264"
 h264_vaapi_encoder_select="cbs_h264 vaapi_encode"
 h264_v4l2m2m_decoder_deps="v4l2_m2m h264_v4l2_m2m"
 h264_v4l2m2m_encoder_deps="v4l2_m2m h264_v4l2_m2m"
+hevc_amf_encoder_deps="amf"
 hevc_cuvid_decoder_deps="cuda cuvid"
 hevc_cuvid_decoder_select="hevc_mp4toannexb_bsf"
 hevc_mediacodec_decoder_deps="mediacodec"
@@ -2830,6 +2837,8 @@  mjpeg_vaapi_encoder_select="vaapi_encode jpegtables"
 mpeg1_cuvid_decoder_deps="cuda cuvid"
 mpeg1_v4l2m2m_decoder_deps="v4l2_m2m mpeg1_v4l2_m2m"
 mpeg2_crystalhd_decoder_select="crystalhd"
+amf_h264_encoder_select="h264_amf_encoder"
+amf_hevc_encoder_select="hevc_amf_encoder"
 mpeg2_cuvid_decoder_deps="cuda cuvid"
 mpeg2_mmal_decoder_deps="mmal"
 mpeg2_mediacodec_decoder_deps="mediacodec"
@@ -6305,6 +6314,18 @@  else
     disable cuda cuvid nvenc
 fi
 
+if enabled x86; then
+    case $target_os in
+        mingw32*|mingw64*|win32|win64|cygwin*)
+            ;;
+        *)
+            disable amf
+            ;;
+    esac
+else
+    disable amf
+fi
+
 enabled nvenc &&
     check_cc -I$source_path <<EOF || disable nvenc
 #include "compat/nvenc/nvEncodeAPI.h"
@@ -6313,6 +6334,13 @@  void f(void) { struct { const GUID guid; } s[] = { { NV_ENC_PRESET_HQ_GUID } };
 int main(void) { return 0; }
 EOF
 
+enabled amf &&
+    check_cc -I$source_path <<EOF || disable amf
+#include "compat/amd/amfsdkenc.h"
+AMFFactory *factory;
+int main(void) { return 0; }
+EOF
+
 # Funny iconv installations are not unusual, so check it after all flags have been set
 if enabled libc_iconv; then
     check_func_headers iconv.h iconv
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index bc4d7da..cbf45ac 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -50,6 +50,7 @@  OBJS = allcodecs.o                                                      \
 # subsystems
 OBJS-$(CONFIG_AANDCTTABLES)            += aandcttab.o
 OBJS-$(CONFIG_AC3DSP)                  += ac3dsp.o ac3.o ac3tab.o
+OBJS-$(CONFIG_AMF)                     += amfenc.o
 OBJS-$(CONFIG_AUDIO_FRAME_QUEUE)       += audio_frame_queue.o
 OBJS-$(CONFIG_AUDIODSP)                += audiodsp.o
 OBJS-$(CONFIG_BLOCKDSP)                += blockdsp.o
@@ -334,6 +335,7 @@  OBJS-$(CONFIG_H264_DECODER)            += h264dec.o h264_cabac.o h264_cavlc.o \
 OBJS-$(CONFIG_H264_CUVID_DECODER)      += cuvid.o
 OBJS-$(CONFIG_H264_MEDIACODEC_DECODER) += mediacodecdec.o
 OBJS-$(CONFIG_H264_MMAL_DECODER)       += mmaldec.o
+OBJS-$(CONFIG_H264_AMF_ENCODER)        += amfenc_h264.o
 OBJS-$(CONFIG_H264_NVENC_ENCODER)      += nvenc_h264.o
 OBJS-$(CONFIG_NVENC_ENCODER)           += nvenc_h264.o
 OBJS-$(CONFIG_NVENC_H264_ENCODER)      += nvenc_h264.o
@@ -352,6 +354,7 @@  OBJS-$(CONFIG_HEVC_DECODER)            += hevcdec.o hevc_mvs.o \
                                           hevcdsp.o hevc_filter.o hevc_data.o
 OBJS-$(CONFIG_HEVC_CUVID_DECODER)      += cuvid.o
 OBJS-$(CONFIG_HEVC_MEDIACODEC_DECODER) += mediacodecdec.o
+OBJS-$(CONFIG_HEVC_AMF_ENCODER)        += amfenc_hevc.o
 OBJS-$(CONFIG_HEVC_NVENC_ENCODER)      += nvenc_hevc.o
 OBJS-$(CONFIG_NVENC_HEVC_ENCODER)      += nvenc_hevc.o
 OBJS-$(CONFIG_HEVC_QSV_DECODER)        += qsvdec_h2645.o
@@ -1056,6 +1059,7 @@  SKIPHEADERS-$(CONFIG_JNI)              += ffjni.h
 SKIPHEADERS-$(CONFIG_LIBVPX)           += libvpx.h
 SKIPHEADERS-$(CONFIG_LIBWEBP_ENCODER)  += libwebpenc_common.h
 SKIPHEADERS-$(CONFIG_MEDIACODEC)       += mediacodecdec_common.h mediacodec_surface.h mediacodec_wrapper.h mediacodec_sw_buffer.h
+SKIPHEADERS-$(CONFIG_AMF)              += amfenc.h
 SKIPHEADERS-$(CONFIG_NVENC)            += nvenc.h
 SKIPHEADERS-$(CONFIG_QSV)              += qsv.h qsv_internal.h
 SKIPHEADERS-$(CONFIG_QSVDEC)           += qsvdec.h
diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
index 8369126..d597540 100644
--- a/libavcodec/allcodecs.c
+++ b/libavcodec/allcodecs.c
@@ -649,6 +649,8 @@  static void register_all(void)
      * above is available */
     REGISTER_ENCODER(H263_V4L2M2M,      h263_v4l2m2m);
     REGISTER_ENCDEC (LIBOPENH264,       libopenh264);
+    REGISTER_ENCODER(H264_AMF,          h264_amf);
+    REGISTER_ENCODER(H264_AMF,          h264_amf_d3d11va);
     REGISTER_DECODER(H264_CUVID,        h264_cuvid);
     REGISTER_ENCODER(H264_NVENC,        h264_nvenc);
     REGISTER_ENCODER(H264_OMX,          h264_omx);
@@ -661,6 +663,8 @@  static void register_all(void)
     REGISTER_ENCODER(NVENC_H264,        nvenc_h264);
     REGISTER_ENCODER(NVENC_HEVC,        nvenc_hevc);
 #endif
+    REGISTER_ENCODER(HEVC_AMF,          hevc_amf);
+    REGISTER_ENCODER(HEVC_AMF,          hevc_amf_d3d11va);
     REGISTER_DECODER(HEVC_CUVID,        hevc_cuvid);
     REGISTER_DECODER(HEVC_MEDIACODEC,   hevc_mediacodec);
     REGISTER_ENCODER(HEVC_NVENC,        hevc_nvenc);
diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c
new file mode 100644
index 0000000..8717928
--- /dev/null
+++ b/libavcodec/amfenc.c
@@ -0,0 +1,463 @@ 
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "amfenc.h"
+
+//#include "compat/amd/amf/public/include/components/VideoEncoderVCE.h"
+//#include "compat/amd/amf/public/include/components/VideoEncoderHEVC.h"
+
+#include "libavutil/time.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/hwcontext.h"
+#include "libavutil/hwcontext_d3d11va.h"
+#include "libavutil/avassert.h"
+#include "libavutil/mem.h"
+#include "libavutil/pixdesc.h"
+#include "internal.h"
+
+#include <d3d11.h>
+
+#ifdef _WIN32
+#include "compat/w32dlfcn.h"
+#else
+#include <dlfcn.h>
+#endif
+
+#define FFMPEG_AMF_WRITER_ID L"ffmpeg_amf"
+#define AMF_DEBUG_TRACE 0
+
+const enum AVPixelFormat ff_amf_pix_fmts[] = {
+    AV_PIX_FMT_NV12,
+    AV_PIX_FMT_BGRA,
+    AV_PIX_FMT_ARGB,
+    AV_PIX_FMT_RGBA,
+    AV_PIX_FMT_YUV420P,
+    AV_PIX_FMT_YUYV422,
+    AV_PIX_FMT_D3D11,
+    AV_PIX_FMT_NONE
+};
+
+typedef struct FormatMap {
+    enum AVPixelFormat       av_format;
+    enum AMF_SURFACE_FORMAT  amf_format;
+} FormatMap;
+
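+// Translation table between FFmpeg pixel formats and AMF surface formats.
+// AV_PIX_FMT_D3D11 maps to NV12 because the D3D11 path wraps the underlying
+// NV12 texture directly (see ff_amf_encode_frame) instead of converting it.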
+static const FormatMap format_map[] =
+{
+    { AV_PIX_FMT_NONE,       AMF_SURFACE_UNKNOWN },
+    { AV_PIX_FMT_NV12,       AMF_SURFACE_NV12 },
+    { AV_PIX_FMT_BGRA,       AMF_SURFACE_BGRA },
+    { AV_PIX_FMT_ARGB,       AMF_SURFACE_ARGB },
+    { AV_PIX_FMT_RGBA,       AMF_SURFACE_RGBA },
+    { AV_PIX_FMT_GRAY8,      AMF_SURFACE_GRAY8 },
+    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YUV420P },
+    { AV_PIX_FMT_BGR0,       AMF_SURFACE_BGRA },
+    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YV12 },
+    { AV_PIX_FMT_YUYV422,    AMF_SURFACE_YUY2 },
+    { AV_PIX_FMT_D3D11,      AMF_SURFACE_NV12 },
+};
+
+static enum AMF_SURFACE_FORMAT amf_av_to_amf_format(enum AVPixelFormat fmt) 
+{
+    for (int i = 0; i < amf_countof(format_map); i++) {
+        if (format_map[i].av_format == fmt) {
+            return format_map[i].amf_format;
+        }
+    }
+    return AMF_SURFACE_UNKNOWN;
+}
+
+// AMFTraceWriter vtbl implementation: routes AMF runtime logging into av_log()
+static void AMF_CDECL_CALL AMFTraceWriter_Write(AMFTraceWriter* pThis,
+    const wchar_t* scope, const wchar_t* message) 
+{
+    AmfTraceWriter *tracer = (AmfTraceWriter*)pThis;
+#if AMF_DEBUG_TRACE
+    av_log(tracer->avctx, AV_LOG_INFO, "%ls: %ls", scope, message);
+#else
+    av_log(tracer->avctx, AV_LOG_TRACE, "%ls: %ls", scope, message);
+#endif
+}
+
+static void AMF_CDECL_CALL AMFTraceWriter_Flush(AMFTraceWriter* pThis) 
+{
+}
+
+static AMFTraceWriterVtbl tracer_vtbl =
+{
+    .Write = AMFTraceWriter_Write,
+    .Flush = AMFTraceWriter_Flush,
+};
+
+static int amf_load_library(AVCodecContext *avctx) 
+{
+    AmfContext             *ctx = avctx->priv_data;
+    AMFInit_Fn              init_fun = 0;
+    AMFQueryVersion_Fn      version_fun = 0;
+
+    ctx->library = dlopen(AMF_DLL_NAMEA, RTLD_NOW | RTLD_LOCAL);
+    AMF_RETURN_IF_FALSE(ctx, ctx->library != 0, 
+        AVERROR_UNKNOWN, "DLL %s failed to open. \n", AMF_DLL_NAMEA);
+
+    init_fun = (AMFInit_Fn)dlsym(ctx->library, AMF_INIT_FUNCTION_NAME);
+    AMF_RETURN_IF_FALSE(ctx, init_fun != 0, AVERROR_UNKNOWN, "DLL %s failed to find function %s. \n", AMF_DLL_NAMEA, AMF_INIT_FUNCTION_NAME);
+
+    version_fun = (AMFQueryVersion_Fn)dlsym(ctx->library, AMF_QUERY_VERSION_FUNCTION_NAME);
+    AMF_RETURN_IF_FALSE(ctx, version_fun != 0, AVERROR_UNKNOWN, "DLL %s failed to find function %s. \n", AMF_DLL_NAMEA, AMF_QUERY_VERSION_FUNCTION_NAME);
+
+    version_fun(&ctx->version);
+    init_fun(AMF_FULL_VERSION, &ctx->factory);
+    ctx->factory->pVtbl->GetTrace(ctx->factory, &ctx->trace);
+    ctx->factory->pVtbl->GetDebug(ctx->factory, &ctx->debug);
+    return 0;
+}
+
+
+static int amf_init_context(AVCodecContext *avctx) 
+{
+    AmfContext         *ctx = avctx->priv_data;
+    AMF_RESULT          res = AMF_OK;
+
+    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_CONSOLE, false);
+
+#if AMF_DEBUG_TRACE
+    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, true);
+    ctx->trace->pVtbl->SetWriterLevel(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, AMF_TRACE_TRACE);
+    ctx->trace->pVtbl->SetGlobalLevel(ctx->trace, AMF_TRACE_TRACE);
+#else
+    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, false);
+#endif
+    ctx->tracer.vtbl = &tracer_vtbl;
+    ctx->tracer.avctx = avctx;
+    ctx->trace->pVtbl->RegisterWriter(ctx->trace, FFMPEG_AMF_WRITER_ID, 
+        (AMFTraceWriter*)&ctx->tracer, true);
+
+    res = ctx->factory->pVtbl->CreateContext(ctx->factory, &ctx->context);
+    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "CreateContext() failed with error %d", res);
+
+    // try to reuse existing DX device
+
+    if (avctx->hw_frames_ctx) {
+        AVHWFramesContext *device_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+        if (device_ctx->device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA){
+            if (device_ctx->device_ctx->hwctx) {
+                AVD3D11VADeviceContext *device_d3d11 = (AVD3D11VADeviceContext *)device_ctx->device_ctx->hwctx;
+                res = ctx->context->pVtbl->InitDX11(ctx->context, device_d3d11->device, AMF_DX11_1);
+                if (res == AMF_OK) {
+                    ctx->hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
+                } else {
+                    av_log(avctx, AV_LOG_INFO, "amf_shared: avctx->hw_frames_ctx has non-AMD device, switching to default\n");
+                }
+
+            }
+        }
+    } else if (avctx->hw_device_ctx) {
+        AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)(avctx->hw_device_ctx->data);
+        if (device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
+            if (device_ctx->hwctx) {
+                AVD3D11VADeviceContext *device_d3d11 = (AVD3D11VADeviceContext *)device_ctx->hwctx;
+                res = ctx->context->pVtbl->InitDX11(ctx->context, device_d3d11->device, AMF_DX11_1);
+                if (res == AMF_OK) {
+                    ctx->hw_device_ctx = av_buffer_ref(avctx->hw_device_ctx);
+                } else {
+                    av_log(avctx, AV_LOG_INFO, "amf_shared: avctx->hw_device_ctx has non-AMD device, switching to default\n");
+                }
+            }
+        }
+    }
+
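+    // no usable external device was supplied - let AMF create its own device,
+    // trying DX11 first and falling back to DX9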
+    if (!ctx->hw_frames_ctx && !ctx->hw_device_ctx) {
+        res = ctx->context->pVtbl->InitDX11(ctx->context, 0, AMF_DX11_1);
+        if (res != AMF_OK) {
+            res = ctx->context->pVtbl->InitDX9(ctx->context, 0);
+            AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "InitDX9() failed with error %d", res);
+        }
+    }
+    return 0;
+}
+
+static int amf_init_encoder(AVCodecContext *avctx) 
+{
+    AmfContext          *ctx = avctx->priv_data;
+    const wchar_t       *codec_id = 0;
+    AMF_RESULT          res = AMF_OK;
+
+    switch (avctx->codec->id) {
+        case AV_CODEC_ID_H264:
+            codec_id = AMFVideoEncoderVCE_AVC;
+            break;
+        case AV_CODEC_ID_HEVC:
+            codec_id = AMFVideoEncoder_HEVC;
+            break;
+        default:
+            break;
+    }
+    AMF_RETURN_IF_FALSE(ctx, codec_id != 0, AVERROR(EINVAL), "Codec %d is not supported", avctx->codec->id);
+
+    ctx->format = amf_av_to_amf_format(avctx->pix_fmt);
+    AMF_RETURN_IF_FALSE(ctx, ctx->format != AMF_SURFACE_UNKNOWN, AVERROR(EINVAL), "Format %d is not supported", avctx->pix_fmt);
+
+    res = ctx->factory->pVtbl->CreateComponent(ctx->factory, ctx->context, codec_id, &ctx->encoder);
+    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_ENCODER_NOT_FOUND, "CreateComponent(%S) failed with error %d", codec_id, res);
+
+    ctx->eof = false;
+    return 0;
+}
+
+static int amf_terminate(AVCodecContext *avctx) 
+{
+    AmfContext*          ctx = avctx->priv_data;
+
+    if (ctx->encoder) {
+        ctx->encoder->pVtbl->Terminate(ctx->encoder);
+        ctx->encoder->pVtbl->Release(ctx->encoder);
+        ctx->encoder = 0;
+    }
+
+    if (ctx->context) {
+        ctx->context->pVtbl->Terminate(ctx->context);
+        ctx->context->pVtbl->Release(ctx->context);
+        ctx->context = 0;
+    }
+    if (ctx->hw_device_ctx){
+        av_buffer_unref(&ctx->hw_device_ctx);
+        ctx->hw_device_ctx = 0;
+    }
+    if (ctx->hw_frames_ctx) {
+        av_buffer_unref(&ctx->hw_frames_ctx);
+        ctx->hw_frames_ctx = 0;
+    }
+    
+    if (ctx->trace) {
+        ctx->trace->pVtbl->UnregisterWriter(ctx->trace, FFMPEG_AMF_WRITER_ID);
+    }
+
+    if (ctx->library) {
+        dlclose(ctx->library);
+        ctx->library = 0;
+    }
+    ctx->trace = 0;
+    ctx->debug = 0;
+    ctx->factory = 0;
+    ctx->version = 0;
+
+    return 0;
+}
+
+static int amf_copy_surface(AVCodecContext *avctx, const AVFrame *frame, 
+                            AMFSurface* surface) 
+{
+    AmfContext     *ctx = avctx->priv_data;
+    AMFPlane       *plane = 0;
+    uint8_t        *dst_data[4];
+    int             dst_linesize[4];
+
+    int planes = (int)surface->pVtbl->GetPlanesCount(surface);
+    AMF_RETURN_IF_FALSE(ctx, planes <= amf_countof(dst_data), AVERROR(EINVAL), "Invalid number of planes %d in surface", planes);
+
+    for (int i = 0; i < planes; i++) {
+        plane = surface->pVtbl->GetPlaneAt(surface, i);
+        dst_data[i] = plane->pVtbl->GetNative(plane);
+        dst_linesize[i] = plane->pVtbl->GetHPitch(plane);
+    }
+    av_image_copy(dst_data, dst_linesize,
+        (const uint8_t**)frame->data, frame->linesize, frame->format,
+        avctx->width, avctx->height);
+
+    surface->pVtbl->SetPts(surface, frame->pts);
+
+    return 0;
+}
+
+static int amf_copy_buffer(AVCodecContext *avctx, AVPacket *pkt, AMFBuffer *buffer)
+{
+    int                 ret = 0;
+    AMFVariantStruct    var;
+    int                 size = (int)buffer->pVtbl->GetSize(buffer);
+
+    if ((ret = ff_alloc_packet2(avctx, pkt, size, 0)) < 0) {
+        return ret;
+    }
+    memcpy(pkt->data, buffer->pVtbl->GetNative(buffer), size);
+
+    switch (avctx->codec->id) {
+        case AV_CODEC_ID_H264:
+            buffer->pVtbl->GetProperty(buffer, AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE, &var);
+            switch (var.int64Value) {
+                case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_IDR:
+                    pkt->flags |= AV_PKT_FLAG_KEY;
+                    break;
+                case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_I:
+                case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_P:
+                case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_B:
+                    break;
+                default:
+                    av_log(avctx, AV_LOG_ERROR, "Unknown picture type encountered, expect the output to be broken.\n");
+                    break;
+            }
+            break;
+        case AV_CODEC_ID_HEVC:
+            buffer->pVtbl->GetProperty(buffer, AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE, &var);
+            switch (var.int64Value) {
+                case AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_IDR:
+                case AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_I:
+                    pkt->flags |= AV_PKT_FLAG_KEY;
+                    break;
+                case AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_P:
+                    break;
+                default:
+                    av_log(avctx, AV_LOG_ERROR, "Unknown picture type encountered, expect the output to be broken.\n");
+                    break;
+            }
+            break;
+        default:
+            break;
+    }
+    pkt->pts = buffer->pVtbl->GetPts(buffer);
+    pkt->dts = pkt->pts;
+    return 0;
+}
+
+// amfenc API implementation
+int ff_amf_encode_init(AVCodecContext *avctx) 
+{
+    AmfContext     *ctx = avctx->priv_data;
+    int             ret = 0;
+
+    ctx->factory = 0;
+    ctx->debug = 0;
+    ctx->trace = 0;
+    ctx->context = 0;
+    ctx->encoder = 0;
+    ctx->library = 0;
+    ctx->version = 0;
+    ctx->eof = 0;
+    ctx->format = 0;
+    ctx->tracer.vtbl = 0;
+    ctx->tracer.avctx = 0;
+
+    if ((ret = amf_load_library(avctx)) == 0) {
+        if ((ret = amf_init_context(avctx)) == 0) {
+            if ((ret = amf_init_encoder(avctx)) == 0) {
+                return 0;
+            }
+        }
+    }
+    amf_terminate(avctx);
+    return ret;
+}
+
+int av_cold ff_amf_encode_close(AVCodecContext *avctx)
+{
+    return amf_terminate(avctx);
+}
+
+static GUID  AMFTextureArrayIndexGUID = 
+{ 0x28115527, 0xe7c3, 0x4b66, { 0x99, 0xd3, 0x4f, 0x2a, 0xe6, 0xb4, 0x7f, 0xaf } };
+
+int ff_amf_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+                        const AVFrame *frame, int *got_packet) 
+{
+    int             ret = 0;
+    AMF_RESULT      res = AMF_OK;
+    AmfContext     *ctx = avctx->priv_data;
+    AMFSurface     *surface = 0;
+    AMFData        *data = 0;
+    amf_bool       submitted = false;
+
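+    // Submit/poll loop: keep trying to hand the frame (or a drain request) to
+    // the encoder while collecting any ready output; when the AMF input queue
+    // is full, sleep briefly and poll again so the queue can drain.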
+    while (!submitted) {
+        if (!frame) { // submit drain
+            if (!ctx->eof) { // submit drain one time only
+                res = ctx->encoder->pVtbl->Drain(ctx->encoder);
+                if (res == AMF_INPUT_FULL) {
+                    av_usleep(1000); // input queue is full: wait, poll and submit Drain again
+                                     // need to get some output and try again
+                } else if (res == AMF_OK) {
+                    ctx->eof = true; // drain started
+                    submitted = true;
+                }
+            }
+        } else { // submit frame
+            if (surface == 0) { // prepare surface from frame one time only
+                if (frame->hw_frames_ctx && ( // HW frame detected
+                                              // check if the same hw_frames_ctx as used in initialization
+                    (ctx->hw_frames_ctx && frame->hw_frames_ctx->data == ctx->hw_frames_ctx->data) ||
+                    // check if the same hw_device_ctx as used in initialization
+                    (ctx->hw_device_ctx && ((AVHWFramesContext*)frame->hw_frames_ctx->data)->device_ctx ==
+                    (AVHWDeviceContext*)ctx->hw_device_ctx->data)
+                )) {
+                    ID3D11Texture2D *texture = (ID3D11Texture2D*)frame->data[0]; // actual texture
+                    int index = (int)(size_t)frame->data[1]; // frame->data[1] is the slice index in the texture array - tells AMF which slice to encode
+                    texture->lpVtbl->SetPrivateData(texture, &AMFTextureArrayIndexGUID, sizeof(index), &index);
+
+                    res = ctx->context->pVtbl->CreateSurfaceFromDX11Native(ctx->context, texture, &surface, NULL); // wrap the texture into an AMF surface
+                    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "CreateSurfaceFromDX11Native() failed with error %d", res);
+                    surface->pVtbl->SetCrop(surface, 0, 0, frame->width, frame->height); // decoder surfaces are vertically aligned (to 16); tell AMF the real frame size
+                    surface->pVtbl->SetPts(surface, frame->pts);
+                } else {
+                    res = ctx->context->pVtbl->AllocSurface(ctx->context, AMF_MEMORY_HOST, ctx->format, avctx->width, avctx->height, &surface);
+                    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "AllocSurface() failed with error %d", res);
+                    amf_copy_surface(avctx, frame, surface);
+                }
+            }
+            // encode
+            res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)surface);
+            if (res == AMF_INPUT_FULL) { // handle full queue
+                av_usleep(1000); // input queue is full: wait, poll and submit surface again
+            } else {
+                surface->pVtbl->Release(surface);
+                surface = NULL;
+                AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "SubmitInput() failed with error %d", res);
+                submitted = true;
+            }
+        }
+        // poll results
+        if (!data) {
+            res = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data);
+            if (data) {
+                AMFBuffer* buffer;
+                AMFGuid guid = IID_AMFBuffer();
+                data->pVtbl->QueryInterface(data, &guid, (void**)&buffer); // query for buffer interface
+                ret = amf_copy_buffer(avctx, pkt, buffer);
+                if (!ret)
+                    *got_packet = 1;
+                buffer->pVtbl->Release(buffer);
+                data->pVtbl->Release(data);
+                if (ctx->eof) {
+                    submitted = true; // we are in the drain state - no submissions
+                }
+            } else if (res == AMF_EOF) {
+                submitted = true; // drain complete
+            } else {
+                if (!submitted) {
+                    av_usleep(1000); // wait and poll again
+                }
+            }
+        }
+    }
+    return ret;
+}
diff --git a/libavcodec/amfenc.h b/libavcodec/amfenc.h
new file mode 100644
index 0000000..6b0135a
--- /dev/null
+++ b/libavcodec/amfenc.h
@@ -0,0 +1,131 @@ 
+/*
+* This file is part of FFmpeg.
+*
+* FFmpeg is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* FFmpeg is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with FFmpeg; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#ifndef AVCODEC_AMFENC_H
+#define AVCODEC_AMFENC_H
+
+#include "config.h"
+#include "avcodec.h"
+//#include "compat/amd/amf/public/include/core/Factory.h"
+#include "compat/amd/amfsdkenc.h"
+
+
+/**
+* AMF trace writer callback class
+* Used to capture all AMF logging
+*/
+
+typedef struct AmfTraceWriter {
+    AMFTraceWriterVtbl *vtbl;
+    AVCodecContext     *avctx;
+} AmfTraceWriter;
+
+/**
+* AMF encoder context
+*/
+
+typedef struct AmfContext {
+    AVClass            *avclass;
+    /** access to AMF runtime */
+    amf_handle          library; ///< handle to DLL library
+    AMFFactory         *factory; ///< pointer to AMF factory
+    AMFDebug           *debug;   ///< pointer to AMF debug interface
+    AMFTrace           *trace;   ///< pointer to AMF trace interface
+
+    amf_uint64          version; ///< version of AMF runtime
+    AmfTraceWriter      tracer;  ///< AMF writer registered with AMF
+    AMFContext         *context; ///< AMF context
+    //encoder
+    AMFComponent       *encoder; ///< AMF encoder object
+    amf_bool            eof;     ///< flag indicating EOF happened
+    AMF_SURFACE_FORMAT  format;  ///< AMF surface format
+
+    AVBufferRef        *hw_device_ctx; ///< pointer to HW accelerator (decoder)
+    AVBufferRef        *hw_frames_ctx; ///< pointer to HW accelerator (frame allocator)
+
+    /** common encoder options */
+
+    /** Static options, have to be set before Init() call */
+    int                 usage;
+    int                 profile;
+    int                 level;
+    int                 preanalysis;
+    int                 quality;
+    int                 b_frame_delta_qp;
+    int                 ref_b_frame_delta_qp;
+
+    /** Dynamic options, can be set after Init() call */
+
+    int                 rate_control_mode;
+    int                 enforce_hrd;
+    int                 filler_data;
+    int                 enable_vbaq;
+    int                 skip_frame;
+    int                 qp_i;
+    int                 qp_p;
+    int                 qp_b;
+    int                 max_au_size;
+    int                 header_spacing;
+    int                 b_frame_ref;
+    int                 intra_refresh_mb;
+    int                 slices;
+    int                 coding_mode;
+    int                 me_half_pel;
+    int                 me_quater_pel;
+
+    /** HEVC-specific options */
+
+    int                 gops_per_idr;
+    int                 header_insertion_mode;
+    int                 min_qp_i;
+    int                 max_qp_i;
+    int                 min_qp_p;
+    int                 max_qp_p;
+    int                 tier;
+} AmfContext;
+
+/** 
+* Common encoder initialization code
+*/
+int ff_amf_encode_init(AVCodecContext *avctx);
+/**
+* Common encoder termination code
+*/
+int ff_amf_encode_close(AVCodecContext *avctx);
+
+/**
+* Encode one frame - common for all AMF encoders
+*/
+int ff_amf_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+    const AVFrame *frame, int *got_packet);
+
+/**
+* Supported formats
+*/
+extern const enum AVPixelFormat ff_amf_pix_fmts[];
+
+/**
+* Error handling helper
+*/
+#define AMF_RETURN_IF_FALSE(avctx, exp, ret_value, /*optional message,*/ ...) \
+    if (!(exp)) { \
+        av_log(avctx, AV_LOG_ERROR, __VA_ARGS__); \
+        return ret_value; \
+    }
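+// Typical use (mirrors the call sites in amfenc.c):
+//   AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "CreateContext() failed with error %d", res);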
+
+#endif //AVCODEC_AMFENC_H
\ No newline at end of file
diff --git a/libavcodec/amfenc_h264.c b/libavcodec/amfenc_h264.c
new file mode 100644
index 0000000..a6e0f3c
--- /dev/null
+++ b/libavcodec/amfenc_h264.c
@@ -0,0 +1,467 @@ 
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "amfenc.h"
+//#include "compat/amd/amf/public/include/components/VideoEncoderVCE.h"
+#include "libavutil/opt.h"
+#include "libavutil/internal.h"
+#include "internal.h"
+
+#define OFFSET(x) offsetof(AmfContext, x)
+#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
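+// All options below store their values in the shared AmfContext (amfenc.h),
+// which is used by both the H.264 and HEVC wrappers.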
+
+static const AVOption options[] = {
+    // Static
+    /// Usage
+    { "usage",          "Encoder Usage",        OFFSET(usage),  AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_USAGE_TRANSCONDING      }, AMF_VIDEO_ENCODER_USAGE_TRANSCONDING, AMF_VIDEO_ENCODER_USAGE_WEBCAM, VE, "usage" },
+    { "transcoding",    "Generic Transcoding",  0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_USAGE_TRANSCONDING      }, 0, 0, VE, "usage" },
+    { "ultralowlatency","",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_USAGE_ULTRA_LOW_LATENCY }, 0, 0, VE, "usage" },
+    { "lowlatency",     "",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_USAGE_LOW_LATENCY       }, 0, 0, VE, "usage" },
+    { "webcam",         "Webcam",               0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_USAGE_WEBCAM            }, 0, 0, VE, "usage" },
+
+    /// Profile,
+    { "profile",        "Profile",              OFFSET(profile),AV_OPT_TYPE_INT, { .i64 = AMF_VIDEO_ENCODER_PROFILE_MAIN       }, AMF_VIDEO_ENCODER_PROFILE_BASELINE, AMF_VIDEO_ENCODER_PROFILE_HIGH, VE, "profile" },
+    { "baseline",       "",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_PROFILE_BASELINE }, 0, 0, VE, "profile" },
+    { "main",           "",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_PROFILE_MAIN     }, 0, 0, VE, "profile" },
+    { "high",           "",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_PROFILE_HIGH     }, 0, 0, VE, "profile" },
+
+    /// Profile Level
+    { "level",          "Profile Level",        OFFSET(level),  AV_OPT_TYPE_INT,   { .i64 = 0  }, 0, 62, VE, "level" },
+    { "auto",           "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 0  }, 0, 0,  VE, "level" },
+    { "1.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 10 }, 0, 0,  VE, "level" },
+    { "1.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 11 }, 0, 0,  VE, "level" },
+    { "1.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 12 }, 0, 0,  VE, "level" },
+    { "1.3",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 13 }, 0, 0,  VE, "level" },
+    { "2.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 20 }, 0, 0,  VE, "level" },
+    { "2.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 21 }, 0, 0,  VE, "level" },
+    { "2.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 22 }, 0, 0,  VE, "level" },
+    { "3.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 30 }, 0, 0,  VE, "level" },
+    { "3.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 31 }, 0, 0,  VE, "level" },
+    { "3.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 32 }, 0, 0,  VE, "level" },
+    { "4.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 40 }, 0, 0,  VE, "level" },
+    { "4.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 41 }, 0, 0,  VE, "level" },
+    { "4.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 42 }, 0, 0,  VE, "level" },
+    { "5.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 50 }, 0, 0,  VE, "level" },
+    { "5.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 51 }, 0, 0,  VE, "level" },
+    { "5.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 52 }, 0, 0,  VE, "level" },
+    { "6.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 60 }, 0, 0,  VE, "level" },
+    { "6.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 61 }, 0, 0,  VE, "level" },
+    { "6.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 62 }, 0, 0,  VE, "level" },
+
+
+    /// Quality Preset
+    { "quality",        "Quality Preference",                   OFFSET(quality),    AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_QUALITY_PRESET_SPEED    }, AMF_VIDEO_ENCODER_QUALITY_PRESET_BALANCED, AMF_VIDEO_ENCODER_QUALITY_PRESET_QUALITY, VE, "quality" },
+    { "speed",          "Prefer Speed",                         0,                  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_QUALITY_PRESET_SPEED    },       0, 0, VE, "quality" },
+    { "balanced",       "Balanced",                             0,                  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_QUALITY_PRESET_BALANCED },    0, 0, VE, "quality" },
+    { "quality",        "Prefer Quality",                       0,                  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_QUALITY_PRESET_QUALITY  },     0, 0, VE, "quality" },
+
+    // Dynamic
+    /// Rate Control Method
+    { "rc",             "Rate Control Method",                  OFFSET(rate_control_mode),  AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR    }, AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP, AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_LATENCY_CONSTRAINED_VBR, VE, "rc" },
+    { "cqp",            "Constant Quantization Parameter",      0,                          AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP             }, 0, 0, VE, "rc" },
+    { "cbr",            "Constant Bitrate",                     0,                          AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CBR                     }, 0, 0, VE, "rc" },
+    { "vbr_peak",       "Peak Constrained Variable Bitrate",    0,                          AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR    }, 0, 0, VE, "rc" },
+    { "vbr_latency",    "Latency Constrained Variable Bitrate", 0,                          AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_LATENCY_CONSTRAINED_VBR }, 0, 0, VE, "rc" },
+
+    /// Enforce HRD, Filler Data, VBAQ, Frame Skipping
+    { "enforce_hrd",    "Enforce HRD",                          OFFSET(enforce_hrd),        AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
+    { "filler_data",    "Filler Data Enable",                   OFFSET(filler_data),        AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
+    { "vbaq",           "Enable VBAQ",                          OFFSET(enable_vbaq),        AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
+    { "frame_skipping", "Rate Control Based Frame Skip",        OFFSET(skip_frame),         AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE, NULL },
+
+    /// QP Values
+    { "qp_i",           "Quantization Parameter for I-Frame",   OFFSET(qp_i),               AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 51, VE },
+    { "qp_p",           "Quantization Parameter for P-Frame",   OFFSET(qp_p),               AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 51, VE },
+    { "qp_b",           "Quantization Parameter for B-Frame",   OFFSET(qp_b),               AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 51, VE },
+
+    /// Pre-Pass, Pre-Analysis, Two-Pass
+    { "preanalysis",    "Pre-Analysis Mode",                    OFFSET(preanalysis),        AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
+
+    /// Maximum Access Unit Size
+    { "max_au_size",    "Maximum Access Unit Size (in bits)",   OFFSET(max_au_size),        AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE, NULL },
+
+    /// Header Insertion Spacing
+    { "header_spacing", "Header Insertion Spacing",             OFFSET(header_spacing),     AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1000, VE },
+
+    /// B-Frames
+    // BPicturesPattern=bf
+    { "bf_delta_qp",    "B-Picture Delta QP",                   OFFSET(b_frame_delta_qp),   AV_OPT_TYPE_INT,  { .i64 = 4 }, -10, 10, VE, NULL },
+    { "bf_ref",         "Enable Reference to B-Frames",         OFFSET(b_frame_ref),        AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE, NULL },
+    { "bf_ref_delta_qp","Reference B-Picture Delta QP",         OFFSET(ref_b_frame_delta_qp), AV_OPT_TYPE_INT,  { .i64 = 4 }, -10, 10, VE, NULL },
+
+    /// Intra-Refresh
+    { "intra_refresh_mb","Number of Intra-Refresh Macroblocks per Slot",       OFFSET(intra_refresh_mb),    AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
+    { "slices",         "Number of Slices per Frame",           OFFSET(slices),        AV_OPT_TYPE_INT,   { .i64 = 1 } , 1, 8160, VE, NULL },
+
+    /// coder
+    { "coding",         "Coding Type",                          OFFSET(coding_mode),   AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_UNDEFINED }, AMF_VIDEO_ENCODER_UNDEFINED, AMF_VIDEO_ENCODER_CALV, VE, "coding" },
+    { "auto",           "Automatic",                            0,                     AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_UNDEFINED }, 0, 0, VE, "coding" },
+    { "cavlc",          "Context Adaptive Variable-Length Coding", 0,                  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_CALV },      0, 0, VE, "coding" },
+    { "cabac",          "Context Adaptive Binary Arithmetic Coding", 0,                AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_CABAC },     0, 0, VE, "coding" },
+
+    { "me_half_pel",    "Enable ME Half Pixel",                 OFFSET(me_half_pel),   AV_OPT_TYPE_BOOL,  { .i64 = 1 }, 0, 1, VE, NULL },
+    { "me_quater_pel",  "Enable ME Quarter Pixel",              OFFSET(me_quater_pel), AV_OPT_TYPE_BOOL,  { .i64 = 1 }, 0, 1, VE, NULL },
+
+    { NULL }
+};
+
+static av_cold int amf_encode_init_h264(AVCodecContext *avctx)
+{
+    int                 ret = 0;
+    AMF_RESULT          res = AMF_OK;
+    AmfContext         *ctx = avctx->priv_data;
+    AMFVariantStruct    var = {0};
+    amf_int64           profile_level = 0;
+
+    AMFSize             framesize = AMFConstructSize(avctx->width, avctx->height);
+    AMFRate             framerate = AMFConstructRate(avctx->time_base.den, avctx->time_base.num * avctx->ticks_per_frame);
+
+    int                 deblocking_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
+
+    if ((ret = ff_amf_encode_init(avctx)) != 0)
+        return ret;
+
+    // Static parameters
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_USAGE, ctx->usage);
+
+    AMF_ASSIGN_PROPERTY_SIZE(res, ctx->encoder, AMF_VIDEO_ENCODER_FRAMESIZE, framesize);
+
+    AMF_ASSIGN_PROPERTY_RATE(res, ctx->encoder, AMF_VIDEO_ENCODER_FRAMERATE, framerate);
+
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PROFILE, ctx->profile);
+
+    profile_level = ctx->level;
+    if (profile_level == 0) {
+        // Automatic detection of correct profile level.
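+        // Each row is (max luma samples per frame, max luma samples per second, level);
+        // the first row that fits the configured resolution and frame rate is used.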
+        struct {
+            uint64_t  max_samples;
+            uint64_t max_samples_per_sec;
+            int level;
+        } restrictions[] = {
+            { 25344,    380160, 10 },
+            { 101376,   768000, 11 },
+            { 101376,   1536000, 12 },
+            //{   101376,    3041280, 13 }, // Backwards compatible 2.0
+            { 101376,   3041280, 20 },
+            { 202752,   5068800, 21 },
+            { 414720,   5184000, 22 },
+            { 414720,   10368000, 30 },
+            { 921600,   27648000, 31 },
+            { 1310720,  55296000, 32 },
+            //{  2097152,   62914560, 40 }, // Backwards compatible 4.1
+            { 2097152,  62914560, 41 },
+            { 2228224,  133693440, 42 },
+            { 5652480,  150994944, 50 },
+            { 9437184,  251658240, 51 },
+            { 9437184,  530841600, 52 },
+            { 35651584, 1069547520, 60 },
+            { 35651584, 2139095040, 61 },
+            { 35651584, 4278190080, 62 },
+            { 0, 0, -1 }
+        };
+        uint64_t samples = framesize.width * framesize.height;
+        uint64_t samples_per_sec = (samples * framerate.num) / framerate.den;
+        profile_level = 52; // Default to 5.2 for now.
+        for (unsigned int index = 0; restrictions[index].level != -1; index++) {
+            if ((samples < restrictions[index].max_samples)
+                && (samples_per_sec < restrictions[index].max_samples_per_sec)) {
+                profile_level = restrictions[index].level;
+                break;
+            }
+        }
+    }
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PROFILE_LEVEL, profile_level);
+
+    // Maximum Reference Frames
+    if (avctx->refs != -1) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MAX_NUM_REFRAMES, avctx->refs);
+    }
+    if (avctx->sample_aspect_ratio.den && avctx->sample_aspect_ratio.num) {
+        AMFRatio ratio = AMFConstructRatio(avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
+        AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder, AMF_VIDEO_ENCODER_ASPECT_RATIO, ratio);
+    }
+
+    /// Color Range (Partial/TV/MPEG or Full/PC/JPEG)
+    if (avctx->color_range == AVCOL_RANGE_JPEG) {
+        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_FULL_RANGE_COLOR, 1);
+    }
+
+    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_RATE_CONTROL_PREANALYSIS_ENABLE, false);
+        if (ctx->preanalysis)
+            av_log(ctx, AV_LOG_WARNING, "Pre-Analysis is not supported by cqp Rate Control Method, automatically disabled.\n");
+    } else {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_RATE_CONTROL_PREANALYSIS_ENABLE, ctx->preanalysis);
+    }
+
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_QUALITY_PRESET, ctx->quality);
+
+    // Initialize Encoder
+    res = ctx->encoder->pVtbl->Init(ctx->encoder, ctx->format, avctx->width, avctx->height);
+    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "encoder->Init() failed with error %d", res);
+
+    // Dynamic parameters
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD, ctx->rate_control_mode);
+
+    /// VBV Buffer
+    if (avctx->rc_buffer_size != 0)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_VBV_BUFFER_SIZE, avctx->rc_buffer_size);
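+    // AMF takes the initial VBV fullness as a fraction in 1/64 steps (0 = empty, 64 = full)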
+    if (avctx->rc_buffer_size != 0 && avctx->rc_initial_buffer_occupancy != 0) {
+        int percent = avctx->rc_initial_buffer_occupancy * 64 / avctx->rc_buffer_size;
+        if (percent > 64)
+            percent = 64;
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_INITIAL_VBV_BUFFER_FULLNESS, percent);
+    }
+    /// Maximum Access Unit Size
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MAX_AU_SIZE, ctx->max_au_size);
+
+    
+    // QP Minimum / Maximum
+    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MIN_QP, 0);
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MAX_QP, 51);
+    } else {
+        if (avctx->qmin != -1) {
+            int qval = avctx->qmin > 51 ? 51 : avctx->qmin;
+            AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MIN_QP, qval);
+        }            
+        if (avctx->qmax != -1) {
+            int qval = avctx->qmax > 51 ? 51 : avctx->qmax;
+            AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MAX_QP, qval);
+        }
+    }
+    // QP Values
+    if (ctx->qp_i != -1)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_QP_I, ctx->qp_i);
+    if (ctx->qp_p != -1)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_QP_P, ctx->qp_p);
+    if (ctx->qp_b != -1)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_QP_B, ctx->qp_b);
+
+    // Bitrate
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_TARGET_BITRATE, avctx->bit_rate);
+    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CBR) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PEAK_BITRATE, avctx->bit_rate);
+    } else {
+        int rc_max_rate = avctx->rc_max_rate >= avctx->bit_rate ? avctx->rc_max_rate : avctx->bit_rate * 13 / 10;
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PEAK_BITRATE, rc_max_rate);
+    }
+    // Enforce HRD, Filler Data, VBAQ, Frame Skipping, Deblocking Filter
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_ENFORCE_HRD, !!ctx->enforce_hrd);
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_FILLER_DATA_ENABLE, !!ctx->filler_data);
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_RATE_CONTROL_SKIP_FRAME_ENABLE, !!ctx->skip_frame);
+    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {
+        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_ENABLE_VBAQ, false);
+        if (ctx->enable_vbaq)
+            av_log(ctx, AV_LOG_WARNING, "VBAQ is not supported by cqp Rate Control Method, automatically disabled.\n");
+    } else {
+        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_ENABLE_VBAQ, !!ctx->enable_vbaq);
+    }
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_DE_BLOCKING_FILTER, !!deblocking_filter);
+
+    // B-Frames
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_B_PIC_PATTERN, avctx->max_b_frames);
+    if (avctx->max_b_frames && res == AMF_OK) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_B_PIC_DELTA_QP, ctx->b_frame_delta_qp);
+        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_B_REFERENCE_ENABLE, !!ctx->b_frame_ref);
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_REF_B_PIC_DELTA_QP, ctx->ref_b_frame_delta_qp);
+    }
+
+    // Keyframe Interval
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_IDR_PERIOD, avctx->gop_size);
+
+    // Header Insertion Spacing
+    if (ctx->header_spacing >= 0)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEADER_INSERTION_SPACING, ctx->header_spacing);
+
+    // Intra-Refresh, Slicing
+    if (ctx->intra_refresh_mb > 0)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_INTRA_REFRESH_NUM_MBS_PER_SLOT, ctx->intra_refresh_mb);
+    if (ctx->slices > 1)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_SLICES_PER_FRAME, ctx->slices);
+    
+    // Coding
+    if (ctx->coding_mode != 0)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_CABAC_ENABLE, ctx->coding_mode);
+
+    // Motion Estimation
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_MOTION_HALF_PIXEL, !!ctx->me_half_pel);
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_MOTION_QUARTERPIXEL, !!ctx->me_quater_pel);
+
+    // fill extradata
+    AMFVariantInit(&var);
+    res = ctx->encoder->pVtbl->GetProperty(ctx->encoder, AMF_VIDEO_ENCODER_EXTRADATA, &var);
+    if (res == AMF_OK && var.pInterface) {
+        AMFBuffer* buffer;
+        AMFGuid guid = IID_AMFBuffer();
+
+        var.pInterface->pVtbl->QueryInterface(var.pInterface, &guid, (void**)&buffer); // query for buffer interface
+
+        avctx->extradata_size = (int)buffer->pVtbl->GetSize(buffer);
+        avctx->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
+        if (!avctx->extradata) {
+            buffer->pVtbl->Release(buffer);
+            var.pInterface->pVtbl->Release(var.pInterface);
+            return AVERROR(ENOMEM);
+        }
+        memcpy(avctx->extradata, buffer->pVtbl->GetNative(buffer), avctx->extradata_size);
+
+        buffer->pVtbl->Release(buffer);
+        var.pInterface->pVtbl->Release(var.pInterface);
+    }
+    return 0;
+}
+
+
+
+static const AVCodecDefault defaults[] = {
+    { "refs",       "-1" },
+    { "aspect",     "0" },
+    { "sar",        "0" },
+    { "qmin",       "-1" },
+    { "qmax",       "-1" },
+    { "b",          "2M" },
+    { "maxrate",    "3M" },
+    { "g",          "250" },
+    { "keyint_min", "0" },
+    { "bf",         "0" },
+    { "slices",     "1" },
+    { NULL },
+};
+
+static const AVClass h264_amf_class = {
+    .class_name = "h264_amf",
+    .item_name = av_default_item_name,
+    .option = options,
+    .version = LIBAVUTIL_VERSION_INT,
+};
+static const AVClass h264_amf_d3d11va_class = {
+    .class_name = "h264_amf_d3d11va",
+    .item_name = av_default_item_name,
+    .option = options,
+    .version = LIBAVUTIL_VERSION_INT,
+};
+// regular encoder
+AVCodec ff_h264_amf_encoder = {
+    .name = "h264_amf",
+    .long_name = NULL_IF_CONFIG_SMALL("AMD AMF H.264 Encoder"),
+    .type = AVMEDIA_TYPE_VIDEO,
+    .id = AV_CODEC_ID_H264,
+    .init = amf_encode_init_h264,
+    .encode2 = ff_amf_encode_frame,
+    .close = ff_amf_encode_close,
+    .priv_data_size = sizeof(AmfContext),
+    .priv_class = &h264_amf_class,
+    .defaults = defaults,
+    .capabilities = AV_CODEC_CAP_DELAY,
+    .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
+    .pix_fmts = ff_amf_pix_fmts,
+};
+// encoder connected with D3D11 HW accelerator
+AVCodec ff_h264_amf_d3d11va_encoder = {
+    .name = "h264_amf_d3d11va",
+    .long_name = NULL_IF_CONFIG_SMALL("AMD AMF H.264 Encoder with d3d11va"),
+    .type = AVMEDIA_TYPE_VIDEO,
+    .id = AV_CODEC_ID_H264,
+    .init = amf_encode_init_h264,
+    .encode2 = ff_amf_encode_frame,
+    .close = ff_amf_encode_close,
+    .priv_data_size = sizeof(AmfContext),
+    .priv_class = &h264_amf_d3d11va_class,
+    .defaults = defaults,
+    .capabilities = AV_CODEC_CAP_DELAY,
+    .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
+    .pix_fmts = ff_amf_pix_fmts,
+};
+
+/**
+* Basic test BAT file:
+echo off
+if "%~1"=="" (
+echo input file name is empty. Use basic_transcode_amf_h264.bat video.mp4
+goto error
+)
+
+SET "CWD=%~dp0"
+SET bitrate=5M
+SET maxbitrate=6M
+SET bufsize=2M
+SET x264_preset=veryfast
+SET amf_quality=speed
+
+
+rem veryfast and zerolatency options make x264 comparable with VCE
+
+rem change path to ffmpeg.exe if needed
+
+"%CWD%\..\bin\ffmpeg.exe" -y -t 100 -threads 0 -i "%~1" -c:v h264_amf  -b:v %bitrate% -maxrate %maxbitrate% -bufsize %bufsize% -rc vbr_peak -quality %amf_quality% out_amf_h264.mp4
+"%CWD%\..\bin\ffmpeg.exe" -y -t 100 -threads 0 -i "%~1" -c:v libx264   -b:v %bitrate% -maxrate %maxbitrate% -bufsize %bufsize% -preset %x264_preset% -nal-hrd vbr -tune zerolatency out_x264_h264.mp4
+
+echo PSNR > result.txt
+
+"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -threads 0 -i "%~1" -i out_amf_h264.mp4  -lavfi psnr="stats_file='amf_h264.psnr.log'"  -f null - > "trace.txt" 2>&1
+for /f "tokens=*" %%A in ('type "trace.txt"') do @echo amf_h264 : %%A >end.txt
+type end.txt >> result.txt
+
+
+"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -threads 0 -i "%~1" -i out_x264_h264.mp4 -lavfi psnr="stats_file='x264_h264.psnr.log'" -f null - > "trace.txt" 2>&1
+for /f "tokens=*" %%A in ('type "trace.txt"') do @echo x264_h264: %%A >end.txt
+type end.txt >> result.txt
+
+
+echo SSIM >> result.txt
+
+"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -threads 0 -i "%~1" -i out_amf_h264.mp4  -lavfi ssim="stats_file='amf_h264_ssim.log'" -f null - > "trace.txt"  2>&1
+for /f "tokens=*" %%A in ('type "trace.txt"') do @echo amf_h264 : %%A >end.txt
+type end.txt >> result.txt
+
+"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -threads 0 -i "%~1" -i out_x264_h264.mp4  -lavfi ssim="stats_file='x264_h264_ssim.log'" -f null - > "trace.txt"  2>&1
+for /f "tokens=*" %%A in ('type "trace.txt"') do @echo x264_h264: %%A >end.txt
+type end.txt >> result.txt
+
+del trace.txt
+del end.txt
+
+echo
+type result.txt
+
+
+:error
+*/
+
+
+/**
+* d3d11va integration test bat file
+rem echo off
+if "%~1"=="" (
+echo input file name is empty. Use dx11_transcode_amf_h264.bat video.mp4
+goto error
+)
+
+SET "CWD=%~dp0"
+SET bitrate=5M
+SET maxbitrate=6M
+SET bufsize=2M
+
+rem change path to ffmpeg.exe if needed
+
+"%CWD%\..\bin\ffmpeg.exe" -y -t 100 -hwaccel d3d11va -hwaccel_output_format d3d11 -threads 1 -i "%~1" -c:v h264_amf          -b:v %bitrate% -maxrate %maxbitrate% -bufsize %bufsize% -rc vbr_peak shared_dx11_amf_h264.mp4
+"%CWD%\..\bin\ffmpeg.exe" -y -t 100 -hwaccel d3d11va                              -threads 1 -i "%~1" -c:v h264_amf_d3d11va  -b:v %bitrate% -maxrate %maxbitrate% -bufsize %bufsize% -rc vbr_peak custom_dx11_amf_h264.mp4
+
+*/
+
diff --git a/libavcodec/amfenc_hevc.c b/libavcodec/amfenc_hevc.c
new file mode 100644
index 0000000..4d3c7d4
--- /dev/null
+++ b/libavcodec/amfenc_hevc.c
@@ -0,0 +1,354 @@ 
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "amfenc.h"
+//#include "compat/amd/amf/public/include/components/VideoEncoderHEVC.h"
+#include "libavutil/opt.h"
+#include "libavutil/internal.h"
+#include "internal.h"
+
+#define OFFSET(x) offsetof(AmfContext, x)
+#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
+static const AVOption options[] = {
+    { "usage",          "Set the encoding usage",             OFFSET(usage),          AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING }, AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING, AMF_VIDEO_ENCODER_HEVC_USAGE_WEBCAM, VE, "usage" },
+    { "transcoding",    "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING },         0, 0, VE, "usage" },
+    { "ultralowlatency","", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_ULTRA_LOW_LATENCY },    0, 0, VE, "usage" },
+    { "lowlatency",     "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_LOW_LATENCY },          0, 0, VE, "usage" },
+    { "webcam",         "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_WEBCAM },               0, 0, VE, "usage" },
+
+    { "profile",        "Set the profile (default main)",           OFFSET(profile),   AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN }, AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN, AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN, VE, "profile" },
+    { "main",           "", 0,                      AV_OPT_TYPE_CONST,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN }, 0, 0, VE, "profile" },
+
+    { "profile_tier",   "Set the profile tier (default main)",      OFFSET(tier), AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_TIER_MAIN }, AMF_VIDEO_ENCODER_HEVC_TIER_MAIN, AMF_VIDEO_ENCODER_HEVC_TIER_HIGH, VE, "tier" },
+    { "main",           "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_TIER_MAIN }, 0, 0, VE, "tier" },
+    { "high",           "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_TIER_HIGH }, 0, 0, VE, "tier" },
+
+    { "level",          "Set the encoding level (default auto)",    OFFSET(level), AV_OPT_TYPE_INT,{ .i64 = 0 }, 0, AMF_LEVEL_6_2, VE, "level" },
+    { "auto",           "", 0, AV_OPT_TYPE_CONST, { .i64 = 0             }, 0, 0, VE, "level" },
+    { "1.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_1   }, 0, 0, VE, "level" },
+    { "2.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_2   }, 0, 0, VE, "level" },
+    { "2.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_2_1 }, 0, 0, VE, "level" },
+    { "3.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_3   }, 0, 0, VE, "level" },
+    { "3.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_3_1 }, 0, 0, VE, "level" },
+    { "4.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_4   }, 0, 0, VE, "level" },
+    { "4.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_4_1 }, 0, 0, VE, "level" },
+    { "5.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_5   }, 0, 0, VE, "level" },
+    { "5.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_5_1 }, 0, 0, VE, "level" },
+    { "5.2",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_5_2 }, 0, 0, VE, "level" },
+    { "6.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_6   }, 0, 0, VE, "level" },
+    { "6.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_6_1 }, 0, 0, VE, "level" },
+    { "6.2",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_6_2 }, 0, 0, VE, "level" },
+    
+    { "quality",        "Set the encoding quality",                 OFFSET(quality),      AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_SPEED }, AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_QUALITY, AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_SPEED, VE, "quality" },
+    { "balanced",       "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_BALANCED }, 0, 0, VE, "quality" },
+    { "speed",          "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_SPEED    }, 0, 0, VE, "quality" },
+    { "quality",        "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_QUALITY  }, 0, 0, VE, "quality" },
+
+    { "rc",             "Set the rate control mode",                OFFSET(rate_control_mode),   AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR }, AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CONSTANT_QP, AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CBR, VE, "rc" },
+    { "cqp",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CONSTANT_QP             }, 0, 0, VE, "rc" },
+    { "cbr",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CBR                     }, 0, 0, VE, "rc" },
+    { "vbr_peak",       "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR    }, 0, 0, VE, "rc" },
+    { "vbr_latency",    "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_LATENCY_CONSTRAINED_VBR }, 0, 0, VE, "rc" },
+
+    { "header_insertion_mode",        "Set header insertion mode",  OFFSET(header_insertion_mode),      AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_NONE }, AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_NONE, AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_IDR_ALIGNED, VE, "hdrmode" },
+    { "balanced",       "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_NONE        }, 0, 0, VE, "hdrmode" },
+    { "speed",          "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_GOP_ALIGNED }, 0, 0, VE, "hdrmode" },
+    { "quality",        "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_IDR_ALIGNED }, 0, 0, VE, "hdrmode" },
+
+    { "gops_per_idr",    "GOPs per IDR 0-no IDR will be inserted",  OFFSET(gops_per_idr),  AV_OPT_TYPE_INT,{ .i64 = 60 }, 0, INT_MAX, VE },
+    { "preanalysis",    "Enable preanalysis",                       OFFSET(preanalysis),   AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
+    { "vbaq",           "Enable VBAQ",                              OFFSET(enable_vbaq),   AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
+    { "enforce_hrd",    "Enforce HRD",                              OFFSET(enforce_hrd),   AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
+    { "filler_data",    "Filler Data Enable",                       OFFSET(filler_data),   AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
+    { "max_au_size",    "Max AU Size in bits",                      OFFSET(max_au_size),   AV_OPT_TYPE_INT,{ .i64 = 0 }, 0, INT_MAX, VE, NULL },
+    { "min_qp_i",       "min quantization parameter for I-frame",   OFFSET(min_qp_i),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
+    { "max_qp_i",       "max quantization parameter for I-frame",   OFFSET(max_qp_i),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
+    { "min_qp_p",       "min quantization parameter for P-frame",   OFFSET(min_qp_p),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
+    { "max_qp_p",       "max quantization parameter for P-frame",   OFFSET(max_qp_p),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
+    { "qp_p",           "quantization parameter for P-frame",       OFFSET(qp_p),          AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
+    { "qp_i",           "quantization parameter for I-frame",       OFFSET(qp_i),          AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
+    { "skip_frame",     "Rate Control Based Frame Skip",            OFFSET(skip_frame),    AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
+    { "me_half_pel",    "Enable ME Half Pixel",                     OFFSET(me_half_pel),   AV_OPT_TYPE_BOOL,{ .i64 = 1 }, 0, 1, VE, NULL },
+    { "me_quater_pel",  "Enable ME Quarter Pixel ",                 OFFSET(me_quater_pel), AV_OPT_TYPE_BOOL,{ .i64 = 1 }, 0, 1, VE, NULL },
+
+    { NULL }
+};
+
+static av_cold int amf_encode_init_hevc(AVCodecContext *avctx)
+{
+    int                 ret = 0;
+    AMF_RESULT          res = AMF_OK;
+    AmfContext         *ctx = avctx->priv_data;
+    AMFVariantStruct    var = {0};
+
+    AMFSize             framesize = AMFConstructSize(avctx->width, avctx->height);
+    AMFRate             framerate = AMFConstructRate(avctx->time_base.den, avctx->time_base.num * avctx->ticks_per_frame);
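+    // AMF takes an explicit frame rate: it is reconstructed here as the inverse of the
+    // effective time base (time_base scaled by ticks_per_frame).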
+
+    int                 deblocking_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
+
+    if ((ret = ff_amf_encode_init(avctx)) < 0)
+        return ret;
+
+    // init static parameters
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_USAGE, ctx->usage);
+
+    AMF_ASSIGN_PROPERTY_SIZE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_FRAMESIZE, framesize);
+
+    AMF_ASSIGN_PROPERTY_RATE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_FRAMERATE, framerate);
+
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PROFILE, ctx->profile);
+
+    switch (ctx->profile) {
+    case AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN:
+        avctx->profile = FF_PROFILE_HEVC_MAIN;
+        break;
+    default:
+        break;
+    }
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_TIER, ctx->tier);
+
+    if (ctx->level != 0) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PROFILE_LEVEL, ctx->level);
+        avctx->level = ctx->level;
+    }
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET, ctx->quality);
+    // Maximum Reference Frames
+    if (avctx->refs != 0) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MAX_NUM_REFRAMES, avctx->refs);
+    }
+    // Aspect Ratio
+    if (avctx->sample_aspect_ratio.den && avctx->sample_aspect_ratio.num) {
+        AMFRatio ratio = AMFConstructRatio(avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
+        AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_ASPECT_RATIO, ratio);
+    }
+
+    // Picture control properties
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_NUM_GOPS_PER_IDR, ctx->gops_per_idr);
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_GOP_SIZE, avctx->gop_size);
+    if (avctx->slices > 1) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_SLICES_PER_FRAME, avctx->slices);
+    }
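+    // the AMF property is a "disable" flag, hence the inversion of the lavc loop filter flag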
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_DE_BLOCKING_FILTER_DISABLE, !deblocking_filter);
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE, ctx->header_insertion_mode);
+
+    // Rate control
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD, ctx->rate_control_mode);
+    if (avctx->rc_buffer_size)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_VBV_BUFFER_SIZE, avctx->rc_buffer_size);
+
+    if (avctx->rc_initial_buffer_occupancy != 0) {
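+        // the AMF initial VBV fullness property is expressed on a 0..64 scale
+        // (64 == buffer completely full), so convert the occupancy and clamp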
+        int percent = avctx->rc_initial_buffer_occupancy * 64 / avctx->rc_buffer_size;
+        if (percent > 64)
+            percent = 64;
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_INITIAL_VBV_BUFFER_FULLNESS, percent);
+    }
+    // Pre-Pass, Pre-Analysis, Two-Pass
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_PREANALYSIS_ENABLE, ctx->preanalysis);
+
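+    // VBAQ (variance based adaptive quantization) cannot be combined with
+    // constant-QP rate control, so it is forced off in that mode.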
+    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CONSTANT_QP) {
+        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_ENABLE_VBAQ, 0);
+        if (ctx->enable_vbaq)
+            av_log(ctx, AV_LOG_WARNING, "VBAQ is not supported with the cqp rate control method, automatically disabled.\n");
+    } else {
+        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_ENABLE_VBAQ, !!ctx->enable_vbaq);
+    }
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MOTION_HALF_PIXEL, ctx->me_half_pel);
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MOTION_QUARTERPIXEL, ctx->me_quater_pel);
+
+    // init encoder
+    res = ctx->encoder->pVtbl->Init(ctx->encoder, ctx->format, avctx->width, avctx->height);
+    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "encoder->Init() failed with error %d", res);
+
+    // init dynamic rate control params
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_ENFORCE_HRD, ctx->enforce_hrd);
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_FILLER_DATA_ENABLE, ctx->filler_data);
+
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_TARGET_BITRATE, avctx->bit_rate);
+    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CBR) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PEAK_BITRATE, avctx->bit_rate);
+    } else {
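+        // no explicit peak bitrate was given for VBR: allow roughly 30% headroom
+        // above the target bitrate (a heuristic used here, not an AMF requirement)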
+        int rc_max_rate = avctx->rc_max_rate >= avctx->bit_rate ? avctx->rc_max_rate : avctx->bit_rate * 13 / 10;
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PEAK_BITRATE, rc_max_rate);
+    }
+
+    // init dynamic picture control params
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MAX_AU_SIZE, ctx->max_au_size);
+
+    if (ctx->min_qp_i != -1) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MIN_QP_I, ctx->min_qp_i);
+    }
+    if (ctx->max_qp_i != -1) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MAX_QP_I, ctx->max_qp_i);
+    }
+    if (ctx->min_qp_p != -1) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MIN_QP_P, ctx->min_qp_p);
+    }
+    if (ctx->max_qp_p != -1) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MAX_QP_P, ctx->max_qp_p);
+    }
+
+    if (ctx->qp_i != -1) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_QP_I, ctx->qp_i);
+    }
+    if (ctx->qp_p != -1) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_QP_P, ctx->qp_p);
+    }
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_SKIP_FRAME_ENABLE, ctx->skip_frame);
+
+    // fill extradata
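+    // The generated codec headers are exposed through the EXTRADATA property as a
+    // generic AMFInterface; it is queried for the AMFBuffer interface and its
+    // contents are copied into avctx->extradata.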
+    AMFVariantInit(&var);
+    res = ctx->encoder->pVtbl->GetProperty(ctx->encoder, AMF_VIDEO_ENCODER_HEVC_EXTRADATA, &var);
+    if (res == AMF_OK && var.pInterface) {
+        AMFBuffer* buffer;
+        AMFGuid guid = IID_AMFBuffer();
+
+        var.pInterface->pVtbl->QueryInterface(var.pInterface, &guid, (void**)&buffer); // query for buffer interface
+
+        avctx->extradata_size = (int)buffer->pVtbl->GetSize(buffer);
+        avctx->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
+        if (!avctx->extradata) {
+            buffer->pVtbl->Release(buffer);
+            var.pInterface->pVtbl->Release(var.pInterface);
+            return AVERROR(ENOMEM);
+        }
+        memcpy(avctx->extradata, buffer->pVtbl->GetNative(buffer), avctx->extradata_size);
+
+        buffer->pVtbl->Release(buffer);
+        var.pInterface->pVtbl->Release(var.pInterface);
+    }
+    return 0;
+}
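+
+// lavc QP/quantiser defaults are set to -1 (unset) so that the AMF runtime's own
+// defaults apply unless the user overrides them; bf defaults to 0 since this
+// encoder does not configure B-frames.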
+static const AVCodecDefault defaults[] = {
+    { "b",       "2M" },
+    { "maxrate", "3M" },
+    { "qmin",   "-1" },
+    { "qmax",   "-1" },
+    { "qdiff",  "-1" },
+    { "qblur",  "-1" },
+    { "qcomp",  "-1" },
+    { "g",      "250" },
+    { "bf",     "0" },
+    { NULL },
+};
+static const AVClass hevc_amf_class = {
+    .class_name = "hevc_amf",
+    .item_name = av_default_item_name,
+    .option = options,
+    .version = LIBAVUTIL_VERSION_INT,
+};
+static const AVClass hevc_amf_d3d11va_class = {
+    .class_name = "hevc_amf_d3d11va",
+    .item_name = av_default_item_name,
+    .option = options,
+    .version = LIBAVUTIL_VERSION_INT,
+};
+// regular encoder
+AVCodec ff_hevc_amf_encoder = {
+    .name           = "hevc_amf",
+    .long_name      = NULL_IF_CONFIG_SMALL("AMD AMF HEVC encoder"),
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = AV_CODEC_ID_HEVC,
+    .init           = amf_encode_init_hevc,
+    .encode2        = ff_amf_encode_frame,
+    .close          = ff_amf_encode_close,
+    .priv_data_size = sizeof(AmfContext),
+    .priv_class     = &hevc_amf_class,
+    .defaults       = defaults,
+    .capabilities   = AV_CODEC_CAP_DELAY,
+    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
+    .pix_fmts       = ff_amf_pix_fmts,
+};
+// encoder connected with D3D11 HW accelerator
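+// (shares options and defaults with hevc_amf; meant for pipelines that pass
+// D3D11 hardware frames, AV_PIX_FMT_D3D11, directly to the encoder)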
+AVCodec ff_hevc_amf_d3d11va_encoder = {
+    .name           = "hevc_amf_d3d11va",
+    .long_name      = NULL_IF_CONFIG_SMALL("AMD AMF HEVC encoder with d3d11va"),
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = AV_CODEC_ID_HEVC,
+    .init           = amf_encode_init_hevc,
+    .encode2        = ff_amf_encode_frame,
+    .close          = ff_amf_encode_close,
+    .priv_data_size = sizeof(AmfContext),
+    .priv_class     = &hevc_amf_d3d11va_class,
+    .defaults       = defaults,
+    .capabilities   = AV_CODEC_CAP_DELAY,
+    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
+    .pix_fmts       = ff_amf_pix_fmts,
+};
+
+/**
+* Basic test BAT file:
+echo off
+if "%~1"=="" (
+echo input file name is empty. Use basic_transcode_amf_hevc.bat video.mp4
+goto error
+)
+
+SET "CWD=%~dp0"
+SET bitrate=5M
+SET maxbitrate=6M
+SET bufsize=2M
+SET x265_preset=veryfast
+SET amf_quality=speed
+
+
+rem veryfast and zerolatency options make x265 comparable with VCE
+
+rem change path to ffmpeg.exe if needed
+
+"%CWD%\..\bin\ffmpeg.exe" -y -t 100 -threads 0 -i "%~1" -c:v hevc_amf  -b:v %bitrate% -maxrate %maxbitrate% -bufsize %bufsize% -rc vbr_peak -quality %amf_quality% out_amf_hevc.mp4
+"%CWD%\..\bin\ffmpeg.exe" -y -t 100 -threads 0 -i "%~1" -c:v libx265   -b:v %bitrate% -maxrate %maxbitrate% -bufsize %bufsize% -preset %x265_preset% -x265-params vbv-maxrate=6000:vbv-bufsize=2000 -tune zerolatency out_x265_hevc.mp4
+
+echo PSNR > result.txt
+
+"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -threads 0 -i "%~1" -i out_amf_hevc.mp4  -lavfi psnr="stats_file='amf_hevc.psnr.log'"  -f null - > "trace.txt" 2>&1
+for /f "tokens=*" %%A in ('type "trace.txt"') do @echo amf_hevc : %%A >end.txt
+type end.txt >> result.txt
+
+
+"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -threads 0 -i "%~1" -i out_x265_hevc.mp4 -lavfi psnr="stats_file='x265_hevc.psnr.log'" -f null - > "trace.txt" 2>&1
+for /f "tokens=*" %%A in ('type "trace.txt"') do @echo x265_hevc: %%A >end.txt
+type end.txt >> result.txt
+
+
+echo SSIM >> result.txt
+
+"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -threads 0 -i "%~1" -i out_amf_hevc.mp4  -lavfi ssim="stats_file='amf_hevc_ssim.log'" -f null - > "trace.txt"  2>&1
+for /f "tokens=*" %%A in ('type "trace.txt"') do @echo amf_hevc : %%A >end.txt
+type end.txt >> result.txt
+
+"%CWD%\..\bin\ffmpeg.exe" -y -hide_banner -loglevel info -t 100 -threads 0 -i "%~1" -i out_x265_hevc.mp4  -lavfi ssim="stats_file=x265_hevc_ssim.log'" -f null - > "trace.txt"  2>&1
+for /f "tokens=*" %%A in ('type "trace.txt"') do @echo x265_hevc: %%A >end.txt
+type end.txt >> result.txt
+
+del trace.txt
+del end.txt
+
+echo.
+type result.txt
+
+
+:error
+
+
+*/
\ No newline at end of file
diff --git a/libavcodec/version.h b/libavcodec/version.h
index 226da19..6c0d7a8 100644
--- a/libavcodec/version.h
+++ b/libavcodec/version.h
@@ -28,8 +28,8 @@ 
 #include "libavutil/version.h"
 
 #define LIBAVCODEC_VERSION_MAJOR  58
-#define LIBAVCODEC_VERSION_MINOR   0
-#define LIBAVCODEC_VERSION_MICRO 101
+#define LIBAVCODEC_VERSION_MINOR   1
+#define LIBAVCODEC_VERSION_MICRO 100
 
 #define LIBAVCODEC_VERSION_INT  AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
                                                LIBAVCODEC_VERSION_MINOR, \