
[FFmpeg-devel] Added HW H.264 and HEVC encoding for AMD GPUs based on AMF SDK

Message ID 751aacf8-4bce-48d5-8b1c-dd740b7d98fe@mmironov-dev.local
State Superseded

Commit Message

mmironov Oct. 30, 2017, 5:56 p.m. UTC
From 9337cb69176bc15aaaf74186cb3468f106236f04 Mon Sep 17 00:00:00 2001
From: mmironov <mikhail.mironov@amd.com>
Date: Fri, 27 Oct 2017 13:03:15 -0400
Subject: [PATCH] Added: HW accelerated H.264 and HEVC encoding for AMD GPUs
 based on AMF SDK

Signed-off-by: mmironov <mikhail.mironov@amd.com>
---
 Changelog                |    3 +-
 compat/amd/amfsdkenc.h   | 1750 ++++++++++++++++++++++++++++++++++++++++++++++
 configure                |   26 +
 libavcodec/Makefile      |    4 +
 libavcodec/allcodecs.c   |    2 +
 libavcodec/amfenc.c      |  465 ++++++++++++
 libavcodec/amfenc.h      |  129 ++++
 libavcodec/amfenc_h264.c |  345 +++++++++
 libavcodec/amfenc_hevc.c |  289 ++++++++
 libavcodec/version.h     |    4 +-
 10 files changed, 3014 insertions(+), 3 deletions(-)
 create mode 100644 compat/amd/amfsdkenc.h
 create mode 100644 libavcodec/amfenc.c
 create mode 100644 libavcodec/amfenc.h
 create mode 100644 libavcodec/amfenc_h264.c
 create mode 100644 libavcodec/amfenc_hevc.c

Comments

Mark Thompson Oct. 30, 2017, 6:32 p.m. UTC | #1
On 30/10/17 17:56, mmironov wrote:
> From 9337cb69176bc15aaaf74186cb3468f106236f04 Mon Sep 17 00:00:00 2001
> From: mmironov <mikhail.mironov@amd.com>
> Date: Fri, 27 Oct 2017 13:03:15 -0400
> Subject: [PATCH] Added: HW accelerated H.264 and HEVC encoding for AMD GPUs
>  based on AMF SDK
> 
> Signed-off-by: mmironov <mikhail.mironov@amd.com>
> ---
>  Changelog                |    3 +-
>  compat/amd/amfsdkenc.h   | 1750 ++++++++++++++++++++++++++++++++++++++++++++++
>  configure                |   26 +
>  libavcodec/Makefile      |    4 +
>  libavcodec/allcodecs.c   |    2 +
>  libavcodec/amfenc.c      |  465 ++++++++++++
>  libavcodec/amfenc.h      |  129 ++++
>  libavcodec/amfenc_h264.c |  345 +++++++++
>  libavcodec/amfenc_hevc.c |  289 ++++++++
>  libavcodec/version.h     |    4 +-
>  10 files changed, 3014 insertions(+), 3 deletions(-)
>  create mode 100644 compat/amd/amfsdkenc.h
>  create mode 100644 libavcodec/amfenc.c
>  create mode 100644 libavcodec/amfenc.h
>  create mode 100644 libavcodec/amfenc_h264.c
>  create mode 100644 libavcodec/amfenc_hevc.c
> 
> diff --git a/Changelog b/Changelog
> index 6592d86..f0d22fa 100644
> --- a/Changelog
> +++ b/Changelog
> @@ -6,7 +6,8 @@ version <next>:
>  - Dropped support for OpenJPEG versions 2.0 and below. Using OpenJPEG now
>    requires 2.1 (or later) and pkg-config.
>  - VDA dropped (use VideoToolbox instead)
> -
> +- AMF H.264 encoder
> +- AMF HEVC encoder
>  
>  version 3.4:
>  - deflicker video filter
> diff --git a/compat/amd/amfsdkenc.h b/compat/amd/amfsdkenc.h
> ...
> diff --git a/configure b/configure
> index 0e1ccaa..c785cc9 100755
> --- a/configure
> +++ b/configure
> @@ -304,6 +304,7 @@ External library support:
>  
>    The following libraries provide various hardware acceleration features:
>    --disable-audiotoolbox   disable Apple AudioToolbox code [autodetect]
> +  --disable-amf            disable AMF video encoding code [autodetect]
>    --disable-cuda           disable dynamically linked Nvidia CUDA code [autodetect]
>    --enable-cuda-sdk        enable CUDA features that require the CUDA SDK [no]
>    --disable-cuvid          disable Nvidia CUVID support [autodetect]
> @@ -1641,6 +1642,7 @@ EXTERNAL_LIBRARY_LIST="
>  "
>  
>  HWACCEL_AUTODETECT_LIBRARY_LIST="
> +    amf
>      audiotoolbox
>      crystalhd
>      cuda
> @@ -2785,12 +2787,16 @@ scale_npp_filter_deps="cuda libnpp"
>  scale_cuda_filter_deps="cuda_sdk"
>  thumbnail_cuda_filter_deps="cuda_sdk"
>  
> +amf_deps_any="dlopen LoadLibrary"
> +amf_encoder_deps="amf"

"amf_encoder" isn't mentioned anywhere else?

> +
>  nvenc_deps="cuda"
>  nvenc_deps_any="libdl LoadLibrary"
>  nvenc_encoder_deps="nvenc"
>  
>  h263_v4l2m2m_decoder_deps="v4l2_m2m h263_v4l2_m2m"
>  h263_v4l2m2m_encoder_deps="v4l2_m2m h263_v4l2_m2m"
> +h264_amf_encoder_deps="amf"
>  h264_crystalhd_decoder_select="crystalhd h264_mp4toannexb_bsf h264_parser"
>  h264_cuvid_decoder_deps="cuda cuvid"
>  h264_cuvid_decoder_select="h264_mp4toannexb_bsf"
> @@ -2809,6 +2815,7 @@ h264_vaapi_encoder_deps="VAEncPictureParameterBufferH264"
>  h264_vaapi_encoder_select="cbs_h264 vaapi_encode"
>  h264_v4l2m2m_decoder_deps="v4l2_m2m h264_v4l2_m2m"
>  h264_v4l2m2m_encoder_deps="v4l2_m2m h264_v4l2_m2m"
> +hevc_amf_encoder_deps="amf"
>  hevc_cuvid_decoder_deps="cuda cuvid"
>  hevc_cuvid_decoder_select="hevc_mp4toannexb_bsf"
>  hevc_mediacodec_decoder_deps="mediacodec"
> @@ -6305,6 +6312,18 @@ else
>      disable cuda cuvid nvenc
>  fi
>  
> +if enabled x86; then
> +    case $target_os in
> +        mingw32*|mingw64*|win32|win64|cygwin*)
> +            ;;
> +        *)
> +            disable  amf
> +            ;;
> +    esac
> +else
> +    disable amf
> +fi
> +
>  enabled nvenc &&
>      check_cc -I$source_path <<EOF || disable nvenc
>  #include "compat/nvenc/nvEncodeAPI.h"
> @@ -6313,6 +6332,13 @@ void f(void) { struct { const GUID guid; } s[] = { { NV_ENC_PRESET_HQ_GUID } };
>  int main(void) { return 0; }
>  EOF
>  
> +enabled amf &&
> +    check_cc -I$source_path <<EOF || disable amf
> +#include "compat/amd/amfsdkenc.h"
> +AMFFactory *factory;
> +int main(void) { return 0; }
> +EOF
> +
>  # Funny iconv installations are not unusual, so check it after all flags have been set
>  if enabled libc_iconv; then
>      check_func_headers iconv.h iconv
> diff --git a/libavcodec/Makefile b/libavcodec/Makefile
> index bc4d7da..cbf45ac 100644
> --- a/libavcodec/Makefile
> +++ b/libavcodec/Makefile
> @@ -50,6 +50,7 @@ OBJS = allcodecs.o                                                      \
>  # subsystems
>  OBJS-$(CONFIG_AANDCTTABLES)            += aandcttab.o
>  OBJS-$(CONFIG_AC3DSP)                  += ac3dsp.o ac3.o ac3tab.o
> +OBJS-$(CONFIG_AMF)                     += amfenc.o
>  OBJS-$(CONFIG_AUDIO_FRAME_QUEUE)       += audio_frame_queue.o
>  OBJS-$(CONFIG_AUDIODSP)                += audiodsp.o
>  OBJS-$(CONFIG_BLOCKDSP)                += blockdsp.o
> @@ -334,6 +335,7 @@ OBJS-$(CONFIG_H264_DECODER)            += h264dec.o h264_cabac.o h264_cavlc.o \
>  OBJS-$(CONFIG_H264_CUVID_DECODER)      += cuvid.o
>  OBJS-$(CONFIG_H264_MEDIACODEC_DECODER) += mediacodecdec.o
>  OBJS-$(CONFIG_H264_MMAL_DECODER)       += mmaldec.o
> +OBJS-$(CONFIG_H264_AMF_ENCODER)        += amfenc_h264.o
>  OBJS-$(CONFIG_H264_NVENC_ENCODER)      += nvenc_h264.o
>  OBJS-$(CONFIG_NVENC_ENCODER)           += nvenc_h264.o
>  OBJS-$(CONFIG_NVENC_H264_ENCODER)      += nvenc_h264.o
> @@ -352,6 +354,7 @@ OBJS-$(CONFIG_HEVC_DECODER)            += hevcdec.o hevc_mvs.o \
>                                            hevcdsp.o hevc_filter.o hevc_data.o
>  OBJS-$(CONFIG_HEVC_CUVID_DECODER)      += cuvid.o
>  OBJS-$(CONFIG_HEVC_MEDIACODEC_DECODER) += mediacodecdec.o
> +OBJS-$(CONFIG_HEVC_AMF_ENCODER)        += amfenc_hevc.o
>  OBJS-$(CONFIG_HEVC_NVENC_ENCODER)      += nvenc_hevc.o
>  OBJS-$(CONFIG_NVENC_HEVC_ENCODER)      += nvenc_hevc.o
>  OBJS-$(CONFIG_HEVC_QSV_DECODER)        += qsvdec_h2645.o
> @@ -1056,6 +1059,7 @@ SKIPHEADERS-$(CONFIG_JNI)              += ffjni.h
>  SKIPHEADERS-$(CONFIG_LIBVPX)           += libvpx.h
>  SKIPHEADERS-$(CONFIG_LIBWEBP_ENCODER)  += libwebpenc_common.h
>  SKIPHEADERS-$(CONFIG_MEDIACODEC)       += mediacodecdec_common.h mediacodec_surface.h mediacodec_wrapper.h mediacodec_sw_buffer.h
> +SKIPHEADERS-$(CONFIG_AMF)              += amfenc.h
>  SKIPHEADERS-$(CONFIG_NVENC)            += nvenc.h
>  SKIPHEADERS-$(CONFIG_QSV)              += qsv.h qsv_internal.h
>  SKIPHEADERS-$(CONFIG_QSVDEC)           += qsvdec.h
> diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
> index 8369126..3d2299f 100644
> --- a/libavcodec/allcodecs.c
> +++ b/libavcodec/allcodecs.c
> @@ -649,6 +649,7 @@ static void register_all(void)
>       * above is available */
>      REGISTER_ENCODER(H263_V4L2M2M,      h263_v4l2m2m);
>      REGISTER_ENCDEC (LIBOPENH264,       libopenh264);
> +    REGISTER_ENCODER(H264_AMF,          h264_amf);
>      REGISTER_DECODER(H264_CUVID,        h264_cuvid);
>      REGISTER_ENCODER(H264_NVENC,        h264_nvenc);
>      REGISTER_ENCODER(H264_OMX,          h264_omx);
> @@ -661,6 +662,7 @@ static void register_all(void)
>      REGISTER_ENCODER(NVENC_H264,        nvenc_h264);
>      REGISTER_ENCODER(NVENC_HEVC,        nvenc_hevc);
>  #endif
> +    REGISTER_ENCODER(HEVC_AMF,          hevc_amf);
>      REGISTER_DECODER(HEVC_CUVID,        hevc_cuvid);
>      REGISTER_DECODER(HEVC_MEDIACODEC,   hevc_mediacodec);
>      REGISTER_ENCODER(HEVC_NVENC,        hevc_nvenc);
> diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c
> new file mode 100644
> index 0000000..5fdf027
> --- /dev/null
> +++ b/libavcodec/amfenc.c
> @@ -0,0 +1,465 @@
> +/*
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +#include "amfenc.h"
> +
> +#include "libavutil/avassert.h"
> +#include "libavutil/imgutils.h"
> +#include "libavutil/hwcontext.h"
> +#include "libavutil/hwcontext_d3d11va.h"
> +#include "libavutil/mem.h"
> +#include "libavutil/pixdesc.h"
> +#include "libavutil/time.h"
> +#include "internal.h"
> +
> +#include <d3d11.h>
> +
> +#ifdef _WIN32
> +#include "compat/w32dlfcn.h"
> +#else
> +#include <dlfcn.h>
> +#endif
> +
> +#define FFMPEG_AMF_WRITER_ID L"ffmpeg_amf"
> +#define AMF_DEBUG_TRACE 0
> +
> +const enum AVPixelFormat ff_amf_pix_fmts[] = {
> +    AV_PIX_FMT_NV12,
> +    AV_PIX_FMT_0RGB32,
> +    AV_PIX_FMT_0BGR32,
> +    AV_PIX_FMT_YUV420P,
> +    AV_PIX_FMT_D3D11,
> +    AV_PIX_FMT_NONE
> +};
> +
> +typedef struct FormatMap {
> +    enum AVPixelFormat       av_format;
> +    enum AMF_SURFACE_FORMAT  amf_format;
> +} FormatMap;
> +
> +static const FormatMap format_map[] =
> +{
> +    { AV_PIX_FMT_NONE,       AMF_SURFACE_UNKNOWN },
> +    { AV_PIX_FMT_NV12,       AMF_SURFACE_NV12 },
> +    { AV_PIX_FMT_0BGR32,       AMF_SURFACE_BGRA },
> +    { AV_PIX_FMT_0RGB32,       AMF_SURFACE_RGBA },

Funny alignment?

> +    { AV_PIX_FMT_GRAY8,      AMF_SURFACE_GRAY8 },
> +    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YUV420P },
> +    { AV_PIX_FMT_BGR0,       AMF_SURFACE_BGRA },
> +    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YV12 },
> +    { AV_PIX_FMT_YUYV422,    AMF_SURFACE_YUY2 },
> +    { AV_PIX_FMT_D3D11,      AMF_SURFACE_NV12 },
> +};
> +
> +static enum AMF_SURFACE_FORMAT amf_av_to_amf_format(enum AVPixelFormat fmt)
> +{
> +    for (int i = 0; i < amf_countof(format_map); i++) {

Don't mix declarations with code.
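E.g. hoist the declaration:

    int i;

    for (i = 0; i < amf_countof(format_map); i++) {
        if (format_map[i].av_format == fmt)
            return format_map[i].amf_format;
    }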

> +        if (format_map[i].av_format == fmt) {
> +            return format_map[i].amf_format;
> +        }
> +    }
> +    return AMF_SURFACE_UNKNOWN;
> +}
> +
> +static void AMF_CDECL_CALL AMFTraceWriter_Write(AMFTraceWriter *pThis,
> +    const wchar_t *scope, const wchar_t *message)
> +{
> +    AmfTraceWriter *tracer = (AmfTraceWriter*)pThis;
> +    av_log(tracer->avctx, AV_LOG_DEBUG, "%ls: %ls", scope, message);

Does the message necessarily include a newline already?
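If not, it might be worth appending one when it's missing, roughly (untested; needs wchar.h for wcslen()):

    size_t len    = wcslen(message);
    int    has_nl = len > 0 && message[len - 1] == L'\n';

    av_log(tracer->avctx, AV_LOG_DEBUG, has_nl ? "%ls: %ls" : "%ls: %ls\n",
           scope, message);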

> +}
> +
> +static void AMF_CDECL_CALL AMFTraceWriter_Flush(AMFTraceWriter *pThis)
> +{
> +}
> +
> +static AMFTraceWriterVtbl tracer_vtbl =
> +{
> +    .Write = AMFTraceWriter_Write,
> +    .Flush = AMFTraceWriter_Flush,
> +};
> +
> +static int amf_load_library(AVCodecContext *avctx)
> +{
> +    AmfContext             *ctx = avctx->priv_data;
> +    AMFInit_Fn              init_fun = NULL;
> +    AMFQueryVersion_Fn      version_fun = NULL;
> +    AMF_RESULT              res = AMF_OK;
> +
> +    ctx->library = dlopen(AMF_DLL_NAMEA, RTLD_NOW | RTLD_LOCAL);
> +    AMF_RETURN_IF_FALSE(ctx, ctx->library != NULL,
> +        AVERROR_UNKNOWN, "DLL %s failed to open. \n", AMF_DLL_NAMEA);
> +
> +    init_fun = (AMFInit_Fn)dlsym(ctx->library, AMF_INIT_FUNCTION_NAME);
> +    AMF_RETURN_IF_FALSE(ctx, init_fun != NULL, AVERROR_UNKNOWN, "DLL %s failed to find function %s. \n", AMF_DLL_NAMEA, AMF_INIT_FUNCTION_NAME);

I think do s/ \n/\n/ for all of these messages.

> +
> +    version_fun = (AMFQueryVersion_Fn)dlsym(ctx->library, AMF_QUERY_VERSION_FUNCTION_NAME);
> +    AMF_RETURN_IF_FALSE(ctx, version_fun != NULL, AVERROR_UNKNOWN, "DLL %s failed to find function %s. \n", AMF_DLL_NAMEA, AMF_QUERY_VERSION_FUNCTION_NAME);
> +
> +    res = version_fun(&ctx->version);
> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "%s failed with error %d. \n", AMF_QUERY_VERSION_FUNCTION_NAME, res);
> +    res = init_fun(AMF_FULL_VERSION, &ctx->factory);
> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "%s failed with error %d. \n", AMF_INIT_FUNCTION_NAME, res);
> +    res = ctx->factory->pVtbl->GetTrace(ctx->factory, &ctx->trace);
> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "GetTrace() failed with error %d. \n", res);
> +    res = ctx->factory->pVtbl->GetDebug(ctx->factory, &ctx->debug);
> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "GetDebug() failed with error %d. \n", res);
> +    return 0;
> +}
> +
> +static int amf_init_context(AVCodecContext *avctx)
> +{
> +    AmfContext         *ctx = avctx->priv_data;
> +    AMF_RESULT          res = AMF_OK;
> +
> +    // the return of these functions indicates old state and do not affect behaviour
> +    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_CONSOLE, 0);
> +#if AMF_DEBUG_TRACE
> +    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, 1);
> +    ctx->trace->pVtbl->SetWriterLevel(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, AMF_TRACE_TRACE);
> +    ctx->trace->pVtbl->SetGlobalLevel(ctx->trace, AMF_TRACE_TRACE);
> +#else
> +    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, 0);
> +#endif

I don't much like this compile-time option.  What sort of messages does the trace writer actually give you?  Will a user ever want to enable it?
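If it stays at all, it would be nicer as a runtime AVOption than as a #define, roughly like this ("log_to_dbg" is a made-up option/field name):

    { "log_to_dbg", "Route AMF tracing to the debug output writer", OFFSET(log_to_dbg),
      AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },

and then in amf_init_context():

    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, ctx->log_to_dbg);
    if (ctx->log_to_dbg)
        ctx->trace->pVtbl->SetWriterLevel(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, AMF_TRACE_TRACE);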

> +    ctx->tracer.vtbl = &tracer_vtbl;
> +    ctx->tracer.avctx = avctx;
> +    ctx->trace->pVtbl->RegisterWriter(ctx->trace, FFMPEG_AMF_WRITER_ID,
> +        (AMFTraceWriter*)&ctx->tracer, 1);
> +
> +    res = ctx->factory->pVtbl->CreateContext(ctx->factory, &ctx->context);
> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "CreateContext() failed with error %d. \n", res);
> +    // try to reuse existing DX device
> +    if (avctx->hw_frames_ctx) {
> +        AVHWFramesContext *device_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
> +        if (device_ctx->device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA){
> +            if (amf_av_to_amf_format(device_ctx->sw_format) == AMF_SURFACE_UNKNOWN) {
> +                if (device_ctx->device_ctx->hwctx) {
> +                    AVD3D11VADeviceContext *device_d3d11 = (AVD3D11VADeviceContext *)device_ctx->device_ctx->hwctx;
> +                    res = ctx->context->pVtbl->InitDX11(ctx->context, device_d3d11->device, AMF_DX11_1);
> +                    if (res == AMF_OK) {
> +                        ctx->hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
> +                    }else {
> +                        av_log(avctx, AV_LOG_INFO, "amf_shared: avctx->hw_frames_ctx has non-AMD device, switching to default. \n");
> +                    }
> +                }
> +            }else {
> +                av_log(avctx, AV_LOG_INFO, "amf_shared: avctx->hw_frames_ctx has format not uspported by AMF, switching to default. \n");
> +            }
> +        }
> +    } else if (avctx->hw_device_ctx) {
> +        AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)(avctx->hw_device_ctx->data);
> +        if (device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
> +            if (device_ctx->hwctx) {
> +                AVD3D11VADeviceContext *device_d3d11 = (AVD3D11VADeviceContext *)device_ctx->hwctx;
> +                res = ctx->context->pVtbl->InitDX11(ctx->context, device_d3d11->device, AMF_DX11_1);
> +                if (res == AMF_OK) {
> +                    ctx->hw_device_ctx = av_buffer_ref(avctx->hw_device_ctx);
> +                } else {
> +                    av_log(avctx, AV_LOG_INFO, "amf_shared: avctx->hw_device_ctx has non-AMD device, switching to default. \n");
> +                }
> +            }
> +        }
> +    }
> +    if (!ctx->hw_frames_ctx) {
> +        res = ctx->context->pVtbl->InitDX11(ctx->context, NULL, AMF_DX11_1);
> +        if (res != AMF_OK) {
> +            res = ctx->context->pVtbl->InitDX9(ctx->context, NULL);
> +            AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "InitDX9() failed with error %d. \n", res);
> +        }
> +    }
> +    return 0;
> +}
> +
> +static int amf_init_encoder(AVCodecContext *avctx)
> +{
> +    AmfContext          *ctx = avctx->priv_data;
> +    const wchar_t       *codec_id = NULL;
> +    AMF_RESULT           res = AMF_OK;
> +
> +    switch (avctx->codec->id) {
> +        case AV_CODEC_ID_H264:
> +            codec_id = AMFVideoEncoderVCE_AVC;
> +            break;
> +        case AV_CODEC_ID_HEVC:
> +            codec_id = AMFVideoEncoder_HEVC;
> +            break;
> +        default:
> +            break;
> +    }
> +    AMF_RETURN_IF_FALSE(ctx, codec_id != 0, AVERROR(EINVAL), "Codec %d is not supported. \n", avctx->codec->id);

Just codec_id, you're testing whether it's non-null.

> +
> +    ctx->format = amf_av_to_amf_format(avctx->pix_fmt);
> +    AMF_RETURN_IF_FALSE(ctx, ctx->format != AMF_SURFACE_UNKNOWN, AVERROR(EINVAL), "Format %d is not supported. \n", avctx->pix_fmt);
> +
> +    res = ctx->factory->pVtbl->CreateComponent(ctx->factory, ctx->context, codec_id, &ctx->encoder);
> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_ENCODER_NOT_FOUND, "CreateComponent(%ls) failed with error %d. \n", codec_id, res);
> +
> +    ctx->eof = 0;
> +    return 0;
> +}
> +
> +static int amf_terminate(AVCodecContext *avctx)
> +{
> +    AmfContext      *ctx = avctx->priv_data;
> +
> +    if (ctx->encoder) {
> +        ctx->encoder->pVtbl->Terminate(ctx->encoder);
> +        ctx->encoder->pVtbl->Release(ctx->encoder);
> +        ctx->encoder = NULL;
> +    }
> +
> +    if (ctx->context) {
> +        ctx->context->pVtbl->Terminate(ctx->context);
> +        ctx->context->pVtbl->Release(ctx->context);
> +        ctx->context = NULL;
> +    }
> +    if (ctx->hw_device_ctx){
> +        av_buffer_unref(&ctx->hw_device_ctx);
> +        ctx->hw_device_ctx = NULL;
> +    }

The av_buffer_unref() comment applies to hw_device_ctx as well.
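It is NULL-safe and resets the pointer itself, so both can shrink to:

    av_buffer_unref(&ctx->hw_device_ctx);
    av_buffer_unref(&ctx->hw_frames_ctx);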

> +    av_buffer_unref(&ctx->hw_frames_ctx);
> +
> +    if (ctx->trace) {
> +        ctx->trace->pVtbl->UnregisterWriter(ctx->trace, FFMPEG_AMF_WRITER_ID);
> +    }
> +    if (ctx->library) {
> +        dlclose(ctx->library);
> +        ctx->library = NULL;
> +    }
> +    ctx->trace = NULL;
> +    ctx->debug = NULL;
> +    ctx->factory = NULL;
> +    ctx->version = 0;
> +
> +    return 0;
> +}
> +
> +static int amf_copy_surface(AVCodecContext *avctx, const AVFrame *frame,
> +    AMFSurface* surface)
> +{
> +    AVFrame        *sw_frame = NULL;
> +    AMFPlane       *plane = NULL;
> +    uint8_t        *dst_data[4];
> +    int             dst_linesize[4];
> +    int             ret = 0;
> +    int             planes;
> +
> +    if (frame->hw_frames_ctx) {

This condition isn't sufficient, hw_frames_ctx is set on CPU-mapped hardware frames.

Maybe look at frame->format?
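Something along these lines, assuming D3D11 frames are the only hardware input accepted here:

    if (frame->format == AV_PIX_FMT_D3D11) {
        /* real GPU surface: bounce through av_hwframe_transfer_data() as below */
    } else {
        /* already in system memory: av_image_copy() directly */
    }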

> +        if (!(sw_frame = av_frame_alloc())) {
> +            av_log(avctx, AV_LOG_ERROR, "Can not alloc frame\n");
> +            ret = AVERROR(ENOMEM);
> +            goto fail;
> +        }
> +        if ((ret = av_hwframe_transfer_data(sw_frame, frame, 0)) < 0) {
> +            av_log(avctx, AV_LOG_ERROR, "Error transferring the data to system memory. \n");
> +            ret = AVERROR(EINVAL);
> +            goto fail;
> +        }
> +        frame = sw_frame;
> +    }
> +    planes = (int)surface->pVtbl->GetPlanesCount(surface);
> +    if (planes > amf_countof(dst_data)) {
> +        av_log(avctx, AV_LOG_ERROR, "Invalid number of planes %d in surface \n", planes);
> +        ret = AVERROR(EINVAL);
> +        goto fail;
> +    }
> +
> +    for (int i = 0; i < planes; i++) {
> +        plane = surface->pVtbl->GetPlaneAt(surface, i);
> +        dst_data[i] = plane->pVtbl->GetNative(plane);
> +        dst_linesize[i] = plane->pVtbl->GetHPitch(plane);
> +    }
> +    av_image_copy(dst_data, dst_linesize,
> +        (const uint8_t**)frame->data, frame->linesize, frame->format,
> +        avctx->width, avctx->height);
> +
> +    surface->pVtbl->SetPts(surface, frame->pts);
> +fail:
> +    if (sw_frame){
> +        av_frame_free(&sw_frame);
> +    }
> +    return ret;
> +}
> +
> +static int amf_copy_buffer(AVCodecContext *avctx, AVPacket *pkt, AMFBuffer *buffer)
> +{
> +    int                 ret;
> +    AMFVariantStruct    var;
> +    int                 size = (int)buffer->pVtbl->GetSize(buffer);

Possibly dubious truncation?

All of these Get* functions necessarily succeed?

> +
> +    if (ret = ff_alloc_packet2(avctx, pkt, size, 0) < 0) {
> +        return ret;
> +    }
> +    memcpy(pkt->data, buffer->pVtbl->GetNative(buffer), size);
> +
> +    switch (avctx->codec->id) {
> +        case AV_CODEC_ID_H264:
> +            buffer->pVtbl->GetProperty(buffer, AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE, &var);
> +            switch (var.int64Value) {
> +                case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_IDR:
> +                    pkt->flags = AV_PKT_FLAG_KEY;
> +                    break;

If it's only for IDR frames, it might be easier to drop the switch.
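E.g. just:

    buffer->pVtbl->GetProperty(buffer, AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE, &var);
    if (var.int64Value == AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_IDR)
        pkt->flags |= AV_PKT_FLAG_KEY;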

> +                case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_I:
> +                    break;
> +                case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_P:
> +                    break;
> +                case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_B:
> +                    break;
> +                default:
> +                    break;
> +            }
> +            break;
> +        case AV_CODEC_ID_HEVC:
> +            buffer->pVtbl->GetProperty(buffer, AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE, &var);
> +            switch (var.int64Value) {
> +                case AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_IDR:
> +                    pkt->flags = AV_PKT_FLAG_KEY;
> +                    break;

And here.

> +                case AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_I:
> +                    break;
> +                case AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_P:
> +                    break;
> +                default:
> +                    break;
> +            }
> +            break;
> +        default:
> +            break;
> +    }
> +    pkt->pts = buffer->pVtbl->GetPts(buffer);
> +    pkt->dts = pkt->pts;

This is wrong if reordering is present (B-frames) - dts needs to be monotonic.
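One approach used by the other hardware encoders is to queue the input timestamps and shift the output dts back by the reorder delay, very roughly (timestamp_fifo and dts_delay would be new context fields, names made up; see libavutil/fifo.h):

    /* when submitting a frame */
    av_fifo_generic_write(ctx->timestamp_fifo, (void*)&frame->pts, sizeof(frame->pts), NULL);

    /* when emitting a packet */
    av_fifo_generic_read(ctx->timestamp_fifo, &pkt->dts, sizeof(pkt->dts), NULL);
    pkt->dts -= ctx->dts_delay; /* delay derived from max_b_frames and the time base */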

> +    return 0;
> +}
> +
> +// amfenc API implmentation
> +int ff_amf_encode_init(AVCodecContext *avctx)
> +{
> +    AmfContext     *ctx = avctx->priv_data;
> +    int             ret;
> +
> +    ctx->factory = NULL;
> +    ctx->debug = NULL;
> +    ctx->trace = NULL;
> +    ctx->context = NULL;
> +    ctx->encoder = NULL;
> +    ctx->library = NULL;
> +    ctx->version = 0;
> +    ctx->eof = 0;
> +    ctx->format = 0;
> +    ctx->tracer.vtbl = NULL;
> +    ctx->tracer.avctx = NULL;
> +
> +    if ((ret = amf_load_library(avctx)) == 0) {
> +        if ((ret = amf_init_context(avctx)) == 0) {
> +            if ((ret = amf_init_encoder(avctx)) == 0) {
> +                return 0;
> +            }
> +        }
> +    }
> +    amf_terminate(avctx);
> +    return ret;
> +}
> +
> +int av_cold ff_amf_encode_close(AVCodecContext *avctx)
> +{
> +    int ret;
> +    ret = amf_terminate(avctx);
> +    return ret;
> +}
> +
> +static GUID  AMFTextureArrayIndexGUID = AMFTextureArrayIndexGUIDDef;

GUID is a Windows type, should this be AMFGuid?  (I tried removing the check and compiling on Linux, other than the D3D11 stuff this is the only error.)

> +
> +int ff_amf_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
> +                        const AVFrame *frame, int *got_packet)
> +{
> +    int             ret = 0;
> +    AMF_RESULT      res = AMF_OK;
> +    AmfContext     *ctx = avctx->priv_data;
> +    AMFSurface     *surface = NULL;
> +    AMFData        *data = NULL;
> +    amf_bool       submitted = 0;
> +
> +    while (!submitted) {
> +        if (!frame) { // submit drain
> +            if (!ctx->eof) { // submit drain onre time only
> +                res = ctx->encoder->pVtbl->Drain(ctx->encoder);
> +                if (res == AMF_INPUT_FULL) {
> +                    av_usleep(1000); // input queue is full: wait, poll and submit Drain again
> +                                     // need to get some output and try again
> +                } else if (res == AMF_OK) {
> +                    ctx->eof = 1; // drain started
> +                    submitted = 1;
> +                }
> +            }
> +        } else { // submit frame
> +            if (surface == NULL) { // prepare surface from frame one time only
> +                if (frame->hw_frames_ctx && ( // HW frame detected
> +                                              // check if the same hw_frames_ctx as used in initialization
> +                    (ctx->hw_frames_ctx && frame->hw_frames_ctx->data == ctx->hw_frames_ctx->data) ||
> +                    // check if the same hw_device_ctx as used in initialization
> +                    (ctx->hw_device_ctx && ((AVHWFramesContext*)frame->hw_frames_ctx->data)->device_ctx ==
> +                    (AVHWDeviceContext*)ctx->hw_device_ctx->data)
> +                )) {
> +                    ID3D11Texture2D* texture = (ID3D11Texture2D*)frame->data[0]; // actual texture
> +                    int index = (int)(size_t)frame->data[1]; // index is a slice in texture array is - set to tell AMF which slice to use

(int)(intptr_t)frame->data[1];

> +                    texture->lpVtbl->SetPrivateData(texture, &AMFTextureArrayIndexGUID, sizeof(index), &index);
> +
> +                    res = ctx->context->pVtbl->CreateSurfaceFromDX11Native(ctx->context, texture, &surface, NULL); // wrap to AMF surface
> +                    surface->pVtbl->SetCrop(surface, 0, 0, frame->width, frame->height); // decode surfaces are vertically aligned by 16 tell AMF real size

"decode surfaces"?  These need not come from a decoder.  Does it work with hwupload?

> +                    surface->pVtbl->SetPts(surface, frame->pts);
> +                } else {
> +                    res = ctx->context->pVtbl->AllocSurface(ctx->context, AMF_MEMORY_HOST, ctx->format, avctx->width, avctx->height, &surface);
> +                    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "AllocSurface() failed  with error %d \n", res);
> +                    amf_copy_surface(avctx, frame, surface);
> +                }
> +            }
> +            // encode
> +            res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)surface);
> +            if (res == AMF_INPUT_FULL) { // handle full queue
> +                av_usleep(1000); // input queue is full: wait, poll and submit surface again
> +            } else {
> +                surface->pVtbl->Release(surface);
> +                surface = NULL;
> +                AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "SubmitInput() failed with error %d \n", res);
> +                submitted = 1;
> +            }
> +        }
> +        // poll results
> +        if (!data) {
> +            res = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data);
> +            if (data) {
> +                AMFBuffer* buffer;
> +                AMFGuid guid = IID_AMFBuffer();
> +                data->pVtbl->QueryInterface(data, &guid, (void**)&buffer); // query for buffer interface
> +                ret = amf_copy_buffer(avctx, pkt, buffer);
> +                if (!ret)
> +                    *got_packet = 1;
> +                buffer->pVtbl->Release(buffer);
> +                data->pVtbl->Release(data);
> +                if (ctx->eof) {
> +                    submitted = 1; // we are in the drain state - no submissions
> +                }
> +            } else if (res == AMF_EOF) {
> +                submitted = 1; // drain complete
> +            } else {
> +                if (!submitted) {
> +                    av_usleep(1000); // wait and poll again
> +                }
> +            }
> +        }
> +    }
> +    return ret;
> +}

I still think this would be much better off using the send_frame()/receive_packet() API.  Even if your API doesn't expose any information about the queue length, you only need to hold a single input frame transiently to get around that (the user is not allowed to call send_frame() twice in a row without calling receive_packet()).
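For reference, roughly the shape I have in mind (function names and the buffered_frame/draining fields are made up):

    static int amf_send_frame(AVCodecContext *avctx, const AVFrame *frame)
    {
        AmfContext *ctx = avctx->priv_data;

        if (ctx->buffered_frame)
            return AVERROR(EAGAIN); /* caller has to fetch output first */
        if (!frame) {
            ctx->draining = 1;      /* drain starts on the next receive_packet() */
            return 0;
        }
        ctx->buffered_frame = av_frame_clone(frame);
        return ctx->buffered_frame ? 0 : AVERROR(ENOMEM);
    }

receive_packet() then retries SubmitInput() of the buffered frame while the queue is full, polls QueryOutput(), and returns AVERROR(EAGAIN) or AVERROR_EOF as appropriate, so the av_usleep() busy-wait goes away.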

> diff --git a/libavcodec/amfenc.h b/libavcodec/amfenc.h
> new file mode 100644
> index 0000000..ee3a03b
> --- /dev/null
> +++ b/libavcodec/amfenc.h
> @@ -0,0 +1,129 @@
> +/*
> +* This file is part of FFmpeg.
> +*
> +* FFmpeg is free software; you can redistribute it and/or
> +* modify it under the terms of the GNU Lesser General Public
> +* License as published by the Free Software Foundation; either
> +* version 2.1 of the License, or (at your option) any later version.
> +*
> +* FFmpeg is distributed in the hope that it will be useful,
> +* but WITHOUT ANY WARRANTY; without even the implied warranty of
> +* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> +* Lesser General Public License for more details.
> +*
> +* You should have received a copy of the GNU Lesser General Public
> +* License along with FFmpeg; if not, write to the Free Software
> +* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> +*/
> +
> +#ifndef AVCODEC_AMFENC_H
> +#define AVCODEC_AMFENC_H
> +
> +#include "config.h"
> +#include "avcodec.h"
> +#include "compat/amd/amfsdkenc.h"
> +
> +
> +/**
> +* AMF trace writer callback class
> +* Used to capture all AMF logging
> +*/
> +
> +typedef struct AmfTraceWriter {
> +    AMFTraceWriterVtbl* vtbl;
> +    AVCodecContext      *avctx;
> +} AmfTraceWriter;
> +
> +/**
> +* AMF encoder context
> +*/
> +
> +typedef struct AmfContext {
> +    AVClass            *avclass;
> +    /** access to AMF runtime */

/** doxygen comments attach to the following item, which doesn't look right here.

> +    amf_handle          library; ///< handle to DLL library
> +    AMFFactory         *factory; ///< pointer to AMF factory
> +    AMFDebug*           debug;   ///< pointer to AMF debug interface
> +    AMFTrace*           trace;   ///< pointer to AMF trace interface
> +
> +    amf_uint64          version; ///< version of AMF runtime
> +    AmfTraceWriter      tracer;  ///< AMF writer registered with AMF
> +    AMFContext         *context; ///< AMF context
> +    //encoder
> +    AMFComponent*       encoder; ///< AMF encoder object
> +    amf_bool            eof;     ///< flag indicating EOF happened
> +    AMF_SURFACE_FORMAT  format;  ///< AMF surface format
> +
> +    AVBufferRef        *hw_device_ctx; ///< pointer to HW accelerator (decoder)
> +    AVBufferRef        *hw_frames_ctx; ///< pointer to HW accelerator (frame allocator)
> +
> +    /** common encoder option options */
> +
> +    /** Static options, have to be set before Init() call */

Also these, and more below.

> +    int                 usage;
> +    int                 profile;
> +    int                 level;
> +    int                 preanalysis;
> +    int                 quality;
> +    int                 b_frame_delta_qp;
> +    int                 ref_b_frame_delta_qp;
> +
> +    /** Dynamic options, can be set after Init() call */
> +
> +    int                 rate_control_mode;
> +    int                 enforce_hrd;
> +    int                 filler_data;
> +    int                 enable_vbaq;
> +    int                 skip_frame;
> +    int                 qp_i;
> +    int                 qp_p;
> +    int                 qp_b;
> +    int                 max_au_size;
> +    int                 header_spacing;
> +    int                 b_frame_ref;
> +    int                 intra_refresh_mb;
> +    int                 coding_mode;
> +    int                 me_half_pel;
> +    int                 me_quater_pel;

"quater"?

> +
> +    /** HEVC - specific options */
> +
> +    int                 gops_per_idr;
> +    int                 header_insertion_mode;
> +    int                 min_qp_i;
> +    int                 max_qp_i;
> +    int                 min_qp_p;
> +    int                 max_qp_p;
> +    int                 tier;
> +} AmfContext;
> +
> +/**
> +* Common encoder initization code
> +*/
> +int ff_amf_encode_init(AVCodecContext *avctx);
> +/**
> +* Common encoder termination code
> +*/
> +int ff_amf_encode_close(AVCodecContext *avctx);
> +
> +/**
> +* Ecoding one frame - common for all AMF encoders
> +*/
> +int ff_amf_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
> +    const AVFrame *frame, int *got_packet);
> +
> +/**
> +* Supported formats
> +*/
> +extern const enum AVPixelFormat ff_amf_pix_fmts[];
> +
> +/**
> +* Error handling helper
> +*/
> +#define AMF_RETURN_IF_FALSE(avctx, exp, ret_value, /*optional message,*/ ...) \

Message is not optional to av_log().

> +    if (!(exp)) { \
> +        av_log(avctx, AV_LOG_ERROR, __VA_ARGS__); \
> +        return AVERROR(ret_value); \
> +    }
> +
> +#endif //AVCODEC_AMFENC_H
> diff --git a/libavcodec/amfenc_h264.c b/libavcodec/amfenc_h264.c
> new file mode 100644
> index 0000000..1b22429
> --- /dev/null
> +++ b/libavcodec/amfenc_h264.c
> @@ -0,0 +1,345 @@
> +/*
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#include "amfenc.h"
> +
> +#include "libavutil/internal.h"
> +#include "libavutil/opt.h"
> +#include "internal.h"
> +
> +#define OFFSET(x) offsetof(AmfContext, x)
> +#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
> +
> +static const AVOption options[] = {
> +    // Static
> +    /// Usage
> +    { "usage",          "Encoder Usage",        OFFSET(usage),  AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_USAGE_TRANSCONDING      }, AMF_VIDEO_ENCODER_USAGE_TRANSCONDING, AMF_VIDEO_ENCODER_USAGE_WEBCAM, VE, "usage" },
> +    { "transcoding",    "Generic Transcoding",  0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_USAGE_TRANSCONDING      }, 0, 0, VE, "usage" },
> +    { "ultralowlatency","",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_USAGE_ULTRA_LOW_LATENCY }, 0, 0, VE, "usage" },
> +    { "lowlatency",     "",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_USAGE_LOW_LATENCY       }, 0, 0, VE, "usage" },
> +    { "webcam",         "Webcam",               0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_USAGE_WEBCAM            }, 0, 0, VE, "usage" },
> +
> +    /// Profile,
> +    { "profile",        "Profile",              OFFSET(profile),AV_OPT_TYPE_INT, { .i64 = AMF_VIDEO_ENCODER_PROFILE_MAIN       }, AMF_VIDEO_ENCODER_PROFILE_BASELINE, AMF_VIDEO_ENCODER_PROFILE_HIGH, VE, "profile" },
> +    { "baseline",       "",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_PROFILE_BASELINE }, 0, 0, VE, "profile" },

You still don't support baseline profile.

> +    { "main",           "",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_PROFILE_MAIN     }, 0, 0, VE, "profile" },
> +    { "high",           "",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_PROFILE_HIGH     }, 0, 0, VE, "profile" },
> +
> +    /// Profile Level
> +    { "level",          "Profile Level",        OFFSET(level),  AV_OPT_TYPE_INT,   { .i64 = 0  }, 0, 62, VE, "level" },
> +    { "auto",           "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 0  }, 0, 0,  VE, "level" },
> +    { "1.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 10 }, 0, 0,  VE, "level" },
> +    { "1.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 11 }, 0, 0,  VE, "level" },
> +    { "1.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 12 }, 0, 0,  VE, "level" },
> +    { "1.3",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 13 }, 0, 0,  VE, "level" },
> +    { "2.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 20 }, 0, 0,  VE, "level" },
> +    { "2.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 21 }, 0, 0,  VE, "level" },
> +    { "2.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 22 }, 0, 0,  VE, "level" },
> +    { "3.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 30 }, 0, 0,  VE, "level" },
> +    { "3.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 31 }, 0, 0,  VE, "level" },
> +    { "3.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 32 }, 0, 0,  VE, "level" },
> +    { "4.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 40 }, 0, 0,  VE, "level" },
> +    { "4.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 41 }, 0, 0,  VE, "level" },
> +    { "4.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 42 }, 0, 0,  VE, "level" },
> +    { "5.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 50 }, 0, 0,  VE, "level" },
> +    { "5.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 51 }, 0, 0,  VE, "level" },
> +    { "5.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 52 }, 0, 0,  VE, "level" },
> +    { "6.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 60 }, 0, 0,  VE, "level" },
> +    { "6.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 61 }, 0, 0,  VE, "level" },
> +    { "6.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 62 }, 0, 0,  VE, "level" },
> +
> +
> +    /// Quality Preset
> +    { "quality",        "Quality Preference",                   OFFSET(quality),    AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_QUALITY_PRESET_SPEED    }, AMF_VIDEO_ENCODER_QUALITY_PRESET_BALANCED, AMF_VIDEO_ENCODER_QUALITY_PRESET_QUALITY, VE, "quality" },
> +    { "speed",          "Prefer Speed",                         0,                  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_QUALITY_PRESET_SPEED    },       0, 0, VE, "quality" },
> +    { "balanced",       "Balanced",                             0,                  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_QUALITY_PRESET_BALANCED },    0, 0, VE, "quality" },
> +    { "quality",        "Prefer Quality",                       0,                  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_QUALITY_PRESET_QUALITY  },     0, 0, VE, "quality" },
> +
> +    // Dynamic
> +    /// Rate Control Method
> +    { "rc",             "Rate Control Method",                  OFFSET(rate_control_mode),  AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR    }, AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP, AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_LATENCY_CONSTRAINED_VBR, VE, "rc" },
> +    { "cqp",            "Constant Quantization Parameter",      0,                          AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP             }, 0, 0, VE, "rc" },
> +    { "cbr",            "Constant Bitrate",                     0,                          AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CBR                     }, 0, 0, VE, "rc" },
> +    { "vbr_peak",       "Peak Contrained Variable Bitrate",     0,                          AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR    }, 0, 0, VE, "rc" },
> +    { "vbr_latency",    "Latency Constrained Variable Bitrate", 0,                          AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_LATENCY_CONSTRAINED_VBR }, 0, 0, VE, "rc" },
> +
> +    /// Enforce HRD, Filler Data, VBAQ, Frame Skipping
> +    { "enforce_hrd",    "Enforce HRD",                          OFFSET(enforce_hrd),        AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
> +    { "filler_data",    "Filler Data Enable",                   OFFSET(filler_data),        AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
> +    { "vbaq",           "Enable VBAQ",                          OFFSET(enable_vbaq),        AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
> +    { "frame_skipping", "Rate Control Based Frame Skip",        OFFSET(skip_frame),         AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE, NULL },
> +
> +    /// QP Values
> +    { "qp_i",           "Quantization Parameter for I-Frame",   OFFSET(qp_i),               AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 51, VE },
> +    { "qp_p",           "Quantization Parameter for P-Frame",   OFFSET(qp_p),               AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 51, VE },
> +    { "qp_b",           "Quantization Parameter for B-Frame",   OFFSET(qp_b),               AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 51, VE },
> +
> +    /// Pre-Pass, Pre-Analysis, Two-Pass
> +    { "preanalysis",    "Pre-Analysis Mode",                    OFFSET(preanalysis),        AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
> +
> +    /// Maximum Access Unit Size
> +    { "max_au_size",    "Maximum Access Unit Size for rate control (in bits)",   OFFSET(max_au_size),        AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE, NULL },

Did you check whether this really means the maximum access unit size?  If yes, what is the use-case for that?

> +
> +    /// Header Insertion Spacing
> +    { "header_spacing", "Header Insertion Spacing",             OFFSET(header_spacing),     AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1000, VE },
> +
> +    /// B-Frames
> +    // BPicturesPattern=bf
> +    { "bf_delta_qp",    "B-Picture Delta QP",                   OFFSET(b_frame_delta_qp),   AV_OPT_TYPE_INT,  { .i64 = 4 }, -10, 10, VE, NULL },
> +    { "bf_ref",         "Enable Reference to B-Frames",         OFFSET(b_frame_ref),        AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE, NULL },
> +    { "bf_ref_delta_qp","Reference B-Picture Delta QP",         OFFSET(ref_b_frame_delta_qp), AV_OPT_TYPE_INT,  { .i64 = 4 }, -10, 10, VE, NULL },
> +
> +    /// Intra-Refresh
> +    { "intra_refresh_mb","Intra Refresh MBs Number Per Slot in Macroblocks",       OFFSET(intra_refresh_mb),    AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
> +
> +    /// coder
> +    { "coder",          "Coding Type",                          OFFSET(coding_mode),   AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_UNDEFINED }, AMF_VIDEO_ENCODER_UNDEFINED, AMF_VIDEO_ENCODER_CALV, VE, "coding" },
> +    { "auto",           "Automatic",                            0,                     AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_UNDEFINED }, 0, 0, VE, "coder" },
> +    { "cavlc",          "Context Adaptive Variable-Length Coding", 0,                  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_CALV },      0, 0, VE, "coder" },
> +    { "cabac",          "Context Adaptive Binary Arithmetic Coding", 0,                AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_CABAC },     0, 0, VE, "coder" },
> +
> +    { "me_half_pel",    "Enable ME Half Pixel",                 OFFSET(me_half_pel),   AV_OPT_TYPE_BOOL,  { .i64 = 1 }, 0, 1, VE, NULL },
> +    { "me_quater_pel",  "Enable ME Quarter Pixel ",             OFFSET(me_quater_pel), AV_OPT_TYPE_BOOL,  { .i64 = 1 }, 0, 1, VE, NULL },

What is the use-case for these options?

> +
> +    { NULL }
> +};
> +
> +static av_cold int amf_encode_init_h264(AVCodecContext *avctx)
> +{
> +    int                 ret = 0;
> +    AMF_RESULT          res = AMF_OK;
> +    AmfContext         *ctx = avctx->priv_data;
> +    AMFVariantStruct    var = {0};
> +    amf_int64           profile = 0;
> +    amf_int64           profile_level = 0;
> +    AMFBuffer          *buffer;
> +    AMFGuid             guid;
> +
> +    AMFSize             framesize = AMFConstructSize(avctx->width, avctx->height);
> +    AMFRate             framerate = AMFConstructRate(avctx->time_base.den, avctx->time_base.num * avctx->ticks_per_frame);

avctx->framerate should be set if the input is CFR, use that first.
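I.e. something like:

    AMFRate framerate;

    if (avctx->framerate.num > 0 && avctx->framerate.den > 0)
        framerate = AMFConstructRate(avctx->framerate.num, avctx->framerate.den);
    else
        framerate = AMFConstructRate(avctx->time_base.den,
                                     avctx->time_base.num * avctx->ticks_per_frame);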

> +
> +    int                 deblocking_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
> +
> +    if ((ret = ff_amf_encode_init(avctx)) != 0)
> +        return ret;
> +
> +    // Static parameters
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_USAGE, ctx->usage);
> +
> +    AMF_ASSIGN_PROPERTY_SIZE(res, ctx->encoder, AMF_VIDEO_ENCODER_FRAMESIZE, framesize);
> +
> +    AMF_ASSIGN_PROPERTY_RATE(res, ctx->encoder, AMF_VIDEO_ENCODER_FRAMERATE, framerate);
> +
> +    profile = avctx->profile;

avctx->profile might be (is by default, even) FF_PROFILE_UNKNOWN, which is not zero.
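So map the known values explicitly and fall back to the private option otherwise, e.g.:

    profile = ctx->profile;
    switch (avctx->profile) {
    case FF_PROFILE_H264_BASELINE: profile = AMF_VIDEO_ENCODER_PROFILE_BASELINE; break;
    case FF_PROFILE_H264_MAIN:     profile = AMF_VIDEO_ENCODER_PROFILE_MAIN;     break;
    case FF_PROFILE_H264_HIGH:     profile = AMF_VIDEO_ENCODER_PROFILE_HIGH;     break;
    }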

> +    if (profile == 0) {
> +        profile = ctx->profile;
> +    }
> +
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PROFILE, profile);
> +
> +    profile_level = avctx->level;

Similarly FF_LEVEL_UNKNOWN.

> +    if (profile_level == 0) {
> +        profile_level = ctx->level;
> +    }
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PROFILE_LEVEL, profile_level);
> +
> +    // Maximum Reference Frames
> +    if (avctx->refs != -1) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MAX_NUM_REFRAMES, avctx->refs);
> +    }
> +    if (avctx->sample_aspect_ratio.den && avctx->sample_aspect_ratio.num) {
> +        AMFRatio ratio = AMFConstructRatio(avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
> +        AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder, AMF_VIDEO_ENCODER_ASPECT_RATIO, ratio);
> +    }
> +
> +    /// Color Range (Partial/TV/MPEG or Full/PC/JPEG)
> +    if (avctx->color_range == AVCOL_RANGE_JPEG) {
> +        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_FULL_RANGE_COLOR, 1);
> +    }
> +
> +    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_RATE_CONTROL_PREANALYSIS_ENABLE, AMF_VIDEO_ENCODER_PREENCODE_DISABLED);
> +        if (ctx->preanalysis)
> +            av_log(ctx, AV_LOG_WARNING, "Pre-Analysis is not supported by cqp Rate Control Method, automatically disabled. \n");
> +    } else {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_RATE_CONTROL_PREANALYSIS_ENABLE, ctx->preanalysis);
> +    }
> +
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_QUALITY_PRESET, ctx->quality);
> +
> +    // Initialize Encoder
> +    res = ctx->encoder->pVtbl->Init(ctx->encoder, ctx->format, avctx->width, avctx->height);
> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "encoder->Init() failed with error %d \n", res);
> +
> +    // Dynamic parmaters
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD, ctx->rate_control_mode);
> +
> +    /// VBV Buffer
> +    if (avctx->rc_buffer_size != 0)
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_VBV_BUFFER_SIZE, avctx->rc_buffer_size);
> +    if (avctx->rc_initial_buffer_occupancy != 0) {
> +        int amf_buffer_fullness = avctx->rc_buffer_size * 64 / avctx->rc_initial_buffer_occupancy;
> +        if (amf_buffer_fullness > 64)
> +            amf_buffer_fullness = 64;

I still don't understand what this is trying to do.

rc_initial_buffer_occupancy is necessarily at most rc_buffer_size, so the calculation will always get a number >= 64, so you always pass 64.

What are the units of AMF_VIDEO_ENCODER_INITIAL_VBV_BUFFER_FULLNESS meant to be?
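If 64 is meant to be "completely full" (which the 0-64 clamp suggests), then the ratio presumably needs to go the other way, something like:

    int amf_buffer_fullness = (int)(avctx->rc_initial_buffer_occupancy * 64LL /
                                    avctx->rc_buffer_size);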

> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_INITIAL_VBV_BUFFER_FULLNESS, amf_buffer_fullness);
> +    }
> +    /// Maximum Access Unit Size
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MAX_AU_SIZE, ctx->max_au_size);
> +
> +    // QP Minimum / Maximum
> +    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MIN_QP, 0);
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MAX_QP, 51);
> +    } else {
> +        if (avctx->qmin != -1) {
> +            int qval = avctx->qmin > 51 ? 51 : avctx->qmin;
> +            AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MIN_QP, qval);
> +        }
> +        if (avctx->qmax != -1) {
> +            int qval = avctx->qmax > 51 ? 51 : avctx->qmax;
> +            AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MAX_QP, qval);
> +        }
> +    }
> +    // QP Values
> +    if (ctx->qp_i != -1)
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_QP_I, ctx->qp_i);
> +    if (ctx->qp_p != -1)
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_QP_P, ctx->qp_p);
> +    if (ctx->qp_b != -1)
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_QP_B, ctx->qp_b);
> +
> +    // Bitrate
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_TARGET_BITRATE, avctx->bit_rate);
> +
> +    // Peak (max) bitrate. If not set make it out of bit_rate for best results.
> +    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CBR) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PEAK_BITRATE, avctx->bit_rate);
> +    } else {
> +        int rc_max_rate = avctx->rc_max_rate >= avctx->bit_rate ? avctx->rc_max_rate : avctx->bit_rate * 13 / 10;

Please calculate a real value here as suggested in the previous comments rather than using 13/10.

> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PEAK_BITRATE, rc_max_rate);
> +    }
> +    // Enforce HRD, Filler Data, VBAQ, Frame Skipping, Deblocking Filter
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_ENFORCE_HRD, !!ctx->enforce_hrd);
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_FILLER_DATA_ENABLE, !!ctx->filler_data);
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_RATE_CONTROL_SKIP_FRAME_ENABLE, !!ctx->skip_frame);
> +    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {
> +        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_ENABLE_VBAQ, 0);
> +        if (ctx->enable_vbaq)
> +            av_log(ctx, AV_LOG_WARNING, "VBAQ is not supported by cqp Rate Control Method, automatically disabled. \n");
> +    } else {
> +        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_ENABLE_VBAQ, !!ctx->enable_vbaq);
> +    }
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_DE_BLOCKING_FILTER, !!deblocking_filter);
> +
> +    // B-Frames
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_B_PIC_PATTERN, avctx->max_b_frames);
> +    if (avctx->max_b_frames && res == AMF_OK) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_B_PIC_DELTA_QP, ctx->b_frame_delta_qp);
> +        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_B_REFERENCE_ENABLE, !!ctx->b_frame_ref);
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_REF_B_PIC_DELTA_QP, ctx->ref_b_frame_delta_qp);
> +    }
> +
> +    // Keyframe Interval
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_IDR_PERIOD, avctx->gop_size);
> +
> +    // Header Insertion Spacing
> +    if (ctx->header_spacing >= 0)
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEADER_INSERTION_SPACING, ctx->header_spacing);
> +
> +    // Intra-Refresh, Slicing
> +    if (ctx->intra_refresh_mb > 0)
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_INTRA_REFRESH_NUM_MBS_PER_SLOT, ctx->intra_refresh_mb);
> +    if (avctx->slices > 1)
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_SLICES_PER_FRAME, avctx->slices);
> +
> +    // Coding
> +    if (ctx->coding_mode != 0)
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_CABAC_ENABLE, ctx->coding_mode);
> +
> +    // Motion Estimation
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_MOTION_HALF_PIXEL, !!ctx->me_half_pel);
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_MOTION_QUARTERPIXEL, !!ctx->me_quater_pel);
> +
> +    // fill extradata
> +    res = AMFVariantInit(&var);
> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "AMFVariantInit() failed with error %d \n", res);
> +
> +    res = ctx->encoder->pVtbl->GetProperty(ctx->encoder, AMF_VIDEO_ENCODER_EXTRADATA, &var);
> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "GetProperty(AMF_VIDEO_ENCODER_EXTRADATA) failed with error %d \n", res);
> +    AMF_RETURN_IF_FALSE(ctx, var.pInterface != NULL, AVERROR_BUG, "GetProperty(AMF_VIDEO_ENCODER_EXTRADATA) returned NULL \n");
> +
> +    guid = IID_AMFBuffer();
> +
> +    res = var.pInterface->pVtbl->QueryInterface(var.pInterface, &guid, (void**)&buffer); // query for buffer interface
> +    if (res != AMF_OK) {
> +        var.pInterface->pVtbl->Release(var.pInterface);
> +    }
> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "QueryInterface(IID_AMFBuffer) failed with error %d \n", res);
> +
> +    avctx->extradata_size = (int)buffer->pVtbl->GetSize(buffer);
> +    avctx->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
> +    if (!avctx->extradata) {
> +        buffer->pVtbl->Release(buffer);
> +        var.pInterface->pVtbl->Release(var.pInterface);
> +        return AVERROR(ENOMEM);
> +    }
> +    memcpy(avctx->extradata, buffer->pVtbl->GetNative(buffer), avctx->extradata_size);
> +
> +    buffer->pVtbl->Release(buffer);
> +    var.pInterface->pVtbl->Release(var.pInterface);
> +    
> +    return 0;
> +}
> +
> +static const AVCodecDefault defaults[] = {
> +    { "refs",       "-1" },
> +    { "aspect",     "0" },
> +    { "sar",        "0" },
> +    { "qmin",       "-1" },
> +    { "qmax",       "-1" },
> +    { "b",          "2M" },
> +    { "maxrate",    "3M" },
> +    { "g",          "250" },
> +    { "keyint_min", "0" },
> +    { "bf",         "0" },
> +    { "slices",     "1" },
> +    { NULL },
> +};
> +
> +static const AVClass h264_amf_class = {
> +    .class_name = "h264_amf",
> +    .item_name = av_default_item_name,
> +    .option = options,
> +    .version = LIBAVUTIL_VERSION_INT,
> +};
> +//TODO declare as HW encoder when available
> +AVCodec ff_h264_amf_encoder = {
> +    .name = "h264_amf",
> +    .long_name = NULL_IF_CONFIG_SMALL("AMD AMF H.264 Encoder"),
> +    .type = AVMEDIA_TYPE_VIDEO,
> +    .id = AV_CODEC_ID_H264,
> +    .init = amf_encode_init_h264,
> +    .encode2 = ff_amf_encode_frame,
> +    .close = ff_amf_encode_close,
> +    .priv_data_size = sizeof(AmfContext),
> +    .priv_class = &h264_amf_class,
> +    .defaults = defaults,
> +    .capabilities = AV_CODEC_CAP_DELAY,
> +    .caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
> +    .pix_fmts = ff_amf_pix_fmts,
> +};
> diff --git a/libavcodec/amfenc_hevc.c b/libavcodec/amfenc_hevc.c
> new file mode 100644
> index 0000000..41bc5c8
> --- /dev/null
> +++ b/libavcodec/amfenc_hevc.c
> @@ -0,0 +1,289 @@
> +/*
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +#include "amfenc.h"
> +
> +#include "libavutil/internal.h"
> +#include "libavutil/opt.h"
> +#include "internal.h"
> +
> +#define OFFSET(x) offsetof(AmfContext, x)
> +#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
> +static const AVOption options[] = {
> +    { "usage",          "Set the encoding usage",             OFFSET(usage),          AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING }, AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING, AMF_VIDEO_ENCODER_HEVC_USAGE_WEBCAM, VE, "usage" },
> +    { "transcoding",    "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING },         0, 0, VE, "usage" },
> +    { "ultralowlatency","", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_ULTRA_LOW_LATENCY },    0, 0, VE, "usage" },
> +    { "lowlatency",     "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_LOW_LATENCY },          0, 0, VE, "usage" },
> +    { "webcam",         "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_WEBCAM },               0, 0, VE, "usage" },
> +
> +    { "profile",        "Set the profile (default main)",           OFFSET(profile),   AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN }, AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN, AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN, VE, "profile" },
> +    { "main",           "", 0,                      AV_OPT_TYPE_CONST,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN }, 0, 0, VE, "profile" },
> +
> +    { "profile_tier",   "Set the profile tier (default main)",      OFFSET(tier), AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_TIER_MAIN }, AMF_VIDEO_ENCODER_HEVC_TIER_MAIN, AMF_VIDEO_ENCODER_HEVC_TIER_HIGH, VE, "tier" },
> +    { "main",           "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_TIER_MAIN }, 0, 0, VE, "tier" },
> +    { "high",           "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_TIER_HIGH }, 0, 0, VE, "tier" },
> +
> +    { "level",          "Set the encoding level (default auto)",    OFFSET(level), AV_OPT_TYPE_INT,{ .i64 = 0 }, 0, AMF_LEVEL_6_2, VE, "level" },
> +    { "auto",           "", 0, AV_OPT_TYPE_CONST, { .i64 = 0             }, 0, 0, VE, "level" },
> +    { "1.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_1   }, 0, 0, VE, "level" },
> +    { "2.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_2   }, 0, 0, VE, "level" },
> +    { "2.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_2_1 }, 0, 0, VE, "level" },
> +    { "3.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_3   }, 0, 0, VE, "level" },
> +    { "3.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_3_1 }, 0, 0, VE, "level" },
> +    { "4.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_4   }, 0, 0, VE, "level" },
> +    { "4.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_4_1 }, 0, 0, VE, "level" },
> +    { "5.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_5   }, 0, 0, VE, "level" },
> +    { "5.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_5_1 }, 0, 0, VE, "level" },
> +    { "5.2",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_5_2 }, 0, 0, VE, "level" },
> +    { "6.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_6   }, 0, 0, VE, "level" },
> +    { "6.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_6_1 }, 0, 0, VE, "level" },
> +    { "6.2",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_6_2 }, 0, 0, VE, "level" },
> +
> +    { "quality",        "Set the encoding quality",                 OFFSET(quality),      AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_SPEED }, AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_QUALITY, AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_SPEED, VE, "quality" },
> +    { "balanced",       "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_BALANCED }, 0, 0, VE, "quality" },
> +    { "speed",          "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_SPEED    }, 0, 0, VE, "quality" },
> +    { "quality",        "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_QUALITY  }, 0, 0, VE, "quality" },
> +
> +    { "rc",             "Set the rate control mode",                OFFSET(rate_control_mode),   AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR }, AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CONSTANT_QP, AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CBR, VE, "rc" },
> +    { "cqp",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CONSTANT_QP             }, 0, 0, VE, "rc" },
> +    { "cbr",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CBR                     }, 0, 0, VE, "rc" },
> +    { "vbr_peak",       "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR    }, 0, 0, VE, "rc" },
> +    { "vbr_latency",    "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_LATENCY_CONSTRAINED_VBR }, 0, 0, VE, "rc" },
> +
> +    { "header_insertion_mode",        "Set header insertion mode",  OFFSET(header_insertion_mode),      AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_NONE }, AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_NONE, AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_IDR_ALIGNED, VE, "hdrmode" },
> +    { "none",           "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_NONE        }, 0, 0, VE, "hdrmode" },
> +    { "gop",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_GOP_ALIGNED }, 0, 0, VE, "hdrmode" },
> +    { "idr",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_IDR_ALIGNED }, 0, 0, VE, "hdrmode" },
> +
> +    { "gops_per_idr",    "GOPs per IDR 0-no IDR will be inserted",  OFFSET(gops_per_idr),  AV_OPT_TYPE_INT,{ .i64 = 60 }, 0, INT_MAX, VE },
> +    { "preanalysis",    "Enable preanalysis",                       OFFSET(preanalysis),   AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
> +    { "vbaq",           "Enable VBAQ",                              OFFSET(enable_vbaq),   AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
> +    { "enforce_hrd",    "Enforce HRD",                              OFFSET(enforce_hrd),   AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
> +    { "filler_data",    "Filler Data Enable",                       OFFSET(filler_data),   AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
> +    { "max_au_size",    "Max AU Size in bits",                      OFFSET(max_au_size),   AV_OPT_TYPE_INT,{ .i64 = 0 }, 0, INT_MAX, VE, NULL },

Same question as in H.264.  Also other stuff below.

> +    { "min_qp_i",       "min quantization parameter for I-frame",   OFFSET(min_qp_i),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
> +    { "max_qp_i",       "max quantization parameter for I-frame",   OFFSET(max_qp_i),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
> +    { "min_qp_p",       "min quantization parameter for P-frame",   OFFSET(min_qp_p),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
> +    { "max_qp_p",       "max quantization parameter for P-frame",   OFFSET(max_qp_p),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
> +    { "qp_p",           "quantization parameter for P-frame",       OFFSET(qp_p),          AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
> +    { "qp_i",           "quantization parameter for I-frame",       OFFSET(qp_i),          AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
> +    { "skip_frame",     "Rate Control Based Frame Skip",            OFFSET(skip_frame),    AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
> +    { "me_half_pel",    "Enable ME Half Pixel",                     OFFSET(me_half_pel),   AV_OPT_TYPE_BOOL,{ .i64 = 1 }, 0, 1, VE, NULL },
> +    { "me_quater_pel",  "Enable ME Quarter Pixel ",                 OFFSET(me_quater_pel), AV_OPT_TYPE_BOOL,{ .i64 = 1 }, 0, 1, VE, NULL },
> +
> +    { NULL }
> +};
> +
> +static av_cold int amf_encode_init_hevc(AVCodecContext *avctx)
> +{
> +    int                 ret = 0;
> +    AMF_RESULT          res = AMF_OK;
> +    AmfContext         *ctx = avctx->priv_data;
> +    AMFVariantStruct    var = {0};
> +    amf_int64           profile = 0;
> +    amf_int64           profile_level = 0;
> +    AMFBuffer          *buffer;
> +    AMFGuid             guid;
> +
> +    AMFSize             framesize = AMFConstructSize(avctx->width, avctx->height);
> +    AMFRate             framerate = AMFConstructRate(avctx->time_base.den, avctx->time_base.num * avctx->ticks_per_frame);
> +
> +    int                 deblocking_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
> +
> +    if ((ret = ff_amf_encode_init(avctx)) < 0)
> +        return ret;
> +
> +    // init static parameters
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_USAGE, ctx->usage);
> +
> +    AMF_ASSIGN_PROPERTY_SIZE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_FRAMESIZE, framesize);
> +
> +    AMF_ASSIGN_PROPERTY_RATE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_FRAMERATE, framerate);
> +
> +    switch (avctx->profile) {
> +    case FF_PROFILE_HEVC_MAIN:
> +        profile = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN;
> +        break;
> +    default:
> +        break;
> +    }
> +    if (profile == 0) {
> +        profile = ctx->profile;
> +    }
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PROFILE, profile);
> +
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_TIER, ctx->tier);
> +
> +    profile_level = avctx->level;
> +    if (profile_level == 0) {
> +        profile_level = ctx->level;
> +    }
> +    if (profile_level != 0) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PROFILE_LEVEL, profile_level);
> +    }
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET, ctx->quality);
> +    // Maximum Reference Frames
> +    if (avctx->refs != 0) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MAX_NUM_REFRAMES, avctx->refs);
> +    }
> +    // Aspect Ratio
> +    if (avctx->sample_aspect_ratio.den && avctx->sample_aspect_ratio.num) {
> +        AMFRatio ratio = AMFConstructRatio(avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
> +        AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_ASPECT_RATIO, ratio);
> +    }
> +
> +    // Picture control properties
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_NUM_GOPS_PER_IDR, ctx->gops_per_idr);
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_GOP_SIZE, avctx->gop_size);
> +    if (avctx->slices > 1) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_SLICES_PER_FRAME, avctx->slices);
> +    }
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_DE_BLOCKING_FILTER_DISABLE, deblocking_filter);

What about SAO?

> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE, ctx->header_insertion_mode);
> +
> +    // Rate control
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD, ctx->rate_control_mode);
> +    if (avctx->rc_buffer_size)
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_VBV_BUFFER_SIZE, avctx->rc_buffer_size);
> +
> +    if (avctx->rc_initial_buffer_occupancy != 0) {
> +        int amf_buffer_fullness = avctx->rc_buffer_size * 64 / avctx->rc_initial_buffer_occupancy;
> +        if (amf_buffer_fullness > 64)
> +            amf_buffer_fullness = 64;
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_INITIAL_VBV_BUFFER_FULLNESS, amf_buffer_fullness);
> +    }
> +    // Pre-Pass, Pre-Analysis, Two-Pass
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_PREANALYSIS_ENABLE, ctx->preanalysis);
> +
> +    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CONSTANT_QP) {
> +        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_ENABLE_VBAQ, false);
> +        if (ctx->enable_vbaq)
> +            av_log(ctx, AV_LOG_WARNING, "VBAQ is not supported by cqp Rate Control Method, automatically disabled. \n");
> +    } else {
> +        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_ENABLE_VBAQ, !!ctx->enable_vbaq);
> +    }
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MOTION_HALF_PIXEL, ctx->me_half_pel);
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MOTION_QUARTERPIXEL, ctx->me_quater_pel);
> +
> +    // init encoder
> +    res = ctx->encoder->pVtbl->Init(ctx->encoder, ctx->format, avctx->width, avctx->height);
> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "encoder->Init() failed with error %d \n", res);
> +
> +    // init dynamic rate control params
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_ENFORCE_HRD, ctx->enforce_hrd);
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_FILLER_DATA_ENABLE, ctx->filler_data);
> +
> +    // Bitrate
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_TARGET_BITRATE, avctx->bit_rate);
> +
> +    // peak (max) bitrate. If not set make it out of bit_rate for best results.
> +    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CBR) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PEAK_BITRATE, avctx->bit_rate);
> +    } else {
> +        int rc_max_rate = avctx->rc_max_rate >= avctx->bit_rate ? avctx->rc_max_rate : avctx->bit_rate * 13 / 10;
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PEAK_BITRATE, rc_max_rate);
> +    }
> +
> +    // init dynamic picture control params
> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MAX_AU_SIZE, ctx->max_au_size);
> +
> +
> +    if (ctx->min_qp_i != -1) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MIN_QP_I, ctx->min_qp_i);
> +    }
> +    if (ctx->max_qp_i != -1) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MAX_QP_I, ctx->max_qp_i);
> +    }
> +    if (ctx->min_qp_p != -1) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MIN_QP_P, ctx->min_qp_p);
> +    }
> +    if (ctx->max_qp_p != -1) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MAX_QP_P, ctx->max_qp_p);
> +    }
> +
> +    if (ctx->qp_i != -1) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_QP_I, ctx->qp_i);
> +    }
> +    if (ctx->qp_p != -1) {
> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_QP_P, ctx->qp_p);
> +    }
> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_SKIP_FRAME_ENABLE, ctx->skip_frame);
> +
> +
> +    // fill extradata
> +    res = AMFVariantInit(&var);
> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "AMFVariantInit() failed with error %d \n", res);
> +
> +    res = ctx->encoder->pVtbl->GetProperty(ctx->encoder, AMF_VIDEO_ENCODER_HEVC_EXTRADATA, &var);
> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "GetProperty(AMF_VIDEO_ENCODER_HEVC_EXTRADATA) failed with error %d \n", res);
> +    AMF_RETURN_IF_FALSE(ctx, var.pInterface != NULL, AVERROR_BUG, "GetProperty(AMF_VIDEO_ENCODER_HEVC_EXTRADATA) returned NULL \n");
> +
> +    guid = IID_AMFBuffer();
> +
> +    res = var.pInterface->pVtbl->QueryInterface(var.pInterface, &guid, (void**)&buffer); // query for buffer interface
> +    if (res != AMF_OK) {
> +        var.pInterface->pVtbl->Release(var.pInterface);
> +    }
> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "QueryInterface(IID_AMFBuffer) failed with error %d \n", res);
> +
> +    avctx->extradata_size = (int)buffer->pVtbl->GetSize(buffer);
> +    avctx->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
> +    if (!avctx->extradata) {
> +        buffer->pVtbl->Release(buffer);
> +        var.pInterface->pVtbl->Release(var.pInterface);
> +        return AVERROR(ENOMEM);
> +    }
> +    memcpy(avctx->extradata, buffer->pVtbl->GetNative(buffer), avctx->extradata_size);
> +
> +    buffer->pVtbl->Release(buffer);
> +    var.pInterface->pVtbl->Release(var.pInterface);
> +
> +    return 0;
> +}
> +static const AVCodecDefault defaults[] = {
> +    { "b",       "2M" },
> +    { "maxrate", "3M" },
> +    { "qmin",   "-1" },
> +    { "qmax",   "-1" },
> +    { "qdiff",  "-1" },
> +    { "qblur",  "-1" },
> +    { "qcomp",  "-1" },

Unused?

> +    { NULL },
> +};
> +static const AVClass hevc_amf_class = {
> +    .class_name = "hevc_amf",
> +    .item_name = av_default_item_name,
> +    .option = options,
> +    .version = LIBAVUTIL_VERSION_INT,
> +};
> +//TODO declare as HW encoder when available
> +AVCodec ff_hevc_amf_encoder = {
> +    .name           = "hevc_amf",
> +    .long_name      = NULL_IF_CONFIG_SMALL("AMD AMF HEVC encoder"),
> +    .type           = AVMEDIA_TYPE_VIDEO,
> +    .id             = AV_CODEC_ID_HEVC,
> +    .init           = amf_encode_init_hevc,
> +    .encode2        = ff_amf_encode_frame,
> +    .close          = ff_amf_encode_close,
> +    .priv_data_size = sizeof(AmfContext),
> +    .priv_class     = &hevc_amf_class,
> +    .defaults       = defaults,
> +    .capabilities   = AV_CODEC_CAP_DELAY,
> +    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
> +    .pix_fmts       = ff_amf_pix_fmts,
> +};
> diff --git a/libavcodec/version.h b/libavcodec/version.h
> index 226da19..6c0d7a8 100644
> --- a/libavcodec/version.h
> +++ b/libavcodec/version.h
> @@ -28,8 +28,8 @@
>  #include "libavutil/version.h"
>  
>  #define LIBAVCODEC_VERSION_MAJOR  58
> -#define LIBAVCODEC_VERSION_MINOR   0
> -#define LIBAVCODEC_VERSION_MICRO 101
> +#define LIBAVCODEC_VERSION_MINOR   1
> +#define LIBAVCODEC_VERSION_MICRO 100
>  
>  #define LIBAVCODEC_VERSION_INT  AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
>                                                 LIBAVCODEC_VERSION_MINOR, \
> 

Thanks,

- Mark
mmironov Oct. 30, 2017, 9:30 p.m. UTC | #2
> > +static void AMF_CDECL_CALL AMFTraceWriter_Write(AMFTraceWriter *pThis,
> > +    const wchar_t *scope, const wchar_t *message)
> > +{
> > +    AmfTraceWriter *tracer = (AmfTraceWriter*)pThis;
> > +    av_log(tracer->avctx, AV_LOG_DEBUG, "%ls: %ls", scope, message);
> 
> Does the message necessarily include a newline already?

Yes.

> > +    init_fun = (AMFInit_Fn)dlsym(ctx->library, AMF_INIT_FUNCTION_NAME);
> > +    AMF_RETURN_IF_FALSE(ctx, init_fun != NULL, AVERROR_UNKNOWN, "DLL %s failed to find function %s. \n", AMF_DLL_NAMEA, AMF_INIT_FUNCTION_NAME);
> 
> I think do s/ \n/\n/ for all of these messages.

Sorry, didn't get this.

> 
> > +
> > +    version_fun = (AMFQueryVersion_Fn)dlsym(ctx->library, AMF_QUERY_VERSION_FUNCTION_NAME);
> > +    AMF_RETURN_IF_FALSE(ctx, version_fun != NULL, AVERROR_UNKNOWN, "DLL %s failed to find function %s. \n", AMF_DLL_NAMEA, AMF_QUERY_VERSION_FUNCTION_NAME);
> > +
> > +    res = version_fun(&ctx->version);
> > +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "%s failed with error %d. \n", AMF_QUERY_VERSION_FUNCTION_NAME, res);
> > +    res = init_fun(AMF_FULL_VERSION, &ctx->factory);
> > +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "%s failed with error %d. \n", AMF_INIT_FUNCTION_NAME, res);
> > +    res = ctx->factory->pVtbl->GetTrace(ctx->factory, &ctx->trace);
> > +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "GetTrace() failed with error %d. \n", res);
> > +    res = ctx->factory->pVtbl->GetDebug(ctx->factory, &ctx->debug);
> > +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "GetDebug() failed with error %d. \n", res);
> > +    return 0;
> > +}
> > +
> > +static int amf_init_context(AVCodecContext *avctx)
> > +{
> > +    AmfContext         *ctx = avctx->priv_data;
> > +    AMF_RESULT          res = AMF_OK;
> > +
> > +    // the return of these functions indicates old state and do not affect behaviour
> > +    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_CONSOLE, 0);
> > +#if AMF_DEBUG_TRACE
> > +    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, 1);
> > +    ctx->trace->pVtbl->SetWriterLevel(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, AMF_TRACE_TRACE);
> > +    ctx->trace->pVtbl->SetGlobalLevel(ctx->trace, AMF_TRACE_TRACE);
> > +#else
> > +    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, 0);
> > +#endif
> 
> I don't much like this compile-time option.  What sort of messages does the trace writer actually give you?  Will a user ever want to enable it?

Two points:
1. There is extensive AMF logging that can help diagnose a problem. Do we want to have it all the time in AV_LOG_DEBUG?
2. AMD can trace to debug output and this is useful, but for normal ffmpeg operation it is under #ifdef.
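Purely as an illustration (not in the patch): a runtime option could look roughly like this, assuming a hypothetical log_to_dbg field added to AmfContext and reusing the same AMF trace calls quoted above.

    /* hypothetical option table entry:
     *   { "log_to_dbg", "Enable AMF trace output to the debugger",
     *     OFFSET(log_to_dbg), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE },
     * and in amf_init_context(): */
    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_CONSOLE, 0);
    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT,
                                    !!ctx->log_to_dbg);
    if (ctx->log_to_dbg)
        ctx->trace->pVtbl->SetWriterLevel(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT,
                                          AMF_TRACE_TRACE);

This keeps the extra AMF output available to users without a compile-time switch.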

> 

> > +

> > +static GUID  AMFTextureArrayIndexGUID =

> AMFTextureArrayIndexGUIDDef;

> 

> GUID is a Windows type, should this be AMFGuid?  (I tried removing the

> check and compiling on Linux, other than the D3D11 stuff this is the only

> error.)

> 


This is Windows type and used with Windows interface ID3D11Texture2D.
When Linux support is added all this section will be under #ifdef.

> > +

> > +int ff_amf_encode_frame(AVCodecContext *avctx, AVPacket *pkt,

> > +                        const AVFrame *frame, int *got_packet)

> > +{

> > +    int             ret = 0;

> > +    AMF_RESULT      res = AMF_OK;

> > +    AmfContext     *ctx = avctx->priv_data;

> > +    AMFSurface     *surface = NULL;

> > +    AMFData        *data = NULL;

> > +    amf_bool       submitted = 0;

> > +

> > +    while (!submitted) {

> > +        if (!frame) { // submit drain

> > +            if (!ctx->eof) { // submit drain onre time only

> > +                res = ctx->encoder->pVtbl->Drain(ctx->encoder);

> > +                if (res == AMF_INPUT_FULL) {

> > +                    av_usleep(1000); // input queue is full: wait, poll and submit

> Drain again

> > +                                     // need to get some output and try again

> > +                } else if (res == AMF_OK) {

> > +                    ctx->eof = 1; // drain started

> > +                    submitted = 1;

> > +                }

> > +            }

> > +        } else { // submit frame

> > +            if (surface == NULL) { // prepare surface from frame one time only

> > +                if (frame->hw_frames_ctx && ( // HW frame detected

> > +                                              // check if the same hw_frames_ctx as used in

> initialization

> > +                    (ctx->hw_frames_ctx && frame->hw_frames_ctx->data == ctx-

> >hw_frames_ctx->data) ||

> > +                    // check if the same hw_device_ctx as used in initialization

> > +                    (ctx->hw_device_ctx && ((AVHWFramesContext*)frame-

> >hw_frames_ctx->data)->device_ctx ==

> > +                    (AVHWDeviceContext*)ctx->hw_device_ctx->data)

> > +                )) {

> > +                    ID3D11Texture2D* texture = (ID3D11Texture2D*)frame-

> >data[0]; // actual texture

> > +                    int index = (int)(size_t)frame->data[1]; // index is a slice in

> texture array is - set to tell AMF which slice to use

> 

> (int)(intptr_t)frame->data[1];

> 

> > +                    texture->lpVtbl->SetPrivateData(texture,

> &AMFTextureArrayIndexGUID, sizeof(index), &index);

> > +

> > +                    res = ctx->context->pVtbl->CreateSurfaceFromDX11Native(ctx-

> >context, texture, &surface, NULL); // wrap to AMF surface

> > +                    surface->pVtbl->SetCrop(surface, 0, 0, frame->width, frame-

> >height); // decode surfaces are vertically aligned by 16 tell AMF real size

> 

> "decode surfaces"?  These need not come from a decoder.  Does it work with

> hwupload?

> 

> > +                    surface->pVtbl->SetPts(surface, frame->pts);

> > +                } else {

> > +                    res = ctx->context->pVtbl->AllocSurface(ctx->context,

> AMF_MEMORY_HOST, ctx->format, avctx->width, avctx->height, &surface);

> > +                    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG,

> "AllocSurface() failed  with error %d \n", res);

> > +                    amf_copy_surface(avctx, frame, surface);

> > +                }

> > +            }

> > +            // encode

> > +            res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder,

> (AMFData*)surface);

> > +            if (res == AMF_INPUT_FULL) { // handle full queue

> > +                av_usleep(1000); // input queue is full: wait, poll and submit

> surface again

> > +            } else {

> > +                surface->pVtbl->Release(surface);

> > +                surface = NULL;

> > +                AMF_RETURN_IF_FALSE(ctx, res == AMF_OK,

> AVERROR_UNKNOWN, "SubmitInput() failed with error %d \n", res);

> > +                submitted = 1;

> > +            }

> > +        }

> > +        // poll results

> > +        if (!data) {

> > +            res = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data);

> > +            if (data) {

> > +                AMFBuffer* buffer;

> > +                AMFGuid guid = IID_AMFBuffer();

> > +                data->pVtbl->QueryInterface(data, &guid, (void**)&buffer); //

> query for buffer interface

> > +                ret = amf_copy_buffer(avctx, pkt, buffer);

> > +                if (!ret)

> > +                    *got_packet = 1;

> > +                buffer->pVtbl->Release(buffer);

> > +                data->pVtbl->Release(data);

> > +                if (ctx->eof) {

> > +                    submitted = 1; // we are in the drain state - no submissions

> > +                }

> > +            } else if (res == AMF_EOF) {

> > +                submitted = 1; // drain complete

> > +            } else {

> > +                if (!submitted) {

> > +                    av_usleep(1000); // wait and poll again

> > +                }

> > +            }

> > +        }

> > +    }

> > +    return ret;

> > +}

> 
> I still think this would be much better off using the send_frame()/receive_packet() API.  Even if your API doesn't expose any information about the queue length, you only need to hold a single input frame transiently to get around that (the user is not allowed to call send_frame() twice in a row without calling receive_packet()).

So to implement this I would have to:
- in send_frame(), if AMF_INPUT_FULL is returned, store the input frame (or a copy?)
- in the next receive_packet(), check if a frame is stored
- wait till some output is produced
- resubmit the stored frame
Issues I see:
- Doesn't this logic defeat the purpose of independent send()/receive()?
- How can I report an error if receive() produced a compressed frame but the delayed submission failed?
- This logic depends on the particular logic in the calling code.
- This logic depends on the particular HW behaviour.
- In the future, we would like to output individual slices of a compressed frame.
When this is added, receive_packet() must be called several times to clear space in the HW queue.
Granted, the current implementation also does not cover this case, but a truly independent
send/receive implementation would. A rough sketch of the split is below.
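Not part of the patch: just a minimal sketch, assuming a new delayed_surface field in AmfContext, of how the two callbacks could be split around SubmitInput()/QueryOutput():

    static int amf_send_frame(AVCodecContext *avctx, const AVFrame *frame)
    {
        AmfContext *ctx = avctx->priv_data;
        AMFSurface *surface = NULL;
        AMF_RESULT  res;

        if (ctx->delayed_surface)
            return AVERROR(EAGAIN);          // last frame still pending in the HW queue
        if (!frame) {                        // start draining once
            if (!ctx->eof) {
                res = ctx->encoder->pVtbl->Drain(ctx->encoder);
                if (res == AMF_INPUT_FULL)
                    return AVERROR(EAGAIN);  // poll some output first, then retry
                ctx->eof = 1;
            }
            return 0;
        }
        // ... wrap or copy the frame into an AMFSurface exactly as above ...
        res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)surface);
        if (res == AMF_INPUT_FULL) {
            ctx->delayed_surface = surface;  // keep it, resubmit from receive_packet()
            return 0;
        }
        surface->pVtbl->Release(surface);
        return (res == AMF_OK) ? 0 : AVERROR_UNKNOWN;
    }

    static int amf_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
    {
        AmfContext *ctx  = avctx->priv_data;
        AMFData    *data = NULL;
        AMF_RESULT  res  = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data);

        if (ctx->delayed_surface &&          // retry the buffered submission
            ctx->encoder->pVtbl->SubmitInput(ctx->encoder,
                (AMFData*)ctx->delayed_surface) != AMF_INPUT_FULL) {
            ctx->delayed_surface->pVtbl->Release(ctx->delayed_surface);
            ctx->delayed_surface = NULL;
        }
        if (res == AMF_EOF)
            return AVERROR_EOF;
        if (!data)
            return AVERROR(EAGAIN);
        // ... convert the AMFBuffer into avpkt as amf_copy_buffer() does ...
        data->pVtbl->Release(data);
        return 0;
    }

A delayed-submission failure could then be returned from a later receive_packet() call, which seems acceptable since the packets are produced asynchronously anyway.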


> > +static const AVOption options[] = {

> > +    // Static

> > +    /// Usage

> > +    { "usage",          "Encoder Usage",        OFFSET(usage),

> AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_USAGE_TRANSCONDING

> }, AMF_VIDEO_ENCODER_USAGE_TRANSCONDING,

> AMF_VIDEO_ENCODER_USAGE_WEBCAM, VE, "usage" },

> > +    { "transcoding",    "Generic Transcoding",  0,

> AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_USAGE_TRANSCONDING      }, 0, 0, VE, "usage" },

> > +    { "ultralowlatency","",                     0,              AV_OPT_TYPE_CONST, { .i64

> = AMF_VIDEO_ENCODER_USAGE_ULTRA_LOW_LATENCY }, 0, 0, VE, "usage"

> },

> > +    { "lowlatency",     "",                     0,              AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_USAGE_LOW_LATENCY       }, 0, 0, VE, "usage" },

> > +    { "webcam",         "Webcam",               0,              AV_OPT_TYPE_CONST, {

> .i64 = AMF_VIDEO_ENCODER_USAGE_WEBCAM            }, 0, 0, VE, "usage" },

> > +

> > +    /// Profile,

> > +    { "profile",        "Profile",              OFFSET(profile),AV_OPT_TYPE_INT, {

> .i64 = AMF_VIDEO_ENCODER_PROFILE_MAIN       },

> AMF_VIDEO_ENCODER_PROFILE_BASELINE,

> AMF_VIDEO_ENCODER_PROFILE_HIGH, VE, "profile" },

> > +    { "baseline",       "",                     0,              AV_OPT_TYPE_CONST, { .i64 =

> AMF_VIDEO_ENCODER_PROFILE_BASELINE }, 0, 0, VE, "profile" },

> 
> You still don't support baseline profile.

Talked to codec folks. Currently this is really baseline by mistake. The intention was to expose only
"constrained baseline". They want to correct this, but it should go into the driver first and then be
reflected in the AMF API. Once done, this entry will be updated.

> > +    /// Maximum Access Unit Size
> > +    { "max_au_size",    "Maximum Access Unit Size for rate control (in bits)", OFFSET(max_au_size),        AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE, NULL },
> 
> Did you check whether this really means the maximum access unit size?  If yes, what is the use-case for that?

I've changed the description. This parameter is used in rate control to limit AU size.
It is useful for streaming.

> > +    { "me_half_pel",    "Enable ME Half Pixel",

> OFFSET(me_half_pel),   AV_OPT_TYPE_BOOL,  { .i64 = 1 }, 0, 1, VE, NULL },

> > +    { "me_quater_pel",  "Enable ME Quarter Pixel ",

> OFFSET(me_quater_pel), AV_OPT_TYPE_BOOL,  { .i64 = 1 }, 0, 1, VE, NULL },

> 

> What is the use-case for these options?

> 


These are options for motion estimator precision. Spelling is corrected "me_quarter_pel"

> > +

> > +    { NULL }

> > +};

> > +

> > +static av_cold int amf_encode_init_h264(AVCodecContext *avctx)

> > +{

> > +    int                 ret = 0;

> > +    AMF_RESULT          res = AMF_OK;

> > +    AmfContext         *ctx = avctx->priv_data;

> > +    AMFVariantStruct    var = {0};

> > +    amf_int64           profile = 0;

> > +    amf_int64           profile_level = 0;

> > +    AMFBuffer          *buffer;

> > +    AMFGuid             guid;

> > +

> > +    AMFSize             framesize = AMFConstructSize(avctx->width, avctx-

> >height);

> > +    AMFRate             framerate = AMFConstructRate(avctx->time_base.den,

> avctx->time_base.num * avctx->ticks_per_frame);

> 

> avctx->framerate should be set if the input is CFR, use that first.

> 

> > +

> > +    int                 deblocking_filter = (avctx->flags &

> AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;

> > +

> > +    if ((ret = ff_amf_encode_init(avctx)) != 0)

> > +        return ret;

> > +

> > +    // Static parameters

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_USAGE, ctx->usage);

> > +

> > +    AMF_ASSIGN_PROPERTY_SIZE(res, ctx->encoder,

> AMF_VIDEO_ENCODER_FRAMESIZE, framesize);

> > +

> > +    AMF_ASSIGN_PROPERTY_RATE(res, ctx->encoder,

> AMF_VIDEO_ENCODER_FRAMERATE, framerate);

> > +

> > +    profile = avctx->profile;

> 

> avctx->profile might be (is by default, even) FF_PROFILE_UNKNOWN, which is

> not zero.

> 

> > +    if (profile == 0) {

> > +        profile = ctx->profile;

> > +    }

> > +

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_PROFILE, profile);

> > +

> > +    profile_level = avctx->level;

> 

> Similarly FF_LEVEL_UNKNOWN.

> 

> > +    if (profile_level == 0) {

> > +        profile_level = ctx->level;

> > +    }

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_PROFILE_LEVEL, profile_level);

> > +

> > +    // Maximum Reference Frames

> > +    if (avctx->refs != -1) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_MAX_NUM_REFRAMES, avctx->refs);

> > +    }

> > +    if (avctx->sample_aspect_ratio.den && avctx-

> >sample_aspect_ratio.num) {

> > +        AMFRatio ratio = AMFConstructRatio(avctx-

> >sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);

> > +        AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder,

> AMF_VIDEO_ENCODER_ASPECT_RATIO, ratio);

> > +    }

> > +

> > +    /// Color Range (Partial/TV/MPEG or Full/PC/JPEG)

> > +    if (avctx->color_range == AVCOL_RANGE_JPEG) {

> > +        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> AMF_VIDEO_ENCODER_FULL_RANGE_COLOR, 1);

> > +    }

> > +

> > +    if (ctx->rate_control_mode ==

> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_RATE_CONTROL_PREANALYSIS_ENABLE,

> AMF_VIDEO_ENCODER_PREENCODE_DISABLED);

> > +        if (ctx->preanalysis)

> > +            av_log(ctx, AV_LOG_WARNING, "Pre-Analysis is not supported by

> cqp Rate Control Method, automatically disabled. \n");

> > +    } else {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_RATE_CONTROL_PREANALYSIS_ENABLE, ctx-

> >preanalysis);

> > +    }

> > +

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_QUALITY_PRESET, ctx->quality);

> > +

> > +    // Initialize Encoder

> > +    res = ctx->encoder->pVtbl->Init(ctx->encoder, ctx->format, avctx-

> >width, avctx->height);

> > +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "encoder-

> >Init() failed with error %d \n", res);

> > +

> > +    // Dynamic parmaters

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD, ctx-

> >rate_control_mode);

> > +

> > +    /// VBV Buffer

> > +    if (avctx->rc_buffer_size != 0)

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_VBV_BUFFER_SIZE, avctx->rc_buffer_size);

> > +    if (avctx->rc_initial_buffer_occupancy != 0) {

> > +        int amf_buffer_fullness = avctx->rc_buffer_size * 64 / avctx-

> >rc_initial_buffer_occupancy;

> > +        if (amf_buffer_fullness > 64)

> > +            amf_buffer_fullness = 64;

> 
> I still don't understand what this is trying to do.
> 
> rc_initial_buffer_occupancy is necessarily at most rc_buffer_size, so the calculation will always get a number >= 64, so you always pass 64.
> 
> What are the units of AMF_VIDEO_ENCODER_INITIAL_VBV_BUFFER_FULLNESS meant to be?

They are meant to be an abstract value from 0 to 64, where 64 means 100%. Don’t ask me why ☹
The calculation should be the opposite. My fault. A corrected sketch is below.
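For reference, the inverted calculation would look roughly like this (sketch only, same clamping as before):

    if (avctx->rc_buffer_size != 0 && avctx->rc_initial_buffer_occupancy != 0) {
        int amf_buffer_fullness = (int)(avctx->rc_initial_buffer_occupancy * 64LL /
                                        avctx->rc_buffer_size);
        if (amf_buffer_fullness > 64)
            amf_buffer_fullness = 64;
        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
            AMF_VIDEO_ENCODER_INITIAL_VBV_BUFFER_FULLNESS, amf_buffer_fullness);
    }

i.e. rc_initial_buffer_occupancy / rc_buffer_size scaled to the 0..64 range, which cannot exceed 64 since the occupancy is at most the buffer size.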

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_INITIAL_VBV_BUFFER_FULLNESS,

> amf_buffer_fullness);

> > +    }

> > +    /// Maximum Access Unit Size

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_MAX_AU_SIZE, ctx->max_au_size);

> > +

> > +    // QP Minimum / Maximum

> > +    if (ctx->rate_control_mode ==

> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_MIN_QP, 0);

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_MAX_QP, 51);

> > +    } else {

> > +        if (avctx->qmin != -1) {

> > +            int qval = avctx->qmin > 51 ? 51 : avctx->qmin;

> > +            AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_MIN_QP, qval);

> > +        }

> > +        if (avctx->qmax != -1) {

> > +            int qval = avctx->qmax > 51 ? 51 : avctx->qmax;

> > +            AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_MAX_QP, qval);

> > +        }

> > +    }

> > +    // QP Values

> > +    if (ctx->qp_i != -1)

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_QP_I, ctx->qp_i);

> > +    if (ctx->qp_p != -1)

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_QP_P, ctx->qp_p);

> > +    if (ctx->qp_b != -1)

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_QP_B, ctx->qp_b);

> > +

> > +    // Bitrate

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_TARGET_BITRATE, avctx->bit_rate);

> > +

> > +    // Peak (max) bitrate. If not set make it out of bit_rate for best results.

> > +    if (ctx->rate_control_mode ==

> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CBR) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_PEAK_BITRATE, avctx->bit_rate);

> > +    } else {

> > +        int rc_max_rate = avctx->rc_max_rate >= avctx->bit_rate ? avctx-

> >rc_max_rate : avctx->bit_rate * 13 / 10;

> 
> Please calculate a real value here as suggested in the previous comments rather than using 13/10.

The suggestion was to set rc_max_rate to infinity. This will produce unpredictable results.
I can set it to bit_rate, but quality will not be good. Another option would be to generate an error.
I am open to suggestions; one possible fallback is sketched below.
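One possible fallback, sketched here only as an illustration and not as a definitive fix: warn when rc_max_rate is unset and constrain the peak to the target bitrate instead of using the fixed 13/10 factor.

    if (avctx->rc_max_rate >= avctx->bit_rate) {
        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PEAK_BITRATE,
                                  avctx->rc_max_rate);
    } else {
        av_log(ctx, AV_LOG_WARNING,
               "rate control needs a peak bitrate (-maxrate); using the target bitrate as peak\n");
        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PEAK_BITRATE,
                                  avctx->bit_rate);
    }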

> > +    { "max_au_size",    "Max AU Size in bits",

> OFFSET(max_au_size),   AV_OPT_TYPE_INT,{ .i64 = 0 }, 0, INT_MAX, VE, NULL

> },

> 

> Same question as in H.264.  Also other stuff below.

> 

> > +    { "min_qp_i",       "min quantization parameter for I-frame",

> OFFSET(min_qp_i),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },

> > +    { "max_qp_i",       "max quantization parameter for I-frame",

> OFFSET(max_qp_i),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },

> > +    { "min_qp_p",       "min quantization parameter for P-frame",

> OFFSET(min_qp_p),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },

> > +    { "max_qp_p",       "max quantization parameter for P-frame",

> OFFSET(max_qp_p),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },

> > +    { "qp_p",           "quantization parameter for P-frame",       OFFSET(qp_p),

> AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },

> > +    { "qp_i",           "quantization parameter for I-frame",       OFFSET(qp_i),

> AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },

> > +    { "skip_frame",     "Rate Control Based Frame Skip",

> OFFSET(skip_frame),    AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },

> > +    { "me_half_pel",    "Enable ME Half Pixel",

> OFFSET(me_half_pel),   AV_OPT_TYPE_BOOL,{ .i64 = 1 }, 0, 1, VE, NULL },

> > +    { "me_quater_pel",  "Enable ME Quarter Pixel ",

> OFFSET(me_quater_pel), AV_OPT_TYPE_BOOL,{ .i64 = 1 }, 0, 1, VE, NULL },

> > +

> > +    { NULL }

> > +};

> > +

> > +static av_cold int amf_encode_init_hevc(AVCodecContext *avctx)

> > +{

> > +    int                 ret = 0;

> > +    AMF_RESULT          res = AMF_OK;

> > +    AmfContext         *ctx = avctx->priv_data;

> > +    AMFVariantStruct    var = {0};

> > +    amf_int64           profile = 0;

> > +    amf_int64           profile_level = 0;

> > +    AMFBuffer          *buffer;

> > +    AMFGuid             guid;

> > +

> > +    AMFSize             framesize = AMFConstructSize(avctx->width, avctx-

> >height);

> > +    AMFRate             framerate = AMFConstructRate(avctx->time_base.den,

> avctx->time_base.num * avctx->ticks_per_frame);

> > +

> > +    int                 deblocking_filter = (avctx->flags &

> AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;

> > +

> > +    if ((ret = ff_amf_encode_init(avctx)) < 0)

> > +        return ret;

> > +

> > +    // init static parameters

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_USAGE, ctx->usage);

> > +

> > +    AMF_ASSIGN_PROPERTY_SIZE(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_FRAMESIZE, framesize);

> > +

> > +    AMF_ASSIGN_PROPERTY_RATE(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_FRAMERATE, framerate);

> > +

> > +    switch (avctx->profile) {

> > +    case FF_PROFILE_HEVC_MAIN:

> > +        profile = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN;

> > +        break;

> > +    default:

> > +        break;

> > +    }

> > +    if (profile == 0) {

> > +        profile = ctx->profile;

> > +    }

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_PROFILE, profile);

> > +

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_TIER, ctx->tier);

> > +

> > +    profile_level = avctx->level;

> > +    if (profile_level == 0) {

> > +        profile_level = ctx->level;

> > +    }

> > +    if (profile_level != 0) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_PROFILE_LEVEL, profile_level);

> > +    }

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET, ctx->quality);

> > +    // Maximum Reference Frames

> > +    if (avctx->refs != 0) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_MAX_NUM_REFRAMES, avctx->refs);

> > +    }

> > +    // Aspect Ratio

> > +    if (avctx->sample_aspect_ratio.den && avctx-

> >sample_aspect_ratio.num) {

> > +        AMFRatio ratio = AMFConstructRatio(avctx-

> >sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);

> > +        AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_ASPECT_RATIO, ratio);

> > +    }

> > +

> > +    // Picture control properties

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_NUM_GOPS_PER_IDR, ctx->gops_per_idr);

> > +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_GOP_SIZE, avctx->gop_size);

> > +    if (avctx->slices > 1) {

> > +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> AMF_VIDEO_ENCODER_HEVC_SLICES_PER_FRAME, avctx->slices);

> > +    }

> > +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_DE_BLOCKING_FILTER_DISABLE, deblocking_filter);
> 
> What about SAO?

SAO ???

> 
> Thanks,
> 
> - Mark
> _______________________________________________
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> http://ffmpeg.org/mailman/listinfo/ffmpeg-devel


Thanks,
Mikhail
Carl Eugen Hoyos Oct. 30, 2017, 10:19 p.m. UTC | #3
2017-10-30 18:56 GMT+01:00 mmironov <mikhail.mironov@amd.com>:

[...]

> +const enum AVPixelFormat ff_amf_pix_fmts[] = {
> +    AV_PIX_FMT_NV12,
> +    AV_PIX_FMT_0RGB32,
> +    AV_PIX_FMT_0BGR32,
> +    AV_PIX_FMT_YUV420P,
> +    AV_PIX_FMT_D3D11,
> +    AV_PIX_FMT_NONE
> +};
> +
> +typedef struct FormatMap {
> +    enum AVPixelFormat       av_format;
> +    enum AMF_SURFACE_FORMAT  amf_format;
> +} FormatMap;
> +
> +static const FormatMap format_map[] =
> +{
> +    { AV_PIX_FMT_NONE,       AMF_SURFACE_UNKNOWN },
> +    { AV_PIX_FMT_NV12,       AMF_SURFACE_NV12 },

> +    { AV_PIX_FMT_0BGR32,       AMF_SURFACE_BGRA },
> +    { AV_PIX_FMT_0RGB32,       AMF_SURFACE_RGBA },

On little-endian, this is different from what you originally sent:
Which one is correct? (Visually)

> +    { AV_PIX_FMT_GRAY8,      AMF_SURFACE_GRAY8 },

> +    { AV_PIX_FMT_BGR0,       AMF_SURFACE_BGRA },

Please remove this line to reduce the confusion.
(Or fix above if this line is correct.)

> +    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YV12 },
> +    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YUV420P },

Do you agree that it is impossible that both lines are correct?

> +    { AV_PIX_FMT_YUYV422,    AMF_SURFACE_YUY2 },
> +    { AV_PIX_FMT_D3D11,      AMF_SURFACE_NV12 },

Carl Eugen

[...]
Mark Thompson Oct. 30, 2017, 10:35 p.m. UTC | #4
On 30/10/17 21:30, Mironov, Mikhail wrote:
>>> +static void AMF_CDECL_CALL AMFTraceWriter_Write(AMFTraceWriter
>> *pThis,
>>> +    const wchar_t *scope, const wchar_t *message)
>>> +{
>>> +    AmfTraceWriter *tracer = (AmfTraceWriter*)pThis;
>>> +    av_log(tracer->avctx, AV_LOG_DEBUG, "%ls: %ls", scope, message);
>>
>> Does the message necessarily include a newline already?
> 
> Yes.
> 
>>> +    init_fun = (AMFInit_Fn)dlsym(ctx->library,
>> AMF_INIT_FUNCTION_NAME);
>>> +    AMF_RETURN_IF_FALSE(ctx, init_fun != NULL, AVERROR_UNKNOWN,
>> "DLL %s failed to find function %s. \n", AMF_DLL_NAMEA,
>> AMF_INIT_FUNCTION_NAME);
>>
>> I think do s/ \n/\n/ for all of these messages.
> 
> Sorry, didn't get this.

Most of your messages end with a space before the newline; the space probably shouldn't be there.

>>> +
>>> +    version_fun = (AMFQueryVersion_Fn)dlsym(ctx->library,
>> AMF_QUERY_VERSION_FUNCTION_NAME);
>>> +    AMF_RETURN_IF_FALSE(ctx, version_fun != NULL,
>> AVERROR_UNKNOWN, "DLL %s failed to find function %s. \n",
>> AMF_DLL_NAMEA, AMF_QUERY_VERSION_FUNCTION_NAME);
>>> +
>>> +    res = version_fun(&ctx->version);
>>> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "%s
>> failed with error %d. \n", AMF_QUERY_VERSION_FUNCTION_NAME, res);
>>> +    res = init_fun(AMF_FULL_VERSION, &ctx->factory);
>>> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "%s
>> failed with error %d. \n", AMF_INIT_FUNCTION_NAME, res);
>>> +    res = ctx->factory->pVtbl->GetTrace(ctx->factory, &ctx->trace);
>>> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN,
>> "GetTrace() failed with error %d. \n", res);
>>> +    res = ctx->factory->pVtbl->GetDebug(ctx->factory, &ctx->debug);
>>> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN,
>> "GetDebug() failed with error %d. \n", res);
>>> +    return 0;
>>> +}
>>> +
>>> +static int amf_init_context(AVCodecContext *avctx)
>>> +{
>>> +    AmfContext         *ctx = avctx->priv_data;
>>> +    AMF_RESULT          res = AMF_OK;
>>> +
>>> +    // the return of these functions indicates old state and do not affect
>> behaviour
>>> +    ctx->trace->pVtbl->EnableWriter(ctx->trace,
>> AMF_TRACE_WRITER_CONSOLE, 0);
>>> +#if AMF_DEBUG_TRACE
>>> +    ctx->trace->pVtbl->EnableWriter(ctx->trace,
>> AMF_TRACE_WRITER_DEBUG_OUTPUT, 1);
>>> +    ctx->trace->pVtbl->SetWriterLevel(ctx->trace,
>> AMF_TRACE_WRITER_DEBUG_OUTPUT, AMF_TRACE_TRACE);
>>> +    ctx->trace->pVtbl->SetGlobalLevel(ctx->trace, AMF_TRACE_TRACE);
>>> +#else
>>> +    ctx->trace->pVtbl->EnableWriter(ctx->trace,
>> AMF_TRACE_WRITER_DEBUG_OUTPUT, 0);
>>> +#endif
>>
>> I don't much like this compile-time option.  What sort of messages does the
>> trace writer actually give you?  Will a user ever want to enable it?
> 
> Two points:
> 1. There is extensive AMF logging that can help diagnose a problem. Do we want to have it all time in AV_LOG_DEBUG?
> 2. AMD can trace to debug output and this is useful but for normal ffmpeg operation it is under #ifdef.

Help who diagnose a problem?  Either it is useful to a user, in which case put it behind a real option, or it isn't, in which case don't include it at all.  A compile-time option just encourages bitrot on whichever side is not default.

>>> +
>>> +static GUID  AMFTextureArrayIndexGUID =
>> AMFTextureArrayIndexGUIDDef;
>>
>> GUID is a Windows type, should this be AMFGuid?  (I tried removing the
>> check and compiling on Linux, other than the D3D11 stuff this is the only
>> error.)
>>
> 
> This is Windows type and used with Windows interface ID3D11Texture2D.
> When Linux support is added all this section will be under #ifdef.

It might be cleaner to put it inside the function (see below).  Also, it should be const.

>>> +
>>> +int ff_amf_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
>>> +                        const AVFrame *frame, int *got_packet)
>>> +{
>>> +    int             ret = 0;
>>> +    AMF_RESULT      res = AMF_OK;
>>> +    AmfContext     *ctx = avctx->priv_data;
>>> +    AMFSurface     *surface = NULL;
>>> +    AMFData        *data = NULL;
>>> +    amf_bool       submitted = 0;
>>> +
>>> +    while (!submitted) {
>>> +        if (!frame) { // submit drain
>>> +            if (!ctx->eof) { // submit drain onre time only
>>> +                res = ctx->encoder->pVtbl->Drain(ctx->encoder);
>>> +                if (res == AMF_INPUT_FULL) {
>>> +                    av_usleep(1000); // input queue is full: wait, poll and submit
>> Drain again
>>> +                                     // need to get some output and try again
>>> +                } else if (res == AMF_OK) {
>>> +                    ctx->eof = 1; // drain started
>>> +                    submitted = 1;
>>> +                }
>>> +            }
>>> +        } else { // submit frame
>>> +            if (surface == NULL) { // prepare surface from frame one time only
>>> +                if (frame->hw_frames_ctx && ( // HW frame detected
>>> +                                              // check if the same hw_frames_ctx as used in
>> initialization
>>> +                    (ctx->hw_frames_ctx && frame->hw_frames_ctx->data == ctx-
>>> hw_frames_ctx->data) ||
>>> +                    // check if the same hw_device_ctx as used in initialization
>>> +                    (ctx->hw_device_ctx && ((AVHWFramesContext*)frame-
>>> hw_frames_ctx->data)->device_ctx ==
>>> +                    (AVHWDeviceContext*)ctx->hw_device_ctx->data)
>>> +                )) {

(Here.)

>>> +                    ID3D11Texture2D* texture = (ID3D11Texture2D*)frame-
>>> data[0]; // actual texture
>>> +                    int index = (int)(size_t)frame->data[1]; // index is a slice in
>> texture array is - set to tell AMF which slice to use
>>
>> (int)(intptr_t)frame->data[1];
>>
>>> +                    texture->lpVtbl->SetPrivateData(texture,
>> &AMFTextureArrayIndexGUID, sizeof(index), &index);
>>> +
>>> +                    res = ctx->context->pVtbl->CreateSurfaceFromDX11Native(ctx-
>>> context, texture, &surface, NULL); // wrap to AMF surface
>>> +                    surface->pVtbl->SetCrop(surface, 0, 0, frame->width, frame-
>>> height); // decode surfaces are vertically aligned by 16 tell AMF real size
>>
>> "decode surfaces"?  These need not come from a decoder.  Does it work with
>> hwupload?
>>
>>> +                    surface->pVtbl->SetPts(surface, frame->pts);
>>> +                } else {
>>> +                    res = ctx->context->pVtbl->AllocSurface(ctx->context,
>> AMF_MEMORY_HOST, ctx->format, avctx->width, avctx->height, &surface);
>>> +                    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG,
>> "AllocSurface() failed  with error %d \n", res);
>>> +                    amf_copy_surface(avctx, frame, surface);
>>> +                }
>>> +            }
>>> +            // encode
>>> +            res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder,
>> (AMFData*)surface);
>>> +            if (res == AMF_INPUT_FULL) { // handle full queue
>>> +                av_usleep(1000); // input queue is full: wait, poll and submit
>> surface again
>>> +            } else {
>>> +                surface->pVtbl->Release(surface);
>>> +                surface = NULL;
>>> +                AMF_RETURN_IF_FALSE(ctx, res == AMF_OK,
>> AVERROR_UNKNOWN, "SubmitInput() failed with error %d \n", res);
>>> +                submitted = 1;
>>> +            }
>>> +        }
>>> +        // poll results
>>> +        if (!data) {
>>> +            res = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data);
>>> +            if (data) {
>>> +                AMFBuffer* buffer;
>>> +                AMFGuid guid = IID_AMFBuffer();
>>> +                data->pVtbl->QueryInterface(data, &guid, (void**)&buffer); //
>> query for buffer interface
>>> +                ret = amf_copy_buffer(avctx, pkt, buffer);
>>> +                if (!ret)
>>> +                    *got_packet = 1;
>>> +                buffer->pVtbl->Release(buffer);
>>> +                data->pVtbl->Release(data);
>>> +                if (ctx->eof) {
>>> +                    submitted = 1; // we are in the drain state - no submissions
>>> +                }
>>> +            } else if (res == AMF_EOF) {
>>> +                submitted = 1; // drain complete
>>> +            } else {
>>> +                if (!submitted) {
>>> +                    av_usleep(1000); // wait and poll again
>>> +                }
>>> +            }
>>> +        }
>>> +    }
>>> +    return ret;
>>> +}
>>
>> I still think this would be much better off using the
>> send_frame()/receive_packet() API.  Even if your API doesn't expose any
>> information about the queue length, you only need to hold a single input
>> frame transiently to get around that (the user is not allowed to call
>> send_frame() twice in a row without calling receive_packet()).
>>
> 
> So to implement this I would have to:
> - in the send_frame() if AMF_INPUT_FULL is returned - store input frame (or copy?)
> - In the next receive_frame() check if frame is stored
> - Wait till some output is produced
> - resubmit stored frame

Sounds about right.

> Issues I see:
> - Isn't this logic defeat the purpose of independent send()/receive()?
> - How can I report a error if receive() produced a compressed frame but the delayed submission failed?

Since this is asynchronous anyway, just report it at the next available opportunity.

> - This logic depends on the particular logic in the calling code.

The API requires this behaviour of the caller.  See the documentation in avcodec.h.

> - This logic depends on the particular HW behaviour. 

How so?

> - In the future, we would like to output individual slices of a compressed frame. 
> When this added receive_frame() must be called several times to clear space in the HW queue. 
> Granted, current implementation also does not cover this case but truly independent 
> send/receive implementation would. 

Note that the user is required to call receive_packet() repeatedly until it returns EAGAIN, and only then are they allowed to call send_frame() again.
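(For reference, the caller-side pattern the public API expects, illustrative only:

    ret = avcodec_send_frame(avctx, frame);             // may return AVERROR(EAGAIN)
    while ((ret = avcodec_receive_packet(avctx, pkt)) == 0) {
        /* write pkt, then av_packet_unref(pkt) */
    }
    /* only after receive_packet() has returned AVERROR(EAGAIN)
       is the caller expected to call send_frame() again */

so the encoder can rely on the output being drained between submissions.)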

>>> +static const AVOption options[] = {
>>> +    // Static
>>> +    /// Usage
>>> +    { "usage",          "Encoder Usage",        OFFSET(usage),
>> AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_USAGE_TRANSCONDING
>> }, AMF_VIDEO_ENCODER_USAGE_TRANSCONDING,
>> AMF_VIDEO_ENCODER_USAGE_WEBCAM, VE, "usage" },
>>> +    { "transcoding",    "Generic Transcoding",  0,
>> AV_OPT_TYPE_CONST, { .i64 =
>> AMF_VIDEO_ENCODER_USAGE_TRANSCONDING      }, 0, 0, VE, "usage" },
>>> +    { "ultralowlatency","",                     0,              AV_OPT_TYPE_CONST, { .i64
>> = AMF_VIDEO_ENCODER_USAGE_ULTRA_LOW_LATENCY }, 0, 0, VE, "usage"
>> },
>>> +    { "lowlatency",     "",                     0,              AV_OPT_TYPE_CONST, { .i64 =
>> AMF_VIDEO_ENCODER_USAGE_LOW_LATENCY       }, 0, 0, VE, "usage" },
>>> +    { "webcam",         "Webcam",               0,              AV_OPT_TYPE_CONST, {
>> .i64 = AMF_VIDEO_ENCODER_USAGE_WEBCAM            }, 0, 0, VE, "usage" },
>>> +
>>> +    /// Profile,
>>> +    { "profile",        "Profile",              OFFSET(profile),AV_OPT_TYPE_INT, {
>> .i64 = AMF_VIDEO_ENCODER_PROFILE_MAIN       },
>> AMF_VIDEO_ENCODER_PROFILE_BASELINE,
>> AMF_VIDEO_ENCODER_PROFILE_HIGH, VE, "profile" },
>>> +    { "baseline",       "",                     0,              AV_OPT_TYPE_CONST, { .i64 =
>> AMF_VIDEO_ENCODER_PROFILE_BASELINE }, 0, 0, VE, "profile" },
>>
>> You still don't support baseline profile.
> 
> Talked to codec folks. Currently this is really baseline by mistake. The intention was to expose only 
> "constrained baseline" They want to correct this but it should go into the driver first and then 
> reflected in AMF API. Once done this entry will be updated.

Ok, so baseline profile will not be included at all, and then constrained baseline added later?  That sounds fine.

>>> +    /// Maximum Access Unit Size
>>> +    { "max_au_size",    "Maximum Access Unit Size for rate control (in bits)",
>> OFFSET(max_au_size),        AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE,
>> NULL },
>>
>> Did you check whether this really means the maximum access unit size?  If
>> yes, what is the use-case for that?
>>
> 
> I've changed the description. This parameter is used in rate control to limit AU size. 
> It is useful for streaming.

When do you want to explicitly set an access unit size limit?  That sort of thing is better limited by setting maxrate with a shorter window, IMO.

>>> +    { "me_half_pel",    "Enable ME Half Pixel",
>> OFFSET(me_half_pel),   AV_OPT_TYPE_BOOL,  { .i64 = 1 }, 0, 1, VE, NULL },
>>> +    { "me_quater_pel",  "Enable ME Quarter Pixel ",
>> OFFSET(me_quater_pel), AV_OPT_TYPE_BOOL,  { .i64 = 1 }, 0, 1, VE, NULL },
>>
>> What is the use-case for these options?
>>
> 
> These are options for motion estimator precision. The spelling has been corrected to "me_quarter_pel".

What I mean is, why would anyone ever set these options to zero?

>>> +
>>> +    { NULL }
>>> +};
>>> +
>>> +static av_cold int amf_encode_init_h264(AVCodecContext *avctx)
>>> +{
>>> +    int                 ret = 0;
>>> +    AMF_RESULT          res = AMF_OK;
>>> +    AmfContext         *ctx = avctx->priv_data;
>>> +    AMFVariantStruct    var = {0};
>>> +    amf_int64           profile = 0;
>>> +    amf_int64           profile_level = 0;
>>> +    AMFBuffer          *buffer;
>>> +    AMFGuid             guid;
>>> +
>>> +    AMFSize             framesize = AMFConstructSize(avctx->width, avctx-
>>> height);
>>> +    AMFRate             framerate = AMFConstructRate(avctx->time_base.den,
>> avctx->time_base.num * avctx->ticks_per_frame);
>>
>> avctx->framerate should be set if the input is CFR, use that first.
>>
>>> +
>>> +    int                 deblocking_filter = (avctx->flags &
>> AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
>>> +
>>> +    if ((ret = ff_amf_encode_init(avctx)) != 0)
>>> +        return ret;
>>> +
>>> +    // Static parameters
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_USAGE, ctx->usage);
>>> +
>>> +    AMF_ASSIGN_PROPERTY_SIZE(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_FRAMESIZE, framesize);
>>> +
>>> +    AMF_ASSIGN_PROPERTY_RATE(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_FRAMERATE, framerate);
>>> +
>>> +    profile = avctx->profile;
>>
>> avctx->profile might be (is by default, even) FF_PROFILE_UNKNOWN, which is
>> not zero.
>>
>>> +    if (profile == 0) {
>>> +        profile = ctx->profile;
>>> +    }
>>> +
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_PROFILE, profile);
>>> +
>>> +    profile_level = avctx->level;
>>
>> Similarly FF_LEVEL_UNKNOWN.
>>
>>> +    if (profile_level == 0) {
>>> +        profile_level = ctx->level;
>>> +    }
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_PROFILE_LEVEL, profile_level);
>>> +
>>> +    // Maximum Reference Frames
>>> +    if (avctx->refs != -1) {
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_MAX_NUM_REFRAMES, avctx->refs);
>>> +    }
>>> +    if (avctx->sample_aspect_ratio.den && avctx-
>>> sample_aspect_ratio.num) {
>>> +        AMFRatio ratio = AMFConstructRatio(avctx-
>>> sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
>>> +        AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_ASPECT_RATIO, ratio);
>>> +    }
>>> +
>>> +    /// Color Range (Partial/TV/MPEG or Full/PC/JPEG)
>>> +    if (avctx->color_range == AVCOL_RANGE_JPEG) {
>>> +        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_FULL_RANGE_COLOR, 1);
>>> +    }
>>> +
>>> +    if (ctx->rate_control_mode ==
>> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_RATE_CONTROL_PREANALYSIS_ENABLE,
>> AMF_VIDEO_ENCODER_PREENCODE_DISABLED);
>>> +        if (ctx->preanalysis)
>>> +            av_log(ctx, AV_LOG_WARNING, "Pre-Analysis is not supported by
>> cqp Rate Control Method, automatically disabled. \n");
>>> +    } else {
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_RATE_CONTROL_PREANALYSIS_ENABLE, ctx-
>>> preanalysis);
>>> +    }
>>> +
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_QUALITY_PRESET, ctx->quality);
>>> +
>>> +    // Initialize Encoder
>>> +    res = ctx->encoder->pVtbl->Init(ctx->encoder, ctx->format, avctx-
>>> width, avctx->height);
>>> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "encoder-
>>> Init() failed with error %d \n", res);
>>> +
>>> +    // Dynamic parmaters
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD, ctx-
>>> rate_control_mode);
>>> +
>>> +    /// VBV Buffer
>>> +    if (avctx->rc_buffer_size != 0)
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_VBV_BUFFER_SIZE, avctx->rc_buffer_size);
>>> +    if (avctx->rc_initial_buffer_occupancy != 0) {
>>> +        int amf_buffer_fullness = avctx->rc_buffer_size * 64 / avctx-
>>> rc_initial_buffer_occupancy;
>>> +        if (amf_buffer_fullness > 64)
>>> +            amf_buffer_fullness = 64;
>>
>> I still don't understand what this is trying to do.
>>
>> rc_initial_buffer_occupancy is necessarily at most rc_buffer_size, so the
>> calculation will always get a number >= 64, so you always pass 64.
>>
>> What are the units of
>> AMF_VIDEO_ENCODER_INITIAL_VBV_BUFFER_FULLNESS meant to be?
>>
> 
> They are meant to be an abstract value from 0 to 64, where 64 means 100%. Don’t ask me why ☹
> The calculation should be the opposite. My fault.

Right, that makes more sense.
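
For reference, a corrected mapping along those lines might look roughly like this (a sketch only, assuming the 0..64 scale described above and using the macros already present in the patch):

    /* Sketch: map rc_initial_buffer_occupancy (in bits) onto AMF's 0..64
     * fullness scale, where 64 means 100% full. Guarded against division
     * by zero and clamped to the valid range. */
    if (avctx->rc_buffer_size != 0 && avctx->rc_initial_buffer_occupancy != 0) {
        int amf_buffer_fullness = (int)((int64_t)avctx->rc_initial_buffer_occupancy * 64 /
                                        avctx->rc_buffer_size);
        if (amf_buffer_fullness > 64)
            amf_buffer_fullness = 64;
        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
            AMF_VIDEO_ENCODER_INITIAL_VBV_BUFFER_FULLNESS, amf_buffer_fullness);
    }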

>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_INITIAL_VBV_BUFFER_FULLNESS,
>> amf_buffer_fullness);
>>> +    }
>>> +    /// Maximum Access Unit Size
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_MAX_AU_SIZE, ctx->max_au_size);
>>> +
>>> +    // QP Minimum / Maximum
>>> +    if (ctx->rate_control_mode ==
>> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_MIN_QP, 0);
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_MAX_QP, 51);
>>> +    } else {
>>> +        if (avctx->qmin != -1) {
>>> +            int qval = avctx->qmin > 51 ? 51 : avctx->qmin;
>>> +            AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_MIN_QP, qval);
>>> +        }
>>> +        if (avctx->qmax != -1) {
>>> +            int qval = avctx->qmax > 51 ? 51 : avctx->qmax;
>>> +            AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_MAX_QP, qval);
>>> +        }
>>> +    }
>>> +    // QP Values
>>> +    if (ctx->qp_i != -1)
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_QP_I, ctx->qp_i);
>>> +    if (ctx->qp_p != -1)
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_QP_P, ctx->qp_p);
>>> +    if (ctx->qp_b != -1)
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_QP_B, ctx->qp_b);
>>> +
>>> +    // Bitrate
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_TARGET_BITRATE, avctx->bit_rate);
>>> +
>>> +    // Peak (max) bitrate. If not set make it out of bit_rate for best results.
>>> +    if (ctx->rate_control_mode ==
>> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CBR) {
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_PEAK_BITRATE, avctx->bit_rate);
>>> +    } else {
>>> +        int rc_max_rate = avctx->rc_max_rate >= avctx->bit_rate ? avctx-
>>> rc_max_rate : avctx->bit_rate * 13 / 10;
>>
>> Please calculate a real value here as suggested in the previous comments
>> rather than using 13/10.
> 
> The suggestion was to set rc_max_rate to infinity. This will produce unpredictable results.
> I can set it to bit_rate, but the quality will not be good. Another option would be to generate an error.
> I am open to suggestions.

Is the window over which rc_max_rate applies defined anywhere?  If so, then if rc_buffer_size is set you can calculate rc_max_rate as (rc_buffer_size + bit_rate * window) / window (i.e. the maximum number of bits which could be included in one window region given the buffer constraints).

If rc_buffer_size isn't set either, then it isn't meant to be constrained - the average should be right over the long term, but locally it doesn't matter.  Hence infinity (or at least some very large value), to not impose any constraint.
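
A rough sketch of that calculation (illustration only; "window", in seconds, is a hypothetical parameter, not something the patch or AMF defines):

    /* Sketch: pick a peak bitrate when the user did not set rc_max_rate.
     * "window" is a hypothetical span (in seconds) over which the peak
     * rate is enforced; rc_buffer_size and bit_rate come from AVCodecContext. */
    int64_t peak_bitrate;
    if (avctx->rc_max_rate >= avctx->bit_rate && avctx->rc_max_rate > 0) {
        peak_bitrate = avctx->rc_max_rate;
    } else if (avctx->rc_buffer_size > 0) {
        const double window = 1.0; /* assumed enforcement window, in seconds */
        peak_bitrate = (int64_t)((avctx->rc_buffer_size + avctx->bit_rate * window) / window);
    } else {
        peak_bitrate = INT64_MAX / 2; /* effectively unconstrained */
    }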

>>> +    { "max_au_size",    "Max AU Size in bits",
>> OFFSET(max_au_size),   AV_OPT_TYPE_INT,{ .i64 = 0 }, 0, INT_MAX, VE, NULL
>> },
>>
>> Same question as in H.264.  Also other stuff below.
>>
>>> +    { "min_qp_i",       "min quantization parameter for I-frame",
>> OFFSET(min_qp_i),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
>>> +    { "max_qp_i",       "max quantization parameter for I-frame",
>> OFFSET(max_qp_i),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
>>> +    { "min_qp_p",       "min quantization parameter for P-frame",
>> OFFSET(min_qp_p),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
>>> +    { "max_qp_p",       "max quantization parameter for P-frame",
>> OFFSET(max_qp_p),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
>>> +    { "qp_p",           "quantization parameter for P-frame",       OFFSET(qp_p),
>> AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
>>> +    { "qp_i",           "quantization parameter for I-frame",       OFFSET(qp_i),
>> AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
>>> +    { "skip_frame",     "Rate Control Based Frame Skip",
>> OFFSET(skip_frame),    AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
>>> +    { "me_half_pel",    "Enable ME Half Pixel",
>> OFFSET(me_half_pel),   AV_OPT_TYPE_BOOL,{ .i64 = 1 }, 0, 1, VE, NULL },
>>> +    { "me_quater_pel",  "Enable ME Quarter Pixel ",
>> OFFSET(me_quater_pel), AV_OPT_TYPE_BOOL,{ .i64 = 1 }, 0, 1, VE, NULL },
>>> +
>>> +    { NULL }
>>> +};
>>> +
>>> +static av_cold int amf_encode_init_hevc(AVCodecContext *avctx)
>>> +{
>>> +    int                 ret = 0;
>>> +    AMF_RESULT          res = AMF_OK;
>>> +    AmfContext         *ctx = avctx->priv_data;
>>> +    AMFVariantStruct    var = {0};
>>> +    amf_int64           profile = 0;
>>> +    amf_int64           profile_level = 0;
>>> +    AMFBuffer          *buffer;
>>> +    AMFGuid             guid;
>>> +
>>> +    AMFSize             framesize = AMFConstructSize(avctx->width, avctx-
>>> height);
>>> +    AMFRate             framerate = AMFConstructRate(avctx->time_base.den,
>> avctx->time_base.num * avctx->ticks_per_frame);
>>> +
>>> +    int                 deblocking_filter = (avctx->flags &
>> AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
>>> +
>>> +    if ((ret = ff_amf_encode_init(avctx)) < 0)
>>> +        return ret;
>>> +
>>> +    // init static parameters
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_HEVC_USAGE, ctx->usage);
>>> +
>>> +    AMF_ASSIGN_PROPERTY_SIZE(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_HEVC_FRAMESIZE, framesize);
>>> +
>>> +    AMF_ASSIGN_PROPERTY_RATE(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_HEVC_FRAMERATE, framerate);
>>> +
>>> +    switch (avctx->profile) {
>>> +    case FF_PROFILE_HEVC_MAIN:
>>> +        profile = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN;
>>> +        break;
>>> +    default:
>>> +        break;
>>> +    }
>>> +    if (profile == 0) {
>>> +        profile = ctx->profile;
>>> +    }
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_HEVC_PROFILE, profile);
>>> +
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_HEVC_TIER, ctx->tier);
>>> +
>>> +    profile_level = avctx->level;
>>> +    if (profile_level == 0) {
>>> +        profile_level = ctx->level;
>>> +    }
>>> +    if (profile_level != 0) {
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_HEVC_PROFILE_LEVEL, profile_level);
>>> +    }
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET, ctx->quality);
>>> +    // Maximum Reference Frames
>>> +    if (avctx->refs != 0) {
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_HEVC_MAX_NUM_REFRAMES, avctx->refs);
>>> +    }
>>> +    // Aspect Ratio
>>> +    if (avctx->sample_aspect_ratio.den && avctx-
>>> sample_aspect_ratio.num) {
>>> +        AMFRatio ratio = AMFConstructRatio(avctx-
>>> sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
>>> +        AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_HEVC_ASPECT_RATIO, ratio);
>>> +    }
>>> +
>>> +    // Picture control properties
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_HEVC_NUM_GOPS_PER_IDR, ctx->gops_per_idr);
>>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_HEVC_GOP_SIZE, avctx->gop_size);
>>> +    if (avctx->slices > 1) {
>>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_HEVC_SLICES_PER_FRAME, avctx->slices);
>>> +    }
>>> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,
>> AMF_VIDEO_ENCODER_HEVC_DE_BLOCKING_FILTER_DISABLE,
>> deblocking_filter);
>>
>> What about SAO?
> 
> SAO ???

You're looking at AV_CODEC_FLAG_LOOP_FILTER to disable this, so you might want to consider both loop filters in H.265, not just the deblocking filter.

- Mark
Carl Eugen Hoyos Oct. 30, 2017, 10:36 p.m. UTC | #5
2017-10-30 23:35 GMT+01:00 Mark Thompson <sw@jkqxz.net>:
> On 30/10/17 21:30, Mironov, Mikhail wrote:
>>>> +static void AMF_CDECL_CALL AMFTraceWriter_Write(AMFTraceWriter
>>> *pThis,
>>>> +    const wchar_t *scope, const wchar_t *message)
>>>> +{
>>>> +    AmfTraceWriter *tracer = (AmfTraceWriter*)pThis;
>>>> +    av_log(tracer->avctx, AV_LOG_DEBUG, "%ls: %ls", scope, message);
>>>
>>> Does the message necessarily include a newline already?
>>
>> Yes.
>>
>>>> +    init_fun = (AMFInit_Fn)dlsym(ctx->library,
>>> AMF_INIT_FUNCTION_NAME);
>>>> +    AMF_RETURN_IF_FALSE(ctx, init_fun != NULL, AVERROR_UNKNOWN,
>>> "DLL %s failed to find function %s. \n", AMF_DLL_NAMEA,
>>> AMF_INIT_FUNCTION_NAME);
>>>
>>> I think do s/ \n/\n/ for all of these messages.
>>
>> Sorry, didn't get this.
>
> Most of your messages end with a space before the newline,
> the space probably shouldn't be there.

Correct: While I like the space, it shouldn't be there for
consistency.

Carl Eugen
mmironov Oct. 30, 2017, 10:53 p.m. UTC | #6
Mikhail

> -----Original Message-----

> From: ffmpeg-devel [mailto:ffmpeg-devel-bounces@ffmpeg.org] On Behalf

> Of Carl Eugen Hoyos

> Sent: October 30, 2017 6:19 PM

> To: FFmpeg development discussions and patches <ffmpeg-

> devel@ffmpeg.org>

> Subject: Re: [FFmpeg-devel] Added HW H.264 and HEVC encoding for AMD

> GPUs based on AMF SDK

> 

> 2017-10-30 18:56 GMT+01:00 mmironov <mikhail.mironov@amd.com>:

> 

> [...]

> 

> > +const enum AVPixelFormat ff_amf_pix_fmts[] = {

> > +    AV_PIX_FMT_NV12,

> > +    AV_PIX_FMT_0RGB32,

> > +    AV_PIX_FMT_0BGR32,

> > +    AV_PIX_FMT_YUV420P,

> > +    AV_PIX_FMT_D3D11,

> > +    AV_PIX_FMT_NONE

> > +};

> > +

> > +typedef struct FormatMap {

> > +    enum AVPixelFormat       av_format;

> > +    enum AMF_SURFACE_FORMAT  amf_format; } FormatMap;

> > +

> > +static const FormatMap format_map[] = {

> > +    { AV_PIX_FMT_NONE,       AMF_SURFACE_UNKNOWN },

> > +    { AV_PIX_FMT_NV12,       AMF_SURFACE_NV12 },

> 

> > +    { AV_PIX_FMT_0BGR32,       AMF_SURFACE_BGRA },

> > +    { AV_PIX_FMT_0RGB32,       AMF_SURFACE_RGBA },

> 

> On little-endian, this is different from what you originally sent:

> Which one is correct? (Visually)


It should be RGB0 and BGR0 all the time. I will correct this.

> 

> > +    { AV_PIX_FMT_GRAY8,      AMF_SURFACE_GRAY8 },

> 

> > +    { AV_PIX_FMT_BGR0,       AMF_SURFACE_BGRA },

> 

> Please remove this line to reduce the confusion.

> (Or fix above if this line is correct.)



OK

> 

> > +    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YV12 },

> > +    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YUV420P },

> 

> Do you agree that it is impossible that both lines are correct?


Agree. Will fix.

> 

> > +    { AV_PIX_FMT_YUYV422,    AMF_SURFACE_YUY2 },

> > +    { AV_PIX_FMT_D3D11,      AMF_SURFACE_NV12 },

> 

> Carl Eugen

> 

> [...]
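
Putting the format-map corrections discussed above together (RGB0/BGR0 naming and a single YUV420P entry), the fixed table might look roughly like this (a sketch only, pending the next submission):

static const FormatMap format_map[] = {
    { AV_PIX_FMT_NONE,       AMF_SURFACE_UNKNOWN },
    { AV_PIX_FMT_NV12,       AMF_SURFACE_NV12 },
    { AV_PIX_FMT_BGR0,       AMF_SURFACE_BGRA },
    { AV_PIX_FMT_RGB0,       AMF_SURFACE_RGBA },
    { AV_PIX_FMT_GRAY8,      AMF_SURFACE_GRAY8 },
    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YUV420P },
    { AV_PIX_FMT_YUYV422,    AMF_SURFACE_YUY2 },
    { AV_PIX_FMT_D3D11,      AMF_SURFACE_NV12 },
};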

mmironov Oct. 30, 2017, 11:20 p.m. UTC | #7
> >>> +

> >>> +int ff_amf_encode_frame(AVCodecContext *avctx, AVPacket *pkt,

> >>> +                        const AVFrame *frame, int *got_packet) {

> >>> +    int             ret = 0;

> >>> +    AMF_RESULT      res = AMF_OK;

> >>> +    AmfContext     *ctx = avctx->priv_data;

> >>> +    AMFSurface     *surface = NULL;

> >>> +    AMFData        *data = NULL;

> >>> +    amf_bool       submitted = 0;

> >>> +

> >>> +    while (!submitted) {

> >>> +        if (!frame) { // submit drain

> >>> +            if (!ctx->eof) { // submit drain onre time only

> >>> +                res = ctx->encoder->pVtbl->Drain(ctx->encoder);

> >>> +                if (res == AMF_INPUT_FULL) {

> >>> +                    av_usleep(1000); // input queue is full: wait,

> >>> + poll and submit

> >> Drain again

> >>> +                                     // need to get some output and try again

> >>> +                } else if (res == AMF_OK) {

> >>> +                    ctx->eof = 1; // drain started

> >>> +                    submitted = 1;

> >>> +                }

> >>> +            }

> >>> +        } else { // submit frame

> >>> +            if (surface == NULL) { // prepare surface from frame one time

> only

> >>> +                if (frame->hw_frames_ctx && ( // HW frame detected

> >>> +                                              // check if the same

> >>> + hw_frames_ctx as used in

> >> initialization

> >>> +                    (ctx->hw_frames_ctx &&

> >>> + frame->hw_frames_ctx->data == ctx-

> >>> hw_frames_ctx->data) ||

> >>> +                    // check if the same hw_device_ctx as used in initialization

> >>> +                    (ctx->hw_device_ctx &&

> >>> + ((AVHWFramesContext*)frame-

> >>> hw_frames_ctx->data)->device_ctx ==

> >>> +                    (AVHWDeviceContext*)ctx->hw_device_ctx->data)

> >>> +                )) {

> 

> (Here.)

> 

> >>> +                    ID3D11Texture2D* texture =

> >>> + (ID3D11Texture2D*)frame-

> >>> data[0]; // actual texture

> >>> +                    int index = (int)(size_t)frame->data[1]; //

> >>> + index is a slice in

> >> texture array is - set to tell AMF which slice to use

> >>

> >> (int)(intptr_t)frame->data[1];

> >>

> >>> +                    texture->lpVtbl->SetPrivateData(texture,

> >> &AMFTextureArrayIndexGUID, sizeof(index), &index);

> >>> +

> >>> +                    res =

> >>> + ctx->context->pVtbl->CreateSurfaceFromDX11Native(ctx-

> >>> context, texture, &surface, NULL); // wrap to AMF surface

> >>> +                    surface->pVtbl->SetCrop(surface, 0, 0,

> >>> + frame->width, frame-

> >>> height); // decode surfaces are vertically aligned by 16 tell AMF

> >>> real size

> >>

> >> "decode surfaces"?  These need not come from a decoder.  Does it work

> >> with hwupload?

> >>

> >>> +                    surface->pVtbl->SetPts(surface, frame->pts);

> >>> +                } else {

> >>> +                    res =

> >>> + ctx->context->pVtbl->AllocSurface(ctx->context,

> >> AMF_MEMORY_HOST, ctx->format, avctx->width, avctx->height,

> &surface);

> >>> +                    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK,

> >>> + AVERROR_BUG,

> >> "AllocSurface() failed  with error %d \n", res);

> >>> +                    amf_copy_surface(avctx, frame, surface);

> >>> +                }

> >>> +            }

> >>> +            // encode

> >>> +            res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder,

> >> (AMFData*)surface);

> >>> +            if (res == AMF_INPUT_FULL) { // handle full queue

> >>> +                av_usleep(1000); // input queue is full: wait, poll

> >>> + and submit

> >> surface again

> >>> +            } else {

> >>> +                surface->pVtbl->Release(surface);

> >>> +                surface = NULL;

> >>> +                AMF_RETURN_IF_FALSE(ctx, res == AMF_OK,

> >> AVERROR_UNKNOWN, "SubmitInput() failed with error %d \n", res);

> >>> +                submitted = 1;

> >>> +            }

> >>> +        }

> >>> +        // poll results

> >>> +        if (!data) {

> >>> +            res = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data);

> >>> +            if (data) {

> >>> +                AMFBuffer* buffer;

> >>> +                AMFGuid guid = IID_AMFBuffer();

> >>> +                data->pVtbl->QueryInterface(data, &guid,

> >>> + (void**)&buffer); //

> >> query for buffer interface

> >>> +                ret = amf_copy_buffer(avctx, pkt, buffer);

> >>> +                if (!ret)

> >>> +                    *got_packet = 1;

> >>> +                buffer->pVtbl->Release(buffer);

> >>> +                data->pVtbl->Release(data);

> >>> +                if (ctx->eof) {

> >>> +                    submitted = 1; // we are in the drain state - no submissions

> >>> +                }

> >>> +            } else if (res == AMF_EOF) {

> >>> +                submitted = 1; // drain complete

> >>> +            } else {

> >>> +                if (!submitted) {

> >>> +                    av_usleep(1000); // wait and poll again

> >>> +                }

> >>> +            }

> >>> +        }

> >>> +    }

> >>> +    return ret;

> >>> +}

> >>

> >> I still think this would be much better off using the

> >> send_frame()/receive_packet() API.  Even if your API doesn't expose

> >> any information about the queue length, you only need to hold a

> >> single input frame transiently to get around that (the user is not

> >> allowed to call

> >> send_frame() twice in a row without calling receive_packet()).

> >>

> >

> > So to implement this I would have to:

> > - in the send_frame() if AMF_INPUT_FULL is returned - store input

> > frame (or copy?)

> > - In the next receive_frame() check if frame is stored

> > - Wait till some output is produced

> > - resubmit stored frame

> 

> Sounds about right.

> 

> > Issues I see:

> > - Isn't this logic defeat the purpose of independent send()/receive()?

> > - How can I report a error if receive() produced a compressed frame but

> the delayed submission failed?

> 

> Since this is asynchronous anyway, just report it at the next available

> opportunity.

> 

> > - This logic depends on the particular logic in the calling code.

> 

> The API requires this behaviour of the caller.  See the documentation in

> avcodec.h.

> 

> > - This logic depends on the particular HW behaviour.

> 

> How so?

> 

> > - In the future, we would like to output individual slices of a compressed

> frame.

> > When this added receive_frame() must be called several times to clear

> space in the HW queue.

> > Granted, current implementation also does not cover this case but

> > truly independent send/receive implementation would.

> 

> Note that the user is required to call receive_packet() repeatedly until it

> returns EAGAIN, and only then are they allowed to call send_frame() again.


The implementation will be cumbersome at the least. Note that calling Drain()
may also return AMF_INPUT_FULL and will therefore have to be remembered and
called again in receive(). But I will implement it as you suggest. It is not a huge change.

> 

> >>> +static const AVOption options[] = {

> >>> +    // Static

> >>> +    /// Usage

> >>> +    { "usage",          "Encoder Usage",        OFFSET(usage),

> >> AV_OPT_TYPE_INT,   { .i64 =

> AMF_VIDEO_ENCODER_USAGE_TRANSCONDING

> >> }, AMF_VIDEO_ENCODER_USAGE_TRANSCONDING,

> >> AMF_VIDEO_ENCODER_USAGE_WEBCAM, VE, "usage" },

> >>> +    { "transcoding",    "Generic Transcoding",  0,

> >> AV_OPT_TYPE_CONST, { .i64 =

> >> AMF_VIDEO_ENCODER_USAGE_TRANSCONDING      }, 0, 0, VE, "usage" },

> >>> +    { "ultralowlatency","",                     0,              AV_OPT_TYPE_CONST, {

> .i64

> >> = AMF_VIDEO_ENCODER_USAGE_ULTRA_LOW_LATENCY }, 0, 0, VE,

> "usage"

> >> },

> >>> +    { "lowlatency",     "",                     0,              AV_OPT_TYPE_CONST, { .i64

> =

> >> AMF_VIDEO_ENCODER_USAGE_LOW_LATENCY       }, 0, 0, VE, "usage" },

> >>> +    { "webcam",         "Webcam",               0,              AV_OPT_TYPE_CONST,

> {

> >> .i64 = AMF_VIDEO_ENCODER_USAGE_WEBCAM            }, 0, 0, VE, "usage"

> },

> >>> +

> >>> +    /// Profile,

> >>> +    { "profile",        "Profile",              OFFSET(profile),AV_OPT_TYPE_INT, {

> >> .i64 = AMF_VIDEO_ENCODER_PROFILE_MAIN       },

> >> AMF_VIDEO_ENCODER_PROFILE_BASELINE,

> >> AMF_VIDEO_ENCODER_PROFILE_HIGH, VE, "profile" },

> >>> +    { "baseline",       "",                     0,              AV_OPT_TYPE_CONST, { .i64

> =

> >> AMF_VIDEO_ENCODER_PROFILE_BASELINE }, 0, 0, VE, "profile" },

> >>

> >> You still don't support baseline profile.

> >

> > Talked to codec folks. Currently this is really baseline by mistake.

> > The intention was to expose only "constrained baseline" They want to

> > correct this but it should go into the driver first and then reflected in AMF

> API. Once done this entry will be updated.

> 

> Ok, so baseline profile will not be included at all, and then constrained

> baseline added later?  That sounds fine.


It turns out that "constrained baseline" and "constrained high" were implemented but
not published in the AMF API. They will review it briefly and I will include them in the next submission.

> 

> >>> +    /// Maximum Access Unit Size

> >>> +    { "max_au_size",    "Maximum Access Unit Size for rate control (in

> bits)",

> >> OFFSET(max_au_size),        AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE,

> >> NULL },

> >>

> >> Did you check whether this really means the maximum access unit size?

> >> If yes, what is the use-case for that?

> >>

> >

> > I've changed the description. This parameter is used in rate control to limit

> AU size.

> > It is useful for streaming.

> 

> When do you want to explicitly set an access unit size limit?  That sort of

> thing is better limited by setting maxrate with a shorter window, IMO.


This is the domain of the codec rate control team. They feel that this parameter is important for some users.
Some of them actually do use it.

> 

> >>> +    { "me_half_pel",    "Enable ME Half Pixel",

> >> OFFSET(me_half_pel),   AV_OPT_TYPE_BOOL,  { .i64 = 1 }, 0, 1, VE, NULL },

> >>> +    { "me_quater_pel",  "Enable ME Quarter Pixel ",

> >> OFFSET(me_quater_pel), AV_OPT_TYPE_BOOL,  { .i64 = 1 }, 0, 1, VE,

> >> NULL },

> >>

> >> What is the use-case for these options?

> >>

> >

> > These are options for motion estimator precision. Spelling is corrected

> "me_quarter_pel"

> 

> What I mean is, why would anyone ever set these options to zero?


One can speed up encoding by disabling detailed motion estimation. 
I am aware of at least one customer who is using this.

> 

> >>> +

> >>> +    { NULL }

> >>> +};

> >>> +

> >>> +static av_cold int amf_encode_init_h264(AVCodecContext *avctx) {

> >>> +    int                 ret = 0;

> >>> +    AMF_RESULT          res = AMF_OK;

> >>> +    AmfContext         *ctx = avctx->priv_data;

> >>> +    AMFVariantStruct    var = {0};

> >>> +    amf_int64           profile = 0;

> >>> +    amf_int64           profile_level = 0;

> >>> +    AMFBuffer          *buffer;

> >>> +    AMFGuid             guid;

> >>> +

> >>> +    AMFSize             framesize = AMFConstructSize(avctx->width, avctx-

> >>> height);

> >>> +    AMFRate             framerate = AMFConstructRate(avctx-

> >time_base.den,

> >> avctx->time_base.num * avctx->ticks_per_frame);

> >>

> >> avctx->framerate should be set if the input is CFR, use that first.

> >>

> >>> +

> >>> +    int                 deblocking_filter = (avctx->flags &

> >> AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;

> >>> +

> >>> +    if ((ret = ff_amf_encode_init(avctx)) != 0)

> >>> +        return ret;

> >>> +

> >>> +    // Static parameters

> >>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_USAGE, ctx->usage);

> >>> +

> >>> +    AMF_ASSIGN_PROPERTY_SIZE(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_FRAMESIZE, framesize);

> >>> +

> >>> +    AMF_ASSIGN_PROPERTY_RATE(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_FRAMERATE, framerate);

> >>> +

> >>> +    profile = avctx->profile;

> >>

> >> avctx->profile might be (is by default, even) FF_PROFILE_UNKNOWN,

> >> avctx->which is

> >> not zero.

> >>

> >>> +    if (profile == 0) {

> >>> +        profile = ctx->profile;

> >>> +    }

> >>> +

> >>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_PROFILE, profile);

> >>> +

> >>> +    profile_level = avctx->level;

> >>

> >> Similarly FF_LEVEL_UNKNOWN.

> >>

> >>> +    if (profile_level == 0) {

> >>> +        profile_level = ctx->level;

> >>> +    }

> >>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_PROFILE_LEVEL, profile_level);

> >>> +

> >>> +    // Maximum Reference Frames

> >>> +    if (avctx->refs != -1) {

> >>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_MAX_NUM_REFRAMES, avctx->refs);

> >>> +    }

> >>> +    if (avctx->sample_aspect_ratio.den && avctx-

> >>> sample_aspect_ratio.num) {

> >>> +        AMFRatio ratio = AMFConstructRatio(avctx-

> >>> sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);

> >>> +        AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_ASPECT_RATIO, ratio);

> >>> +    }

> >>> +

> >>> +    /// Color Range (Partial/TV/MPEG or Full/PC/JPEG)

> >>> +    if (avctx->color_range == AVCOL_RANGE_JPEG) {

> >>> +        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_FULL_RANGE_COLOR, 1);

> >>> +    }

> >>> +

> >>> +    if (ctx->rate_control_mode ==

> >> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {

> >>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_RATE_CONTROL_PREANALYSIS_ENABLE,

> >> AMF_VIDEO_ENCODER_PREENCODE_DISABLED);

> >>> +        if (ctx->preanalysis)

> >>> +            av_log(ctx, AV_LOG_WARNING, "Pre-Analysis is not

> >>> + supported by

> >> cqp Rate Control Method, automatically disabled. \n");

> >>> +    } else {

> >>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_RATE_CONTROL_PREANALYSIS_ENABLE, ctx-

> >>> preanalysis);

> >>> +    }

> >>> +

> >>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_QUALITY_PRESET, ctx->quality);

> >>> +

> >>> +    // Initialize Encoder

> >>> +    res = ctx->encoder->pVtbl->Init(ctx->encoder, ctx->format,

> >>> + avctx-

> >>> width, avctx->height);

> >>> +    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG,

> "encoder-

> >>> Init() failed with error %d \n", res);

> >>> +

> >>> +    // Dynamic parmaters

> >>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD, ctx-

> >>> rate_control_mode);

> >>> +

> >>> +    /// VBV Buffer

> >>> +    if (avctx->rc_buffer_size != 0)

> >>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_VBV_BUFFER_SIZE, avctx->rc_buffer_size);

> >>> +    if (avctx->rc_initial_buffer_occupancy != 0) {

> >>> +        int amf_buffer_fullness = avctx->rc_buffer_size * 64 /

> >>> + avctx-

> >>> rc_initial_buffer_occupancy;

> >>> +        if (amf_buffer_fullness > 64)

> >>> +            amf_buffer_fullness = 64;

> >>

> >> I still don't understand what this is trying to do.

> >>

> >> rc_initial_buffer_occupancy is necessarily at most rc_buffer_size, so

> >> the calculation will always get a number >= 64, so you always pass 64.

> >>

> >> What are the units of

> >> AMF_VIDEO_ENCODER_INITIAL_VBV_BUFFER_FULLNESS meant to be?

> >>

> >

> > They meant to be an abstract value from 0 to 64 meaning 64 is 100%.

> > Don’t ask me why ☹ Calculation should be the opposite. My fault.

> 

> Right, that makes more sense.

> 

> >>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_INITIAL_VBV_BUFFER_FULLNESS,

> >> amf_buffer_fullness);

> >>> +    }

> >>> +    /// Maximum Access Unit Size

> >>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_MAX_AU_SIZE, ctx->max_au_size);

> >>> +

> >>> +    // QP Minimum / Maximum

> >>> +    if (ctx->rate_control_mode ==

> >> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {

> >>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_MIN_QP, 0);

> >>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_MAX_QP, 51);

> >>> +    } else {

> >>> +        if (avctx->qmin != -1) {

> >>> +            int qval = avctx->qmin > 51 ? 51 : avctx->qmin;

> >>> +            AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_MIN_QP, qval);

> >>> +        }

> >>> +        if (avctx->qmax != -1) {

> >>> +            int qval = avctx->qmax > 51 ? 51 : avctx->qmax;

> >>> +            AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_MAX_QP, qval);

> >>> +        }

> >>> +    }

> >>> +    // QP Values

> >>> +    if (ctx->qp_i != -1)

> >>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_QP_I, ctx->qp_i);

> >>> +    if (ctx->qp_p != -1)

> >>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_QP_P, ctx->qp_p);

> >>> +    if (ctx->qp_b != -1)

> >>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_QP_B, ctx->qp_b);

> >>> +

> >>> +    // Bitrate

> >>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_TARGET_BITRATE, avctx->bit_rate);

> >>> +

> >>> +    // Peak (max) bitrate. If not set make it out of bit_rate for best

> results.

> >>> +    if (ctx->rate_control_mode ==

> >> AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CBR) {

> >>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_PEAK_BITRATE, avctx->bit_rate);

> >>> +    } else {

> >>> +        int rc_max_rate = avctx->rc_max_rate >= avctx->bit_rate ?

> >>> + avctx-

> >>> rc_max_rate : avctx->bit_rate * 13 / 10;

> >>

> >> Please calculate a real value here as suggested in the previous

> >> comments rather than using 13/10.

> >

> > The suggestion was to set rc_max_rate to infinity. This will produce

> unpredicted results.

> > I can set it to bit_rate but quality will be not good. Another option would be

> to generate error.

> > I am open to suggestions.

> 

> Is the window over which rc_max_rate applies defined anywhere?  If so,

> then if rc_buffer_size is set you can calculate rc_max_rate as (rc_buffer_size

> + bit_rate * window) / window (i.e. the maximum number of bits which

> could be included in one window region given the buffer constraints).

> 

> If rc_buffer_size isn't set either, then it isn't meant to be constrained - the

> average should be right over the long term, but locally it doesn't matter.

> Hence infinity (or at least some very large value), to not impose any

> constraint.

> 


I will ask the rate control guy tomorrow and let him weigh in on this.

> >>> +    { "max_au_size",    "Max AU Size in bits",

> >> OFFSET(max_au_size),   AV_OPT_TYPE_INT,{ .i64 = 0 }, 0, INT_MAX, VE,

> NULL

> >> },

> >>

> >> Same question as in H.264.  Also other stuff below.

> >>

> >>> +    { "min_qp_i",       "min quantization parameter for I-frame",

> >> OFFSET(min_qp_i),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },

> >>> +    { "max_qp_i",       "max quantization parameter for I-frame",

> >> OFFSET(max_qp_i),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },

> >>> +    { "min_qp_p",       "min quantization parameter for P-frame",

> >> OFFSET(min_qp_p),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },

> >>> +    { "max_qp_p",       "max quantization parameter for P-frame",

> >> OFFSET(max_qp_p),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },

> >>> +    { "qp_p",           "quantization parameter for P-frame",

> OFFSET(qp_p),

> >> AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },

> >>> +    { "qp_i",           "quantization parameter for I-frame",

> OFFSET(qp_i),

> >> AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },

> >>> +    { "skip_frame",     "Rate Control Based Frame Skip",

> >> OFFSET(skip_frame),    AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },

> >>> +    { "me_half_pel",    "Enable ME Half Pixel",

> >> OFFSET(me_half_pel),   AV_OPT_TYPE_BOOL,{ .i64 = 1 }, 0, 1, VE, NULL },

> >>> +    { "me_quater_pel",  "Enable ME Quarter Pixel ",

> >> OFFSET(me_quater_pel), AV_OPT_TYPE_BOOL,{ .i64 = 1 }, 0, 1, VE, NULL

> >> },

> >>> +

> >>> +    { NULL }

> >>> +};

> >>> +

> >>> +static av_cold int amf_encode_init_hevc(AVCodecContext *avctx) {

> >>> +    int                 ret = 0;

> >>> +    AMF_RESULT          res = AMF_OK;

> >>> +    AmfContext         *ctx = avctx->priv_data;

> >>> +    AMFVariantStruct    var = {0};

> >>> +    amf_int64           profile = 0;

> >>> +    amf_int64           profile_level = 0;

> >>> +    AMFBuffer          *buffer;

> >>> +    AMFGuid             guid;

> >>> +

> >>> +    AMFSize             framesize = AMFConstructSize(avctx->width, avctx-

> >>> height);

> >>> +    AMFRate             framerate = AMFConstructRate(avctx-

> >time_base.den,

> >> avctx->time_base.num * avctx->ticks_per_frame);

> >>> +

> >>> +    int                 deblocking_filter = (avctx->flags &

> >> AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;

> >>> +

> >>> +    if ((ret = ff_amf_encode_init(avctx)) < 0)

> >>> +        return ret;

> >>> +

> >>> +    // init static parameters

> >>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_HEVC_USAGE, ctx->usage);

> >>> +

> >>> +    AMF_ASSIGN_PROPERTY_SIZE(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_HEVC_FRAMESIZE, framesize);

> >>> +

> >>> +    AMF_ASSIGN_PROPERTY_RATE(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_HEVC_FRAMERATE, framerate);

> >>> +

> >>> +    switch (avctx->profile) {

> >>> +    case FF_PROFILE_HEVC_MAIN:

> >>> +        profile = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN;

> >>> +        break;

> >>> +    default:

> >>> +        break;

> >>> +    }

> >>> +    if (profile == 0) {

> >>> +        profile = ctx->profile;

> >>> +    }

> >>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_HEVC_PROFILE, profile);

> >>> +

> >>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_HEVC_TIER, ctx->tier);

> >>> +

> >>> +    profile_level = avctx->level;

> >>> +    if (profile_level == 0) {

> >>> +        profile_level = ctx->level;

> >>> +    }

> >>> +    if (profile_level != 0) {

> >>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_HEVC_PROFILE_LEVEL, profile_level);

> >>> +    }

> >>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET, ctx->quality);

> >>> +    // Maximum Reference Frames

> >>> +    if (avctx->refs != 0) {

> >>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_HEVC_MAX_NUM_REFRAMES, avctx->refs);

> >>> +    }

> >>> +    // Aspect Ratio

> >>> +    if (avctx->sample_aspect_ratio.den && avctx-

> >>> sample_aspect_ratio.num) {

> >>> +        AMFRatio ratio = AMFConstructRatio(avctx-

> >>> sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);

> >>> +        AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_HEVC_ASPECT_RATIO, ratio);

> >>> +    }

> >>> +

> >>> +    // Picture control properties

> >>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_HEVC_NUM_GOPS_PER_IDR, ctx->gops_per_idr);

> >>> +    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_HEVC_GOP_SIZE, avctx->gop_size);

> >>> +    if (avctx->slices > 1) {

> >>> +        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_HEVC_SLICES_PER_FRAME, avctx->slices);

> >>> +    }

> >>> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_HEVC_DE_BLOCKING_FILTER_DISABLE,

> >> deblocking_filter);

> >>

> >> What about SAO?

> >

> > SAO ???

> 

> You're looking at AV_CODEC_FLAG_LOOP_FILTER to disable this, so you

> might want to consider both loop filters in H.265, not just the deblocking

> filter.

> 

> - Mark



Thanks, Mikhail
Marton Balint Oct. 31, 2017, 1:26 a.m. UTC | #8
On Mon, 30 Oct 2017, Mironov, Mikhail wrote:

[...]

>>>> I still think this would be much better off using the
>>>> send_frame()/receive_packet() API.  Even if your API doesn't expose
>>>> any information about the queue length, you only need to hold a
>>>> single input frame transiently to get around that (the user is not
>>>> allowed to call
>>>> send_frame() twice in a row without calling receive_packet()).
>>>>
>>>
>>> So to implement this I would have to:
>>> - in the send_frame() if AMF_INPUT_FULL is returned - store input
>>> frame (or copy?)
>>> - In the next receive_frame() check if frame is stored
>>> - Wait till some output is produced
>>> - resubmit stored frame
>>
>> Sounds about right.
>>
>>> Issues I see:
>>> - Isn't this logic defeat the purpose of independent send()/receive()?
>>> - How can I report a error if receive() produced a compressed frame but
>> the delayed submission failed?
>>
>> Since this is asynchronous anyway, just report it at the next available
>> opportunity.
>>
>>> - This logic depends on the particular logic in the calling code.
>>
>> The API requires this behaviour of the caller.  See the documentation in
>> avcodec.h.
>>
>>> - This logic depends on the particular HW behaviour.
>>
>> How so?
>>
>>> - In the future, we would like to output individual slices of a compressed
>> frame.
>>> When this added receive_frame() must be called several times to clear
>> space in the HW queue.
>>> Granted, current implementation also does not cover this case but
>>> truly independent send/receive implementation would.
>>
>> Note that the user is required to call receive_packet() repeatedly until it
>> returns EAGAIN, and only then are they allowed to call send_frame() again.
>
> The implementation will be cumbersome at least. Note that calling Drain()
> may also return AMF_INPUT_FULL and therefore will have to be remembered and
> called again in receive(). But I will implement as you suggests. It is not a huge change.
>

I see some confusion. The user can call send_frame/receive_packet in 
any order, and you can implement send_frame and receive_packet any way you 
want, the only thing you have to guarantee is that you cannot return 
EAGAIN for both send_frame and receive_packet. Not even temporarily.

If you returned EAGAIN in send_frame, you must return success or a 
normal error in receive_packet. If you returned EAGAIN in 
receive_packet, you must return success or a normal error in 
send_frame.

By returning EAGAIN in receive_packet you make sure that the API user 
submits as many frames as needed to fill your pipeline.

The simplest solution really seems to me what Mark proposed:

send_frame:

if (have_stored_frame)
   return EAGAIN;
if (amd_send_frame() == INPUT_FULL)
   store_frame;
return 0;

receive_packet:

if (have_stored_frame) {
   if (amd_send_frame() == OK)
      unstore_frame;
   block_until_have_packet
   return packet
} else {
   return EAGAIN
}

I hope I did not mess it up, proper draining and error handling obviously 
needs some minor changes.

Regards,
Marton
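
To make the outline above a bit more concrete, here is a minimal C-style sketch of that pattern. It mirrors the pseudocode only; amf_submit_frame(), amf_poll_packet() and the delayed_frame field are hypothetical names standing in for SubmitInput()/QueryOutput() wrappers, and draining/error handling is omitted:

#include "libavcodec/avcodec.h"
#include "libavutil/frame.h"
#include "libavutil/time.h"
#include "amfenc.h"

/* Sketch of the send_frame()/receive_packet() pattern outlined above.
 * ctx->delayed_frame is a hypothetical field holding the deferred input. */
static int amf_send_frame(AVCodecContext *avctx, const AVFrame *frame)
{
    AmfContext *ctx = avctx->priv_data;

    if (ctx->delayed_frame)                          /* still holding a deferred frame */
        return AVERROR(EAGAIN);
    if (amf_submit_frame(ctx, frame) == AMF_INPUT_FULL)
        ctx->delayed_frame = av_frame_clone(frame);  /* store and retry later */
    return 0;
}

static int amf_receive_packet(AVCodecContext *avctx, AVPacket *pkt)
{
    AmfContext *ctx = avctx->priv_data;

    if (!ctx->delayed_frame)
        return AVERROR(EAGAIN);   /* a full implementation would also poll QueryOutput() here */
    if (amf_submit_frame(ctx, ctx->delayed_frame) == AMF_OK)
        av_frame_free(&ctx->delayed_frame);
    while (amf_poll_packet(ctx, pkt) != AMF_OK)      /* block until output appears */
        av_usleep(1000);
    return 0;
}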
mmironov Oct. 31, 2017, 1:44 p.m. UTC | #9
> -----Original Message-----

> From: ffmpeg-devel [mailto:ffmpeg-devel-bounces@ffmpeg.org] On Behalf

> Of Marton Balint

> Sent: October 30, 2017 9:26 PM

> To: FFmpeg development discussions and patches <ffmpeg-

> devel@ffmpeg.org>

> Subject: Re: [FFmpeg-devel] Added HW H.264 and HEVC encoding for AMD

> GPUs based on AMF SDK

> 

> 

> 

> On Mon, 30 Oct 2017, Mironov, Mikhail wrote:

> 

> [...]

> 

> >>>> I still think this would be much better off using the

> >>>> send_frame()/receive_packet() API.  Even if your API doesn't expose

> >>>> any information about the queue length, you only need to hold a

> >>>> single input frame transiently to get around that (the user is not

> >>>> allowed to call

> >>>> send_frame() twice in a row without calling receive_packet()).

> >>>>

> >>>

> >>> So to implement this I would have to:

> >>> - in the send_frame() if AMF_INPUT_FULL is returned - store input

> >>> frame (or copy?)

> >>> - In the next receive_frame() check if frame is stored

> >>> - Wait till some output is produced

> >>> - resubmit stored frame

> >>

> >> Sounds about right.

> >>

> >>> Issues I see:

> >>> - Isn't this logic defeat the purpose of independent send()/receive()?

> >>> - How can I report a error if receive() produced a compressed frame

> >>> but

> >> the delayed submission failed?

> >>

> >> Since this is asynchronous anyway, just report it at the next

> >> available opportunity.

> >>

> >>> - This logic depends on the particular logic in the calling code.

> >>

> >> The API requires this behaviour of the caller.  See the documentation

> >> in avcodec.h.

> >>

> >>> - This logic depends on the particular HW behaviour.

> >>

> >> How so?

> >>

> >>> - In the future, we would like to output individual slices of a

> >>> compressed

> >> frame.

> >>> When this added receive_frame() must be called several times to

> >>> clear

> >> space in the HW queue.

> >>> Granted, current implementation also does not cover this case but

> >>> truly independent send/receive implementation would.

> >>

> >> Note that the user is required to call receive_packet() repeatedly

> >> until it returns EAGAIN, and only then are they allowed to call

> send_frame() again.

> >

> > The implementation will be cumbersome at least. Note that calling

> > Drain() may also return AMF_INPUT_FULL and therefore will have to be

> > remembered and called again in receive(). But I will implement as you

> suggests. It is not a huge change.

> >

> 

> I see some confusion. The user can call send_frame/receive_packet in any

> order, and you can implement send_frame and receive_packet any way you

> want, the only thing you have to guarantee is that you cannot return EAGAIN

> for both send_frame and receive_packet. Not even temporarily.

> 

> If you returned EAGAIN in send_frame, you must return success or a normal

> error in receive_packet. If you returned EAGAIN in receive_packet, you must

> return success or a normal error in send_frame.

> 

> By returning EAGAIN in receive_packet you make sure that the API user

> submits as many frames as needed to fill your pipeline.

> 

> The simplest solution really seems to me what Mark proposed:

> 

> send_frame:

> 

> if (have_stored_frame)

>    return EAGAIN;

> if (amd_send_frame() == INPUT_FULL)

>    store_frame;

> return 0;

> 

> receive_packet:

> 

> if (have_stored_frame) {

>    if (amd_send_frame() == OK)

>       unstore_frame;

>    block_until_have_packet

>    return packet

> } else {

>    return EAGAIN

> }

> 

> I hope I did not mess it up, proper draining and error handling obviously

> needs some minor changes.

> 


The logic in receive_packet() will be slightly different, but I will figure this out.
My only note is that returning EAGAIN from send_frame() will not work with the
current ffmpeg calling code. I was assured that this will never happen, but I
don't like the possibility of failure. What is the calling code supposed to do
when it gets EAGAIN from send_frame()? Resubmit? If so, it would not work with
the logic described.
Anyway, let's try Mark's suggestion and see if alterations are needed.

> Regards,

> Marton



Thanks, Mikhail
mmironov Oct. 31, 2017, 2:10 p.m. UTC | #10
> >>> +    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder,

> >> AMF_VIDEO_ENCODER_HEVC_DE_BLOCKING_FILTER_DISABLE,

> >> deblocking_filter);

> >>

> >> What about SAO?

> >

> > SAO ???

> 

> You're looking at AV_CODEC_FLAG_LOOP_FILTER to disable this, so you

> might want to consider both loop filters in H.265, not just the deblocking

> filter.

> 


At this point AMF exposes only the deblocking filter.

> - Mark

Marton Balint Oct. 31, 2017, 6:05 p.m. UTC | #11
On Tue, 31 Oct 2017, Mironov, Mikhail wrote:

[...]

>> I see some confusion. The user can call send_frame/receive_packet in any
>> order, and you can implement send_frame and receive_packet any way you
>> want, the only thing you have to guarantee is that you cannot return EAGAIN
>> for both send_frame and receive_packet. Not even temporarily.
>>
>> If you returned EAGAIN in send_frame, you must return success or a normal
>> error in receive_packet. If you returned EAGAIN in receive_packet, you must
>> return success or a normal error in send_frame.
>>
>> By returning EAGAIN in receive_packet you make sure that the API user
>> submits as many frames as needed to fill your pipeline.
>>
>> The simplest solution really seems to me what Mark proposed:
>>
>> send_frame:
>>
>> if (have_stored_frame)
>>    return EAGAIN;
>> if (amd_send_frame() == INPUT_FULL)
>>    store_frame;
>> return 0;
>>
>> receive_packet:
>>
>> if (have_stored_frame) {
>>    if (amd_send_frame() == OK)
>>       unstore_frame;
>>    block_until_have_packet
>>    return packet
>> } else {
>>    return EAGAIN
>> }
>>
>> I hope I did not mess it up, proper draining and error handling obviously
>> needs some minor changes.
>>
>
> The logic in receive_packet() will be slightly different but I will figure this out.
> My only note is that returning EAGAIN from send_frame() will not work with
> current ffmpeg calling code.
> I was assured that this will never happen but I
> don’t like possibility of the failure.  What the calling code supposed to do
> getting EAGAIN from send_frame()? Resubmit? If so it would not work with
> the logic described.
> Anyway, lets try Mark's suggestion and see if alternations are needed.

ffmpeg.c is written in a way that it calls receive_packet repeatedly until 
it gets EAGAIN. Due to the API requirements I mentioned (send_frame and 
receive_packet both cannot return EAGAIN), it is OK to not handle EAGAIN 
for send_frame in ffmpeg.c code.

Other applications might use other logic (e.g. call send_frame repeatedly, 
and then call receive_packet once, or call send_frame and receive packet 
alternating), in these cases the user application must be able to handle 
EAGAIN for send_frame, and resubmit the frame next time.

But if ffmpeg.c gets an EAGAIN in send_frame, that means a bug in the 
encoder because the encoder is breaking the API and it needs to be fixed.

Regards,
Marton
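
For illustration (not part of the patch), the calling side of this API typically looks roughly like the following, using the public avcodec_send_frame()/avcodec_receive_packet() functions:

/* Typical caller loop for the send/receive encode API (illustration only). */
static int encode_one(AVCodecContext *enc, const AVFrame *frame, AVPacket *pkt)
{
    int ret = avcodec_send_frame(enc, frame);   /* frame == NULL starts draining */
    if (ret < 0 && ret != AVERROR(EAGAIN))      /* other callers would retry the frame on EAGAIN */
        return ret;

    while ((ret = avcodec_receive_packet(enc, pkt)) >= 0) {
        /* ... write or otherwise consume pkt ... */
        av_packet_unref(pkt);
    }
    /* AVERROR(EAGAIN): feed more input; AVERROR_EOF: fully drained. */
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}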
mmironov Oct. 31, 2017, 6:31 p.m. UTC | #12
> -----Original Message-----

> From: ffmpeg-devel [mailto:ffmpeg-devel-bounces@ffmpeg.org] On Behalf

> Of Marton Balint

> Sent: October 31, 2017 2:06 PM

> To: FFmpeg development discussions and patches <ffmpeg-

> devel@ffmpeg.org>

> Subject: Re: [FFmpeg-devel] Added HW H.264 and HEVC encoding for AMD

> GPUs based on AMF SDK

> 

> 

> On Tue, 31 Oct 2017, Mironov, Mikhail wrote:

> 

> [...]

> 

> >> I see some confusion. The user can call send_frame/receive_packet in

> >> any order, and you can implement send_frame and receive_packet any

> >> way you want, the only thing you have to guarantee is that you cannot

> >> return EAGAIN for both send_frame and receive_packet. Not even

> temporarily.

> >>

> >> If you returned EAGAIN in send_frame, you must return success or a

> >> normal error in receive_packet. If you returned EAGAIN in

> >> receive_packet, you must return success or a normal error in send_frame.

> >>

> >> By returning EAGAIN in receive_packet you make sure that the API user

> >> submits as many frames as needed to fill your pipeline.

> >>

> >> The simplest solution really seems to me what Mark proposed:

> >>

> >> send_frame:

> >>

> >> if (have_stored_frame)

> >>    return EAGAIN;

> >> if (amd_send_frame() == INPUT_FULL)

> >>    store_frame;

> >> return 0;

> >>

> >> receive_packet:

> >>

> >> if (have_stored_frame) {

> >>    if (amd_send_frame() == OK)

> >>       unstore_frame;

> >>    block_until_have_packet

> >>    return packet

> >> } else {

> >>    return EAGAIN

> >> }

> >>

> >> I hope I did not mess it up, proper draining and error handling

> >> obviously needs some minor changes.

> >>

> >

> > The logic in receive_packet() will be slightly different but I will figure this

> out.

> > My only note is that returning EAGAIN from send_frame() will not work

> > with current ffmpeg calling code.

> > I was assured that this will never happen but I don’t like possibility

> > of the failure.  What the calling code supposed to do getting EAGAIN

> > from send_frame()? Resubmit? If so it would not work with the logic

> > described.

> > Anyway, lets try Mark's suggestion and see if alternations are needed.

> 

> ffmpeg.c is written in a way that it calls receive_packet repeatedly until it gets

> EAGAIN. Due to the API requirements I mentioned (send_frame and

> receive_packet both cannot return EAGAIN), it is OK to not handle EAGAIN

> for send_frame in ffmpeg.c code.

> 

> Other applications might use other logic (e.g. call send_frame repeatedly,

> and then call receive_packet once, or call send_frame and receive packet

> alternating), in these cases the user application must be able to handle

> EAGAIN for send_frame, and resubmit the frame next time.

> 

> But if ffmpeg.c gets an EAGAIN in send_frame, that means a bug in the

> encoder because the encoder is breaking the API and it needs to be fixed.


Yes, this is exactly how I understand it and, hopefully, how I implemented it.
 
> 

> Regards,

> Marton



Thanks, Mikhail
diff mbox

Patch

diff --git a/Changelog b/Changelog
index 6592d86..f0d22fa 100644
--- a/Changelog
+++ b/Changelog
@@ -6,7 +6,8 @@  version <next>:
 - Dropped support for OpenJPEG versions 2.0 and below. Using OpenJPEG now
   requires 2.1 (or later) and pkg-config.
 - VDA dropped (use VideoToolbox instead)
-
+- AMF H.264 encoder
+- AMF HEVC encoder
 
 version 3.4:
 - deflicker video filter
diff --git a/compat/amd/amfsdkenc.h b/compat/amd/amfsdkenc.h
new file mode 100644
index 0000000..0919080
--- /dev/null
+++ b/compat/amd/amfsdkenc.h
@@ -0,0 +1,1750 @@ 
+// 
+// MIT license 
+// 
+// Copyright (c) 2017 Advanced Micro Devices, Inc. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+//
+
+// Reduced AMF API
+//
+// Full version of AMF SDK and the latest version of this file 
+// can be found at https://github.com/GPUOpen-LibrariesAndSDKs/AMF
+
+#ifndef __AMF_SDK_Enc_h__
+#define __AMF_SDK_Enc_h__
+#pragma once
+
+//-----------------------------------------------------------------------------
+// Platform.h
+//-----------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------
+// export declaration
+//----------------------------------------------------------------------------------------------
+#ifdef _WIN32
+#if defined(AMF_CORE_STATIC)
+#define AMF_CORE_LINK
+#else
+#if defined(AMF_CORE_EXPORTS)
+#define AMF_CORE_LINK __declspec(dllexport)
+#else
+#define AMF_CORE_LINK __declspec(dllimport)
+#endif
+#endif
+#else // #ifdef _WIN32
+#define AMF_CORE_LINK
+#endif // #ifdef _WIN32
+
+#define AMF_MACRO_STRING2(x) #x
+#define AMF_MACRO_STRING(x) AMF_MACRO_STRING2(x)
+
+#define AMF_TODO(_todo) (__FILE__ "(" AMF_MACRO_STRING(__LINE__) "): TODO: "_todo)
+
+
+#if defined(__GNUC__) || defined(__clang__)
+#define AMF_ALIGN(n) __attribute__((aligned(n)))
+#elif defined(_MSC_VER) || defined(__INTEL_COMPILER)
+#define AMF_ALIGN(n) __declspec(align(n))
+#else
+#define AMF_ALIGN(n)
+//     #error Need to define AMF_ALIGN
+#endif
+
+#include <stdio.h>
+#include <stdint.h>
+
+#if defined(_WIN32)
+
+
+#ifndef NOMINMAX
+#define NOMINMAX
+#endif
+#define AMF_STD_CALL            __stdcall
+#define AMF_CDECL_CALL          __cdecl
+#define AMF_FAST_CALL           __fastcall
+#if defined(__GNUC__) || defined(__clang__)
+#define AMF_INLINE              inline
+#define AMF_FORCEINLINE         inline
+#else
+#define AMF_INLINE              __inline
+#define AMF_FORCEINLINE         __forceinline
+#endif
+#define AMF_NO_VTABLE           __declspec(novtable)
+
+#define AMFPRId64   "I64d"
+#define LPRId64    L"I64d"
+
+#define AMFPRIud64   "Iu64d"
+#define LPRIud64    L"Iu64d"
+
+#define AMFPRIx64   "I64x"
+#define LPRIx64    L"I64x"
+
+#else // !WIN32 - Linux and Mac
+
+#define AMF_STD_CALL
+#define AMF_CDECL_CALL
+#define AMF_FAST_CALL
+#if defined(__GNUC__) || defined(__clang__)
+#define AMF_INLINE              inline
+#define AMF_FORCEINLINE         inline
+#else
+#define AMF_INLINE              __inline__
+#define AMF_FORCEINLINE         __inline__
+#endif
+#define AMF_NO_VTABLE           
+
+#if !defined(AMFPRId64)
+#define AMFPRId64    "lld"
+#define LPRId64     L"lld"
+
+#define AMFPRIud64    "ulld"
+#define LPRIud64     L"ulld"
+
+#define AMFPRIx64    "llx"
+#define LPRIx64     L"llx"
+#endif
+
+#endif // WIN32
+
+
+#if defined(_MSC_VER)
+#define AMF_WEAK __declspec( selectany ) 
+#elif defined (__GCC__) || defined(__clang__)//GCC or CLANG
+#define AMF_WEAK __attribute__((weak))
+#endif
+
+#define amf_countof(x) (sizeof(x) / sizeof(x[0]))
+
+//-------------------------------------------------------------------------------------------------
+// basic data types
+//-------------------------------------------------------------------------------------------------
+typedef     int64_t             amf_int64;
+typedef     int32_t             amf_int32;
+typedef     int16_t             amf_int16;
+typedef     int8_t              amf_int8;
+
+typedef     uint64_t            amf_uint64;
+typedef     uint32_t            amf_uint32;
+typedef     uint16_t            amf_uint16;
+typedef     uint8_t             amf_uint8;
+typedef     size_t              amf_size;
+
+typedef     void*               amf_handle;
+typedef     double              amf_double;
+typedef     float               amf_float;
+
+typedef     void                amf_void;
+
+#if defined(__cplusplus)
+typedef     bool                amf_bool;
+#else
+typedef     amf_uint16          amf_bool;
+#define     true                1 
+#define     false               0 
+#endif
+
+typedef     long                amf_long;
+typedef     int                 amf_int;
+typedef     unsigned long       amf_ulong;
+typedef     unsigned int        amf_uint;
+
+typedef     amf_int64           amf_pts;     // in 100 nanosecs
+
+#define AMF_SECOND          10000000L    // 1 second in 100 nanoseconds
+
+#define AMF_MIN(a, b) ((a) < (b) ? (a) : (b))
+#define AMF_MAX(a, b) ((a) > (b) ? (a) : (b))
+
+#if defined(_WIN32)
+#define PATH_SEPARATOR_WSTR         L"\\"
+#define PATH_SEPARATOR_WCHAR        L'\\'
+#elif defined(__linux) // Linux
+#define PATH_SEPARATOR_WSTR          L"/"
+#define PATH_SEPARATOR_WCHAR         L'/'
+#endif
+
+typedef struct AMFRect
+{
+    amf_int32 left;
+    amf_int32 top;
+    amf_int32 right;
+    amf_int32 bottom;
+} AMFRect;
+
+AMF_INLINE struct AMFRect AMFConstructRect(amf_int32 left, amf_int32 top, amf_int32 right, amf_int32 bottom)
+{
+    struct AMFRect object = { left, top, right, bottom };
+    return object;
+}
+
+typedef struct AMFSize
+{
+    amf_int32 width;
+    amf_int32 height;
+} AMFSize;
+
+AMF_INLINE struct AMFSize AMFConstructSize(amf_int32 width, amf_int32 height)
+{
+    struct AMFSize object = { width, height };
+    return object;
+}
+
+typedef struct AMFPoint
+{
+    amf_int32 x;
+    amf_int32 y;
+} AMFPoint;
+
+AMF_INLINE struct AMFPoint AMFConstructPoint(amf_int32 x, amf_int32 y)
+{
+    struct AMFPoint object = { x, y };
+    return object;
+}
+
+typedef struct AMFRate
+{
+    amf_uint32 num;
+    amf_uint32 den;
+} AMFRate;
+
+AMF_INLINE struct AMFRate AMFConstructRate(amf_uint32 num, amf_uint32 den)
+{
+    struct AMFRate object = { num, den };
+    return object;
+}
+
+typedef struct AMFRatio
+{
+    amf_uint32 num;
+    amf_uint32 den;
+} AMFRatio;
+
+AMF_INLINE struct AMFRatio AMFConstructRatio(amf_uint32 num, amf_uint32 den)
+{
+    struct AMFRatio object = { num, den };
+    return object;
+}
+
+#pragma pack(push, 1)
+#if defined(_MSC_VER)
+#pragma warning( push )
+#endif
+#if defined(WIN32)
+#if defined(_MSC_VER)
+#pragma warning(disable : 4200)
+#pragma warning(disable : 4201)
+#endif
+#endif
+typedef struct AMFColor
+{
+    union
+    {
+        struct
+        {
+            amf_uint8 r;
+            amf_uint8 g;
+            amf_uint8 b;
+            amf_uint8 a;
+        };
+        amf_uint32 rgba;
+    };
+} AMFColor;
+#if defined(_MSC_VER)
+#pragma warning( pop )
+#endif
+#pragma pack(pop)
+
+
+AMF_INLINE struct AMFColor AMFConstructColor(amf_uint8 r, amf_uint8 g, amf_uint8 b, amf_uint8 a)
+{
+    struct AMFColor object;
+    object.r = r;
+    object.g = g;
+    object.b = b;
+    object.a = a;
+    return object;
+}
+
+#if defined(_WIN32)
+#include <combaseapi.h>
+
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+    // allocator
+    AMF_INLINE void* AMF_CDECL_CALL amf_variant_alloc(amf_size count)
+    {
+        return CoTaskMemAlloc(count);
+    }
+    AMF_INLINE void AMF_CDECL_CALL amf_variant_free(void* ptr)
+    {
+        CoTaskMemFree(ptr);
+    }
+#if defined(__cplusplus)
+}
+#endif
+
+#else // defined(_WIN32)
+#include <stdlib.h>
+#if defined(__cplusplus)
+extern "C"
+{
+#endif
+    // allocator
+    AMF_INLINE void* AMF_CDECL_CALL amf_variant_alloc(amf_size count)
+    {
+        return malloc(count);
+    }
+    AMF_INLINE void AMF_CDECL_CALL amf_variant_free(void* ptr)
+    {
+        free(ptr);
+    }
+#if defined(__cplusplus)
+}
+#endif
+#endif // defined(_WIN32)
+
+
+typedef struct AMFGuid
+{
+    amf_uint32 data1;
+    amf_uint16 data2;
+    amf_uint16 data3;
+    amf_uint8 data41;
+    amf_uint8 data42;
+    amf_uint8 data43;
+    amf_uint8 data44;
+    amf_uint8 data45;
+    amf_uint8 data46;
+    amf_uint8 data47;
+    amf_uint8 data48;
+} AMFGuid;
+
+//-----------------------------------------------------------------------------
+// Version.h
+//-----------------------------------------------------------------------------
+#define AMF_MAKE_FULL_VERSION(VERSION_MAJOR, VERSION_MINOR, VERSION_RELEASE, VERSION_BUILD_NUM)    ( ((amf_uint64)(VERSION_MAJOR) << 48ull) | ((amf_uint64)(VERSION_MINOR) << 32ull) | ((amf_uint64)(VERSION_RELEASE) << 16ull)  | (amf_uint64)(VERSION_BUILD_NUM))
+
+#define AMF_GET_MAJOR_VERSION(x)      ((x >> 48ull) & 0xFFFF)
+#define AMF_GET_MINOR_VERSION(x)      ((x >> 32ull) & 0xFFFF)
+#define AMF_GET_SUBMINOR_VERSION(x)   ((x >> 16ull) & 0xFFFF)
+#define AMF_GET_BUILD_VERSION(x)      ((x >>  0ull) & 0xFFFF)
+
+#define AMF_VERSION_MAJOR       1
+#define AMF_VERSION_MINOR       4
+#define AMF_VERSION_RELEASE     4
+#define AMF_VERSION_BUILD_NUM   0
+
+#define AMF_FULL_VERSION AMF_MAKE_FULL_VERSION(AMF_VERSION_MAJOR, AMF_VERSION_MINOR, AMF_VERSION_RELEASE, AMF_VERSION_BUILD_NUM)
+
+//-----------------------------------------------------------------------------
+// Result.h
+//-----------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------
+// result codes
+//----------------------------------------------------------------------------------------------
+
+typedef enum AMF_RESULT
+{
+    AMF_OK                                   = 0,
+    AMF_FAIL                                    ,
+
+// common errors
+    AMF_UNEXPECTED                              ,
+
+    AMF_ACCESS_DENIED                           ,
+    AMF_INVALID_ARG                             ,
+    AMF_OUT_OF_RANGE                            ,
+
+    AMF_OUT_OF_MEMORY                           ,
+    AMF_INVALID_POINTER                         ,
+
+    AMF_NO_INTERFACE                            ,
+    AMF_NOT_IMPLEMENTED                         ,
+    AMF_NOT_SUPPORTED                           ,
+    AMF_NOT_FOUND                               ,
+
+    AMF_ALREADY_INITIALIZED                     ,
+    AMF_NOT_INITIALIZED                         ,
+
+    AMF_INVALID_FORMAT                          ,// invalid data format
+
+    AMF_WRONG_STATE                             ,
+    AMF_FILE_NOT_OPEN                           ,// cannot open file
+
+// device common codes
+    AMF_NO_DEVICE                               ,
+
+// device directx
+    AMF_DIRECTX_FAILED                          ,
+// device opencl 
+    AMF_OPENCL_FAILED                           ,
+// device opengl 
+    AMF_GLX_FAILED                              ,//failed to use GLX
+// device XV 
+    AMF_XV_FAILED                               , //failed to use Xv extension
+// device alsa
+    AMF_ALSA_FAILED                             ,//failed to use ALSA
+
+// component common codes
+
+    //result codes
+    AMF_EOF                                     ,
+    AMF_REPEAT                                  ,
+    AMF_INPUT_FULL                              ,//returned by AMFComponent::SubmitInput if input queue is full
+    AMF_RESOLUTION_CHANGED                      ,//resolution changed client needs to Drain/Terminate/Init
+    AMF_RESOLUTION_UPDATED                      ,//resolution changed in adaptive mode. New ROI will be set on output on newly decoded frames
+
+    //error codes
+    AMF_INVALID_DATA_TYPE                       ,//invalid data type
+    AMF_INVALID_RESOLUTION                      ,//invalid resolution (width or height)
+    AMF_CODEC_NOT_SUPPORTED                     ,//codec not supported
+    AMF_SURFACE_FORMAT_NOT_SUPPORTED            ,//surface format not supported
+    AMF_SURFACE_MUST_BE_SHARED                  ,//surface should be shared (DX11: (MiscFlags & D3D11_RESOURCE_MISC_SHARED) == 0, DX9: No shared handle found)
+
+// component video decoder
+    AMF_DECODER_NOT_PRESENT                     ,//failed to create the decoder
+    AMF_DECODER_SURFACE_ALLOCATION_FAILED       ,//failed to create the surface for decoding
+    AMF_DECODER_NO_FREE_SURFACES                ,
+
+// component video encoder
+    AMF_ENCODER_NOT_PRESENT                     ,//failed to create the encoder
+
+// component video processor
+
+// component video conveter
+
+// component dem
+    AMF_DEM_ERROR                               ,
+    AMF_DEM_PROPERTY_READONLY                   ,
+    AMF_DEM_REMOTE_DISPLAY_CREATE_FAILED        ,
+    AMF_DEM_START_ENCODING_FAILED               ,
+    AMF_DEM_QUERY_OUTPUT_FAILED                 ,
+
+// component TAN
+    AMF_TAN_CLIPPING_WAS_REQUIRED               , // Resulting data was truncated to meet output type's value limits.
+    AMF_TAN_UNSUPPORTED_VERSION                 , // Not supported version requested, solely for TANCreateContext().
+
+    AMF_NEED_MORE_INPUT                         ,//returned by AMFComponent::SubmitInput did not produce buffer
+} AMF_RESULT;
+
+
+//-----------------------------------------------------------------------------
+// Interface.h
+//-----------------------------------------------------------------------------
+#define AMF_DECLARE_IID(name, _data1, _data2, _data3, _data41, _data42, _data43, _data44, _data45, _data46, _data47, _data48) \
+        AMF_INLINE static const AMFGuid IID_##name(void) \
+        { \
+            AMFGuid uid = {_data1, _data2, _data3, _data41, _data42, _data43, _data44, _data45, _data46, _data47, _data48}; \
+            return uid; \
+        }
+AMF_DECLARE_IID(AMFInterface, 0x9d872f34, 0x90dc, 0x4b93, 0xb6, 0xb2, 0x6c, 0xa3, 0x7c, 0x85, 0x25, 0xdb)
+typedef struct AMFInterface AMFInterface;
+
+typedef struct AMFInterfaceVtbl
+{
+    // AMFInterface interface
+    amf_long(AMF_STD_CALL *Acquire)(AMFInterface* pThis);
+    amf_long(AMF_STD_CALL *Release)(AMFInterface* pThis);
+    enum AMF_RESULT(AMF_STD_CALL *QueryInterface)(AMFInterface* pThis, const struct AMFGuid *interfaceID, void** ppInterface);
+} AMFInterfaceVtbl;
+
+struct AMFInterface
+{
+    const AMFInterfaceVtbl *pVtbl;
+};
+
+//-----------------------------------------------------------------------------
+// Variant.h
+//-----------------------------------------------------------------------------
+
+//----------------------------------------------------------------------------------------------
+// variant types
+//----------------------------------------------------------------------------------------------
+typedef enum AMF_VARIANT_TYPE
+{
+    AMF_VARIANT_EMPTY = 0,
+
+    AMF_VARIANT_BOOL = 1,
+    AMF_VARIANT_INT64 = 2,
+    AMF_VARIANT_DOUBLE = 3,
+
+    AMF_VARIANT_RECT = 4,
+    AMF_VARIANT_SIZE = 5,
+    AMF_VARIANT_POINT = 6,
+    AMF_VARIANT_RATE = 7,
+    AMF_VARIANT_RATIO = 8,
+    AMF_VARIANT_COLOR = 9,
+
+    AMF_VARIANT_STRING = 10,  // value is char*
+    AMF_VARIANT_WSTRING = 11,  // value is wchar_t*
+    AMF_VARIANT_INTERFACE = 12,  // value is AMFInterface*
+} AMF_VARIANT_TYPE;
+//----------------------------------------------------------------------------------------------
+// variant struct
+//----------------------------------------------------------------------------------------------
+typedef struct AMFVariantStruct
+{
+    AMF_VARIANT_TYPE            type;
+    union
+    {
+        amf_bool                boolValue;
+        amf_int64               int64Value;
+        amf_double              doubleValue;
+        char*                   stringValue;
+        wchar_t*                wstringValue;
+        AMFInterface*           pInterface;
+        struct AMFRect          rectValue;
+        struct AMFSize          sizeValue;
+        struct AMFPoint         pointValue;
+        struct AMFRate          rateValue;
+        struct AMFRatio         ratioValue;
+        struct AMFColor         colorValue;
+    };
+} AMFVariantStruct;
+
+#define AMF_VARIANT_RETURN_IF_INVALID_POINTER(p) \
+       { \
+            if(p == NULL) \
+                    { \
+                 return AMF_INVALID_POINTER; \
+            } \
+       }
+
+static AMF_INLINE AMF_RESULT AMF_CDECL_CALL AMFVariantInit(AMFVariantStruct* pVariant)
+{
+    AMF_VARIANT_RETURN_IF_INVALID_POINTER(pVariant);
+    pVariant->type = AMF_VARIANT_EMPTY;
+    return AMF_OK;
+}
+
+static AMF_INLINE AMF_RESULT AMF_CDECL_CALL AMFVariantClear(AMFVariantStruct* pVariant)
+{
+    AMF_RESULT errRet = AMF_OK;
+    AMF_VARIANT_RETURN_IF_INVALID_POINTER(pVariant);
+
+    switch (pVariant->type)
+    {
+    case AMF_VARIANT_STRING:
+        amf_variant_free(pVariant->stringValue);
+        pVariant->type = AMF_VARIANT_EMPTY;
+        break;
+
+    case AMF_VARIANT_WSTRING:
+        amf_variant_free(pVariant->wstringValue);
+        pVariant->type = AMF_VARIANT_EMPTY;
+        break;
+
+    case AMF_VARIANT_INTERFACE:
+        if (pVariant->pInterface != NULL)
+        {
+#if defined(__cplusplus)
+            pVariant->pInterface->Release();
+#else
+            pVariant->pInterface->pVtbl->Release(pVariant->pInterface);
+#endif
+            pVariant->pInterface = NULL;
+        }
+        pVariant->type = AMF_VARIANT_EMPTY;
+        break;
+
+    default:
+        pVariant->type = AMF_VARIANT_EMPTY;
+        break;
+    }
+    return errRet;
+}
+//-------------------------------------------------------------------------------------------------
+static AMF_INLINE AMF_RESULT AMF_CDECL_CALL AMFVariantAssignBool(AMFVariantStruct* pDest, amf_bool value)
+{
+    AMF_RESULT errRet = AMF_OK;
+    AMF_VARIANT_RETURN_IF_INVALID_POINTER(pDest);
+
+    errRet = AMFVariantClear(pDest);
+    if (errRet == AMF_OK)
+    {
+        pDest->type = AMF_VARIANT_BOOL;
+        pDest->boolValue = value;
+    }
+    return errRet;
+}
+//-------------------------------------------------------------------------------------------------
+static AMF_INLINE AMF_RESULT AMF_CDECL_CALL AMFVariantAssignInt64(AMFVariantStruct* pDest, amf_int64 value)
+{
+    AMF_RESULT errRet = AMF_OK;
+    AMF_VARIANT_RETURN_IF_INVALID_POINTER(pDest);
+
+    errRet = AMFVariantClear(pDest);
+    if (errRet == AMF_OK)
+    {
+        pDest->type = AMF_VARIANT_INT64;
+        pDest->int64Value = value;
+    }
+    return errRet;
+}
+//-------------------------------------------------------------------------------------------------
+static AMF_INLINE AMF_RESULT AMF_CDECL_CALL AMFVariantAssignDouble(AMFVariantStruct* pDest, amf_double value)
+{
+    AMF_RESULT errRet = AMF_OK;
+    AMF_VARIANT_RETURN_IF_INVALID_POINTER(pDest);
+
+    errRet = AMFVariantClear(pDest);
+    if (errRet == AMF_OK)
+    {
+        pDest->type = AMF_VARIANT_DOUBLE;
+        pDest->doubleValue = value;
+    }
+    return errRet;
+}
+//-------------------------------------------------------------------------------------------------
+static AMF_INLINE AMF_RESULT AMF_CDECL_CALL AMFVariantAssignSize(AMFVariantStruct* pDest, const AMFSize* value)
+{
+    AMF_RESULT errRet = AMF_OK;
+    AMF_VARIANT_RETURN_IF_INVALID_POINTER(pDest);
+
+    errRet = AMFVariantClear(pDest);
+    if (errRet == AMF_OK)
+    {
+        pDest->type = AMF_VARIANT_SIZE;
+        pDest->sizeValue = *value;
+    }
+    return errRet;
+}
+//-------------------------------------------------------------------------------------------------
+static AMF_INLINE AMF_RESULT AMF_CDECL_CALL AMFVariantAssignPoint(AMFVariantStruct* pDest, const AMFPoint* value)
+{
+    AMF_RESULT errRet = AMF_OK;
+    AMF_VARIANT_RETURN_IF_INVALID_POINTER(pDest);
+
+    errRet = AMFVariantClear(pDest);
+    if (errRet == AMF_OK)
+    {
+        pDest->type = AMF_VARIANT_POINT;
+        pDest->pointValue = *value;
+    }
+    return errRet;
+}
+//-------------------------------------------------------------------------------------------------
+static AMF_INLINE AMF_RESULT AMF_CDECL_CALL AMFVariantAssignRate(AMFVariantStruct* pDest, const AMFRate* value)
+{
+    AMF_RESULT errRet = AMF_OK;
+    AMF_VARIANT_RETURN_IF_INVALID_POINTER(pDest);
+
+    errRet = AMFVariantClear(pDest);
+    if (errRet == AMF_OK)
+    {
+        pDest->type = AMF_VARIANT_RATE;
+        pDest->rateValue = *value;
+    }
+    return errRet;
+}
+//-------------------------------------------------------------------------------------------------
+static AMF_INLINE AMF_RESULT AMF_CDECL_CALL AMFVariantAssignRatio(AMFVariantStruct* pDest, const AMFRatio* value)
+{
+    AMF_RESULT errRet = AMF_OK;
+    AMF_VARIANT_RETURN_IF_INVALID_POINTER(pDest);
+
+    errRet = AMFVariantClear(pDest);
+    if (errRet == AMF_OK)
+    {
+        pDest->type = AMF_VARIANT_RATIO;
+        pDest->ratioValue = *value;
+    }
+    return errRet;
+}
+//-------------------------------------------------------------------------------------------------
+static AMF_INLINE AMF_RESULT AMF_CDECL_CALL AMFVariantAssignColor(AMFVariantStruct* pDest, const AMFColor* value)
+{
+    AMF_RESULT errRet = AMF_OK;
+    AMF_VARIANT_RETURN_IF_INVALID_POINTER(pDest);
+
+    errRet = AMFVariantClear(pDest);
+    if (errRet == AMF_OK)
+    {
+        pDest->type = AMF_VARIANT_COLOR;
+        pDest->colorValue = *value;
+    }
+    return errRet;
+}
+
+//-----------------------------------------------------------------------------
+// PropertyStorage.h
+//-----------------------------------------------------------------------------
+typedef struct AMFPropertyStorageObserver AMFPropertyStorageObserver;
+typedef struct AMFPropertyStorage AMFPropertyStorage;
+
+#define AMF_ASSIGN_PROPERTY_DATA(res, varType, pThis, name, val ) \
+    { \
+        AMFVariantStruct var = {0}; \
+        AMFVariantAssign##varType(&var, val); \
+        res = pThis->pVtbl->SetProperty(pThis, name, var ); \
+    }
+#define AMF_ASSIGN_PROPERTY_TYPE(res, varType, dataType , pThis, name, val )  AMF_ASSIGN_PROPERTY_DATA(res, varType, pThis, name, (dataType)val)
+
+#define AMF_ASSIGN_PROPERTY_INT64(res, pThis, name, val ) AMF_ASSIGN_PROPERTY_TYPE(res, Int64, amf_int64, pThis, name, val)
+#define AMF_ASSIGN_PROPERTY_DOUBLE(res, pThis, name, val ) AMF_ASSIGN_PROPERTY_TYPE(res, Double, amf_double, pThis, name, val)
+#define AMF_ASSIGN_PROPERTY_BOOL(res, pThis, name, val ) AMF_ASSIGN_PROPERTY_TYPE(res, Bool, amf_bool, pThis, name, val)
+#define AMF_ASSIGN_PROPERTY_RECT(res, pThis, name, val ) AMF_ASSIGN_PROPERTY_DATA(res, Rect, pThis, name, &val)
+#define AMF_ASSIGN_PROPERTY_SIZE(res, pThis, name, val ) AMF_ASSIGN_PROPERTY_DATA(res, Size, pThis, name, &val)
+#define AMF_ASSIGN_PROPERTY_POINT(res, pThis, name, val ) AMF_ASSIGN_PROPERTY_DATA(res, Point, pThis, name, &val)
+#define AMF_ASSIGN_PROPERTY_RATE(res, pThis, name, val ) AMF_ASSIGN_PROPERTY_DATA(res, Rate, pThis, name, &val)
+#define AMF_ASSIGN_PROPERTY_RATIO(res, pThis, name, val ) AMF_ASSIGN_PROPERTY_DATA(res, Ratio, pThis, name, &val)
+#define AMF_ASSIGN_PROPERTY_COLOR(res, pThis, name, val ) AMF_ASSIGN_PROPERTY_DATA(res, Color, pThis, name, &val)
+
+//-----------------------------------------------------------------------------
+// PropertyStorageEx.h
+//-----------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------
+typedef enum AMF_PROPERTY_CONTENT_ENUM
+{
+    AMF_PROPERTY_CONTENT_DEFAULT = 0,
+    AMF_PROPERTY_CONTENT_XML,               // m_eType is AMF_VARIANT_STRING
+
+    AMF_PROPERTY_CONTENT_FILE_OPEN_PATH,    // m_eType AMF_VARIANT_WSTRING
+    AMF_PROPERTY_CONTENT_FILE_SAVE_PATH     // m_eType AMF_VARIANT_WSTRING
+} AMF_PROPERTY_CONTENT_ENUM;
+//----------------------------------------------------------------------------------------------
+typedef enum AMF_PROPERTY_ACCESS_TYPE
+{
+    AMF_PROPERTY_ACCESS_PRIVATE = 0,
+    AMF_PROPERTY_ACCESS_READ = 0x1,
+    AMF_PROPERTY_ACCESS_WRITE = 0x2,
+    AMF_PROPERTY_ACCESS_READ_WRITE = (AMF_PROPERTY_ACCESS_READ | AMF_PROPERTY_ACCESS_WRITE),
+    AMF_PROPERTY_ACCESS_WRITE_RUNTIME = 0x4,
+    AMF_PROPERTY_ACCESS_FULL = 0xFF,
+} AMF_PROPERTY_ACCESS_TYPE;
+//----------------------------------------------------------------------------------------------
+typedef struct AMFEnumDescriptionEntry
+{
+    amf_int             value;
+    const wchar_t*      name;
+} AMFEnumDescriptionEntry;
+//----------------------------------------------------------------------------------------------
+typedef amf_uint32 AMF_PROPERTY_CONTENT_TYPE;
+
+typedef struct AMFPropertyInfo
+{
+    const wchar_t*                  name;
+    const wchar_t*                  desc;
+    AMF_VARIANT_TYPE                type;
+    AMF_PROPERTY_CONTENT_TYPE       contentType;
+
+    AMFVariantStruct                defaultValue;
+    AMFVariantStruct                minValue;
+    AMFVariantStruct                maxValue;
+    AMF_PROPERTY_ACCESS_TYPE        accessType;
+    const AMFEnumDescriptionEntry*  pEnumDescription;
+} AMFPropertyInfo;
+//-----------------------------------------------------------------------------
+// Data.h
+//-----------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------
+typedef enum AMF_DATA_TYPE
+{
+    AMF_DATA_BUFFER = 0,
+    AMF_DATA_SURFACE = 1,
+    AMF_DATA_AUDIO_BUFFER = 2,
+    AMF_DATA_USER = 1000,
+    // all extensions will be AMF_DATA_USER+i
+} AMF_DATA_TYPE;
+//----------------------------------------------------------------------------------------------
+typedef enum AMF_MEMORY_TYPE
+{
+    AMF_MEMORY_UNKNOWN = 0,
+    AMF_MEMORY_HOST = 1,
+    AMF_MEMORY_DX9 = 2,
+    AMF_MEMORY_DX11 = 3,
+    AMF_MEMORY_OPENCL = 4,
+    AMF_MEMORY_OPENGL = 5,
+    AMF_MEMORY_XV = 6,
+    AMF_MEMORY_GRALLOC = 7,
+    AMF_MEMORY_COMPUTE_FOR_DX9 = 8,
+    AMF_MEMORY_COMPUTE_FOR_DX11 = 9,
+} AMF_MEMORY_TYPE;
+
+//----------------------------------------------------------------------------------------------
+typedef enum AMF_DX_VERSION
+{
+    AMF_DX9 = 90,
+    AMF_DX9_EX = 91,
+    AMF_DX11_0 = 110,
+    AMF_DX11_1 = 111
+} AMF_DX_VERSION;
+
+typedef struct AMFData AMFData;
+AMF_DECLARE_IID(AMFData, 0xa1159bf6, 0x9104, 0x4107, 0x8e, 0xaa, 0xc5, 0x3d, 0x5d, 0xba, 0xc5, 0x11)
+
+typedef struct AMFDataVtbl
+{
+    // AMFInterface interface
+    amf_long(AMF_STD_CALL *Acquire)(AMFData* pThis);
+    amf_long(AMF_STD_CALL *Release)(AMFData* pThis);
+    enum AMF_RESULT(AMF_STD_CALL *QueryInterface)(AMFData* pThis, const struct AMFGuid *interfaceID, void** ppInterface);
+
+    // AMFPropertyStorage interface
+    AMF_RESULT(AMF_STD_CALL *SetProperty)(AMFData* pThis, const wchar_t* name, AMFVariantStruct value);
+    AMF_RESULT(AMF_STD_CALL *GetProperty)(AMFData* pThis, const wchar_t* name, AMFVariantStruct* pValue);
+    amf_bool(AMF_STD_CALL *HasProperty)(AMFData* pThis, const wchar_t* name);
+    amf_size(AMF_STD_CALL *GetPropertyCount)(AMFData* pThis);
+    AMF_RESULT(AMF_STD_CALL *GetPropertyAt)(AMFData* pThis, amf_size index, wchar_t* name, amf_size nameSize, AMFVariantStruct* pValue);
+    AMF_RESULT(AMF_STD_CALL *Clear)(AMFData* pThis);
+    AMF_RESULT(AMF_STD_CALL *AddTo)(AMFData* pThis, AMFPropertyStorage* pDest, amf_bool overwrite, amf_bool deep);
+    AMF_RESULT(AMF_STD_CALL *CopyTo)(AMFData* pThis, AMFPropertyStorage* pDest, amf_bool deep);
+    void                (AMF_STD_CALL *AddObserver)(AMFData* pThis, AMFPropertyStorageObserver* pObserver);
+    void                (AMF_STD_CALL *RemoveObserver)(AMFData* pThis, AMFPropertyStorageObserver* pObserver);
+
+    // AMFData interface
+
+    AMF_MEMORY_TYPE(AMF_STD_CALL *GetMemoryType)(AMFData* pThis);
+
+    AMF_RESULT(AMF_STD_CALL *Duplicate)(AMFData* pThis, AMF_MEMORY_TYPE type, AMFData** ppData);
+    AMF_RESULT(AMF_STD_CALL *Convert)(AMFData* pThis, AMF_MEMORY_TYPE type); // optimal interop if possible. Copy through host memory if needed
+    AMF_RESULT(AMF_STD_CALL *Interop)(AMFData* pThis, AMF_MEMORY_TYPE type); // only optimal interop if possible. No copy through host memory for GPU objects
+
+    AMF_DATA_TYPE(AMF_STD_CALL *GetDataType)(AMFData* pThis);
+
+    amf_bool(AMF_STD_CALL *IsReusable)(AMFData* pThis);
+
+    void                (AMF_STD_CALL *SetPts)(AMFData* pThis, amf_pts pts);
+    amf_pts(AMF_STD_CALL *GetPts)(AMFData* pThis);
+    void                (AMF_STD_CALL *SetDuration)(AMFData* pThis, amf_pts duration);
+    amf_pts(AMF_STD_CALL *GetDuration)(AMFData* pThis);
+
+} AMFDataVtbl;
+
+struct AMFData
+{
+    const AMFDataVtbl *pVtbl;
+};
+//-----------------------------------------------------------------------------
+// Plane.h
+//-----------------------------------------------------------------------------
+//---------------------------------------------------------------------------------------------
+typedef enum AMF_PLANE_TYPE
+{
+    AMF_PLANE_UNKNOWN = 0,
+    AMF_PLANE_PACKED = 1,             // for all packed formats: BGRA, YUY2, etc
+    AMF_PLANE_Y = 2,
+    AMF_PLANE_UV = 3,
+    AMF_PLANE_U = 4,
+    AMF_PLANE_V = 5,
+} AMF_PLANE_TYPE;
+
+//---------------------------------------------------------------------------------------------
+// AMFPlane interface
+//---------------------------------------------------------------------------------------------
+AMF_DECLARE_IID(AMFPlane, 0xbede1aa6, 0xd8fa, 0x4625, 0x94, 0x65, 0x6c, 0x82, 0xc4, 0x37, 0x71, 0x2e)
+typedef struct AMFPlane AMFPlane;
+typedef struct AMFPlaneVtbl
+{
+    // AMFInterface interface
+    amf_long(AMF_STD_CALL *Acquire)(AMFPlane* pThis);
+    amf_long(AMF_STD_CALL *Release)(AMFPlane* pThis);
+    enum AMF_RESULT(AMF_STD_CALL *QueryInterface)(AMFPlane* pThis, const struct AMFGuid *interfaceID, void** ppInterface);
+
+    // AMFPlane interface
+    AMF_PLANE_TYPE(AMF_STD_CALL *GetType)(AMFPlane* pThis);
+    void*               (AMF_STD_CALL *GetNative)(AMFPlane* pThis);
+    amf_int32(AMF_STD_CALL *GetPixelSizeInBytes)(AMFPlane* pThis);
+    amf_int32(AMF_STD_CALL *GetOffsetX)(AMFPlane* pThis);
+    amf_int32(AMF_STD_CALL *GetOffsetY)(AMFPlane* pThis);
+    amf_int32(AMF_STD_CALL *GetWidth)(AMFPlane* pThis);
+    amf_int32(AMF_STD_CALL *GetHeight)(AMFPlane* pThis);
+    amf_int32(AMF_STD_CALL *GetHPitch)(AMFPlane* pThis);
+    amf_int32(AMF_STD_CALL *GetVPitch)(AMFPlane* pThis);
+    amf_bool(AMF_STD_CALL *IsTiled)(AMFPlane* pThis);
+
+} AMFPlaneVtbl;
+
+struct AMFPlane
+{
+    const AMFPlaneVtbl *pVtbl;
+};
+//-----------------------------------------------------------------------------
+// Buffer.h
+//-----------------------------------------------------------------------------
+#if defined(_MSC_VER)
+#pragma warning( push )
+#pragma warning(disable : 4263)
+#pragma warning(disable : 4264)
+#endif
+
+typedef struct AMFBuffer AMFBuffer;
+typedef struct AMFBufferObserver AMFBufferObserver;
+
+AMF_DECLARE_IID(AMFBuffer, 0xb04b7248, 0xb6f0, 0x4321, 0xb6, 0x91, 0xba, 0xa4, 0x74, 0xf, 0x9f, 0xcb)
+
+typedef struct AMFBufferVtbl
+{
+    // AMFInterface interface
+    amf_long(AMF_STD_CALL *Acquire)(AMFBuffer* pThis);
+    amf_long(AMF_STD_CALL *Release)(AMFBuffer* pThis);
+    enum AMF_RESULT(AMF_STD_CALL *QueryInterface)(AMFBuffer* pThis, const struct AMFGuid *interfaceID, void** ppInterface);
+
+    // AMFPropertyStorage interface
+    AMF_RESULT(AMF_STD_CALL *SetProperty)(AMFBuffer* pThis, const wchar_t* name, AMFVariantStruct value);
+    AMF_RESULT(AMF_STD_CALL *GetProperty)(AMFBuffer* pThis, const wchar_t* name, AMFVariantStruct* pValue);
+    amf_bool(AMF_STD_CALL *HasProperty)(AMFBuffer* pThis, const wchar_t* name);
+    amf_size(AMF_STD_CALL *GetPropertyCount)(AMFBuffer* pThis);
+    AMF_RESULT(AMF_STD_CALL *GetPropertyAt)(AMFBuffer* pThis, amf_size index, wchar_t* name, amf_size nameSize, AMFVariantStruct* pValue);
+    AMF_RESULT(AMF_STD_CALL *Clear)(AMFBuffer* pThis);
+    AMF_RESULT(AMF_STD_CALL *AddTo)(AMFBuffer* pThis, AMFPropertyStorage* pDest, amf_bool overwrite, amf_bool deep);
+    AMF_RESULT(AMF_STD_CALL *CopyTo)(AMFBuffer* pThis, AMFPropertyStorage* pDest, amf_bool deep);
+    void                (AMF_STD_CALL *AddObserver)(AMFBuffer* pThis, AMFPropertyStorageObserver* pObserver);
+    void                (AMF_STD_CALL *RemoveObserver)(AMFBuffer* pThis, AMFPropertyStorageObserver* pObserver);
+
+    // AMFData interface
+
+    AMF_MEMORY_TYPE(AMF_STD_CALL *GetMemoryType)(AMFBuffer* pThis);
+
+    AMF_RESULT(AMF_STD_CALL *Duplicate)(AMFBuffer* pThis, AMF_MEMORY_TYPE type, AMFData** ppData);
+    AMF_RESULT(AMF_STD_CALL *Convert)(AMFBuffer* pThis, AMF_MEMORY_TYPE type); // optimal interop if possible. Copy through host memory if needed
+    AMF_RESULT(AMF_STD_CALL *Interop)(AMFBuffer* pThis, AMF_MEMORY_TYPE type); // only optimal interop if possible. No copy through host memory for GPU objects
+
+    AMF_DATA_TYPE(AMF_STD_CALL *GetDataType)(AMFBuffer* pThis);
+
+    amf_bool(AMF_STD_CALL *IsReusable)(AMFBuffer* pThis);
+
+    void                (AMF_STD_CALL *SetPts)(AMFBuffer* pThis, amf_pts pts);
+    amf_pts(AMF_STD_CALL *GetPts)(AMFBuffer* pThis);
+    void                (AMF_STD_CALL *SetDuration)(AMFBuffer* pThis, amf_pts duration);
+    amf_pts(AMF_STD_CALL *GetDuration)(AMFBuffer* pThis);
+
+    // AMFBuffer interface
+
+    AMF_RESULT(AMF_STD_CALL *SetSize)(AMFBuffer* pThis, amf_size newSize);
+    amf_size(AMF_STD_CALL *GetSize)(AMFBuffer* pThis);
+    void*               (AMF_STD_CALL *GetNative)(AMFBuffer* pThis);
+
+    // Observer management
+    void                (AMF_STD_CALL *AddObserver_Buffer)(AMFBuffer* pThis, AMFBufferObserver* pObserver);
+    void                (AMF_STD_CALL *RemoveObserver_Buffer)(AMFBuffer* pThis, AMFBufferObserver* pObserver);
+
+} AMFBufferVtbl;
+
+struct AMFBuffer
+{
+    const AMFBufferVtbl *pVtbl;
+};
+
+//-----------------------------------------------------------------------------
+// AudioBuffer.h
+//-----------------------------------------------------------------------------
+typedef enum AMF_AUDIO_FORMAT
+{
+    AMFAF_UNKNOWN = -1,
+    AMFAF_U8 = 0,               // amf_uint8
+    AMFAF_S16 = 1,               // amf_int16
+    AMFAF_S32 = 2,               // amf_int32
+    AMFAF_FLT = 3,               // amf_float
+    AMFAF_DBL = 4,               // amf_double
+
+    AMFAF_U8P = 5,               // amf_uint8
+    AMFAF_S16P = 6,               // amf_int16
+    AMFAF_S32P = 7,               // amf_int32
+    AMFAF_FLTP = 8,               // amf_float
+    AMFAF_DBLP = 9,               // amf_double
+    AMFAF_FIRST = AMFAF_U8,
+    AMFAF_LAST = AMFAF_DBLP,
+} AMF_AUDIO_FORMAT;
+
+typedef struct AMFAudioBuffer AMFAudioBuffer;
+typedef struct AMFAudioBufferObserver AMFAudioBufferObserver;
+//-----------------------------------------------------------------------------
+// Surface.h
+//-----------------------------------------------------------------------------
+
+typedef enum AMF_SURFACE_FORMAT
+{
+    AMF_SURFACE_UNKNOWN = 0,
+    AMF_SURFACE_NV12,               ///< 1 - planar Y width x height + packed UV width/2 x height/2 - 8 bit per component
+    AMF_SURFACE_YV12,               ///< 2 - planar Y width x height + V width/2 x height/2 + U width/2 x height/2 - 8 bit per component
+    AMF_SURFACE_BGRA,               ///< 3 - packed - 8 bit per component
+    AMF_SURFACE_ARGB,               ///< 4 - packed - 8 bit per component
+    AMF_SURFACE_RGBA,               ///< 5 - packed - 8 bit per component
+    AMF_SURFACE_GRAY8,              ///< 6 - single component - 8 bit
+    AMF_SURFACE_YUV420P,            ///< 7 - planar Y width x height + U width/2 x height/2 + V width/2 x height/2 - 8 bit per component
+    AMF_SURFACE_U8V8,               ///< 8 - double component - 8 bit per component
+    AMF_SURFACE_YUY2,               ///< 9 - YUY2: Byte 0=8-bit Y'0; Byte 1=8-bit Cb; Byte 2=8-bit Y'1; Byte 3=8-bit Cr
+    AMF_SURFACE_P010,               ///< 10- planar Y width x height + packed UV width/2 x height/2 - 10 bit per component (16 allocated, upper 10 bits are used)
+    AMF_SURFACE_RGBA_F16,           ///< 11 - packed - 16 bit per component float
+
+    AMF_SURFACE_FIRST = AMF_SURFACE_NV12,
+    AMF_SURFACE_LAST = AMF_SURFACE_RGBA_F16
+} AMF_SURFACE_FORMAT;
+
+//----------------------------------------------------------------------------------------------
+// frame type
+//----------------------------------------------------------------------------------------------
+typedef enum AMF_FRAME_TYPE
+{
+    // flags
+    AMF_FRAME_STEREO_FLAG = 0x10000000,
+    AMF_FRAME_LEFT_FLAG = AMF_FRAME_STEREO_FLAG | 0x20000000,
+    AMF_FRAME_RIGHT_FLAG = AMF_FRAME_STEREO_FLAG | 0x40000000,
+    AMF_FRAME_BOTH_FLAG = AMF_FRAME_LEFT_FLAG | AMF_FRAME_RIGHT_FLAG,
+    AMF_FRAME_INTERLEAVED_FLAG = 0x01000000,
+    AMF_FRAME_FIELD_FLAG = 0x02000000,
+    AMF_FRAME_EVEN_FLAG = 0x04000000,
+    AMF_FRAME_ODD_FLAG = 0x08000000,
+
+    // values
+    AMF_FRAME_UNKNOWN = -1,
+    AMF_FRAME_PROGRESSIVE = 0,
+
+    AMF_FRAME_INTERLEAVED_EVEN_FIRST = AMF_FRAME_INTERLEAVED_FLAG | AMF_FRAME_EVEN_FLAG,
+    AMF_FRAME_INTERLEAVED_ODD_FIRST = AMF_FRAME_INTERLEAVED_FLAG | AMF_FRAME_ODD_FLAG,
+    AMF_FRAME_FIELD_SINGLE_EVEN = AMF_FRAME_FIELD_FLAG | AMF_FRAME_EVEN_FLAG,
+    AMF_FRAME_FIELD_SINGLE_ODD = AMF_FRAME_FIELD_FLAG | AMF_FRAME_ODD_FLAG,
+
+    AMF_FRAME_STEREO_LEFT = AMF_FRAME_LEFT_FLAG,
+    AMF_FRAME_STEREO_RIGHT = AMF_FRAME_RIGHT_FLAG,
+    AMF_FRAME_STEREO_BOTH = AMF_FRAME_BOTH_FLAG,
+
+    AMF_FRAME_INTERLEAVED_EVEN_FIRST_STEREO_LEFT = AMF_FRAME_INTERLEAVED_FLAG | AMF_FRAME_EVEN_FLAG | AMF_FRAME_LEFT_FLAG,
+    AMF_FRAME_INTERLEAVED_EVEN_FIRST_STEREO_RIGHT = AMF_FRAME_INTERLEAVED_FLAG | AMF_FRAME_EVEN_FLAG | AMF_FRAME_RIGHT_FLAG,
+    AMF_FRAME_INTERLEAVED_EVEN_FIRST_STEREO_BOTH = AMF_FRAME_INTERLEAVED_FLAG | AMF_FRAME_EVEN_FLAG | AMF_FRAME_BOTH_FLAG,
+
+    AMF_FRAME_INTERLEAVED_ODD_FIRST_STEREO_LEFT = AMF_FRAME_INTERLEAVED_FLAG | AMF_FRAME_ODD_FLAG | AMF_FRAME_LEFT_FLAG,
+    AMF_FRAME_INTERLEAVED_ODD_FIRST_STEREO_RIGHT = AMF_FRAME_INTERLEAVED_FLAG | AMF_FRAME_ODD_FLAG | AMF_FRAME_RIGHT_FLAG,
+    AMF_FRAME_INTERLEAVED_ODD_FIRST_STEREO_BOTH = AMF_FRAME_INTERLEAVED_FLAG | AMF_FRAME_ODD_FLAG | AMF_FRAME_BOTH_FLAG,
+} AMF_FRAME_TYPE;
+
+typedef struct AMFSurface AMFSurface;
+typedef struct AMFSurfaceObserver AMFSurfaceObserver;
+
+typedef struct AMFSurfaceObserverVtbl
+{
+    void                (AMF_STD_CALL *OnSurfaceDataRelease)(AMFSurfaceObserver* pThis, AMFSurface* pSurface);
+} AMFSurfaceObserverVtbl;
+
+struct AMFSurfaceObserver
+{
+    const AMFSurfaceObserverVtbl *pVtbl;
+};
+
+AMF_DECLARE_IID(AMFSurface, 0x3075dbe3, 0x8718, 0x4cfa, 0x86, 0xfb, 0x21, 0x14, 0xc0, 0xa5, 0xa4, 0x51)
+typedef struct AMFSurfaceVtbl
+{
+    // AMFInterface interface
+    amf_long(AMF_STD_CALL *Acquire)(AMFSurface* pThis);
+    amf_long(AMF_STD_CALL *Release)(AMFSurface* pThis);
+    enum AMF_RESULT(AMF_STD_CALL *QueryInterface)(AMFSurface* pThis, const struct AMFGuid *interfaceID, void** ppInterface);
+
+    // AMFPropertyStorage interface
+    AMF_RESULT(AMF_STD_CALL *SetProperty)(AMFSurface* pThis, const wchar_t* name, AMFVariantStruct value);
+    AMF_RESULT(AMF_STD_CALL *GetProperty)(AMFSurface* pThis, const wchar_t* name, AMFVariantStruct* pValue);
+    amf_bool(AMF_STD_CALL *HasProperty)(AMFSurface* pThis, const wchar_t* name);
+    amf_size(AMF_STD_CALL *GetPropertyCount)(AMFSurface* pThis);
+    AMF_RESULT(AMF_STD_CALL *GetPropertyAt)(AMFSurface* pThis, amf_size index, wchar_t* name, amf_size nameSize, AMFVariantStruct* pValue);
+    AMF_RESULT(AMF_STD_CALL *Clear)(AMFSurface* pThis);
+    AMF_RESULT(AMF_STD_CALL *AddTo)(AMFSurface* pThis, AMFPropertyStorage* pDest, amf_bool overwrite, amf_bool deep);
+    AMF_RESULT(AMF_STD_CALL *CopyTo)(AMFSurface* pThis, AMFPropertyStorage* pDest, amf_bool deep);
+    void                (AMF_STD_CALL *AddObserver)(AMFSurface* pThis, AMFPropertyStorageObserver* pObserver);
+    void                (AMF_STD_CALL *RemoveObserver)(AMFSurface* pThis, AMFPropertyStorageObserver* pObserver);
+
+    // AMFData interface
+
+    AMF_MEMORY_TYPE(AMF_STD_CALL *GetMemoryType)(AMFSurface* pThis);
+
+    AMF_RESULT(AMF_STD_CALL *Duplicate)(AMFSurface* pThis, AMF_MEMORY_TYPE type, AMFData** ppData);
+    AMF_RESULT(AMF_STD_CALL *Convert)(AMFSurface* pThis, AMF_MEMORY_TYPE type); // optimal interop if possible. Copy through host memory if needed
+    AMF_RESULT(AMF_STD_CALL *Interop)(AMFSurface* pThis, AMF_MEMORY_TYPE type); // only optimal interop if possible. No copy through host memory for GPU objects
+
+    AMF_DATA_TYPE(AMF_STD_CALL *GetDataType)(AMFSurface* pThis);
+
+    amf_bool(AMF_STD_CALL *IsReusable)(AMFSurface* pThis);
+
+    void                (AMF_STD_CALL *SetPts)(AMFSurface* pThis, amf_pts pts);
+    amf_pts(AMF_STD_CALL *GetPts)(AMFSurface* pThis);
+    void                (AMF_STD_CALL *SetDuration)(AMFSurface* pThis, amf_pts duration);
+    amf_pts(AMF_STD_CALL *GetDuration)(AMFSurface* pThis);
+
+    // AMFSurface interface
+
+    AMF_SURFACE_FORMAT(AMF_STD_CALL *GetFormat)(AMFSurface* pThis);
+
+    // do not store planes outside. should be used together with Surface
+    amf_size(AMF_STD_CALL *GetPlanesCount)(AMFSurface* pThis);
+    AMFPlane*           (AMF_STD_CALL *GetPlaneAt)(AMFSurface* pThis, amf_size index);
+    AMFPlane*           (AMF_STD_CALL *GetPlane)(AMFSurface* pThis, AMF_PLANE_TYPE type);
+
+    AMF_FRAME_TYPE(AMF_STD_CALL *GetFrameType)(AMFSurface* pThis);
+    void                (AMF_STD_CALL *SetFrameType)(AMFSurface* pThis, AMF_FRAME_TYPE type);
+
+    AMF_RESULT(AMF_STD_CALL *SetCrop)(AMFSurface* pThis, amf_int32 x, amf_int32 y, amf_int32 width, amf_int32 height);
+    AMF_RESULT(AMF_STD_CALL *CopySurfaceRegion)(AMFSurface* pThis, AMFSurface* pDest, amf_int32 dstX, amf_int32 dstY, amf_int32 srcX, amf_int32 srcY, amf_int32 width, amf_int32 height);
+
+
+    // Observer management
+    void                (AMF_STD_CALL *AddObserver_Surface)(AMFSurface* pThis, AMFSurfaceObserver* pObserver);
+    void                (AMF_STD_CALL *RemoveObserver_Surface)(AMFSurface* pThis, AMFSurfaceObserver* pObserver);
+
+} AMFSurfaceVtbl;
+
+struct AMFSurface
+{
+    const AMFSurfaceVtbl *pVtbl;
+};
+
+#define   AMFTextureArrayIndexGUIDDef { 0x28115527, 0xe7c3, 0x4b66,{ 0x99, 0xd3, 0x4f, 0x2a, 0xe6, 0xb4, 0x7f, 0xaf } }
+
+//-----------------------------------------------------------------------------
+// Component.h
+//-----------------------------------------------------------------------------
+AMF_DECLARE_IID(AMFComponent, 0x8b51e5e4, 0x455d, 0x4034, 0xa7, 0x46, 0xde, 0x1b, 0xed, 0xc3, 0xc4, 0x6)
+typedef struct AMFComponent AMFComponent;
+typedef struct AMFContext AMFContext;
+typedef struct AMFIOCaps AMFIOCaps;
+typedef struct AMFCaps AMFCaps;
+
+typedef struct AMFDataAllocatorCB AMFDataAllocatorCB;
+typedef struct AMFComponentOptimizationCallback AMFComponentOptimizationCallback;
+
+typedef struct AMFComponentVtbl
+{
+    // AMFInterface interface
+    amf_long(AMF_STD_CALL *Acquire)(AMFComponent* pThis);
+    amf_long(AMF_STD_CALL *Release)(AMFComponent* pThis);
+    enum AMF_RESULT(AMF_STD_CALL *QueryInterface)(AMFComponent* pThis, const struct AMFGuid *interfaceID, void** ppInterface);
+
+    // AMFPropertyStorage interface
+    AMF_RESULT(AMF_STD_CALL *SetProperty)(AMFComponent* pThis, const wchar_t* name, AMFVariantStruct value);
+    AMF_RESULT(AMF_STD_CALL *GetProperty)(AMFComponent* pThis, const wchar_t* name, AMFVariantStruct* pValue);
+    amf_bool(AMF_STD_CALL *HasProperty)(AMFComponent* pThis, const wchar_t* name);
+    amf_size(AMF_STD_CALL *GetPropertyCount)(AMFComponent* pThis);
+    AMF_RESULT(AMF_STD_CALL *GetPropertyAt)(AMFComponent* pThis, amf_size index, wchar_t* name, amf_size nameSize, AMFVariantStruct* pValue);
+    AMF_RESULT(AMF_STD_CALL *Clear)(AMFComponent* pThis);
+    AMF_RESULT(AMF_STD_CALL *AddTo)(AMFComponent* pThis, AMFPropertyStorage* pDest, amf_bool overwrite, amf_bool deep);
+    AMF_RESULT(AMF_STD_CALL *CopyTo)(AMFComponent* pThis, AMFPropertyStorage* pDest, amf_bool deep);
+    void                (AMF_STD_CALL *AddObserver)(AMFComponent* pThis, AMFPropertyStorageObserver* pObserver);
+    void                (AMF_STD_CALL *RemoveObserver)(AMFComponent* pThis, AMFPropertyStorageObserver* pObserver);
+
+    // AMFPropertyStorageEx interface
+
+    amf_size(AMF_STD_CALL *GetPropertiesInfoCount)(AMFComponent* pThis);
+    AMF_RESULT(AMF_STD_CALL *GetPropertyInfoAt)(AMFComponent* pThis, amf_size index, const AMFPropertyInfo** ppInfo);
+    AMF_RESULT(AMF_STD_CALL *GetPropertyInfo)(AMFComponent* pThis, const wchar_t* name, const AMFPropertyInfo** ppInfo);
+    AMF_RESULT(AMF_STD_CALL *ValidateProperty)(AMFComponent* pThis, const wchar_t* name, AMFVariantStruct value, AMFVariantStruct* pOutValidated);
+
+    // AMFComponent interface
+
+    AMF_RESULT(AMF_STD_CALL *Init)(AMFComponent* pThis, AMF_SURFACE_FORMAT format, amf_int32 width, amf_int32 height);
+    AMF_RESULT(AMF_STD_CALL *ReInit)(AMFComponent* pThis, amf_int32 width, amf_int32 height);
+    AMF_RESULT(AMF_STD_CALL *Terminate)(AMFComponent* pThis);
+    AMF_RESULT(AMF_STD_CALL *Drain)(AMFComponent* pThis);
+    AMF_RESULT(AMF_STD_CALL *Flush)(AMFComponent* pThis);
+
+    AMF_RESULT(AMF_STD_CALL *SubmitInput)(AMFComponent* pThis, AMFData* pData);
+    AMF_RESULT(AMF_STD_CALL *QueryOutput)(AMFComponent* pThis, AMFData** ppData);
+    AMFContext* (AMF_STD_CALL *GetContext)(AMFComponent* pThis);
+    AMF_RESULT(AMF_STD_CALL *SetOutputDataAllocatorCB)(AMFComponent* pThis, AMFDataAllocatorCB* callback);
+
+    AMF_RESULT(AMF_STD_CALL *GetCaps)(AMFComponent* pThis, AMFCaps** ppCaps);
+    AMF_RESULT(AMF_STD_CALL *Optimize)(AMFComponent* pThis, AMFComponentOptimizationCallback* pCallback);
+} AMFComponentVtbl;
+
+struct AMFComponent
+{
+    const AMFComponentVtbl *pVtbl;
+};
+
+//-----------------------------------------------------------------------------
+// Context.h
+//-----------------------------------------------------------------------------
+typedef struct AMFCompute AMFCompute;
+typedef struct AMFComputeFactory AMFComputeFactory;
+typedef struct AMFComputeDevice AMFComputeDevice;
+typedef struct AMFContext AMFContext;
+AMF_DECLARE_IID(AMFContext, 0xa76a13f0, 0xd80e, 0x4fcc, 0xb5, 0x8, 0x65, 0xd0, 0xb5, 0x2e, 0xd9, 0xee)
+
+typedef struct AMFContextVtbl
+{
+    // AMFInterface interface
+    amf_long(AMF_STD_CALL *Acquire)(AMFContext* pThis);
+    amf_long(AMF_STD_CALL *Release)(AMFContext* pThis);
+    enum AMF_RESULT(AMF_STD_CALL *QueryInterface)(AMFContext* pThis, const struct AMFGuid *interfaceID, void** ppInterface);
+
+    // AMFInterface AMFPropertyStorage
+
+    AMF_RESULT(AMF_STD_CALL *SetProperty)(AMFContext* pThis, const wchar_t* name, AMFVariantStruct value);
+    AMF_RESULT(AMF_STD_CALL *GetProperty)(AMFContext* pThis, const wchar_t* name, AMFVariantStruct* pValue);
+    amf_bool(AMF_STD_CALL *HasProperty)(AMFContext* pThis, const wchar_t* name);
+    amf_size(AMF_STD_CALL *GetPropertyCount)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *GetPropertyAt)(AMFContext* pThis, amf_size index, wchar_t* name, amf_size nameSize, AMFVariantStruct* pValue);
+    AMF_RESULT(AMF_STD_CALL *Clear)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *AddTo)(AMFContext* pThis, AMFPropertyStorage* pDest, amf_bool overwrite, amf_bool deep);
+    AMF_RESULT(AMF_STD_CALL *CopyTo)(AMFContext* pThis, AMFPropertyStorage* pDest, amf_bool deep);
+    void                (AMF_STD_CALL *AddObserver)(AMFContext* pThis, AMFPropertyStorageObserver* pObserver);
+    void                (AMF_STD_CALL *RemoveObserver)(AMFContext* pThis, AMFPropertyStorageObserver* pObserver);
+
+    // AMFContext interface
+
+    // Cleanup
+    AMF_RESULT(AMF_STD_CALL *Terminate)(AMFContext* pThis);
+
+    // DX9
+    AMF_RESULT(AMF_STD_CALL *InitDX9)(AMFContext* pThis, void* pDX9Device);
+    void*               (AMF_STD_CALL *GetDX9Device)(AMFContext* pThis, AMF_DX_VERSION dxVersionRequired);
+    AMF_RESULT(AMF_STD_CALL *LockDX9)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *UnlockDX9)(AMFContext* pThis);
+    // DX11
+    AMF_RESULT(AMF_STD_CALL *InitDX11)(AMFContext* pThis, void* pDX11Device, AMF_DX_VERSION dxVersionRequired);
+    void*               (AMF_STD_CALL *GetDX11Device)(AMFContext* pThis, AMF_DX_VERSION dxVersionRequired);
+    AMF_RESULT(AMF_STD_CALL *LockDX11)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *UnlockDX11)(AMFContext* pThis);
+
+    // OpenCL
+    AMF_RESULT(AMF_STD_CALL *InitOpenCL)(AMFContext* pThis, void* pCommandQueue);
+    void*               (AMF_STD_CALL *GetOpenCLContext)(AMFContext* pThis);
+    void*               (AMF_STD_CALL *GetOpenCLCommandQueue)(AMFContext* pThis);
+    void*               (AMF_STD_CALL *GetOpenCLDeviceID)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *GetOpenCLComputeFactory)(AMFContext* pThis, AMFComputeFactory **ppFactory); // advanced compute - multiple queries
+    AMF_RESULT(AMF_STD_CALL *InitOpenCLEx)(AMFContext* pThis, AMFComputeDevice *pDevice);
+    AMF_RESULT(AMF_STD_CALL *LockOpenCL)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *UnlockOpenCL)(AMFContext* pThis);
+
+    // OpenGL
+    AMF_RESULT(AMF_STD_CALL *InitOpenGL)(AMFContext* pThis, amf_handle hOpenGLContext, amf_handle hWindow, amf_handle hDC);
+    amf_handle(AMF_STD_CALL *GetOpenGLContext)(AMFContext* pThis);
+    amf_handle(AMF_STD_CALL *GetOpenGLDrawable)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *LockOpenGL)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *UnlockOpenGL)(AMFContext* pThis);
+    // XV - Linux
+    AMF_RESULT(AMF_STD_CALL *InitXV)(AMFContext* pThis, void* pXVDevice);
+    void*               (AMF_STD_CALL *GetXVDevice)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *LockXV)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *UnlockXV)(AMFContext* pThis);
+
+    // Gralloc - Android
+    AMF_RESULT(AMF_STD_CALL *InitGralloc)(AMFContext* pThis, void* pGrallocDevice);
+    void*               (AMF_STD_CALL *GetGrallocDevice)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *LockGralloc)(AMFContext* pThis);
+    AMF_RESULT(AMF_STD_CALL *UnlockGralloc)(AMFContext* pThis);
+    // Allocation
+    AMF_RESULT(AMF_STD_CALL *AllocBuffer)(AMFContext* pThis, AMF_MEMORY_TYPE type, amf_size size, AMFBuffer** ppBuffer);
+    AMF_RESULT(AMF_STD_CALL *AllocSurface)(AMFContext* pThis, AMF_MEMORY_TYPE type, AMF_SURFACE_FORMAT format, amf_int32 width, amf_int32 height, AMFSurface** ppSurface);
+    AMF_RESULT(AMF_STD_CALL *AllocAudioBuffer)(AMFContext* pThis, AMF_MEMORY_TYPE type, AMF_AUDIO_FORMAT format, amf_int32 samples, amf_int32 sampleRate, amf_int32 channels,
+        AMFAudioBuffer** ppAudioBuffer);
+
+    // Wrap existing objects
+    AMF_RESULT(AMF_STD_CALL *CreateBufferFromHostNative)(AMFContext* pThis, void* pHostBuffer, amf_size size, AMFBuffer** ppBuffer, AMFBufferObserver* pObserver);
+    AMF_RESULT(AMF_STD_CALL *CreateSurfaceFromHostNative)(AMFContext* pThis, AMF_SURFACE_FORMAT format, amf_int32 width, amf_int32 height, amf_int32 hPitch, amf_int32 vPitch, void* pData,
+        AMFSurface** ppSurface, AMFSurfaceObserver* pObserver);
+    AMF_RESULT(AMF_STD_CALL *CreateSurfaceFromDX9Native)(AMFContext* pThis, void* pDX9Surface, AMFSurface** ppSurface, AMFSurfaceObserver* pObserver);
+    AMF_RESULT(AMF_STD_CALL *CreateSurfaceFromDX11Native)(AMFContext* pThis, void* pDX11Surface, AMFSurface** ppSurface, AMFSurfaceObserver* pObserver);
+    AMF_RESULT(AMF_STD_CALL *CreateSurfaceFromOpenGLNative)(AMFContext* pThis, AMF_SURFACE_FORMAT format, amf_handle hGLTextureID, AMFSurface** ppSurface, AMFSurfaceObserver* pObserver);
+    AMF_RESULT(AMF_STD_CALL *CreateSurfaceFromGrallocNative)(AMFContext* pThis, amf_handle hGrallocSurface, AMFSurface** ppSurface, AMFSurfaceObserver* pObserver);
+    AMF_RESULT(AMF_STD_CALL *CreateSurfaceFromOpenCLNative)(AMFContext* pThis, AMF_SURFACE_FORMAT format, amf_int32 width, amf_int32 height, void** pClPlanes,
+        AMFSurface** ppSurface, AMFSurfaceObserver* pObserver);
+    AMF_RESULT(AMF_STD_CALL *CreateBufferFromOpenCLNative)(AMFContext* pThis, void* pCLBuffer, amf_size size, AMFBuffer** ppBuffer);
+
+    // Access to AMFCompute interface - AMF_MEMORY_OPENCL, AMF_MEMORY_COMPUTE_FOR_DX9, AMF_MEMORY_COMPUTE_FOR_DX11 are currently supported
+    AMF_RESULT(AMF_STD_CALL *GetCompute)(AMFContext* pThis, AMF_MEMORY_TYPE eMemType, AMFCompute** ppCompute);
+
+} AMFContextVtbl;
+
+struct AMFContext
+{
+    const AMFContextVtbl *pVtbl;
+};
+
+//-----------------------------------------------------------------------------
+// Debug.h 
+//-----------------------------------------------------------------------------
+
+typedef struct AMFDebug AMFDebug;
+typedef struct AMFDebugVtbl
+{
+    // AMFDebug interface
+    void               (AMF_STD_CALL *EnablePerformanceMonitor)(AMFDebug* pThis, amf_bool enable);
+    amf_bool(AMF_STD_CALL *PerformanceMonitorEnabled)(AMFDebug* pThis);
+    void               (AMF_STD_CALL *AssertsEnable)(AMFDebug* pThis, amf_bool enable);
+    amf_bool(AMF_STD_CALL *AssertsEnabled)(AMFDebug* pThis);
+} AMFDebugVtbl;
+
+struct AMFDebug
+{
+    const AMFDebugVtbl *pVtbl;
+};
+
+//-----------------------------------------------------------------------------
+// Trace.h 
+//-----------------------------------------------------------------------------
+//----------------------------------------------------------------------------------------------
+// trace levels
+//----------------------------------------------------------------------------------------------
+#define AMF_TRACE_ERROR     0
+#define AMF_TRACE_WARNING   1
+#define AMF_TRACE_INFO      2 // default in sdk
+#define AMF_TRACE_DEBUG     3
+#define AMF_TRACE_TRACE     4
+
+#define AMF_TRACE_TEST      5
+#define AMF_TRACE_NOLOG     100
+
+//----------------------------------------------------------------------------------------------
+// available trace writers
+//----------------------------------------------------------------------------------------------
+#define AMF_TRACE_WRITER_CONSOLE            L"Console"
+#define AMF_TRACE_WRITER_DEBUG_OUTPUT       L"DebugOutput"
+#define AMF_TRACE_WRITER_FILE               L"File"
+
+
+typedef struct AMFTraceWriter AMFTraceWriter;
+
+typedef struct AMFTraceWriterVtbl
+{
+    // AMFTraceWriter interface
+    void (AMF_CDECL_CALL *Write)(AMFTraceWriter* pThis, const wchar_t* scope, const wchar_t* message);
+    void (AMF_CDECL_CALL *Flush)(AMFTraceWriter* pThis);
+} AMFTraceWriterVtbl;
+
+struct AMFTraceWriter
+{
+    const AMFTraceWriterVtbl *pVtbl;
+};
+typedef struct AMFTrace AMFTrace;
+
+typedef struct AMFTraceVtbl
+{
+    // AMFTrace interface
+    void               (AMF_STD_CALL *TraceW)(AMFTrace* pThis, const wchar_t* src_path, amf_int32 line, amf_int32 level, const wchar_t* scope, amf_int32 countArgs, const wchar_t* format, ...);
+    void               (AMF_STD_CALL *Trace)(AMFTrace* pThis, const wchar_t* src_path, amf_int32 line, amf_int32 level, const wchar_t* scope, const wchar_t* message, va_list* pArglist);
+
+    amf_int32(AMF_STD_CALL *SetGlobalLevel)(AMFTrace* pThis, amf_int32 level);
+    amf_int32(AMF_STD_CALL *GetGlobalLevel)(AMFTrace* pThis);
+
+    amf_bool(AMF_STD_CALL *EnableWriter)(AMFTrace* pThis, const wchar_t* writerID, amf_bool enable);
+    amf_bool(AMF_STD_CALL *WriterEnabled)(AMFTrace* pThis, const wchar_t* writerID);
+    AMF_RESULT(AMF_STD_CALL *TraceEnableAsync)(AMFTrace* pThis, amf_bool enable);
+    AMF_RESULT(AMF_STD_CALL *TraceFlush)(AMFTrace* pThis);
+    AMF_RESULT(AMF_STD_CALL *SetPath)(AMFTrace* pThis, const wchar_t* path);
+    AMF_RESULT(AMF_STD_CALL *GetPath)(AMFTrace* pThis, wchar_t* path, amf_size* pSize);
+    amf_int32(AMF_STD_CALL *SetWriterLevel)(AMFTrace* pThis, const wchar_t* writerID, amf_int32 level);
+    amf_int32(AMF_STD_CALL *GetWriterLevel)(AMFTrace* pThis, const wchar_t* writerID);
+    amf_int32(AMF_STD_CALL *SetWriterLevelForScope)(AMFTrace* pThis, const wchar_t* writerID, const wchar_t* scope, amf_int32 level);
+    amf_int32(AMF_STD_CALL *GetWriterLevelForScope)(AMFTrace* pThis, const wchar_t* writerID, const wchar_t* scope);
+
+    amf_int32(AMF_STD_CALL *GetIndentation)(AMFTrace* pThis);
+    void                (AMF_STD_CALL *Indent)(AMFTrace* pThis, amf_int32 addIndent);
+
+    void                (AMF_STD_CALL *RegisterWriter)(AMFTrace* pThis, const wchar_t* writerID, AMFTraceWriter* pWriter, amf_bool enable);
+    void                (AMF_STD_CALL *UnregisterWriter)(AMFTrace* pThis, const wchar_t* writerID);
+
+    const wchar_t*      (AMF_STD_CALL *GetResultText)(AMFTrace* pThis, AMF_RESULT res);
+    const wchar_t*      (AMF_STD_CALL *SurfaceGetFormatName)(AMFTrace* pThis, const AMF_SURFACE_FORMAT eSurfaceFormat);
+    AMF_SURFACE_FORMAT(AMF_STD_CALL *SurfaceGetFormatByName)(AMFTrace* pThis, const wchar_t* name);
+
+    const wchar_t* const (AMF_STD_CALL *GetMemoryTypeName)(AMFTrace* pThis, const AMF_MEMORY_TYPE memoryType);
+    AMF_MEMORY_TYPE(AMF_STD_CALL *GetMemoryTypeByName)(AMFTrace* pThis, const wchar_t* name);
+
+    const wchar_t* const (AMF_STD_CALL *GetSampleFormatName)(AMFTrace* pThis, const AMF_AUDIO_FORMAT eFormat);
+    AMF_AUDIO_FORMAT(AMF_STD_CALL *GetSampleFormatByName)(AMFTrace* pThis, const wchar_t* name);
+} AMFTraceVtbl;
+
+struct AMFTrace
+{
+    const AMFTraceVtbl *pVtbl;
+};
+//-----------------------------------------------------------------------------
+// Factory.h
+//-----------------------------------------------------------------------------
+typedef struct AMFPrograms AMFPrograms;
+
+typedef struct AMFFactory AMFFactory;
+
+typedef struct AMFFactoryVtbl
+{
+    AMF_RESULT(AMF_STD_CALL *CreateContext)(AMFFactory* pThis, AMFContext** ppContext);
+    AMF_RESULT(AMF_STD_CALL *CreateComponent)(AMFFactory* pThis, AMFContext* pContext, const wchar_t* id, AMFComponent** ppComponent);
+    AMF_RESULT(AMF_STD_CALL *SetCacheFolder)(AMFFactory* pThis, const wchar_t* path);
+    const wchar_t*      (AMF_STD_CALL *GetCacheFolder)(AMFFactory* pThis);
+    AMF_RESULT(AMF_STD_CALL *GetDebug)(AMFFactory* pThis, AMFDebug** ppDebug);
+    AMF_RESULT(AMF_STD_CALL *GetTrace)(AMFFactory* pThis, AMFTrace** ppTrace);
+    AMF_RESULT(AMF_STD_CALL *GetPrograms)(AMFFactory* pThis, AMFPrograms** ppPrograms);
+} AMFFactoryVtbl;
+
+struct AMFFactory
+{
+    const AMFFactoryVtbl *pVtbl;
+};
+
+#define AMF_INIT_FUNCTION_NAME             "AMFInit"
+#define AMF_QUERY_VERSION_FUNCTION_NAME    "AMFQueryVersion"
+
+typedef AMF_RESULT(AMF_CDECL_CALL *AMFInit_Fn)(amf_uint64 version, AMFFactory **ppFactory);
+typedef AMF_RESULT(AMF_CDECL_CALL *AMFQueryVersion_Fn)(amf_uint64 *pVersion);
+
+#if defined(_M_AMD64)
+#define AMF_DLL_NAME    L"amfrt64.dll"
+#define AMF_DLL_NAMEA   "amfrt64.dll"
+#else
+#define AMF_DLL_NAME    L"amfrt32.dll"
+#define AMF_DLL_NAMEA   "amfrt32.dll"
+#endif
+
+
+//-----------------------------------------------------------------------------
+// VideoEncoderVCE.h
+//-----------------------------------------------------------------------------
+#define AMFVideoEncoderVCE_AVC L"AMFVideoEncoderVCE_AVC"
+#define AMFVideoEncoderVCE_SVC L"AMFVideoEncoderVCE_SVC"
+
+enum AMF_VIDEO_ENCODER_USAGE_ENUM
+{
+    AMF_VIDEO_ENCODER_USAGE_TRANSCONDING = 0,
+    AMF_VIDEO_ENCODER_USAGE_ULTRA_LOW_LATENCY,
+    AMF_VIDEO_ENCODER_USAGE_LOW_LATENCY,
+    AMF_VIDEO_ENCODER_USAGE_WEBCAM
+};
+
+enum AMF_VIDEO_ENCODER_PROFILE_ENUM
+{
+    AMF_VIDEO_ENCODER_PROFILE_BASELINE = 66,
+    AMF_VIDEO_ENCODER_PROFILE_MAIN = 77,
+    AMF_VIDEO_ENCODER_PROFILE_HIGH = 100
+};
+
+enum AMF_VIDEO_ENCODER_SCANTYPE_ENUM
+{
+    AMF_VIDEO_ENCODER_SCANTYPE_PROGRESSIVE = 0,
+    AMF_VIDEO_ENCODER_SCANTYPE_INTERLACED
+};
+
+enum AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_ENUM
+{
+    AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP = 0,
+    AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CBR,
+    AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR,
+    AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_LATENCY_CONSTRAINED_VBR
+};
+
+enum AMF_VIDEO_ENCODER_QUALITY_PRESET_ENUM
+{
+    AMF_VIDEO_ENCODER_QUALITY_PRESET_BALANCED = 0,
+    AMF_VIDEO_ENCODER_QUALITY_PRESET_SPEED,
+    AMF_VIDEO_ENCODER_QUALITY_PRESET_QUALITY
+};
+
+enum AMF_VIDEO_ENCODER_PICTURE_STRUCTURE_ENUM
+{
+    AMF_VIDEO_ENCODER_PICTURE_STRUCTURE_NONE = 0,
+    AMF_VIDEO_ENCODER_PICTURE_STRUCTURE_FRAME,
+    AMF_VIDEO_ENCODER_PICTURE_STRUCTURE_TOP_FIELD,
+    AMF_VIDEO_ENCODER_PICTURE_STRUCTURE_BOTTOM_FIELD
+};
+
+enum AMF_VIDEO_ENCODER_PICTURE_TYPE_ENUM
+{
+    AMF_VIDEO_ENCODER_PICTURE_TYPE_NONE = 0,
+    AMF_VIDEO_ENCODER_PICTURE_TYPE_SKIP,
+    AMF_VIDEO_ENCODER_PICTURE_TYPE_IDR,
+    AMF_VIDEO_ENCODER_PICTURE_TYPE_I,
+    AMF_VIDEO_ENCODER_PICTURE_TYPE_P,
+    AMF_VIDEO_ENCODER_PICTURE_TYPE_B
+};
+
+enum AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_ENUM
+{
+    AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_IDR,
+    AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_I,
+    AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_P,
+    AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_B
+};
+
+enum AMF_VIDEO_ENCODER_PREENCODE_MODE_ENUM
+{
+    AMF_VIDEO_ENCODER_PREENCODE_DISABLED = 0,
+    AMF_VIDEO_ENCODER_PREENCODE_ENABLED = 1,
+};
+
+enum AMF_VIDEO_ENCODER_CODING_ENUM
+{
+    AMF_VIDEO_ENCODER_UNDEFINED = 0, // BASELINE = CALV; MAIN, HIGH = CABAC
+    AMF_VIDEO_ENCODER_CABAC,
+    AMF_VIDEO_ENCODER_CALV,
+
+};
+
+
+// Static properties - can be set before Init()
+
+#define AMF_VIDEO_ENCODER_FRAMESIZE                             L"FrameSize"                // AMFSize; default = 0,0; Frame size
+#define AMF_VIDEO_ENCODER_FRAMERATE                             L"FrameRate"                // AMFRate; default = depends on usage; Frame Rate 
+
+#define AMF_VIDEO_ENCODER_EXTRADATA                             L"ExtraData"                // AMFInterface* - > AMFBuffer*; SPS/PPS buffer in Annex B format - read-only
+#define AMF_VIDEO_ENCODER_USAGE                                 L"Usage"                    // amf_int64(AMF_VIDEO_ENCODER_USAGE_ENUM); default = N/A; Encoder usage type. fully configures parameter set. 
+#define AMF_VIDEO_ENCODER_PROFILE                               L"Profile"                  // amf_int64(AMF_VIDEO_ENCODER_PROFILE_ENUM) ; default = AMF_VIDEO_ENCODER_PROFILE_MAIN;  H264 profile
+#define AMF_VIDEO_ENCODER_PROFILE_LEVEL                         L"ProfileLevel"             // amf_int64; default = 42; H264 profile level
+#define AMF_VIDEO_ENCODER_MAX_LTR_FRAMES                        L"MaxOfLTRFrames"           // amf_int64; default = 0; Max number of LTR frames
+#define AMF_VIDEO_ENCODER_SCANTYPE                              L"ScanType"                 // amf_int64(AMF_VIDEO_ENCODER_SCANTYPE_ENUM); default = AMF_VIDEO_ENCODER_SCANTYPE_PROGRESSIVE; indicates input stream type
+#define AMF_VIDEO_ENCODER_MAX_NUM_REFRAMES                      L"MaxNumRefFrames"          // amf_int64; Maximum number of reference frames
+#define AMF_VIDEO_ENCODER_ASPECT_RATIO                          L"AspectRatio"              // AMFRatio; default = 1, 1
+#define AMF_VIDEO_ENCODER_FULL_RANGE_COLOR                      L"FullRangeColor"           // bool; default = false; indicates that YUV input is (0,255)
+#define AMF_VIDEO_ENCODER_RATE_CONTROL_PREANALYSIS_ENABLE       L"RateControlPreanalysisEnable"     // amf_int64(AMF_VIDEO_ENCODER_PREENCODE_MODE_ENUM); default =  AMF_VIDEO_ENCODER_PREENCODE_DISABLED; controls Pre-analysis assisted rate control 
+
+// Quality preset property
+#define AMF_VIDEO_ENCODER_QUALITY_PRESET                        L"QualityPreset"            // amf_int64(AMF_VIDEO_ENCODER_QUALITY_PRESET_ENUM); default = depends on USAGE; Quality Preset 
+
+
+// Dynamic properties - can be set at any time
+
+// Rate control properties
+#define AMF_VIDEO_ENCODER_B_PIC_DELTA_QP                        L"BPicturesDeltaQP"         // amf_int64; default = depends on USAGE; B-picture Delta
+#define AMF_VIDEO_ENCODER_REF_B_PIC_DELTA_QP                    L"ReferenceBPicturesDeltaQP"// amf_int64; default = depends on USAGE; Reference B-picture Delta
+
+#define AMF_VIDEO_ENCODER_ENFORCE_HRD                           L"EnforceHRD"               // bool; default = depends on USAGE; Enforce HRD
+#define AMF_VIDEO_ENCODER_FILLER_DATA_ENABLE                    L"FillerDataEnable"         // bool; default = false; Filler Data Enable
+#define AMF_VIDEO_ENCODER_ENABLE_VBAQ                           L"EnableVBAQ"               // bool; default = depends on USAGE; Enable VBAQ
+
+
+#define AMF_VIDEO_ENCODER_VBV_BUFFER_SIZE                       L"VBVBufferSize"            // amf_int64; default = depends on USAGE; VBV Buffer Size in bits
+#define AMF_VIDEO_ENCODER_INITIAL_VBV_BUFFER_FULLNESS           L"InitialVBVBufferFullness" // amf_int64; default =  64; Initial VBV Buffer Fullness 0=0% 64=100%
+
+#define AMF_VIDEO_ENCODER_MAX_AU_SIZE                           L"MaxAUSize"                // amf_int64; default = 60; Max AU Size in bits
+
+#define AMF_VIDEO_ENCODER_MIN_QP                                L"MinQP"                    // amf_int64; default = depends on USAGE; Min QP; range = 0-51
+#define AMF_VIDEO_ENCODER_MAX_QP                                L"MaxQP"                    // amf_int64; default = depends on USAGE; Max QP; range = 0-51
+#define AMF_VIDEO_ENCODER_QP_I                                  L"QPI"                      // amf_int64; default = 22; I-frame QP; range = 0-51
+#define AMF_VIDEO_ENCODER_QP_P                                  L"QPP"                      // amf_int64; default = 22; P-frame QP; range = 0-51
+#define AMF_VIDEO_ENCODER_QP_B                                  L"QPB"                      // amf_int64; default = 22; B-frame QP; range = 0-51
+#define AMF_VIDEO_ENCODER_TARGET_BITRATE                        L"TargetBitrate"            // amf_int64; default = depends on USAGE; Target bit rate in bits
+#define AMF_VIDEO_ENCODER_PEAK_BITRATE                          L"PeakBitrate"              // amf_int64; default = depends on USAGE; Peak bit rate in bits
+#define AMF_VIDEO_ENCODER_RATE_CONTROL_SKIP_FRAME_ENABLE        L"RateControlSkipFrameEnable"   // bool; default =  depends on USAGE; Rate Control Based Frame Skip 
+#define AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD                   L"RateControlMethod"        // amf_int64(AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_ENUM); default = depends on USAGE; Rate Control Method 
+
+// Picture control properties
+#define AMF_VIDEO_ENCODER_HEADER_INSERTION_SPACING              L"HeaderInsertionSpacing"   // amf_int64; default = depends on USAGE; Header Insertion Spacing; range 0-1000
+#define AMF_VIDEO_ENCODER_B_PIC_PATTERN                         L"BPicturesPattern"         // amf_int64; default = 3; B-picture Pattern (number of B-Frames)
+#define AMF_VIDEO_ENCODER_DE_BLOCKING_FILTER                    L"DeBlockingFilter"         // bool; default = depends on USAGE; De-blocking Filter
+#define AMF_VIDEO_ENCODER_B_REFERENCE_ENABLE                    L"BReferenceEnable"         // bool; default = true; Enable Reference to B-frames
+#define AMF_VIDEO_ENCODER_IDR_PERIOD                            L"IDRPeriod"                // amf_int64; default = depends on USAGE; IDR Period in frames
+#define AMF_VIDEO_ENCODER_INTRA_REFRESH_NUM_MBS_PER_SLOT        L"IntraRefreshMBsNumberPerSlot" // amf_int64; default = depends on USAGE; Intra Refresh MBs Number Per Slot in Macroblocks
+#define AMF_VIDEO_ENCODER_SLICES_PER_FRAME                      L"SlicesPerFrame"           // amf_int64; default = 1; Number of slices Per Frame 
+#define AMF_VIDEO_ENCODER_CABAC_ENABLE                          L"CABACEnable"              // amf_int64(AMF_VIDEO_ENCODER_CODING_ENUM) default = AMF_VIDEO_ENCODER_UNDEFINED
+
+// Motion estimation
+#define AMF_VIDEO_ENCODER_MOTION_HALF_PIXEL                     L"HalfPixel"                // bool; default= true; Half Pixel 
+#define AMF_VIDEO_ENCODER_MOTION_QUARTERPIXEL                   L"QuarterPixel"             // bool; default= true; Quarter Pixel
+
+// SVC
+#define AMF_VIDEO_ENCODER_NUM_TEMPORAL_ENHANCMENT_LAYERS        L"NumOfTemporalEnhancmentLayers" // amf_int64; default = 0; range = 0, min(2, caps->GetMaxNumOfTemporalLayers()) number of temporal enhancment Layers (SVC)
+
+// Per-submission properties - can be set on input surface interface
+#define AMF_VIDEO_ENCODER_END_OF_SEQUENCE                       L"EndOfSequence"            // bool; default = false; generate end of sequence
+#define AMF_VIDEO_ENCODER_END_OF_STREAM                         L"EndOfStream"              // bool; default = false; generate end of stream
+#define AMF_VIDEO_ENCODER_FORCE_PICTURE_TYPE                    L"ForcePictureType"         // amf_int64(AMF_VIDEO_ENCODER_PICTURE_TYPE_ENUM); default = AMF_VIDEO_ENCODER_PICTURE_TYPE_NONE; generate particular picture type
+#define AMF_VIDEO_ENCODER_INSERT_AUD                            L"InsertAUD"                // bool; default = false; insert AUD
+#define AMF_VIDEO_ENCODER_INSERT_SPS                            L"InsertSPS"                // bool; default = false; insert SPS
+#define AMF_VIDEO_ENCODER_INSERT_PPS                            L"InsertPPS"                // bool; default = false; insert PPS
+#define AMF_VIDEO_ENCODER_PICTURE_STRUCTURE                     L"PictureStructure"         // amf_int64(AMF_VIDEO_ENCODER_PICTURE_STRUCTURE_ENUM); default = AMF_VIDEO_ENCODER_PICTURE_STRUCTURE_FRAME; indicate picture type
+#define AMF_VIDEO_ENCODER_MARK_CURRENT_WITH_LTR_INDEX           L"MarkCurrentWithLTRIndex"  // amf_int64; default = N/A; Mark current frame with LTR index
+#define AMF_VIDEO_ENCODER_FORCE_LTR_REFERENCE_BITFIELD          L"ForceLTRReferenceBitfield"// amf_int64; default = 0; force LTR bit-field 
+
+// properties set by encoder on output buffer interface
+#define AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE                      L"OutputDataType"           // amf_int64(AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_ENUM); default = N/A
+#define AMF_VIDEO_ENCODER_OUTPUT_MARKED_LTR_INDEX               L"MarkedLTRIndex"           //amf_int64; default = -1; Marked LTR index
+#define AMF_VIDEO_ENCODER_OUTPUT_REFERENCED_LTR_INDEX_BITFIELD  L"ReferencedLTRIndexBitfield" // amf_int64; default = 0; referenced LTR bit-field 
+
+
+#define AMF_VIDEO_ENCODER_HDCP_COUNTER                          L"HDCPCounter"              //  const void*
+
+// Properties for multi-instance cloud gaming
+#define AMF_VIDEO_ENCODER_MAX_INSTANCES                         L"EncoderMaxInstances"      //  amf_uint32; default = 1; max number of encoder instances
+#define AMF_VIDEO_ENCODER_MULTI_INSTANCE_MODE                   L"MultiInstanceMode"        //  bool; default = false;
+#define AMF_VIDEO_ENCODER_CURRENT_QUEUE                         L"MultiInstanceCurrentQueue"//  amf_uint32; default = 0; 
+
+// VCE Encoder capabilities - exposed in AMFCaps interface
+#define AMF_VIDEO_ENCODER_CAP_MAX_BITRATE                       L"MaxBitrate"               // amf_int64; Maximum bit rate in bits
+#define AMF_VIDEO_ENCODER_CAP_NUM_OF_STREAMS                    L"NumOfStreams"             // amf_int64; maximum number of encode streams supported 
+#define AMF_VIDEO_ENCODER_CAP_MAX_PROFILE                       L"MaxProfile"               // AMF_VIDEO_ENCODER_PROFILE_ENUM
+#define AMF_VIDEO_ENCODER_CAP_MAX_LEVEL                         L"MaxLevel"                 // amf_int64 maximum profile level
+#define AMF_VIDEO_ENCODER_CAP_BFRAMES                           L"BFrames"                  // bool  is B-Frames supported
+#define AMF_VIDEO_ENCODER_CAP_MIN_REFERENCE_FRAMES              L"MinReferenceFrames"       // amf_int64 minimum number of reference frames
+#define AMF_VIDEO_ENCODER_CAP_MAX_REFERENCE_FRAMES              L"MaxReferenceFrames"       // amf_int64 maximum number of reference frames
+#define AMF_VIDEO_ENCODER_CAP_MAX_TEMPORAL_LAYERS               L"MaxTemporalLayers"        // amf_int64 maximum number of temporal layers
+#define AMF_VIDEO_ENCODER_CAP_FIXED_SLICE_MODE                  L"FixedSliceMode"           // bool  is fixed slice mode supported
+#define AMF_VIDEO_ENCODER_CAP_NUM_OF_HW_INSTANCES               L"NumOfHwInstances"         // amf_int64 number of HW encoder instances
+
+//-----------------------------------------------------------------------------
+// VideoEncoderHEVC.h
+//-----------------------------------------------------------------------------
+#define AMFVideoEncoder_HEVC L"AMFVideoEncoderHW_HEVC"
+
+enum AMF_VIDEO_ENCODER_HEVC_USAGE_ENUM
+{
+    AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING = 0,
+    AMF_VIDEO_ENCODER_HEVC_USAGE_ULTRA_LOW_LATENCY,
+    AMF_VIDEO_ENCODER_HEVC_USAGE_LOW_LATENCY,
+    AMF_VIDEO_ENCODER_HEVC_USAGE_WEBCAM
+};
+
+enum AMF_VIDEO_ENCODER_HEVC_PROFILE_ENUM
+{
+    AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN = 1
+};
+
+enum AMF_VIDEO_ENCODER_HEVC_TIER_ENUM
+{
+    AMF_VIDEO_ENCODER_HEVC_TIER_MAIN = 0,
+    AMF_VIDEO_ENCODER_HEVC_TIER_HIGH = 1
+};
+
+enum AMF_VIDEO_ENCODER_LEVEL_ENUM
+{
+    AMF_LEVEL_1 = 30,
+    AMF_LEVEL_2 = 60,
+    AMF_LEVEL_2_1 = 63,
+    AMF_LEVEL_3 = 90,
+    AMF_LEVEL_3_1 = 93,
+    AMF_LEVEL_4 = 120,
+    AMF_LEVEL_4_1 = 123,
+    AMF_LEVEL_5 = 150,
+    AMF_LEVEL_5_1 = 153,
+    AMF_LEVEL_5_2 = 156,
+    AMF_LEVEL_6 = 180,
+    AMF_LEVEL_6_1 = 183,
+    AMF_LEVEL_6_2 = 186
+};
+
+enum AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_ENUM
+{
+    AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CONSTANT_QP = 0,
+    AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_LATENCY_CONSTRAINED_VBR,
+    AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR,
+    AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CBR
+};
+
+enum AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_ENUM
+{
+    AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_NONE = 0,
+    AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_SKIP,
+    AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_IDR,
+    AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_I,
+    AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_P
+};
+
+enum AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_ENUM
+{
+    AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_IDR,
+    AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_I,
+    AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_P
+};
+
+enum AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_ENUM
+{
+    AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_QUALITY = 0,
+    AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_BALANCED = 5,
+    AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_SPEED = 10
+};
+
+enum AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_ENUM
+{
+    AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_NONE = 0,
+    AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_GOP_ALIGNED,
+    AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_IDR_ALIGNED
+};
+
+
+
+// Static properties - can be set before Init()
+#define AMF_VIDEO_ENCODER_HEVC_FRAMESIZE                            L"HevcFrameSize"                // AMFSize; default = 0,0; Frame size
+
+#define AMF_VIDEO_ENCODER_HEVC_USAGE                                L"HevcUsage"                    // amf_int64(AMF_VIDEO_ENCODER_HEVC_USAGE_ENUM); default = N/A; Encoder usage type. fully configures parameter set. 
+#define AMF_VIDEO_ENCODER_HEVC_PROFILE                              L"HevcProfile"                  // amf_int64(AMF_VIDEO_ENCODER_HEVC_PROFILE_ENUM) ; default = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN;
+#define AMF_VIDEO_ENCODER_HEVC_TIER                                 L"HevcTier"                     // amf_int64(AMF_VIDEO_ENCODER_HEVC_TIER_ENUM) ; default = AMF_VIDEO_ENCODER_HEVC_TIER_MAIN;
+#define AMF_VIDEO_ENCODER_HEVC_PROFILE_LEVEL                        L"HevcProfileLevel"             // amf_int64 (AMF_VIDEO_ENCODER_LEVEL_ENUM, default depends on HW capabilities); 
+#define AMF_VIDEO_ENCODER_HEVC_MAX_LTR_FRAMES                       L"HevcMaxOfLTRFrames"           // amf_int64; default = 0; Max number of LTR frames
+#define AMF_VIDEO_ENCODER_HEVC_MAX_NUM_REFRAMES                     L"HevcMaxNumRefFrames"          // amf_int64; default = 1; Maximum number of reference frames
+#define AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET                       L"HevcQualityPreset"            // amf_int64(AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_ENUM); default = depends on USAGE; Quality Preset 
+#define AMF_VIDEO_ENCODER_HEVC_EXTRADATA                            L"HevcExtraData"                // AMFInterface* - > AMFBuffer*; SPS/PPS buffer - read-only
+#define AMF_VIDEO_ENCODER_HEVC_ASPECT_RATIO                         L"HevcAspectRatio"              // AMFRatio; default = 1, 1
+
+// Picture control properties
+#define AMF_VIDEO_ENCODER_HEVC_NUM_GOPS_PER_IDR                     L"HevcGOPSPerIDR"               // amf_int64; default = 60; The frequency to insert IDR as start of a GOP. 0 means no IDR will be inserted.
+#define AMF_VIDEO_ENCODER_HEVC_GOP_SIZE                             L"HevcGOPSize"                  // amf_int64; default = 60; GOP Size, in frames
+#define AMF_VIDEO_ENCODER_HEVC_DE_BLOCKING_FILTER_DISABLE           L"HevcDeBlockingFilter"         // bool; default = depends on USAGE; De-blocking Filter
+#define AMF_VIDEO_ENCODER_HEVC_SLICES_PER_FRAME                     L"HevcSlicesPerFrame"           // amf_int64; default = 1; Number of slices Per Frame 
+#define AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE                L"HevcHeaderInsertionMode"      // amf_int64(AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_ENUM); default = NONE
+
+// Rate control properties
+#define AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD                  L"HevcRateControlMethod"        // amf_int64(AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_MODE_ENUM); default = depends on USAGE; Rate Control Method 
+#define AMF_VIDEO_ENCODER_HEVC_FRAMERATE                            L"HevcFrameRate"                // AMFRate; default = depends on usage; Frame Rate 
+#define AMF_VIDEO_ENCODER_HEVC_VBV_BUFFER_SIZE                      L"HevcVBVBufferSize"            // amf_int64; default = depends on USAGE; VBV Buffer Size in bits
+#define AMF_VIDEO_ENCODER_HEVC_INITIAL_VBV_BUFFER_FULLNESS          L"HevcInitialVBVBufferFullness" // amf_int64; default =  64; Initial VBV Buffer Fullness 0=0% 64=100%
+#define AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_PREANALYSIS_ENABLE      L"HevcRateControlPreAnalysisEnable"  // bool; default =  depends on USAGE; enable Pre-analysis assisted rate control 
+#define AMF_VIDEO_ENCODER_HEVC_ENABLE_VBAQ                          L"HevcEnableVBAQ"               // bool; default = depends on USAGE; Enable auto VBAQ
+
+// Motion estimation
+#define AMF_VIDEO_ENCODER_HEVC_MOTION_HALF_PIXEL                    L"HevcHalfPixel"                // bool; default= true; Half Pixel 
+#define AMF_VIDEO_ENCODER_HEVC_MOTION_QUARTERPIXEL                  L"HevcQuarterPixel"             // bool; default= true; Quarter Pixel
+
+// Dynamic properties - can be set at any time
+
+// Rate control properties
+#define AMF_VIDEO_ENCODER_HEVC_ENFORCE_HRD                          L"HevcEnforceHRD"               // bool; default = depends on USAGE; Enforce HRD
+#define AMF_VIDEO_ENCODER_HEVC_FILLER_DATA_ENABLE                   L"HevcFillerDataEnable"         // bool; default = depends on USAGE; Filler Data Enable
+#define AMF_VIDEO_ENCODER_HEVC_TARGET_BITRATE                       L"HevcTargetBitrate"            // amf_int64; default = depends on USAGE; Target bit rate in bits
+#define AMF_VIDEO_ENCODER_HEVC_PEAK_BITRATE                         L"HevcPeakBitrate"              // amf_int64; default = depends on USAGE; Peak bit rate in bits
+
+#define AMF_VIDEO_ENCODER_HEVC_MAX_AU_SIZE                          L"HevcMaxAUSize"                // amf_int64; default = 60; Max AU Size in bits
+
+#define AMF_VIDEO_ENCODER_HEVC_MIN_QP_I                             L"HevcMinQP_I"                  // amf_int64; default = depends on USAGE; Min QP; range = 
+#define AMF_VIDEO_ENCODER_HEVC_MAX_QP_I                             L"HevcMaxQP_I"                  // amf_int64; default = depends on USAGE; Max QP; range = 
+#define AMF_VIDEO_ENCODER_HEVC_MIN_QP_P                             L"HevcMinQP_P"                  // amf_int64; default = depends on USAGE; Min QP; range = 
+#define AMF_VIDEO_ENCODER_HEVC_MAX_QP_P                             L"HevcMaxQP_P"                  // amf_int64; default = depends on USAGE; Max QP; range = 
+
+#define AMF_VIDEO_ENCODER_HEVC_QP_I                                 L"HevcQP_I"                     // amf_int64; default = 26; I-frame QP; range = 0-51
+#define AMF_VIDEO_ENCODER_HEVC_QP_P                                 L"HevcQP_P"                     // amf_int64; default = 26; P-frame QP; range = 0-51
+
+#define AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_SKIP_FRAME_ENABLE       L"HevcRateControlSkipFrameEnable" // bool; default =  depends on USAGE; Rate Control Based Frame Skip 
+
+
+
+// Per-submission properties - can be set on input surface interface
+#define AMF_VIDEO_ENCODER_HEVC_END_OF_SEQUENCE                      L"HevcEndOfSequence"            // bool; default = false; generate end of sequence
+#define AMF_VIDEO_ENCODER_HEVC_FORCE_PICTURE_TYPE                   L"HevcForcePictureType"         // amf_int64(AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_ENUM); default = AMF_VIDEO_ENCODER_HEVC_PICTURE_TYPE_NONE; generate particular picture type
+#define AMF_VIDEO_ENCODER_HEVC_INSERT_AUD                           L"HevcInsertAUD"                // bool; default = false; insert AUD
+#define AMF_VIDEO_ENCODER_HEVC_INSERT_HEADER                        L"HevcInsertHeader"             // bool; default = false; insert header(SPS, PPS, VPS)
+
+#define AMF_VIDEO_ENCODER_HEVC_MARK_CURRENT_WITH_LTR_INDEX          L"HevcMarkCurrentWithLTRIndex"  // amf_int64; default = N/A; Mark current frame with LTR index
+#define AMF_VIDEO_ENCODER_HEVC_FORCE_LTR_REFERENCE_BITFIELD         L"HevcForceLTRReferenceBitfield"// amf_int64; default = 0; force LTR bit-field 
+
+// Properties set by encoder on output buffer interface
+#define AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE                     L"HevcOutputDataType"           // amf_int64(AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_ENUM); default = N/A
+#define AMF_VIDEO_ENCODER_HEVC_OUTPUT_MARKED_LTR_INDEX              L"HevcMarkedLTRIndex"           // amf_int64; default = -1; Marked LTR index
+#define AMF_VIDEO_ENCODER_HEVC_OUTPUT_REFERENCED_LTR_INDEX_BITFIELD L"HevcReferencedLTRIndexBitfield"// amf_int64; default = 0; referenced LTR bit-field 
+
+// HEVC Encoder capabilities - exposed in AMFCaps interface
+#define AMF_VIDEO_ENCODER_HEVC_CAP_MAX_BITRATE                      L"HevcMaxBitrate"               // amf_int64; Maximum bit rate in bits
+#define AMF_VIDEO_ENCODER_HEVC_CAP_NUM_OF_STREAMS                   L"HevcNumOfStreams"             // amf_int64; maximum number of encode streams supported 
+#define AMF_VIDEO_ENCODER_HEVC_CAP_MAX_PROFILE                      L"HevcMaxProfile"               // amf_int64(AMF_VIDEO_ENCODER_HEVC_PROFILE_ENUM)
+#define AMF_VIDEO_ENCODER_HEVC_CAP_MAX_TIER                         L"HevcMaxTier"                  // amf_int64(AMF_VIDEO_ENCODER_HEVC_TIER_ENUM) maximum profile tier 
+#define AMF_VIDEO_ENCODER_HEVC_CAP_MAX_LEVEL                        L"HevcMaxLevel"                 // amf_int64 maximum profile level
+#define AMF_VIDEO_ENCODER_HEVC_CAP_MIN_REFERENCE_FRAMES             L"HevcMinReferenceFrames"       // amf_int64 minimum number of reference frames
+#define AMF_VIDEO_ENCODER_HEVC_CAP_MAX_REFERENCE_FRAMES             L"HevcMaxReferenceFrames"       // amf_int64 maximum number of reference frames
+
+
+//-----------------------------------------------------------------------------
+//-----------------------------------------------------------------------------
+
+#endif // __AMF_SDK_Enc_h__
diff --git a/configure b/configure
index 0e1ccaa..c785cc9 100755
--- a/configure
+++ b/configure
@@ -304,6 +304,7 @@  External library support:
 
   The following libraries provide various hardware acceleration features:
   --disable-audiotoolbox   disable Apple AudioToolbox code [autodetect]
+  --disable-amf            disable AMF video encoding code [autodetect]
   --disable-cuda           disable dynamically linked Nvidia CUDA code [autodetect]
   --enable-cuda-sdk        enable CUDA features that require the CUDA SDK [no]
   --disable-cuvid          disable Nvidia CUVID support [autodetect]
@@ -1641,6 +1642,7 @@  EXTERNAL_LIBRARY_LIST="
 "
 
 HWACCEL_AUTODETECT_LIBRARY_LIST="
+    amf
     audiotoolbox
     crystalhd
     cuda
@@ -2785,12 +2787,16 @@  scale_npp_filter_deps="cuda libnpp"
 scale_cuda_filter_deps="cuda_sdk"
 thumbnail_cuda_filter_deps="cuda_sdk"
 
+amf_deps_any="dlopen LoadLibrary"
+amf_encoder_deps="amf"
+
 nvenc_deps="cuda"
 nvenc_deps_any="libdl LoadLibrary"
 nvenc_encoder_deps="nvenc"
 
 h263_v4l2m2m_decoder_deps="v4l2_m2m h263_v4l2_m2m"
 h263_v4l2m2m_encoder_deps="v4l2_m2m h263_v4l2_m2m"
+h264_amf_encoder_deps="amf"
 h264_crystalhd_decoder_select="crystalhd h264_mp4toannexb_bsf h264_parser"
 h264_cuvid_decoder_deps="cuda cuvid"
 h264_cuvid_decoder_select="h264_mp4toannexb_bsf"
@@ -2809,6 +2815,7 @@  h264_vaapi_encoder_deps="VAEncPictureParameterBufferH264"
 h264_vaapi_encoder_select="cbs_h264 vaapi_encode"
 h264_v4l2m2m_decoder_deps="v4l2_m2m h264_v4l2_m2m"
 h264_v4l2m2m_encoder_deps="v4l2_m2m h264_v4l2_m2m"
+hevc_amf_encoder_deps="amf"
 hevc_cuvid_decoder_deps="cuda cuvid"
 hevc_cuvid_decoder_select="hevc_mp4toannexb_bsf"
 hevc_mediacodec_decoder_deps="mediacodec"
@@ -6305,6 +6312,18 @@  else
     disable cuda cuvid nvenc
 fi
 
+if enabled x86; then
+    case $target_os in
+        mingw32*|mingw64*|win32|win64|cygwin*)
+            ;;
+        *)
+            disable  amf
+            ;;
+    esac
+else
+    disable amf
+fi
+
 enabled nvenc &&
     check_cc -I$source_path <<EOF || disable nvenc
 #include "compat/nvenc/nvEncodeAPI.h"
@@ -6313,6 +6332,13 @@  void f(void) { struct { const GUID guid; } s[] = { { NV_ENC_PRESET_HQ_GUID } };
 int main(void) { return 0; }
 EOF
 
+enabled amf &&
+    check_cc -I$source_path <<EOF || disable amf
+#include "compat/amd/amfsdkenc.h"
+AMFFactory *factory;
+int main(void) { return 0; }
+EOF
+
 # Funny iconv installations are not unusual, so check it after all flags have been set
 if enabled libc_iconv; then
     check_func_headers iconv.h iconv
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index bc4d7da..cbf45ac 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -50,6 +50,7 @@  OBJS = allcodecs.o                                                      \
 # subsystems
 OBJS-$(CONFIG_AANDCTTABLES)            += aandcttab.o
 OBJS-$(CONFIG_AC3DSP)                  += ac3dsp.o ac3.o ac3tab.o
+OBJS-$(CONFIG_AMF)                     += amfenc.o
 OBJS-$(CONFIG_AUDIO_FRAME_QUEUE)       += audio_frame_queue.o
 OBJS-$(CONFIG_AUDIODSP)                += audiodsp.o
 OBJS-$(CONFIG_BLOCKDSP)                += blockdsp.o
@@ -334,6 +335,7 @@  OBJS-$(CONFIG_H264_DECODER)            += h264dec.o h264_cabac.o h264_cavlc.o \
 OBJS-$(CONFIG_H264_CUVID_DECODER)      += cuvid.o
 OBJS-$(CONFIG_H264_MEDIACODEC_DECODER) += mediacodecdec.o
 OBJS-$(CONFIG_H264_MMAL_DECODER)       += mmaldec.o
+OBJS-$(CONFIG_H264_AMF_ENCODER)        += amfenc_h264.o
 OBJS-$(CONFIG_H264_NVENC_ENCODER)      += nvenc_h264.o
 OBJS-$(CONFIG_NVENC_ENCODER)           += nvenc_h264.o
 OBJS-$(CONFIG_NVENC_H264_ENCODER)      += nvenc_h264.o
@@ -352,6 +354,7 @@  OBJS-$(CONFIG_HEVC_DECODER)            += hevcdec.o hevc_mvs.o \
                                           hevcdsp.o hevc_filter.o hevc_data.o
 OBJS-$(CONFIG_HEVC_CUVID_DECODER)      += cuvid.o
 OBJS-$(CONFIG_HEVC_MEDIACODEC_DECODER) += mediacodecdec.o
+OBJS-$(CONFIG_HEVC_AMF_ENCODER)        += amfenc_hevc.o
 OBJS-$(CONFIG_HEVC_NVENC_ENCODER)      += nvenc_hevc.o
 OBJS-$(CONFIG_NVENC_HEVC_ENCODER)      += nvenc_hevc.o
 OBJS-$(CONFIG_HEVC_QSV_DECODER)        += qsvdec_h2645.o
@@ -1056,6 +1059,7 @@  SKIPHEADERS-$(CONFIG_JNI)              += ffjni.h
 SKIPHEADERS-$(CONFIG_LIBVPX)           += libvpx.h
 SKIPHEADERS-$(CONFIG_LIBWEBP_ENCODER)  += libwebpenc_common.h
 SKIPHEADERS-$(CONFIG_MEDIACODEC)       += mediacodecdec_common.h mediacodec_surface.h mediacodec_wrapper.h mediacodec_sw_buffer.h
+SKIPHEADERS-$(CONFIG_AMF)              += amfenc.h
 SKIPHEADERS-$(CONFIG_NVENC)            += nvenc.h
 SKIPHEADERS-$(CONFIG_QSV)              += qsv.h qsv_internal.h
 SKIPHEADERS-$(CONFIG_QSVDEC)           += qsvdec.h
diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
index 8369126..3d2299f 100644
--- a/libavcodec/allcodecs.c
+++ b/libavcodec/allcodecs.c
@@ -649,6 +649,7 @@  static void register_all(void)
      * above is available */
     REGISTER_ENCODER(H263_V4L2M2M,      h263_v4l2m2m);
     REGISTER_ENCDEC (LIBOPENH264,       libopenh264);
+    REGISTER_ENCODER(H264_AMF,          h264_amf);
     REGISTER_DECODER(H264_CUVID,        h264_cuvid);
     REGISTER_ENCODER(H264_NVENC,        h264_nvenc);
     REGISTER_ENCODER(H264_OMX,          h264_omx);
@@ -661,6 +662,7 @@  static void register_all(void)
     REGISTER_ENCODER(NVENC_H264,        nvenc_h264);
     REGISTER_ENCODER(NVENC_HEVC,        nvenc_hevc);
 #endif
+    REGISTER_ENCODER(HEVC_AMF,          hevc_amf);
     REGISTER_DECODER(HEVC_CUVID,        hevc_cuvid);
     REGISTER_DECODER(HEVC_MEDIACODEC,   hevc_mediacodec);
     REGISTER_ENCODER(HEVC_NVENC,        hevc_nvenc);
diff --git a/libavcodec/amfenc.c b/libavcodec/amfenc.c
new file mode 100644
index 0000000..5fdf027
--- /dev/null
+++ b/libavcodec/amfenc.c
@@ -0,0 +1,465 @@ 
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+#include "amfenc.h"
+
+#include "libavutil/avassert.h"
+#include "libavutil/imgutils.h"
+#include "libavutil/hwcontext.h"
+#include "libavutil/hwcontext_d3d11va.h"
+#include "libavutil/mem.h"
+#include "libavutil/pixdesc.h"
+#include "libavutil/time.h"
+#include "internal.h"
+
+#include <d3d11.h>
+
+#ifdef _WIN32
+#include "compat/w32dlfcn.h"
+#else
+#include <dlfcn.h>
+#endif
+
+#define FFMPEG_AMF_WRITER_ID L"ffmpeg_amf"
+#define AMF_DEBUG_TRACE 0
+
+const enum AVPixelFormat ff_amf_pix_fmts[] = {
+    AV_PIX_FMT_NV12,
+    AV_PIX_FMT_0RGB32,
+    AV_PIX_FMT_0BGR32,
+    AV_PIX_FMT_YUV420P,
+    AV_PIX_FMT_D3D11,
+    AV_PIX_FMT_NONE
+};
+
+typedef struct FormatMap {
+    enum AVPixelFormat       av_format;
+    enum AMF_SURFACE_FORMAT  amf_format;
+} FormatMap;
+
+static const FormatMap format_map[] =
+{
+    { AV_PIX_FMT_NONE,       AMF_SURFACE_UNKNOWN },
+    { AV_PIX_FMT_NV12,       AMF_SURFACE_NV12 },
+    { AV_PIX_FMT_0BGR32,     AMF_SURFACE_BGRA },
+    { AV_PIX_FMT_0RGB32,     AMF_SURFACE_RGBA },
+    { AV_PIX_FMT_GRAY8,      AMF_SURFACE_GRAY8 },
+    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YUV420P },
+    { AV_PIX_FMT_BGR0,       AMF_SURFACE_BGRA },
+    { AV_PIX_FMT_YUV420P,    AMF_SURFACE_YV12 },
+    { AV_PIX_FMT_YUYV422,    AMF_SURFACE_YUY2 },
+    { AV_PIX_FMT_D3D11,      AMF_SURFACE_NV12 },
+};
+
+static enum AMF_SURFACE_FORMAT amf_av_to_amf_format(enum AVPixelFormat fmt)
+{
+    for (int i = 0; i < amf_countof(format_map); i++) {
+        if (format_map[i].av_format == fmt) {
+            return format_map[i].amf_format;
+        }
+    }
+    return AMF_SURFACE_UNKNOWN;
+}
+
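+// Minimal AMFTraceWriter implementation: log messages produced by the AMF
+// runtime are forwarded to the libavcodec logger of the owning context.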
+static void AMF_CDECL_CALL AMFTraceWriter_Write(AMFTraceWriter *pThis,
+    const wchar_t *scope, const wchar_t *message)
+{
+    AmfTraceWriter *tracer = (AmfTraceWriter*)pThis;
+    av_log(tracer->avctx, AV_LOG_DEBUG, "%ls: %ls", scope, message);
+}
+
+static void AMF_CDECL_CALL AMFTraceWriter_Flush(AMFTraceWriter *pThis)
+{
+}
+
+static AMFTraceWriterVtbl tracer_vtbl =
+{
+    .Write = AMFTraceWriter_Write,
+    .Flush = AMFTraceWriter_Flush,
+};
+
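+// Load the AMF runtime (amfrt64.dll / amfrt32.dll) dynamically, resolve the
+// AMFInit/AMFQueryVersion entry points and obtain the factory, trace and
+// debug interfaces from it.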
+static int amf_load_library(AVCodecContext *avctx)
+{
+    AmfContext             *ctx = avctx->priv_data;
+    AMFInit_Fn              init_fun = NULL;
+    AMFQueryVersion_Fn      version_fun = NULL;
+    AMF_RESULT              res = AMF_OK;
+
+    ctx->library = dlopen(AMF_DLL_NAMEA, RTLD_NOW | RTLD_LOCAL);
+    AMF_RETURN_IF_FALSE(ctx, ctx->library != NULL,
+        AVERROR_UNKNOWN, "DLL %s failed to open. \n", AMF_DLL_NAMEA);
+
+    init_fun = (AMFInit_Fn)dlsym(ctx->library, AMF_INIT_FUNCTION_NAME);
+    AMF_RETURN_IF_FALSE(ctx, init_fun != NULL, AVERROR_UNKNOWN, "DLL %s failed to find function %s. \n", AMF_DLL_NAMEA, AMF_INIT_FUNCTION_NAME);
+
+    version_fun = (AMFQueryVersion_Fn)dlsym(ctx->library, AMF_QUERY_VERSION_FUNCTION_NAME);
+    AMF_RETURN_IF_FALSE(ctx, version_fun != NULL, AVERROR_UNKNOWN, "DLL %s failed to find function %s. \n", AMF_DLL_NAMEA, AMF_QUERY_VERSION_FUNCTION_NAME);
+
+    res = version_fun(&ctx->version);
+    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "%s failed with error %d. \n", AMF_QUERY_VERSION_FUNCTION_NAME, res);
+    res = init_fun(AMF_FULL_VERSION, &ctx->factory);
+    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "%s failed with error %d. \n", AMF_INIT_FUNCTION_NAME, res);
+    res = ctx->factory->pVtbl->GetTrace(ctx->factory, &ctx->trace);
+    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "GetTrace() failed with error %d. \n", res);
+    res = ctx->factory->pVtbl->GetDebug(ctx->factory, &ctx->debug);
+    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "GetDebug() failed with error %d. \n", res);
+    return 0;
+}
+
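+// Create the AMF context. If the caller supplied a D3D11VA hw_frames_ctx or
+// hw_device_ctx on an AMD device, the AMF context is initialized on that
+// device so textures can be submitted directly; otherwise a default DX11
+// device is created, with a fallback to DX9.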
+static int amf_init_context(AVCodecContext *avctx)
+{
+    AmfContext         *ctx = avctx->priv_data;
+    AMF_RESULT          res = AMF_OK;
+
+    // the return values of these functions indicate the previous state and do not affect behaviour
+    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_CONSOLE, 0);
+#if AMF_DEBUG_TRACE
+    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, 1);
+    ctx->trace->pVtbl->SetWriterLevel(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, AMF_TRACE_TRACE);
+    ctx->trace->pVtbl->SetGlobalLevel(ctx->trace, AMF_TRACE_TRACE);
+#else
+    ctx->trace->pVtbl->EnableWriter(ctx->trace, AMF_TRACE_WRITER_DEBUG_OUTPUT, 0);
+#endif
+    ctx->tracer.vtbl = &tracer_vtbl;
+    ctx->tracer.avctx = avctx;
+    ctx->trace->pVtbl->RegisterWriter(ctx->trace, FFMPEG_AMF_WRITER_ID,
+        (AMFTraceWriter*)&ctx->tracer, 1);
+
+    res = ctx->factory->pVtbl->CreateContext(ctx->factory, &ctx->context);
+    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "CreateContext() failed with error %d. \n", res);
+    // try to reuse existing DX device
+    if (avctx->hw_frames_ctx) {
+        AVHWFramesContext *device_ctx = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+        if (device_ctx->device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
+            if (amf_av_to_amf_format(device_ctx->sw_format) != AMF_SURFACE_UNKNOWN) {
+                if (device_ctx->device_ctx->hwctx) {
+                    AVD3D11VADeviceContext *device_d3d11 = (AVD3D11VADeviceContext *)device_ctx->device_ctx->hwctx;
+                    res = ctx->context->pVtbl->InitDX11(ctx->context, device_d3d11->device, AMF_DX11_1);
+                    if (res == AMF_OK) {
+                        ctx->hw_frames_ctx = av_buffer_ref(avctx->hw_frames_ctx);
+                    } else {
+                        av_log(avctx, AV_LOG_INFO, "amf_shared: avctx->hw_frames_ctx has non-AMD device, switching to default. \n");
+                    }
+                }
+            } else {
+                av_log(avctx, AV_LOG_INFO, "amf_shared: avctx->hw_frames_ctx has format not supported by AMF, switching to default. \n");
+            }
+        }
+    } else if (avctx->hw_device_ctx) {
+        AVHWDeviceContext *device_ctx = (AVHWDeviceContext*)(avctx->hw_device_ctx->data);
+        if (device_ctx->type == AV_HWDEVICE_TYPE_D3D11VA) {
+            if (device_ctx->hwctx) {
+                AVD3D11VADeviceContext *device_d3d11 = (AVD3D11VADeviceContext *)device_ctx->hwctx;
+                res = ctx->context->pVtbl->InitDX11(ctx->context, device_d3d11->device, AMF_DX11_1);
+                if (res == AMF_OK) {
+                    ctx->hw_device_ctx = av_buffer_ref(avctx->hw_device_ctx);
+                } else {
+                    av_log(avctx, AV_LOG_INFO, "amf_shared: avctx->hw_device_ctx has non-AMD device, switching to default. \n");
+                }
+            }
+        }
+    }
+    if (!ctx->hw_frames_ctx && !ctx->hw_device_ctx) {
+        res = ctx->context->pVtbl->InitDX11(ctx->context, NULL, AMF_DX11_1);
+        if (res != AMF_OK) {
+            res = ctx->context->pVtbl->InitDX9(ctx->context, NULL);
+            AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "InitDX9() failed with error %d. \n", res);
+        }
+    }
+    return 0;
+}
+
+static int amf_init_encoder(AVCodecContext *avctx)
+{
+    AmfContext          *ctx = avctx->priv_data;
+    const wchar_t       *codec_id = NULL;
+    AMF_RESULT           res = AMF_OK;
+
+    switch (avctx->codec->id) {
+        case AV_CODEC_ID_H264:
+            codec_id = AMFVideoEncoderVCE_AVC;
+            break;
+        case AV_CODEC_ID_HEVC:
+            codec_id = AMFVideoEncoder_HEVC;
+            break;
+        default:
+            break;
+    }
+    AMF_RETURN_IF_FALSE(ctx, codec_id != 0, AVERROR(EINVAL), "Codec %d is not supported. \n", avctx->codec->id);
+
+    ctx->format = amf_av_to_amf_format(avctx->pix_fmt);
+    AMF_RETURN_IF_FALSE(ctx, ctx->format != AMF_SURFACE_UNKNOWN, AVERROR(EINVAL), "Format %d is not supported. \n", avctx->pix_fmt);
+
+    res = ctx->factory->pVtbl->CreateComponent(ctx->factory, ctx->context, codec_id, &ctx->encoder);
+    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_ENCODER_NOT_FOUND, "CreateComponent(%ls) failed with error %d. \n", codec_id, res);
+
+    ctx->eof = 0;
+    return 0;
+}
+
+static int amf_terminate(AVCodecContext *avctx)
+{
+    AmfContext      *ctx = avctx->priv_data;
+
+    if (ctx->encoder) {
+        ctx->encoder->pVtbl->Terminate(ctx->encoder);
+        ctx->encoder->pVtbl->Release(ctx->encoder);
+        ctx->encoder = NULL;
+    }
+
+    if (ctx->context) {
+        ctx->context->pVtbl->Terminate(ctx->context);
+        ctx->context->pVtbl->Release(ctx->context);
+        ctx->context = NULL;
+    }
+    if (ctx->hw_device_ctx){
+        av_buffer_unref(&ctx->hw_device_ctx);
+        ctx->hw_device_ctx = NULL;
+    }
+    av_buffer_unref(&ctx->hw_frames_ctx);
+
+    if (ctx->trace) {
+        ctx->trace->pVtbl->UnregisterWriter(ctx->trace, FFMPEG_AMF_WRITER_ID);
+    }
+    if (ctx->library) {
+        dlclose(ctx->library);
+        ctx->library = NULL;
+    }
+    ctx->trace = NULL;
+    ctx->debug = NULL;
+    ctx->factory = NULL;
+    ctx->version = 0;
+
+    return 0;
+}
+
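+// Copy one input frame into a host-memory AMF surface, downloading it from
+// hardware first if necessary, then copy plane by plane and propagate pts.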
+static int amf_copy_surface(AVCodecContext *avctx, const AVFrame *frame,
+    AMFSurface* surface)
+{
+    AVFrame        *sw_frame = NULL;
+    AMFPlane       *plane = NULL;
+    uint8_t        *dst_data[4];
+    int             dst_linesize[4];
+    int             ret = 0;
+    int             planes;
+
+    if (frame->hw_frames_ctx) {
+        if (!(sw_frame = av_frame_alloc())) {
+            av_log(avctx, AV_LOG_ERROR, "Can not alloc frame\n");
+            ret = AVERROR(ENOMEM);
+            goto fail;
+        }
+        if ((ret = av_hwframe_transfer_data(sw_frame, frame, 0)) < 0) {
+            av_log(avctx, AV_LOG_ERROR, "Error transferring the data to system memory. \n");
+            ret = AVERROR(EINVAL);
+            goto fail;
+        }
+        frame = sw_frame;
+    }
+    planes = (int)surface->pVtbl->GetPlanesCount(surface);
+    if (planes > amf_countof(dst_data)) {
+        av_log(avctx, AV_LOG_ERROR, "Invalid number of planes %d in surface \n", planes);
+        ret = AVERROR(EINVAL);
+        goto fail;
+    }
+
+    for (int i = 0; i < planes; i++) {
+        plane = surface->pVtbl->GetPlaneAt(surface, i);
+        dst_data[i] = plane->pVtbl->GetNative(plane);
+        dst_linesize[i] = plane->pVtbl->GetHPitch(plane);
+    }
+    av_image_copy(dst_data, dst_linesize,
+        (const uint8_t**)frame->data, frame->linesize, frame->format,
+        avctx->width, avctx->height);
+
+    surface->pVtbl->SetPts(surface, frame->pts);
+fail:
+    if (sw_frame){
+        av_frame_free(&sw_frame);
+    }
+    return ret;
+}
+
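+// Copy the encoded bitstream from the AMF output buffer into an AVPacket,
+// mark IDR output as a keyframe and carry the timestamp over.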
+static int amf_copy_buffer(AVCodecContext *avctx, AVPacket *pkt, AMFBuffer *buffer)
+{
+    int                 ret;
+    AMFVariantStruct    var;
+    int                 size = (int)buffer->pVtbl->GetSize(buffer);
+
+    if ((ret = ff_alloc_packet2(avctx, pkt, size, 0)) < 0) {
+        return ret;
+    }
+    memcpy(pkt->data, buffer->pVtbl->GetNative(buffer), size);
+
+    switch (avctx->codec->id) {
+        case AV_CODEC_ID_H264:
+            buffer->pVtbl->GetProperty(buffer, AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE, &var);
+            switch (var.int64Value) {
+                case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_IDR:
+                    pkt->flags = AV_PKT_FLAG_KEY;
+                    break;
+                case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_I:
+                    break;
+                case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_P:
+                    break;
+                case AMF_VIDEO_ENCODER_OUTPUT_DATA_TYPE_B:
+                    break;
+                default:
+                    break;
+            }
+            break;
+        case AV_CODEC_ID_HEVC:
+            buffer->pVtbl->GetProperty(buffer, AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE, &var);
+            switch (var.int64Value) {
+                case AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_IDR:
+                    pkt->flags = AV_PKT_FLAG_KEY;
+                    break;
+                case AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_I:
+                    break;
+                case AMF_VIDEO_ENCODER_HEVC_OUTPUT_DATA_TYPE_P:
+                    break;
+                default:
+                    break;
+            }
+            break;
+        default:
+            break;
+    }
+    pkt->pts = buffer->pVtbl->GetPts(buffer);
+    pkt->dts = pkt->pts;
+    return 0;
+}
+
+// amfenc API implementation
+int ff_amf_encode_init(AVCodecContext *avctx)
+{
+    AmfContext     *ctx = avctx->priv_data;
+    int             ret;
+
+    ctx->factory = NULL;
+    ctx->debug = NULL;
+    ctx->trace = NULL;
+    ctx->context = NULL;
+    ctx->encoder = NULL;
+    ctx->library = NULL;
+    ctx->version = 0;
+    ctx->eof = 0;
+    ctx->format = 0;
+    ctx->tracer.vtbl = NULL;
+    ctx->tracer.avctx = NULL;
+
+    if ((ret = amf_load_library(avctx)) == 0) {
+        if ((ret = amf_init_context(avctx)) == 0) {
+            if ((ret = amf_init_encoder(avctx)) == 0) {
+                return 0;
+            }
+        }
+    }
+    amf_terminate(avctx);
+    return ret;
+}
+
+av_cold int ff_amf_encode_close(AVCodecContext *avctx)
+{
+    int ret;
+    ret = amf_terminate(avctx);
+    return ret;
+}
+
+static GUID  AMFTextureArrayIndexGUID = AMFTextureArrayIndexGUIDDef;
+
+int ff_amf_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+                        const AVFrame *frame, int *got_packet)
+{
+    int             ret = 0;
+    AMF_RESULT      res = AMF_OK;
+    AmfContext     *ctx = avctx->priv_data;
+    AMFSurface     *surface = NULL;
+    AMFData        *data = NULL;
+    amf_bool       submitted = 0;
+
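+    // Submission/polling loop: submit the frame (or a drain request on EOF)
+    // and poll for encoded output in the same pass. When the input queue is
+    // full, sleep briefly, collect some output and retry the submission.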
+    while (!submitted) {
+        if (!frame) { // submit drain
+            if (!ctx->eof) { // submit drain one time only
+                res = ctx->encoder->pVtbl->Drain(ctx->encoder);
+                if (res == AMF_INPUT_FULL) {
+                    av_usleep(1000); // input queue is full: wait, poll and submit Drain again
+                                     // need to get some output and try again
+                } else if (res == AMF_OK) {
+                    ctx->eof = 1; // drain started
+                    submitted = 1;
+                }
+            }
+        } else { // submit frame
+            if (surface == NULL) { // prepare surface from frame one time only
+                if (frame->hw_frames_ctx && ( // HW frame detected
+                                              // check if the same hw_frames_ctx as used in initialization
+                    (ctx->hw_frames_ctx && frame->hw_frames_ctx->data == ctx->hw_frames_ctx->data) ||
+                    // check if the same hw_device_ctx as used in initialization
+                    (ctx->hw_device_ctx && ((AVHWFramesContext*)frame->hw_frames_ctx->data)->device_ctx ==
+                    (AVHWDeviceContext*)ctx->hw_device_ctx->data)
+                )) {
+                    ID3D11Texture2D* texture = (ID3D11Texture2D*)frame->data[0]; // actual texture
+                    int index = (int)(size_t)frame->data[1]; // index of the slice in the texture array - tells AMF which slice to use
+                    texture->lpVtbl->SetPrivateData(texture, &AMFTextureArrayIndexGUID, sizeof(index), &index);
+
+                    res = ctx->context->pVtbl->CreateSurfaceFromDX11Native(ctx->context, texture, &surface, NULL); // wrap to AMF surface
+                    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "CreateSurfaceFromDX11Native() failed with error %d \n", res);
+                    surface->pVtbl->SetCrop(surface, 0, 0, frame->width, frame->height); // decoder surfaces are vertically aligned by 16; tell AMF the real size
+                    surface->pVtbl->SetPts(surface, frame->pts);
+                } else {
+                    res = ctx->context->pVtbl->AllocSurface(ctx->context, AMF_MEMORY_HOST, ctx->format, avctx->width, avctx->height, &surface);
+                    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "AllocSurface() failed  with error %d \n", res);
+                    amf_copy_surface(avctx, frame, surface);
+                }
+            }
+            // encode
+            res = ctx->encoder->pVtbl->SubmitInput(ctx->encoder, (AMFData*)surface);
+            if (res == AMF_INPUT_FULL) { // handle full queue
+                av_usleep(1000); // input queue is full: wait, poll and submit surface again
+            } else {
+                surface->pVtbl->Release(surface);
+                surface = NULL;
+                AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_UNKNOWN, "SubmitInput() failed with error %d \n", res);
+                submitted = 1;
+            }
+        }
+        // poll results
+        if (!data) {
+            res = ctx->encoder->pVtbl->QueryOutput(ctx->encoder, &data);
+            if (data) {
+                AMFBuffer* buffer;
+                AMFGuid guid = IID_AMFBuffer();
+                data->pVtbl->QueryInterface(data, &guid, (void**)&buffer); // query for buffer interface
+                ret = amf_copy_buffer(avctx, pkt, buffer);
+                if (!ret)
+                    *got_packet = 1;
+                buffer->pVtbl->Release(buffer);
+                data->pVtbl->Release(data);
+                if (ctx->eof) {
+                    submitted = 1; // we are in the drain state - no submissions
+                }
+            } else if (res == AMF_EOF) {
+                submitted = 1; // drain complete
+            } else {
+                if (!submitted) {
+                    av_usleep(1000); // wait and poll again
+                }
+            }
+        }
+    }
+    return ret;
+}
diff --git a/libavcodec/amfenc.h b/libavcodec/amfenc.h
new file mode 100644
index 0000000..ee3a03b
--- /dev/null
+++ b/libavcodec/amfenc.h
@@ -0,0 +1,129 @@ 
+/*
+* This file is part of FFmpeg.
+*
+* FFmpeg is free software; you can redistribute it and/or
+* modify it under the terms of the GNU Lesser General Public
+* License as published by the Free Software Foundation; either
+* version 2.1 of the License, or (at your option) any later version.
+*
+* FFmpeg is distributed in the hope that it will be useful,
+* but WITHOUT ANY WARRANTY; without even the implied warranty of
+* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+* Lesser General Public License for more details.
+*
+* You should have received a copy of the GNU Lesser General Public
+* License along with FFmpeg; if not, write to the Free Software
+* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+*/
+
+#ifndef AVCODEC_AMFENC_H
+#define AVCODEC_AMFENC_H
+
+#include "config.h"
+#include "avcodec.h"
+#include "compat/amd/amfsdkenc.h"
+
+
+/**
+* AMF trace writer callback class
+* Used to capture all AMF logging
+*/
+
+typedef struct AmfTraceWriter {
+    AMFTraceWriterVtbl* vtbl;
+    AVCodecContext      *avctx;
+} AmfTraceWriter;
+
+/**
+* AMF encoder context
+*/
+
+typedef struct AmfContext {
+    AVClass            *avclass;
+    /** access to AMF runtime */
+    amf_handle          library; ///< handle to DLL library
+    AMFFactory         *factory; ///< pointer to AMF factory
+    AMFDebug*           debug;   ///< pointer to AMF debug interface
+    AMFTrace*           trace;   ///< pointer to AMF trace interface
+
+    amf_uint64          version; ///< version of AMF runtime
+    AmfTraceWriter      tracer;  ///< AMF writer registered with AMF
+    AMFContext         *context; ///< AMF context
+    //encoder
+    AMFComponent*       encoder; ///< AMF encoder object
+    amf_bool            eof;     ///< flag indicating EOF happened
+    AMF_SURFACE_FORMAT  format;  ///< AMF surface format
+
+    AVBufferRef        *hw_device_ctx; ///< pointer to HW accelerator (decoder)
+    AVBufferRef        *hw_frames_ctx; ///< pointer to HW accelerator (frame allocator)
+
+    /** common encoder options */
+
+    /** Static options, have to be set before Init() call */
+    int                 usage;
+    int                 profile;
+    int                 level;
+    int                 preanalysis;
+    int                 quality;
+    int                 b_frame_delta_qp;
+    int                 ref_b_frame_delta_qp;
+
+    /** Dynamic options, can be set after Init() call */
+
+    int                 rate_control_mode;
+    int                 enforce_hrd;
+    int                 filler_data;
+    int                 enable_vbaq;
+    int                 skip_frame;
+    int                 qp_i;
+    int                 qp_p;
+    int                 qp_b;
+    int                 max_au_size;
+    int                 header_spacing;
+    int                 b_frame_ref;
+    int                 intra_refresh_mb;
+    int                 coding_mode;
+    int                 me_half_pel;
+    int                 me_quater_pel;
+
+    /** HEVC - specific options */
+
+    int                 gops_per_idr;
+    int                 header_insertion_mode;
+    int                 min_qp_i;
+    int                 max_qp_i;
+    int                 min_qp_p;
+    int                 max_qp_p;
+    int                 tier;
+} AmfContext;
+
+/**
+* Common encoder initialization code
+*/
+int ff_amf_encode_init(AVCodecContext *avctx);
+/**
+* Common encoder termination code
+*/
+int ff_amf_encode_close(AVCodecContext *avctx);
+
+/**
+* Encoding one frame - common for all AMF encoders
+*/
+int ff_amf_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+    const AVFrame *frame, int *got_packet);
+
+/**
+* Supported formats
+*/
+extern const enum AVPixelFormat ff_amf_pix_fmts[];
+
+/**
+* Error handling helper
+*/
+#define AMF_RETURN_IF_FALSE(avctx, exp, ret_value, /*optional message,*/ ...) \
+    if (!(exp)) { \
+        av_log(avctx, AV_LOG_ERROR, __VA_ARGS__); \
+        return ret_value; \
+    }
+
+#endif //AVCODEC_AMFENC_H
diff --git a/libavcodec/amfenc_h264.c b/libavcodec/amfenc_h264.c
new file mode 100644
index 0000000..1b22429
--- /dev/null
+++ b/libavcodec/amfenc_h264.c
@@ -0,0 +1,345 @@ 
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "amfenc.h"
+
+#include "libavutil/internal.h"
+#include "libavutil/opt.h"
+#include "internal.h"
+
+#define OFFSET(x) offsetof(AmfContext, x)
+#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
+
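+// Encoder options; the values map onto AMF encoder property enums, with the
+// named constants grouped by the AVOption "unit" they belong to.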
+static const AVOption options[] = {
+    // Static
+    /// Usage
+    { "usage",          "Encoder Usage",        OFFSET(usage),  AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_USAGE_TRANSCONDING      }, AMF_VIDEO_ENCODER_USAGE_TRANSCONDING, AMF_VIDEO_ENCODER_USAGE_WEBCAM, VE, "usage" },
+    { "transcoding",    "Generic Transcoding",  0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_USAGE_TRANSCONDING      }, 0, 0, VE, "usage" },
+    { "ultralowlatency","",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_USAGE_ULTRA_LOW_LATENCY }, 0, 0, VE, "usage" },
+    { "lowlatency",     "",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_USAGE_LOW_LATENCY       }, 0, 0, VE, "usage" },
+    { "webcam",         "Webcam",               0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_USAGE_WEBCAM            }, 0, 0, VE, "usage" },
+
+    /// Profile,
+    { "profile",        "Profile",              OFFSET(profile),AV_OPT_TYPE_INT, { .i64 = AMF_VIDEO_ENCODER_PROFILE_MAIN       }, AMF_VIDEO_ENCODER_PROFILE_BASELINE, AMF_VIDEO_ENCODER_PROFILE_HIGH, VE, "profile" },
+    { "baseline",       "",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_PROFILE_BASELINE }, 0, 0, VE, "profile" },
+    { "main",           "",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_PROFILE_MAIN     }, 0, 0, VE, "profile" },
+    { "high",           "",                     0,              AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_PROFILE_HIGH     }, 0, 0, VE, "profile" },
+
+    /// Profile Level
+    { "level",          "Profile Level",        OFFSET(level),  AV_OPT_TYPE_INT,   { .i64 = 0  }, 0, 62, VE, "level" },
+    { "auto",           "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 0  }, 0, 0,  VE, "level" },
+    { "1.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 10 }, 0, 0,  VE, "level" },
+    { "1.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 11 }, 0, 0,  VE, "level" },
+    { "1.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 12 }, 0, 0,  VE, "level" },
+    { "1.3",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 13 }, 0, 0,  VE, "level" },
+    { "2.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 20 }, 0, 0,  VE, "level" },
+    { "2.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 21 }, 0, 0,  VE, "level" },
+    { "2.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 22 }, 0, 0,  VE, "level" },
+    { "3.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 30 }, 0, 0,  VE, "level" },
+    { "3.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 31 }, 0, 0,  VE, "level" },
+    { "3.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 32 }, 0, 0,  VE, "level" },
+    { "4.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 40 }, 0, 0,  VE, "level" },
+    { "4.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 41 }, 0, 0,  VE, "level" },
+    { "4.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 42 }, 0, 0,  VE, "level" },
+    { "5.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 50 }, 0, 0,  VE, "level" },
+    { "5.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 51 }, 0, 0,  VE, "level" },
+    { "5.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 52 }, 0, 0,  VE, "level" },
+    { "6.0",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 60 }, 0, 0,  VE, "level" },
+    { "6.1",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 61 }, 0, 0,  VE, "level" },
+    { "6.2",            "",                     0,              AV_OPT_TYPE_CONST, { .i64 = 62 }, 0, 0,  VE, "level" },
+
+    /// Quality Preset
+    { "quality",        "Quality Preference",                   OFFSET(quality),    AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_QUALITY_PRESET_SPEED    }, AMF_VIDEO_ENCODER_QUALITY_PRESET_BALANCED, AMF_VIDEO_ENCODER_QUALITY_PRESET_QUALITY, VE, "quality" },
+    { "speed",          "Prefer Speed",                         0,                  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_QUALITY_PRESET_SPEED    },       0, 0, VE, "quality" },
+    { "balanced",       "Balanced",                             0,                  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_QUALITY_PRESET_BALANCED },    0, 0, VE, "quality" },
+    { "quality",        "Prefer Quality",                       0,                  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_QUALITY_PRESET_QUALITY  },     0, 0, VE, "quality" },
+
+    // Dynamic
+    /// Rate Control Method
+    { "rc",             "Rate Control Method",                  OFFSET(rate_control_mode),  AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR    }, AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP, AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_LATENCY_CONSTRAINED_VBR, VE, "rc" },
+    { "cqp",            "Constant Quantization Parameter",      0,                          AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP             }, 0, 0, VE, "rc" },
+    { "cbr",            "Constant Bitrate",                     0,                          AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CBR                     }, 0, 0, VE, "rc" },
+    { "vbr_peak",       "Peak Contrained Variable Bitrate",     0,                          AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR    }, 0, 0, VE, "rc" },
+    { "vbr_latency",    "Latency Constrained Variable Bitrate", 0,                          AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_LATENCY_CONSTRAINED_VBR }, 0, 0, VE, "rc" },
+
+    /// Enforce HRD, Filler Data, VBAQ, Frame Skipping
+    { "enforce_hrd",    "Enforce HRD",                          OFFSET(enforce_hrd),        AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
+    { "filler_data",    "Filler Data Enable",                   OFFSET(filler_data),        AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
+    { "vbaq",           "Enable VBAQ",                          OFFSET(enable_vbaq),        AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
+    { "frame_skipping", "Rate Control Based Frame Skip",        OFFSET(skip_frame),         AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, VE, NULL },
+
+    /// QP Values
+    { "qp_i",           "Quantization Parameter for I-Frame",   OFFSET(qp_i),               AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 51, VE },
+    { "qp_p",           "Quantization Parameter for P-Frame",   OFFSET(qp_p),               AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 51, VE },
+    { "qp_b",           "Quantization Parameter for B-Frame",   OFFSET(qp_b),               AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 51, VE },
+
+    /// Pre-Pass, Pre-Analysis, Two-Pass
+    { "preanalysis",    "Pre-Analysis Mode",                    OFFSET(preanalysis),        AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
+
+    /// Maximum Access Unit Size
+    { "max_au_size",    "Maximum Access Unit Size for rate control (in bits)",   OFFSET(max_au_size),        AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE, NULL },
+
+    /// Header Insertion Spacing
+    { "header_spacing", "Header Insertion Spacing",             OFFSET(header_spacing),     AV_OPT_TYPE_INT, { .i64 = -1 }, -1, 1000, VE },
+
+    /// B-Frames
+    // BPicturesPattern=bf
+    { "bf_delta_qp",    "B-Picture Delta QP",                   OFFSET(b_frame_delta_qp),   AV_OPT_TYPE_INT,  { .i64 = 4 }, -10, 10, VE, NULL },
+    { "bf_ref",         "Enable Reference to B-Frames",         OFFSET(b_frame_ref),        AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, VE, NULL },
+    { "bf_ref_delta_qp","Reference B-Picture Delta QP",         OFFSET(ref_b_frame_delta_qp), AV_OPT_TYPE_INT,  { .i64 = 4 }, -10, 10, VE, NULL },
+
+    /// Intra-Refresh
+    { "intra_refresh_mb","Intra Refresh MBs Number Per Slot in Macroblocks",       OFFSET(intra_refresh_mb),    AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, VE },
+
+    /// coder
+    { "coder",          "Coding Type",                          OFFSET(coding_mode),   AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_UNDEFINED }, AMF_VIDEO_ENCODER_UNDEFINED, AMF_VIDEO_ENCODER_CALV, VE, "coding" },
+    { "auto",           "Automatic",                            0,                     AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_UNDEFINED }, 0, 0, VE, "coder" },
+    { "cavlc",          "Context Adaptive Variable-Length Coding", 0,                  AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_CALV },      0, 0, VE, "coder" },
+    { "cabac",          "Context Adaptive Binary Arithmetic Coding", 0,                AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_CABAC },     0, 0, VE, "coder" },
+
+    { "me_half_pel",    "Enable ME Half Pixel",                 OFFSET(me_half_pel),   AV_OPT_TYPE_BOOL,  { .i64 = 1 }, 0, 1, VE, NULL },
+    { "me_quater_pel",  "Enable ME Quarter Pixel ",             OFFSET(me_quater_pel), AV_OPT_TYPE_BOOL,  { .i64 = 1 }, 0, 1, VE, NULL },
+
+    { NULL }
+};
+
+static av_cold int amf_encode_init_h264(AVCodecContext *avctx)
+{
+    int                 ret = 0;
+    AMF_RESULT          res = AMF_OK;
+    AmfContext         *ctx = avctx->priv_data;
+    AMFVariantStruct    var = {0};
+    amf_int64           profile = 0;
+    amf_int64           profile_level = 0;
+    AMFBuffer          *buffer;
+    AMFGuid             guid;
+
+    AMFSize             framesize = AMFConstructSize(avctx->width, avctx->height);
+    AMFRate             framerate = AMFConstructRate(avctx->time_base.den, avctx->time_base.num * avctx->ticks_per_frame);
+
+    int                 deblocking_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
+
+    if ((ret = ff_amf_encode_init(avctx)) != 0)
+        return ret;
+
+    // Static parameters
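+    // these "static" session properties are applied before encoder Init(); the rate-control properties set after Init() below are treated as dynamic and may also be adjusted while encoding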
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_USAGE, ctx->usage);
+
+    AMF_ASSIGN_PROPERTY_SIZE(res, ctx->encoder, AMF_VIDEO_ENCODER_FRAMESIZE, framesize);
+
+    AMF_ASSIGN_PROPERTY_RATE(res, ctx->encoder, AMF_VIDEO_ENCODER_FRAMERATE, framerate);
+
+    profile = avctx->profile;
+    if (profile == 0) {
+        profile = ctx->profile;
+    }
+
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PROFILE, profile);
+
+    profile_level = avctx->level;
+    if (profile_level == 0) {
+        profile_level = ctx->level;
+    }
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PROFILE_LEVEL, profile_level);
+
+    // Maximum Reference Frames
+    if (avctx->refs != -1) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MAX_NUM_REFRAMES, avctx->refs);
+    }
+    if (avctx->sample_aspect_ratio.den && avctx->sample_aspect_ratio.num) {
+        AMFRatio ratio = AMFConstructRatio(avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
+        AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder, AMF_VIDEO_ENCODER_ASPECT_RATIO, ratio);
+    }
+
+    /// Color Range (Partial/TV/MPEG or Full/PC/JPEG)
+    if (avctx->color_range == AVCOL_RANGE_JPEG) {
+        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_FULL_RANGE_COLOR, 1);
+    }
+
+    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_RATE_CONTROL_PREANALYSIS_ENABLE, AMF_VIDEO_ENCODER_PREENCODE_DISABLED);
+        if (ctx->preanalysis)
+            av_log(ctx, AV_LOG_WARNING, "Pre-Analysis is not supported by cqp Rate Control Method, automatically disabled. \n");
+    } else {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_RATE_CONTROL_PREANALYSIS_ENABLE, ctx->preanalysis);
+    }
+
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_QUALITY_PRESET, ctx->quality);
+
+    // Initialize Encoder
+    res = ctx->encoder->pVtbl->Init(ctx->encoder, ctx->format, avctx->width, avctx->height);
+    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "encoder->Init() failed with error %d \n", res);
+
+    // Dynamic parameters
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD, ctx->rate_control_mode);
+
+    /// VBV Buffer
+    if (avctx->rc_buffer_size != 0)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_VBV_BUFFER_SIZE, avctx->rc_buffer_size);
+    if (avctx->rc_initial_buffer_occupancy != 0) {
+        int amf_buffer_fullness = avctx->rc_buffer_size * 64 / avctx->rc_initial_buffer_occupancy;
+        if (amf_buffer_fullness > 64)
+            amf_buffer_fullness = 64;
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_INITIAL_VBV_BUFFER_FULLNESS, amf_buffer_fullness);
+    }
+    /// Maximum Access Unit Size
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MAX_AU_SIZE, ctx->max_au_size);
+
+    // QP Minimum / Maximum
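+    // with CQP rate control, open up the full 0..51 range so the fixed qp_i/qp_p/qp_b values below are not clipped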
+    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MIN_QP, 0);
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MAX_QP, 51);
+    } else {
+        if (avctx->qmin != -1) {
+            int qval = avctx->qmin > 51 ? 51 : avctx->qmin;
+            AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MIN_QP, qval);
+        }
+        if (avctx->qmax != -1) {
+            int qval = avctx->qmax > 51 ? 51 : avctx->qmax;
+            AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_MAX_QP, qval);
+        }
+    }
+    // QP Values
+    if (ctx->qp_i != -1)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_QP_I, ctx->qp_i);
+    if (ctx->qp_p != -1)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_QP_P, ctx->qp_p);
+    if (ctx->qp_b != -1)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_QP_B, ctx->qp_b);
+
+    // Bitrate
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_TARGET_BITRATE, avctx->bit_rate);
+
+    // Peak (max) bitrate. If not set explicitly, derive it from bit_rate with ~30% headroom for best results.
+    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CBR) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PEAK_BITRATE, avctx->bit_rate);
+    } else {
+        int64_t rc_max_rate = avctx->rc_max_rate >= avctx->bit_rate ? avctx->rc_max_rate : avctx->bit_rate * 13 / 10;
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_PEAK_BITRATE, rc_max_rate);
+    }
+    // Enforce HRD, Filler Data, VBAQ, Frame Skipping, Deblocking Filter
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_ENFORCE_HRD, !!ctx->enforce_hrd);
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_FILLER_DATA_ENABLE, !!ctx->filler_data);
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_RATE_CONTROL_SKIP_FRAME_ENABLE, !!ctx->skip_frame);
+    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_RATE_CONTROL_METHOD_CONSTANT_QP) {
+        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_ENABLE_VBAQ, 0);
+        if (ctx->enable_vbaq)
+            av_log(ctx, AV_LOG_WARNING, "VBAQ is not supported by cqp Rate Control Method, automatically disabled. \n");
+    } else {
+        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_ENABLE_VBAQ, !!ctx->enable_vbaq);
+    }
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_DE_BLOCKING_FILTER, !!deblocking_filter);
+
+    // B-Frames
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_B_PIC_PATTERN, avctx->max_b_frames);
+    if (avctx->max_b_frames && res == AMF_OK) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_B_PIC_DELTA_QP, ctx->b_frame_delta_qp);
+        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_B_REFERENCE_ENABLE, !!ctx->b_frame_ref);
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_REF_B_PIC_DELTA_QP, ctx->ref_b_frame_delta_qp);
+    }
+
+    // Keyframe Interval
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_IDR_PERIOD, avctx->gop_size);
+
+    // Header Insertion Spacing
+    if (ctx->header_spacing >= 0)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEADER_INSERTION_SPACING, ctx->header_spacing);
+
+    // Intra-Refresh, Slicing
+    if (ctx->intra_refresh_mb > 0)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_INTRA_REFRESH_NUM_MBS_PER_SLOT, ctx->intra_refresh_mb);
+    if (avctx->slices > 1)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_SLICES_PER_FRAME, avctx->slices);
+
+    // Coding
+    if (ctx->coding_mode != 0)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_CABAC_ENABLE, ctx->coding_mode);
+
+    // Motion Estimation
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_MOTION_HALF_PIXEL, !!ctx->me_half_pel);
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_MOTION_QUARTERPIXEL, !!ctx->me_quater_pel);
+
+    // fill extradata
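+    // the generated SPS/PPS headers are exposed as an AMFInterface property; query its AMFBuffer interface and copy the raw bytes into avctx->extradata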
+    res = AMFVariantInit(&var);
+    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "AMFVariantInit() failed with error %d \n", res);
+
+    res = ctx->encoder->pVtbl->GetProperty(ctx->encoder, AMF_VIDEO_ENCODER_EXTRADATA, &var);
+    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "GetProperty(AMF_VIDEO_ENCODER_EXTRADATA) failed with error %d \n", res);
+    AMF_RETURN_IF_FALSE(ctx, var.pInterface != NULL, AVERROR_BUG, "GetProperty(AMF_VIDEO_ENCODER_EXTRADATA) returned NULL \n");
+
+    guid = IID_AMFBuffer();
+
+    res = var.pInterface->pVtbl->QueryInterface(var.pInterface, &guid, (void**)&buffer); // query for buffer interface
+    if (res != AMF_OK) {
+        var.pInterface->pVtbl->Release(var.pInterface);
+    }
+    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "QueryInterface(IID_AMFBuffer) failed with error %d \n", res);
+
+    avctx->extradata_size = (int)buffer->pVtbl->GetSize(buffer);
+    avctx->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
+    if (!avctx->extradata) {
+        buffer->pVtbl->Release(buffer);
+        var.pInterface->pVtbl->Release(var.pInterface);
+        return AVERROR(ENOMEM);
+    }
+    memcpy(avctx->extradata, buffer->pVtbl->GetNative(buffer), avctx->extradata_size);
+
+    buffer->pVtbl->Release(buffer);
+    var.pInterface->pVtbl->Release(var.pInterface);
+
+    return 0;
+}
+
+static const AVCodecDefault defaults[] = {
+    { "refs",       "-1" },
+    { "aspect",     "0" },
+    { "sar",        "0" },
+    { "qmin",       "-1" },
+    { "qmax",       "-1" },
+    { "b",          "2M" },
+    { "maxrate",    "3M" },
+    { "g",          "250" },
+    { "keyint_min", "0" },
+    { "bf",         "0" },
+    { "slices",     "1" },
+    { NULL },
+};
+
+static const AVClass h264_amf_class = {
+    .class_name = "h264_amf",
+    .item_name = av_default_item_name,
+    .option = options,
+    .version = LIBAVUTIL_VERSION_INT,
+};
+//TODO declare as HW encoder when available
+AVCodec ff_h264_amf_encoder = {
+    .name           = "h264_amf",
+    .long_name      = NULL_IF_CONFIG_SMALL("AMD AMF H.264 Encoder"),
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = AV_CODEC_ID_H264,
+    .init           = amf_encode_init_h264,
+    .encode2        = ff_amf_encode_frame,
+    .close          = ff_amf_encode_close,
+    .priv_data_size = sizeof(AmfContext),
+    .priv_class     = &h264_amf_class,
+    .defaults       = defaults,
+    .capabilities   = AV_CODEC_CAP_DELAY,
+    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
+    .pix_fmts       = ff_amf_pix_fmts,
+};
diff --git a/libavcodec/amfenc_hevc.c b/libavcodec/amfenc_hevc.c
new file mode 100644
index 0000000..41bc5c8
--- /dev/null
+++ b/libavcodec/amfenc_hevc.c
@@ -0,0 +1,289 @@ 
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "amfenc.h"
+
+#include "libavutil/internal.h"
+#include "libavutil/opt.h"
+#include "internal.h"
+
+#define OFFSET(x) offsetof(AmfContext, x)
+#define VE AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_ENCODING_PARAM
+static const AVOption options[] = {
+    { "usage",          "Set the encoding usage",             OFFSET(usage),          AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING }, AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING, AMF_VIDEO_ENCODER_HEVC_USAGE_WEBCAM, VE, "usage" },
+    { "transcoding",    "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_TRANSCONDING },         0, 0, VE, "usage" },
+    { "ultralowlatency","", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_ULTRA_LOW_LATENCY },    0, 0, VE, "usage" },
+    { "lowlatency",     "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_LOW_LATENCY },          0, 0, VE, "usage" },
+    { "webcam",         "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_USAGE_WEBCAM },               0, 0, VE, "usage" },
+
+    { "profile",        "Set the profile (default main)",           OFFSET(profile),   AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN }, AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN, AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN, VE, "profile" },
+    { "main",           "", 0,                      AV_OPT_TYPE_CONST,{ .i64 = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN }, 0, 0, VE, "profile" },
+
+    { "profile_tier",   "Set the profile tier (default main)",      OFFSET(tier), AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_TIER_MAIN }, AMF_VIDEO_ENCODER_HEVC_TIER_MAIN, AMF_VIDEO_ENCODER_HEVC_TIER_HIGH, VE, "tier" },
+    { "main",           "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_TIER_MAIN }, 0, 0, VE, "tier" },
+    { "high",           "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_TIER_HIGH }, 0, 0, VE, "tier" },
+
+    { "level",          "Set the encoding level (default auto)",    OFFSET(level), AV_OPT_TYPE_INT,{ .i64 = 0 }, 0, AMF_LEVEL_6_2, VE, "level" },
+    { "auto",           "", 0, AV_OPT_TYPE_CONST, { .i64 = 0             }, 0, 0, VE, "level" },
+    { "1.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_1   }, 0, 0, VE, "level" },
+    { "2.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_2   }, 0, 0, VE, "level" },
+    { "2.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_2_1 }, 0, 0, VE, "level" },
+    { "3.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_3   }, 0, 0, VE, "level" },
+    { "3.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_3_1 }, 0, 0, VE, "level" },
+    { "4.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_4   }, 0, 0, VE, "level" },
+    { "4.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_4_1 }, 0, 0, VE, "level" },
+    { "5.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_5   }, 0, 0, VE, "level" },
+    { "5.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_5_1 }, 0, 0, VE, "level" },
+    { "5.2",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_5_2 }, 0, 0, VE, "level" },
+    { "6.0",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_6   }, 0, 0, VE, "level" },
+    { "6.1",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_6_1 }, 0, 0, VE, "level" },
+    { "6.2",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_LEVEL_6_2 }, 0, 0, VE, "level" },
+
+    { "quality",        "Set the encoding quality",                 OFFSET(quality),      AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_SPEED }, AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_QUALITY, AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_SPEED, VE, "quality" },
+    { "balanced",       "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_BALANCED }, 0, 0, VE, "quality" },
+    { "speed",          "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_SPEED    }, 0, 0, VE, "quality" },
+    { "quality",        "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET_QUALITY  }, 0, 0, VE, "quality" },
+
+    { "rc",             "Set the rate control mode",                OFFSET(rate_control_mode),   AV_OPT_TYPE_INT,   { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR }, AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CONSTANT_QP, AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CBR, VE, "rc" },
+    { "cqp",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CONSTANT_QP             }, 0, 0, VE, "rc" },
+    { "cbr",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CBR                     }, 0, 0, VE, "rc" },
+    { "vbr_peak",       "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_PEAK_CONSTRAINED_VBR    }, 0, 0, VE, "rc" },
+    { "vbr_latency",    "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_LATENCY_CONSTRAINED_VBR }, 0, 0, VE, "rc" },
+
+    { "header_insertion_mode",        "Set header insertion mode",  OFFSET(header_insertion_mode),      AV_OPT_TYPE_INT,{ .i64 = AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_NONE }, AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_NONE, AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_IDR_ALIGNED, VE, "hdrmode" },
+    { "none",           "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_NONE        }, 0, 0, VE, "hdrmode" },
+    { "gop",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_GOP_ALIGNED }, 0, 0, VE, "hdrmode" },
+    { "idr",            "", 0, AV_OPT_TYPE_CONST, { .i64 = AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE_IDR_ALIGNED }, 0, 0, VE, "hdrmode" },
+
+    { "gops_per_idr",    "GOPs per IDR 0-no IDR will be inserted",  OFFSET(gops_per_idr),  AV_OPT_TYPE_INT,{ .i64 = 60 }, 0, INT_MAX, VE },
+    { "preanalysis",    "Enable preanalysis",                       OFFSET(preanalysis),   AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
+    { "vbaq",           "Enable VBAQ",                              OFFSET(enable_vbaq),   AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
+    { "enforce_hrd",    "Enforce HRD",                              OFFSET(enforce_hrd),   AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
+    { "filler_data",    "Filler Data Enable",                       OFFSET(filler_data),   AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
+    { "max_au_size",    "Max AU Size in bits",                      OFFSET(max_au_size),   AV_OPT_TYPE_INT,{ .i64 = 0 }, 0, INT_MAX, VE, NULL },
+    { "min_qp_i",       "min quantization parameter for I-frame",   OFFSET(min_qp_i),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
+    { "max_qp_i",       "max quantization parameter for I-frame",   OFFSET(max_qp_i),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
+    { "min_qp_p",       "min quantization parameter for P-frame",   OFFSET(min_qp_p),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
+    { "max_qp_p",       "max quantization parameter for P-frame",   OFFSET(max_qp_p),      AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
+    { "qp_p",           "quantization parameter for P-frame",       OFFSET(qp_p),          AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
+    { "qp_i",           "quantization parameter for I-frame",       OFFSET(qp_i),          AV_OPT_TYPE_INT,{ .i64 = -1 }, -1, 51, VE },
+    { "skip_frame",     "Rate Control Based Frame Skip",            OFFSET(skip_frame),    AV_OPT_TYPE_BOOL,{ .i64 = 0 }, 0, 1, VE, NULL },
+    { "me_half_pel",    "Enable ME Half Pixel",                     OFFSET(me_half_pel),   AV_OPT_TYPE_BOOL,{ .i64 = 1 }, 0, 1, VE, NULL },
+    { "me_quater_pel",  "Enable ME Quarter Pixel ",                 OFFSET(me_quater_pel), AV_OPT_TYPE_BOOL,{ .i64 = 1 }, 0, 1, VE, NULL },
+
+    { NULL }
+};
+
+static av_cold int amf_encode_init_hevc(AVCodecContext *avctx)
+{
+    int                 ret = 0;
+    AMF_RESULT          res = AMF_OK;
+    AmfContext         *ctx = avctx->priv_data;
+    AMFVariantStruct    var = {0};
+    amf_int64           profile = 0;
+    amf_int64           profile_level = 0;
+    AMFBuffer          *buffer;
+    AMFGuid             guid;
+
+    AMFSize             framesize = AMFConstructSize(avctx->width, avctx->height);
+    AMFRate             framerate = AMFConstructRate(avctx->time_base.den, avctx->time_base.num * avctx->ticks_per_frame);
+
+    int                 deblocking_filter = (avctx->flags & AV_CODEC_FLAG_LOOP_FILTER) ? 1 : 0;
+
+    if ((ret = ff_amf_encode_init(avctx)) < 0)
+        return ret;
+
+    // init static parameters
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_USAGE, ctx->usage);
+
+    AMF_ASSIGN_PROPERTY_SIZE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_FRAMESIZE, framesize);
+
+    AMF_ASSIGN_PROPERTY_RATE(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_FRAMERATE, framerate);
+
+    switch (avctx->profile) {
+    case FF_PROFILE_HEVC_MAIN:
+        profile = AMF_VIDEO_ENCODER_HEVC_PROFILE_MAIN;
+        break;
+    default:
+        break;
+    }
+    if (profile == 0) {
+        profile = ctx->profile;
+    }
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PROFILE, profile);
+
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_TIER, ctx->tier);
+
+    profile_level = avctx->level;
+    if (profile_level == 0) {
+        profile_level = ctx->level;
+    }
+    if (profile_level != 0) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PROFILE_LEVEL, profile_level);
+    }
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_QUALITY_PRESET, ctx->quality);
+    // Maximum Reference Frames
+    if (avctx->refs != 0) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MAX_NUM_REFRAMES, avctx->refs);
+    }
+    // Aspect Ratio
+    if (avctx->sample_aspect_ratio.den && avctx->sample_aspect_ratio.num) {
+        AMFRatio ratio = AMFConstructRatio(avctx->sample_aspect_ratio.num, avctx->sample_aspect_ratio.den);
+        AMF_ASSIGN_PROPERTY_RATIO(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_ASPECT_RATIO, ratio);
+    }
+
+    // Picture control properties
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_NUM_GOPS_PER_IDR, ctx->gops_per_idr);
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_GOP_SIZE, avctx->gop_size);
+    if (avctx->slices > 1) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_SLICES_PER_FRAME, avctx->slices);
+    }
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_DE_BLOCKING_FILTER_DISABLE, deblocking_filter);
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_HEADER_INSERTION_MODE, ctx->header_insertion_mode);
+
+    // Rate control
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD, ctx->rate_control_mode);
+    if (avctx->rc_buffer_size)
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_VBV_BUFFER_SIZE, avctx->rc_buffer_size);
+
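+    // the initial VBV fullness is passed to AMF on a 0..64 scale, where 64 means a completely full buffer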
+    if (avctx->rc_initial_buffer_occupancy != 0) {
+        int amf_buffer_fullness = avctx->rc_buffer_size * 64 / avctx->rc_initial_buffer_occupancy;
+        if (amf_buffer_fullness > 64)
+            amf_buffer_fullness = 64;
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_INITIAL_VBV_BUFFER_FULLNESS, amf_buffer_fullness);
+    }
+    // Pre-Pass, Pre-Analysis, Two-Pass
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_PREANALYSIS_ENABLE, ctx->preanalysis);
+
+    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CONSTANT_QP) {
+        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_ENABLE_VBAQ, false);
+        if (ctx->enable_vbaq)
+            av_log(ctx, AV_LOG_WARNING, "VBAQ is not supported by cqp Rate Control Method, automatically disabled. \n");
+    } else {
+        AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_ENABLE_VBAQ, !!ctx->enable_vbaq);
+    }
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MOTION_HALF_PIXEL, ctx->me_half_pel);
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MOTION_QUARTERPIXEL, ctx->me_quater_pel);
+
+    // init encoder
+    res = ctx->encoder->pVtbl->Init(ctx->encoder, ctx->format, avctx->width, avctx->height);
+    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "encoder->Init() failed with error %d \n", res);
+
+    // init dynamic rate control params
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_ENFORCE_HRD, ctx->enforce_hrd);
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_FILLER_DATA_ENABLE, ctx->filler_data);
+
+    // Bitrate
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_TARGET_BITRATE, avctx->bit_rate);
+
+    // peak (max) bitrate. If not set explicitly, derive it from bit_rate with ~30% headroom for best results.
+    if (ctx->rate_control_mode == AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_METHOD_CBR) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PEAK_BITRATE, avctx->bit_rate);
+    } else {
+        int64_t rc_max_rate = avctx->rc_max_rate >= avctx->bit_rate ? avctx->rc_max_rate : avctx->bit_rate * 13 / 10;
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_PEAK_BITRATE, rc_max_rate);
+    }
+
+    // init dynamic picture control params
+    AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MAX_AU_SIZE, ctx->max_au_size);
+
+    if (ctx->min_qp_i != -1) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MIN_QP_I, ctx->min_qp_i);
+    }
+    if (ctx->max_qp_i != -1) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MAX_QP_I, ctx->max_qp_i);
+    }
+    if (ctx->min_qp_p != -1) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MIN_QP_P, ctx->min_qp_p);
+    }
+    if (ctx->max_qp_p != -1) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_MAX_QP_P, ctx->max_qp_p);
+    }
+
+    if (ctx->qp_p != -1) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_QP_I, ctx->qp_p);
+    }
+    if (ctx->qp_i != -1) {
+        AMF_ASSIGN_PROPERTY_INT64(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_QP_P, ctx->qp_i);
+    }
+    AMF_ASSIGN_PROPERTY_BOOL(res, ctx->encoder, AMF_VIDEO_ENCODER_HEVC_RATE_CONTROL_SKIP_FRAME_ENABLE, ctx->skip_frame);
+
+    // fill extradata
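+    // the generated VPS/SPS/PPS headers come back as an AMFInterface; query its AMFBuffer interface and copy them into avctx->extradata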
+    res = AMFVariantInit(&var);
+    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "AMFVariantInit() failed with error %d \n", res);
+
+    res = ctx->encoder->pVtbl->GetProperty(ctx->encoder, AMF_VIDEO_ENCODER_HEVC_EXTRADATA, &var);
+    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "GetProperty(AMF_VIDEO_ENCODER_EXTRADATA) failed with error %d \n", res);
+    AMF_RETURN_IF_FALSE(ctx, var.pInterface != NULL, AVERROR_BUG, "GetProperty(AMF_VIDEO_ENCODER_EXTRADATA) returned NULL \n");
+
+    guid = IID_AMFBuffer();
+
+    res = var.pInterface->pVtbl->QueryInterface(var.pInterface, &guid, (void**)&buffer); // query for buffer interface
+    if (res != AMF_OK) {
+        var.pInterface->pVtbl->Release(var.pInterface);
+    }
+    AMF_RETURN_IF_FALSE(ctx, res == AMF_OK, AVERROR_BUG, "QueryInterface(IID_AMFBuffer) failed with error %d \n", res);
+
+    avctx->extradata_size = (int)buffer->pVtbl->GetSize(buffer);
+    avctx->extradata = av_mallocz(avctx->extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
+    if (!avctx->extradata) {
+        buffer->pVtbl->Release(buffer);
+        var.pInterface->pVtbl->Release(var.pInterface);
+        return AVERROR(ENOMEM);
+    }
+    memcpy(avctx->extradata, buffer->pVtbl->GetNative(buffer), avctx->extradata_size);
+
+    buffer->pVtbl->Release(buffer);
+    var.pInterface->pVtbl->Release(var.pInterface);
+
+    return 0;
+}
+static const AVCodecDefault defaults[] = {
+    { "b",       "2M" },
+    { "maxrate", "3M" },
+    { "qmin",   "-1" },
+    { "qmax",   "-1" },
+    { "qdiff",  "-1" },
+    { "qblur",  "-1" },
+    { "qcomp",  "-1" },
+    { NULL },
+};
+static const AVClass hevc_amf_class = {
+    .class_name = "hevc_amf",
+    .item_name = av_default_item_name,
+    .option = options,
+    .version = LIBAVUTIL_VERSION_INT,
+};
+//TODO declare as HW encoder when available
+AVCodec ff_hevc_amf_encoder = {
+    .name           = "hevc_amf",
+    .long_name      = NULL_IF_CONFIG_SMALL("AMD AMF HEVC encoder"),
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = AV_CODEC_ID_HEVC,
+    .init           = amf_encode_init_hevc,
+    .encode2        = ff_amf_encode_frame,
+    .close          = ff_amf_encode_close,
+    .priv_data_size = sizeof(AmfContext),
+    .priv_class     = &hevc_amf_class,
+    .defaults       = defaults,
+    .capabilities   = AV_CODEC_CAP_DELAY,
+    .caps_internal  = FF_CODEC_CAP_INIT_CLEANUP,
+    .pix_fmts       = ff_amf_pix_fmts,
+};
diff --git a/libavcodec/version.h b/libavcodec/version.h
index 226da19..6c0d7a8 100644
--- a/libavcodec/version.h
+++ b/libavcodec/version.h
@@ -28,8 +28,8 @@ 
 #include "libavutil/version.h"
 
 #define LIBAVCODEC_VERSION_MAJOR  58
-#define LIBAVCODEC_VERSION_MINOR   0
-#define LIBAVCODEC_VERSION_MICRO 101
+#define LIBAVCODEC_VERSION_MINOR   1
+#define LIBAVCODEC_VERSION_MICRO 100
 
 #define LIBAVCODEC_VERSION_INT  AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
                                                LIBAVCODEC_VERSION_MINOR, \