
[FFmpeg-devel,V2] examples/vaapi_dec: Add a VA-API hwaccel decoding example

Message ID 09386466-2472-4f2d-27c6-362b61543828@gmail.com
State New

Commit Message

Jun Zhao July 11, 2017, 4:06 a.m. UTC
V2: re-work with new hw decoding API.
From 0e4d230ae4c98949a962c6bbdad31d216b54bb6a Mon Sep 17 00:00:00 2001
From: Jun Zhao <jun.zhao@intel.com>
Date: Tue, 21 Mar 2017 11:04:41 +0800
Subject: [V2] examples/vaapi_dec: Add a VA-API hwaccel decoding example.

Add a VA-API hwaccel decoding example.

Signed-off-by: Liu, Kaixuan <kaixuan.liu@intel.com>
Signed-off-by: Jun Zhao <jun.zhao@intel.com>
---
 doc/examples/vaapi_dec.c | 266 +++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 266 insertions(+)
 create mode 100644 doc/examples/vaapi_dec.c

Comments

wm4 July 11, 2017, 7:58 a.m. UTC | #1
On Tue, 11 Jul 2017 12:06:10 +0800
Jun Zhao <mypopydev@gmail.com> wrote:

> From 0e4d230ae4c98949a962c6bbdad31d216b54bb6a Mon Sep 17 00:00:00 2001
> From: Jun Zhao <jun.zhao@intel.com>
> Date: Tue, 21 Mar 2017 11:04:41 +0800
> Subject: [V2] examples/vaapi_dec: Add a VA-API hwaccel decoding example.
> 
> Add a VA-API hwaccel decoding example.
> 
> Signed-off-by: Liu, Kaixuan <kaixuan.liu@intel.com>
> Signed-off-by: Jun Zhao <jun.zhao@intel.com>
> ---
>  doc/examples/vaapi_dec.c | 266 +++++++++++++++++++++++++++++++++++++++++++++++
>  1 file changed, 266 insertions(+)
>  create mode 100644 doc/examples/vaapi_dec.c
> 
> diff --git a/doc/examples/vaapi_dec.c b/doc/examples/vaapi_dec.c
> new file mode 100644
> index 0000000000..01320a3b71
> --- /dev/null
> +++ b/doc/examples/vaapi_dec.c
> @@ -0,0 +1,266 @@
> +/*
> + * Video Acceleration API (video decoding) decode sample
> + *
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
> + */
> +
> +/**
> + * @file
> + * Intel VAAPI-accelerated decoding example.
> + *
> + * @example vaapi_dec.c
> + * This example shows how to do VAAPI-accelerated decoding with output
> + * frames from the VAAPI video surfaces.
> + */
> +
> +#include <stdio.h>
> +#include <libavcodec/avcodec.h>
> +#include <libavcodec/vaapi.h>
> +#include <libavformat/avformat.h>
> +#include <libavutil/pixdesc.h>
> +#include <libavutil/hwcontext.h>
> +#include <libavutil/opt.h>
> +#include <libavutil/hwcontext_vaapi.h>
> +#include <libavutil/avassert.h>
> +
> +static AVBufferRef *hw_device_ctx = NULL;
> +FILE *output_file = NULL;
> +
> +int decoder_init_vaapi(AVCodecContext *ctx)
> +{
> +    int err = 0;
> +    const char *dev_name = "/dev/dri/renderD128";
> +
> +    if ((err = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI,
> +                                      dev_name, NULL, 0)) < 0) {

The implementation already uses your dev_name as default, so you should
be able to drop that, and pass NULL to it.
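
E.g. a minimal sketch of that call (per the above, NULL falls back to the same render node):

    err = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI,
                                 NULL, NULL, 0);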

> +        fprintf(stderr, "Failed to create a VAAPI device.\n");
> +        return err;
> +    }
> +    ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
> +
> +    return err;
> +}
> +
> +static enum AVPixelFormat get_vaapi_format(AVCodecContext *ctx,
> +                                           const enum AVPixelFormat *pix_fmts)
> +{
> +    const enum AVPixelFormat *p;
> +
> +    for (p = pix_fmts; *p != -1; p++) {
> +        if (*p == AV_PIX_FMT_VAAPI)
> +            return *p;
> +    }
> +
> +    return AV_PIX_FMT_NONE;
> +}
> +
> +int retrieve_data(AVFrame *input)
> +{
> +    AVFrame *output = NULL;
> +    int err;
> +    av_assert0(input->format == AV_PIX_FMT_VAAPI);
> +
> +    if (!(output = av_frame_alloc()))
> +        return AVERROR(ENOMEM);
> +    /* default output nv12 */
> +    output->format = AV_PIX_FMT_NV12;

You could use av_hwframe_transfer_get_formats() instead of hardcoding
it. Might be better if the input is 10 bit, not sure. (Just a
suggestion, up to you.)
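
For instance, a rough sketch of that (untested; it just takes the first entry of
the AV_PIX_FMT_NONE-terminated list the call returns):

    enum AVPixelFormat *formats = NULL;

    if (av_hwframe_transfer_get_formats(input->hw_frames_ctx,
                                        AV_HWFRAME_TRANSFER_DIRECTION_FROM,
                                        &formats, 0) >= 0) {
        output->format = formats[0];
        av_free(formats);
    } else {
        output->format = AV_PIX_FMT_NV12; /* keep the current default as fallback */
    }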

> +    if ((err = av_hwframe_transfer_data(output, input, 0)) < 0) {
> +        fprintf(stderr, "Failed to transfer data to output frame: %d.\n", err);
> +        goto fail;
> +    }
> +
> +    if ((err = av_frame_copy_props(output, input)) < 0) {
> +        av_frame_unref(output);
> +        goto fail;
> +    }
> +
> +    av_frame_unref(input);
> +    av_frame_move_ref(input, output);
> +    av_frame_free(&output);
> +    return 0;
> +
> +fail:
> +    av_frame_free(&output);
> +    return err;
> +}
> +
> +int write_frame(AVFrame *frame)
> +{
> +    int idx, size;
> +    int width = frame->width;
> +    int height = frame->height;
> +
> +    av_assert0(frame && frame->data[0] && output_file);
> +
> +    for (idx = 0; idx < height; idx++) {
> +        if ((size = fwrite(frame->data[0] + idx*frame->linesize[0],
> +                           1, width, output_file)) < 0) {
> +            fprintf(stderr, "Dump Y to file error.\n");
> +            return -1;
> +        }
> +    }
> +
> +    height >>= 1;
> +    for (idx = 0; idx < height; idx++) {
> +        if ((size = fwrite(frame->data[1] + idx*frame->linesize[1],
> +                           1, width, output_file)) < 0) {
> +            fprintf(stderr, "Dump UV to file error.\n");
> +            return -1;
> +        }
> +    }

Using av_image_copy_to_buffer() would be more generic, although it'd
require another allocation/copy in memory. (Just a suggestion.)
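
Something along these lines, for example (sketch only; needs libavutil/imgutils.h
and goes through one intermediate buffer):

    static int write_frame(AVFrame *frame)
    {
        uint8_t *buffer;
        int size, ret;

        size = av_image_get_buffer_size(frame->format, frame->width,
                                        frame->height, 1);
        if (size < 0)
            return size;

        buffer = av_malloc(size);
        if (!buffer)
            return AVERROR(ENOMEM);

        ret = av_image_copy_to_buffer(buffer, size,
                                      (const uint8_t * const *)frame->data,
                                      frame->linesize, frame->format,
                                      frame->width, frame->height, 1);
        if (ret >= 0 && fwrite(buffer, 1, size, output_file) < size) {
            fprintf(stderr, "Failed to dump raw data.\n");
            ret = AVERROR(EIO);
        }

        av_free(buffer);
        return ret < 0 ? ret : 0;
    }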

> +
> +    return 0;
> +}
> +
> +int decode_write(AVCodecContext *avctx, AVPacket packet, int flush)
> +{
> +    AVFrame *frame = NULL;
> +    int ret = 0;
> +
> +    ret = avcodec_send_packet(avctx, &packet);
> +    if (ret < 0 && ret != AVERROR_EOF)
> +        return 0;
> +
> +    if (!(frame = av_frame_alloc()))
> +        return AVERROR(ENOMEM);
> +
> +    ret = avcodec_receive_frame(avctx, frame);
> +    if (ret >= 0) {
> +        /* retrieve data from GPU to CPU */
> +        if ((ret = retrieve_data(frame)) < 0)
> +            goto fail;
> +
> +        if ((ret = write_frame(frame)) < 0)
> +            goto fail;
> +    } else if (flush == 0) {
> +        ret = 0;
> +    }
> +
> +fail:
> +    av_frame_free(&frame);
> +    return ret;
> +}
> +
> +/* flush the decoder */
> +int flush(AVCodecContext *avctx)
> +{
> +    AVPacket packet;
> +    int ret = 0;
> +
> +    av_init_packet(&packet);
> +    packet.data = NULL;
> +    packet.size = 0;
> +
> +    while (1) {
> +        if ((ret = decode_write(avctx, packet, 1)) < 0)
> +            break;
> +    }
> +    av_packet_unref(&packet);
> +
> +    return 0;
> +}
> +
> +int main(int argc, char *argv[])
> +{
> +    AVFormatContext *input_ctx = NULL;
> +    int i, video_stream, ret;
> +    AVCodecContext *decoder_ctx = NULL;
> +    AVCodec *decoder = NULL;
> +    AVPacket packet;
> +
> +    if (argc < 3) {
> +        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
> +        return -1;
> +    }
> +
> +    av_register_all();
> +
> +    /* open the input file */
> +    if (avformat_open_input(&input_ctx, argv[1], NULL, NULL) != 0) {
> +        fprintf(stderr, "Cannot open input file '%s'\n", argv[1]);
> +        return -1;
> +    }
> +
> +    if (avformat_find_stream_info(input_ctx, NULL) < 0) {
> +        fprintf(stderr, "Couldn't find input stream information.\n");
> +        goto fail;
> +    }
> +
> +    /* find the video stream information */
> +    video_stream = -1;
> +    for (i = 0; i < input_ctx->nb_streams; i++) {
> +        AVStream *st = input_ctx->streams[i];
> +        if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
> +            video_stream = i;        /* NOTE: just handle the first video stream */
> +            break;
> +        }
> +    }
> +
> +    if (video_stream == -1) {
> +        fprintf(stderr, "No find a video stream in the input file.\n");
> +        goto fail;
> +    }
> +
> +    /* initialize the video decoder */
> +    if (!(decoder = avcodec_find_decoder(input_ctx->streams[video_stream]->codecpar->codec_id))) {
> +        fprintf(stderr, "Could not find input codec\n");
> +        goto fail;
> +    }
> +
> +    if (!(decoder_ctx = avcodec_alloc_context3(decoder)))
> +        return AVERROR(ENOMEM);
> +
> +    if (avcodec_parameters_to_context(decoder_ctx,
> +                                      input_ctx->streams[video_stream]->codecpar) < 0)
> +        goto fail;
> +
> +    decoder_ctx->get_format  = get_vaapi_format;
> +    av_opt_set_int(decoder_ctx, "refcounted_frames", 1, 0);
> +
> +    if (decoder_init_vaapi(decoder_ctx) < 0)
> +        goto fail;
> +
> +    if ((ret = avcodec_open2(decoder_ctx, decoder, NULL)) < 0) {
> +        fprintf(stderr, "Failed to open codec for stream #%u\n", i);
> +        goto fail;
> +    }
> +
> +    /* open the output file to dump YUV */
> +    output_file = fopen(argv[2], "w+");
> +
> +    /* actual decoding and dump the YUV data */
> +    while (av_read_frame(input_ctx, &packet) >= 0) {
> +        if (video_stream == packet.stream_index)
> +            ret = decode_write(decoder_ctx, packet, 0);
> +        av_packet_unref(&packet);
> +
> +        if (ret < 0)
> +            break;
> +    }
> +
> +    /* flush the decoder */
> +    flush(decoder_ctx);
> +
> +fail:
> +    if (output_file)
> +        fclose(output_file);
> +    avcodec_free_context(&decoder_ctx);
> +    avformat_close_input(&input_ctx);
> +    av_buffer_unref(&hw_device_ctx);
> +
> +    return ret;
> +}

Generally looks OK. Can you replace all references to "vaapi" with "hw"
or so? This code will work for other hwaccels too, and the only things
that need to be switched are the AV_HWDEVICE_TYPE and the decoding
AVPixelFormat. The pixel format still needs to be mapped manually, but
we hope to add new API to make this also generic, so in the end you
could choose the hw implementation by passing a name to it or fully
automatically or so.
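
For the pixel format part, a sketch of what the generic version might look like
(hw_pix_fmt being a file-scope variable filled in at init time; the names here
are just placeholders):

    static enum AVPixelFormat hw_pix_fmt = AV_PIX_FMT_VAAPI;

    static enum AVPixelFormat get_hw_format(AVCodecContext *ctx,
                                            const enum AVPixelFormat *pix_fmts)
    {
        const enum AVPixelFormat *p;

        for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
            if (*p == hw_pix_fmt)
                return *p;
        }

        fprintf(stderr, "Failed to get HW surface format.\n");
        return AV_PIX_FMT_NONE;
    }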

Also please wait for comments by Mark Thompson.
Jun Zhao July 17, 2017, 2:51 a.m. UTC | #2
On 2017/7/11 15:58, wm4 wrote:
> On Tue, 11 Jul 2017 12:06:10 +0800
> Jun Zhao <mypopydev@gmail.com> wrote:
> 
>> From 0e4d230ae4c98949a962c6bbdad31d216b54bb6a Mon Sep 17 00:00:00 2001
>> From: Jun Zhao <jun.zhao@intel.com>
>> Date: Tue, 21 Mar 2017 11:04:41 +0800
>> Subject: [V2] examples/vaapi_dec: Add a VA-API hwaccel decoding example.
>>
>> Add a VA-API hwaccel decoding example.
>>
>> Signed-off-by: Liu, Kaixuan <kaixuan.liu@intel.com>
>> Signed-off-by: Jun Zhao <jun.zhao@intel.com>
>> ---
>>  doc/examples/vaapi_dec.c | 266 +++++++++++++++++++++++++++++++++++++++++++++++
>>  1 file changed, 266 insertions(+)
>>  create mode 100644 doc/examples/vaapi_dec.c
>>
>> diff --git a/doc/examples/vaapi_dec.c b/doc/examples/vaapi_dec.c
>> new file mode 100644
>> index 0000000000..01320a3b71
>> --- /dev/null
>> +++ b/doc/examples/vaapi_dec.c
>> @@ -0,0 +1,266 @@
>> +/*
>> + * Video Acceleration API (video decoding) decode sample
>> + *
>> + * This file is part of FFmpeg.
>> + *
>> + * FFmpeg is free software; you can redistribute it and/or
>> + * modify it under the terms of the GNU Lesser General Public
>> + * License as published by the Free Software Foundation; either
>> + * version 2.1 of the License, or (at your option) any later version.
>> + *
>> + * FFmpeg is distributed in the hope that it will be useful,
>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
>> + * Lesser General Public License for more details.
>> + *
>> + * You should have received a copy of the GNU Lesser General Public
>> + * License along with FFmpeg; if not, write to the Free Software
>> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
>> + */
>> +
>> +/**
>> + * @file
>> + * Intel VAAPI-accelerated decoding example.
>> + *
>> + * @example vaapi_dec.c
>> + * This example shows how to do VAAPI-accelerated decoding with output
>> + * frames from the VAAPI video surfaces.
>> + */
>> +
>> +#include <stdio.h>
>> +#include <libavcodec/avcodec.h>
>> +#include <libavcodec/vaapi.h>
>> +#include <libavformat/avformat.h>
>> +#include <libavutil/pixdesc.h>
>> +#include <libavutil/hwcontext.h>
>> +#include <libavutil/opt.h>
>> +#include <libavutil/hwcontext_vaapi.h>
>> +#include <libavutil/avassert.h>
>> +
>> +static AVBufferRef *hw_device_ctx = NULL;
>> +FILE *output_file = NULL;
>> +
>> +int decoder_init_vaapi(AVCodecContext *ctx)
>> +{
>> +    int err = 0;
>> +    const char *dev_name = "/dev/dri/renderD128";
>> +
>> +    if ((err = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI,
>> +                                      dev_name, NULL, 0)) < 0) {
> 
> The implementation already uses your dev_name as default, so you should
> be able to drop that, and pass NULL to it.
> 

Yes, will remove the dev_name.

>> +        fprintf(stderr, "Failed to create a VAAPI device.\n");
>> +        return err;
>> +    }
>> +    ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
>> +
>> +    return err;
>> +}
>> +
>> +static enum AVPixelFormat get_vaapi_format(AVCodecContext *ctx,
>> +                                           const enum AVPixelFormat *pix_fmts)
>> +{
>> +    const enum AVPixelFormat *p;
>> +
>> +    for (p = pix_fmts; *p != -1; p++) {
>> +        if (*p == AV_PIX_FMT_VAAPI)
>> +            return *p;
>> +    }
>> +
>> +    return AV_PIX_FMT_NONE;
>> +}
>> +
>> +int retrieve_data(AVFrame *input)
>> +{
>> +    AVFrame *output = NULL;
>> +    int err;
>> +    av_assert0(input->format == AV_PIX_FMT_VAAPI);
>> +
>> +    if (!(output = av_frame_alloc()))
>> +        return AVERROR(ENOMEM);
>> +    /* default output nv12 */
>> +    output->format = AV_PIX_FMT_NV12;
> 
> You could use av_hwframe_transfer_get_formats() instead of hardcoding
> it. Might be better if the input is 10 bit, not sure. (Just a
> suggestion, up to you.)
OK, it will show the HW surface / SW pixel format mapping if we
use av_hwframe_transfer_get_formats() instead of hardcoding.

> 
>> +    if ((err = av_hwframe_transfer_data(output, input, 0)) < 0) {
>> +        fprintf(stderr, "Failed to transfer data to output frame: %d.\n", err);
>> +        goto fail;
>> +    }
>> +
>> +    if ((err = av_frame_copy_props(output, input)) < 0) {
>> +        av_frame_unref(output);
>> +        goto fail;
>> +    }
>> +
>> +    av_frame_unref(input);
>> +    av_frame_move_ref(input, output);
>> +    av_frame_free(&output);
>> +    return 0;
>> +
>> +fail:
>> +    av_frame_free(&output);
>> +    return err;
>> +}
>> +
>> +int write_frame(AVFrame *frame)
>> +{
>> +    int idx, size;
>> +    int width = frame->width;
>> +    int height = frame->height;
>> +
>> +    av_assert0(frame && frame->data[0] && output_file);
>> +
>> +    for (idx = 0; idx < height; idx++) {
>> +        if ((size = fwrite(frame->data[0] + idx*frame->linesize[0],
>> +                           1, width, output_file)) < 0) {
>> +            fprintf(stderr, "Dump Y to file error.\n");
>> +            return -1;
>> +        }
>> +    }
>> +
>> +    height >>= 1;
>> +    for (idx = 0; idx < height; idx++) {
>> +        if ((size = fwrite(frame->data[1] + idx*frame->linesize[1],
>> +                           1, width, output_file)) < 0) {
>> +            fprintf(stderr, "Dump UV to file error.\n");
>> +            return -1;
>> +        }
>> +    }
> 
> Using av_image_copy_to_buffer() would be more generic, although it'd
> require another allocation/copy in memory. (Just a suggestion.)
>
Will use av_image_copy_to_buffer().

>> +
>> +    return 0;
>> +}
>> +
>> +int decode_write(AVCodecContext *avctx, AVPacket packet, int flush)
>> +{
>> +    AVFrame *frame = NULL;
>> +    int ret = 0;
>> +
>> +    ret = avcodec_send_packet(avctx, &packet);
>> +    if (ret < 0 && ret != AVERROR_EOF)
>> +        return 0;
>> +
>> +    if (!(frame = av_frame_alloc()))
>> +        return AVERROR(ENOMEM);
>> +
>> +    ret = avcodec_receive_frame(avctx, frame);
>> +    if (ret >= 0) {
>> +        /* retrieve data from GPU to CPU */
>> +        if ((ret = retrieve_data(frame)) < 0)
>> +            goto fail;
>> +
>> +        if ((ret = write_frame(frame)) < 0)
>> +            goto fail;
>> +    } else if (flush == 0) {
>> +        ret = 0;
>> +    }
>> +
>> +fail:
>> +    av_frame_free(&frame);
>> +    return ret;
>> +}
>> +
>> +/* flush the decoder */
>> +int flush(AVCodecContext *avctx)
>> +{
>> +    AVPacket packet;
>> +    int ret = 0;
>> +
>> +    av_init_packet(&packet);
>> +    packet.data = NULL;
>> +    packet.size = 0;
>> +
>> +    while (1) {
>> +        if ((ret = decode_write(avctx, packet, 1)) < 0)
>> +            break;
>> +    }
>> +    av_packet_unref(&packet);
>> +
>> +    return 0;
>> +}
>> +
>> +int main(int argc, char *argv[])
>> +{
>> +    AVFormatContext *input_ctx = NULL;
>> +    int i, video_stream, ret;
>> +    AVCodecContext *decoder_ctx = NULL;
>> +    AVCodec *decoder = NULL;
>> +    AVPacket packet;
>> +
>> +    if (argc < 3) {
>> +        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
>> +        return -1;
>> +    }
>> +
>> +    av_register_all();
>> +
>> +    /* open the input file */
>> +    if (avformat_open_input(&input_ctx, argv[1], NULL, NULL) != 0) {
>> +        fprintf(stderr, "Cannot open input file '%s'\n", argv[1]);
>> +        return -1;
>> +    }
>> +
>> +    if (avformat_find_stream_info(input_ctx, NULL) < 0) {
>> +        fprintf(stderr, "Couldn't find input stream information.\n");
>> +        goto fail;
>> +    }
>> +
>> +    /* find the video stream information */
>> +    video_stream = -1;
>> +    for (i = 0; i < input_ctx->nb_streams; i++) {
>> +        AVStream *st = input_ctx->streams[i];
>> +        if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
>> +            video_stream = i;        /* NOTE: just handle the first video stream */
>> +            break;
>> +        }
>> +    }
>> +
>> +    if (video_stream == -1) {
>> +        fprintf(stderr, "No find a video stream in the input file.\n");
>> +        goto fail;
>> +    }
>> +
>> +    /* initialize the video decoder */
>> +    if (!(decoder = avcodec_find_decoder(input_ctx->streams[video_stream]->codecpar->codec_id))) {
>> +        fprintf(stderr, "Could not find input codec\n");
>> +        goto fail;
>> +    }
>> +
>> +    if (!(decoder_ctx = avcodec_alloc_context3(decoder)))
>> +        return AVERROR(ENOMEM);
>> +
>> +    if (avcodec_parameters_to_context(decoder_ctx,
>> +                                      input_ctx->streams[video_stream]->codecpar) < 0)
>> +        goto fail;
>> +
>> +    decoder_ctx->get_format  = get_vaapi_format;
>> +    av_opt_set_int(decoder_ctx, "refcounted_frames", 1, 0);
>> +
>> +    if (decoder_init_vaapi(decoder_ctx) < 0)
>> +        goto fail;
>> +
>> +    if ((ret = avcodec_open2(decoder_ctx, decoder, NULL)) < 0) {
>> +        fprintf(stderr, "Failed to open codec for stream #%u\n", i);
>> +        goto fail;
>> +    }
>> +
>> +    /* open the output file to dump YUV */
>> +    output_file = fopen(argv[2], "w+");
>> +
>> +    /* actual decoding and dump the YUV data */
>> +    while (av_read_frame(input_ctx, &packet) >= 0) {
>> +        if (video_stream == packet.stream_index)
>> +            ret = decode_write(decoder_ctx, packet, 0);
>> +        av_packet_unref(&packet);
>> +
>> +        if (ret < 0)
>> +            break;
>> +    }
>> +
>> +    /* flush the decoder */
>> +    flush(decoder_ctx);
>> +
>> +fail:
>> +    if (output_file)
>> +        fclose(output_file);
>> +    avcodec_free_context(&decoder_ctx);
>> +    avformat_close_input(&input_ctx);
>> +    av_buffer_unref(&hw_device_ctx);
>> +
>> +    return ret;
>> +}
> 
> Generally looks OK. Can you replace all references to "vaapi" with "hw"
> or so? This code will work for other hwaccels too, and the only things
> that need to be switched are the AV_HWDEVICE_TYPE and the decoding
> AVPixelFormat. The pixel format still needs to be mapped manually, but
> we hope to add new API to make this also generic, so in the end you
> could choose the hw implementation by passing a name to it or fully
> automatically or so.

Good suggestion, will re-work for the general HWAccel decode sample.  
> Also please wait for comments by Mark Thompson.
Mark Thompson July 18, 2017, 11:30 p.m. UTC | #3
On 17/07/17 03:51, Jun Zhao wrote:
> On 2017/7/11 15:58, wm4 wrote:
>> On Tue, 11 Jul 2017 12:06:10 +0800
>> Jun Zhao <mypopydev@gmail.com> wrote:
>>
>>> From 0e4d230ae4c98949a962c6bbdad31d216b54bb6a Mon Sep 17 00:00:00 2001
>>> From: Jun Zhao <jun.zhao@intel.com>
>>> Date: Tue, 21 Mar 2017 11:04:41 +0800
>>> Subject: [V2] examples/vaapi_dec: Add a VA-API hwaccel decoding example.
>>>
>>> Add a VA-API hwaccel decoding example.
>>>
>>> Signed-off-by: Liu, Kaixuan <kaixuan.liu@intel.com>
>>> Signed-off-by: Jun Zhao <jun.zhao@intel.com>
>>> ---
>>>  doc/examples/vaapi_dec.c | 266 +++++++++++++++++++++++++++++++++++++++++++++++
>>>  1 file changed, 266 insertions(+)
>>>  create mode 100644 doc/examples/vaapi_dec.c
>>>
>>> diff --git a/doc/examples/vaapi_dec.c b/doc/examples/vaapi_dec.c
>>> new file mode 100644
>>> index 0000000000..01320a3b71
>>> --- /dev/null
>>> +++ b/doc/examples/vaapi_dec.c
>>> @@ -0,0 +1,266 @@
>>> +/*
>>> + * Video Acceleration API (video decoding) decode sample
>>> + *
>>> + * This file is part of FFmpeg.
>>> + *
>>> + * FFmpeg is free software; you can redistribute it and/or
>>> + * modify it under the terms of the GNU Lesser General Public
>>> + * License as published by the Free Software Foundation; either
>>> + * version 2.1 of the License, or (at your option) any later version.
>>> + *
>>> + * FFmpeg is distributed in the hope that it will be useful,
>>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
>>> + * Lesser General Public License for more details.
>>> + *
>>> + * You should have received a copy of the GNU Lesser General Public
>>> + * License along with FFmpeg; if not, write to the Free Software
>>> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
>>> + */
>>> +
>>> +/**
>>> + * @file
>>> + * Intel VAAPI-accelerated decoding example.
>>> + *
>>> + * @example vaapi_dec.c
>>> + * This example shows how to do VAAPI-accelerated decoding with output
>>> + * frames from the VAAPI video surfaces.
>>> + */
>>> +
>>> +#include <stdio.h>
>>> +#include <libavcodec/avcodec.h>
>>> +#include <libavcodec/vaapi.h>

This file is deprecated, and contains nothing useful anyway.

>>> +#include <libavformat/avformat.h>
>>> +#include <libavutil/pixdesc.h>
>>> +#include <libavutil/hwcontext.h>
>>> +#include <libavutil/opt.h>
>>> +#include <libavutil/hwcontext_vaapi.h>

Shouldn't need any of the API-specific hwcontext includes, just the generic one.
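
I.e. the include list can shrink to something like:

    #include <stdio.h>
    #include <libavcodec/avcodec.h>
    #include <libavformat/avformat.h>
    #include <libavutil/pixdesc.h>
    #include <libavutil/hwcontext.h>
    #include <libavutil/opt.h>
    #include <libavutil/avassert.h>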

>>> +#include <libavutil/avassert.h>
>>> +
>>> +static AVBufferRef *hw_device_ctx = NULL;
>>> +FILE *output_file = NULL;
>>> +
>>> +int decoder_init_vaapi(AVCodecContext *ctx)
>>> +{
>>> +    int err = 0;
>>> +    const char *dev_name = "/dev/dri/renderD128";
>>> +
>>> +    if ((err = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI,
>>> +                                      dev_name, NULL, 0)) < 0) {
>>
>> The implementation already uses your dev_name as default, so you should
>> be able to drop that, and pass NULL to it.
>>
> 
> Yes, will remove the dev_name.
> 
>>> +        fprintf(stderr, "Failed to create a VAAPI device.\n");
>>> +        return err;
>>> +    }
>>> +    ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
>>> +
>>> +    return err;
>>> +}
>>> +
>>> +static enum AVPixelFormat get_vaapi_format(AVCodecContext *ctx,
>>> +                                           const enum AVPixelFormat *pix_fmts)
>>> +{
>>> +    const enum AVPixelFormat *p;
>>> +
>>> +    for (p = pix_fmts; *p != -1; p++) {
>>> +        if (*p == AV_PIX_FMT_VAAPI)
>>> +            return *p;
>>> +    }
>>> +
>>> +    return AV_PIX_FMT_NONE;
>>> +}
>>> +
>>> +int retrieve_data(AVFrame *input)
>>> +{
>>> +    AVFrame *output = NULL;
>>> +    int err;
>>> +    av_assert0(input->format == AV_PIX_FMT_VAAPI);
>>> +
>>> +    if (!(output = av_frame_alloc()))
>>> +        return AVERROR(ENOMEM);
>>> +    /* default output nv12 */
>>> +    output->format = AV_PIX_FMT_NV12;
>>
>> You could use av_hwframe_transfer_get_formats() instead of hardcoding
>> it. Might be better if the input is 10 bit, not sure. (Just a
>> suggestion, up to you.)
> OK, it will show the HW surface / SW pixel format mapping if we
> use av_hwframe_transfer_get_formats() instead of hardcoding.
> 
>>
>>> +    if ((err = av_hwframe_transfer_data(output, input, 0)) < 0) {
>>> +        fprintf(stderr, "Failed to transfer data to output frame: %d.\n", err);
>>> +        goto fail;
>>> +    }
>>> +
>>> +    if ((err = av_frame_copy_props(output, input)) < 0) {
>>> +        av_frame_unref(output);
>>> +        goto fail;
>>> +    }
>>> +
>>> +    av_frame_unref(input);
>>> +    av_frame_move_ref(input, output);
>>> +    av_frame_free(&output);
>>> +    return 0;
>>> +
>>> +fail:
>>> +    av_frame_free(&output);
>>> +    return err;
>>> +}
>>> +
>>> +int write_frame(AVFrame *frame)
>>> +{
>>> +    int idx, size;
>>> +    int width = frame->width;
>>> +    int height = frame->height;
>>> +
>>> +    av_assert0(frame && frame->data[0] && output_file);
>>> +
>>> +    for (idx = 0; idx < height; idx++) {
>>> +        if ((size = fwrite(frame->data[0] + idx*frame->linesize[0],
>>> +                           1, width, output_file)) < 0) {
>>> +            fprintf(stderr, "Dump Y to file error.\n");
>>> +            return -1;
>>> +        }
>>> +    }
>>> +
>>> +    height >>= 1;
>>> +    for (idx = 0; idx < height; idx++) {
>>> +        if ((size = fwrite(frame->data[1] + idx*frame->linesize[1],
>>> +                           1, width, output_file)) < 0) {
>>> +            fprintf(stderr, "Dump UV to file error.\n");
>>> +            return -1;
>>> +        }
>>> +    }
>>
>> Using av_image_copy_to_buffer() would be more generic, although it'd
>> require another allocation/copy in memory. (Just a suggestion.)
>>
> Will use av_image_copy_to_buffer().
> 
>>> +
>>> +    return 0;
>>> +}
>>> +
>>> +int decode_write(AVCodecContext *avctx, AVPacket packet, int flush)

I don't think you mean to pass the packet by value here.

>>> +{
>>> +    AVFrame *frame = NULL;
>>> +    int ret = 0;
>>> +
>>> +    ret = avcodec_send_packet(avctx, &packet);
>>> +    if (ret < 0 && ret != AVERROR_EOF)
>>> +        return 0;
>>> +
>>> +    if (!(frame = av_frame_alloc()))
>>> +        return AVERROR(ENOMEM);
>>> +
>>> +    ret = avcodec_receive_frame(avctx, frame);
>>> +    if (ret >= 0) {
>>> +        /* retrieve data from GPU to CPU */
>>> +        if ((ret = retrieve_data(frame)) < 0)
>>> +            goto fail;
>>> +
>>> +        if ((ret = write_frame(frame)) < 0)
>>> +            goto fail;
>>> +    } else if (flush == 0) {
>>> +        ret = 0;
>>> +    }
>>> +
>>> +fail:
>>> +    av_frame_free(&frame);
>>> +    return ret;
>>> +}
>>> +
>>> +/* flush the decoder */
>>> +int flush(AVCodecContext *avctx)
>>> +{
>>> +    AVPacket packet;
>>> +    int ret = 0;
>>> +
>>> +    av_init_packet(&packet);
>>> +    packet.data = NULL;
>>> +    packet.size = 0;

IIRC passing NULL directly to avcodec_send_packet() is preferable to an empty packet to flush?
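
E.g. the flush path could then look roughly like this (sketch, reusing the
retrieve_data()/write_frame() helpers from above):

    static int flush_decoder(AVCodecContext *avctx)
    {
        AVFrame *frame = av_frame_alloc();
        int ret;

        if (!frame)
            return AVERROR(ENOMEM);

        /* a NULL packet puts the decoder into draining mode */
        ret = avcodec_send_packet(avctx, NULL);

        while (ret >= 0) {
            ret = avcodec_receive_frame(avctx, frame);
            if (ret < 0)
                break;
            if ((ret = retrieve_data(frame)) < 0 ||
                (ret = write_frame(frame)) < 0)
                break;
            av_frame_unref(frame);
        }

        av_frame_free(&frame);
        return ret == AVERROR_EOF ? 0 : ret;
    }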

>>> +
>>> +    while (1) {
>>> +        if ((ret = decode_write(avctx, packet, 1)) < 0)
>>> +            break;
>>> +    }
>>> +    av_packet_unref(&packet);
>>> +
>>> +    return 0;
>>> +}
>>> +
>>> +int main(int argc, char *argv[])
>>> +{
>>> +    AVFormatContext *input_ctx = NULL;
>>> +    int i, video_stream, ret;
>>> +    AVCodecContext *decoder_ctx = NULL;
>>> +    AVCodec *decoder = NULL;
>>> +    AVPacket packet;
>>> +
>>> +    if (argc < 3) {
>>> +        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
>>> +        return -1;
>>> +    }
>>> +
>>> +    av_register_all();
>>> +
>>> +    /* open the input file */
>>> +    if (avformat_open_input(&input_ctx, argv[1], NULL, NULL) != 0) {
>>> +        fprintf(stderr, "Cannot open input file '%s'\n", argv[1]);
>>> +        return -1;
>>> +    }
>>> +
>>> +    if (avformat_find_stream_info(input_ctx, NULL) < 0) {
>>> +        fprintf(stderr, "Couldn't find input stream information.\n");
>>> +        goto fail;
>>> +    }
>>> +
>>> +    /* find the video stream information */
>>> +    video_stream = -1;
>>> +    for (i = 0; i < input_ctx->nb_streams; i++) {
>>> +        AVStream *st = input_ctx->streams[i];
>>> +        if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
>>> +            video_stream = i;        /* NOTE: just handle the first video stream */
>>> +            break;
>>> +        }
>>> +    }

Maybe replace this block with av_find_best_stream()?  (Assuming there isn't some reason I'm missing that that doesn't work...)
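
I.e. roughly:

    ret = av_find_best_stream(input_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, &decoder, 0);
    if (ret < 0) {
        fprintf(stderr, "Cannot find a video stream in the input file.\n");
        goto fail;
    }
    video_stream = ret;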

>>> +
>>> +    if (video_stream == -1) {
>>> +        fprintf(stderr, "No find a video stream in the input file.\n");
>>> +        goto fail;
>>> +    }
>>> +
>>> +    /* initialize the video decoder */
>>> +    if (!(decoder = avcodec_find_decoder(input_ctx->streams[video_stream]->codecpar->codec_id))) {
>>> +        fprintf(stderr, "Could not find input codec\n");
>>> +        goto fail;
>>> +    }
>>> +
>>> +    if (!(decoder_ctx = avcodec_alloc_context3(decoder)))
>>> +        return AVERROR(ENOMEM);
>>> +
>>> +    if (avcodec_parameters_to_context(decoder_ctx,
>>> +                                      input_ctx->streams[video_stream]->codecpar) < 0)
>>> +        goto fail;
>>> +
>>> +    decoder_ctx->get_format  = get_vaapi_format;
>>> +    av_opt_set_int(decoder_ctx, "refcounted_frames", 1, 0);
>>> +
>>> +    if (decoder_init_vaapi(decoder_ctx) < 0)
>>> +        goto fail;
>>> +
>>> +    if ((ret = avcodec_open2(decoder_ctx, decoder, NULL)) < 0) {
>>> +        fprintf(stderr, "Failed to open codec for stream #%u\n", i);
>>> +        goto fail;
>>> +    }
>>> +
>>> +    /* open the output file to dump YUV */
>>> +    output_file = fopen(argv[2], "w+");
>>> +
>>> +    /* actual decoding and dump the YUV data */
>>> +    while (av_read_frame(input_ctx, &packet) >= 0) {
>>> +        if (video_stream == packet.stream_index)
>>> +            ret = decode_write(decoder_ctx, packet, 0);
>>> +        av_packet_unref(&packet);
>>> +
>>> +        if (ret < 0)
>>> +            break;
>>> +    }
>>> +
>>> +    /* flush the decoder */
>>> +    flush(decoder_ctx);
>>> +
>>> +fail:
>>> +    if (output_file)
>>> +        fclose(output_file);
>>> +    avcodec_free_context(&decoder_ctx);
>>> +    avformat_close_input(&input_ctx);
>>> +    av_buffer_unref(&hw_device_ctx);
>>> +
>>> +    return ret;
>>> +}
>>
>> Generally looks OK. Can you replace all references to "vaapi" with "hw"
>> or so? This code will work for other hwaccels too, and the only things
>> that need to be switched are the AV_HWDEVICE_TYPE and the decoding
>> AVPixelFormat. The pixel format still needs to be mapped manually, but
>> we hope to add new API to make this also generic, so in the end you
>> could choose the hw implementation by passing a name to it or fully
>> automatically or so.
> 
> Good suggestion, will re-work for the general HWAccel decode sample.

Agree.  Maybe pass the type name as a first argument to make the command line like "argv[0] <vaapi|dxva2|d3d11va> <input file> <output file>"?

(Those three should work at least.  Videotoolbox might work as well, VDPAU would need different output formats.  Probably don't want more complexity than that, though, because it wants to be a clear example.)
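
A sketch of what that selection could look like (the table below is just my
assumption of a minimal mapping and needs string.h; hw_type / hw_pix_fmt would
then feed av_hwdevice_ctx_create() and the get_format callback):

    enum AVHWDeviceType hw_type    = AV_HWDEVICE_TYPE_VAAPI;
    enum AVPixelFormat  hw_pix_fmt = AV_PIX_FMT_VAAPI;

    static const struct {
        const char *name;
        enum AVHWDeviceType type;
        enum AVPixelFormat  pix_fmt;
    } hw_list[] = {
        { "vaapi",   AV_HWDEVICE_TYPE_VAAPI,   AV_PIX_FMT_VAAPI     },
        { "dxva2",   AV_HWDEVICE_TYPE_DXVA2,   AV_PIX_FMT_DXVA2_VLD },
        { "d3d11va", AV_HWDEVICE_TYPE_D3D11VA, AV_PIX_FMT_D3D11     },
    };
    int i;

    /* pick the pair matching the hwaccel name given on the command line */
    for (i = 0; i < sizeof(hw_list) / sizeof(hw_list[0]); i++) {
        if (!strcmp(argv[1], hw_list[i].name)) {
            hw_type    = hw_list[i].type;
            hw_pix_fmt = hw_list[i].pix_fmt;
            break;
        }
    }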

Rest looks good to me.

Thanks,

- Mark
Jun Zhao July 19, 2017, 12:50 a.m. UTC | #4
On 2017/7/19 7:30, Mark Thompson wrote:
> On 17/07/17 03:51, Jun Zhao wrote:
>> On 2017/7/11 15:58, wm4 wrote:
>>> On Tue, 11 Jul 2017 12:06:10 +0800
>>> Jun Zhao <mypopydev@gmail.com> wrote:
>>>
>>>> From 0e4d230ae4c98949a962c6bbdad31d216b54bb6a Mon Sep 17 00:00:00 2001
>>>> From: Jun Zhao <jun.zhao@intel.com>
>>>> Date: Tue, 21 Mar 2017 11:04:41 +0800
>>>> Subject: [V2] examples/vaapi_dec: Add a VA-API hwaccel decoding example.
>>>>
>>>> Add a VA-API hwaccel decoding example.
>>>>
>>>> Signed-off-by: Liu, Kaixuan <kaixuan.liu@intel.com>
>>>> Signed-off-by: Jun Zhao <jun.zhao@intel.com>
>>>> ---
>>>>  doc/examples/vaapi_dec.c | 266 +++++++++++++++++++++++++++++++++++++++++++++++
>>>>  1 file changed, 266 insertions(+)
>>>>  create mode 100644 doc/examples/vaapi_dec.c
>>>>
>>>> diff --git a/doc/examples/vaapi_dec.c b/doc/examples/vaapi_dec.c
>>>> new file mode 100644
>>>> index 0000000000..01320a3b71
>>>> --- /dev/null
>>>> +++ b/doc/examples/vaapi_dec.c
>>>> @@ -0,0 +1,266 @@
>>>> +/*
>>>> + * Video Acceleration API (video decoding) decode sample
>>>> + *
>>>> + * This file is part of FFmpeg.
>>>> + *
>>>> + * FFmpeg is free software; you can redistribute it and/or
>>>> + * modify it under the terms of the GNU Lesser General Public
>>>> + * License as published by the Free Software Foundation; either
>>>> + * version 2.1 of the License, or (at your option) any later version.
>>>> + *
>>>> + * FFmpeg is distributed in the hope that it will be useful,
>>>> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
>>>> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
>>>> + * Lesser General Public License for more details.
>>>> + *
>>>> + * You should have received a copy of the GNU Lesser General Public
>>>> + * License along with FFmpeg; if not, write to the Free Software
>>>> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
>>>> + */
>>>> +
>>>> +/**
>>>> + * @file
>>>> + * Intel VAAPI-accelerated decoding example.
>>>> + *
>>>> + * @example vaapi_dec.c
>>>> + * This example shows how to do VAAPI-accelerated decoding with output
>>>> + * frames from the VAAPI video surfaces.
>>>> + */
>>>> +
>>>> +#include <stdio.h>
>>>> +#include <libavcodec/avcodec.h>
>>>> +#include <libavcodec/vaapi.h>
> 
> This file is deprecated, and contains nothing useful anyway.
> 
Will remove.
>>>> +#include <libavformat/avformat.h>
>>>> +#include <libavutil/pixdesc.h>
>>>> +#include <libavutil/hwcontext.h>
>>>> +#include <libavutil/opt.h>
>>>> +#include <libavutil/hwcontext_vaapi.h>
> 
> Shouldn't need any of the API-specific hwcontext includes, just the generic one.
> 
Same as above.
>>>> +#include <libavutil/avassert.h>
>>>> +
>>>> +static AVBufferRef *hw_device_ctx = NULL;
>>>> +FILE *output_file = NULL;
>>>> +
>>>> +int decoder_init_vaapi(AVCodecContext *ctx)
>>>> +{
>>>> +    int err = 0;
>>>> +    const char *dev_name = "/dev/dri/renderD128";
>>>> +
>>>> +    if ((err = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI,
>>>> +                                      dev_name, NULL, 0)) < 0) {
>>>
>>> The implementation already uses your dev_name as default, so you should
>>> be able to drop that, and pass NULL to it.
>>>
>>
>> Yes, will remove the dev_name.
>>
>>>> +        fprintf(stderr, "Failed to create a VAAPI device.\n");
>>>> +        return err;
>>>> +    }
>>>> +    ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
>>>> +
>>>> +    return err;
>>>> +}
>>>> +
>>>> +static enum AVPixelFormat get_vaapi_format(AVCodecContext *ctx,
>>>> +                                           const enum AVPixelFormat *pix_fmts)
>>>> +{
>>>> +    const enum AVPixelFormat *p;
>>>> +
>>>> +    for (p = pix_fmts; *p != -1; p++) {
>>>> +        if (*p == AV_PIX_FMT_VAAPI)
>>>> +            return *p;
>>>> +    }
>>>> +
>>>> +    return AV_PIX_FMT_NONE;
>>>> +}
>>>> +
>>>> +int retrieve_data(AVFrame *input)
>>>> +{
>>>> +    AVFrame *output = NULL;
>>>> +    int err;
>>>> +    av_assert0(input->format == AV_PIX_FMT_VAAPI);
>>>> +
>>>> +    if (!(output = av_frame_alloc()))
>>>> +        return AVERROR(ENOMEM);
>>>> +    /* default output nv12 */
>>>> +    output->format = AV_PIX_FMT_NV12;
>>>
>>> You could use av_hwframe_transfer_get_formats() instead of hardcoding
>>> it. Might be better if the input is 10 bit, not sure. (Just a
>>> suggestion, up to you.)
>> OK, it will show the HW surface / SW pixel format mapping if we
>> use av_hwframe_transfer_get_formats() instead of hardcoding.
>>
>>>
>>>> +    if ((err = av_hwframe_transfer_data(output, input, 0)) < 0) {
>>>> +        fprintf(stderr, "Failed to transfer data to output frame: %d.\n", err);
>>>> +        goto fail;
>>>> +    }
>>>> +
>>>> +    if ((err = av_frame_copy_props(output, input)) < 0) {
>>>> +        av_frame_unref(output);
>>>> +        goto fail;
>>>> +    }
>>>> +
>>>> +    av_frame_unref(input);
>>>> +    av_frame_move_ref(input, output);
>>>> +    av_frame_free(&output);
>>>> +    return 0;
>>>> +
>>>> +fail:
>>>> +    av_frame_free(&output);
>>>> +    return err;
>>>> +}
>>>> +
>>>> +int write_frame(AVFrame *frame)
>>>> +{
>>>> +    int idx, size;
>>>> +    int width = frame->width;
>>>> +    int height = frame->height;
>>>> +
>>>> +    av_assert0(frame && frame->data[0] && output_file);
>>>> +
>>>> +    for (idx = 0; idx < height; idx++) {
>>>> +        if ((size = fwrite(frame->data[0] + idx*frame->linesize[0],
>>>> +                           1, width, output_file)) < 0) {
>>>> +            fprintf(stderr, "Dump Y to file error.\n");
>>>> +            return -1;
>>>> +        }
>>>> +    }
>>>> +
>>>> +    height >>= 1;
>>>> +    for (idx = 0; idx < height; idx++) {
>>>> +        if ((size = fwrite(frame->data[1] + idx*frame->linesize[1],
>>>> +                           1, width, output_file)) < 0) {
>>>> +            fprintf(stderr, "Dump UV to file error.\n");
>>>> +            return -1;
>>>> +        }
>>>> +    }
>>>
>>> Using av_image_copy_to_buffer() would be more generic, although it'd
>>> require another allocation/copy in memory. (Just a suggestion.)
>>>
>> Will use av_image_copy_to_buffer().
>>
>>>> +
>>>> +    return 0;
>>>> +}
>>>> +
>>>> +int decode_write(AVCodecContext *avctx, AVPacket packet, int flush)
> 
> I don't think you mean to pass the packet by value here.
> 
Yes, will use pointer in V3 patch.
>>>> +{
>>>> +    AVFrame *frame = NULL;
>>>> +    int ret = 0;
>>>> +
>>>> +    ret = avcodec_send_packet(avctx, &packet);
>>>> +    if (ret < 0 && ret != AVERROR_EOF)
>>>> +        return 0;
>>>> +
>>>> +    if (!(frame = av_frame_alloc()))
>>>> +        return AVERROR(ENOMEM);
>>>> +
>>>> +    ret = avcodec_receive_frame(avctx, frame);
>>>> +    if (ret >= 0) {
>>>> +        /* retrieve data from GPU to CPU */
>>>> +        if ((ret = retrieve_data(frame)) < 0)
>>>> +            goto fail;
>>>> +
>>>> +        if ((ret = write_frame(frame)) < 0)
>>>> +            goto fail;
>>>> +    } else if (flush == 0) {
>>>> +        ret = 0;
>>>> +    }
>>>> +
>>>> +fail:
>>>> +    av_frame_free(&frame);
>>>> +    return ret;
>>>> +}
>>>> +
>>>> +/* flush the decoder */
>>>> +int flush(AVCodecContext *avctx)
>>>> +{
>>>> +    AVPacket packet;
>>>> +    int ret = 0;
>>>> +
>>>> +    av_init_packet(&packet);
>>>> +    packet.data = NULL;
>>>> +    packet.size = 0;
> 
> IIRC passing NULL directly to avcodec_send_packet() is preferable to an empty packet to flush?
> 
Will change in the V3 patch :)
>>>> +
>>>> +    while (1) {
>>>> +        if ((ret = decode_write(avctx, packet, 1)) < 0)
>>>> +            break;
>>>> +    }
>>>> +    av_packet_unref(&packet);
>>>> +
>>>> +    return 0;
>>>> +}
>>>> +
>>>> +int main(int argc, char *argv[])
>>>> +{
>>>> +    AVFormatContext *input_ctx = NULL;
>>>> +    int i, video_stream, ret;
>>>> +    AVCodecContext *decoder_ctx = NULL;
>>>> +    AVCodec *decoder = NULL;
>>>> +    AVPacket packet;
>>>> +
>>>> +    if (argc < 3) {
>>>> +        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
>>>> +        return -1;
>>>> +    }
>>>> +
>>>> +    av_register_all();
>>>> +
>>>> +    /* open the input file */
>>>> +    if (avformat_open_input(&input_ctx, argv[1], NULL, NULL) != 0) {
>>>> +        fprintf(stderr, "Cannot open input file '%s'\n", argv[1]);
>>>> +        return -1;
>>>> +    }
>>>> +
>>>> +    if (avformat_find_stream_info(input_ctx, NULL) < 0) {
>>>> +        fprintf(stderr, "Couldn't find input stream information.\n");
>>>> +        goto fail;
>>>> +    }
>>>> +
>>>> +    /* find the video stream information */
>>>> +    video_stream = -1;
>>>> +    for (i = 0; i < input_ctx->nb_streams; i++) {
>>>> +        AVStream *st = input_ctx->streams[i];
>>>> +        if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
>>>> +            video_stream = i;        /* NOTE: just handle the first video stream */
>>>> +            break;
>>>> +        }
>>>> +    }
> 
> Maybe replace this block with av_find_best_stream()?  (Assuming there isn't some reason I'm missing that that doesn't work...)

Good! This function will reduce this example by about 10 lines of code.

> 
>>>> +
>>>> +    if (video_stream == -1) {
>>>> +        fprintf(stderr, "No find a video stream in the input file.\n");
>>>> +        goto fail;
>>>> +    }
>>>> +
>>>> +    /* initialize the video decoder */
>>>> +    if (!(decoder = avcodec_find_decoder(input_ctx->streams[video_stream]->codecpar->codec_id))) {
>>>> +        fprintf(stderr, "Could not find input codec\n");
>>>> +        goto fail;
>>>> +    }
>>>> +
>>>> +    if (!(decoder_ctx = avcodec_alloc_context3(decoder)))
>>>> +        return AVERROR(ENOMEM);
>>>> +
>>>> +    if (avcodec_parameters_to_context(decoder_ctx,
>>>> +                                      input_ctx->streams[video_stream]->codecpar) < 0)
>>>> +        goto fail;
>>>> +
>>>> +    decoder_ctx->get_format  = get_vaapi_format;
>>>> +    av_opt_set_int(decoder_ctx, "refcounted_frames", 1, 0);
>>>> +
>>>> +    if (decoder_init_vaapi(decoder_ctx) < 0)
>>>> +        goto fail;
>>>> +
>>>> +    if ((ret = avcodec_open2(decoder_ctx, decoder, NULL)) < 0) {
>>>> +        fprintf(stderr, "Failed to open codec for stream #%u\n", i);
>>>> +        goto fail;
>>>> +    }
>>>> +
>>>> +    /* open the output file to dump YUV */
>>>> +    output_file = fopen(argv[2], "w+");
>>>> +
>>>> +    /* actual decoding and dump the YUV data */
>>>> +    while (av_read_frame(input_ctx, &packet) >= 0) {
>>>> +        if (video_stream == packet.stream_index)
>>>> +            ret = decode_write(decoder_ctx, packet, 0);
>>>> +        av_packet_unref(&packet);
>>>> +
>>>> +        if (ret < 0)
>>>> +            break;
>>>> +    }
>>>> +
>>>> +    /* flush the decoder */
>>>> +    flush(decoder_ctx);
>>>> +
>>>> +fail:
>>>> +    if (output_file)
>>>> +        fclose(output_file);
>>>> +    avcodec_free_context(&decoder_ctx);
>>>> +    avformat_close_input(&input_ctx);
>>>> +    av_buffer_unref(&hw_device_ctx);
>>>> +
>>>> +    return ret;
>>>> +}
>>>
>>> Generally looks OK. Can you replace all references to "vaapi" with "hw"
>>> or so? This code will work for other hwaccels too, and the only things
>>> that need to be switched are the AV_HWDEVICE_TYPE and the decoding
>>> AVPixelFormat. The pixel format still needs to be mapped manually, but
>>> we hope to add new API to make this also generic, so in the end you
>>> could choose the hw implementation by passing a name to it or fully
>>> automatically or so.
>>
>> Good suggestion, will re-work for the general HWAccel decode sample.
> 
> Agree.  Maybe pass the type name as a first argument to make the command line like "argv[0] <vaapi|dxva2|d3d11va> <input file> <output file>"?
> 
> (Those three should work at least.  Videotoolbox might work as well, VDPAU would need different output formats.  Probably don't want more complexity than that, though, because it wants to be a clear example.)
> 
> Rest looks good to me.
> 
> Thanks,
> 
> - Mark

Patch

diff --git a/doc/examples/vaapi_dec.c b/doc/examples/vaapi_dec.c
new file mode 100644
index 0000000000..01320a3b71
--- /dev/null
+++ b/doc/examples/vaapi_dec.c
@@ -0,0 +1,266 @@ 
+/*
+ * Video Acceleration API (video decoding) decode sample
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+/**
+ * @file
+ * Intel VAAPI-accelerated decoding example.
+ *
+ * @example vaapi_dec.c
+ * This example shows how to do VAAPI-accelerated decoding with output
+ * frames from the VAAPI video surfaces.
+ */
+
+#include <stdio.h>
+#include <libavcodec/avcodec.h>
+#include <libavcodec/vaapi.h>
+#include <libavformat/avformat.h>
+#include <libavutil/pixdesc.h>
+#include <libavutil/hwcontext.h>
+#include <libavutil/opt.h>
+#include <libavutil/hwcontext_vaapi.h>
+#include <libavutil/avassert.h>
+
+static AVBufferRef *hw_device_ctx = NULL;
+FILE *output_file = NULL;
+
+int decoder_init_vaapi(AVCodecContext *ctx)
+{
+    int err = 0;
+    const char *dev_name = "/dev/dri/renderD128";
+
+    if ((err = av_hwdevice_ctx_create(&hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI,
+                                      dev_name, NULL, 0)) < 0) {
+        fprintf(stderr, "Failed to create a VAAPI device.\n");
+        return err;
+    }
+    ctx->hw_device_ctx = av_buffer_ref(hw_device_ctx);
+
+    return err;
+}
+
+static enum AVPixelFormat get_vaapi_format(AVCodecContext *ctx,
+                                           const enum AVPixelFormat *pix_fmts)
+{
+    const enum AVPixelFormat *p;
+
+    for (p = pix_fmts; *p != -1; p++) {
+        if (*p == AV_PIX_FMT_VAAPI)
+            return *p;
+    }
+
+    return AV_PIX_FMT_NONE;
+}
+
+int retrieve_data(AVFrame *input)
+{
+    AVFrame *output = NULL;
+    int err;
+    av_assert0(input->format == AV_PIX_FMT_VAAPI);
+
+    if (!(output = av_frame_alloc()))
+        return AVERROR(ENOMEM);
+    /* default output nv12 */
+    output->format = AV_PIX_FMT_NV12;
+    if ((err = av_hwframe_transfer_data(output, input, 0)) < 0) {
+        fprintf(stderr, "Failed to transfer data to output frame: %d.\n", err);
+        goto fail;
+    }
+
+    if ((err = av_frame_copy_props(output, input)) < 0) {
+        av_frame_unref(output);
+        goto fail;
+    }
+
+    av_frame_unref(input);
+    av_frame_move_ref(input, output);
+    av_frame_free(&output);
+    return 0;
+
+fail:
+    av_frame_free(&output);
+    return err;
+}
+
+int write_frame(AVFrame *frame)
+{
+    int idx, size;
+    int width = frame->width;
+    int height = frame->height;
+
+    av_assert0(frame && frame->data[0] && output_file);
+
+    for (idx = 0; idx < height; idx++) {
+        if ((size = fwrite(frame->data[0] + idx*frame->linesize[0],
+                           1, width, output_file)) < 0) {
+            fprintf(stderr, "Dump Y to file error.\n");
+            return -1;
+        }
+    }
+
+    height >>= 1;
+    for (idx = 0; idx < height; idx++) {
+        if ((size = fwrite(frame->data[1] + idx*frame->linesize[1],
+                           1, width, output_file)) < 0) {
+            fprintf(stderr, "Dump UV to file error.\n");
+            return -1;
+        }
+    }
+
+    return 0;
+}
+
+int decode_write(AVCodecContext *avctx, AVPacket packet, int flush)
+{
+    AVFrame *frame = NULL;
+    int ret = 0;
+
+    ret = avcodec_send_packet(avctx, &packet);
+    if (ret < 0 && ret != AVERROR_EOF)
+        return 0;
+
+    if (!(frame = av_frame_alloc()))
+        return AVERROR(ENOMEM);
+
+    ret = avcodec_receive_frame(avctx, frame);
+    if (ret >= 0) {
+        /* retrieve data from GPU to CPU */
+        if ((ret = retrieve_data(frame)) < 0)
+            goto fail;
+
+        if ((ret = write_frame(frame)) < 0)
+            goto fail;
+    } else if (flush == 0) {
+        ret = 0;
+    }
+
+fail:
+    av_frame_free(&frame);
+    return ret;
+}
+
+/* flush the decoder */
+int flush(AVCodecContext *avctx)
+{
+    AVPacket packet;
+    int ret = 0;
+
+    av_init_packet(&packet);
+    packet.data = NULL;
+    packet.size = 0;
+
+    while (1) {
+        if ((ret = decode_write(avctx, packet, 1)) < 0)
+            break;
+    }
+    av_packet_unref(&packet);
+
+    return 0;
+}
+
+int main(int argc, char *argv[])
+{
+    AVFormatContext *input_ctx = NULL;
+    int i, video_stream, ret;
+    AVCodecContext *decoder_ctx = NULL;
+    AVCodec *decoder = NULL;
+    AVPacket packet;
+
+    if (argc < 3) {
+        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
+        return -1;
+    }
+
+    av_register_all();
+
+    /* open the input file */
+    if (avformat_open_input(&input_ctx, argv[1], NULL, NULL) != 0) {
+        fprintf(stderr, "Cannot open input file '%s'\n", argv[1]);
+        return -1;
+    }
+
+    if (avformat_find_stream_info(input_ctx, NULL) < 0) {
+        fprintf(stderr, "Couldn't find input stream information.\n");
+        goto fail;
+    }
+
+    /* find the video stream information */
+    video_stream = -1;
+    for (i = 0; i < input_ctx->nb_streams; i++) {
+        AVStream *st = input_ctx->streams[i];
+        if (st->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
+            video_stream = i;        /* NOTE: just handle the first video stream */
+            break;
+        }
+    }
+
+    if (video_stream == -1) {
+        fprintf(stderr, "No find a video stream in the input file.\n");
+        goto fail;
+    }
+
+    /* initialize the video decoder */
+    if (!(decoder = avcodec_find_decoder(input_ctx->streams[video_stream]->codecpar->codec_id))) {
+        fprintf(stderr, "Could not find input codec\n");
+        goto fail;
+    }
+
+    if (!(decoder_ctx = avcodec_alloc_context3(decoder)))
+        return AVERROR(ENOMEM);
+
+    if (avcodec_parameters_to_context(decoder_ctx,
+                                      input_ctx->streams[video_stream]->codecpar) < 0)
+        goto fail;
+
+    decoder_ctx->get_format  = get_vaapi_format;
+    av_opt_set_int(decoder_ctx, "refcounted_frames", 1, 0);
+
+    if (decoder_init_vaapi(decoder_ctx) < 0)
+        goto fail;
+
+    if ((ret = avcodec_open2(decoder_ctx, decoder, NULL)) < 0) {
+        fprintf(stderr, "Failed to open codec for stream #%u\n", i);
+        goto fail;
+    }
+
+    /* open the output file to dump YUV */
+    output_file = fopen(argv[2], "w+");
+
+    /* actual decoding and dump the YUV data */
+    while (av_read_frame(input_ctx, &packet) >= 0) {
+        if (video_stream == packet.stream_index)
+            ret = decode_write(decoder_ctx, packet, 0);
+        av_packet_unref(&packet);
+
+        if (ret < 0)
+            break;
+    }
+
+    /* flush the decoder */
+    flush(decoder_ctx);
+
+fail:
+    if (output_file)
+        fclose(output_file);
+    avcodec_free_context(&decoder_ctx);
+    avformat_close_input(&input_ctx);
+    av_buffer_unref(&hw_device_ctx);
+
+    return ret;
+}