@@ -391,7 +391,6 @@ static int aic_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
uint32_t off;
int x, y, ret;
int slice_size;
- ThreadFrame frame = { .f = data };
ctx->frame = data;
ctx->frame->pict_type = AV_PICTURE_TYPE_I;
@@ -410,7 +409,7 @@ static int aic_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
return ret;
}
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, ctx->frame, 0)) < 0)
return ret;
bytestream2_init(&gb, buf + AIC_HDR_SIZE,
@@ -270,10 +270,9 @@ static int decode_element(AVCodecContext *avctx, AVFrame *frame, int ch_index,
return AVERROR_INVALIDDATA;
}
if (!alac->nb_samples) {
- ThreadFrame tframe = { .f = frame };
/* get output buffer */
frame->nb_samples = output_samples;
- if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
return ret;
} else if (output_samples != alac->nb_samples) {
av_log(avctx, AV_LOG_ERROR, "sample count mismatch: %"PRIu32" != %d\n",
@@ -27,6 +27,7 @@
#include "hwconfig.h"
#include "internal.h"
#include "profiles.h"
+#include "thread.h"
/**< same as Div_Lut defined in spec 7.11.3.7 */
static const uint16_t div_lut[AV1_DIV_LUT_NUM] = {
@@ -569,7 +570,7 @@ static int get_pixel_format(AVCodecContext *avctx)
static void av1_frame_unref(AVCodecContext *avctx, AV1Frame *f)
{
- ff_thread_release_buffer(avctx, &f->tf);
+ ff_thread_release_buffer(avctx, f->f);
av_buffer_unref(&f->hwaccel_priv_buf);
f->hwaccel_picture_private = NULL;
av_buffer_unref(&f->header_ref);
@@ -591,10 +592,10 @@ static int av1_frame_ref(AVCodecContext *avctx, AV1Frame *dst, const AV1Frame *s
dst->raw_frame_header = src->raw_frame_header;
- if (!src->tf.f->buf[0])
+ if (!src->f->buf[0])
return 0;
- ret = ff_thread_ref_frame(&dst->tf, &src->tf);
+ ret = av_frame_ref(dst->f, src->f);
if (ret < 0)
goto fail;
@@ -637,10 +638,10 @@ static av_cold int av1_decode_free(AVCodecContext *avctx)
for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
av1_frame_unref(avctx, &s->ref[i]);
- av_frame_free(&s->ref[i].tf.f);
+ av_frame_free(&s->ref[i].f);
}
av1_frame_unref(avctx, &s->cur_frame);
- av_frame_free(&s->cur_frame.tf.f);
+ av_frame_free(&s->cur_frame.f);
av_buffer_unref(&s->seq_ref);
av_buffer_unref(&s->header_ref);
@@ -741,16 +742,16 @@ static av_cold int av1_decode_init(AVCodecContext *avctx)
s->pix_fmt = AV_PIX_FMT_NONE;
for (int i = 0; i < FF_ARRAY_ELEMS(s->ref); i++) {
- s->ref[i].tf.f = av_frame_alloc();
- if (!s->ref[i].tf.f) {
+ s->ref[i].f = av_frame_alloc();
+ if (!s->ref[i].f) {
av_log(avctx, AV_LOG_ERROR,
"Failed to allocate reference frame buffer %d.\n", i);
return AVERROR(ENOMEM);
}
}
- s->cur_frame.tf.f = av_frame_alloc();
- if (!s->cur_frame.tf.f) {
+ s->cur_frame.f = av_frame_alloc();
+ if (!s->cur_frame.f) {
av_log(avctx, AV_LOG_ERROR,
"Failed to allocate current frame buffer.\n");
return AVERROR(ENOMEM);
@@ -803,10 +804,10 @@ static int av1_frame_alloc(AVCodecContext *avctx, AV1Frame *f)
return ret;
}
- if ((ret = ff_thread_get_buffer(avctx, &f->tf, AV_GET_BUFFER_FLAG_REF)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, f->f, AV_GET_BUFFER_FLAG_REF)) < 0)
goto fail;
- frame = f->tf.f;
+ frame = f->f;
frame->key_frame = header->frame_type == AV1_FRAME_KEY;
switch (header->frame_type) {
@@ -905,7 +906,7 @@ static int set_output_frame(AVCodecContext *avctx, AVFrame *frame,
const AVPacket *pkt, int *got_frame)
{
AV1DecContext *s = avctx->priv_data;
- const AVFrame *srcframe = s->cur_frame.tf.f;
+ const AVFrame *srcframe = s->cur_frame.f;
int ret;
// TODO: all layers
@@ -1101,7 +1102,7 @@ static int av1_decode_frame(AVCodecContext *avctx, void *frame,
goto end;
}
- if (s->cur_frame.tf.f->buf[0]) {
+ if (s->cur_frame.f->buf[0]) {
ret = set_output_frame(avctx, frame, pkt, got_frame);
if (ret < 0)
av_log(avctx, AV_LOG_ERROR, "Set output frame error.\n");
@@ -1121,7 +1122,7 @@ static int av1_decode_frame(AVCodecContext *avctx, void *frame,
s->cur_frame.spatial_id = header->spatial_id;
s->cur_frame.temporal_id = header->temporal_id;
- if (avctx->hwaccel && s->cur_frame.tf.f->buf[0]) {
+ if (avctx->hwaccel && s->cur_frame.f->buf[0]) {
ret = avctx->hwaccel->start_frame(avctx, unit->data,
unit->data_size);
if (ret < 0) {
@@ -1148,7 +1149,7 @@ static int av1_decode_frame(AVCodecContext *avctx, void *frame,
if (ret < 0)
goto end;
- if (avctx->hwaccel && s->cur_frame.tf.f->buf[0]) {
+ if (avctx->hwaccel && s->cur_frame.f->buf[0]) {
ret = avctx->hwaccel->decode_slice(avctx,
raw_tile_group->tile_data.data,
raw_tile_group->tile_data.data_size);
@@ -1171,7 +1172,7 @@ static int av1_decode_frame(AVCodecContext *avctx, void *frame,
}
if (raw_tile_group && (s->tile_num == raw_tile_group->tg_end + 1)) {
- if (avctx->hwaccel && s->cur_frame.tf.f->buf[0]) {
+ if (avctx->hwaccel && s->cur_frame.f->buf[0]) {
ret = avctx->hwaccel->end_frame(avctx);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "HW accel end frame fail.\n");
@@ -1185,7 +1186,7 @@ static int av1_decode_frame(AVCodecContext *avctx, void *frame,
goto end;
}
- if (s->raw_frame_header->show_frame && s->cur_frame.tf.f->buf[0]) {
+ if (s->raw_frame_header->show_frame && s->cur_frame.f->buf[0]) {
ret = set_output_frame(avctx, frame, pkt, got_frame);
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Set output frame error\n");
@@ -24,14 +24,14 @@
#include <stdint.h>
#include "libavutil/buffer.h"
+#include "libavutil/frame.h"
#include "libavutil/pixfmt.h"
#include "avcodec.h"
#include "cbs.h"
#include "cbs_av1.h"
-#include "thread.h"
typedef struct AV1Frame {
- ThreadFrame tf;
+ AVFrame *f;
AVBufferRef *hwaccel_priv_buf;
void *hwaccel_picture_private;
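The struct change above is the pivot for all of the av1dec.c hunks earlier in this patch: the native AV1 decoder never runs frame-threaded (it is hwaccel-oriented), so ThreadFrame's owner and progress fields were dead weight, and once AV1Frame carries a bare AVFrame pointer the reference management collapses onto plain libavutil refcounting. A minimal sketch of the resulting pattern, using only names introduced by this patch (error paths trimmed):

    /* Sketch of the post-patch AV1Frame ref pattern; not itself part
     * of the patch. */
    static int sketch_av1_frame_ref(AV1Frame *dst, const AV1Frame *src)
    {
        if (!src->f->buf[0])                 /* nothing decoded yet */
            return 0;
        return av_frame_ref(dst->f, src->f); /* plain AVFrame refcount */
    }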
@@ -65,12 +65,11 @@ static int bitpacked_decode_yuv422p10(AVCodecContext *avctx, AVFrame *frame,
{
uint64_t frame_size = (uint64_t)avctx->width * (uint64_t)avctx->height * 20;
uint64_t packet_size = (uint64_t)avpkt->size * 8;
- ThreadFrame tframe = { .f = frame };
GetBitContext bc;
uint16_t *y, *u, *v;
int ret, i, j;
- ret = ff_thread_get_buffer(avctx, &tframe, 0);
+ ret = ff_thread_get_buffer(avctx, frame, 0);
if (ret < 0)
return ret;
@@ -378,8 +378,7 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
CFHDContext *s = avctx->priv_data;
CFHDDSPContext *dsp = &s->dsp;
GetByteContext gb;
- ThreadFrame frame = { .f = data };
- AVFrame *pic = data;
+ AVFrame *const pic = data;
int ret = 0, i, j, plane, got_buffer = 0;
int16_t *coeff_data;
@@ -681,10 +680,9 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
return AVERROR_INVALIDDATA;
avctx->height = height;
}
- frame.f->width =
- frame.f->height = 0;
+ pic->width = pic->height = 0;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
s->coded_width = 0;
@@ -692,10 +690,9 @@ static int cfhd_decode(AVCodecContext *avctx, void *data, int *got_frame,
s->coded_format = AV_PIX_FMT_NONE;
got_buffer = 1;
} else if (tag == FrameIndex && data == 1 && s->sample_type == 1 && s->frame_type == 2) {
- frame.f->width =
- frame.f->height = 0;
+ pic->width = pic->height = 0;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
s->coded_width = 0;
s->coded_height = 0;
@@ -360,7 +360,6 @@ static int cllc_decode_frame(AVCodecContext *avctx, void *data,
{
CLLCContext *ctx = avctx->priv_data;
AVFrame *pic = data;
- ThreadFrame frame = { .f = data };
uint8_t *src = avpkt->data;
uint32_t info_tag, info_offset;
int data_size;
@@ -424,7 +423,7 @@ static int cllc_decode_frame(AVCodecContext *avctx, void *data,
avctx->pix_fmt = AV_PIX_FMT_YUV422P;
avctx->bits_per_raw_sample = 8;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
ret = decode_yuv_frame(ctx, &gb, pic);
@@ -437,7 +436,7 @@ static int cllc_decode_frame(AVCodecContext *avctx, void *data,
avctx->pix_fmt = AV_PIX_FMT_RGB24;
avctx->bits_per_raw_sample = 8;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
ret = decode_rgb24_frame(ctx, &gb, pic);
@@ -449,7 +448,7 @@ static int cllc_decode_frame(AVCodecContext *avctx, void *data,
avctx->pix_fmt = AV_PIX_FMT_ARGB;
avctx->bits_per_raw_sample = 8;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
ret = decode_argb_frame(ctx, &gb, pic);
@@ -174,7 +174,6 @@ static int cri_decode_frame(AVCodecContext *avctx, void *data,
{
CRIContext *s = avctx->priv_data;
GetByteContext *gb = &s->gb;
- ThreadFrame frame = { .f = data };
int ret, bps, hflip = 0, vflip = 0;
AVFrameSideData *rotation;
int compressed = 0;
@@ -318,7 +317,7 @@ skip:
if (!s->data || !s->data_size)
return AVERROR_INVALIDDATA;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
avctx->bits_per_raw_sample = bps;
@@ -618,7 +618,6 @@ static int dnxhd_decode_frame(AVCodecContext *avctx, void *data,
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
DNXHDContext *ctx = avctx->priv_data;
- ThreadFrame frame = { .f = data };
AVFrame *picture = data;
int first_field = 1;
int ret, i;
@@ -650,7 +649,7 @@ decode_coding_unit:
return ret;
if (first_field) {
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, picture, 0)) < 0)
return ret;
picture->pict_type = AV_PICTURE_TYPE_I;
picture->key_frame = 1;
@@ -612,7 +612,7 @@ static int dvvideo_decode_frame(AVCodecContext *avctx, void *data,
uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
DVVideoContext *s = avctx->priv_data;
- ThreadFrame frame = { .f = data };
+ AVFrame *const frame = data;
const uint8_t *vsc_pack;
int apt, is16_9, ret;
const AVDVProfile *sys;
@@ -633,9 +633,9 @@ static int dvvideo_decode_frame(AVCodecContext *avctx, void *data,
s->sys = sys;
}
- s->frame = frame.f;
- frame.f->key_frame = 1;
- frame.f->pict_type = AV_PICTURE_TYPE_I;
+ s->frame = frame;
+ frame->key_frame = 1;
+ frame->pict_type = AV_PICTURE_TYPE_I;
avctx->pix_fmt = s->sys->pix_fmt;
avctx->framerate = av_inv_q(s->sys->time_base);
@@ -652,20 +652,20 @@ static int dvvideo_decode_frame(AVCodecContext *avctx, void *data,
ff_set_sar(avctx, s->sys->sar[is16_9]);
}
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
return ret;
/* Determine the codec's field order from the packet */
if ( *vsc_pack == dv_video_control ) {
if (avctx->height == 720) {
- frame.f->interlaced_frame = 0;
- frame.f->top_field_first = 0;
+ frame->interlaced_frame = 0;
+ frame->top_field_first = 0;
} else if (avctx->height == 1080) {
- frame.f->interlaced_frame = 1;
- frame.f->top_field_first = (vsc_pack[3] & 0x40) == 0x40;
+ frame->interlaced_frame = 1;
+ frame->top_field_first = (vsc_pack[3] & 0x40) == 0x40;
} else {
- frame.f->interlaced_frame = (vsc_pack[3] & 0x10) == 0x10;
- frame.f->top_field_first = !(vsc_pack[3] & 0x40);
+ frame->interlaced_frame = (vsc_pack[3] & 0x10) == 0x10;
+ frame->top_field_first = !(vsc_pack[3] & 0x40);
}
}
@@ -93,7 +93,6 @@ static int dxtory_decode_v1_rgb(AVCodecContext *avctx, AVFrame *pic,
const uint8_t *src, int src_size,
int id, int bpp, uint32_t vflipped)
{
- ThreadFrame frame = { .f = pic };
int h;
uint8_t *dst;
int ret;
@@ -104,7 +103,7 @@ static int dxtory_decode_v1_rgb(AVCodecContext *avctx, AVFrame *pic,
}
avctx->pix_fmt = id;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
do_vflip(avctx, pic, vflipped);
@@ -125,7 +124,6 @@ static int dxtory_decode_v1_410(AVCodecContext *avctx, AVFrame *pic,
const uint8_t *src, int src_size,
uint32_t vflipped)
{
- ThreadFrame frame = { .f = pic };
int h, w;
uint8_t *Y1, *Y2, *Y3, *Y4, *U, *V;
int height, width, hmargin, vmargin;
@@ -138,7 +136,7 @@ static int dxtory_decode_v1_410(AVCodecContext *avctx, AVFrame *pic,
}
avctx->pix_fmt = AV_PIX_FMT_YUV410P;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
do_vflip(avctx, pic, vflipped);
@@ -221,7 +219,6 @@ static int dxtory_decode_v1_420(AVCodecContext *avctx, AVFrame *pic,
const uint8_t *src, int src_size,
uint32_t vflipped)
{
- ThreadFrame frame = { .f = pic };
int h, w;
uint8_t *Y1, *Y2, *U, *V;
int height, width, hmargin, vmargin;
@@ -234,7 +231,7 @@ static int dxtory_decode_v1_420(AVCodecContext *avctx, AVFrame *pic,
}
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
do_vflip(avctx, pic, vflipped);
@@ -294,7 +291,6 @@ static int dxtory_decode_v1_444(AVCodecContext *avctx, AVFrame *pic,
const uint8_t *src, int src_size,
uint32_t vflipped)
{
- ThreadFrame frame = { .f = pic };
int h, w;
uint8_t *Y, *U, *V;
int ret;
@@ -305,7 +301,7 @@ static int dxtory_decode_v1_444(AVCodecContext *avctx, AVFrame *pic,
}
avctx->pix_fmt = AV_PIX_FMT_YUV444P;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
do_vflip(avctx, pic, vflipped);
@@ -430,7 +426,6 @@ static int dxtory_decode_v2(AVCodecContext *avctx, AVFrame *pic,
enum AVPixelFormat fmt,
uint32_t vflipped)
{
- ThreadFrame frame = { .f = pic };
GetByteContext gb, gb_check;
GetBitContext gb2;
int nslices, slice, line = 0;
@@ -457,7 +452,7 @@ static int dxtory_decode_v2(AVCodecContext *avctx, AVFrame *pic,
return AVERROR_INVALIDDATA;
avctx->pix_fmt = fmt;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
do_vflip(avctx, pic, vflipped);
@@ -1042,7 +1042,7 @@ static int dxv_decode(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
DXVContext *ctx = avctx->priv_data;
- ThreadFrame tframe;
+ AVFrame *const frame = data;
GetByteContext *gbc = &ctx->gbc;
int (*decompress_tex)(AVCodecContext *avctx);
const char *msgcomp, *msgtext;
@@ -1211,18 +1211,17 @@ static int dxv_decode(AVCodecContext *avctx, void *data,
return AVERROR_INVALIDDATA;
}
- tframe.f = data;
- ret = ff_thread_get_buffer(avctx, &tframe, 0);
+ ret = ff_thread_get_buffer(avctx, frame, 0);
if (ret < 0)
return ret;
/* Now decompress the texture with the standard functions. */
avctx->execute2(avctx, decompress_texture_thread,
- tframe.f, NULL, ctx->slice_count);
+ frame, NULL, ctx->slice_count);
/* Frame is ready to be output. */
- tframe.f->pict_type = AV_PICTURE_TYPE_I;
- tframe.f->key_frame = 1;
+ frame->pict_type = AV_PICTURE_TYPE_I;
+ frame->key_frame = 1;
*got_frame = 1;
return avpkt->size;
@@ -72,7 +72,7 @@ static int fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *c
pp->max_width = seq->max_frame_width_minus_1 + 1;
pp->max_height = seq->max_frame_height_minus_1 + 1;
- pp->CurrPicTextureIndex = ff_dxva2_get_surface_index(avctx, ctx, h->cur_frame.tf.f);
+ pp->CurrPicTextureIndex = ff_dxva2_get_surface_index(avctx, ctx, h->cur_frame.f);
pp->superres_denom = frame_header->use_superres ? frame_header->coded_denom + AV1_SUPERRES_DENOM_MIN : AV1_SUPERRES_NUM;
pp->bitdepth = get_bit_depth_from_seq(seq);
pp->seq_profile = seq->seq_profile;
@@ -132,7 +132,7 @@ static int fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *c
memset(pp->RefFrameMapTextureIndex, 0xFF, sizeof(pp->RefFrameMapTextureIndex));
for (i = 0; i < AV1_REFS_PER_FRAME; i++) {
int8_t ref_idx = frame_header->ref_frame_idx[i];
- AVFrame *ref_frame = h->ref[ref_idx].tf.f;
+ AVFrame *ref_frame = h->ref[ref_idx].f;
pp->frame_refs[i].width = ref_frame->width;
pp->frame_refs[i].height = ref_frame->height;
@@ -146,7 +146,7 @@ static int fill_picture_parameters(const AVCodecContext *avctx, AVDXVAContext *c
}
}
for (i = 0; i < AV1_NUM_REF_FRAMES; i++) {
- AVFrame *ref_frame = h->ref[i].tf.f;
+ AVFrame *ref_frame = h->ref[i].f;
if (ref_frame->buf[0])
pp->RefFrameMapTextureIndex[i] = ff_dxva2_get_surface_index(avctx, ctx, ref_frame);
}
@@ -436,7 +436,7 @@ static int dxva2_av1_end_frame(AVCodecContext *avctx)
if (ctx_pic->bitstream_size <= 0)
return -1;
- ret = ff_dxva2_common_end_frame(avctx, h->cur_frame.tf.f,
+ ret = ff_dxva2_common_end_frame(avctx, h->cur_frame.f,
&ctx_pic->pp, sizeof(ctx_pic->pp),
NULL, 0,
commit_bitstream_and_slice_buffer);
@@ -24,7 +24,7 @@
#include "avcodec.h"
#include "me_cmp.h"
-#include "thread.h"
+#include "threadframe.h"
///< current MB is the first after a resync marker
#define VP_START 1
@@ -2027,7 +2027,6 @@ static int decode_frame(AVCodecContext *avctx, void *data,
{
EXRContext *s = avctx->priv_data;
GetByteContext *gb = &s->gb;
- ThreadFrame frame = { .f = data };
AVFrame *picture = data;
uint8_t *ptr;
@@ -2149,7 +2148,7 @@ static int decode_frame(AVCodecContext *avctx, void *data,
s->scan_lines_per_block;
}
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, picture, 0)) < 0)
return ret;
if (bytestream2_get_bytes_left(gb)/8 < nb_blocks)
@@ -38,7 +38,7 @@
#include "mathops.h"
#include "put_bits.h"
#include "rangecoder.h"
-#include "thread.h"
+#include "threadframe.h"
#ifdef __INTEL_COMPILER
#undef av_flatten
@@ -37,6 +37,7 @@
#include "golomb.h"
#include "mathops.h"
#include "ffv1.h"
+#include "thread.h"
#include "threadframe.h"
static inline av_flatten int get_symbol_inline(RangeCoder *c, uint8_t *state,
@@ -559,7 +559,6 @@ static int flac_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame_ptr, AVPacket *avpkt)
{
AVFrame *frame = data;
- ThreadFrame tframe = { .f = data };
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
FLACContext *s = avctx->priv_data;
@@ -618,7 +617,7 @@ static int flac_decode_frame(AVCodecContext *avctx, void *data,
/* get output buffer */
frame->nb_samples = s->blocksize;
- if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
return ret;
s->dsp.decorrelate[s->ch_mode](frame->data, s->decoded,
@@ -140,7 +140,6 @@ static int decode_frame(AVCodecContext *avctx,
FrapsContext * const s = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- ThreadFrame frame = { .f = data };
AVFrame * const f = data;
uint32_t header;
unsigned int version,header_size;
@@ -227,7 +226,7 @@ static int decode_frame(AVCodecContext *avctx,
: AVCOL_RANGE_JPEG;
avctx->colorspace = version & 1 ? AVCOL_SPC_UNSPECIFIED : AVCOL_SPC_BT709;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, f, 0)) < 0)
return ret;
switch (version) {
@@ -30,18 +30,19 @@
#include "avcodec.h"
#include "h264dec.h"
#include "mpegutils.h"
+#include "thread.h"
#include "threadframe.h"
void ff_h264_unref_picture(H264Context *h, H264Picture *pic)
{
- int off = offsetof(H264Picture, tf_grain) + sizeof(pic->tf_grain);
+ int off = offsetof(H264Picture, f_grain) + sizeof(pic->f_grain);
int i;
if (!pic->f || !pic->f->buf[0])
return;
ff_thread_release_ext_buffer(h->avctx, &pic->tf);
- ff_thread_release_buffer(h->avctx, &pic->tf_grain);
+ ff_thread_release_buffer(h->avctx, pic->f_grain);
av_buffer_unref(&pic->hwaccel_priv_buf);
av_buffer_unref(&pic->qscale_table_buf);
@@ -102,9 +103,7 @@ int ff_h264_ref_picture(H264Context *h, H264Picture *dst, H264Picture *src)
goto fail;
if (src->needs_fg) {
- av_assert0(src->tf_grain.f == src->f_grain);
- dst->tf_grain.f = dst->f_grain;
- ret = ff_thread_ref_frame(&dst->tf_grain, &src->tf_grain);
+ ret = av_frame_ref(dst->f_grain, src->f_grain);
if (ret < 0)
goto fail;
}
@@ -161,10 +160,8 @@ int ff_h264_replace_picture(H264Context *h, H264Picture *dst, const H264Picture
goto fail;
if (src->needs_fg) {
- av_assert0(src->tf_grain.f == src->f_grain);
- dst->tf_grain.f = dst->f_grain;
- ff_thread_release_buffer(h->avctx, &dst->tf_grain);
- ret = ff_thread_ref_frame(&dst->tf_grain, &src->tf_grain);
+ ff_thread_release_buffer(h->avctx, dst->f_grain);
+ ret = av_frame_ref(dst->f_grain, src->f_grain);
if (ret < 0)
goto fail;
}
@@ -45,6 +45,7 @@
#include "mathops.h"
#include "mpegutils.h"
#include "rectangle.h"
+#include "thread.h"
#include "threadframe.h"
static const uint8_t field_scan[16+1] = {
@@ -197,11 +198,10 @@ static int alloc_picture(H264Context *h, H264Picture *pic)
goto fail;
if (pic->needs_fg) {
- pic->tf_grain.f = pic->f_grain;
pic->f_grain->format = pic->f->format;
pic->f_grain->width = pic->f->width;
pic->f_grain->height = pic->f->height;
- ret = ff_thread_get_buffer(h->avctx, &pic->tf_grain, 0);
+ ret = ff_thread_get_buffer(h->avctx, pic->f_grain, 0);
if (ret < 0)
goto fail;
}
@@ -46,6 +46,7 @@
#include "mpegutils.h"
#include "profiles.h"
#include "rectangle.h"
+#include "thread.h"
#include "threadframe.h"
const uint16_t ff_h264_mb_sizes[4] = { 256, 384, 512, 768 };
@@ -109,7 +109,6 @@ typedef struct H264Picture {
ThreadFrame tf;
AVFrame *f_grain;
- ThreadFrame tf_grain;
AVBufferRef *qscale_table_buf;
int8_t *qscale_table;
@@ -305,7 +305,7 @@ static int hap_decode(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
HapContext *ctx = avctx->priv_data;
- ThreadFrame tframe;
+ AVFrame *const frame = data;
int ret, i, t;
int section_size;
enum HapSectionType section_type;
@@ -330,8 +330,7 @@ static int hap_decode(AVCodecContext *avctx, void *data,
}
/* Get the output frame ready to receive data */
- tframe.f = data;
- ret = ff_thread_get_buffer(avctx, &tframe, 0);
+ ret = ff_thread_get_buffer(avctx, frame, 0);
if (ret < 0)
return ret;
@@ -383,16 +382,15 @@ static int hap_decode(AVCodecContext *avctx, void *data,
/* Use the decompress function on the texture, one block per thread */
if (t == 0){
- avctx->execute2(avctx, decompress_texture_thread, tframe.f, NULL, ctx->slice_count);
+ avctx->execute2(avctx, decompress_texture_thread, frame, NULL, ctx->slice_count);
} else{
- tframe.f = data;
- avctx->execute2(avctx, decompress_texture2_thread, tframe.f, NULL, ctx->slice_count);
+ avctx->execute2(avctx, decompress_texture2_thread, frame, NULL, ctx->slice_count);
}
}
/* Frame is ready to be output */
- tframe.f->pict_type = AV_PICTURE_TYPE_I;
- tframe.f->key_frame = 1;
+ frame->pict_type = AV_PICTURE_TYPE_I;
+ frame->key_frame = 1;
*got_frame = 1;
return avpkt->size;
@@ -39,7 +39,7 @@ void ff_hevc_unref_frame(HEVCContext *s, HEVCFrame *frame, int flags)
frame->flags &= ~flags;
if (!frame->flags) {
ff_thread_release_ext_buffer(s->avctx, &frame->tf);
- ff_thread_release_buffer(s->avctx, &frame->tf_grain);
+ ff_thread_release_buffer(s->avctx, frame->frame_grain);
frame->needs_fg = 0;
av_buffer_unref(&frame->tab_mvf_buf);
@@ -45,6 +45,7 @@
#include "hevcdec.h"
#include "hwconfig.h"
#include "profiles.h"
+#include "thread.h"
#include "threadframe.h"
const uint8_t ff_hevc_pel_weight[65] = { [2] = 0, [4] = 1, [6] = 2, [8] = 3, [12] = 4, [16] = 5, [24] = 6, [32] = 7, [48] = 8, [64] = 9 };
@@ -3026,7 +3027,7 @@ static int hevc_frame_start(HEVCContext *s)
s->ref->frame_grain->format = s->ref->frame->format;
s->ref->frame_grain->width = s->ref->frame->width;
s->ref->frame_grain->height = s->ref->frame->height;
- if ((ret = ff_thread_get_buffer(s->avctx, &s->ref->tf_grain, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(s->avctx, s->ref->frame_grain, 0)) < 0)
goto fail;
}
@@ -3533,7 +3534,7 @@ static int hevc_ref_frame(HEVCContext *s, HEVCFrame *dst, HEVCFrame *src)
return ret;
if (src->needs_fg) {
- ret = ff_thread_ref_frame(&dst->tf_grain, &src->tf_grain);
+ ret = av_frame_ref(dst->frame_grain, src->frame_grain);
if (ret < 0)
return ret;
dst->needs_fg = 1;
@@ -3652,7 +3653,6 @@ static av_cold int hevc_init_context(AVCodecContext *avctx)
s->DPB[i].frame_grain = av_frame_alloc();
if (!s->DPB[i].frame_grain)
goto fail;
- s->DPB[i].tf_grain.f = s->DPB[i].frame_grain;
}
s->max_ra = INT_MAX;
@@ -42,7 +42,7 @@
#include "hevcdsp.h"
#include "h274.h"
#include "internal.h"
-#include "thread.h"
+#include "threadframe.h"
#include "videodsp.h"
#define SHIFT_CTB_WPP 2
@@ -396,7 +396,6 @@ typedef struct HEVCFrame {
AVFrame *frame;
AVFrame *frame_grain;
ThreadFrame tf;
- ThreadFrame tf_grain;
int needs_fg; /* 1 if grain needs to be applied by the decoder */
MvField *tab_mvf;
RefPicList *refPicList;
@@ -404,7 +404,7 @@ static int hqx_decode_frame(AVCodecContext *avctx, void *data,
int *got_picture_ptr, AVPacket *avpkt)
{
HQXContext *ctx = avctx->priv_data;
- ThreadFrame frame = { .f = data };
+ AVFrame *const frame = data;
uint8_t *src = avpkt->data;
uint32_t info_tag;
int data_start;
@@ -499,7 +499,7 @@ static int hqx_decode_frame(AVCodecContext *avctx, void *data,
return AVERROR_INVALIDDATA;
}
- ret = ff_thread_get_buffer(avctx, &frame, 0);
+ ret = ff_thread_get_buffer(avctx, frame, 0);
if (ret < 0)
return ret;
@@ -1185,7 +1185,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
HYuvContext *s = avctx->priv_data;
const int width = s->width;
const int height = s->height;
- ThreadFrame frame = { .f = data };
AVFrame *const p = data;
int slice, table_size = 0, ret, nb_slices;
unsigned slices_info_offset;
@@ -1203,7 +1202,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
s->bdsp.bswap_buf((uint32_t *) s->bitstream_buffer,
(const uint32_t *) buf, buf_size / 4);
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
if (s->context) {
@@ -2476,7 +2476,6 @@ static int jpeg2000_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
Jpeg2000DecoderContext *s = avctx->priv_data;
- ThreadFrame frame = { .f = data };
AVFrame *picture = data;
int ret;
@@ -2517,7 +2516,7 @@ static int jpeg2000_decode_frame(AVCodecContext *avctx, void *data,
goto end;
/* get picture buffer */
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, picture, 0)) < 0)
goto end;
picture->pict_type = AV_PICTURE_TYPE_I;
picture->key_frame = 1;
@@ -540,7 +540,6 @@ static int lag_decode_frame(AVCodecContext *avctx,
const uint8_t *buf = avpkt->data;
unsigned int buf_size = avpkt->size;
LagarithContext *l = avctx->priv_data;
- ThreadFrame frame = { .f = data };
AVFrame *const p = data;
uint8_t frametype;
uint32_t offset_gu = 0, offset_bv = 0, offset_ry = 9;
@@ -569,7 +568,7 @@ static int lag_decode_frame(AVCodecContext *avctx,
planes = 4;
}
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
if (frametype == FRAME_SOLID_RGBA) {
@@ -593,7 +592,7 @@ static int lag_decode_frame(AVCodecContext *avctx,
avctx->pix_fmt = AV_PIX_FMT_GBRAP;
}
- if ((ret = ff_thread_get_buffer(avctx, &frame,0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
for (i = 0; i < avctx->height; i++) {
@@ -614,7 +613,7 @@ static int lag_decode_frame(AVCodecContext *avctx,
if (frametype == FRAME_ARITH_RGB24 || frametype == FRAME_U_RGB24)
avctx->pix_fmt = AV_PIX_FMT_GBRP;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
offs[0] = offset_bv;
@@ -650,7 +649,7 @@ static int lag_decode_frame(AVCodecContext *avctx,
case FRAME_ARITH_YUY2:
avctx->pix_fmt = AV_PIX_FMT_YUV422P;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
if (offset_ry >= buf_size ||
@@ -678,7 +677,7 @@ static int lag_decode_frame(AVCodecContext *avctx,
case FRAME_ARITH_YV12:
avctx->pix_fmt = AV_PIX_FMT_YUV420P;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
if (offset_ry >= buf_size ||
@@ -158,7 +158,6 @@ static int zlib_decomp(AVCodecContext *avctx, const uint8_t *src, int src_len, i
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPacket *avpkt)
{
AVFrame *frame = data;
- ThreadFrame tframe = { .f = data };
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
LclDecContext * const c = avctx->priv_data;
@@ -175,7 +174,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame, AVPac
unsigned int len = buf_size;
int linesize, offset;
- if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
return ret;
outptr = frame->data[0]; // Output image pointer
@@ -324,7 +324,6 @@ static int libopenjpeg_decode_frame(AVCodecContext *avctx,
uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
LibOpenJPEGContext *ctx = avctx->priv_data;
- ThreadFrame frame = { .f = data };
AVFrame *picture = data;
const AVPixFmtDescriptor *desc;
int width, height, ret;
@@ -417,7 +416,7 @@ static int libopenjpeg_decode_frame(AVCodecContext *avctx,
if (image->comps[i].prec > avctx->bits_per_raw_sample)
avctx->bits_per_raw_sample = image->comps[i].prec;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, picture, 0)) < 0)
goto done;
ret = !opj_decode(dec, stream, image);
@@ -431,7 +431,6 @@ static int magy_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
MagicYUVContext *s = avctx->priv_data;
- ThreadFrame frame = { .f = data };
AVFrame *p = data;
GetByteContext gb;
uint32_t first_offset, offset, next_offset, header_size, slice_width;
@@ -641,7 +640,7 @@ static int magy_decode_frame(AVCodecContext *avctx, void *data,
p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
s->buf = avpkt->data;
@@ -42,7 +42,6 @@ typedef struct MDECContext {
BlockDSPContext bdsp;
BswapDSPContext bbdsp;
IDCTDSPContext idsp;
- ThreadFrame frame;
GetBitContext gb;
ScanTable scantable;
int version;
@@ -174,13 +173,13 @@ static int decode_frame(AVCodecContext *avctx,
MDECContext * const a = avctx->priv_data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
- ThreadFrame frame = { .f = data };
+ AVFrame *const frame = data;
int ret;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
return ret;
- frame.f->pict_type = AV_PICTURE_TYPE_I;
- frame.f->key_frame = 1;
+ frame->pict_type = AV_PICTURE_TYPE_I;
+ frame->key_frame = 1;
av_fast_padded_malloc(&a->bitstream_buffer, &a->bitstream_buffer_size, buf_size);
if (!a->bitstream_buffer)
@@ -202,7 +201,7 @@ static int decode_frame(AVCodecContext *avctx,
if ((ret = decode_mb(a, a->block)) < 0)
return ret;
- idct_put(a, frame.f, a->mb_x, a->mb_y);
+ idct_put(a, frame, a->mb_x, a->mb_y);
}
}
@@ -27,7 +27,7 @@
#include "avcodec.h"
#include "motion_est.h"
-#include "thread.h"
+#include "threadframe.h"
#define MPEGVIDEO_MAX_PLANES 4
#define MAX_PICTURE_COUNT 36
@@ -146,7 +146,7 @@ static int lz4_decompress(AVCodecContext *avctx,
return bytestream2_tell_p(pb);
}
-static int decode_blocks(AVCodecContext *avctx, AVFrame *p, ThreadFrame *frame,
+static int decode_blocks(AVCodecContext *avctx, AVFrame *p,
unsigned uncompressed_size)
{
NotchLCContext *s = avctx->priv_data;
@@ -221,7 +221,7 @@ static int decode_blocks(AVCodecContext *avctx, AVFrame *p, ThreadFrame *frame,
return AVERROR_INVALIDDATA;
s->uv_count_offset = s->y_data_offset - s->a_data_offset;
- if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
rgb = *gb;
@@ -464,7 +464,6 @@ static int decode_frame(AVCodecContext *avctx,
AVPacket *avpkt)
{
NotchLCContext *s = avctx->priv_data;
- ThreadFrame frame = { .f = data };
GetByteContext *gb = &s->gb;
PutByteContext *pb = &s->pb;
unsigned uncompressed_size;
@@ -513,7 +512,7 @@ static int decode_frame(AVCodecContext *avctx,
bytestream2_init(gb, s->uncompressed_buffer, uncompressed_size);
}
- ret = decode_blocks(avctx, p, &frame, uncompressed_size);
+ ret = decode_blocks(avctx, p, uncompressed_size);
if (ret < 0)
return ret;
@@ -49,7 +49,7 @@ static int nvdec_av1_start_frame(AVCodecContext *avctx, const uint8_t *buffer, u
CUVIDAV1PICPARAMS *ppc = &pp->CodecSpecific.av1;
FrameDecodeData *fdd;
NVDECFrame *cf;
- AVFrame *cur_frame = s->cur_frame.tf.f;
+ AVFrame *cur_frame = s->cur_frame.f;
unsigned char remap_lr_type[4] = { AV1_RESTORE_NONE, AV1_RESTORE_SWITCHABLE, AV1_RESTORE_WIENER, AV1_RESTORE_SGRPROJ };
@@ -233,7 +233,7 @@ static int nvdec_av1_start_frame(AVCodecContext *avctx, const uint8_t *buffer, u
ppc->loop_filter_ref_deltas[i] = frame_header->loop_filter_ref_deltas[i];
/* Reference Frames */
- ppc->ref_frame_map[i] = ff_nvdec_get_ref_idx(s->ref[i].tf.f);
+ ppc->ref_frame_map[i] = ff_nvdec_get_ref_idx(s->ref[i].f);
}
if (frame_header->primary_ref_frame == AV1_PRIMARY_REF_NONE) {
@@ -246,7 +246,7 @@ static int nvdec_av1_start_frame(AVCodecContext *avctx, const uint8_t *buffer, u
for (i = 0; i < AV1_REFS_PER_FRAME; ++i) {
/* Ref Frame List */
int8_t ref_idx = frame_header->ref_frame_idx[i];
- AVFrame *ref_frame = s->ref[ref_idx].tf.f;
+ AVFrame *ref_frame = s->ref[ref_idx].f;
ppc->ref_frame[i].index = ppc->ref_frame_map[ref_idx];
ppc->ref_frame[i].width = ref_frame->width;
@@ -293,7 +293,6 @@ static int photocd_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
PhotoCDContext *s = avctx->priv_data;
- ThreadFrame frame = { .f = data };
const uint8_t *buf = avpkt->data;
GetByteContext *gb = &s->gb;
AVFrame *p = data;
@@ -326,7 +325,7 @@ static int photocd_decode_frame(AVCodecContext *avctx, void *data,
if (ret < 0)
return ret;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
p->pict_type = AV_PICTURE_TYPE_I;
@@ -606,7 +606,6 @@ static int pixlet_decode_frame(AVCodecContext *avctx, void *data,
PixletContext *ctx = avctx->priv_data;
int i, w, h, width, height, ret, version;
AVFrame *p = data;
- ThreadFrame frame = { .f = data };
uint32_t pktsize, depth;
bytestream2_init(&ctx->gb, avpkt->data, avpkt->size);
@@ -673,20 +672,20 @@ static int pixlet_decode_frame(AVCodecContext *avctx, void *data,
p->key_frame = 1;
p->color_range = AVCOL_RANGE_JPEG;
- ret = ff_thread_get_buffer(avctx, &frame, 0);
+ ret = ff_thread_get_buffer(avctx, p, 0);
if (ret < 0)
return ret;
for (i = 0; i < 3; i++) {
- ret = decode_plane(avctx, i, avpkt, frame.f);
+ ret = decode_plane(avctx, i, avpkt, p);
if (ret < 0)
return ret;
if (avctx->flags & AV_CODEC_FLAG_GRAY)
break;
}
- postprocess_luma(avctx, frame.f, ctx->w, ctx->h, ctx->depth);
- postprocess_chroma(frame.f, ctx->w >> 1, ctx->h >> 1, ctx->depth);
+ postprocess_luma(avctx, p, ctx->w, ctx->h, ctx->depth);
+ postprocess_chroma(p, ctx->w >> 1, ctx->h >> 1, ctx->depth);
*got_frame = 1;
@@ -779,7 +779,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
AVPacket *avpkt)
{
ProresContext *ctx = avctx->priv_data;
- ThreadFrame tframe = { .f = data };
AVFrame *frame = data;
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
@@ -805,7 +804,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
buf += frame_hdr_size;
buf_size -= frame_hdr_size;
- if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
return ret;
ff_thread_finish_setup(avctx);
@@ -948,15 +948,13 @@ FF_ENABLE_DEPRECATION_WARNINGS
return 1;
}
-static int thread_get_buffer_internal(AVCodecContext *avctx, ThreadFrame *f, int flags)
+static int thread_get_buffer_internal(AVCodecContext *avctx, AVFrame *f, int flags)
{
PerThreadContext *p;
int err;
- f->owner[0] = f->owner[1] = avctx;
-
if (!(avctx->active_thread_type & FF_THREAD_FRAME))
- return ff_get_buffer(avctx, f->f, flags);
+ return ff_get_buffer(avctx, f, flags);
p = avctx->internal->thread_ctx;
FF_DISABLE_DEPRECATION_WARNINGS
@@ -971,28 +969,16 @@ FF_ENABLE_DEPRECATION_WARNINGS
return -1;
}
- if (avctx->codec->caps_internal & FF_CODEC_CAP_ALLOCATE_PROGRESS) {
- atomic_int *progress;
- f->progress = av_buffer_alloc(2 * sizeof(*progress));
- if (!f->progress) {
- return AVERROR(ENOMEM);
- }
- progress = (atomic_int*)f->progress->data;
-
- atomic_init(&progress[0], -1);
- atomic_init(&progress[1], -1);
- }
-
pthread_mutex_lock(&p->parent->buffer_mutex);
#if !FF_API_THREAD_SAFE_CALLBACKS
- err = ff_get_buffer(avctx, f->f, flags);
+ err = ff_get_buffer(avctx, f, flags);
#else
FF_DISABLE_DEPRECATION_WARNINGS
if (THREAD_SAFE_CALLBACKS(avctx)) {
- err = ff_get_buffer(avctx, f->f, flags);
+ err = ff_get_buffer(avctx, f, flags);
} else {
pthread_mutex_lock(&p->progress_mutex);
- p->requested_frame = f->f;
+ p->requested_frame = f;
p->requested_flags = flags;
atomic_store_explicit(&p->state, STATE_GET_BUFFER, memory_order_release);
pthread_cond_broadcast(&p->progress_cond);
@@ -1009,8 +995,6 @@ FF_DISABLE_DEPRECATION_WARNINGS
ff_thread_finish_setup(avctx);
FF_ENABLE_DEPRECATION_WARNINGS
#endif
- if (err)
- av_buffer_unref(&f->progress);
pthread_mutex_unlock(&p->parent->buffer_mutex);
@@ -1049,7 +1033,7 @@ enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixe
FF_ENABLE_DEPRECATION_WARNINGS
#endif
-int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
+int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
{
int ret = thread_get_buffer_internal(avctx, f, flags);
if (ret < 0)
@@ -1059,10 +1043,36 @@ int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
{
- return ff_thread_get_buffer(avctx, f, flags);
+ int ret;
+
+ f->owner[0] = f->owner[1] = avctx;
+ /* Hint: It is possible for this function to be called with codecs
+ * that don't support frame threading at all, namely in case
+ * a frame-threaded decoder shares code with codecs that are not.
+ * This currently affects non-MPEG-4 mpegvideo codecs and VP7.
+ * The following check will always be true for them. */
+ if (!(avctx->active_thread_type & FF_THREAD_FRAME))
+ return ff_get_buffer(avctx, f->f, flags);
+
+ if (avctx->codec->caps_internal & FF_CODEC_CAP_ALLOCATE_PROGRESS) {
+ atomic_int *progress;
+ f->progress = av_buffer_alloc(2 * sizeof(*progress));
+ if (!f->progress) {
+ return AVERROR(ENOMEM);
+ }
+ progress = (atomic_int*)f->progress->data;
+
+ atomic_init(&progress[0], -1);
+ atomic_init(&progress[1], -1);
+ }
+
+ ret = ff_thread_get_buffer(avctx, f->f, flags);
+ if (ret)
+ av_buffer_unref(&f->progress);
+ return ret;
}
-void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
+void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
{
#if FF_API_THREAD_SAFE_CALLBACKS
FF_DISABLE_DEPRECATION_WARNINGS
@@ -1075,21 +1085,18 @@ FF_DISABLE_DEPRECATION_WARNINGS
FF_ENABLE_DEPRECATION_WARNINGS
#endif
- if (!f->f)
+ if (!f)
return;
if (avctx->debug & FF_DEBUG_BUFFERS)
av_log(avctx, AV_LOG_DEBUG, "thread_release_buffer called on pic %p\n", f);
- av_buffer_unref(&f->progress);
- f->owner[0] = f->owner[1] = NULL;
-
#if !FF_API_THREAD_SAFE_CALLBACKS
- av_frame_unref(f->f);
+ av_frame_unref(f);
#else
// when the frame buffers are not allocated, just reset it to clean state
- if (can_direct_free || !f->f->buf[0]) {
- av_frame_unref(f->f);
+ if (can_direct_free || !f->buf[0]) {
+ av_frame_unref(f);
return;
}
@@ -1113,7 +1120,7 @@ FF_ENABLE_DEPRECATION_WARNINGS
}
dst = p->released_buffers[p->num_released_buffers];
- av_frame_move_ref(dst, f->f);
+ av_frame_move_ref(dst, f);
p->num_released_buffers++;
@@ -1124,15 +1131,17 @@ fail:
// this leaks, but it is better than crashing
if (ret < 0) {
av_log(avctx, AV_LOG_ERROR, "Could not queue a frame for freeing, this will leak\n");
- memset(f->f->buf, 0, sizeof(f->f->buf));
- if (f->f->extended_buf)
- memset(f->f->extended_buf, 0, f->f->nb_extended_buf * sizeof(*f->f->extended_buf));
- av_frame_unref(f->f);
+ memset(f->buf, 0, sizeof(f->buf));
+ if (f->extended_buf)
+ memset(f->extended_buf, 0, f->nb_extended_buf * sizeof(*f->extended_buf));
+ av_frame_unref(f);
}
#endif
}
void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
{
- ff_thread_release_buffer(avctx, f);
+ av_buffer_unref(&f->progress);
+ f->owner[0] = f->owner[1] = NULL;
+ ff_thread_release_buffer(avctx, f->f);
}
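Taken together, the pthread_frame.c hunks split the API in two: ff_thread_get_buffer()/ff_thread_release_buffer() now deal only in bare AVFrames, while the _ext_ pair owns the ThreadFrame extras (owner[] and the progress buffer) needed by decoders that report decode progress across threads. A hedged usage sketch follows; the two wrapper functions are hypothetical, only the ff_thread_* calls exist in the tree:

    /* Hypothetical callers illustrating the split. */
    static int get_plain_frame(AVCodecContext *avctx, AVFrame *frame)
    {
        /* no ThreadFrame wrapper is needed any more */
        return ff_thread_get_buffer(avctx, frame, 0);
    }

    static int get_progress_frame(AVCodecContext *avctx, ThreadFrame *tf)
    {
        /* sets tf->owner[] and, for codecs with
         * FF_CODEC_CAP_ALLOCATE_PROGRESS, allocates tf->progress */
        return ff_thread_get_ext_buffer(avctx, tf, AV_GET_BUFFER_FLAG_REF);
    }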
@@ -41,6 +41,7 @@
#include "mpeg_er.h"
#include "qpeldsp.h"
#include "rectangle.h"
+#include "thread.h"
#include "threadframe.h"
#include "rv34vlc.h"
@@ -1810,7 +1810,6 @@ static int decode_frame(AVCodecContext *avctx,
AVPacket *avpkt)
{
SheerVideoContext *s = avctx->priv_data;
- ThreadFrame frame = { .f = data };
const SheerTable *table;
AVFrame *p = data;
GetBitContext gb;
@@ -1982,7 +1981,7 @@ static int decode_frame(AVCodecContext *avctx,
p->pict_type = AV_PICTURE_TYPE_I;
p->key_frame = 1;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
if ((ret = init_get_bits8(&gb, avpkt->data + 20, avpkt->size - 20)) < 0)
@@ -679,7 +679,6 @@ static int tak_decode_frame(AVCodecContext *avctx, void *data,
{
TAKDecContext *s = avctx->priv_data;
AVFrame *frame = data;
- ThreadFrame tframe = { .f = data };
GetBitContext *gb = &s->gb;
int chan, i, ret, hsize;
@@ -742,7 +741,7 @@ static int tak_decode_frame(AVCodecContext *avctx, void *data,
: s->ti.frame_samples;
frame->nb_samples = s->nb_samples;
- if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
return ret;
ff_thread_finish_setup(avctx);
@@ -31,14 +31,6 @@
#include "avcodec.h"
-typedef struct ThreadFrame {
- AVFrame *f;
- AVCodecContext *owner[2];
- // progress->data is an array of 2 ints holding progress for top/bottom
- // fields
- AVBufferRef *progress;
-} ThreadFrame;
-
/**
* Wait for decoding threads to finish and reset internal state.
* Called by avcodec_flush_buffers().
@@ -92,7 +84,7 @@ enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixe
* @param avctx The current context.
* @param f The frame to write into.
*/
-int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags);
+int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags);
/**
* Wrapper around release_buffer() for frame-multithreaded codecs.
@@ -105,9 +97,7 @@ int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags);
* @param avctx The current context.
* @param f The picture being released.
*/
-void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f);
-
-int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src);
+void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f);
int ff_thread_init(AVCodecContext *s);
int ff_slice_thread_execute_with_mainfunc(AVCodecContext *avctx,
@@ -21,8 +21,16 @@
#ifndef AVCODEC_THREADFRAME_H
#define AVCODEC_THREADFRAME_H
+#include "libavutil/frame.h"
#include "avcodec.h"
-#include "thread.h"
+
+typedef struct ThreadFrame {
+ AVFrame *f;
+ AVCodecContext *owner[2];
+ // progress->data is an array of 2 ints holding progress for top/bottom
+ // fields
+ AVBufferRef *progress;
+} ThreadFrame;
/**
* Notify later decoding threads when part of their reference picture is ready.
@@ -74,4 +82,6 @@ int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags);
*/
void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f);
+int ff_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src);
+
#endif
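With ThreadFrame and ff_thread_ref_frame() now private to threadframe.h, copying a reference still has to duplicate all three pieces of state the struct carries. Below is a semantics sketch based on the existing ff_thread_ref_frame() implementation, which this patch relocates but does not change:

    /* Sketch only; the real implementation lives elsewhere in lavc. */
    static int sketch_thread_ref_frame(ThreadFrame *dst, const ThreadFrame *src)
    {
        int ret;
        dst->owner[0] = src->owner[0];
        dst->owner[1] = src->owner[1];
        ret = av_frame_ref(dst->f, src->f);      /* frame data */
        if (ret < 0)
            return ret;
        if (src->progress) {                     /* shared progress ints */
            dst->progress = av_buffer_ref(src->progress);
            if (!dst->progress) {
                av_frame_unref(dst->f);
                return AVERROR(ENOMEM);
            }
        }
        return 0;
    }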
@@ -1016,7 +1016,7 @@ static int dng_decode_tiles(AVCodecContext *avctx, AVFrame *frame,
return avpkt->size;
}
-static int init_image(TiffContext *s, ThreadFrame *frame)
+static int init_image(TiffContext *s, AVFrame *frame)
{
int ret;
int create_gray_palette = 0;
@@ -1177,11 +1177,11 @@ static int init_image(TiffContext *s, ThreadFrame *frame)
return ret;
if (s->avctx->pix_fmt == AV_PIX_FMT_PAL8) {
if (!create_gray_palette)
- memcpy(frame->f->data[1], s->palette, sizeof(s->palette));
+ memcpy(frame->data[1], s->palette, sizeof(s->palette));
else {
/* make default grayscale pal */
int i;
- uint32_t *pal = (uint32_t *)frame->f->data[1];
+ uint32_t *pal = (uint32_t *)frame->data[1];
for (i = 0; i < 1<<s->bpp; i++)
pal[i] = 0xFFU << 24 | i * 255 / ((1<<s->bpp) - 1) * 0x010101;
}
@@ -1743,7 +1743,6 @@ static int decode_frame(AVCodecContext *avctx,
{
TiffContext *const s = avctx->priv_data;
AVFrame *const p = data;
- ThreadFrame frame = { .f = data };
unsigned off, last_off;
int le, ret, plane, planes;
int i, j, entries, stride;
@@ -1894,7 +1893,7 @@ again:
}
/* now we have the data and may start decoding */
- if ((ret = init_image(s, &frame)) < 0)
+ if ((ret = init_image(s, p)) < 0)
return ret;
if (!s->is_tiled || has_strip_bits) {
@@ -222,7 +222,6 @@ static int tta_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame_ptr, AVPacket *avpkt)
{
AVFrame *frame = data;
- ThreadFrame tframe = { .f = data };
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
TTAContext *s = avctx->priv_data;
@@ -242,7 +241,7 @@ static int tta_decode_frame(AVCodecContext *avctx, void *data,
/* get output buffer */
frame->nb_samples = framelen;
- if ((ret = ff_thread_get_buffer(avctx, &tframe, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
return ret;
// decode directly to output buffer for 24-bit sample format
@@ -893,10 +893,9 @@ enum AVPixelFormat ff_thread_get_format(AVCodecContext *avctx, const enum AVPixe
return ff_get_format(avctx, fmt);
}
-int ff_thread_get_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
+int ff_thread_get_buffer(AVCodecContext *avctx, AVFrame *f, int flags)
{
- f->owner[0] = f->owner[1] = avctx;
- return ff_get_buffer(avctx, f->f, flags);
+ return ff_get_buffer(avctx, f, flags);
}
int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
@@ -905,10 +904,10 @@ int ff_thread_get_ext_buffer(AVCodecContext *avctx, ThreadFrame *f, int flags)
return ff_get_buffer(avctx, f->f, flags);
}
-void ff_thread_release_buffer(AVCodecContext *avctx, ThreadFrame *f)
+void ff_thread_release_buffer(AVCodecContext *avctx, AVFrame *f)
{
- if (f->f)
- av_frame_unref(f->f);
+ if (f)
+ av_frame_unref(f);
}
void ff_thread_release_ext_buffer(AVCodecContext *avctx, ThreadFrame *f)
@@ -563,14 +563,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
UtvideoContext *c = avctx->priv_data;
+ AVFrame *const frame = data;
int i, j;
const uint8_t *plane_start[5];
int plane_size, max_slice_size = 0, slice_start, slice_end, slice_size;
int ret;
GetByteContext gb;
- ThreadFrame frame = { .f = data };
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
return ret;
/* parse plane structure to get frame flags and validate slice offsets */
@@ -709,80 +709,80 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
case AV_PIX_FMT_GBRP:
case AV_PIX_FMT_GBRAP:
for (i = 0; i < c->planes; i++) {
- ret = decode_plane(c, i, frame.f->data[i],
- frame.f->linesize[i], avctx->width,
+ ret = decode_plane(c, i, frame->data[i],
+ frame->linesize[i], avctx->width,
avctx->height, plane_start[i],
c->frame_pred == PRED_LEFT);
if (ret)
return ret;
if (c->frame_pred == PRED_MEDIAN) {
if (!c->interlaced) {
- restore_median_planar(c, frame.f->data[i],
- frame.f->linesize[i], avctx->width,
+ restore_median_planar(c, frame->data[i],
+ frame->linesize[i], avctx->width,
avctx->height, c->slices, 0);
} else {
- restore_median_planar_il(c, frame.f->data[i],
- frame.f->linesize[i],
+ restore_median_planar_il(c, frame->data[i],
+ frame->linesize[i],
avctx->width, avctx->height, c->slices,
0);
}
} else if (c->frame_pred == PRED_GRADIENT) {
if (!c->interlaced) {
- restore_gradient_planar(c, frame.f->data[i],
- frame.f->linesize[i], avctx->width,
+ restore_gradient_planar(c, frame->data[i],
+ frame->linesize[i], avctx->width,
avctx->height, c->slices, 0);
} else {
- restore_gradient_planar_il(c, frame.f->data[i],
- frame.f->linesize[i],
+ restore_gradient_planar_il(c, frame->data[i],
+ frame->linesize[i],
avctx->width, avctx->height, c->slices,
0);
}
}
}
- c->utdsp.restore_rgb_planes(frame.f->data[2], frame.f->data[0], frame.f->data[1],
- frame.f->linesize[2], frame.f->linesize[0], frame.f->linesize[1],
+ c->utdsp.restore_rgb_planes(frame->data[2], frame->data[0], frame->data[1],
+ frame->linesize[2], frame->linesize[0], frame->linesize[1],
avctx->width, avctx->height);
break;
case AV_PIX_FMT_GBRAP10:
case AV_PIX_FMT_GBRP10:
for (i = 0; i < c->planes; i++) {
- ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i],
- frame.f->linesize[i] / 2, avctx->width,
+ ret = decode_plane10(c, i, (uint16_t *)frame->data[i],
+ frame->linesize[i] / 2, avctx->width,
avctx->height, plane_start[i],
plane_start[i + 1] - 1024,
c->frame_pred == PRED_LEFT);
if (ret)
return ret;
}
- c->utdsp.restore_rgb_planes10((uint16_t *)frame.f->data[2], (uint16_t *)frame.f->data[0], (uint16_t *)frame.f->data[1],
- frame.f->linesize[2] / 2, frame.f->linesize[0] / 2, frame.f->linesize[1] / 2,
+ c->utdsp.restore_rgb_planes10((uint16_t *)frame->data[2], (uint16_t *)frame->data[0], (uint16_t *)frame->data[1],
+ frame->linesize[2] / 2, frame->linesize[0] / 2, frame->linesize[1] / 2,
avctx->width, avctx->height);
break;
case AV_PIX_FMT_YUV420P:
for (i = 0; i < 3; i++) {
- ret = decode_plane(c, i, frame.f->data[i], frame.f->linesize[i],
+ ret = decode_plane(c, i, frame->data[i], frame->linesize[i],
avctx->width >> !!i, avctx->height >> !!i,
plane_start[i], c->frame_pred == PRED_LEFT);
if (ret)
return ret;
if (c->frame_pred == PRED_MEDIAN) {
if (!c->interlaced) {
- restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
+ restore_median_planar(c, frame->data[i], frame->linesize[i],
avctx->width >> !!i, avctx->height >> !!i,
c->slices, !i);
} else {
- restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+ restore_median_planar_il(c, frame->data[i], frame->linesize[i],
avctx->width >> !!i,
avctx->height >> !!i,
c->slices, !i);
}
} else if (c->frame_pred == PRED_GRADIENT) {
if (!c->interlaced) {
- restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
+ restore_gradient_planar(c, frame->data[i], frame->linesize[i],
avctx->width >> !!i, avctx->height >> !!i,
c->slices, !i);
} else {
- restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+ restore_gradient_planar_il(c, frame->data[i], frame->linesize[i],
avctx->width >> !!i,
avctx->height >> !!i,
c->slices, !i);
@@ -792,28 +792,28 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
break;
case AV_PIX_FMT_YUV422P:
for (i = 0; i < 3; i++) {
- ret = decode_plane(c, i, frame.f->data[i], frame.f->linesize[i],
+ ret = decode_plane(c, i, frame->data[i], frame->linesize[i],
avctx->width >> !!i, avctx->height,
plane_start[i], c->frame_pred == PRED_LEFT);
if (ret)
return ret;
if (c->frame_pred == PRED_MEDIAN) {
if (!c->interlaced) {
- restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
+ restore_median_planar(c, frame->data[i], frame->linesize[i],
avctx->width >> !!i, avctx->height,
c->slices, 0);
} else {
- restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+ restore_median_planar_il(c, frame->data[i], frame->linesize[i],
avctx->width >> !!i, avctx->height,
c->slices, 0);
}
} else if (c->frame_pred == PRED_GRADIENT) {
if (!c->interlaced) {
- restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
+ restore_gradient_planar(c, frame->data[i], frame->linesize[i],
avctx->width >> !!i, avctx->height,
c->slices, 0);
} else {
- restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+ restore_gradient_planar_il(c, frame->data[i], frame->linesize[i],
avctx->width >> !!i, avctx->height,
c->slices, 0);
}
@@ -822,28 +822,28 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
break;
case AV_PIX_FMT_YUV444P:
for (i = 0; i < 3; i++) {
- ret = decode_plane(c, i, frame.f->data[i], frame.f->linesize[i],
+ ret = decode_plane(c, i, frame->data[i], frame->linesize[i],
avctx->width, avctx->height,
plane_start[i], c->frame_pred == PRED_LEFT);
if (ret)
return ret;
if (c->frame_pred == PRED_MEDIAN) {
if (!c->interlaced) {
- restore_median_planar(c, frame.f->data[i], frame.f->linesize[i],
+ restore_median_planar(c, frame->data[i], frame->linesize[i],
avctx->width, avctx->height,
c->slices, 0);
} else {
- restore_median_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+ restore_median_planar_il(c, frame->data[i], frame->linesize[i],
avctx->width, avctx->height,
c->slices, 0);
}
} else if (c->frame_pred == PRED_GRADIENT) {
if (!c->interlaced) {
- restore_gradient_planar(c, frame.f->data[i], frame.f->linesize[i],
+ restore_gradient_planar(c, frame->data[i], frame->linesize[i],
avctx->width, avctx->height,
c->slices, 0);
} else {
- restore_gradient_planar_il(c, frame.f->data[i], frame.f->linesize[i],
+ restore_gradient_planar_il(c, frame->data[i], frame->linesize[i],
avctx->width, avctx->height,
c->slices, 0);
}
@@ -852,7 +852,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
break;
case AV_PIX_FMT_YUV420P10:
for (i = 0; i < 3; i++) {
- ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], frame.f->linesize[i] / 2,
+ ret = decode_plane10(c, i, (uint16_t *)frame->data[i], frame->linesize[i] / 2,
avctx->width >> !!i, avctx->height >> !!i,
plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
if (ret)
@@ -861,7 +861,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
break;
case AV_PIX_FMT_YUV422P10:
for (i = 0; i < 3; i++) {
- ret = decode_plane10(c, i, (uint16_t *)frame.f->data[i], frame.f->linesize[i] / 2,
+ ret = decode_plane10(c, i, (uint16_t *)frame->data[i], frame->linesize[i] / 2,
avctx->width >> !!i, avctx->height,
plane_start[i], plane_start[i + 1] - 1024, c->frame_pred == PRED_LEFT);
if (ret)
@@ -870,9 +870,9 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
break;
}
- frame.f->key_frame = 1;
- frame.f->pict_type = AV_PICTURE_TYPE_I;
- frame.f->interlaced_frame = !!c->interlaced;
+ frame->key_frame = 1;
+ frame->pict_type = AV_PICTURE_TYPE_I;
+ frame->interlaced_frame = !!c->interlaced;
*got_frame = 1;
@@ -142,7 +142,6 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
V210DecContext *s = avctx->priv_data;
ThreadData td;
int ret, stride, aligned_input;
- ThreadFrame frame = { .f = data };
AVFrame *pic = data;
const uint8_t *psrc = avpkt->data;
@@ -177,7 +176,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
ff_v210dec_init(s);
}
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
pic->pict_type = AV_PICTURE_TYPE_I;
@@ -89,7 +89,6 @@ static int v410_decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
ThreadData td;
- ThreadFrame frame = { .f = data };
AVFrame *pic = data;
uint8_t *src = avpkt->data;
int ret;
@@ -101,7 +100,7 @@ static int v410_decode_frame(AVCodecContext *avctx, void *data,
return AVERROR(EINVAL);
}
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
pic->key_frame = 1;
@@ -18,14 +18,16 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "libavutil/frame.h"
#include "libavutil/pixdesc.h"
#include "hwconfig.h"
#include "vaapi_decode.h"
#include "internal.h"
#include "av1dec.h"
+#include "thread.h"
typedef struct VAAPIAV1FrameRef {
- ThreadFrame frame;
+ AVFrame *frame;
int valid;
} VAAPIAV1FrameRef;
@@ -40,13 +42,13 @@ typedef struct VAAPIAV1DecContext {
* used to apply film grain and push to downstream.
*/
VAAPIAV1FrameRef ref_tab[AV1_NUM_REF_FRAMES];
- ThreadFrame tmp_frame;
+ AVFrame *tmp_frame;
} VAAPIAV1DecContext;
static VASurfaceID vaapi_av1_surface_id(AV1Frame *vf)
{
if (vf)
- return ff_vaapi_get_surface_id(vf->tf.f);
+ return ff_vaapi_get_surface_id(vf->f);
else
return VA_INVALID_SURFACE;
}
@@ -73,16 +75,16 @@ static int vaapi_av1_decode_init(AVCodecContext *avctx)
{
VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;
- ctx->tmp_frame.f = av_frame_alloc();
- if (!ctx->tmp_frame.f) {
+ ctx->tmp_frame = av_frame_alloc();
+ if (!ctx->tmp_frame) {
av_log(avctx, AV_LOG_ERROR,
"Failed to allocate frame.\n");
return AVERROR(ENOMEM);
}
for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++) {
- ctx->ref_tab[i].frame.f = av_frame_alloc();
- if (!ctx->ref_tab[i].frame.f) {
+ ctx->ref_tab[i].frame = av_frame_alloc();
+ if (!ctx->ref_tab[i].frame) {
av_log(avctx, AV_LOG_ERROR,
"Failed to allocate reference table frame %d.\n", i);
return AVERROR(ENOMEM);
@@ -97,14 +99,14 @@ static int vaapi_av1_decode_uninit(AVCodecContext *avctx)
{
VAAPIAV1DecContext *ctx = avctx->internal->hwaccel_priv_data;
- if (ctx->tmp_frame.f->buf[0])
- ff_thread_release_buffer(avctx, &ctx->tmp_frame);
- av_frame_free(&ctx->tmp_frame.f);
+ if (ctx->tmp_frame->buf[0])
+ ff_thread_release_buffer(avctx, ctx->tmp_frame);
+ av_frame_free(&ctx->tmp_frame);
for (int i = 0; i < FF_ARRAY_ELEMS(ctx->ref_tab); i++) {
- if (ctx->ref_tab[i].frame.f->buf[0])
- ff_thread_release_buffer(avctx, &ctx->ref_tab[i].frame);
- av_frame_free(&ctx->ref_tab[i].frame.f);
+ if (ctx->ref_tab[i].frame->buf[0])
+ ff_thread_release_buffer(avctx, ctx->ref_tab[i].frame);
+ av_frame_free(&ctx->ref_tab[i].frame);
}
return ff_vaapi_decode_uninit(avctx);
@@ -135,12 +137,12 @@ static int vaapi_av1_start_frame(AVCodecContext *avctx,
goto fail;
if (apply_grain) {
- if (ctx->tmp_frame.f->buf[0])
- ff_thread_release_buffer(avctx, &ctx->tmp_frame);
- err = ff_thread_get_buffer(avctx, &ctx->tmp_frame, AV_GET_BUFFER_FLAG_REF);
+ if (ctx->tmp_frame->buf[0])
+ ff_thread_release_buffer(avctx, ctx->tmp_frame);
+ err = ff_thread_get_buffer(avctx, ctx->tmp_frame, AV_GET_BUFFER_FLAG_REF);
if (err < 0)
goto fail;
- pic->output_surface = ff_vaapi_get_surface_id(ctx->tmp_frame.f);
+ pic->output_surface = ff_vaapi_get_surface_id(ctx->tmp_frame);
} else {
pic->output_surface = vaapi_av1_surface_id(&s->cur_frame);
}
@@ -276,7 +278,7 @@ static int vaapi_av1_start_frame(AVCodecContext *avctx,
pic_param.ref_frame_map[i] = VA_INVALID_ID;
else
pic_param.ref_frame_map[i] = ctx->ref_tab[i].valid ?
- ff_vaapi_get_surface_id(ctx->ref_tab[i].frame.f) :
+ ff_vaapi_get_surface_id(ctx->ref_tab[i].frame) :
vaapi_av1_surface_id(&s->ref[i]);
}
for (int i = 0; i < AV1_REFS_PER_FRAME; i++) {
@@ -380,11 +382,11 @@ static int vaapi_av1_end_frame(AVCodecContext *avctx)
for (int i = 0; i < AV1_NUM_REF_FRAMES; i++) {
if (header->refresh_frame_flags & (1 << i)) {
- if (ctx->ref_tab[i].frame.f->buf[0])
- ff_thread_release_buffer(avctx, &ctx->ref_tab[i].frame);
+ if (ctx->ref_tab[i].frame->buf[0])
+ ff_thread_release_buffer(avctx, ctx->ref_tab[i].frame);
if (apply_grain) {
- ret = ff_thread_ref_frame(&ctx->ref_tab[i].frame, &ctx->tmp_frame);
+ ret = av_frame_ref(ctx->ref_tab[i].frame, ctx->tmp_frame);
if (ret < 0)
return ret;
ctx->ref_tab[i].valid = 1;
@@ -125,7 +125,6 @@ static int vble_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
int offset = 0;
int width_uv = avctx->width / 2, height_uv = avctx->height / 2;
int ret;
- ThreadFrame frame = { .f = data };
if (avpkt->size < 4 || avpkt->size - 4 > INT_MAX/8) {
av_log(avctx, AV_LOG_ERROR, "Invalid packet size\n");
@@ -133,7 +132,7 @@ static int vble_decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
}
/* Allocate buffer */
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, pic, 0)) < 0)
return ret;
/* Set flags */
@@ -33,7 +33,7 @@
#include "libavutil/thread.h"
#include "h264pred.h"
-#include "thread.h"
+#include "threadframe.h"
#include "vp56.h"
#include "vp8dsp.h"
@@ -28,7 +28,7 @@
#include <stdint.h>
#include "vp9.h"
-#include "thread.h"
+#include "threadframe.h"
#include "vp56.h"
enum BlockPartition {
@@ -568,8 +568,7 @@ static int decode_entropy_coded_image(WebPContext *s, enum ImageRole role,
img->frame->height = h;
if (role == IMAGE_ROLE_ARGB && !img->is_alpha_primary) {
- ThreadFrame pt = { .f = img->frame };
- ret = ff_thread_get_buffer(s->avctx, &pt, 0);
+ ret = ff_thread_get_buffer(s->avctx, img->frame, 0);
} else
ret = av_frame_get_buffer(img->frame, 1);
if (ret < 0)
@@ -285,7 +285,6 @@ static int decode_frame(AVCodecContext *avctx,
int TL[4] = { 128, 128, 128, 128 };
int L[4] = { 128, 128, 128, 128 };
YLCContext *s = avctx->priv_data;
- ThreadFrame frame = { .f = data };
const uint8_t *buf = avpkt->data;
int ret, x, y, toffset, boffset;
AVFrame * const p = data;
@@ -307,7 +306,7 @@ static int decode_frame(AVCodecContext *avctx,
if (toffset >= boffset || boffset >= avpkt->size)
return AVERROR_INVALIDDATA;
- if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
return ret;
av_fast_malloc(&s->buffer, &s->buffer_size,
The majority of frame-threaded decoders (mainly the intra-only ones) need exactly one part of ThreadFrame: the AVFrame. They need neither the owners nor the progress, yet they had to use ThreadFrame because ff_thread_(get|release)_buffer() requires it. This commit changes this and makes these functions work with ordinary AVFrames; the decoders that need the extra fields for progress use ff_thread_(get|release)_ext_buffer(), which work exactly as ff_thread_(get|release)_buffer() used to.

This also avoids some unnecessary allocations of progress AVBuffers, namely for H.264 and HEVC film grain frames: these frames are not used for synchronization and therefore don't need a ThreadFrame.

Also move the ThreadFrame structure as well as ff_thread_ref_frame() to threadframe.h, the header for frame-threaded decoders with inter-frame dependencies.

Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com>
---
In case anyone is wondering why ff_thread_get_ext_buffer() still checks for FF_CODEC_CAP_ALLOCATE_PROGRESS: it is due to a quirk in the WebP decoder. The WebP decoder (which has AV_CODEC_CAP_FRAME_THREADS set, but not FF_CODEC_CAP_ALLOCATE_PROGRESS) directly uses the VP8 decoder's init, decode_frame and close functions. According to the spec [1], WebP files may only contain VP8 key frames, yet I don't see a check that enforces this. If the data contained non-keyframes, it would probably (I haven't tested it) lead to very broken results when using frame threading, because there is no synchronization whatsoever between the underlying VP8 decoder contexts. Always flushing the VP8 decoder after every frame should be enough to make this consistent (and to error out in case there are non-keyframes).

[1]: https://developers.google.com/speed/webp/docs/riff_container

 libavcodec/aic.c              |  3 +-
 libavcodec/alac.c             |  3 +-
 libavcodec/av1dec.c           | 35 ++++++++--------
 libavcodec/av1dec.h           |  4 +-
 libavcodec/bitpacked_dec.c    |  3 +-
 libavcodec/cfhd.c             | 13 +++---
 libavcodec/cllc.c             |  7 ++--
 libavcodec/cri.c              |  3 +-
 libavcodec/dnxhddec.c         |  3 +-
 libavcodec/dvdec.c            | 22 +++++-----
 libavcodec/dxtory.c           | 15 +++----
 libavcodec/dxv.c              | 11 +++--
 libavcodec/dxva2_av1.c        |  8 ++--
 libavcodec/error_resilience.h |  2 +-
 libavcodec/exr.c              |  3 +-
 libavcodec/ffv1.h             |  2 +-
 libavcodec/ffv1dec.c          |  1 +
 libavcodec/flacdec.c          |  3 +-
 libavcodec/fraps.c            |  3 +-
 libavcodec/h264_picture.c     | 15 +++----
 libavcodec/h264_slice.c       |  4 +-
 libavcodec/h264dec.c          |  1 +
 libavcodec/h264dec.h          |  1 -
 libavcodec/hapdec.c           | 14 +++----
 libavcodec/hevc_refs.c        |  2 +-
 libavcodec/hevcdec.c          |  6 +--
 libavcodec/hevcdec.h          |  3 +-
 libavcodec/hqx.c              |  4 +-
 libavcodec/huffyuvdec.c       |  3 +-
 libavcodec/jpeg2000dec.c      |  3 +-
 libavcodec/lagarith.c         | 11 +++--
 libavcodec/lcldec.c           |  3 +-
 libavcodec/libopenjpegdec.c   |  3 +-
 libavcodec/magicyuv.c         |  3 +-
 libavcodec/mdec.c             | 11 +++--
 libavcodec/mpegpicture.h      |  2 +-
 libavcodec/notchlc.c          |  7 ++--
 libavcodec/nvdec_av1.c        |  6 +--
 libavcodec/photocd.c          |  3 +-
 libavcodec/pixlet.c           |  9 ++--
 libavcodec/proresdec2.c       |  3 +-
 libavcodec/pthread_frame.c    | 79 +++++++++++++++++++----------------
 libavcodec/rv34.c             |  1 +
 libavcodec/sheervideo.c       |  3 +-
 libavcodec/takdec.c           |  3 +-
 libavcodec/thread.h           | 14 +------
 libavcodec/threadframe.h      | 12 +++++-
 libavcodec/tiff.c             |  9 ++--
 libavcodec/tta.c              |  3 +-
 libavcodec/utils.c            | 11 +++--
 libavcodec/utvideodec.c       | 76 ++++++++++++++++-----------------
 libavcodec/v210dec.c          |  3 +-
 libavcodec/v410dec.c          |  3 +-
 libavcodec/vaapi_av1.c        | 44 +++++++++----------
 libavcodec/vble.c             |  3 +-
 libavcodec/vp8.h              |  2 +-
 libavcodec/vp9shared.h        |  2 +-
 libavcodec/webp.c             |  3 +-
 libavcodec/ylc.c              |  3 +-
 59 files changed, 252 insertions(+), 283 deletions(-)
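
For reference, a minimal sketch of the two call patterns after this change; the decoder functions below are hypothetical illustrations, not code from this patch:

/* Sketch only: hypothetical decoder functions illustrating the API
 * split; not taken from the diff above. */
#include "avcodec.h"
#include "thread.h"       /* ff_thread_get_buffer() now takes a plain AVFrame */
#include "threadframe.h"  /* ThreadFrame and the new _ext_ variants */

/* Intra-only decoder: no inter-frame dependencies, so it needs neither
 * owners nor progress and no longer wraps the frame in a ThreadFrame. */
static int intra_decode_frame(AVCodecContext *avctx, void *data,
                              int *got_frame, AVPacket *avpkt)
{
    AVFrame *frame = data;
    int ret;

    /* Previously this required a wrapper:
     *     ThreadFrame tframe = { .f = data };
     *     ret = ff_thread_get_buffer(avctx, &tframe, 0); */
    if ((ret = ff_thread_get_buffer(avctx, frame, 0)) < 0)
        return ret;

    /* ... decode the packet into frame ... */
    *got_frame = 1;
    return avpkt->size;
}

/* Decoder with inter-frame dependencies: keeps its ThreadFrame (now
 * declared in threadframe.h) so other threads can wait on progress. */
static int inter_get_ref(AVCodecContext *avctx, ThreadFrame *tf)
{
    /* Works exactly as ff_thread_get_buffer() used to, including the
     * allocation of the progress AVBuffer. */
    return ff_thread_get_ext_buffer(avctx, tf, AV_GET_BUFFER_FLAG_REF);
}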