@@ -28,6 +28,7 @@
#include "libavutil/internal.h"
#include "libavutil/mem.h"
#include "libavutil/intreadwrite.h"
+#include "thread.h"
#define READ_PIXELS(a, b, c) \
do { \
@@ -37,6 +38,12 @@
*c++ = (val >> 20) & 0x3FF; \
} while (0)
+typedef struct ThreadData {
+ AVFrame *frame;
+ uint8_t *buf;
+ int stride;
+} ThreadData;
+
static void v210_planar_unpack_c(const uint32_t *src, uint16_t *y, uint16_t *u, uint16_t *v, int width)
{
uint32_t val;
@@ -64,21 +71,81 @@ static av_cold int decode_init(AVCodecContext *avctx)
avctx->pix_fmt = AV_PIX_FMT_YUV422P10;
avctx->bits_per_raw_sample = 10;
+    s->thread_count  = av_clip(avctx->thread_count, 1, FFMAX(avctx->height / 4, 1));
s->aligned_input = 0;
ff_v210dec_init(s);
return 0;
}
+static int v210_decode_slice(AVCodecContext *avctx, void *arg, int jobnr, int threadnr)
+{
+ V210DecContext *s = avctx->priv_data;
+ int h, w;
+ ThreadData *td = arg;
+ AVFrame *frame = td->frame;
+ int stride = td->stride;
+ int slice_start = (avctx->height * jobnr) / s->thread_count;
+ int slice_end = (avctx->height * (jobnr+1)) / s->thread_count;
+ uint8_t *psrc = td->buf + stride * slice_start;
+ uint16_t *y, *u, *v;
+
+ y = (uint16_t*)frame->data[0] + slice_start * frame->linesize[0] / 2;
+ u = (uint16_t*)frame->data[1] + slice_start * frame->linesize[1] / 2;
+ v = (uint16_t*)frame->data[2] + slice_start * frame->linesize[2] / 2;
+ for (h = slice_start; h < slice_end; h++) {
+ const uint32_t *src = (const uint32_t*)psrc;
+ uint32_t val;
+
+ w = (avctx->width / 12) * 12;
+ s->unpack_frame(src, y, u, v, w);
+
+ y += w;
+ u += w >> 1;
+ v += w >> 1;
+ src += (w << 1) / 3;
+
+ if (w < avctx->width - 5) {
+ READ_PIXELS(u, y, v);
+ READ_PIXELS(y, u, y);
+ READ_PIXELS(v, y, u);
+ READ_PIXELS(y, v, y);
+ w += 6;
+ }
+
+ if (w < avctx->width - 1) {
+ READ_PIXELS(u, y, v);
+
+ val = av_le2ne32(*src++);
+ *y++ = val & 0x3FF;
+ if (w < avctx->width - 3) {
+ *u++ = (val >> 10) & 0x3FF;
+ *y++ = (val >> 20) & 0x3FF;
+
+ val = av_le2ne32(*src++);
+ *v++ = val & 0x3FF;
+ *y++ = (val >> 10) & 0x3FF;
+ }
+ }
+
+ psrc += stride;
+ y += frame->linesize[0] / 2 - avctx->width + (avctx->width & 1);
+ u += frame->linesize[1] / 2 - avctx->width / 2;
+ v += frame->linesize[2] / 2 - avctx->width / 2;
+ }
+
+ return 0;
+}
+
static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
AVPacket *avpkt)
{
V210DecContext *s = avctx->priv_data;
-
- int h, w, ret, stride, aligned_input;
+ ThreadData td;
+ int ret, stride, aligned_input;
+ ThreadFrame frame = { .f = data };
AVFrame *pic = data;
const uint8_t *psrc = avpkt->data;
- uint16_t *y, *u, *v;
if (s->custom_stride )
stride = s->custom_stride;
@@ -86,6 +153,7 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
int aligned_width = ((avctx->width + 47) / 48) * 48;
stride = aligned_width * 8 / 3;
}
+ td.stride = stride;
if (avpkt->size < stride * avctx->height) {
if ((((avctx->width + 23) / 24) * 24 * 8) / 3 * avctx->height == avpkt->size) {
@@ -110,55 +178,15 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *got_frame,
ff_v210dec_init(s);
}
- if ((ret = ff_get_buffer(avctx, pic, 0)) < 0)
+ if ((ret = ff_thread_get_buffer(avctx, &frame, 0)) < 0)
return ret;
- y = (uint16_t*)pic->data[0];
- u = (uint16_t*)pic->data[1];
- v = (uint16_t*)pic->data[2];
pic->pict_type = AV_PICTURE_TYPE_I;
pic->key_frame = 1;
- for (h = 0; h < avctx->height; h++) {
- const uint32_t *src = (const uint32_t*)psrc;
- uint32_t val;
-
- w = (avctx->width / 12) * 12;
- s->unpack_frame(src, y, u, v, w);
-
- y += w;
- u += w >> 1;
- v += w >> 1;
- src += (w << 1) / 3;
-
- if (w < avctx->width - 5) {
- READ_PIXELS(u, y, v);
- READ_PIXELS(y, u, y);
- READ_PIXELS(v, y, u);
- READ_PIXELS(y, v, y);
- w += 6;
- }
-
- if (w < avctx->width - 1) {
- READ_PIXELS(u, y, v);
-
- val = av_le2ne32(*src++);
- *y++ = val & 0x3FF;
- if (w < avctx->width - 3) {
- *u++ = (val >> 10) & 0x3FF;
- *y++ = (val >> 20) & 0x3FF;
-
- val = av_le2ne32(*src++);
- *v++ = val & 0x3FF;
- *y++ = (val >> 10) & 0x3FF;
- }
- }
-
- psrc += stride;
- y += pic->linesize[0] / 2 - avctx->width + (avctx->width & 1);
- u += pic->linesize[1] / 2 - avctx->width / 2;
- v += pic->linesize[2] / 2 - avctx->width / 2;
- }
+ td.buf = (uint8_t*)psrc;
+ td.frame = pic;
+ avctx->execute2(avctx, v210_decode_slice, &td, NULL, s->thread_count);
if (avctx->field_order > AV_FIELD_PROGRESSIVE) {
/* we have interlaced material flagged in container */
@@ -194,6 +222,8 @@ AVCodec ff_v210_decoder = {
.priv_data_size = sizeof(V210DecContext),
.init = decode_init,
.decode = decode_frame,
- .capabilities = AV_CODEC_CAP_DR1,
+ .capabilities = AV_CODEC_CAP_DR1 |
+ AV_CODEC_CAP_SLICE_THREADS |
+ AV_CODEC_CAP_FRAME_THREADS,
.priv_class = &v210dec_class,
};
@@ -27,6 +27,7 @@ typedef struct {
AVClass *av_class;
int custom_stride;
int aligned_input;
+ int thread_count;
int stride_warning_shown;
void (*unpack_frame)(const uint32_t *src, uint16_t *y, uint16_t *u, uint16_t *v, int width);
} V210DecContext;