Message ID | 20230310115635.13639-5-anton@khirnov.net |
---|---|
State | New |
Headers | show |
Series | [FFmpeg-devel,1/6] lavc/decode: stop mangling last_pkt_props->opaque | expand |
Context | Check | Description |
---|---|---|
yinshiyou/make_loongarch64 | success | Make finished |
yinshiyou/make_fate_loongarch64 | success | Make fate finished |
andriy/make_x86 | success | Make finished |
andriy/make_fate_x86 | success | Make fate finished |
On Fri, 10 Mar 2023, Anton Khirnov wrote: > This field is ad-hoc and will be deprecated. Use the recently-added > AV_CODEC_FLAG_COPY_OPAQUE to pass arbitrary user data from packets to > frames. > --- > fftools/ffplay.c | 24 ++++++++++++++++++++++-- > 1 file changed, 22 insertions(+), 2 deletions(-) > > diff --git a/fftools/ffplay.c b/fftools/ffplay.c > index 0a96511475..d572eeabad 100644 > --- a/fftools/ffplay.c > +++ b/fftools/ffplay.c > @@ -146,6 +146,10 @@ typedef struct Clock { > int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */ > } Clock; > > +typedef struct FrameData { > + int64_t pkt_pos; > +} FrameData; > + > /* Common struct for handling all types of decoded data and allocated render buffers. */ > typedef struct Frame { > AVFrame *frame; > @@ -640,6 +644,14 @@ static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) { > } > av_packet_unref(d->pkt); > } else { > + if (d->pkt->buf && !d->pkt->opaque_ref) { > + FrameData *fd; > + > + d->pkt->opaque_ref = av_buffer_allocz(sizeof(*fd)); Missing alloc check. Otherwise looks good. Thanks, Marton > + fd = (FrameData*)d->pkt->opaque_ref->data; > + fd->pkt_pos = d->pkt->pos; > + } > + > if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) { > av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n"); > d->packet_pending = 1; > @@ -2041,12 +2053,13 @@ static int audio_thread(void *arg) > goto the_end; > > while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) { > + FrameData *fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL; > tb = av_buffersink_get_time_base(is->out_audio_filter); > if (!(af = frame_queue_peek_writable(&is->sampq))) > goto the_end; > > af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb); > - af->pos = frame->pkt_pos; > + af->pos = fd ? fd->pkt_pos : -1; > af->serial = is->auddec.pkt_serial; > af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate}); > > @@ -2145,6 +2158,8 @@ static int video_thread(void *arg) > goto the_end; > > while (ret >= 0) { > + FrameData *fd; > + > is->frame_last_returned_time = av_gettime_relative() / 1000000.0; > > ret = av_buffersink_get_frame_flags(filt_out, frame, 0); > @@ -2155,13 +2170,15 @@ static int video_thread(void *arg) > break; > } > > + fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL; > + > is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time; > if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0) > is->frame_last_filter_delay = 0; > tb = av_buffersink_get_time_base(filt_out); > duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0); > pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb); > - ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial); > + ret = queue_picture(is, frame, pts, duration, fd ? fd->pkt_pos : -1, is->viddec.pkt_serial); > av_frame_unref(frame); > if (is->videoq.serial != is->viddec.pkt_serial) > break; > @@ -2567,6 +2584,9 @@ static int stream_component_open(VideoState *is, int stream_index) > av_dict_set(&opts, "threads", "auto", 0); > if (stream_lowres) > av_dict_set_int(&opts, "lowres", stream_lowres, 0); > + > + av_dict_set(&opts, "flags", "+copy_opaque", AV_DICT_MULTIKEY); > + > if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) { > goto fail; > } > -- > 2.39.1 > > _______________________________________________ > ffmpeg-devel mailing list > ffmpeg-devel@ffmpeg.org > https://ffmpeg.org/mailman/listinfo/ffmpeg-devel > > To unsubscribe, visit link above, or email > ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe". >
diff --git a/fftools/ffplay.c b/fftools/ffplay.c index 0a96511475..d572eeabad 100644 --- a/fftools/ffplay.c +++ b/fftools/ffplay.c @@ -146,6 +146,10 @@ typedef struct Clock { int *queue_serial; /* pointer to the current packet queue serial, used for obsolete clock detection */ } Clock; +typedef struct FrameData { + int64_t pkt_pos; +} FrameData; + /* Common struct for handling all types of decoded data and allocated render buffers. */ typedef struct Frame { AVFrame *frame; @@ -640,6 +644,14 @@ static int decoder_decode_frame(Decoder *d, AVFrame *frame, AVSubtitle *sub) { } av_packet_unref(d->pkt); } else { + if (d->pkt->buf && !d->pkt->opaque_ref) { + FrameData *fd; + + d->pkt->opaque_ref = av_buffer_allocz(sizeof(*fd)); + fd = (FrameData*)d->pkt->opaque_ref->data; + fd->pkt_pos = d->pkt->pos; + } + if (avcodec_send_packet(d->avctx, d->pkt) == AVERROR(EAGAIN)) { av_log(d->avctx, AV_LOG_ERROR, "Receive_frame and send_packet both returned EAGAIN, which is an API violation.\n"); d->packet_pending = 1; @@ -2041,12 +2053,13 @@ static int audio_thread(void *arg) goto the_end; while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) { + FrameData *fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL; tb = av_buffersink_get_time_base(is->out_audio_filter); if (!(af = frame_queue_peek_writable(&is->sampq))) goto the_end; af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb); - af->pos = frame->pkt_pos; + af->pos = fd ? fd->pkt_pos : -1; af->serial = is->auddec.pkt_serial; af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate}); @@ -2145,6 +2158,8 @@ static int video_thread(void *arg) goto the_end; while (ret >= 0) { + FrameData *fd; + is->frame_last_returned_time = av_gettime_relative() / 1000000.0; ret = av_buffersink_get_frame_flags(filt_out, frame, 0); @@ -2155,13 +2170,15 @@ static int video_thread(void *arg) break; } + fd = frame->opaque_ref ? (FrameData*)frame->opaque_ref->data : NULL; + is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time; if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0) is->frame_last_filter_delay = 0; tb = av_buffersink_get_time_base(filt_out); duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0); pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb); - ret = queue_picture(is, frame, pts, duration, frame->pkt_pos, is->viddec.pkt_serial); + ret = queue_picture(is, frame, pts, duration, fd ? fd->pkt_pos : -1, is->viddec.pkt_serial); av_frame_unref(frame); if (is->videoq.serial != is->viddec.pkt_serial) break; @@ -2567,6 +2584,9 @@ static int stream_component_open(VideoState *is, int stream_index) av_dict_set(&opts, "threads", "auto", 0); if (stream_lowres) av_dict_set_int(&opts, "lowres", stream_lowres, 0); + + av_dict_set(&opts, "flags", "+copy_opaque", AV_DICT_MULTIKEY); + if ((ret = avcodec_open2(avctx, codec, &opts)) < 0) { goto fail; }