@@ -150,7 +150,7 @@ const FFCodec ff_zero12v_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_012V,
.init = zero12v_decode_init,
- .decode = zero12v_decode_frame,
+ FF_CODEC_DECODE_CB(zero12v_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -1034,7 +1034,7 @@ const FFCodec ff_fourxm_decoder = {
.priv_data_size = sizeof(FourXContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -180,7 +180,7 @@ const FFCodec ff_eightbps_decoder = {
.p.id = AV_CODEC_ID_8BPS,
.priv_data_size = sizeof(EightBpsContext),
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -194,7 +194,7 @@ const FFCodec ff_eightsvx_fib_decoder = {
.p.id = AV_CODEC_ID_8SVX_FIB,
.priv_data_size = sizeof (EightSvxContext),
.init = eightsvx_decode_init,
- .decode = eightsvx_decode_frame,
+ FF_CODEC_DECODE_CB(eightsvx_decode_frame),
.close = eightsvx_decode_close,
.p.capabilities = AV_CODEC_CAP_DR1,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
@@ -210,7 +210,7 @@ const FFCodec ff_eightsvx_exp_decoder = {
.p.id = AV_CODEC_ID_8SVX_EXP,
.priv_data_size = sizeof (EightSvxContext),
.init = eightsvx_decode_init,
- .decode = eightsvx_decode_frame,
+ FF_CODEC_DECODE_CB(eightsvx_decode_frame),
.close = eightsvx_decode_close,
.p.capabilities = AV_CODEC_CAP_DR1,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
@@ -401,7 +401,7 @@ const FFCodec ff_a64multi_encoder = {
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
.priv_data_size = sizeof(A64Context),
.init = a64multi_encode_init,
- .encode2 = a64multi_encode_frame,
+ FF_CODEC_ENCODE_CB(a64multi_encode_frame),
.close = a64multi_close_encoder,
.p.pix_fmts = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_INIT_THREADSAFE,
@@ -416,7 +416,7 @@ const FFCodec ff_a64multi5_encoder = {
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
.priv_data_size = sizeof(A64Context),
.init = a64multi_encode_init,
- .encode2 = a64multi_encode_frame,
+ FF_CODEC_ENCODE_CB(a64multi_encode_frame),
.close = a64multi_close_encoder,
.p.pix_fmts = (const enum AVPixelFormat[]) {AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE},
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_INIT_THREADSAFE,
@@ -560,7 +560,7 @@ const FFCodec ff_aac_decoder = {
.priv_data_size = sizeof(AACContext),
.init = aac_decode_init,
.close = aac_decode_close,
- .decode = aac_decode_frame,
+ FF_CODEC_DECODE_CB(aac_decode_frame),
.p.sample_fmts = (const enum AVSampleFormat[]) {
AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE
},
@@ -588,7 +588,7 @@ const FFCodec ff_aac_latm_decoder = {
.priv_data_size = sizeof(struct LATMContext),
.init = latm_decode_init,
.close = aac_decode_close,
- .decode = latm_decode_frame,
+ FF_CODEC_DECODE_CB(latm_decode_frame),
.p.sample_fmts = (const enum AVSampleFormat[]) {
AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE
},
@@ -458,7 +458,7 @@ const FFCodec ff_aac_fixed_decoder = {
.priv_data_size = sizeof(AACContext),
.init = aac_decode_init,
.close = aac_decode_close,
- .decode = aac_decode_frame,
+ FF_CODEC_DECODE_CB(aac_decode_frame),
.p.sample_fmts = (const enum AVSampleFormat[]) {
AV_SAMPLE_FMT_S32P, AV_SAMPLE_FMT_NONE
},
@@ -1144,7 +1144,7 @@ const FFCodec ff_aac_encoder = {
.p.id = AV_CODEC_ID_AAC,
.priv_data_size = sizeof(AACEncContext),
.init = aac_encode_init,
- .encode2 = aac_encode_frame,
+ FF_CODEC_ENCODE_CB(aac_encode_frame),
.close = aac_encode_end,
.defaults = aac_encode_defaults,
.p.supported_samplerates = ff_mpeg4audio_sample_rates,
@@ -157,7 +157,7 @@ const FFCodec ff_aasc_decoder = {
.priv_data_size = sizeof(AascContext),
.init = aasc_decode_init,
.close = aasc_decode_end,
- .decode = aasc_decode_frame,
+ FF_CODEC_DECODE_CB(aasc_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -178,7 +178,7 @@ const FFCodec ff_ac3_fixed_decoder = {
.priv_data_size = sizeof (AC3DecodeContext),
.init = ac3_decode_init,
.close = ac3_decode_end,
- .decode = ac3_decode_frame,
+ FF_CODEC_DECODE_CB(ac3_decode_frame),
.p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
AV_CODEC_CAP_DR1,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
@@ -66,7 +66,7 @@ const FFCodec ff_ac3_decoder = {
.priv_data_size = sizeof (AC3DecodeContext),
.init = ac3_decode_init,
.close = ac3_decode_end,
- .decode = ac3_decode_frame,
+ FF_CODEC_DECODE_CB(ac3_decode_frame),
.p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
AV_CODEC_CAP_DR1,
.p.long_name = NULL_IF_CONFIG_SMALL("ATSC A/52A (AC-3)"),
@@ -84,7 +84,7 @@ const FFCodec ff_eac3_decoder = {
.priv_data_size = sizeof (AC3DecodeContext),
.init = ac3_decode_init,
.close = ac3_decode_end,
- .decode = ac3_decode_frame,
+ FF_CODEC_DECODE_CB(ac3_decode_frame),
.p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
AV_CODEC_CAP_DR1,
.p.long_name = NULL_IF_CONFIG_SMALL("ATSC A/52B (AC-3, E-AC-3)"),
@@ -128,7 +128,7 @@ const FFCodec ff_ac3_fixed_encoder = {
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(AC3EncodeContext),
.init = ac3_fixed_encode_init,
- .encode2 = ff_ac3_fixed_encode_frame,
+ FF_CODEC_ENCODE_CB(ff_ac3_fixed_encode_frame),
.close = ff_ac3_encode_close,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S32P,
AV_SAMPLE_FMT_NONE },
@@ -132,7 +132,7 @@ const FFCodec ff_ac3_encoder = {
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(AC3EncodeContext),
.init = ff_ac3_float_encode_init,
- .encode2 = ff_ac3_float_encode_frame,
+ FF_CODEC_ENCODE_CB(ff_ac3_float_encode_frame),
.close = ff_ac3_encode_close,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
@@ -2294,7 +2294,7 @@ const FFCodec ff_ ## name_ ## _decoder = { \
.p.sample_fmts = sample_fmts_, \
.priv_data_size = sizeof(ADPCMDecodeContext), \
.init = adpcm_decode_init, \
- .decode = adpcm_decode_frame, \
+ FF_CODEC_DECODE_CB(adpcm_decode_frame), \
.flush = adpcm_flush, \
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE, \
};
@@ -1007,7 +1007,7 @@ const FFCodec ff_ ## name_ ## _encoder = { \
.p.priv_class = &adpcm_encoder_class, \
.priv_data_size = sizeof(ADPCMEncodeContext), \
.init = adpcm_encode_init, \
- .encode2 = adpcm_encode_frame, \
+ FF_CODEC_ENCODE_CB(adpcm_encode_frame), \
.close = adpcm_encode_close, \
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_INIT_THREADSAFE, \
};
@@ -197,7 +197,7 @@ const FFCodec ff_adpcm_adx_decoder = {
.p.id = AV_CODEC_ID_ADPCM_ADX,
.priv_data_size = sizeof(ADXContext),
.init = adx_decode_init,
- .decode = adx_decode_frame,
+ FF_CODEC_DECODE_CB(adx_decode_frame),
.flush = adx_decode_flush,
.p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
AV_CODEC_CAP_DR1,
@@ -197,7 +197,7 @@ const FFCodec ff_adpcm_adx_encoder = {
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
.priv_data_size = sizeof(ADXContext),
.init = adx_encode_init,
- .encode2 = adx_encode_frame,
+ FF_CODEC_ENCODE_CB(adx_encode_frame),
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -1294,7 +1294,7 @@ const FFCodec ff_agm_decoder = {
.priv_data_size = sizeof(AGMContext),
.init = decode_init,
.close = decode_close,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.flush = decode_flush,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
FF_CODEC_CAP_INIT_CLEANUP |
@@ -503,7 +503,7 @@ const FFCodec ff_aic_decoder = {
.priv_data_size = sizeof(AICContext),
.init = aic_decode_init,
.close = aic_decode_close,
- .decode = aic_decode_frame,
+ FF_CODEC_DECODE_CB(aic_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -618,7 +618,7 @@ const FFCodec ff_alac_decoder = {
.priv_data_size = sizeof(ALACContext),
.init = alac_decode_init,
.close = alac_decode_close,
- .decode = alac_decode_frame,
+ FF_CODEC_DECODE_CB(alac_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.p.priv_class = &alac_class
@@ -657,7 +657,7 @@ const FFCodec ff_alac_encoder = {
.priv_data_size = sizeof(AlacEncodeContext),
.p.priv_class = &alacenc_class,
.init = alac_encode_init,
- .encode2 = alac_encode_frame,
+ FF_CODEC_ENCODE_CB(alac_encode_frame),
.close = alac_encode_close,
.p.capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME,
#if FF_API_OLD_CHANNEL_LAYOUT
@@ -127,5 +127,5 @@ const FFCodec ff_alias_pix_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_ALIAS_PIX,
.p.capabilities = AV_CODEC_CAP_DR1,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
};
@@ -106,7 +106,7 @@ const FFCodec ff_alias_pix_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Alias/Wavefront PIX image"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_ALIAS_PIX,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_BGR24, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
},
@@ -2183,7 +2183,7 @@ const FFCodec ff_als_decoder = {
.priv_data_size = sizeof(ALSDecContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.flush = flush,
.p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -384,7 +384,7 @@ const FFCodec ff_h264_amf_encoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_H264,
.init = amf_encode_init_h264,
- .receive_packet = ff_amf_receive_packet,
+ FF_CODEC_RECEIVE_PACKET_CB(ff_amf_receive_packet),
.close = ff_amf_encode_close,
.priv_data_size = sizeof(AmfContext),
.p.priv_class = &h264_amf_class,
@@ -316,7 +316,7 @@ const FFCodec ff_hevc_amf_encoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_HEVC,
.init = amf_encode_init_hevc,
- .receive_packet = ff_amf_receive_packet,
+ FF_CODEC_RECEIVE_PACKET_CB(ff_amf_receive_packet),
.close = ff_amf_encode_close,
.priv_data_size = sizeof(AmfContext),
.p.priv_class = &hevc_amf_class,
@@ -1103,7 +1103,7 @@ const FFCodec ff_amrnb_decoder = {
.p.id = AV_CODEC_ID_AMR_NB,
.priv_data_size = sizeof(AMRChannelsContext),
.init = amrnb_decode_init,
- .decode = amrnb_decode_frame,
+ FF_CODEC_DECODE_CB(amrnb_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
@@ -1299,7 +1299,7 @@ const FFCodec ff_amrwb_decoder = {
.p.id = AV_CODEC_ID_AMR_WB,
.priv_data_size = sizeof(AMRWBChannelsContext),
.init = amrwb_decode_init,
- .decode = amrwb_decode_frame,
+ FF_CODEC_DECODE_CB(amrwb_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_NONE },
@@ -197,7 +197,7 @@ const FFCodec ff_anm_decoder = {
.priv_data_size = sizeof(AnmContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -488,7 +488,7 @@ const FFCodec ff_ansi_decoder = {
.priv_data_size = sizeof(AnsiContext),
.init = decode_init,
.close = decode_close,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
.defaults = ansi_defaults,
@@ -1666,7 +1666,7 @@ const FFCodec ff_ape_decoder = {
.priv_data_size = sizeof(APEContext),
.init = ape_decode_init,
.close = ape_decode_close,
- .decode = ape_decode_frame,
+ FF_CODEC_DECODE_CB(ape_decode_frame),
.p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DELAY |
AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -181,7 +181,7 @@ const FFCodec ff_aptx_decoder = {
.p.id = AV_CODEC_ID_APTX,
.priv_data_size = sizeof(AptXContext),
.init = ff_aptx_init,
- .decode = aptx_decode_frame,
+ FF_CODEC_DECODE_CB(aptx_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
#if FF_API_OLD_CHANNEL_LAYOUT
@@ -201,7 +201,7 @@ const FFCodec ff_aptx_hd_decoder = {
.p.id = AV_CODEC_ID_APTX_HD,
.priv_data_size = sizeof(AptXContext),
.init = ff_aptx_init,
- .decode = aptx_decode_frame,
+ FF_CODEC_DECODE_CB(aptx_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
#if FF_API_OLD_CHANNEL_LAYOUT
@@ -253,7 +253,7 @@ const FFCodec ff_aptx_encoder = {
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SMALL_LAST_FRAME,
.priv_data_size = sizeof(AptXContext),
.init = ff_aptx_init,
- .encode2 = aptx_encode_frame,
+ FF_CODEC_ENCODE_CB(aptx_encode_frame),
.close = aptx_close,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
#if FF_API_OLD_CHANNEL_LAYOUT
@@ -275,7 +275,7 @@ const FFCodec ff_aptx_hd_encoder = {
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SMALL_LAST_FRAME,
.priv_data_size = sizeof(AptXContext),
.init = ff_aptx_init,
- .encode2 = aptx_encode_frame,
+ FF_CODEC_ENCODE_CB(aptx_encode_frame),
.close = aptx_close,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
#if FF_API_OLD_CHANNEL_LAYOUT
@@ -218,7 +218,7 @@ const FFCodec ff_arbc_decoder = {
.p.id = AV_CODEC_ID_ARBC,
.priv_data_size = sizeof(ARBCContext),
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.flush = decode_flush,
.close = decode_close,
.p.capabilities = AV_CODEC_CAP_DR1,
@@ -740,7 +740,7 @@ const FFCodec ff_argo_decoder = {
.p.id = AV_CODEC_ID_ARGO,
.priv_data_size = sizeof(ArgoContext),
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.flush = decode_flush,
.close = decode_close,
.p.capabilities = AV_CODEC_CAP_DR1,
@@ -68,7 +68,7 @@ const FFCodec ff_ssa_decoder = {
.p.type = AVMEDIA_TYPE_SUBTITLE,
.p.id = AV_CODEC_ID_ASS,
.init = ass_decode_init,
- .decode_sub = ass_decode_frame,
+ FF_CODEC_DECODE_SUB_CB(ass_decode_frame),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
#endif
@@ -80,7 +80,7 @@ const FFCodec ff_ass_decoder = {
.p.type = AVMEDIA_TYPE_SUBTITLE,
.p.id = AV_CODEC_ID_ASS,
.init = ass_decode_init,
- .decode_sub = ass_decode_frame,
+ FF_CODEC_DECODE_SUB_CB(ass_decode_frame),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
#endif
@@ -75,7 +75,7 @@ const FFCodec ff_ssa_encoder = {
.p.type = AVMEDIA_TYPE_SUBTITLE,
.p.id = AV_CODEC_ID_ASS,
.init = ass_encode_init,
- .encode_sub = ass_encode_frame,
+ FF_CODEC_ENCODE_SUB_CB(ass_encode_frame),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
#endif
@@ -87,7 +87,7 @@ const FFCodec ff_ass_encoder = {
.p.type = AVMEDIA_TYPE_SUBTITLE,
.p.id = AV_CODEC_ID_ASS,
.init = ass_encode_init,
- .encode_sub = ass_encode_frame,
+ FF_CODEC_ENCODE_SUB_CB(ass_encode_frame),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
#endif
@@ -336,7 +336,7 @@ const FFCodec ff_asv1_decoder = {
.priv_data_size = sizeof(ASV1Context),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -350,7 +350,7 @@ const FFCodec ff_asv2_decoder = {
.p.id = AV_CODEC_ID_ASV2,
.priv_data_size = sizeof(ASV1Context),
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -350,7 +350,7 @@ const FFCodec ff_asv1_encoder = {
.p.id = AV_CODEC_ID_ASV1,
.priv_data_size = sizeof(ASV1Context),
.init = encode_init,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
AV_PIX_FMT_NONE },
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -365,7 +365,7 @@ const FFCodec ff_asv2_encoder = {
.p.id = AV_CODEC_ID_ASV2,
.priv_data_size = sizeof(ASV1Context),
.init = encode_init,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
AV_PIX_FMT_NONE },
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -392,7 +392,7 @@ const FFCodec ff_atrac1_decoder = {
.priv_data_size = sizeof(AT1Ctx),
.init = atrac1_decode_init,
.close = atrac1_decode_end,
- .decode = atrac1_decode_frame,
+ FF_CODEC_DECODE_CB(atrac1_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
@@ -1023,7 +1023,7 @@ const FFCodec ff_atrac3_decoder = {
.priv_data_size = sizeof(ATRAC3Context),
.init = atrac3_decode_init,
.close = atrac3_decode_close,
- .decode = atrac3_decode_frame,
+ FF_CODEC_DECODE_CB(atrac3_decode_frame),
.p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
@@ -1038,7 +1038,7 @@ const FFCodec ff_atrac3al_decoder = {
.priv_data_size = sizeof(ATRAC3Context),
.init = atrac3_decode_init,
.close = atrac3_decode_close,
- .decode = atrac3al_decode_frame,
+ FF_CODEC_DECODE_CB(atrac3al_decode_frame),
.p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
@@ -400,7 +400,7 @@ const FFCodec ff_atrac3p_decoder = {
.priv_data_size = sizeof(ATRAC3PContext),
.init = atrac3p_decode_init,
.close = atrac3p_decode_close,
- .decode = atrac3p_decode_frame,
+ FF_CODEC_DECODE_CB(atrac3p_decode_frame),
};
const FFCodec ff_atrac3pal_decoder = {
@@ -413,5 +413,5 @@ const FFCodec ff_atrac3pal_decoder = {
.priv_data_size = sizeof(ATRAC3PContext),
.init = atrac3p_decode_init,
.close = atrac3p_decode_close,
- .decode = atrac3p_decode_frame,
+ FF_CODEC_DECODE_CB(atrac3p_decode_frame),
};
@@ -995,7 +995,7 @@ const FFCodec ff_atrac9_decoder = {
.priv_data_size = sizeof(ATRAC9Context),
.init = atrac9_decode_init,
.close = atrac9_decode_close,
- .decode = atrac9_decode_frame,
+ FF_CODEC_DECODE_CB(atrac9_decode_frame),
.flush = atrac9_decode_flush,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
@@ -592,7 +592,7 @@ static av_cold int ffat_close_decoder(AVCodecContext *avctx)
.priv_data_size = sizeof(ATDecodeContext), \
.init = ffat_init_decoder, \
.close = ffat_close_decoder, \
- .decode = ffat_decode, \
+ FF_CODEC_DECODE_CB(ffat_decode), \
.flush = ffat_decode_flush, \
.p.priv_class = &ffat_##NAME##_dec_class, \
.bsfs = bsf_name, \
@@ -621,7 +621,7 @@ static const AVOption options[] = {
.priv_data_size = sizeof(ATDecodeContext), \
.init = ffat_init_encoder, \
.close = ffat_close_encoder, \
- .encode2 = ffat_encode, \
+ FF_CODEC_ENCODE_CB(ffat_encode), \
.flush = ffat_encode_flush, \
.p.priv_class = &ffat_##NAME##_enc_class, \
.p.capabilities = AV_CODEC_CAP_DELAY | \
@@ -102,7 +102,7 @@ const FFCodec ff_aura2_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_AURA2,
.init = aura_decode_init,
- .decode = aura_decode_frame,
+ FF_CODEC_DECODE_CB(aura_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -1246,7 +1246,7 @@ const FFCodec ff_av1_decoder = {
.priv_data_size = sizeof(AV1DecContext),
.init = av1_decode_init,
.close = av1_decode_free,
- .decode = av1_decode_frame,
+ FF_CODEC_DECODE_CB(av1_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_AVOID_PROBING,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
FF_CODEC_CAP_INIT_CLEANUP |
@@ -96,7 +96,7 @@ const FFCodec ff_avrn_decoder = {
.p.id = AV_CODEC_ID_AVRN,
.priv_data_size = sizeof(AVRnContext),
.init = init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -182,7 +182,7 @@ const FFCodec ff_avs_decoder = {
.p.id = AV_CODEC_ID_AVS,
.priv_data_size = sizeof(AvsContext),
.init = avs_decode_init,
- .decode = avs_decode_frame,
+ FF_CODEC_DECODE_CB(avs_decode_frame),
.close = avs_decode_end,
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -126,6 +126,6 @@ const FFCodec ff_avui_decoder = {
.p.id = AV_CODEC_ID_AVUI,
.p.capabilities = AV_CODEC_CAP_DR1,
.init = avui_decode_init,
- .decode = avui_decode_frame,
+ FF_CODEC_DECODE_CB(avui_decode_frame),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -99,6 +99,6 @@ const FFCodec ff_avui_encoder = {
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_EXPERIMENTAL,
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_UYVY422, AV_PIX_FMT_NONE },
.init = avui_encode_init,
- .encode2 = avui_encode_frame,
+ FF_CODEC_ENCODE_CB(avui_encode_frame),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -166,7 +166,7 @@ const FFCodec ff_bethsoftvid_decoder = {
.priv_data_size = sizeof(BethsoftvidContext),
.init = bethsoftvid_decode_init,
.close = bethsoftvid_decode_end,
- .decode = bethsoftvid_decode_frame,
+ FF_CODEC_DECODE_CB(bethsoftvid_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -183,7 +183,7 @@ const FFCodec ff_bfi_decoder = {
.priv_data_size = sizeof(BFIContext),
.init = bfi_decode_init,
.close = bfi_decode_close,
- .decode = bfi_decode_frame,
+ FF_CODEC_DECODE_CB(bfi_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -1427,7 +1427,7 @@ const FFCodec ff_bink_decoder = {
.priv_data_size = sizeof(BinkContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.flush = flush,
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -372,7 +372,7 @@ const FFCodec ff_binkaudio_rdft_decoder = {
.init = decode_init,
.flush = decode_flush,
.close = decode_end,
- .receive_frame = binkaudio_receive_frame,
+ FF_CODEC_RECEIVE_FRAME_CB(binkaudio_receive_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -386,7 +386,7 @@ const FFCodec ff_binkaudio_dct_decoder = {
.init = decode_init,
.flush = decode_flush,
.close = decode_end,
- .receive_frame = binkaudio_receive_frame,
+ FF_CODEC_RECEIVE_FRAME_CB(binkaudio_receive_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -224,7 +224,7 @@ const FFCodec ff_bintext_decoder = {
.p.id = AV_CODEC_ID_BINTEXT,
.priv_data_size = sizeof(XbinContext),
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -237,7 +237,7 @@ const FFCodec ff_xbin_decoder = {
.p.id = AV_CODEC_ID_XBIN,
.priv_data_size = sizeof(XbinContext),
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -250,7 +250,7 @@ const FFCodec ff_idf_decoder = {
.p.id = AV_CODEC_ID_IDF,
.priv_data_size = sizeof(XbinContext),
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -149,7 +149,7 @@ const FFCodec ff_bitpacked_decoder = {
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.priv_data_size = sizeof(struct BitpackedContext),
.init = bitpacked_init_decoder,
- .decode = bitpacked_decode,
+ FF_CODEC_DECODE_CB(bitpacked_decode),
.codec_tags = (const uint32_t []){
MKTAG('U', 'Y', 'V', 'Y'),
FF_CODEC_TAGS_END,
@@ -112,7 +112,7 @@ const FFCodec ff_bitpacked_encoder = {
.priv_data_size = sizeof(struct BitpackedContext),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.init = encode_init,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV422P10,
AV_PIX_FMT_NONE },
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -370,5 +370,5 @@ const FFCodec ff_bmp_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_BMP,
.p.capabilities = AV_CODEC_CAP_DR1,
- .decode = bmp_decode_frame,
+ FF_CODEC_DECODE_CB(bmp_decode_frame),
};
@@ -162,7 +162,7 @@ const FFCodec ff_bmp_encoder = {
.p.id = AV_CODEC_ID_BMP,
.p.capabilities = AV_CODEC_CAP_DR1,
.init = bmp_encode_init,
- .encode2 = bmp_encode_frame,
+ FF_CODEC_ENCODE_CB(bmp_encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_BGRA, AV_PIX_FMT_BGR24,
AV_PIX_FMT_RGB565, AV_PIX_FMT_RGB555, AV_PIX_FMT_RGB444,
@@ -84,7 +84,7 @@ const FFCodec ff_bmv_audio_decoder = {
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_BMV_AUDIO,
.init = bmv_aud_decode_init,
- .decode = bmv_aud_decode_frame,
+ FF_CODEC_DECODE_CB(bmv_aud_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -292,7 +292,7 @@ const FFCodec ff_bmv_video_decoder = {
.p.id = AV_CODEC_ID_BMV_VIDEO,
.priv_data_size = sizeof(BMVDecContext),
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -290,5 +290,5 @@ const FFCodec ff_brender_pix_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_BRENDER_PIX,
.p.capabilities = AV_CODEC_CAP_DR1,
- .decode = pix_decode_frame,
+ FF_CODEC_DECODE_CB(pix_decode_frame),
};
@@ -266,7 +266,7 @@ const FFCodec ff_c93_decoder = {
.priv_data_size = sizeof(C93DecoderContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -1318,7 +1318,7 @@ const FFCodec ff_cavs_decoder = {
.priv_data_size = sizeof(AVSContext),
.init = ff_cavs_init,
.close = ff_cavs_end,
- .decode = cavs_decode_frame,
+ FF_CODEC_DECODE_CB(cavs_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
.flush = cavs_flush,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -955,6 +955,6 @@ const FFCodec ff_ccaption_decoder = {
.init = init_decoder,
.close = close_decoder,
.flush = flush_decoder,
- .decode_sub = decode,
+ FF_CODEC_DECODE_SUB_CB(decode),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -395,7 +395,7 @@ const FFCodec ff_cdgraphics_decoder = {
.priv_data_size = sizeof(CDGraphicsContext),
.init = cdg_decode_init,
.close = cdg_decode_end,
- .decode = cdg_decode_frame,
+ FF_CODEC_DECODE_CB(cdg_decode_frame),
.flush = cdg_decode_flush,
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -451,7 +451,7 @@ const FFCodec ff_cdtoons_decoder = {
.priv_data_size = sizeof(CDToonsContext),
.init = cdtoons_decode_init,
.close = cdtoons_decode_end,
- .decode = cdtoons_decode_frame,
+ FF_CODEC_DECODE_CB(cdtoons_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.flush = cdtoons_flush,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -344,7 +344,7 @@ const FFCodec ff_cdxl_decoder = {
.priv_data_size = sizeof(CDXLVideoContext),
.init = cdxl_decode_init,
.close = cdxl_decode_end,
- .decode = cdxl_decode_frame,
+ FF_CODEC_DECODE_CB(cdxl_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -1463,7 +1463,7 @@ const FFCodec ff_cfhd_decoder = {
.priv_data_size = sizeof(CFHDContext),
.init = cfhd_init,
.close = cfhd_close,
- .decode = cfhd_decode,
+ FF_CODEC_DECODE_CB(cfhd_decode),
.update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -854,7 +854,7 @@ const FFCodec ff_cfhd_encoder = {
.p.priv_class = &cfhd_class,
.init = cfhd_encode_init,
.close = cfhd_encode_close,
- .encode2 = cfhd_encode_frame,
+ FF_CODEC_ENCODE_CB(cfhd_encode_frame),
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.p.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_YUV422P10,
@@ -514,7 +514,7 @@ const FFCodec ff_cinepak_decoder = {
.priv_data_size = sizeof(CinepakContext),
.init = cinepak_decode_init,
.close = cinepak_decode_end,
- .decode = cinepak_decode_frame,
+ FF_CODEC_DECODE_CB(cinepak_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -1201,7 +1201,7 @@ const FFCodec ff_cinepak_encoder = {
.p.id = AV_CODEC_ID_CINEPAK,
.priv_data_size = sizeof(CinepakEncContext),
.init = cinepak_encode_init,
- .encode2 = cinepak_encode_frame,
+ FF_CODEC_ENCODE_CB(cinepak_encode_frame),
.close = cinepak_encode_end,
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_RGB24, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE },
.p.priv_class = &cinepak_class,
@@ -775,7 +775,7 @@ const FFCodec ff_clearvideo_decoder = {
.priv_data_size = sizeof(CLVContext),
.init = clv_decode_init,
.close = clv_decode_end,
- .decode = clv_decode_frame,
+ FF_CODEC_DECODE_CB(clv_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -87,7 +87,7 @@ const FFCodec ff_cljr_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_CLJR,
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -115,7 +115,7 @@ const FFCodec ff_cljr_encoder = {
.p.id = AV_CODEC_ID_CLJR,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(CLJRContext),
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV411P,
AV_PIX_FMT_NONE },
.p.priv_class = &cljr_class,
@@ -498,7 +498,7 @@ const FFCodec ff_cllc_decoder = {
.p.id = AV_CODEC_ID_CLLC,
.priv_data_size = sizeof(CLLCContext),
.init = cllc_decode_init,
- .decode = cllc_decode_frame,
+ FF_CODEC_DECODE_CB(cllc_decode_frame),
.close = cllc_decode_close,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -169,7 +169,7 @@ const FFCodec ff_comfortnoise_decoder = {
.p.id = AV_CODEC_ID_COMFORT_NOISE,
.priv_data_size = sizeof(CNGContext),
.init = cng_decode_init,
- .decode = cng_decode_frame,
+ FF_CODEC_DECODE_CB(cng_decode_frame),
.flush = cng_decode_flush,
.close = cng_decode_close,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
@@ -104,7 +104,7 @@ const FFCodec ff_comfortnoise_encoder = {
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(CNGContext),
.init = cng_encode_init,
- .encode2 = cng_encode_frame,
+ FF_CODEC_ENCODE_CB(cng_encode_frame),
.close = cng_encode_close,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
@@ -88,6 +88,27 @@ struct AVCodecContext;
struct AVSubtitle;
struct AVPacket;
+enum FFCodecType {
+ /* The codec is a decoder using the decode callback;
+ * audio and video codecs only. */
+ FF_CODEC_CB_TYPE_DECODE,
+ /* The codec is a decoder using the decode_sub callback;
+ * subtitle codecs only. */
+ FF_CODEC_CB_TYPE_DECODE_SUB,
+ /* The codec is a decoder using the receive_frame callback;
+ * audio and video codecs only. */
+ FF_CODEC_CB_TYPE_RECEIVE_FRAME,
+ /* The codec is an encoder using the encode callback;
+ * audio and video codecs only. */
+ FF_CODEC_CB_TYPE_ENCODE,
+ /* The codec is an encoder using the encode_sub callback;
+ * subtitle codecs only. */
+ FF_CODEC_CB_TYPE_ENCODE_SUB,
+ /* The codec is an encoder using the receive_packet callback;
+ * audio and video codecs only. */
+ FF_CODEC_CB_TYPE_RECEIVE_PACKET,
+};
+
typedef struct FFCodec {
/**
* The public AVCodec. See codec.h for it.
@@ -97,7 +118,14 @@ typedef struct FFCodec {
/**
* Internal codec capabilities FF_CODEC_CAP_*.
*/
- int caps_internal;
+ unsigned caps_internal:29;
+
+ /**
+ * This field determines the type of the codec (decoder/encoder)
+ * and also the exact callback cb implemented by the codec.
+ * cb_type uses enum FFCodecType values.
+ */
+ unsigned cb_type:3;
int priv_data_size;
/**
@@ -133,53 +161,69 @@ typedef struct FFCodec {
void (*init_static_data)(struct FFCodec *codec);
int (*init)(struct AVCodecContext *);
- int (*encode_sub)(struct AVCodecContext *, uint8_t *buf, int buf_size,
- const struct AVSubtitle *sub);
- /**
- * Encode data to an AVPacket.
- *
- * @param avctx codec context
- * @param avpkt output AVPacket
- * @param[in] frame AVFrame containing the raw data to be encoded
- * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a
- * non-empty packet was returned in avpkt.
- * @return 0 on success, negative error code on failure
- */
- int (*encode2)(struct AVCodecContext *avctx, struct AVPacket *avpkt,
- const struct AVFrame *frame, int *got_packet_ptr);
- /**
- * Decode to an AVFrame.
- *
- * @param avctx codec context
- * @param frame AVFrame for output
- * @param[out] got_frame_ptr decoder sets to 0 or 1 to indicate that a
- * non-empty frame was returned in outdata.
- * @param[in] avpkt AVPacket containing the data to be decoded
- * @return amount of bytes read from the packet on success, negative error
- * code on failure
- */
- int (*decode)(struct AVCodecContext *avctx, struct AVFrame *frame,
- int *got_frame_ptr, struct AVPacket *avpkt);
- /**
- * Decode subtitle data. Same as decode except that it uses
- * a struct AVSubtitle structure for output.
- */
- int (*decode_sub)(struct AVCodecContext *avctx, struct AVSubtitle *sub,
+
+ union {
+ /**
+ * Decode to an AVFrame.
+ * cb is in this state if cb_type is FF_CODEC_CB_TYPE_DECODE.
+ *
+ * @param avctx codec context
+ * @param[out] frame AVFrame for output
+ * @param[out] got_frame_ptr decoder sets to 0 or 1 to indicate that
+ * a non-empty frame was returned in frame.
+ * @param[in] avpkt AVPacket containing the data to be decoded
+ * @return amount of bytes read from the packet on success,
+ * negative error code on failure
+ */
+ int (*decode)(struct AVCodecContext *avctx, struct AVFrame *frame,
int *got_frame_ptr, struct AVPacket *avpkt);
+ /**
+ * Decode subtitle data to an AVSubtitle.
+ * cb is in this state if cb_type is FF_CODEC_CB_TYPE_DECODE_SUB.
+ *
+ * Apart from that this is like the decode callback.
+ */
+ int (*decode_sub)(struct AVCodecContext *avctx, struct AVSubtitle *sub,
+ int *got_frame_ptr, struct AVPacket *avpkt);
+ /**
+ * Decode API with decoupled packet/frame dataflow.
+ * cb is in this state if cb_type is FF_CODEC_CB_TYPE_RECEIVE_FRAME.
+ *
+ * This function is called to get one output frame. It should call
+ * ff_decode_get_packet() to obtain input data.
+ */
+ int (*receive_frame)(struct AVCodecContext *avctx, struct AVFrame *frame);
+ /**
+ * Encode data to an AVPacket.
+         * cb is in this state if cb_type is FF_CODEC_CB_TYPE_ENCODE.
+ *
+ * @param avctx codec context
+ * @param[out] avpkt output AVPacket
+ * @param[in] frame AVFrame containing the input to be encoded
+ * @param[out] got_packet_ptr encoder sets to 0 or 1 to indicate that a
+ * non-empty packet was returned in avpkt.
+ * @return 0 on success, negative error code on failure
+ */
+ int (*encode)(struct AVCodecContext *avctx, struct AVPacket *avpkt,
+ const struct AVFrame *frame, int *got_packet_ptr);
+ /**
+ * Encode subtitles to a raw buffer.
+ * cb is in this state if cb_type is FF_CODEC_CB_TYPE_ENCODE_SUB.
+ */
+ int (*encode_sub)(struct AVCodecContext *avctx, uint8_t *buf,
+ int buf_size, const struct AVSubtitle *sub);
+ /**
+ * Encode API with decoupled frame/packet dataflow.
+ * cb is in this state if cb_type is FF_CODEC_CB_TYPE_RECEIVE_PACKET.
+ *
+ * This function is called to get one output packet.
+ * It should call ff_encode_get_frame() to obtain input data.
+ */
+ int (*receive_packet)(struct AVCodecContext *avctx, struct AVPacket *avpkt);
+ } cb;
+
int (*close)(struct AVCodecContext *);
- /**
- * Encode API with decoupled frame/packet dataflow. This function is called
- * to get one output packet. It should call ff_encode_get_frame() to obtain
- * input data.
- */
- int (*receive_packet)(struct AVCodecContext *avctx, struct AVPacket *avpkt);
- /**
- * Decode API with decoupled packet/frame dataflow. This function is called
- * to get one output frame. It should call ff_decode_get_packet() to obtain
- * input data.
- */
- int (*receive_frame)(struct AVCodecContext *avctx, struct AVFrame *frame);
/**
* Flush buffers.
* Will be called when seeking
@@ -207,6 +251,25 @@ typedef struct FFCodec {
const uint32_t *codec_tags;
} FFCodec;
+#define FF_CODEC_DECODE_CB(func) \
+ .cb_type = FF_CODEC_CB_TYPE_DECODE, \
+ .cb.decode = (func)
+#define FF_CODEC_DECODE_SUB_CB(func) \
+ .cb_type = FF_CODEC_CB_TYPE_DECODE_SUB, \
+ .cb.decode_sub = (func)
+#define FF_CODEC_RECEIVE_FRAME_CB(func) \
+ .cb_type = FF_CODEC_CB_TYPE_RECEIVE_FRAME, \
+ .cb.receive_frame = (func)
+#define FF_CODEC_ENCODE_CB(func) \
+ .cb_type = FF_CODEC_CB_TYPE_ENCODE, \
+ .cb.encode = (func)
+#define FF_CODEC_ENCODE_SUB_CB(func) \
+ .cb_type = FF_CODEC_CB_TYPE_ENCODE_SUB, \
+ .cb.encode_sub = (func)
+#define FF_CODEC_RECEIVE_PACKET_CB(func) \
+ .cb_type = FF_CODEC_CB_TYPE_RECEIVE_PACKET, \
+ .cb.receive_packet = (func)
+
static av_always_inline const FFCodec *ffcodec(const AVCodec *codec)
{
return (const FFCodec*)codec;
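
For reference, a converted codec table entry now reads as below. This is an illustrative sketch only — the "foo" identifiers (AV_CODEC_ID_FOO, FooContext, foo_decode_*) are hypothetical and not part of this patch. It shows that FF_CODEC_DECODE_CB() expands to the two designated initializers defined above (.cb_type and .cb.decode), so it replaces the old ".decode =" line verbatim:

/* Illustrative sketch; "foo" names are hypothetical. */
const FFCodec ff_foo_decoder = {
    .p.name         = "foo",
    .p.long_name    = NULL_IF_CONFIG_SMALL("Hypothetical example decoder"),
    .p.type         = AVMEDIA_TYPE_VIDEO,
    .p.id           = AV_CODEC_ID_FOO,            /* hypothetical codec ID */
    .priv_data_size = sizeof(FooContext),         /* hypothetical context */
    .init           = foo_decode_init,
    .close          = foo_decode_end,
    FF_CODEC_DECODE_CB(foo_decode_frame),         /* sets .cb_type and .cb.decode */
    .p.capabilities = AV_CODEC_CAP_DR1,
    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
};
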
@@ -1304,7 +1304,7 @@ const FFCodec ff_cook_decoder = {
.priv_data_size = sizeof(COOKContext),
.init = cook_decode_init,
.close = cook_decode_close,
- .decode = cook_decode_frame,
+ FF_CODEC_DECODE_CB(cook_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
@@ -230,7 +230,7 @@ const FFCodec ff_cpia_decoder = {
.priv_data_size = sizeof(CpiaContext),
.init = cpia_decode_init,
.close = cpia_decode_end,
- .decode = cpia_decode_frame,
+ FF_CODEC_DECODE_CB(cpia_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -429,7 +429,7 @@ const FFCodec ff_cri_decoder = {
.p.id = AV_CODEC_ID_CRI,
.priv_data_size = sizeof(CRIContext),
.init = cri_decode_init,
- .decode = cri_decode_frame,
+ FF_CODEC_DECODE_CB(cri_decode_frame),
.close = cri_decode_close,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -783,7 +783,7 @@ static int crystalhd_receive_frame(AVCodecContext *avctx, AVFrame *frame)
.p.priv_class = &x##_crystalhd_class, \
.init = init, \
.close = uninit, \
- .receive_frame = crystalhd_receive_frame, \
+ FF_CODEC_RECEIVE_FRAME_CB(crystalhd_receive_frame), \
.flush = flush, \
.bsfs = bsf_name, \
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HARDWARE, \
@@ -175,7 +175,7 @@ const FFCodec ff_cscd_decoder = {
.priv_data_size = sizeof(CamStudioContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -1112,7 +1112,7 @@ static const AVCodecHWConfigInternal *const cuvid_hw_configs[] = {
.p.priv_class = &x##_cuvid_class, \
.init = cuvid_decode_init, \
.close = cuvid_decode_end, \
- .receive_frame = cuvid_output_frame, \
+ FF_CODEC_RECEIVE_FRAME_CB(cuvid_output_frame), \
.flush = cuvid_flush, \
.bsfs = bsf_name, \
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HARDWARE, \
@@ -184,7 +184,7 @@ const FFCodec ff_aura_decoder = {
.p.id = AV_CODEC_ID_AURA,
.priv_data_size = sizeof(CyuvDecodeContext),
.init = cyuv_decode_init,
- .decode = cyuv_decode_frame,
+ FF_CODEC_DECODE_CB(cyuv_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -198,7 +198,7 @@ const FFCodec ff_cyuv_decoder = {
.p.id = AV_CODEC_ID_CYUV,
.priv_data_size = sizeof(CyuvDecodeContext),
.init = cyuv_decode_init,
- .decode = cyuv_decode_frame,
+ FF_CODEC_DECODE_CB(cyuv_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -417,7 +417,7 @@ const FFCodec ff_dca_decoder = {
.p.id = AV_CODEC_ID_DTS,
.priv_data_size = sizeof(DCAContext),
.init = dcadec_init,
- .decode = dcadec_decode_frame,
+ FF_CODEC_DECODE_CB(dcadec_decode_frame),
.close = dcadec_close,
.flush = dcadec_flush,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
@@ -1249,7 +1249,7 @@ const FFCodec ff_dca_encoder = {
.priv_data_size = sizeof(DCAEncContext),
.init = encode_init,
.close = encode_close,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S32,
AV_SAMPLE_FMT_NONE },
@@ -753,7 +753,7 @@ const FFCodec ff_dds_decoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("DirectDraw Surface image decoder"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_DDS,
- .decode = dds_decode,
+ FF_CODEC_DECODE_CB(dds_decode),
.priv_data_size = sizeof(DDSContext),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE
@@ -322,7 +322,7 @@ static inline int decode_simple_internal(AVCodecContext *avctx, AVFrame *frame,
if (HAVE_THREADS && avctx->active_thread_type & FF_THREAD_FRAME) {
ret = ff_thread_decode_frame(avctx, frame, &got_frame, pkt);
} else {
- ret = codec->decode(avctx, frame, &got_frame, pkt);
+ ret = codec->cb.decode(avctx, frame, &got_frame, pkt);
if (!(codec->caps_internal & FF_CODEC_CAP_SETS_PKT_DTS))
frame->pkt_dts = pkt->dts;
@@ -546,8 +546,8 @@ static int decode_receive_frame_internal(AVCodecContext *avctx, AVFrame *frame)
av_assert0(!frame->buf[0]);
- if (codec->receive_frame) {
- ret = codec->receive_frame(avctx, frame);
+ if (codec->cb_type == FF_CODEC_CB_TYPE_RECEIVE_FRAME) {
+ ret = codec->cb.receive_frame(avctx, frame);
if (ret != AVERROR(EAGAIN))
av_packet_unref(avci->last_pkt_props);
} else
@@ -862,7 +862,7 @@ int avcodec_decode_subtitle2(AVCodecContext *avctx, AVSubtitle *sub,
if (avctx->pkt_timebase.num && avpkt->pts != AV_NOPTS_VALUE)
sub->pts = av_rescale_q(avpkt->pts,
avctx->pkt_timebase, AV_TIME_BASE_Q);
- ret = ffcodec(avctx->codec)->decode_sub(avctx, sub, got_sub_ptr, pkt);
+ ret = ffcodec(avctx->codec)->cb.decode_sub(avctx, sub, got_sub_ptr, pkt);
if (pkt == avci->buffer_pkt) // did we recode?
av_packet_unref(avci->buffer_pkt);
if (ret < 0) {
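
The generic code in decode.c now selects the active union member via cb_type instead of checking function pointers for NULL. A minimal sketch of that pattern, using only the names introduced above (dispatch_decode is a hypothetical helper, not the actual decode.c code path):

/* Minimal sketch of the decode-side dispatch on cb_type. */
static int dispatch_decode(AVCodecContext *avctx, AVFrame *frame,
                           int *got_frame, AVPacket *pkt)
{
    const FFCodec *codec = ffcodec(avctx->codec);

    if (codec->cb_type == FF_CODEC_CB_TYPE_RECEIVE_FRAME)
        return codec->cb.receive_frame(avctx, frame);   /* decoupled API */

    /* Otherwise the classic decode callback is the active union member. */
    av_assert0(codec->cb_type == FF_CODEC_CB_TYPE_DECODE);
    return codec->cb.decode(avctx, frame, got_frame, pkt);
}
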
@@ -429,7 +429,7 @@ const FFCodec ff_dfa_decoder = {
.priv_data_size = sizeof(DfaContext),
.init = dfa_decode_init,
.close = dfa_decode_end,
- .decode = dfa_decode_frame,
+ FF_CODEC_DECODE_CB(dfa_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -131,7 +131,7 @@ const FFCodec ff_dfpwm_decoder = {
.p.id = AV_CODEC_ID_DFPWM,
.priv_data_size = sizeof(DFPWMState),
.init = dfpwm_dec_init,
- .decode = dfpwm_dec_frame,
+ FF_CODEC_DECODE_CB(dfpwm_dec_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -114,7 +114,7 @@ const FFCodec ff_dfpwm_encoder = {
.p.id = AV_CODEC_ID_DFPWM,
.priv_data_size = sizeof(DFPWMState),
.init = dfpwm_enc_init,
- .encode2 = dfpwm_enc_frame,
+ FF_CODEC_ENCODE_CB(dfpwm_enc_frame),
.p.sample_fmts = (const enum AVSampleFormat[]){AV_SAMPLE_FMT_U8, AV_SAMPLE_FMT_NONE},
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_VARIABLE_FRAME_SIZE,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -2364,7 +2364,7 @@ const FFCodec ff_dirac_decoder = {
.priv_data_size = sizeof(DiracContext),
.init = dirac_decode_init,
.close = dirac_decode_end,
- .decode = dirac_decode_frame,
+ FF_CODEC_DECODE_CB(dirac_decode_frame),
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
.flush = dirac_decode_flush,
@@ -732,7 +732,7 @@ const FFCodec ff_dnxhd_decoder = {
.priv_data_size = sizeof(DNXHDContext),
.init = dnxhd_decode_init,
.close = dnxhd_decode_close,
- .decode = dnxhd_decode_frame,
+ FF_CODEC_DECODE_CB(dnxhd_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
AV_CODEC_CAP_SLICE_THREADS,
.p.profiles = NULL_IF_CONFIG_SMALL(ff_dnxhd_profiles),
@@ -1361,7 +1361,7 @@ const FFCodec ff_dnxhd_encoder = {
AV_CODEC_CAP_SLICE_THREADS,
.priv_data_size = sizeof(DNXHDEncContext),
.init = dnxhd_encode_init,
- .encode2 = dnxhd_encode_picture,
+ FF_CODEC_ENCODE_CB(dnxhd_encode_picture),
.close = dnxhd_encode_end,
.p.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_YUV422P,
@@ -1305,7 +1305,7 @@ const FFCodec ff_dolby_e_decoder = {
.priv_data_size = sizeof(DBEDecodeContext),
.p.priv_class = &dolby_e_decoder_class,
.init = dolby_e_init,
- .decode = dolby_e_decode_frame,
+ FF_CODEC_DECODE_CB(dolby_e_decode_frame),
.close = dolby_e_close,
.flush = dolby_e_flush,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
@@ -418,7 +418,7 @@ const FFCodec ff_ ## name_ ## _decoder = { \
.p.capabilities = AV_CODEC_CAP_DR1, \
.priv_data_size = sizeof(DPCMContext), \
.init = dpcm_decode_init, \
- .decode = dpcm_decode_frame, \
+ FF_CODEC_DECODE_CB(dpcm_decode_frame), \
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE, \
}
@@ -765,6 +765,6 @@ const FFCodec ff_dpx_decoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("DPX (Digital Picture Exchange) image"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_DPX,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
};
@@ -282,7 +282,7 @@ const FFCodec ff_dpx_encoder = {
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(DPXContext),
.init = encode_init,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_GRAY8,
AV_PIX_FMT_RGB24, AV_PIX_FMT_RGBA, AV_PIX_FMT_ABGR,
@@ -121,7 +121,7 @@ const FFCodec ff_ ## name_ ## _decoder = { \
.p.type = AVMEDIA_TYPE_AUDIO, \
.p.id = AV_CODEC_ID_##id_, \
.init = decode_init, \
- .decode = decode_frame, \
+ FF_CODEC_DECODE_CB(decode_frame), \
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS, \
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLTP, \
AV_SAMPLE_FMT_NONE }, \
@@ -128,7 +128,7 @@ const FFCodec ff_dsicinaudio_decoder = {
.p.id = AV_CODEC_ID_DSICINAUDIO,
.priv_data_size = sizeof(CinAudioContext),
.init = cinaudio_decode_init,
- .decode = cinaudio_decode_frame,
+ FF_CODEC_DECODE_CB(cinaudio_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -329,7 +329,7 @@ const FFCodec ff_dsicinvideo_decoder = {
.priv_data_size = sizeof(CinVideoContext),
.init = cinvideo_decode_init,
.close = cinvideo_decode_end,
- .decode = cinvideo_decode_frame,
+ FF_CODEC_DECODE_CB(cinvideo_decode_frame),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.p.capabilities = AV_CODEC_CAP_DR1,
};
@@ -779,7 +779,7 @@ const FFCodec ff_dss_sp_decoder = {
.p.id = AV_CODEC_ID_DSS_SP,
.priv_data_size = sizeof(DssSpContext),
.init = dss_sp_decode_init,
- .decode = dss_sp_decode_frame,
+ FF_CODEC_DECODE_CB(dss_sp_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -386,7 +386,7 @@ const FFCodec ff_dst_decoder = {
.p.id = AV_CODEC_ID_DST,
.priv_data_size = sizeof(DSTContext),
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_NONE },
@@ -124,7 +124,7 @@ const FFCodec ff_dvaudio_decoder = {
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_DVAUDIO,
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(DVAudioContext),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -1746,7 +1746,7 @@ const FFCodec ff_dvbsub_decoder = {
.priv_data_size = sizeof(DVBSubContext),
.init = dvbsub_init_decoder,
.close = dvbsub_close_decoder,
- .decode_sub = dvbsub_decode,
+ FF_CODEC_DECODE_SUB_CB(dvbsub_decode),
.p.priv_class = &dvbsubdec_class,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -512,5 +512,5 @@ const FFCodec ff_dvbsub_encoder = {
.p.type = AVMEDIA_TYPE_SUBTITLE,
.p.id = AV_CODEC_ID_DVB_SUBTITLE,
.priv_data_size = sizeof(DVBSubtitleContext),
- .encode_sub = dvbsub_encode,
+ FF_CODEC_ENCODE_SUB_CB(dvbsub_encode),
};
@@ -688,7 +688,7 @@ const FFCodec ff_dvvideo_decoder = {
.p.id = AV_CODEC_ID_DVVIDEO,
.priv_data_size = sizeof(DVVideoContext),
.init = dvvideo_decode_init,
- .decode = dvvideo_decode_frame,
+ FF_CODEC_DECODE_CB(dvvideo_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS,
.p.max_lowres = 3,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -760,7 +760,7 @@ const FFCodec ff_dvdsub_decoder = {
.p.id = AV_CODEC_ID_DVD_SUBTITLE,
.priv_data_size = sizeof(DVDSubContext),
.init = dvdsub_init,
- .decode_sub = dvdsub_decode,
+ FF_CODEC_DECODE_SUB_CB(dvdsub_decode),
.flush = dvdsub_flush,
.p.priv_class = &dvdsub_class,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -500,7 +500,7 @@ const FFCodec ff_dvdsub_encoder = {
.p.type = AVMEDIA_TYPE_SUBTITLE,
.p.id = AV_CODEC_ID_DVD_SUBTITLE,
.init = dvdsub_init,
- .encode_sub = dvdsub_encode,
+ FF_CODEC_ENCODE_SUB_CB(dvdsub_encode),
.p.priv_class = &dvdsubenc_class,
.priv_data_size = sizeof(DVDSubtitleContext),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -1215,7 +1215,7 @@ const FFCodec ff_dvvideo_encoder = {
AV_CODEC_CAP_SLICE_THREADS,
.priv_data_size = sizeof(DVVideoContext),
.init = dvvideo_encode_init,
- .encode2 = dvvideo_encode_frame,
+ FF_CODEC_ENCODE_CB(dvvideo_encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_YUV411P, AV_PIX_FMT_YUV422P,
AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE
@@ -369,7 +369,7 @@ const FFCodec ff_dxa_decoder = {
.priv_data_size = sizeof(DxaDecContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -875,6 +875,6 @@ const FFCodec ff_dxtory_decoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Dxtory"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_DXTORY,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
};
@@ -1266,7 +1266,7 @@ const FFCodec ff_dxv_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_DXV,
.init = dxv_init,
- .decode = dxv_decode,
+ FF_CODEC_DECODE_CB(dxv_decode),
.close = dxv_close,
.priv_data_size = sizeof(DXVContext),
.p.capabilities = AV_CODEC_CAP_DR1 |
@@ -258,7 +258,7 @@ const FFCodec ff_eac3_encoder = {
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(AC3EncodeContext),
.init = ff_ac3_float_encode_init,
- .encode2 = ff_ac3_float_encode_frame,
+ FF_CODEC_ENCODE_CB(ff_ac3_float_encode_frame),
.close = ff_ac3_encode_close,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
@@ -237,7 +237,7 @@ const FFCodec ff_eacmv_decoder = {
.priv_data_size = sizeof(CmvContext),
.init = cmv_decode_init,
.close = cmv_decode_end,
- .decode = cmv_decode_frame,
+ FF_CODEC_DECODE_CB(cmv_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -348,7 +348,7 @@ const FFCodec ff_eamad_decoder = {
.priv_data_size = sizeof(MadContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -253,7 +253,7 @@ const FFCodec ff_eatgq_decoder = {
.p.id = AV_CODEC_ID_TGQ,
.priv_data_size = sizeof(TgqContext),
.init = tgq_decode_init,
- .decode = tgq_decode_frame,
+ FF_CODEC_DECODE_CB(tgq_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -366,7 +366,7 @@ const FFCodec ff_eatgv_decoder = {
.priv_data_size = sizeof(TgvContext),
.init = tgv_decode_init,
.close = tgv_decode_end,
- .decode = tgv_decode_frame,
+ FF_CODEC_DECODE_CB(tgv_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -189,7 +189,7 @@ const FFCodec ff_eatqi_decoder = {
.priv_data_size = sizeof(TqiContext),
.init = tqi_decode_init,
.close = tqi_decode_end,
- .decode = tqi_decode_frame,
+ FF_CODEC_DECODE_CB(tqi_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -152,7 +152,7 @@ int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
return -1;
}
- ret = ffcodec(avctx->codec)->encode_sub(avctx, buf, buf_size, sub);
+ ret = ffcodec(avctx->codec)->cb.encode_sub(avctx, buf, buf_size, sub);
avctx->frame_number++;
return ret;
}
@@ -202,7 +202,7 @@ static int encode_simple_internal(AVCodecContext *avctx, AVPacket *avpkt)
got_packet = 0;
- av_assert0(codec->encode2);
+ av_assert0(codec->cb_type == FF_CODEC_CB_TYPE_ENCODE);
if (CONFIG_FRAME_THREAD_ENCODER &&
avci->frame_thread_encoder && (avctx->active_thread_type & FF_THREAD_FRAME))
@@ -212,7 +212,7 @@ static int encode_simple_internal(AVCodecContext *avctx, AVPacket *avpkt)
* no sense to use the properties of the current frame anyway). */
ret = ff_thread_video_encode_frame(avctx, avpkt, frame, &got_packet);
else {
- ret = codec->encode2(avctx, avpkt, frame, &got_packet);
+ ret = codec->cb.encode(avctx, avpkt, frame, &got_packet);
if (avctx->codec->type == AVMEDIA_TYPE_VIDEO && !ret && got_packet &&
!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
avpkt->pts = avpkt->dts = frame->pts;
@@ -292,8 +292,8 @@ static int encode_receive_packet_internal(AVCodecContext *avctx, AVPacket *avpkt
return AVERROR(EINVAL);
}
- if (ffcodec(avctx->codec)->receive_packet) {
- ret = ffcodec(avctx->codec)->receive_packet(avctx, avpkt);
+ if (ffcodec(avctx->codec)->cb_type == FF_CODEC_CB_TYPE_RECEIVE_PACKET) {
+ ret = ffcodec(avctx->codec)->cb.receive_packet(avctx, avpkt);
if (ret < 0)
av_packet_unref(avpkt);
else
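
The encode path in encode.c is selected the same way. A minimal sketch with a hypothetical helper name (dispatch_encode), mirroring the decode-side logic and exercising the encode/receive_packet union members:

/* Minimal sketch of the encode-side dispatch on cb_type. */
static int dispatch_encode(AVCodecContext *avctx, AVPacket *avpkt,
                           const AVFrame *frame, int *got_packet)
{
    const FFCodec *codec = ffcodec(avctx->codec);

    if (codec->cb_type == FF_CODEC_CB_TYPE_RECEIVE_PACKET)
        return codec->cb.receive_packet(avctx, avpkt);  /* pulls its own input */

    av_assert0(codec->cb_type == FF_CODEC_CB_TYPE_ENCODE);
    return codec->cb.encode(avctx, avpkt, frame, got_packet);
}
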
@@ -383,7 +383,7 @@ const FFCodec ff_escape124_decoder = {
.priv_data_size = sizeof(Escape124Context),
.init = escape124_decode_init,
.close = escape124_decode_close,
- .decode = escape124_decode_frame,
+ FF_CODEC_DECODE_CB(escape124_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -353,7 +353,7 @@ const FFCodec ff_escape130_decoder = {
.priv_data_size = sizeof(Escape130Context),
.init = escape130_decode_init,
.close = escape130_decode_close,
- .decode = escape130_decode_frame,
+ FF_CODEC_DECODE_CB(escape130_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -935,7 +935,7 @@ const FFCodec ff_evrc_decoder = {
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_EVRC,
.init = evrc_decode_init,
- .decode = evrc_decode_frame,
+ FF_CODEC_DECODE_CB(evrc_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.priv_data_size = sizeof(EVRCContext),
.p.priv_class = &evrcdec_class,
@@ -2348,7 +2348,7 @@ const FFCodec ff_exr_decoder = {
.priv_data_size = sizeof(EXRContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
AV_CODEC_CAP_SLICE_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -543,7 +543,7 @@ const FFCodec ff_exr_encoder = {
.p.id = AV_CODEC_ID_EXR,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.init = encode_init,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.close = encode_close,
.p.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_GBRPF32,
@@ -194,7 +194,7 @@ const FFCodec ff_fastaudio_decoder = {
.p.id = AV_CODEC_ID_FASTAUDIO,
.priv_data_size = sizeof(FastAudioContext),
.init = fastaudio_init,
- .decode = fastaudio_decode,
+ FF_CODEC_DECODE_CB(fastaudio_decode),
.close = fastaudio_close,
.p.capabilities = AV_CODEC_CAP_DR1,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
@@ -1060,7 +1060,7 @@ const FFCodec ff_ffv1_decoder = {
.priv_data_size = sizeof(FFV1Context),
.init = decode_init,
.close = ff_ffv1_close,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
.p.capabilities = AV_CODEC_CAP_DR1 /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/ |
AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS,
@@ -1284,7 +1284,7 @@ const FFCodec ff_ffv1_encoder = {
.p.id = AV_CODEC_ID_FFV1,
.priv_data_size = sizeof(FFV1Context),
.init = encode_init,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.close = encode_close,
.p.capabilities = AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_DELAY,
.p.pix_fmts = (const enum AVPixelFormat[]) {
@@ -467,7 +467,7 @@ const FFCodec ff_ffwavesynth_decoder = {
.priv_data_size = sizeof(struct wavesynth_context),
.init = wavesynth_init,
.close = wavesynth_close,
- .decode = wavesynth_decode,
+ FF_CODEC_DECODE_CB(wavesynth_decode),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -491,7 +491,7 @@ const FFCodec ff_fic_decoder = {
.p.id = AV_CODEC_ID_FIC,
.priv_data_size = sizeof(FICContext),
.init = fic_decode_init,
- .decode = fic_decode_frame,
+ FF_CODEC_DECODE_CB(fic_decode_frame),
.close = fic_decode_close,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS,
.p.priv_class = &fic_decoder_class,
@@ -329,5 +329,5 @@ const FFCodec ff_fits_decoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("Flexible Image Transport System"),
.p.priv_class = &fits_decoder_class,
.priv_data_size = sizeof(FITSContext),
- .decode = fits_decode_frame,
+ FF_CODEC_DECODE_CB(fits_decode_frame),
};
@@ -116,7 +116,7 @@ const FFCodec ff_fits_encoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_FITS,
.p.capabilities = AV_CODEC_CAP_DR1,
- .encode2 = fits_encode_frame,
+ FF_CODEC_ENCODE_CB(fits_encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_GBRAP16BE,
AV_PIX_FMT_GBRP16BE,
AV_PIX_FMT_GBRP,
@@ -665,7 +665,7 @@ const FFCodec ff_flac_decoder = {
.priv_data_size = sizeof(FLACContext),
.init = flac_decode_init,
.close = flac_decode_close,
- .decode = flac_decode_frame,
+ FF_CODEC_DECODE_CB(flac_decode_frame),
.p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
AV_CODEC_CAP_DR1 |
AV_CODEC_CAP_FRAME_THREADS,
@@ -1467,7 +1467,7 @@ const FFCodec ff_flac_encoder = {
AV_CODEC_CAP_SMALL_LAST_FRAME,
.priv_data_size = sizeof(FlacEncodeContext),
.init = flac_encode_init,
- .encode2 = flac_encode_frame,
+ FF_CODEC_ENCODE_CB(flac_encode_frame),
.close = flac_encode_close,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_S32,
@@ -502,7 +502,7 @@ const FFCodec ff_flashsv_decoder = {
.priv_data_size = sizeof(FlashSVContext),
.init = flashsv_decode_init,
.close = flashsv_decode_end,
- .decode = flashsv_decode_frame,
+ FF_CODEC_DECODE_CB(flashsv_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_BGR24, AV_PIX_FMT_NONE },
@@ -569,7 +569,7 @@ const FFCodec ff_flashsv2_decoder = {
.priv_data_size = sizeof(FlashSVContext),
.init = flashsv2_decode_init,
.close = flashsv2_decode_end,
- .decode = flashsv_decode_frame,
+ FF_CODEC_DECODE_CB(flashsv_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_BGR24, AV_PIX_FMT_NONE },
@@ -917,7 +917,7 @@ const FFCodec ff_flashsv2_encoder = {
.p.id = AV_CODEC_ID_FLASHSV2,
.priv_data_size = sizeof(FlashSV2Context),
.init = flashsv2_encode_init,
- .encode2 = flashsv2_encode_frame,
+ FF_CODEC_ENCODE_CB(flashsv2_encode_frame),
.close = flashsv2_encode_end,
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_BGR24, AV_PIX_FMT_NONE },
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -264,7 +264,7 @@ const FFCodec ff_flashsv_encoder = {
.p.id = AV_CODEC_ID_FLASHSV,
.priv_data_size = sizeof(FlashSVContext),
.init = flashsv_encode_init,
- .encode2 = flashsv_encode_frame,
+ FF_CODEC_ENCODE_CB(flashsv_encode_frame),
.close = flashsv_encode_end,
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_BGR24, AV_PIX_FMT_NONE },
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -1112,7 +1112,7 @@ const FFCodec ff_flic_decoder = {
.priv_data_size = sizeof(FlicDecodeContext),
.init = flic_decode_init,
.close = flic_decode_end,
- .decode = flic_decode_frame,
+ FF_CODEC_DECODE_CB(flic_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -121,7 +121,7 @@ const FFCodec ff_flv_decoder = {
.priv_data_size = sizeof(MpegEncContext),
.init = ff_h263_decode_init,
.close = ff_h263_decode_end,
- .decode = ff_h263_decode_frame,
+ FF_CODEC_DECODE_CB(ff_h263_decode_frame),
.p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
@@ -100,7 +100,7 @@ const FFCodec ff_flv_encoder = {
.p.priv_class = &ff_mpv_enc_class,
.priv_data_size = sizeof(MpegEncContext),
.init = ff_mpv_encode_init,
- .encode2 = ff_mpv_encode_picture,
+ FF_CODEC_ENCODE_CB(ff_mpv_encode_picture),
.close = ff_mpv_encode_end,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
@@ -634,7 +634,7 @@ const FFCodec ff_fmvc_decoder = {
.priv_data_size = sizeof(FMVCContext),
.init = decode_init,
.close = decode_close,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
FF_CODEC_CAP_INIT_CLEANUP,
@@ -104,7 +104,7 @@ static void * attribute_align_arg worker(void *v){
frame = task->indata;
pkt = task->outdata;
- ret = ffcodec(avctx->codec)->encode2(avctx, pkt, frame, &got_packet);
+ ret = ffcodec(avctx->codec)->cb.encode(avctx, pkt, frame, &got_packet);
if(got_packet) {
int ret2 = av_packet_make_refcounted(pkt);
if (ret >= 0 && ret2 < 0)
@@ -347,7 +347,7 @@ const FFCodec ff_fraps_decoder = {
.priv_data_size = sizeof(FrapsContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -122,7 +122,7 @@ const FFCodec ff_frwu_decoder = {
.p.id = AV_CODEC_ID_FRWU,
.priv_data_size = sizeof(FRWUContext),
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.p.priv_class = &frwu_class,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -1630,7 +1630,7 @@ const FFCodec ff_g2m_decoder = {
.priv_data_size = sizeof(G2MContext),
.init = g2m_decode_init,
.close = g2m_decode_end,
- .decode = g2m_decode_frame,
+ FF_CODEC_DECODE_CB(g2m_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -146,7 +146,7 @@ const FFCodec ff_adpcm_g722_decoder = {
.p.id = AV_CODEC_ID_ADPCM_G722,
.priv_data_size = sizeof(G722Context),
.init = g722_decode_init,
- .decode = g722_decode_frame,
+ FF_CODEC_DECODE_CB(g722_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.p.priv_class = &g722_decoder_class,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -380,7 +380,7 @@ const FFCodec ff_adpcm_g722_encoder = {
.priv_data_size = sizeof(G722Context),
.init = g722_encode_init,
.close = g722_encode_close,
- .encode2 = g722_encode_frame,
+ FF_CODEC_ENCODE_CB(g722_encode_frame),
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE },
#if FF_API_OLD_CHANNEL_LAYOUT
.p.channel_layouts = (const uint64_t[]){ AV_CH_LAYOUT_MONO, 0 },
@@ -1118,7 +1118,7 @@ const FFCodec ff_g723_1_decoder = {
.p.id = AV_CODEC_ID_G723_1,
.priv_data_size = sizeof(G723_1_Context),
.init = g723_1_decode_init,
- .decode = g723_1_decode_frame,
+ FF_CODEC_DECODE_CB(g723_1_decode_frame),
.p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
.p.priv_class = &g723_1dec_class,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -1246,7 +1246,7 @@ const FFCodec ff_g723_1_encoder = {
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(G723_1_Context),
.init = g723_1_encode_init,
- .encode2 = g723_1_encode_frame,
+ FF_CODEC_ENCODE_CB(g723_1_encode_frame),
.defaults = defaults,
.p.sample_fmts = (const enum AVSampleFormat[]) {
AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE
@@ -408,7 +408,7 @@ const FFCodec ff_adpcm_g726_encoder = {
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SMALL_LAST_FRAME,
.priv_data_size = sizeof(G726Context),
.init = g726_encode_init,
- .encode2 = g726_encode_frame,
+ FF_CODEC_ENCODE_CB(g726_encode_frame),
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
.p.priv_class = &g726_class,
@@ -426,7 +426,7 @@ const FFCodec ff_adpcm_g726le_encoder = {
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SMALL_LAST_FRAME,
.priv_data_size = sizeof(G726Context),
.init = g726_encode_init,
- .encode2 = g726_encode_frame,
+ FF_CODEC_ENCODE_CB(g726_encode_frame),
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
.p.priv_class = &g726_class,
@@ -509,7 +509,7 @@ const FFCodec ff_adpcm_g726_decoder = {
.p.id = AV_CODEC_ID_ADPCM_G726,
.priv_data_size = sizeof(G726Context),
.init = g726_decode_init,
- .decode = g726_decode_frame,
+ FF_CODEC_DECODE_CB(g726_decode_frame),
.flush = g726_decode_flush,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -523,7 +523,7 @@ const FFCodec ff_adpcm_g726le_decoder = {
.p.id = AV_CODEC_ID_ADPCM_G726LE,
.priv_data_size = sizeof(G726Context),
.init = g726_decode_init,
- .decode = g726_decode_frame,
+ FF_CODEC_DECODE_CB(g726_decode_frame),
.flush = g726_decode_flush,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.p.long_name = NULL_IF_CONFIG_SMALL("G.726 ADPCM little-endian"),
@@ -759,7 +759,7 @@ const FFCodec ff_g729_decoder = {
.p.id = AV_CODEC_ID_G729,
.priv_data_size = sizeof(G729Context),
.init = decoder_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.close = decode_close,
.p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -772,7 +772,7 @@ const FFCodec ff_acelp_kelvin_decoder = {
.p.id = AV_CODEC_ID_ACELP_KELVIN,
.priv_data_size = sizeof(G729Context),
.init = decoder_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.close = decode_close,
.p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -568,7 +568,7 @@ const FFCodec ff_gdv_decoder = {
.priv_data_size = sizeof(GDVContext),
.init = gdv_decode_init,
.close = gdv_decode_close,
- .decode = gdv_decode_frame,
+ FF_CODEC_DECODE_CB(gdv_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -356,6 +356,6 @@ const FFCodec ff_gem_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_GEM,
.p.capabilities = AV_CODEC_CAP_DR1,
- .decode = gem_decode_frame,
+ FF_CODEC_DECODE_CB(gem_decode_frame),
.close = gem_close,
};
@@ -559,7 +559,7 @@ const FFCodec ff_gif_encoder = {
.p.id = AV_CODEC_ID_GIF,
.priv_data_size = sizeof(GIFContext),
.init = gif_encode_init,
- .encode2 = gif_encode_frame,
+ FF_CODEC_ENCODE_CB(gif_encode_frame),
.close = gif_encode_close,
.p.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_RGB8, AV_PIX_FMT_BGR8, AV_PIX_FMT_RGB4_BYTE, AV_PIX_FMT_BGR4_BYTE,
@@ -564,7 +564,7 @@ const FFCodec ff_gif_decoder = {
.priv_data_size = sizeof(GifState),
.init = gif_decode_init,
.close = gif_decode_close,
- .decode = gif_decode_frame,
+ FF_CODEC_DECODE_CB(gif_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
FF_CODEC_CAP_INIT_CLEANUP,
@@ -120,7 +120,7 @@ const FFCodec ff_gsm_decoder = {
.p.id = AV_CODEC_ID_GSM,
.priv_data_size = sizeof(GSMContext),
.init = gsm_init,
- .decode = gsm_decode_frame,
+ FF_CODEC_DECODE_CB(gsm_decode_frame),
.flush = gsm_flush,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -134,7 +134,7 @@ const FFCodec ff_gsm_ms_decoder = {
.p.id = AV_CODEC_ID_GSM_MS,
.priv_data_size = sizeof(GSMContext),
.init = gsm_init,
- .decode = gsm_decode_frame,
+ FF_CODEC_DECODE_CB(gsm_decode_frame),
.flush = gsm_flush,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -688,7 +688,7 @@ const FFCodec ff_h261_decoder = {
.priv_data_size = sizeof(H261DecContext),
.init = h261_decode_init,
.close = h261_decode_end,
- .decode = h261_decode_frame,
+ FF_CODEC_DECODE_CB(h261_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
.p.max_lowres = 3,
@@ -408,7 +408,7 @@ const FFCodec ff_h261_encoder = {
.p.priv_class = &ff_mpv_enc_class,
.priv_data_size = sizeof(H261EncContext),
.init = ff_mpv_encode_init,
- .encode2 = ff_mpv_encode_picture,
+ FF_CODEC_ENCODE_CB(ff_mpv_encode_picture),
.close = ff_mpv_encode_end,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
@@ -762,7 +762,7 @@ const FFCodec ff_h263_decoder = {
.priv_data_size = sizeof(MpegEncContext),
.init = ff_h263_decode_init,
.close = ff_h263_decode_end,
- .decode = ff_h263_decode_frame,
+ FF_CODEC_DECODE_CB(ff_h263_decode_frame),
.p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
#if FF_API_FLAG_TRUNCATED
AV_CODEC_CAP_TRUNCATED |
@@ -784,7 +784,7 @@ const FFCodec ff_h263p_decoder = {
.priv_data_size = sizeof(MpegEncContext),
.init = ff_h263_decode_init,
.close = ff_h263_decode_end,
- .decode = ff_h263_decode_frame,
+ FF_CODEC_DECODE_CB(ff_h263_decode_frame),
.p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
#if FF_API_FLAG_TRUNCATED
AV_CODEC_CAP_TRUNCATED |
@@ -1070,7 +1070,7 @@ const FFCodec ff_h264_decoder = {
.priv_data_size = sizeof(H264Context),
.init = h264_decode_init,
.close = h264_decode_end,
- .decode = h264_decode_frame,
+ FF_CODEC_DECODE_CB(h264_decode_frame),
.p.capabilities = /*AV_CODEC_CAP_DRAW_HORIZ_BAND |*/ AV_CODEC_CAP_DR1 |
AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS |
AV_CODEC_CAP_FRAME_THREADS,
@@ -478,7 +478,7 @@ const FFCodec ff_hap_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_HAP,
.init = hap_init,
- .decode = hap_decode,
+ FF_CODEC_DECODE_CB(hap_decode),
.close = hap_close,
.priv_data_size = sizeof(HapContext),
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS |
@@ -357,7 +357,7 @@ const FFCodec ff_hap_encoder = {
.priv_data_size = sizeof(HapContext),
.p.priv_class = &hapenc_class,
.init = hap_init,
- .encode2 = hap_encode,
+ FF_CODEC_ENCODE_CB(hap_encode),
.close = hap_close,
.p.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE,
@@ -452,7 +452,7 @@ const FFCodec ff_hca_decoder = {
.p.id = AV_CODEC_ID_HCA,
.priv_data_size = sizeof(HCAContext),
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.close = decode_close,
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -142,7 +142,7 @@ const FFCodec ff_hcom_decoder = {
.priv_data_size = sizeof(HCOMContext),
.init = hcom_init,
.close = hcom_close,
- .decode = hcom_decode,
+ FF_CODEC_DECODE_CB(hcom_decode),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -3884,7 +3884,7 @@ const FFCodec ff_hevc_decoder = {
.p.priv_class = &hevc_decoder_class,
.init = hevc_decode_init,
.close = hevc_decode_free,
- .decode = hevc_decode_frame,
+ FF_CODEC_DECODE_CB(hevc_decode_frame),
.flush = hevc_decode_flush,
.update_thread_context = ONLY_IF_THREADS_ENABLED(hevc_update_thread_context),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
@@ -505,7 +505,7 @@ const FFCodec ff_hnm4_video_decoder = {
.priv_data_size = sizeof(Hnm4VideoContext),
.init = hnm_decode_init,
.close = hnm_decode_end,
- .decode = hnm_decode_frame,
+ FF_CODEC_DECODE_CB(hnm_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -388,7 +388,7 @@ const FFCodec ff_hq_hqa_decoder = {
.p.id = AV_CODEC_ID_HQ_HQA,
.priv_data_size = sizeof(HQContext),
.init = hq_hqa_decode_init,
- .decode = hq_hqa_decode_frame,
+ FF_CODEC_DECODE_CB(hq_hqa_decode_frame),
.close = hq_hqa_decode_close,
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
@@ -541,7 +541,7 @@ const FFCodec ff_hqx_decoder = {
.p.id = AV_CODEC_ID_HQX,
.priv_data_size = sizeof(HQXContext),
.init = hqx_decode_init,
- .decode = hqx_decode_frame,
+ FF_CODEC_DECODE_CB(hqx_decode_frame),
.close = hqx_decode_close,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS |
AV_CODEC_CAP_FRAME_THREADS,
@@ -1270,7 +1270,7 @@ const FFCodec ff_huffyuv_decoder = {
.priv_data_size = sizeof(HYuvContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -1285,7 +1285,7 @@ const FFCodec ff_ffvhuff_decoder = {
.priv_data_size = sizeof(HYuvContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -1301,7 +1301,7 @@ const FFCodec ff_hymt_decoder = {
.priv_data_size = sizeof(HYuvContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -1056,7 +1056,7 @@ const FFCodec ff_huffyuv_encoder = {
.p.id = AV_CODEC_ID_HUFFYUV,
.priv_data_size = sizeof(HYuvContext),
.init = encode_init,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.close = encode_end,
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.p.priv_class = &normal_class,
@@ -1076,7 +1076,7 @@ const FFCodec ff_ffvhuff_encoder = {
.p.id = AV_CODEC_ID_FFVHUFF,
.priv_data_size = sizeof(HYuvContext),
.init = encode_init,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.close = encode_end,
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.p.priv_class = &ff_class,
@@ -248,7 +248,7 @@ const FFCodec ff_idcin_decoder = {
.p.id = AV_CODEC_ID_IDCIN,
.priv_data_size = sizeof(IdcinContext),
.init = idcin_decode_init,
- .decode = idcin_decode_frame,
+ FF_CODEC_DECODE_CB(idcin_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.defaults = idcin_defaults,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -1917,7 +1917,7 @@ const FFCodec ff_iff_ilbm_decoder = {
.priv_data_size = sizeof(IffContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -1484,7 +1484,7 @@ const FFCodec ff_ilbc_decoder = {
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_ILBC,
.init = ilbc_decode_init,
- .decode = ilbc_decode_frame,
+ FF_CODEC_DECODE_CB(ilbc_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.priv_data_size = sizeof(ILBCContext),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -1092,7 +1092,7 @@ const FFCodec ff_imc_decoder = {
.priv_data_size = sizeof(IMCContext),
.init = imc_decode_init,
.close = imc_decode_close,
- .decode = imc_decode_frame,
+ FF_CODEC_DECODE_CB(imc_decode_frame),
.flush = flush,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
@@ -1109,7 +1109,7 @@ const FFCodec ff_iac_decoder = {
.priv_data_size = sizeof(IMCContext),
.init = imc_decode_init,
.close = imc_decode_close,
- .decode = imc_decode_frame,
+ FF_CODEC_DECODE_CB(imc_decode_frame),
.flush = flush,
.p.capabilities = AV_CODEC_CAP_DR1,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
@@ -540,7 +540,7 @@ const FFCodec ff_imm4_decoder = {
.priv_data_size = sizeof(IMM4Context),
.init = decode_init,
.close = decode_close,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.flush = decode_flush,
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
@@ -183,7 +183,7 @@ const FFCodec ff_imm5_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_IMM5,
.init = imm5_init,
- .decode = imm5_decode_frame,
+ FF_CODEC_DECODE_CB(imm5_decode_frame),
.close = imm5_close,
.flush = imm5_flush,
.priv_data_size = sizeof(IMM5Context),
@@ -187,7 +187,7 @@ const FFCodec ff_simbiosis_imx_decoder = {
.p.id = AV_CODEC_ID_SIMBIOSIS_IMX,
.priv_data_size = sizeof(SimbiosisIMXContext),
.init = imx_decode_init,
- .decode = imx_decode_frame,
+ FF_CODEC_DECODE_CB(imx_decode_frame),
.close = imx_decode_close,
.flush = imx_decode_flush,
.p.capabilities = AV_CODEC_CAP_DR1,
@@ -267,7 +267,7 @@ const FFCodec ff_indeo2_decoder = {
.priv_data_size = sizeof(Ir2Context),
.init = ir2_decode_init,
.close = ir2_decode_end,
- .decode = ir2_decode_frame,
+ FF_CODEC_DECODE_CB(ir2_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -1141,7 +1141,7 @@ const FFCodec ff_indeo3_decoder = {
.priv_data_size = sizeof(Indeo3DecodeContext),
.init = decode_init,
.close = decode_close,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -711,7 +711,7 @@ const FFCodec ff_indeo4_decoder = {
.priv_data_size = sizeof(IVI45DecContext),
.init = decode_init,
.close = ff_ivi_decode_close,
- .decode = ff_ivi_decode_frame,
+ FF_CODEC_DECODE_CB(ff_ivi_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -691,7 +691,7 @@ const FFCodec ff_indeo5_decoder = {
.priv_data_size = sizeof(IVI45DecContext),
.init = decode_init,
.close = ff_ivi_decode_close,
- .decode = ff_ivi_decode_frame,
+ FF_CODEC_DECODE_CB(ff_ivi_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -137,7 +137,7 @@ const FFCodec ff_h263i_decoder = {
.priv_data_size = sizeof(MpegEncContext),
.init = ff_h263_decode_init,
.close = ff_h263_decode_end,
- .decode = ff_h263_decode_frame,
+ FF_CODEC_DECODE_CB(ff_h263_decode_frame),
.p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
@@ -639,7 +639,7 @@ const FFCodec ff_interplay_acm_decoder = {
.p.id = AV_CODEC_ID_INTERPLAY_ACM,
.init = decode_init,
.close = decode_close,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.priv_data_size = sizeof(InterplayACMContext),
@@ -1364,7 +1364,7 @@ const FFCodec ff_interplay_video_decoder = {
.priv_data_size = sizeof(IpvideoContext),
.init = ipvideo_decode_init,
.close = ipvideo_decode_end,
- .decode = ipvideo_decode_frame,
+ FF_CODEC_DECODE_CB(ipvideo_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_PARAM_CHANGE,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -911,7 +911,7 @@ const FFCodec ff_h263_encoder = {
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.priv_data_size = sizeof(MpegEncContext),
.init = ff_mpv_encode_init,
- .encode2 = ff_mpv_encode_picture,
+ FF_CODEC_ENCODE_CB(ff_mpv_encode_picture),
.close = ff_mpv_encode_end,
};
@@ -947,6 +947,6 @@ const FFCodec ff_h263p_encoder = {
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.priv_data_size = sizeof(MpegEncContext),
.init = ff_mpv_encode_init,
- .encode2 = ff_mpv_encode_picture,
+ FF_CODEC_ENCODE_CB(ff_mpv_encode_picture),
.close = ff_mpv_encode_end,
};
@@ -1839,7 +1839,7 @@ const FFCodec ff_jpeg2000_encoder = {
.p.id = AV_CODEC_ID_JPEG2000,
.priv_data_size = sizeof(Jpeg2000EncoderContext),
.init = j2kenc_init,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.close = j2kenc_destroy,
.p.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_RGB24, AV_PIX_FMT_YUV444P, AV_PIX_FMT_GRAY8,
@@ -199,7 +199,7 @@ const FFCodec ff_jacosub_decoder = {
.p.type = AVMEDIA_TYPE_SUBTITLE,
.p.id = AV_CODEC_ID_JACOSUB,
.init = ff_ass_subtitle_header_default,
- .decode_sub = jacosub_decode_frame,
+ FF_CODEC_DECODE_SUB_CB(jacosub_decode_frame),
.flush = ff_ass_decoder_flush,
.priv_data_size = sizeof(FFASSDecoderContext),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -2578,7 +2578,7 @@ const FFCodec ff_jpeg2000_decoder = {
.p.capabilities = AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(Jpeg2000DecoderContext),
.init = jpeg2000_decode_init,
- .decode = jpeg2000_decode_frame,
+ FF_CODEC_DECODE_CB(jpeg2000_decode_frame),
.p.priv_class = &jpeg2000_class,
.p.max_lowres = 5,
.p.profiles = NULL_IF_CONFIG_SMALL(ff_jpeg2000_profiles),
@@ -558,7 +558,7 @@ const FFCodec ff_jpegls_decoder = {
.priv_data_size = sizeof(MJpegDecodeContext),
.init = ff_mjpeg_decode_init,
.close = ff_mjpeg_decode_end,
- .receive_frame = ff_mjpeg_receive_frame,
+ FF_CODEC_RECEIVE_FRAME_CB(ff_mjpeg_receive_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP |
FF_CODEC_CAP_SETS_PKT_DTS,
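
The jpegls/mjpeg entries here are the first in this part of the patch to use the receive_frame flavour: such decoders pull packets themselves rather than being handed one per call. The decode-side dispatch is not part of this section, but by analogy with the encode_receive_packet_internal() hunk near the top it presumably tests the same tag; a sketch under that assumption (FF_CODEC_CB_TYPE_RECEIVE_FRAME, cb.receive_frame and the header placement are inferred, not quoted from the patch):

    /* Hypothetical decode-side counterpart of the encode-side check shown
     * earlier; names and includes assumed to match libavcodec's layout. */
    #include <errno.h>
    #include "avcodec.h"
    #include "codec_internal.h"

    static int sketch_receive_frame(AVCodecContext *avctx, AVFrame *frame)
    {
        const FFCodec *const codec = ffcodec(avctx->codec);

        if (codec->cb_type == FF_CODEC_CB_TYPE_RECEIVE_FRAME)
            /* MJPEG, libdav1d, mediacodec, mmal, ... fetch packets themselves. */
            return codec->cb.receive_frame(avctx, frame);

        /* Otherwise the generic loop feeds packets to codec->cb.decode (or
         * cb.decode_sub for subtitles); that loop is omitted from this sketch. */
        return AVERROR(ENOSYS);
    }
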
@@ -482,7 +482,7 @@ const FFCodec ff_jpegls_encoder = {
.priv_data_size = sizeof(JPEGLSContext),
.p.priv_class = &jpegls_class,
.init = encode_jpegls_init,
- .encode2 = encode_picture_ls,
+ FF_CODEC_ENCODE_CB(encode_picture_ls),
.close = encode_jpegls_close,
.p.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_BGR24, AV_PIX_FMT_RGB24,
@@ -242,7 +242,7 @@ const FFCodec ff_jv_decoder = {
.priv_data_size = sizeof(JvContext),
.init = decode_init,
.close = decode_close,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -184,7 +184,7 @@ const FFCodec ff_kgv1_decoder = {
.priv_data_size = sizeof(KgvContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.flush = decode_flush,
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -411,7 +411,7 @@ const FFCodec ff_kmvc_decoder = {
.p.id = AV_CODEC_ID_KMVC,
.priv_data_size = sizeof(KmvcContext),
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -732,7 +732,7 @@ const FFCodec ff_lagarith_decoder = {
.p.id = AV_CODEC_ID_LAGARITH,
.priv_data_size = sizeof(LagarithContext),
.init = lag_decode_init,
- .decode = lag_decode_frame,
+ FF_CODEC_DECODE_CB(lag_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -639,7 +639,7 @@ const FFCodec ff_mszh_decoder = {
.priv_data_size = sizeof(LclDecContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
#endif
@@ -654,7 +654,7 @@ const FFCodec ff_zlib_decoder = {
.priv_data_size = sizeof(LclDecContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
#endif
@@ -158,7 +158,7 @@ const FFCodec ff_zlib_encoder = {
.p.id = AV_CODEC_ID_ZLIB,
.priv_data_size = sizeof(LclEncContext),
.init = encode_init,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.close = encode_end,
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_BGR24, AV_PIX_FMT_NONE },
@@ -256,7 +256,7 @@ const FFCodec ff_libaom_av1_decoder = {
.priv_data_size = sizeof(AV1DecodeContext),
.init = av1_init,
.close = aom_free,
- .decode = aom_decode,
+ FF_CODEC_DECODE_CB(aom_decode),
.p.capabilities = AV_CODEC_CAP_OTHER_THREADS | AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_AUTO_THREADS,
.p.profiles = NULL_IF_CONFIG_SMALL(ff_av1_profiles),
@@ -1353,7 +1353,7 @@ FFCodec ff_libaom_av1_encoder = {
.p.wrapper_name = "libaom",
.priv_data_size = sizeof(AOMContext),
.init = av1_init,
- .encode2 = aom_encode,
+ FF_CODEC_ENCODE_CB(aom_encode),
.close = aom_free,
.caps_internal = FF_CODEC_CAP_AUTO_THREADS,
.defaults = defaults,
@@ -391,6 +391,6 @@ const FFCodec ff_libaribb24_decoder = {
.priv_data_size = sizeof(Libaribb24Context),
.init = libaribb24_init,
.close = libaribb24_close,
- .decode_sub = libaribb24_decode,
+ FF_CODEC_DECODE_SUB_CB(libaribb24_decode),
.flush = libaribb24_flush,
};
@@ -137,5 +137,5 @@ const FFCodec ff_libcelt_decoder = {
.priv_data_size = sizeof(struct libcelt_context),
.init = libcelt_dec_init,
.close = libcelt_dec_close,
- .decode = libcelt_dec_decode,
+ FF_CODEC_DECODE_CB(libcelt_dec_decode),
};
@@ -188,7 +188,7 @@ const FFCodec ff_libcodec2_decoder = {
.priv_data_size = sizeof(LibCodec2Context),
.init = libcodec2_init_decoder,
.close = libcodec2_close,
- .decode = libcodec2_decode,
+ FF_CODEC_DECODE_CB(libcodec2_decode),
#if FF_API_OLD_CHANNEL_LAYOUT
.p.channel_layouts = (const uint64_t[]) { AV_CH_LAYOUT_MONO, 0 },
#endif
@@ -207,7 +207,7 @@ const FFCodec ff_libcodec2_encoder = {
.priv_data_size = sizeof(LibCodec2Context),
.init = libcodec2_init_encoder,
.close = libcodec2_close,
- .encode2 = libcodec2_encode,
+ FF_CODEC_ENCODE_CB(libcodec2_encode),
#if FF_API_OLD_CHANNEL_LAYOUT
.p.channel_layouts = (const uint64_t[]) { AV_CH_LAYOUT_MONO, 0 },
#endif
@@ -582,7 +582,7 @@ const FFCodec ff_libdav1d_decoder = {
.init = libdav1d_init,
.close = libdav1d_close,
.flush = libdav1d_flush,
- .receive_frame = libdav1d_receive_frame,
+ FF_CODEC_RECEIVE_FRAME_CB(libdav1d_receive_frame),
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_OTHER_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_SETS_PKT_DTS |
FF_CODEC_CAP_AUTO_THREADS,
@@ -220,7 +220,7 @@ const FFCodec ff_libdavs2_decoder = {
.priv_data_size = sizeof(DAVS2Context),
.init = davs2_init,
.close = davs2_end,
- .decode = davs2_decode_frame,
+ FF_CODEC_DECODE_CB(davs2_decode_frame),
.flush = davs2_flush,
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_OTHER_THREADS,
.caps_internal = FF_CODEC_CAP_AUTO_THREADS,
@@ -483,7 +483,7 @@ const FFCodec ff_libfdk_aac_decoder = {
.p.id = AV_CODEC_ID_AAC,
.priv_data_size = sizeof(FDKAACDecContext),
.init = fdk_aac_decode_init,
- .decode = fdk_aac_decode_frame,
+ FF_CODEC_DECODE_CB(fdk_aac_decode_frame),
.close = fdk_aac_decode_close,
.flush = fdk_aac_decode_flush,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF
@@ -482,7 +482,7 @@ const FFCodec ff_libfdk_aac_encoder = {
.p.id = AV_CODEC_ID_AAC,
.priv_data_size = sizeof(AACContext),
.init = aac_encode_init,
- .encode2 = aac_encode_frame,
+ FF_CODEC_ENCODE_CB(aac_encode_frame),
.close = aac_encode_close,
.p.capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_DELAY,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
@@ -135,7 +135,7 @@ const FFCodec ff_libgsm_decoder = {
.priv_data_size = sizeof(LibGSMDecodeContext),
.init = libgsm_decode_init,
.close = libgsm_decode_close,
- .decode = libgsm_decode_frame,
+ FF_CODEC_DECODE_CB(libgsm_decode_frame),
.flush = libgsm_flush,
};
#endif
@@ -150,7 +150,7 @@ const FFCodec ff_libgsm_ms_decoder = {
.priv_data_size = sizeof(LibGSMDecodeContext),
.init = libgsm_decode_init,
.close = libgsm_decode_close,
- .decode = libgsm_decode_frame,
+ FF_CODEC_DECODE_CB(libgsm_decode_frame),
.flush = libgsm_flush,
};
#endif
@@ -124,7 +124,7 @@ const FFCodec ff_libgsm_encoder = {
.p.id = AV_CODEC_ID_GSM,
.p.capabilities = AV_CODEC_CAP_DR1,
.init = libgsm_encode_init,
- .encode2 = libgsm_encode_frame,
+ FF_CODEC_ENCODE_CB(libgsm_encode_frame),
.close = libgsm_encode_close,
.defaults = libgsm_defaults,
#if FF_API_OLD_CHANNEL_LAYOUT
@@ -144,7 +144,7 @@ const FFCodec ff_libgsm_ms_encoder = {
.p.id = AV_CODEC_ID_GSM_MS,
.p.capabilities = AV_CODEC_CAP_DR1,
.init = libgsm_encode_init,
- .encode2 = libgsm_encode_frame,
+ FF_CODEC_ENCODE_CB(libgsm_encode_frame),
.close = libgsm_encode_close,
.defaults = libgsm_defaults,
#if FF_API_OLD_CHANNEL_LAYOUT
@@ -124,7 +124,7 @@ const FFCodec ff_libilbc_decoder = {
.p.id = AV_CODEC_ID_ILBC,
.priv_data_size = sizeof(ILBCDecContext),
.init = ilbc_decode_init,
- .decode = ilbc_decode_frame,
+ FF_CODEC_DECODE_CB(ilbc_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.p.priv_class = &ilbc_dec_class,
};
@@ -206,7 +206,7 @@ const FFCodec ff_libilbc_encoder = {
.p.id = AV_CODEC_ID_ILBC,
.priv_data_size = sizeof(ILBCEncContext),
.init = ilbc_encode_init,
- .encode2 = ilbc_encode_frame,
+ FF_CODEC_ENCODE_CB(ilbc_encode_frame),
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
.defaults = ilbc_encode_defaults,
@@ -332,7 +332,7 @@ const FFCodec ff_libkvazaar_encoder = {
.defaults = defaults,
.init = libkvazaar_init,
- .encode2 = libkvazaar_encode,
+ FF_CODEC_ENCODE_CB(libkvazaar_encode),
.close = libkvazaar_close,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP |
@@ -337,7 +337,7 @@ const FFCodec ff_libmp3lame_encoder = {
AV_CODEC_CAP_SMALL_LAST_FRAME,
.priv_data_size = sizeof(LAMEContext),
.init = mp3lame_encode_init,
- .encode2 = mp3lame_encode_frame,
+ FF_CODEC_ENCODE_CB(mp3lame_encode_frame),
.close = mp3lame_encode_close,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S32P,
AV_SAMPLE_FMT_FLTP,
@@ -141,7 +141,7 @@ const FFCodec ff_libopencore_amrnb_decoder = {
.priv_data_size = sizeof(AMRContext),
.init = amr_nb_decode_init,
.close = amr_nb_decode_close,
- .decode = amr_nb_decode_frame,
+ FF_CODEC_DECODE_CB(amr_nb_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
};
#endif /* CONFIG_LIBOPENCORE_AMRNB_DECODER */
@@ -295,7 +295,7 @@ const FFCodec ff_libopencore_amrnb_encoder = {
.p.id = AV_CODEC_ID_AMR_NB,
.priv_data_size = sizeof(AMRContext),
.init = amr_nb_encode_init,
- .encode2 = amr_nb_encode_frame,
+ FF_CODEC_ENCODE_CB(amr_nb_encode_frame),
.close = amr_nb_encode_close,
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SMALL_LAST_FRAME,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
@@ -382,7 +382,7 @@ const FFCodec ff_libopencore_amrwb_decoder = {
.priv_data_size = sizeof(AMRWBContext),
.init = amr_wb_decode_init,
.close = amr_wb_decode_close,
- .decode = amr_wb_decode_frame,
+ FF_CODEC_DECODE_CB(amr_wb_decode_frame),
};
#endif /* CONFIG_LIBOPENCORE_AMRWB_DECODER */
@@ -163,7 +163,7 @@ const FFCodec ff_libopenh264_decoder = {
.p.id = AV_CODEC_ID_H264,
.priv_data_size = sizeof(SVCContext),
.init = svc_decode_init,
- .decode = svc_decode_frame,
+ FF_CODEC_DECODE_CB(svc_decode_frame),
.close = svc_decode_close,
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_SETS_PKT_DTS | FF_CODEC_CAP_INIT_THREADSAFE |
@@ -459,7 +459,7 @@ const FFCodec ff_libopenh264_encoder = {
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_OTHER_THREADS,
.priv_data_size = sizeof(SVCContext),
.init = svc_encode_init,
- .encode2 = svc_encode_frame,
+ FF_CODEC_ENCODE_CB(svc_encode_frame),
.close = svc_encode_close,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP |
FF_CODEC_CAP_AUTO_THREADS,
@@ -511,5 +511,5 @@ const FFCodec ff_libopenjpeg_decoder = {
.p.wrapper_name = "libopenjpeg",
.priv_data_size = sizeof(LibOpenJPEGContext),
.init = libopenjpeg_decode_init,
- .decode = libopenjpeg_decode_frame,
+ FF_CODEC_DECODE_CB(libopenjpeg_decode_frame),
};
@@ -761,7 +761,7 @@ const FFCodec ff_libopenjpeg_encoder = {
.p.id = AV_CODEC_ID_JPEG2000,
.priv_data_size = sizeof(LibOpenJPEGContext),
.init = libopenjpeg_encode_init,
- .encode2 = libopenjpeg_encode_frame,
+ FF_CODEC_ENCODE_CB(libopenjpeg_encode_frame),
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.p.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_RGB24, AV_PIX_FMT_RGBA, AV_PIX_FMT_RGB48,
@@ -238,7 +238,7 @@ const FFCodec ff_libopus_decoder = {
.priv_data_size = sizeof(struct libopus_context),
.init = libopus_decode_init,
.close = libopus_decode_close,
- .decode = libopus_decode,
+ FF_CODEC_DECODE_CB(libopus_decode),
.flush = libopus_flush,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP,
@@ -590,7 +590,7 @@ const FFCodec ff_libopus_encoder = {
.p.id = AV_CODEC_ID_OPUS,
.priv_data_size = sizeof(LibopusEncContext),
.init = libopus_encode_init,
- .encode2 = libopus_encode,
+ FF_CODEC_ENCODE_CB(libopus_encode),
.close = libopus_encode_close,
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SMALL_LAST_FRAME,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
@@ -620,7 +620,7 @@ const FFCodec ff_librav1e_encoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_AV1,
.init = librav1e_encode_init,
- .receive_packet = librav1e_receive_packet,
+ FF_CODEC_RECEIVE_PACKET_CB(librav1e_receive_packet),
.close = librav1e_encode_close,
.priv_data_size = sizeof(librav1eContext),
.p.priv_class = &class,
@@ -126,6 +126,6 @@ const FFCodec ff_librsvg_decoder = {
.p.id = AV_CODEC_ID_SVG,
.p.capabilities = AV_CODEC_CAP_DR1,
.p.wrapper_name = "librsvg",
- .decode = librsvg_decode_frame,
+ FF_CODEC_DECODE_CB(librsvg_decode_frame),
.priv_data_size = sizeof(LibRSVGContext),
};
@@ -139,7 +139,7 @@ const FFCodec ff_libshine_encoder = {
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
.priv_data_size = sizeof(SHINEContext),
.init = libshine_encode_init,
- .encode2 = libshine_encode_frame,
+ FF_CODEC_ENCODE_CB(libshine_encode_frame),
.close = libshine_encode_close,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_NONE },
@@ -200,6 +200,6 @@ const FFCodec ff_libspeex_decoder = {
.priv_data_size = sizeof(LibSpeexContext),
.init = libspeex_decode_init,
.close = libspeex_decode_close,
- .decode = libspeex_decode_frame,
+ FF_CODEC_DECODE_CB(libspeex_decode_frame),
.flush = libspeex_decode_flush,
};
@@ -355,7 +355,7 @@ const FFCodec ff_libspeex_encoder = {
.p.id = AV_CODEC_ID_SPEEX,
.priv_data_size = sizeof(LibSpeexEncContext),
.init = encode_init,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.close = encode_close,
.p.capabilities = AV_CODEC_CAP_DELAY,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
@@ -614,7 +614,7 @@ const FFCodec ff_libsvtav1_encoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_AV1,
.init = eb_enc_init,
- .receive_packet = eb_receive_packet,
+ FF_CODEC_RECEIVE_PACKET_CB(eb_receive_packet),
.close = eb_enc_close,
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_OTHER_THREADS,
.caps_internal = FF_CODEC_CAP_AUTO_THREADS | FF_CODEC_CAP_INIT_CLEANUP,
@@ -377,7 +377,7 @@ const FFCodec ff_libtheora_encoder = {
.priv_data_size = sizeof(TheoraContext),
.init = encode_init,
.close = encode_close,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_NONE
},
@@ -216,7 +216,7 @@ const FFCodec ff_libtwolame_encoder = {
.p.id = AV_CODEC_ID_MP2,
.priv_data_size = sizeof(TWOLAMEContext),
.init = twolame_encode_init,
- .encode2 = twolame_encode_frame,
+ FF_CODEC_ENCODE_CB(twolame_encode_frame),
.close = twolame_encode_close,
.p.capabilities = AV_CODEC_CAP_DELAY,
.defaults = twolame_defaults,
@@ -255,7 +255,7 @@ const FFCodec ff_libuavs3d_decoder = {
.priv_data_size = sizeof(uavs3d_context),
.init = libuavs3d_init,
.close = libuavs3d_end,
- .decode = libuavs3d_decode_frame,
+ FF_CODEC_DECODE_CB(libuavs3d_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_OTHER_THREADS,
.caps_internal = FF_CODEC_CAP_AUTO_THREADS,
.flush = libuavs3d_flush,
@@ -150,7 +150,7 @@ const FFCodec ff_libvo_amrwbenc_encoder = {
.p.wrapper_name = "libvo_amrwbenc",
.priv_data_size = sizeof(AMRWBContext),
.init = amr_wb_encode_init,
- .encode2 = amr_wb_encode_frame,
+ FF_CODEC_ENCODE_CB(amr_wb_encode_frame),
.close = amr_wb_encode_close,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
@@ -218,6 +218,6 @@ const FFCodec ff_libvorbis_decoder = {
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_CHANNEL_CONF,
.priv_data_size = sizeof(OggVorbisDecContext),
.init = oggvorbis_decode_init,
- .decode = oggvorbis_decode_frame,
+ FF_CODEC_DECODE_CB(oggvorbis_decode_frame),
.close = oggvorbis_decode_close,
};
@@ -384,7 +384,7 @@ const FFCodec ff_libvorbis_encoder = {
AV_CODEC_CAP_SMALL_LAST_FRAME,
.priv_data_size = sizeof(LibvorbisEncContext),
.init = libvorbis_encode_init,
- .encode2 = libvorbis_encode_frame,
+ FF_CODEC_ENCODE_CB(libvorbis_encode_frame),
.close = libvorbis_encode_close,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
@@ -365,7 +365,7 @@ const FFCodec ff_libvpx_vp8_decoder = {
.priv_data_size = sizeof(VPxContext),
.init = vp8_init,
.close = vpx_free,
- .decode = vpx_decode,
+ FF_CODEC_DECODE_CB(vpx_decode),
.caps_internal = FF_CODEC_CAP_AUTO_THREADS,
};
#endif /* CONFIG_LIBVPX_VP8_DECODER */
@@ -388,7 +388,7 @@ FFCodec ff_libvpx_vp9_decoder = {
.priv_data_size = sizeof(VPxContext),
.init = vp9_init,
.close = vpx_free,
- .decode = vpx_decode,
+ FF_CODEC_DECODE_CB(vpx_decode),
.caps_internal = FF_CODEC_CAP_AUTO_THREADS,
.init_static_data = ff_vp9_init_static,
};
@@ -1934,7 +1934,7 @@ const FFCodec ff_libvpx_vp8_encoder = {
AV_CODEC_CAP_OTHER_THREADS,
.priv_data_size = sizeof(VPxContext),
.init = vp8_init,
- .encode2 = vpx_encode,
+ FF_CODEC_ENCODE_CB(vpx_encode),
.close = vpx_free,
.caps_internal = FF_CODEC_CAP_AUTO_THREADS,
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUVA420P, AV_PIX_FMT_NONE },
@@ -1969,7 +1969,7 @@ FFCodec ff_libvpx_vp9_encoder = {
.p.wrapper_name = "libvpx",
.priv_data_size = sizeof(VPxContext),
.init = vp9_init,
- .encode2 = vpx_encode,
+ FF_CODEC_ENCODE_CB(vpx_encode),
.close = vpx_free,
.caps_internal = FF_CODEC_CAP_AUTO_THREADS,
.defaults = defaults,
@@ -99,6 +99,6 @@ const FFCodec ff_libwebp_encoder = {
.priv_data_size = sizeof(LibWebPContext),
.defaults = ff_libwebp_defaults,
.init = libwebp_encode_init,
- .encode2 = libwebp_encode_frame,
+ FF_CODEC_ENCODE_CB(libwebp_encode_frame),
.close = libwebp_encode_close,
};
@@ -137,6 +137,6 @@ const FFCodec ff_libwebp_anim_encoder = {
.priv_data_size = sizeof(LibWebPAnimContext),
.defaults = ff_libwebp_defaults,
.init = libwebp_anim_encode_init,
- .encode2 = libwebp_anim_encode_frame,
+ FF_CODEC_ENCODE_CB(libwebp_anim_encode_frame),
.close = libwebp_anim_encode_close,
};
@@ -1228,7 +1228,7 @@ FFCodec ff_libx264_encoder = {
.p.wrapper_name = "libx264",
.priv_data_size = sizeof(X264Context),
.init = X264_init,
- .encode2 = X264_frame,
+ FF_CODEC_ENCODE_CB(X264_frame),
.close = X264_close,
.defaults = x264_defaults,
#if X264_BUILD < 153
@@ -1265,7 +1265,7 @@ const FFCodec ff_libx264rgb_encoder = {
.p.wrapper_name = "libx264",
.priv_data_size = sizeof(X264Context),
.init = X264_init,
- .encode2 = X264_frame,
+ FF_CODEC_ENCODE_CB(X264_frame),
.close = X264_close,
.defaults = x264_defaults,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_AUTO_THREADS
@@ -1297,7 +1297,7 @@ const FFCodec ff_libx262_encoder = {
.p.wrapper_name = "libx264",
.priv_data_size = sizeof(X264Context),
.init = X264_init,
- .encode2 = X264_frame,
+ FF_CODEC_ENCODE_CB(X264_frame),
.close = X264_close,
.defaults = x264_defaults,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_AUTO_THREADS,
@@ -752,7 +752,7 @@ FFCodec ff_libx265_encoder = {
.p.wrapper_name = "libx265",
.init = libx265_encode_init,
.init_static_data = libx265_encode_init_csp,
- .encode2 = libx265_encode_frame,
+ FF_CODEC_ENCODE_CB(libx265_encode_frame),
.close = libx265_encode_close,
.priv_data_size = sizeof(libx265Context),
.defaults = x265_defaults,
@@ -430,7 +430,7 @@ const FFCodec ff_libxavs_encoder = {
AV_CODEC_CAP_OTHER_THREADS,
.priv_data_size = sizeof(XavsContext),
.init = XAVS_init,
- .encode2 = XAVS_frame,
+ FF_CODEC_ENCODE_CB(XAVS_frame),
.close = XAVS_close,
.caps_internal = FF_CODEC_CAP_AUTO_THREADS,
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
@@ -295,7 +295,7 @@ const FFCodec ff_libxavs2_encoder = {
AV_CODEC_CAP_OTHER_THREADS,
.priv_data_size = sizeof(XAVS2EContext),
.init = xavs2_init,
- .encode2 = xavs2_encode_frame,
+ FF_CODEC_ENCODE_CB(xavs2_encode_frame),
.close = xavs2_close,
.caps_internal = FF_CODEC_CAP_AUTO_THREADS,
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
@@ -904,7 +904,7 @@ const FFCodec ff_libxvid_encoder = {
.p.id = AV_CODEC_ID_MPEG4,
.priv_data_size = sizeof(struct xvid_context),
.init = xvid_encode_init,
- .encode2 = xvid_encode_frame,
+ FF_CODEC_ENCODE_CB(xvid_encode_frame),
.close = xvid_encode_close,
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
.p.priv_class = &xvid_class,
@@ -822,6 +822,6 @@ const FFCodec ff_libzvbi_teletext_decoder = {
.priv_data_size = sizeof(TeletextContext),
.init = teletext_init_decoder,
.close = teletext_close_decoder,
- .decode_sub = teletext_decode_frame,
+ FF_CODEC_DECODE_SUB_CB(teletext_decode_frame),
.flush = teletext_flush,
};
@@ -329,7 +329,7 @@ const FFCodec ff_ljpeg_encoder = {
.priv_data_size = sizeof(LJpegEncContext),
.p.priv_class = &ljpeg_class,
.init = ljpeg_encode_init,
- .encode2 = ljpeg_encode_frame,
+ FF_CODEC_ENCODE_CB(ljpeg_encode_frame),
.close = ljpeg_encode_close,
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.p.pix_fmts = (const enum AVPixelFormat[]){
@@ -343,7 +343,7 @@ const FFCodec ff_loco_decoder = {
.p.id = AV_CODEC_ID_LOCO,
.priv_data_size = sizeof(LOCOContext),
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -253,7 +253,7 @@ const FFCodec ff_lscr_decoder = {
.priv_data_size = sizeof(LSCRContext),
.init = lscr_decode_init,
.close = lscr_decode_close,
- .decode = decode_frame_lscr,
+ FF_CODEC_DECODE_CB(decode_frame_lscr),
.flush = lscr_decode_flush,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -111,7 +111,7 @@ const FFCodec ff_m101_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_M101,
.init = m101_decode_init,
- .decode = m101_decode_frame,
+ FF_CODEC_DECODE_CB(m101_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -292,7 +292,7 @@ const FFCodec ff_mace3_decoder = {
.p.id = AV_CODEC_ID_MACE3,
.priv_data_size = sizeof(MACEContext),
.init = mace_decode_init,
- .decode = mace_decode_frame,
+ FF_CODEC_DECODE_CB(mace_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_NONE },
@@ -306,7 +306,7 @@ const FFCodec ff_mace6_decoder = {
.p.id = AV_CODEC_ID_MACE6,
.priv_data_size = sizeof(MACEContext),
.init = mace_decode_init,
- .decode = mace_decode_frame,
+ FF_CODEC_DECODE_CB(mace_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
AV_SAMPLE_FMT_NONE },
@@ -701,7 +701,7 @@ const FFCodec ff_magicyuv_decoder = {
.priv_data_size = sizeof(MagicYUVContext),
.init = magy_decode_init,
.close = magy_decode_end,
- .decode = magy_decode_frame,
+ FF_CODEC_DECODE_CB(magy_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 |
AV_CODEC_CAP_FRAME_THREADS |
AV_CODEC_CAP_SLICE_THREADS,
@@ -572,7 +572,7 @@ const FFCodec ff_magicyuv_encoder = {
.p.priv_class = &magicyuv_class,
.init = magy_encode_init,
.close = magy_encode_close,
- .encode2 = magy_encode_frame,
+ FF_CODEC_ENCODE_CB(magy_encode_frame),
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.p.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_GBRP, AV_PIX_FMT_GBRAP, AV_PIX_FMT_YUV422P,
@@ -257,7 +257,7 @@ const FFCodec ff_mdec_decoder = {
.priv_data_size = sizeof(MDECContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -540,7 +540,7 @@ const FFCodec ff_ ## short_name ## _mediacodec_decoder = {
.p.priv_class = &ff_##short_name##_mediacodec_dec_class, \
.priv_data_size = sizeof(MediaCodecH264DecContext), \
.init = mediacodec_decode_init, \
- .receive_frame = mediacodec_receive_frame, \
+ FF_CODEC_RECEIVE_FRAME_CB(mediacodec_receive_frame), \
.flush = mediacodec_decode_flush, \
.close = mediacodec_decode_close, \
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HARDWARE, \
@@ -381,7 +381,7 @@ const FFCodec ff_metasound_decoder = {
.priv_data_size = sizeof(TwinVQContext),
.init = metasound_decode_init,
.close = ff_twinvq_decode_close,
- .decode = ff_twinvq_decode_frame,
+ FF_CODEC_DECODE_CB(ff_twinvq_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
@@ -1160,7 +1160,7 @@ static int mf_close(AVCodecContext *avctx)
.priv_data_size = sizeof(MFContext), \
.init = mf_init, \
.close = mf_close, \
- .receive_packet = mf_receive_packet, \
+ FF_CODEC_RECEIVE_PACKET_CB(mf_receive_packet), \
EXTRA \
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID | \
AV_CODEC_CAP_DR1, \
@@ -374,7 +374,7 @@ const FFCodec ff_microdvd_decoder = {
.p.type = AVMEDIA_TYPE_SUBTITLE,
.p.id = AV_CODEC_ID_MICRODVD,
.init = microdvd_init,
- .decode_sub = microdvd_decode_frame,
+ FF_CODEC_DECODE_SUB_CB(microdvd_decode_frame),
.flush = ff_ass_decoder_flush,
.priv_data_size = sizeof(FFASSDecoderContext),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -285,7 +285,7 @@ const FFCodec ff_mvdv_decoder = {
.p.id = AV_CODEC_ID_MVDV,
.priv_data_size = sizeof(MidiVidContext),
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.flush = decode_flush,
.close = decode_close,
.p.capabilities = AV_CODEC_CAP_DR1,
@@ -445,7 +445,7 @@ const FFCodec ff_mimic_decoder = {
.priv_data_size = sizeof(MimicContext),
.init = mimic_decode_init,
.close = mimic_decode_end,
- .decode = mimic_decode_frame,
+ FF_CODEC_DECODE_CB(mimic_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.update_thread_context = ONLY_IF_THREADS_ENABLED(mimic_decode_update_thread_context),
.caps_internal = FF_CODEC_CAP_ALLOCATE_PROGRESS |
@@ -164,7 +164,7 @@ const FFCodec ff_mjpegb_decoder = {
.priv_data_size = sizeof(MJpegDecodeContext),
.init = ff_mjpeg_decode_init,
.close = ff_mjpeg_decode_end,
- .decode = mjpegb_decode_frame,
+ FF_CODEC_DECODE_CB(mjpegb_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.p.max_lowres = 3,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -3020,7 +3020,7 @@ const FFCodec ff_mjpeg_decoder = {
.priv_data_size = sizeof(MJpegDecodeContext),
.init = ff_mjpeg_decode_init,
.close = ff_mjpeg_decode_end,
- .receive_frame = ff_mjpeg_receive_frame,
+ FF_CODEC_RECEIVE_FRAME_CB(ff_mjpeg_receive_frame),
.flush = decode_flush,
.p.capabilities = AV_CODEC_CAP_DR1,
.p.max_lowres = 3,
@@ -3048,7 +3048,7 @@ const FFCodec ff_thp_decoder = {
.priv_data_size = sizeof(MJpegDecodeContext),
.init = ff_mjpeg_decode_init,
.close = ff_mjpeg_decode_end,
- .receive_frame = ff_mjpeg_receive_frame,
+ FF_CODEC_RECEIVE_FRAME_CB(ff_mjpeg_receive_frame),
.flush = decode_flush,
.p.capabilities = AV_CODEC_CAP_DR1,
.p.max_lowres = 3,
@@ -3066,7 +3066,7 @@ const FFCodec ff_smvjpeg_decoder = {
.priv_data_size = sizeof(MJpegDecodeContext),
.init = ff_mjpeg_decode_init,
.close = ff_mjpeg_decode_end,
- .receive_frame = ff_mjpeg_receive_frame,
+ FF_CODEC_RECEIVE_FRAME_CB(ff_mjpeg_receive_frame),
.flush = decode_flush,
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_EXPORTS_CROPPING |
@@ -658,7 +658,7 @@ const FFCodec ff_mjpeg_encoder = {
.p.id = AV_CODEC_ID_MJPEG,
.priv_data_size = sizeof(MJPEGEncContext),
.init = ff_mpv_encode_init,
- .encode2 = ff_mpv_encode_picture,
+ FF_CODEC_ENCODE_CB(ff_mpv_encode_picture),
.close = mjpeg_encode_close,
.p.capabilities = AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -687,7 +687,7 @@ const FFCodec ff_amv_encoder = {
.p.id = AV_CODEC_ID_AMV,
.priv_data_size = sizeof(MJPEGEncContext),
.init = ff_mpv_encode_init,
- .encode2 = amv_encode_picture,
+ FF_CODEC_ENCODE_CB(amv_encode_picture),
.close = mjpeg_encode_close,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.p.pix_fmts = (const enum AVPixelFormat[]) {
@@ -1424,7 +1424,7 @@ const FFCodec ff_mlp_decoder = {
.priv_data_size = sizeof(MLPDecodeContext),
.p.priv_class = &mlp_decoder_class,
.init = mlp_decode_init,
- .decode = read_access_unit,
+ FF_CODEC_DECODE_CB(read_access_unit),
.flush = mlp_decode_flush,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -1439,7 +1439,7 @@ const FFCodec ff_truehd_decoder = {
.priv_data_size = sizeof(MLPDecodeContext),
.p.priv_class = &truehd_decoder_class,
.init = mlp_decode_init,
- .decode = read_access_unit,
+ FF_CODEC_DECODE_CB(read_access_unit),
.flush = mlp_decode_flush,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -2213,7 +2213,7 @@ const FFCodec ff_mlp_encoder = {
.p.id = AV_CODEC_ID_MLP,
.priv_data_size = sizeof(MLPEncodeContext),
.init = mlp_encode_init,
- .encode2 = mlp_encode_frame,
+ FF_CODEC_ENCODE_CB(mlp_encode_frame),
.close = mlp_encode_close,
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_EXPERIMENTAL,
.p.sample_fmts = (const enum AVSampleFormat[]) {AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_NONE},
@@ -2233,7 +2233,7 @@ const FFCodec ff_truehd_encoder = {
.p.id = AV_CODEC_ID_TRUEHD,
.priv_data_size = sizeof(MLPEncodeContext),
.init = mlp_encode_init,
- .encode2 = mlp_encode_frame,
+ FF_CODEC_ENCODE_CB(mlp_encode_frame),
.close = mlp_encode_close,
.p.capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_EXPERIMENTAL,
.p.sample_fmts = (const enum AVSampleFormat[]) {AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_NONE},
@@ -837,7 +837,7 @@ static const AVClass ffmmal_dec_class = {
.priv_data_size = sizeof(MMALDecodeContext), \
.init = ffmmal_init_decoder, \
.close = ffmmal_close_decoder, \
- .receive_frame = ffmmal_receive_frame, \
+ FF_CODEC_RECEIVE_FRAME_CB(ffmmal_receive_frame), \
.flush = ffmmal_flush, \
.p.priv_class = &ffmmal_dec_class, \
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE, \
@@ -246,7 +246,7 @@ const FFCodec ff_mmvideo_decoder = {
.priv_data_size = sizeof(MmContext),
.init = mm_decode_init,
.close = mm_decode_end,
- .decode = mm_decode_frame,
+ FF_CODEC_DECODE_CB(mm_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -1347,7 +1347,7 @@ const FFCodec ff_mobiclip_decoder = {
.p.id = AV_CODEC_ID_MOBICLIP,
.priv_data_size = sizeof(MobiClipContext),
.init = mobiclip_init,
- .decode = mobiclip_decode,
+ FF_CODEC_DECODE_CB(mobiclip_decode),
.flush = mobiclip_flush,
.close = mobiclip_close,
.p.capabilities = AV_CODEC_CAP_DR1,
@@ -353,7 +353,7 @@ const FFCodec ff_motionpixels_decoder = {
.priv_data_size = sizeof(MotionPixelsContext),
.init = mp_decode_init,
.close = mp_decode_end,
- .decode = mp_decode_frame,
+ FF_CODEC_DECODE_CB(mp_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -599,7 +599,7 @@ const FFCodec ff_movtext_decoder = {
.priv_data_size = sizeof(MovTextContext),
.p.priv_class = &mov_text_decoder_class,
.init = mov_text_init,
- .decode_sub = mov_text_decode_frame,
+ FF_CODEC_DECODE_SUB_CB(mov_text_decode_frame),
.close = mov_text_decode_close,
.flush = mov_text_flush,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -707,7 +707,7 @@ const FFCodec ff_movtext_encoder = {
.priv_data_size = sizeof(MovTextContext),
.p.priv_class = &mov_text_encoder_class,
.init = mov_text_encode_init,
- .encode_sub = mov_text_encode_frame,
+ FF_CODEC_ENCODE_SUB_CB(mov_text_encode_frame),
.close = mov_text_encode_close,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -317,7 +317,7 @@ const FFCodec ff_mpc7_decoder = {
.priv_data_size = sizeof(MPCContext),
.init = mpc7_decode_init,
.close = mpc7_decode_close,
- .decode = mpc7_decode_frame,
+ FF_CODEC_DECODE_CB(mpc7_decode_frame),
.flush = mpc7_decode_flush,
.p.capabilities = AV_CODEC_CAP_DR1,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
@@ -390,7 +390,7 @@ const FFCodec ff_mpc8_decoder = {
.p.id = AV_CODEC_ID_MUSEPACK8,
.priv_data_size = sizeof(MPCContext),
.init = mpc8_decode_init,
- .decode = mpc8_decode_frame,
+ FF_CODEC_DECODE_CB(mpc8_decode_frame),
.flush = mpc8_decode_flush,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_S16P,
@@ -2870,7 +2870,7 @@ const FFCodec ff_mpeg1video_decoder = {
.priv_data_size = sizeof(Mpeg1Context),
.init = mpeg_decode_init,
.close = mpeg_decode_end,
- .decode = mpeg_decode_frame,
+ FF_CODEC_DECODE_CB(mpeg_decode_frame),
.p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
#if FF_API_FLAG_TRUNCATED
AV_CODEC_CAP_TRUNCATED |
@@ -2903,7 +2903,7 @@ const FFCodec ff_mpeg2video_decoder = {
.priv_data_size = sizeof(Mpeg1Context),
.init = mpeg_decode_init,
.close = mpeg_decode_end,
- .decode = mpeg_decode_frame,
+ FF_CODEC_DECODE_CB(mpeg_decode_frame),
.p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
#if FF_API_FLAG_TRUNCATED
AV_CODEC_CAP_TRUNCATED |
@@ -2949,7 +2949,7 @@ const FFCodec ff_mpegvideo_decoder = {
.priv_data_size = sizeof(Mpeg1Context),
.init = mpeg_decode_init,
.close = mpeg_decode_end,
- .decode = mpeg_decode_frame,
+ FF_CODEC_DECODE_CB(mpeg_decode_frame),
.p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
#if FF_API_FLAG_TRUNCATED
AV_CODEC_CAP_TRUNCATED |
@@ -3113,7 +3113,7 @@ const FFCodec ff_ipu_decoder = {
.p.id = AV_CODEC_ID_IPU,
.priv_data_size = sizeof(IPUContext),
.init = ipu_decode_init,
- .decode = ipu_decode_frame,
+ FF_CODEC_DECODE_CB(ipu_decode_frame),
.close = ipu_decode_end,
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -1232,7 +1232,7 @@ const FFCodec ff_mpeg1video_encoder = {
.p.id = AV_CODEC_ID_MPEG1VIDEO,
.priv_data_size = sizeof(MPEG12EncContext),
.init = encode_init,
- .encode2 = ff_mpv_encode_picture,
+ FF_CODEC_ENCODE_CB(ff_mpv_encode_picture),
.close = ff_mpv_encode_end,
.p.supported_framerates = ff_mpeg12_frame_rate_tab + 1,
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
@@ -1249,7 +1249,7 @@ const FFCodec ff_mpeg2video_encoder = {
.p.id = AV_CODEC_ID_MPEG2VIDEO,
.priv_data_size = sizeof(MPEG12EncContext),
.init = encode_init,
- .encode2 = ff_mpv_encode_picture,
+ FF_CODEC_ENCODE_CB(ff_mpv_encode_picture),
.close = ff_mpv_encode_end,
.p.supported_framerates = ff_mpeg2_frame_rate_tab,
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
@@ -3670,7 +3670,7 @@ const FFCodec ff_mpeg4_decoder = {
.priv_data_size = sizeof(Mpeg4DecContext),
.init = decode_init,
.close = ff_h263_decode_end,
- .decode = ff_h263_decode_frame,
+ FF_CODEC_DECODE_CB(ff_h263_decode_frame),
.p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1 |
#if FF_API_FLAG_TRUNCATED
AV_CODEC_CAP_TRUNCATED |
@@ -1401,7 +1401,7 @@ const FFCodec ff_mpeg4_encoder = {
.p.id = AV_CODEC_ID_MPEG4,
.priv_data_size = sizeof(MpegEncContext),
.init = encode_init,
- .encode2 = ff_mpv_encode_picture,
+ FF_CODEC_ENCODE_CB(ff_mpv_encode_picture),
.close = ff_mpv_encode_end,
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_SLICE_THREADS,
@@ -68,7 +68,7 @@ const FFCodec ff_mp1_decoder = {
.p.id = AV_CODEC_ID_MP1,
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
AV_CODEC_CAP_DR1,
.flush = flush,
@@ -86,7 +86,7 @@ const FFCodec ff_mp2_decoder = {
.p.id = AV_CODEC_ID_MP2,
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
AV_CODEC_CAP_DR1,
.flush = flush,
@@ -104,7 +104,7 @@ const FFCodec ff_mp3_decoder = {
.p.id = AV_CODEC_ID_MP3,
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
AV_CODEC_CAP_DR1,
.flush = flush,
@@ -122,7 +122,7 @@ const FFCodec ff_mp3adu_decoder = {
.p.id = AV_CODEC_ID_MP3ADU,
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
- .decode = decode_frame_adu,
+ FF_CODEC_DECODE_CB(decode_frame_adu),
.p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
AV_CODEC_CAP_DR1,
.flush = flush,
@@ -141,7 +141,7 @@ const FFCodec ff_mp3on4_decoder = {
.priv_data_size = sizeof(MP3On4DecodeContext),
.init = decode_init_mp3on4,
.close = decode_close_mp3on4,
- .decode = decode_frame_mp3on4,
+ FF_CODEC_DECODE_CB(decode_frame_mp3on4),
.p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
AV_CODEC_CAP_DR1,
.flush = flush_mp3on4,
@@ -81,7 +81,7 @@ const FFCodec ff_mp1float_decoder = {
.p.id = AV_CODEC_ID_MP1,
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
AV_CODEC_CAP_DR1,
.flush = flush,
@@ -99,7 +99,7 @@ const FFCodec ff_mp2float_decoder = {
.p.id = AV_CODEC_ID_MP2,
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
AV_CODEC_CAP_DR1,
.flush = flush,
@@ -117,7 +117,7 @@ const FFCodec ff_mp3float_decoder = {
.p.id = AV_CODEC_ID_MP3,
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
AV_CODEC_CAP_DR1,
.flush = flush,
@@ -135,7 +135,7 @@ const FFCodec ff_mp3adufloat_decoder = {
.p.id = AV_CODEC_ID_MP3ADU,
.priv_data_size = sizeof(MPADecodeContext),
.init = decode_init,
- .decode = decode_frame_adu,
+ FF_CODEC_DECODE_CB(decode_frame_adu),
.p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
AV_CODEC_CAP_DR1,
.flush = flush,
@@ -154,7 +154,7 @@ const FFCodec ff_mp3on4float_decoder = {
.priv_data_size = sizeof(MP3On4DecodeContext),
.init = decode_init_mp3on4,
.close = decode_close_mp3on4,
- .decode = decode_frame_mp3on4,
+ FF_CODEC_DECODE_CB(decode_frame_mp3on4),
.p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
AV_CODEC_CAP_DR1,
.flush = flush_mp3on4,
@@ -30,7 +30,7 @@ const FFCodec ff_mp2fixed_encoder = {
.p.id = AV_CODEC_ID_MP2,
.priv_data_size = sizeof(MpegAudioContext),
.init = MPA_encode_init,
- .encode2 = MPA_encode_frame,
+ FF_CODEC_ENCODE_CB(MPA_encode_frame),
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
.p.supported_samplerates = (const int[]){
@@ -31,7 +31,7 @@ const FFCodec ff_mp2_encoder = {
.p.id = AV_CODEC_ID_MP2,
.priv_data_size = sizeof(MpegAudioContext),
.init = MPA_encode_init,
- .encode2 = MPA_encode_frame,
+ FF_CODEC_ENCODE_CB(MPA_encode_frame),
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
.p.supported_samplerates = (const int[]){
@@ -86,7 +86,7 @@ const FFCodec ff_mpl2_decoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("MPL2 subtitle"),
.p.type = AVMEDIA_TYPE_SUBTITLE,
.p.id = AV_CODEC_ID_MPL2,
- .decode_sub = mpl2_decode_frame,
+ FF_CODEC_DECODE_SUB_CB(mpl2_decode_frame),
.init = ff_ass_subtitle_header_default,
.flush = ff_ass_decoder_flush,
.priv_data_size = sizeof(FFASSDecoderContext),
@@ -252,7 +252,7 @@ const FFCodec ff_mscc_decoder = {
.priv_data_size = sizeof(MSCCContext),
.init = decode_init,
.close = decode_close,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -265,7 +265,7 @@ const FFCodec ff_srgc_decoder = {
.priv_data_size = sizeof(MSCCContext),
.init = decode_init,
.close = decode_close,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -872,7 +872,7 @@ const FFCodec ff_msmpeg4v1_decoder = {
.priv_data_size = sizeof(MpegEncContext),
.init = ff_msmpeg4_decode_init,
.close = ff_h263_decode_end,
- .decode = ff_h263_decode_frame,
+ FF_CODEC_DECODE_CB(ff_h263_decode_frame),
.p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
@@ -891,7 +891,7 @@ const FFCodec ff_msmpeg4v2_decoder = {
.priv_data_size = sizeof(MpegEncContext),
.init = ff_msmpeg4_decode_init,
.close = ff_h263_decode_end,
- .decode = ff_h263_decode_frame,
+ FF_CODEC_DECODE_CB(ff_h263_decode_frame),
.p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
@@ -910,7 +910,7 @@ const FFCodec ff_msmpeg4v3_decoder = {
.priv_data_size = sizeof(MpegEncContext),
.init = ff_msmpeg4_decode_init,
.close = ff_h263_decode_end,
- .decode = ff_h263_decode_frame,
+ FF_CODEC_DECODE_CB(ff_h263_decode_frame),
.p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
@@ -929,7 +929,7 @@ const FFCodec ff_wmv1_decoder = {
.priv_data_size = sizeof(MpegEncContext),
.init = ff_msmpeg4_decode_init,
.close = ff_h263_decode_end,
- .decode = ff_h263_decode_frame,
+ FF_CODEC_DECODE_CB(ff_h263_decode_frame),
.p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM,
@@ -686,7 +686,7 @@ const FFCodec ff_msmpeg4v2_encoder = {
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.priv_data_size = sizeof(MSMPEG4EncContext),
.init = ff_mpv_encode_init,
- .encode2 = ff_mpv_encode_picture,
+ FF_CODEC_ENCODE_CB(ff_mpv_encode_picture),
.close = ff_mpv_encode_end,
};
@@ -700,7 +700,7 @@ const FFCodec ff_msmpeg4v3_encoder = {
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.priv_data_size = sizeof(MSMPEG4EncContext),
.init = ff_mpv_encode_init,
- .encode2 = ff_mpv_encode_picture,
+ FF_CODEC_ENCODE_CB(ff_mpv_encode_picture),
.close = ff_mpv_encode_end,
};
@@ -714,6 +714,6 @@ const FFCodec ff_wmv1_encoder = {
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.priv_data_size = sizeof(MSMPEG4EncContext),
.init = ff_mpv_encode_init,
- .encode2 = ff_mpv_encode_picture,
+ FF_CODEC_ENCODE_CB(ff_mpv_encode_picture),
.close = ff_mpv_encode_end,
};
@@ -98,5 +98,5 @@ const FFCodec ff_msp2_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_MSP2,
.p.capabilities = AV_CODEC_CAP_DR1,
- .decode = msp2_decode_frame,
+ FF_CODEC_DECODE_CB(msp2_decode_frame),
};
@@ -167,7 +167,7 @@ const FFCodec ff_msrle_decoder = {
.priv_data_size = sizeof(MsrleContext),
.init = msrle_decode_init,
.close = msrle_decode_end,
- .decode = msrle_decode_frame,
+ FF_CODEC_DECODE_CB(msrle_decode_frame),
.flush = msrle_decode_flush,
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -227,7 +227,7 @@ const FFCodec ff_mss1_decoder = {
.priv_data_size = sizeof(MSS1Context),
.init = mss1_decode_init,
.close = mss1_decode_end,
- .decode = mss1_decode_frame,
+ FF_CODEC_DECODE_CB(mss1_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -854,7 +854,7 @@ const FFCodec ff_mss2_decoder = {
.priv_data_size = sizeof(MSS2Context),
.init = mss2_decode_init,
.close = mss2_decode_end,
- .decode = mss2_decode_frame,
+ FF_CODEC_DECODE_CB(mss2_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -868,7 +868,7 @@ const FFCodec ff_msa1_decoder = {
.priv_data_size = sizeof(MSS3Context),
.init = mss3_decode_init,
.close = mss3_decode_end,
- .decode = mss3_decode_frame,
+ FF_CODEC_DECODE_CB(mss3_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -613,7 +613,7 @@ const FFCodec ff_mts2_decoder = {
.priv_data_size = sizeof(MSS4Context),
.init = mss4_decode_init,
.close = mss4_decode_end,
- .decode = mss4_decode_frame,
+ FF_CODEC_DECODE_CB(mss4_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -349,7 +349,7 @@ const FFCodec ff_msvideo1_decoder = {
.priv_data_size = sizeof(Msvideo1Context),
.init = msvideo1_decode_init,
.close = msvideo1_decode_end,
- .decode = msvideo1_decode_frame,
+ FF_CODEC_DECODE_CB(msvideo1_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -309,7 +309,7 @@ const FFCodec ff_msvideo1_encoder = {
.p.id = AV_CODEC_ID_MSVIDEO1,
.priv_data_size = sizeof(Msvideo1EncContext),
.init = encode_init,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.close = encode_end,
.p.pix_fmts = (const enum AVPixelFormat[]){AV_PIX_FMT_RGB555, AV_PIX_FMT_NONE},
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -710,7 +710,7 @@ const FFCodec ff_mv30_decoder = {
.priv_data_size = sizeof(MV30Context),
.init = decode_init,
.close = decode_close,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.flush = decode_flush,
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
@@ -262,7 +262,7 @@ const FFCodec ff_mvc1_decoder = {
.p.id = AV_CODEC_ID_MVC1,
.priv_data_size = sizeof(MvcContext),
.init = mvc_decode_init,
- .decode = mvc_decode_frame,
+ FF_CODEC_DECODE_CB(mvc_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -276,7 +276,7 @@ const FFCodec ff_mvc2_decoder = {
.p.id = AV_CODEC_ID_MVC2,
.priv_data_size = sizeof(MvcContext),
.init = mvc_decode_init,
- .decode = mvc_decode_frame,
+ FF_CODEC_DECODE_CB(mvc_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -306,7 +306,7 @@ const FFCodec ff_mvha_decoder = {
.priv_data_size = sizeof(MVHAContext),
.init = decode_init,
.close = decode_close,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
FF_CODEC_CAP_INIT_CLEANUP,
@@ -176,7 +176,7 @@ const FFCodec ff_mwsc_decoder = {
.priv_data_size = sizeof(MWSCContext),
.init = decode_init,
.close = decode_close,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
FF_CODEC_CAP_INIT_CLEANUP,
@@ -350,7 +350,7 @@ const FFCodec ff_mxpeg_decoder = {
.priv_data_size = sizeof(MXpegDecodeContext),
.init = mxpeg_decode_init,
.close = mxpeg_decode_end,
- .decode = mxpeg_decode_frame,
+ FF_CODEC_DECODE_CB(mxpeg_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.p.max_lowres = 3,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -194,7 +194,7 @@ const FFCodec ff_nellymoser_decoder = {
.priv_data_size = sizeof(NellyMoserDecodeContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_tag,
+ FF_CODEC_DECODE_CB(decode_tag),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_PARAM_CHANGE | AV_CODEC_CAP_CHANNEL_CONF,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_NONE },
@@ -422,7 +422,7 @@ const FFCodec ff_nellymoser_encoder = {
AV_CODEC_CAP_SMALL_LAST_FRAME,
.priv_data_size = sizeof(NellyMoserEncodeContext),
.init = encode_init,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.close = encode_end,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLT,
AV_SAMPLE_FMT_NONE },
@@ -543,7 +543,7 @@ const FFCodec ff_notchlc_decoder = {
.priv_data_size = sizeof(NotchLCContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -370,7 +370,7 @@ const FFCodec ff_nuv_decoder = {
.priv_data_size = sizeof(NuvContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -225,7 +225,7 @@ const FFCodec ff_h264_nvenc_encoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_H264,
.init = ff_nvenc_encode_init,
- .receive_packet = ff_nvenc_receive_packet,
+ FF_CODEC_RECEIVE_PACKET_CB(ff_nvenc_receive_packet),
.close = ff_nvenc_encode_close,
.flush = ff_nvenc_encode_flush,
.priv_data_size = sizeof(NvencContext),
@@ -206,7 +206,7 @@ const FFCodec ff_hevc_nvenc_encoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_HEVC,
.init = ff_nvenc_encode_init,
- .receive_packet = ff_nvenc_receive_packet,
+ FF_CODEC_RECEIVE_PACKET_CB(ff_nvenc_receive_packet),
.close = ff_nvenc_encode_close,
.flush = ff_nvenc_encode_flush,
.priv_data_size = sizeof(NvencContext),
@@ -943,7 +943,7 @@ const FFCodec ff_mpeg4_omx_encoder = {
.p.id = AV_CODEC_ID_MPEG4,
.priv_data_size = sizeof(OMXCodecContext),
.init = omx_encode_init,
- .encode2 = omx_encode_frame,
+ FF_CODEC_ENCODE_CB(omx_encode_frame),
.close = omx_encode_end,
.p.pix_fmts = omx_encoder_pix_fmts,
.p.capabilities = AV_CODEC_CAP_DELAY,
@@ -964,7 +964,7 @@ const FFCodec ff_h264_omx_encoder = {
.p.id = AV_CODEC_ID_H264,
.priv_data_size = sizeof(OMXCodecContext),
.init = omx_encode_init,
- .encode2 = omx_encode_frame,
+ FF_CODEC_ENCODE_CB(omx_encode_frame),
.close = omx_encode_end,
.p.pix_fmts = omx_encoder_pix_fmts,
.p.capabilities = AV_CODEC_CAP_DELAY,
@@ -1011,7 +1011,7 @@ const FFCodec ff_on2avc_decoder = {
.p.id = AV_CODEC_ID_ON2AVC,
.priv_data_size = sizeof(On2AVCContext),
.init = on2avc_decode_init,
- .decode = on2avc_decode_frame,
+ FF_CODEC_DECODE_CB(on2avc_decode_frame),
.close = on2avc_decode_close,
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -713,7 +713,7 @@ const FFCodec ff_opus_decoder = {
.priv_data_size = sizeof(OpusContext),
.init = opus_decode_init,
.close = opus_decode_close,
- .decode = opus_decode_packet,
+ FF_CODEC_DECODE_CB(opus_decode_packet),
.flush = opus_decode_flush,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -734,7 +734,7 @@ const FFCodec ff_opus_encoder = {
.p.priv_class = &opusenc_class,
.priv_data_size = sizeof(OpusEncContext),
.init = opus_encode_init,
- .encode2 = opus_encode_frame,
+ FF_CODEC_ENCODE_CB(opus_encode_frame),
.close = opus_encode_end,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.p.capabilities = AV_CODEC_CAP_EXPERIMENTAL | AV_CODEC_CAP_SMALL_LAST_FRAME | AV_CODEC_CAP_DELAY,
@@ -79,7 +79,7 @@ const FFCodec ff_paf_audio_decoder = {
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_PAF_AUDIO,
.init = paf_audio_init,
- .decode = paf_audio_decode,
+ FF_CODEC_DECODE_CB(paf_audio_decode),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -416,7 +416,7 @@ const FFCodec ff_paf_video_decoder = {
.priv_data_size = sizeof(PAFVideoDecContext),
.init = paf_video_init,
.close = paf_video_close,
- .decode = paf_video_decode,
+ FF_CODEC_DECODE_CB(paf_video_decode),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -133,7 +133,7 @@ const FFCodec ff_pam_encoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_PAM,
.p.capabilities = AV_CODEC_CAP_DR1,
- .encode2 = pam_encode_frame,
+ FF_CODEC_ENCODE_CB(pam_encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_RGB24, AV_PIX_FMT_RGBA,
AV_PIX_FMT_RGB48BE, AV_PIX_FMT_RGBA64BE,
@@ -304,7 +304,7 @@ const FFCodec ff_pcm_bluray_decoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("PCM signed 16|20|24-bit big-endian for Blu-ray media"),
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_PCM_BLURAY,
- .decode = pcm_bluray_decode_frame,
+ FF_CODEC_DECODE_CB(pcm_bluray_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.p.sample_fmts = (const enum AVSampleFormat[]){
AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_NONE
@@ -277,7 +277,7 @@ const FFCodec ff_pcm_bluray_encoder = {
.p.id = AV_CODEC_ID_PCM_BLURAY,
.priv_data_size = sizeof(BlurayPCMEncContext),
.init = pcm_bluray_encode_init,
- .encode2 = pcm_bluray_encode_frame,
+ FF_CODEC_ENCODE_CB(pcm_bluray_encode_frame),
.p.supported_samplerates = (const int[]) { 48000, 96000, 192000, 0 },
#if FF_API_OLD_CHANNEL_LAYOUT
.p.channel_layouts = (const uint64_t[]) {
@@ -302,7 +302,7 @@ const FFCodec ff_pcm_dvd_decoder = {
.p.id = AV_CODEC_ID_PCM_DVD,
.priv_data_size = sizeof(PCMDVDContext),
.init = pcm_dvd_decode_init,
- .decode = pcm_dvd_decode_frame,
+ FF_CODEC_DECODE_CB(pcm_dvd_decode_frame),
.p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
AV_CODEC_CAP_DR1,
.p.sample_fmts = (const enum AVSampleFormat[]) {
@@ -180,7 +180,7 @@ const FFCodec ff_pcm_dvd_encoder = {
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SMALL_LAST_FRAME,
.priv_data_size = sizeof(PCMDVDContext),
.init = pcm_dvd_encode_init,
- .encode2 = pcm_dvd_encode_frame,
+ FF_CODEC_ENCODE_CB(pcm_dvd_encode_frame),
.p.supported_samplerates = (const int[]) { 48000, 96000, 0},
#if FF_API_OLD_CHANNEL_LAYOUT
.p.channel_layouts = (const uint64_t[]) { AV_CH_LAYOUT_MONO,
@@ -561,7 +561,7 @@ const FFCodec ff_ ## name_ ## _encoder = { \
.p.id = AV_CODEC_ID_ ## id_, \
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_VARIABLE_FRAME_SIZE, \
.init = pcm_encode_init, \
- .encode2 = pcm_encode_frame, \
+ FF_CODEC_ENCODE_CB(pcm_encode_frame), \
.p.sample_fmts = (const enum AVSampleFormat[]){ sample_fmt_, \
AV_SAMPLE_FMT_NONE }, \
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE, \
@@ -583,7 +583,7 @@ const FFCodec ff_ ## name_ ## _decoder = { \
.p.id = AV_CODEC_ID_ ## id_, \
.priv_data_size = sizeof(PCMDecode), \
.init = pcm_decode_init, \
- .decode = pcm_decode_frame, \
+ FF_CODEC_DECODE_CB(pcm_decode_frame), \
.p.capabilities = AV_CODEC_CAP_DR1, \
.p.sample_fmts = (const enum AVSampleFormat[]){ sample_fmt_, \
AV_SAMPLE_FMT_NONE }, \
@@ -254,6 +254,6 @@ const FFCodec ff_pcx_decoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("PC Paintbrush PCX image"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_PCX,
- .decode = pcx_decode_frame,
+ FF_CODEC_DECODE_CB(pcx_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
};
@@ -197,7 +197,7 @@ const FFCodec ff_pcx_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("PC Paintbrush PCX image"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_PCX,
- .encode2 = pcx_encode_frame,
+ FF_CODEC_ENCODE_CB(pcx_encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_RGB24,
AV_PIX_FMT_RGB8, AV_PIX_FMT_BGR8, AV_PIX_FMT_RGB4_BYTE, AV_PIX_FMT_BGR4_BYTE,
@@ -698,7 +698,7 @@ const FFCodec ff_pgssub_decoder = {
.priv_data_size = sizeof(PGSSubContext),
.init = init_decoder,
.close = close_decoder,
- .decode_sub = decode,
+ FF_CODEC_DECODE_SUB_CB(decode),
.p.priv_class = &pgsdec_class,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -164,5 +164,5 @@ const FFCodec ff_pgx_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_PGX,
.p.capabilities = AV_CODEC_CAP_DR1,
- .decode = pgx_decode_frame,
+ FF_CODEC_DECODE_CB(pgx_decode_frame),
};
@@ -464,7 +464,7 @@ const FFCodec ff_photocd_decoder = {
.p.priv_class = &photocd_class,
.init = photocd_decode_init,
.close = photocd_decode_close,
- .decode = photocd_decode_frame,
+ FF_CODEC_DECODE_CB(photocd_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.p.long_name = NULL_IF_CONFIG_SMALL("Kodak Photo CD"),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -286,5 +286,5 @@ const FFCodec ff_pictor_decoder = {
.p.id = AV_CODEC_ID_PICTOR,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(PicContext),
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
};
@@ -699,7 +699,7 @@ const FFCodec ff_pixlet_decoder = {
.p.id = AV_CODEC_ID_PIXLET,
.init = pixlet_init,
.close = pixlet_close,
- .decode = pixlet_decode_frame,
+ FF_CODEC_DECODE_CB(pixlet_decode_frame),
.priv_data_size = sizeof(PixletContext),
.p.capabilities = AV_CODEC_CAP_DR1 |
AV_CODEC_CAP_FRAME_THREADS,
@@ -1723,7 +1723,7 @@ const FFCodec ff_apng_decoder = {
.priv_data_size = sizeof(PNGDecContext),
.init = png_dec_init,
.close = png_dec_end,
- .decode = decode_frame_apng,
+ FF_CODEC_DECODE_CB(decode_frame_apng),
.update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP |
@@ -1740,7 +1740,7 @@ const FFCodec ff_png_decoder = {
.priv_data_size = sizeof(PNGDecContext),
.init = png_dec_init,
.close = png_dec_end,
- .decode = decode_frame_png,
+ FF_CODEC_DECODE_CB(decode_frame_png),
.update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
.caps_internal = FF_CODEC_CAP_SKIP_FRAME_FILL_PARAM | FF_CODEC_CAP_INIT_THREADSAFE |
@@ -1119,7 +1119,7 @@ const FFCodec ff_png_encoder = {
.priv_data_size = sizeof(PNGEncContext),
.init = png_enc_init,
.close = png_enc_close,
- .encode2 = encode_png,
+ FF_CODEC_ENCODE_CB(encode_png),
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.p.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_RGB24, AV_PIX_FMT_RGBA,
@@ -1142,7 +1142,7 @@ const FFCodec ff_apng_encoder = {
.priv_data_size = sizeof(PNGEncContext),
.init = png_enc_init,
.close = png_enc_close,
- .encode2 = encode_apng,
+ FF_CODEC_ENCODE_CB(encode_apng),
.p.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_RGB24, AV_PIX_FMT_RGBA,
AV_PIX_FMT_RGB48BE, AV_PIX_FMT_RGBA64BE,
@@ -338,7 +338,7 @@ const FFCodec ff_pgm_decoder = {
.p.id = AV_CODEC_ID_PGM,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(PNMContext),
- .decode = pnm_decode_frame,
+ FF_CODEC_DECODE_CB(pnm_decode_frame),
};
#endif
@@ -350,7 +350,7 @@ const FFCodec ff_pgmyuv_decoder = {
.p.id = AV_CODEC_ID_PGMYUV,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(PNMContext),
- .decode = pnm_decode_frame,
+ FF_CODEC_DECODE_CB(pnm_decode_frame),
};
#endif
@@ -362,7 +362,7 @@ const FFCodec ff_ppm_decoder = {
.p.id = AV_CODEC_ID_PPM,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(PNMContext),
- .decode = pnm_decode_frame,
+ FF_CODEC_DECODE_CB(pnm_decode_frame),
};
#endif
@@ -374,7 +374,7 @@ const FFCodec ff_pbm_decoder = {
.p.id = AV_CODEC_ID_PBM,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(PNMContext),
- .decode = pnm_decode_frame,
+ FF_CODEC_DECODE_CB(pnm_decode_frame),
};
#endif
@@ -386,7 +386,7 @@ const FFCodec ff_pam_decoder = {
.p.id = AV_CODEC_ID_PAM,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(PNMContext),
- .decode = pnm_decode_frame,
+ FF_CODEC_DECODE_CB(pnm_decode_frame),
};
#endif
@@ -398,6 +398,6 @@ const FFCodec ff_pfm_decoder = {
.p.id = AV_CODEC_ID_PFM,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(PNMContext),
- .decode = pnm_decode_frame,
+ FF_CODEC_DECODE_CB(pnm_decode_frame),
};
#endif
@@ -157,7 +157,7 @@ const FFCodec ff_pgm_encoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_PGM,
.p.capabilities = AV_CODEC_CAP_DR1,
- .encode2 = pnm_encode_frame,
+ FF_CODEC_ENCODE_CB(pnm_encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_GRAY8, AV_PIX_FMT_GRAY16BE, AV_PIX_FMT_NONE
},
@@ -172,7 +172,7 @@ const FFCodec ff_pgmyuv_encoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_PGMYUV,
.p.capabilities = AV_CODEC_CAP_DR1,
- .encode2 = pnm_encode_frame,
+ FF_CODEC_ENCODE_CB(pnm_encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV420P16BE, AV_PIX_FMT_NONE
},
@@ -187,7 +187,7 @@ const FFCodec ff_ppm_encoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_PPM,
.p.capabilities = AV_CODEC_CAP_DR1,
- .encode2 = pnm_encode_frame,
+ FF_CODEC_ENCODE_CB(pnm_encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_RGB24, AV_PIX_FMT_RGB48BE, AV_PIX_FMT_NONE
},
@@ -202,7 +202,7 @@ const FFCodec ff_pbm_encoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_PBM,
.p.capabilities = AV_CODEC_CAP_DR1,
- .encode2 = pnm_encode_frame,
+ FF_CODEC_ENCODE_CB(pnm_encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_MONOWHITE,
AV_PIX_FMT_NONE },
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -216,7 +216,7 @@ const FFCodec ff_pfm_encoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_PFM,
.p.capabilities = AV_CODEC_CAP_DR1,
- .encode2 = pnm_encode_frame,
+ FF_CODEC_ENCODE_CB(pnm_encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_GBRPF32,
AV_PIX_FMT_NONE },
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -878,7 +878,7 @@ const FFCodec ff_prores_decoder = {
.priv_data_size = sizeof(ProresContext),
.init = decode_init,
.close = decode_close,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
.p.profiles = NULL_IF_CONFIG_SMALL(ff_prores_profiles),
@@ -949,7 +949,7 @@ const FFCodec ff_prores_aw_encoder = {
.priv_data_size = sizeof(ProresContext),
.init = prores_encode_init,
.close = prores_encode_close,
- .encode2 = prores_encode_frame,
+ FF_CODEC_ENCODE_CB(prores_encode_frame),
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.p.priv_class = &prores_enc_class,
.p.profiles = NULL_IF_CONFIG_SMALL(ff_prores_profiles),
@@ -965,7 +965,7 @@ const FFCodec ff_prores_encoder = {
.priv_data_size = sizeof(ProresContext),
.init = prores_encode_init,
.close = prores_encode_close,
- .encode2 = prores_encode_frame,
+ FF_CODEC_ENCODE_CB(prores_encode_frame),
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.p.priv_class = &prores_enc_class,
.p.profiles = NULL_IF_CONFIG_SMALL(ff_prores_profiles),
@@ -1401,7 +1401,7 @@ const FFCodec ff_prores_ks_encoder = {
.priv_data_size = sizeof(ProresContext),
.init = encode_init,
.close = encode_close,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.p.capabilities = AV_CODEC_CAP_SLICE_THREADS | AV_CODEC_CAP_FRAME_THREADS,
.p.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
@@ -372,7 +372,7 @@ const FFCodec ff_prosumer_decoder = {
.p.id = AV_CODEC_ID_PROSUMER,
.priv_data_size = sizeof(ProSumerContext),
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.close = decode_close,
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
@@ -551,5 +551,5 @@ const FFCodec ff_psd_decoder = {
.p.id = AV_CODEC_ID_PSD,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.priv_data_size = sizeof(PSDContext),
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
};
@@ -218,7 +218,7 @@ FF_ENABLE_DEPRECATION_WARNINGS

av_frame_unref(p->frame);
p->got_frame = 0;
- p->result = codec->decode(avctx, p->frame, &p->got_frame, p->avpkt);
+ p->result = codec->cb.decode(avctx, p->frame, &p->got_frame, p->avpkt);

if ((p->result < 0 || !p->got_frame) && p->frame->buf[0])
ff_thread_release_buffer(avctx, p->frame);
@@ -92,5 +92,5 @@ const FFCodec ff_ptx_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_PTX,
.p.capabilities = AV_CODEC_CAP_DR1,
- .decode = ptx_decode_frame,
+ FF_CODEC_DECODE_CB(ptx_decode_frame),
};
@@ -796,7 +796,7 @@ const FFCodec ff_qcelp_decoder = {
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_QCELP,
.init = qcelp_decode_init,
- .decode = qcelp_decode_frame,
+ FF_CODEC_DECODE_CB(qcelp_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.priv_data_size = sizeof(QCELPContext),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -1877,7 +1877,7 @@ const FFCodec ff_qdm2_decoder = {
.priv_data_size = sizeof(QDM2Context),
.init = qdm2_decode_init,
.close = qdm2_decode_close,
- .decode = qdm2_decode_frame,
+ FF_CODEC_DECODE_CB(qdm2_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -736,7 +736,7 @@ const FFCodec ff_qdmc_decoder = {
.priv_data_size = sizeof(QDMCContext),
.init = qdmc_decode_init,
.close = qdmc_decode_close,
- .decode = qdmc_decode_frame,
+ FF_CODEC_DECODE_CB(qdmc_decode_frame),
.flush = qdmc_flush,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -519,5 +519,5 @@ const FFCodec ff_qdraw_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_QDRAW,
.p.capabilities = AV_CODEC_CAP_DR1,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
};
@@ -359,7 +359,7 @@ const FFCodec ff_qpeg_decoder = {
.priv_data_size = sizeof(QpegContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.flush = decode_flush,
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
@@ -984,7 +984,7 @@ const FFCodec ff_##x##_qsv_decoder = { \
.p.type = AVMEDIA_TYPE_VIDEO, \
.p.id = AV_CODEC_ID_##X, \
.init = qsv_decode_init, \
- .decode = qsv_decode_frame, \
+ FF_CODEC_DECODE_CB(qsv_decode_frame), \
.flush = qsv_decode_flush, \
.close = qsv_decode_close, \
.bsfs = bsf_name, \
@@ -184,7 +184,7 @@ const FFCodec ff_h264_qsv_encoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_H264,
.init = qsv_enc_init,
- .encode2 = qsv_enc_frame,
+ FF_CODEC_ENCODE_CB(qsv_enc_frame),
.close = qsv_enc_close,
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID,
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12,
@@ -298,7 +298,7 @@ const FFCodec ff_hevc_qsv_encoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_HEVC,
.init = qsv_enc_init,
- .encode2 = qsv_enc_frame,
+ FF_CODEC_ENCODE_CB(qsv_enc_frame),
.close = qsv_enc_close,
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID,
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12,
@@ -86,7 +86,7 @@ const FFCodec ff_mjpeg_qsv_encoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_MJPEG,
.init = qsv_enc_init,
- .encode2 = qsv_enc_frame,
+ FF_CODEC_ENCODE_CB(qsv_enc_frame),
.close = qsv_enc_close,
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID,
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12,
@@ -99,7 +99,7 @@ const FFCodec ff_mpeg2_qsv_encoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_MPEG2VIDEO,
.init = qsv_enc_init,
- .encode2 = qsv_enc_frame,
+ FF_CODEC_ENCODE_CB(qsv_enc_frame),
.close = qsv_enc_close,
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID,
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12,
@@ -109,7 +109,7 @@ const FFCodec ff_vp9_qsv_encoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_VP9,
.init = qsv_enc_init,
- .encode2 = qsv_enc_frame,
+ FF_CODEC_ENCODE_CB(qsv_enc_frame),
.close = qsv_enc_close,
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HYBRID,
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_NV12,
@@ -589,7 +589,7 @@ const FFCodec ff_qtrle_decoder = {
.priv_data_size = sizeof(QtrleContext),
.init = qtrle_decode_init,
.close = qtrle_decode_end,
- .decode = qtrle_decode_frame,
+ FF_CODEC_DECODE_CB(qtrle_decode_frame),
.flush = qtrle_decode_flush,
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -406,7 +406,7 @@ const FFCodec ff_qtrle_encoder = {
.p.id = AV_CODEC_ID_QTRLE,
.priv_data_size = sizeof(QtrleEncContext),
.init = qtrle_encode_init,
- .encode2 = qtrle_encode_frame,
+ FF_CODEC_ENCODE_CB(qtrle_encode_frame),
.close = qtrle_encode_end,
.p.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_RGB24, AV_PIX_FMT_RGB555BE, AV_PIX_FMT_ARGB, AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE
@@ -109,7 +109,7 @@ const FFCodec ff_r210_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_R210,
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -121,7 +121,7 @@ const FFCodec ff_r10k_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_R10K,
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -133,7 +133,7 @@ const FFCodec ff_avrp_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_AVRP,
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -98,7 +98,7 @@ const FFCodec ff_r210_encoder = {
.p.id = AV_CODEC_ID_R210,
.p.capabilities = AV_CODEC_CAP_DR1,
.init = encode_init,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.p.pix_fmts = pix_fmt,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -111,7 +111,7 @@ const FFCodec ff_r10k_encoder = {
.p.id = AV_CODEC_ID_R10K,
.p.capabilities = AV_CODEC_CAP_DR1,
.init = encode_init,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.p.pix_fmts = pix_fmt,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -124,7 +124,7 @@ const FFCodec ff_avrp_encoder = {
.p.id = AV_CODEC_ID_AVRP,
.p.capabilities = AV_CODEC_CAP_DR1,
.init = encode_init,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.p.pix_fmts = pix_fmt,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -133,7 +133,7 @@ const FFCodec ff_ra_144_decoder = {
.p.id = AV_CODEC_ID_RA_144,
.priv_data_size = sizeof(RA144Context),
.init = ra144_decode_init,
- .decode = ra144_decode_frame,
+ FF_CODEC_DECODE_CB(ra144_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -544,7 +544,7 @@ const FFCodec ff_ra_144_encoder = {
AV_CODEC_CAP_SMALL_LAST_FRAME,
.priv_data_size = sizeof(RA144Context),
.init = ra144_encode_init,
- .encode2 = ra144_encode_frame,
+ FF_CODEC_ENCODE_CB(ra144_encode_frame),
.close = ra144_encode_close,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
@@ -244,7 +244,7 @@ const FFCodec ff_ra_288_decoder = {
.p.id = AV_CODEC_ID_RA_288,
.priv_data_size = sizeof(RA288Context),
.init = ra288_decode_init,
- .decode = ra288_decode_frame,
+ FF_CODEC_DECODE_CB(ra288_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -521,7 +521,7 @@ const FFCodec ff_ralf_decoder = {
.priv_data_size = sizeof(RALFContext),
.init = decode_init,
.close = decode_close,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.flush = decode_flush,
.p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
AV_CODEC_CAP_DR1,
@@ -805,7 +805,7 @@ const FFCodec ff_rasc_decoder = {
.priv_data_size = sizeof(RASCContext),
.init = decode_init,
.close = decode_close,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.flush = decode_flush,
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
@@ -488,7 +488,7 @@ const FFCodec ff_rawvideo_decoder = {
.priv_data_size = sizeof(RawVideoContext),
.init = raw_init_decoder,
.close = raw_close_decoder,
- .decode = raw_decode,
+ FF_CODEC_DECODE_CB(raw_decode),
.p.priv_class = &rawdec_class,
.p.capabilities = AV_CODEC_CAP_PARAM_CHANGE,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -88,6 +88,6 @@ const FFCodec ff_rawvideo_encoder = {
.p.id = AV_CODEC_ID_RAWVIDEO,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.init = raw_encode_init,
- .encode2 = raw_encode,
+ FF_CODEC_ENCODE_CB(raw_encode),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -79,7 +79,7 @@ const FFCodec ff_realtext_decoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("RealText subtitle"),
.p.type = AVMEDIA_TYPE_SUBTITLE,
.p.id = AV_CODEC_ID_REALTEXT,
- .decode_sub = realtext_decode_frame,
+ FF_CODEC_DECODE_SUB_CB(realtext_decode_frame),
.init = ff_ass_subtitle_header_default,
.flush = ff_ass_decoder_flush,
.priv_data_size = sizeof(FFASSDecoderContext),
@@ -569,7 +569,7 @@ static const AVCodecHWConfigInternal *const rkmpp_hw_configs[] = {
.priv_data_size = sizeof(RKMPPDecodeContext), \
.init = rkmpp_init_decoder, \
.close = rkmpp_close_decoder, \
- .receive_frame = rkmpp_receive_frame, \
+ FF_CODEC_RECEIVE_FRAME_CB(rkmpp_receive_frame), \
.flush = rkmpp_flush, \
.p.priv_class = &rkmpp_##NAME##_dec_class, \
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING | AV_CODEC_CAP_HARDWARE, \
@@ -225,7 +225,7 @@ const FFCodec ff_rl2_decoder = {
.priv_data_size = sizeof(Rl2Context),
.init = rl2_decode_init,
.close = rl2_decode_end,
- .decode = rl2_decode_frame,
+ FF_CODEC_DECODE_CB(rl2_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -195,7 +195,7 @@ const FFCodec ff_roq_dpcm_encoder = {
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
.priv_data_size = sizeof(ROQDPCMContext),
.init = roq_dpcm_encode_init,
- .encode2 = roq_dpcm_encode_frame,
+ FF_CODEC_ENCODE_CB(roq_dpcm_encode_frame),
.close = roq_dpcm_encode_close,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
@@ -241,7 +241,7 @@ const FFCodec ff_roq_decoder = {
.priv_data_size = sizeof(RoqContext),
.init = roq_decode_init,
.close = roq_decode_end,
- .decode = roq_decode_frame,
+ FF_CODEC_DECODE_CB(roq_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -1125,7 +1125,7 @@ const FFCodec ff_roq_encoder = {
.p.id = AV_CODEC_ID_ROQ,
.priv_data_size = sizeof(RoqEncContext),
.init = roq_encode_init,
- .encode2 = roq_encode_frame,
+ FF_CODEC_ENCODE_CB(roq_encode_frame),
.close = roq_encode_end,
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUVJ444P,
AV_PIX_FMT_NONE },
@@ -294,7 +294,7 @@ const FFCodec ff_rpza_decoder = {
.priv_data_size = sizeof(RpzaContext),
.init = rpza_decode_init,
.close = rpza_decode_end,
- .decode = rpza_decode_frame,
+ FF_CODEC_DECODE_CB(rpza_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -851,7 +851,7 @@ const FFCodec ff_rpza_encoder = {
.priv_data_size = sizeof(RpzaContext),
.p.priv_class = &rpza_class,
.init = rpza_encode_init,
- .encode2 = rpza_encode_frame,
+ FF_CODEC_ENCODE_CB(rpza_encode_frame),
.close = rpza_encode_end,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_RGB555,
@@ -368,7 +368,7 @@ const FFCodec ff_rscc_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_RSCC,
.init = rscc_init,
- .decode = rscc_decode_frame,
+ FF_CODEC_DECODE_CB(rscc_decode_frame),
.close = rscc_close,
.priv_data_size = sizeof(RsccContext),
.p.capabilities = AV_CODEC_CAP_DR1,
@@ -689,7 +689,7 @@ const FFCodec ff_rv10_decoder = {
.priv_data_size = sizeof(RVDecContext),
.init = rv10_decode_init,
.close = rv10_decode_end,
- .decode = rv10_decode_frame,
+ FF_CODEC_DECODE_CB(rv10_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
.p.max_lowres = 3,
@@ -707,7 +707,7 @@ const FFCodec ff_rv20_decoder = {
.priv_data_size = sizeof(RVDecContext),
.init = rv10_decode_init,
.close = rv10_decode_end,
- .decode = rv10_decode_frame,
+ FF_CODEC_DECODE_CB(rv10_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
.flush = ff_mpeg_flush,
@@ -73,7 +73,7 @@ const FFCodec ff_rv10_encoder = {
.p.priv_class = &ff_mpv_enc_class,
.priv_data_size = sizeof(MpegEncContext),
.init = ff_mpv_encode_init,
- .encode2 = ff_mpv_encode_picture,
+ FF_CODEC_ENCODE_CB(ff_mpv_encode_picture),
.close = ff_mpv_encode_end,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
@@ -70,7 +70,7 @@ const FFCodec ff_rv20_encoder = {
.p.priv_class = &ff_mpv_enc_class,
.priv_data_size = sizeof(MpegEncContext),
.init = ff_mpv_encode_init,
- .encode2 = ff_mpv_encode_picture,
+ FF_CODEC_ENCODE_CB(ff_mpv_encode_picture),
.close = ff_mpv_encode_end,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
@@ -299,7 +299,7 @@ const FFCodec ff_rv30_decoder = {
.priv_data_size = sizeof(RV34DecContext),
.init = rv30_decode_init,
.close = ff_rv34_decode_end,
- .decode = ff_rv34_decode_frame,
+ FF_CODEC_DECODE_CB(ff_rv34_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
AV_CODEC_CAP_FRAME_THREADS,
.flush = ff_mpeg_flush,
@@ -582,7 +582,7 @@ const FFCodec ff_rv40_decoder = {
.priv_data_size = sizeof(RV34DecContext),
.init = rv40_decode_init,
.close = ff_rv34_decode_end,
- .decode = ff_rv34_decode_frame,
+ FF_CODEC_DECODE_CB(ff_rv34_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY |
AV_CODEC_CAP_FRAME_THREADS,
.flush = ff_mpeg_flush,
@@ -234,7 +234,7 @@ const FFCodec ff_s302m_decoder = {
.p.id = AV_CODEC_ID_S302M,
.p.priv_class = &s302m_class,
.priv_data_size = sizeof(S302Context),
- .decode = s302m_decode_frame,
+ FF_CODEC_DECODE_CB(s302m_decode_frame),
.p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
AV_CODEC_CAP_DR1,
};
@@ -179,7 +179,7 @@ const FFCodec ff_s302m_encoder = {
AV_CODEC_CAP_VARIABLE_FRAME_SIZE,
.priv_data_size = sizeof(S302MEncContext),
.init = s302m_encode_init,
- .encode2 = s302m_encode2_frame,
+ FF_CODEC_ENCODE_CB(s302m_encode2_frame),
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S32,
AV_SAMPLE_FMT_S16,
AV_SAMPLE_FMT_NONE },
@@ -188,7 +188,7 @@ const FFCodec ff_sami_decoder = {
.priv_data_size = sizeof(SAMIContext),
.init = sami_init,
.close = sami_close,
- .decode_sub = sami_decode_frame,
+ FF_CODEC_DECODE_SUB_CB(sami_decode_frame),
.flush = sami_flush,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -1524,7 +1524,7 @@ const FFCodec ff_sanm_decoder = {
.priv_data_size = sizeof(SANMVideoContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -372,7 +372,7 @@ const FFCodec ff_sbc_decoder = {
.p.id = AV_CODEC_ID_SBC,
.priv_data_size = sizeof(SBCDecContext),
.init = sbc_decode_init,
- .decode = sbc_decode_frame,
+ FF_CODEC_DECODE_CB(sbc_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
#if FF_API_OLD_CHANNEL_LAYOUT
@@ -351,7 +351,7 @@ const FFCodec ff_sbc_encoder = {
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SMALL_LAST_FRAME,
.priv_data_size = sizeof(SBCEncContext),
.init = sbc_encode_init,
- .encode2 = sbc_encode_frame,
+ FF_CODEC_ENCODE_CB(sbc_encode_frame),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
#if FF_API_OLD_CHANNEL_LAYOUT
.p.channel_layouts = (const uint64_t[]) { AV_CH_LAYOUT_MONO,
@@ -675,7 +675,7 @@ const FFCodec ff_scpr_decoder = {
.priv_data_size = sizeof(SCPRContext),
.init = decode_init,
.close = decode_close,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
FF_CODEC_CAP_INIT_CLEANUP,
@@ -189,7 +189,7 @@ const FFCodec ff_screenpresso_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_SCREENPRESSO,
.init = screenpresso_init,
- .decode = screenpresso_decode_frame,
+ FF_CODEC_DECODE_CB(screenpresso_decode_frame),
.close = screenpresso_close,
.priv_data_size = sizeof(ScreenpressoContext),
.p.capabilities = AV_CODEC_CAP_DR1,
@@ -526,7 +526,7 @@ const FFCodec ff_sga_decoder = {
.p.id = AV_CODEC_ID_SGA_VIDEO,
.priv_data_size = sizeof(SGAVideoContext),
.init = sga_decode_init,
- .decode = sga_decode_frame,
+ FF_CODEC_DECODE_CB(sga_decode_frame),
.close = sga_decode_end,
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -292,7 +292,7 @@ const FFCodec ff_sgi_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_SGI,
.priv_data_size = sizeof(SgiState),
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.init = sgi_decode_init,
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -278,7 +278,7 @@ const FFCodec ff_sgi_encoder = {
.priv_data_size = sizeof(SgiContext),
.p.priv_class = &sgi_class,
.init = encode_init,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_RGB24, AV_PIX_FMT_RGBA,
AV_PIX_FMT_RGB48LE, AV_PIX_FMT_RGB48BE,
@@ -137,7 +137,7 @@ const FFCodec ff_sgirle_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_SGIRLE,
.init = sgirle_decode_init,
- .decode = sgirle_decode_frame,
+ FF_CODEC_DECODE_CB(sgirle_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -2006,5 +2006,5 @@ const FFCodec ff_sheervideo_decoder = {
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.priv_data_size = sizeof(SheerVideoContext),
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
};
@@ -811,7 +811,7 @@ const FFCodec ff_shorten_decoder = {
.priv_data_size = sizeof(ShortenContext),
.init = shorten_decode_init,
.close = shorten_decode_close,
- .decode = shorten_decode_frame,
+ FF_CODEC_DECODE_CB(shorten_decode_frame),
.p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
AV_CODEC_CAP_DELAY |
AV_CODEC_CAP_DR1 |
@@ -569,7 +569,7 @@ const FFCodec ff_sipr_decoder = {
.p.id = AV_CODEC_ID_SIPR,
.priv_data_size = sizeof(SiprContext),
.init = sipr_decoder_init,
- .decode = sipr_decode_frame,
+ FF_CODEC_DECODE_CB(sipr_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -849,7 +849,7 @@ const FFCodec ff_siren_decoder = {
.p.id = AV_CODEC_ID_SIREN,
.init = siren_init,
.close = siren_close,
- .decode = siren_decode,
+ FF_CODEC_DECODE_CB(siren_decode),
.flush = siren_flush,
.p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
AV_CODEC_CAP_DR1,
@@ -865,7 +865,7 @@ const FFCodec ff_msnsiren_decoder = {
.p.id = AV_CODEC_ID_MSNSIREN,
.init = siren_init,
.close = siren_close,
- .decode = siren_decode,
+ FF_CODEC_DECODE_CB(siren_decode),
.flush = siren_flush,
.p.capabilities = AV_CODEC_CAP_CHANNEL_CONF |
AV_CODEC_CAP_DR1,
@@ -730,7 +730,7 @@ const FFCodec ff_smacker_decoder = {
.priv_data_size = sizeof(SmackVContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -741,7 +741,7 @@ const FFCodec ff_smackaud_decoder = {
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_SMACKAUDIO,
.init = smka_decode_init,
- .decode = smka_decode_frame,
+ FF_CODEC_DECODE_CB(smka_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -476,7 +476,7 @@ const FFCodec ff_smc_decoder = {
.priv_data_size = sizeof(SmcContext),
.init = smc_decode_init,
.close = smc_decode_end,
- .decode = smc_decode_frame,
+ FF_CODEC_DECODE_CB(smc_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -555,7 +555,7 @@ const FFCodec ff_smc_encoder = {
.p.id = AV_CODEC_ID_SMC,
.priv_data_size = sizeof(SMCContext),
.init = smc_encode_init,
- .encode2 = smc_encode_frame,
+ FF_CODEC_ENCODE_CB(smc_encode_frame),
.close = smc_encode_end,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_PAL8,
@@ -661,7 +661,7 @@ const FFCodec ff_snow_decoder = {
.priv_data_size = sizeof(SnowContext),
.init = ff_snow_common_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 /*| AV_CODEC_CAP_DRAW_HORIZ_BAND*/,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
FF_CODEC_CAP_INIT_CLEANUP,
@@ -1919,7 +1919,7 @@ const FFCodec ff_snow_encoder = {
.p.id = AV_CODEC_ID_SNOW,
.priv_data_size = sizeof(SnowContext),
.init = encode_init,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.close = encode_end,
.p.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV410P, AV_PIX_FMT_YUV444P,
@@ -1084,7 +1084,7 @@ const FFCodec ff_sonic_decoder = {
.priv_data_size = sizeof(SonicContext),
.init = sonic_decode_init,
.close = sonic_decode_close,
- .decode = sonic_decode_frame,
+ FF_CODEC_DECODE_CB(sonic_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_EXPERIMENTAL | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -1098,7 +1098,7 @@ const FFCodec ff_sonic_encoder = {
.p.id = AV_CODEC_ID_SONIC,
.priv_data_size = sizeof(SonicContext),
.init = sonic_encode_init,
- .encode2 = sonic_encode_frame,
+ FF_CODEC_ENCODE_CB(sonic_encode_frame),
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE },
.p.capabilities = AV_CODEC_CAP_EXPERIMENTAL,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -1114,7 +1114,7 @@ const FFCodec ff_sonic_ls_encoder = {
.p.id = AV_CODEC_ID_SONIC_LS,
.priv_data_size = sizeof(SonicContext),
.init = sonic_encode_init,
- .encode2 = sonic_encode_frame,
+ FF_CODEC_ENCODE_CB(sonic_encode_frame),
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE },
.p.capabilities = AV_CODEC_CAP_EXPERIMENTAL,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -100,7 +100,7 @@ const FFCodec ff_sp5x_decoder = {
.priv_data_size = sizeof(MJpegDecodeContext),
.init = ff_mjpeg_decode_init,
.close = ff_mjpeg_decode_end,
- .receive_frame = ff_mjpeg_receive_frame,
+ FF_CODEC_RECEIVE_FRAME_CB(ff_mjpeg_receive_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.p.max_lowres = 3,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP |
@@ -116,7 +116,7 @@ const FFCodec ff_amv_decoder = {
.priv_data_size = sizeof(MJpegDecodeContext),
.init = ff_mjpeg_decode_init,
.close = ff_mjpeg_decode_end,
- .receive_frame = ff_mjpeg_receive_frame,
+ FF_CODEC_RECEIVE_FRAME_CB(ff_mjpeg_receive_frame),
.p.max_lowres = 3,
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP |
@@ -731,7 +731,7 @@ const FFCodec ff_speedhq_decoder = {
.p.id = AV_CODEC_ID_SPEEDHQ,
.priv_data_size = sizeof(SHQContext),
.init = speedhq_decode_init,
- .decode = speedhq_decode_frame,
+ FF_CODEC_DECODE_CB(speedhq_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -278,7 +278,7 @@ const FFCodec ff_speedhq_encoder = {
.p.priv_class = &ff_mpv_enc_class,
.priv_data_size = sizeof(MpegEncContext),
.init = ff_mpv_encode_init,
- .encode2 = ff_mpv_encode_picture,
+ FF_CODEC_ENCODE_CB(ff_mpv_encode_picture),
.close = ff_mpv_encode_end,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.p.pix_fmts = (const enum AVPixelFormat[]) {
@@ -1582,7 +1582,7 @@ const FFCodec ff_speex_decoder = {
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_SPEEX,
.init = speex_decode_init,
- .decode = speex_decode_frame,
+ FF_CODEC_DECODE_CB(speex_decode_frame),
.close = speex_decode_close,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.priv_data_size = sizeof(SpeexContext),
@@ -96,7 +96,7 @@ const FFCodec ff_srt_decoder = {
.p.type = AVMEDIA_TYPE_SUBTITLE,
.p.id = AV_CODEC_ID_SUBRIP,
.init = ff_ass_subtitle_header_default,
- .decode_sub = srt_decode_frame,
+ FF_CODEC_DECODE_SUB_CB(srt_decode_frame),
.flush = ff_ass_decoder_flush,
.priv_data_size = sizeof(FFASSDecoderContext),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -110,7 +110,7 @@ const FFCodec ff_subrip_decoder = {
.p.type = AVMEDIA_TYPE_SUBTITLE,
.p.id = AV_CODEC_ID_SUBRIP,
.init = ff_ass_subtitle_header_default,
- .decode_sub = srt_decode_frame,
+ FF_CODEC_DECODE_SUB_CB(srt_decode_frame),
.flush = ff_ass_decoder_flush,
.priv_data_size = sizeof(FFASSDecoderContext),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -300,7 +300,7 @@ const FFCodec ff_srt_encoder = {
.p.id = AV_CODEC_ID_SUBRIP,
.priv_data_size = sizeof(SRTContext),
.init = srt_encode_init,
- .encode_sub = srt_encode_frame,
+ FF_CODEC_ENCODE_SUB_CB(srt_encode_frame),
.close = srt_encode_close,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -314,7 +314,7 @@ const FFCodec ff_subrip_encoder = {
.p.id = AV_CODEC_ID_SUBRIP,
.priv_data_size = sizeof(SRTContext),
.init = srt_encode_init,
- .encode_sub = srt_encode_frame,
+ FF_CODEC_ENCODE_SUB_CB(srt_encode_frame),
.close = srt_encode_close,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -328,7 +328,7 @@ const FFCodec ff_text_encoder = {
.p.id = AV_CODEC_ID_TEXT,
.priv_data_size = sizeof(SRTContext),
.init = srt_encode_init,
- .encode_sub = text_encode_frame,
+ FF_CODEC_ENCODE_SUB_CB(text_encode_frame),
.close = srt_encode_close,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -70,7 +70,7 @@ const FFCodec ff_subviewer_decoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("SubViewer subtitle"),
.p.type = AVMEDIA_TYPE_SUBTITLE,
.p.id = AV_CODEC_ID_SUBVIEWER,
- .decode_sub = subviewer_decode_frame,
+ FF_CODEC_DECODE_SUB_CB(subviewer_decode_frame),
.init = ff_ass_subtitle_header_default,
.flush = ff_ass_decoder_flush,
.priv_data_size = sizeof(FFASSDecoderContext),
@@ -211,5 +211,5 @@ const FFCodec ff_sunrast_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_SUNRAST,
.p.capabilities = AV_CODEC_CAP_DR1,
- .decode = sunrast_decode_frame,
+ FF_CODEC_DECODE_CB(sunrast_decode_frame),
};
@@ -215,7 +215,7 @@ const FFCodec ff_sunrast_encoder = {
.p.id = AV_CODEC_ID_SUNRAST,
.priv_data_size = sizeof(SUNRASTContext),
.init = sunrast_encode_init,
- .encode2 = sunrast_encode_frame,
+ FF_CODEC_ENCODE_CB(sunrast_encode_frame),
.p.priv_class = &sunrast_class,
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_BGR24,
AV_PIX_FMT_PAL8,
@@ -851,7 +851,7 @@ const FFCodec ff_svq1_decoder = {
.priv_data_size = sizeof(SVQ1Context),
.init = svq1_decode_init,
.close = svq1_decode_end,
- .decode = svq1_decode_frame,
+ FF_CODEC_DECODE_CB(svq1_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.flush = svq1_flush,
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV410P,
@@ -684,7 +684,7 @@ const FFCodec ff_svq1_encoder = {
.priv_data_size = sizeof(SVQ1EncContext),
.p.priv_class = &svq1enc_class,
.init = svq1_encode_init,
- .encode2 = svq1_encode_frame,
+ FF_CODEC_ENCODE_CB(svq1_encode_frame),
.close = svq1_encode_end,
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV410P,
AV_PIX_FMT_NONE },
@@ -1596,7 +1596,7 @@ const FFCodec ff_svq3_decoder = {
.priv_data_size = sizeof(SVQ3Context),
.init = svq3_decode_init,
.close = svq3_decode_end,
- .decode = svq3_decode_frame,
+ FF_CODEC_DECODE_CB(svq3_decode_frame),
.p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND |
AV_CODEC_CAP_DR1 |
AV_CODEC_CAP_DELAY,
@@ -949,7 +949,7 @@ const FFCodec ff_tak_decoder = {
.priv_data_size = sizeof(TAKDecContext),
.init = tak_decode_init,
.close = tak_decode_close,
- .decode = tak_decode_frame,
+ FF_CODEC_DECODE_CB(tak_decode_frame),
.update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_CHANNEL_CONF,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_U8P,
@@ -312,5 +312,5 @@ const FFCodec ff_targa_decoder = {
.p.id = AV_CODEC_ID_TARGA,
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(TargaContext),
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
};
@@ -79,7 +79,7 @@ const FFCodec ff_targa_y216_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_TARGA_Y216,
.init = y216_decode_init,
- .decode = y216_decode_frame,
+ FF_CODEC_DECODE_CB(y216_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -210,7 +210,7 @@ const FFCodec ff_targa_encoder = {
.priv_data_size = sizeof(TargaContext),
.p.priv_class = &targa_class,
.init = targa_encode_init,
- .encode2 = targa_encode_frame,
+ FF_CODEC_ENCODE_CB(targa_encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]){
AV_PIX_FMT_BGR24, AV_PIX_FMT_BGRA, AV_PIX_FMT_RGB555LE, AV_PIX_FMT_GRAY8, AV_PIX_FMT_PAL8,
AV_PIX_FMT_NONE
@@ -627,7 +627,7 @@ const FFCodec ff_tdsc_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_TDSC,
.init = tdsc_init,
- .decode = tdsc_decode_frame,
+ FF_CODEC_DECODE_CB(tdsc_decode_frame),
.close = tdsc_close,
.priv_data_size = sizeof(TDSCContext),
.p.capabilities = AV_CODEC_CAP_DR1,
@@ -64,7 +64,7 @@ int main(void){
while (codec = av_codec_iterate(&iter)) {
const FFCodec *const codec2 = ffcodec(codec);
const AVCodecDescriptor *desc;
- int is_decoder, is_encoder;
+ int is_decoder = 0, is_encoder = 0;
if (!codec->name) {
AV_LOG("Codec for format %s has no name\n",
@@ -102,17 +102,38 @@ int main(void){
AV_CODEC_CAP_OTHER_THREADS)))
ERR("Codec %s has private-only threading support\n");
- is_decoder = av_codec_is_decoder(codec);
- is_encoder = av_codec_is_encoder(codec);
- if (!!is_decoder + !!is_encoder != 1) {
- ERR("Codec %s is decoder and encoder or neither.\n");
+ switch (codec2->cb_type) {
+ case FF_CODEC_CB_TYPE_DECODE:
+ case FF_CODEC_CB_TYPE_DECODE_SUB:
+ case FF_CODEC_CB_TYPE_RECEIVE_FRAME:
+ is_decoder = 1;
+ break;
+ case FF_CODEC_CB_TYPE_ENCODE:
+ case FF_CODEC_CB_TYPE_ENCODE_SUB:
+ case FF_CODEC_CB_TYPE_RECEIVE_PACKET:
+ is_encoder = 1;
+ break;
+ default:
+ ERR("Codec %s has unknown cb_type\n");
continue;
}
+ if (is_decoder != av_codec_is_decoder(codec) ||
+ is_encoder != av_codec_is_encoder(codec)) {
+ ERR("Codec %s cb_type and av_codec_is_(de|en)coder inconsistent.\n");
+ continue;
+ }
+#define CHECK(TYPE, type) (codec2->cb_type == FF_CODEC_CB_TYPE_ ## TYPE && !codec2->cb.type)
+ if (CHECK(DECODE, decode) || CHECK(DECODE_SUB, decode_sub) ||
+ CHECK(RECEIVE_PACKET, receive_packet) ||
+ CHECK(ENCODE, encode) || CHECK(ENCODE_SUB, encode_sub) ||
+ CHECK(RECEIVE_FRAME, receive_frame)) {
+ ERR_EXT("Codec %s does not implement its %s callback.\n",
+ is_decoder ? "decoding" : "encoding");
+ }
+#undef CHECK
if (is_encoder) {
- if (codec->type == AVMEDIA_TYPE_SUBTITLE ^ !!codec2->encode_sub)
+ if ((codec->type == AVMEDIA_TYPE_SUBTITLE) != (codec2->cb_type == FF_CODEC_CB_TYPE_ENCODE_SUB))
ERR("Encoder %s is both subtitle encoder and not subtitle encoder.");
- if (!!codec2->encode_sub + !!codec2->encode2 + !!codec2->receive_packet != 1)
- ERR("Encoder %s does not implement exactly one encode API.\n");
if (codec2->update_thread_context || codec2->update_thread_context_for_user || codec2->bsfs)
ERR("Encoder %s has decoder-only thread functions or bsf.\n");
if (codec->type == AVMEDIA_TYPE_AUDIO) {
@@ -135,14 +156,11 @@ int main(void){
codec->capabilities & AV_CODEC_CAP_ENCODER_FLUSH)
ERR("Frame-threaded encoder %s claims to support flushing\n");
} else {
- if (codec->type == AVMEDIA_TYPE_SUBTITLE && !codec2->decode_sub)
+ if ((codec->type == AVMEDIA_TYPE_SUBTITLE) != (codec2->cb_type == FF_CODEC_CB_TYPE_DECODE_SUB))
ERR("Subtitle decoder %s does not implement decode_sub callback\n");
if (codec->type == AVMEDIA_TYPE_SUBTITLE && codec2->bsfs)
ERR("Automatic bitstream filtering unsupported for subtitles; "
"yet decoder %s has it set\n");
- if (codec->type != AVMEDIA_TYPE_SUBTITLE !=
- !!codec2->decode + !!codec2->receive_frame)
- ERR("Decoder %s does not implement exactly one decode API.\n");
if (codec->capabilities & (AV_CODEC_CAP_SMALL_LAST_FRAME |
AV_CODEC_CAP_VARIABLE_FRAME_SIZE |
AV_CODEC_CAP_ENCODER_REORDERED_OPAQUE |
@@ -86,7 +86,7 @@ const FFCodec ff_text_decoder = {
.priv_data_size = sizeof(TextContext),
.p.type = AVMEDIA_TYPE_SUBTITLE,
.p.id = AV_CODEC_ID_TEXT,
- .decode_sub = text_decode_frame,
+ FF_CODEC_DECODE_SUB_CB(text_decode_frame),
.init = ff_ass_subtitle_header_default,
.p.priv_class = &textsub_decoder_class,
.flush = text_flush,
@@ -110,7 +110,7 @@ const FFCodec ff_vplayer_decoder = {
.priv_data_size = sizeof(TextContext),
.p.type = AVMEDIA_TYPE_SUBTITLE,
.p.id = AV_CODEC_ID_VPLAYER,
- .decode_sub = text_decode_frame,
+ FF_CODEC_DECODE_SUB_CB(text_decode_frame),
.init = linebreak_init,
.p.priv_class = &textsub_decoder_class,
.flush = text_flush,
@@ -125,7 +125,7 @@ const FFCodec ff_stl_decoder = {
.priv_data_size = sizeof(TextContext),
.p.type = AVMEDIA_TYPE_SUBTITLE,
.p.id = AV_CODEC_ID_STL,
- .decode_sub = text_decode_frame,
+ FF_CODEC_DECODE_SUB_CB(text_decode_frame),
.init = linebreak_init,
.p.priv_class = &textsub_decoder_class,
.flush = text_flush,
@@ -140,7 +140,7 @@ const FFCodec ff_pjs_decoder = {
.priv_data_size = sizeof(TextContext),
.p.type = AVMEDIA_TYPE_SUBTITLE,
.p.id = AV_CODEC_ID_PJS,
- .decode_sub = text_decode_frame,
+ FF_CODEC_DECODE_SUB_CB(text_decode_frame),
.init = linebreak_init,
.p.priv_class = &textsub_decoder_class,
.flush = text_flush,
@@ -155,7 +155,7 @@ const FFCodec ff_subviewer1_decoder = {
.priv_data_size = sizeof(TextContext),
.p.type = AVMEDIA_TYPE_SUBTITLE,
.p.id = AV_CODEC_ID_SUBVIEWER1,
- .decode_sub = text_decode_frame,
+ FF_CODEC_DECODE_SUB_CB(text_decode_frame),
.init = linebreak_init,
.p.priv_class = &textsub_decoder_class,
.flush = text_flush,
@@ -269,7 +269,7 @@ const FFCodec ff_tiertexseqvideo_decoder = {
.priv_data_size = sizeof(SeqVideoContext),
.init = seqvideo_decode_init,
.close = seqvideo_decode_end,
- .decode = seqvideo_decode_frame,
+ FF_CODEC_DECODE_CB(seqvideo_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -2185,7 +2185,7 @@ const FFCodec ff_tiff_decoder = {
.priv_data_size = sizeof(TiffContext),
.init = tiff_init,
.close = tiff_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.p.priv_class = &tiff_decoder_class,
@@ -578,7 +578,7 @@ const FFCodec ff_tiff_encoder = {
.init = encode_init,
.close = encode_close,
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]) {
AV_PIX_FMT_RGB24, AV_PIX_FMT_RGB48LE, AV_PIX_FMT_PAL8,
AV_PIX_FMT_RGBA, AV_PIX_FMT_RGBA64LE,
@@ -92,7 +92,7 @@ const FFCodec ff_tmv_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_TMV,
.init = tmv_decode_init,
- .decode = tmv_decode_frame,
+ FF_CODEC_DECODE_CB(tmv_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -918,7 +918,7 @@ const FFCodec ff_truemotion1_decoder = {
.priv_data_size = sizeof(TrueMotion1Context),
.init = truemotion1_decode_init,
.close = truemotion1_decode_end,
- .decode = truemotion1_decode_frame,
+ FF_CODEC_DECODE_CB(truemotion1_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -1016,7 +1016,7 @@ const FFCodec ff_truemotion2_decoder = {
.priv_data_size = sizeof(TM2Context),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -226,7 +226,7 @@ const FFCodec ff_truemotion2rt_decoder = {
.p.id = AV_CODEC_ID_TRUEMOTION2RT,
.priv_data_size = sizeof(TrueMotion2RTContext),
.init = truemotion2rt_decode_init,
- .decode = truemotion2rt_decode_frame,
+ FF_CODEC_DECODE_CB(truemotion2rt_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -363,7 +363,7 @@ const FFCodec ff_truespeech_decoder = {
.p.id = AV_CODEC_ID_TRUESPEECH,
.priv_data_size = sizeof(TSContext),
.init = truespeech_decode_init,
- .decode = truespeech_decode_frame,
+ FF_CODEC_DECODE_CB(truespeech_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -178,7 +178,7 @@ const FFCodec ff_tscc_decoder = {
.priv_data_size = sizeof(CamtasiaContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -364,7 +364,7 @@ const FFCodec ff_tscc2_decoder = {
.priv_data_size = sizeof(TSCC2Context),
.init = tscc2_decode_init,
.close = tscc2_decode_end,
- .decode = tscc2_decode_frame,
+ FF_CODEC_DECODE_CB(tscc2_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -428,7 +428,7 @@ const FFCodec ff_tta_decoder = {
.priv_data_size = sizeof(TTAContext),
.init = tta_decode_init,
.close = tta_decode_close,
- .decode = tta_decode_frame,
+ FF_CODEC_DECODE_CB(tta_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_CHANNEL_CONF,
.p.priv_class = &tta_decoder_class,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -210,7 +210,7 @@ const FFCodec ff_tta_encoder = {
.priv_data_size = sizeof(TTAEncContext),
.init = tta_encode_init,
.close = tta_encode_close,
- .encode2 = tta_encode_frame,
+ FF_CODEC_ENCODE_CB(tta_encode_frame),
.p.capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_U8,
AV_SAMPLE_FMT_S16,
@@ -390,7 +390,7 @@ const FFCodec ff_ttml_encoder = {
.p.id = AV_CODEC_ID_TTML,
.priv_data_size = sizeof(TTMLContext),
.init = ttml_encode_init,
- .encode_sub = ttml_encode_frame,
+ FF_CODEC_ENCODE_SUB_CB(ttml_encode_frame),
.close = ttml_encode_close,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -422,7 +422,7 @@ const FFCodec ff_twinvq_decoder = {
.priv_data_size = sizeof(TwinVQContext),
.init = twinvq_decode_init,
.close = ff_twinvq_decode_close,
- .decode = ff_twinvq_decode_frame,
+ FF_CODEC_DECODE_CB(ff_twinvq_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
@@ -170,5 +170,5 @@ const FFCodec ff_txd_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_TXD,
.p.capabilities = AV_CODEC_CAP_DR1,
- .decode = txd_decode_frame,
+ FF_CODEC_DECODE_CB(txd_decode_frame),
};
@@ -426,7 +426,7 @@ const FFCodec ff_ulti_decoder = {
.priv_data_size = sizeof(UltimotionDecodeContext),
.init = ulti_decode_init,
.close = ulti_decode_end,
- .decode = ulti_decode_frame,
+ FF_CODEC_DECODE_CB(ulti_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -74,13 +74,17 @@ void av_fast_padded_mallocz(void *ptr, unsigned int *size, size_t min_size)
int av_codec_is_encoder(const AVCodec *avcodec)
{
const FFCodec *const codec = ffcodec(avcodec);
- return codec && (codec->encode_sub || codec->encode2 || codec->receive_packet);
+ return codec && (codec->cb_type == FF_CODEC_CB_TYPE_ENCODE ||
+ codec->cb_type == FF_CODEC_CB_TYPE_ENCODE_SUB ||
+ codec->cb_type == FF_CODEC_CB_TYPE_RECEIVE_PACKET);
}
int av_codec_is_decoder(const AVCodec *avcodec)
{
const FFCodec *const codec = ffcodec(avcodec);
- return codec && (codec->decode || codec->decode_sub || codec->receive_frame);
+ return codec && (codec->cb_type == FF_CODEC_CB_TYPE_DECODE ||
+ codec->cb_type == FF_CODEC_CB_TYPE_DECODE_SUB ||
+ codec->cb_type == FF_CODEC_CB_TYPE_RECEIVE_FRAME);
}
int ff_set_dimensions(AVCodecContext *s, int width, int height)
@@ -1058,7 +1058,7 @@ const FFCodec ff_utvideo_decoder = {
.priv_data_size = sizeof(UtvideoContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -651,7 +651,7 @@ const FFCodec ff_utvideo_encoder = {
.priv_data_size = sizeof(UtvideoContext),
.p.priv_class = &utvideo_class,
.init = utvideo_encode_init,
- .encode2 = utvideo_encode_frame,
+ FF_CODEC_ENCODE_CB(utvideo_encode_frame),
.close = utvideo_encode_close,
.p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
.p.pix_fmts = (const enum AVPixelFormat[]) {
@@ -218,7 +218,7 @@ const FFCodec ff_v210_decoder = {
.p.id = AV_CODEC_ID_V210,
.priv_data_size = sizeof(V210DecContext),
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 |
AV_CODEC_CAP_SLICE_THREADS |
AV_CODEC_CAP_FRAME_THREADS,
@@ -160,7 +160,7 @@ const FFCodec ff_v210_encoder = {
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.priv_data_size = sizeof(V210EncContext),
.init = encode_init,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV422P, AV_PIX_FMT_NONE },
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -125,7 +125,7 @@ const FFCodec ff_v210x_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_V210X,
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -78,7 +78,7 @@ const FFCodec ff_v308_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_V308,
.init = v308_decode_init,
- .decode = v308_decode_frame,
+ FF_CODEC_DECODE_CB(v308_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -77,7 +77,7 @@ const FFCodec ff_v308_encoder = {
.p.id = AV_CODEC_ID_V308,
.p.capabilities = AV_CODEC_CAP_DR1,
.init = v308_encode_init,
- .encode2 = v308_encode_frame,
+ FF_CODEC_ENCODE_CB(v308_encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV444P, AV_PIX_FMT_NONE },
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -88,7 +88,7 @@ const FFCodec ff_ayuv_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_AYUV,
.init = v408_decode_init,
- .decode = v408_decode_frame,
+ FF_CODEC_DECODE_CB(v408_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -100,7 +100,7 @@ const FFCodec ff_v408_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_V408,
.init = v408_decode_init,
- .decode = v408_decode_frame,
+ FF_CODEC_DECODE_CB(v408_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -87,7 +87,7 @@ const FFCodec ff_ayuv_encoder = {
.p.id = AV_CODEC_ID_AYUV,
.p.capabilities = AV_CODEC_CAP_DR1,
.init = v408_encode_init,
- .encode2 = v408_encode_frame,
+ FF_CODEC_ENCODE_CB(v408_encode_frame),
.p.pix_fmts = pix_fmt,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -100,7 +100,7 @@ const FFCodec ff_v408_encoder = {
.p.id = AV_CODEC_ID_V408,
.p.capabilities = AV_CODEC_CAP_DR1,
.init = v408_encode_init,
- .encode2 = v408_encode_frame,
+ FF_CODEC_ENCODE_CB(v408_encode_frame),
.p.pix_fmts = pix_fmt,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -120,7 +120,7 @@ const FFCodec ff_v410_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_V410,
.init = v410_decode_init,
- .decode = v410_decode_frame,
+ FF_CODEC_DECODE_CB(v410_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS |
AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -81,7 +81,7 @@ const FFCodec ff_v410_encoder = {
.p.id = AV_CODEC_ID_V410,
.p.capabilities = AV_CODEC_CAP_DR1,
.init = v410_encode_init,
- .encode2 = v410_encode_frame,
+ FF_CODEC_ENCODE_CB(v410_encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV444P10, AV_PIX_FMT_NONE },
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -248,7 +248,7 @@ static const AVOption options[] = {
.priv_data_size = sizeof(V4L2m2mPriv), \
.p.priv_class = &v4l2_m2m_ ## NAME ## _dec_class, \
.init = v4l2_decode_init, \
- .receive_frame = v4l2_receive_frame, \
+ FF_CODEC_RECEIVE_FRAME_CB(v4l2_receive_frame), \
.close = v4l2_decode_close, \
.bsfs = bsf_name, \
.p.capabilities = AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_DELAY | AV_CODEC_CAP_AVOID_PROBING, \
@@ -429,7 +429,7 @@ static const FFCodecDefault v4l2_m2m_defaults[] = {
.priv_data_size = sizeof(V4L2m2mPriv), \
.p.priv_class = &v4l2_m2m_ ## NAME ##_enc_class, \
.init = v4l2_encode_init, \
- .receive_packet = v4l2_receive_packet, \
+ FF_CODEC_RECEIVE_PACKET_CB(v4l2_receive_packet), \
.close = v4l2_encode_close, \
.defaults = v4l2_m2m_defaults, \
.p.capabilities = AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_DELAY, \
@@ -1332,7 +1332,7 @@ const FFCodec ff_h264_vaapi_encoder = {
.p.id = AV_CODEC_ID_H264,
.priv_data_size = sizeof(VAAPIEncodeH264Context),
.init = &vaapi_encode_h264_init,
- .receive_packet = &ff_vaapi_encode_receive_packet,
+ FF_CODEC_RECEIVE_PACKET_CB(&ff_vaapi_encode_receive_packet),
.close = &vaapi_encode_h264_close,
.p.priv_class = &vaapi_encode_h264_class,
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE |
@@ -1309,7 +1309,7 @@ const FFCodec ff_hevc_vaapi_encoder = {
.p.id = AV_CODEC_ID_HEVC,
.priv_data_size = sizeof(VAAPIEncodeH265Context),
.init = &vaapi_encode_h265_init,
- .receive_packet = &ff_vaapi_encode_receive_packet,
+ FF_CODEC_RECEIVE_PACKET_CB(&ff_vaapi_encode_receive_packet),
.close = &vaapi_encode_h265_close,
.p.priv_class = &vaapi_encode_h265_class,
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE |
@@ -559,7 +559,7 @@ const FFCodec ff_mjpeg_vaapi_encoder = {
.p.id = AV_CODEC_ID_MJPEG,
.priv_data_size = sizeof(VAAPIEncodeMJPEGContext),
.init = &vaapi_encode_mjpeg_init,
- .receive_packet = &ff_vaapi_encode_receive_packet,
+ FF_CODEC_RECEIVE_PACKET_CB(&ff_vaapi_encode_receive_packet),
.close = &vaapi_encode_mjpeg_close,
.p.priv_class = &vaapi_encode_mjpeg_class,
.p.capabilities = AV_CODEC_CAP_HARDWARE | AV_CODEC_CAP_DR1,
@@ -697,7 +697,7 @@ const FFCodec ff_mpeg2_vaapi_encoder = {
.p.id = AV_CODEC_ID_MPEG2VIDEO,
.priv_data_size = sizeof(VAAPIEncodeMPEG2Context),
.init = &vaapi_encode_mpeg2_init,
- .receive_packet = &ff_vaapi_encode_receive_packet,
+ FF_CODEC_RECEIVE_PACKET_CB(&ff_vaapi_encode_receive_packet),
.close = &vaapi_encode_mpeg2_close,
.p.priv_class = &vaapi_encode_mpeg2_class,
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE |
@@ -252,7 +252,7 @@ const FFCodec ff_vp8_vaapi_encoder = {
.p.id = AV_CODEC_ID_VP8,
.priv_data_size = sizeof(VAAPIEncodeVP8Context),
.init = &vaapi_encode_vp8_init,
- .receive_packet = &ff_vaapi_encode_receive_packet,
+ FF_CODEC_RECEIVE_PACKET_CB(&ff_vaapi_encode_receive_packet),
.close = &ff_vaapi_encode_close,
.p.priv_class = &vaapi_encode_vp8_class,
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE |
@@ -294,7 +294,7 @@ const FFCodec ff_vp9_vaapi_encoder = {
.p.id = AV_CODEC_ID_VP9,
.priv_data_size = sizeof(VAAPIEncodeVP9Context),
.init = &vaapi_encode_vp9_init,
- .receive_packet = &ff_vaapi_encode_receive_packet,
+ FF_CODEC_RECEIVE_PACKET_CB(&ff_vaapi_encode_receive_packet),
.close = &ff_vaapi_encode_close,
.p.priv_class = &vaapi_encode_vp9_class,
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_HARDWARE |
@@ -286,7 +286,7 @@ const FFCodec ff_vb_decoder = {
.priv_data_size = sizeof(VBDecContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -209,7 +209,7 @@ const FFCodec ff_vble_decoder = {
.priv_data_size = sizeof(VBLEContext),
.init = vble_decode_init,
.close = vble_decode_close,
- .decode = vble_decode_frame,
+ FF_CODEC_DECODE_CB(vble_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -1209,7 +1209,7 @@ const FFCodec ff_vc1_decoder = {
.priv_data_size = sizeof(VC1Context),
.init = vc1_decode_init,
.close = ff_vc1_decode_end,
- .decode = vc1_decode_frame,
+ FF_CODEC_DECODE_CB(vc1_decode_frame),
.flush = ff_mpeg_flush,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -1247,7 +1247,7 @@ const FFCodec ff_wmv3_decoder = {
.priv_data_size = sizeof(VC1Context),
.init = vc1_decode_init,
.close = ff_vc1_decode_end,
- .decode = vc1_decode_frame,
+ FF_CODEC_DECODE_CB(vc1_decode_frame),
.flush = ff_mpeg_flush,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -1286,7 +1286,7 @@ const FFCodec ff_wmv3image_decoder = {
.priv_data_size = sizeof(VC1Context),
.init = vc1_decode_init,
.close = ff_vc1_decode_end,
- .decode = vc1_decode_frame,
+ FF_CODEC_DECODE_CB(vc1_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
.flush = vc1_sprite_flush,
@@ -1306,7 +1306,7 @@ const FFCodec ff_vc1image_decoder = {
.priv_data_size = sizeof(VC1Context),
.init = vc1_decode_init,
.close = ff_vc1_decode_end,
- .decode = vc1_decode_frame,
+ FF_CODEC_DECODE_CB(vc1_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
.flush = vc1_sprite_flush,
@@ -1234,7 +1234,7 @@ const FFCodec ff_vc2_encoder = {
.priv_data_size = sizeof(VC2EncContext),
.init = vc2_encode_init,
.close = vc2_encode_end,
- .encode2 = vc2_encode_frame,
+ FF_CODEC_ENCODE_CB(vc2_encode_frame),
.p.priv_class = &vc2enc_class,
.defaults = vc2enc_defaults,
.p.pix_fmts = allowed_pix_fmts
@@ -128,7 +128,7 @@ const FFCodec ff_vcr1_decoder = {
.p.id = AV_CODEC_ID_VCR1,
.priv_data_size = sizeof(VCR1Context),
.init = vcr1_decode_init,
- .decode = vcr1_decode_frame,
+ FF_CODEC_DECODE_CB(vcr1_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -2732,7 +2732,7 @@ const FFCodec ff_h264_videotoolbox_encoder = {
.priv_data_size = sizeof(VTEncContext),
.p.pix_fmts = avc_pix_fmts,
.init = vtenc_init,
- .encode2 = vtenc_frame,
+ FF_CODEC_ENCODE_CB(vtenc_frame),
.close = vtenc_close,
.p.priv_class = &h264_videotoolbox_class,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
@@ -2767,7 +2767,7 @@ const FFCodec ff_hevc_videotoolbox_encoder = {
.priv_data_size = sizeof(VTEncContext),
.p.pix_fmts = hevc_pix_fmts,
.init = vtenc_init,
- .encode2 = vtenc_frame,
+ FF_CODEC_ENCODE_CB(vtenc_frame),
.close = vtenc_close,
.p.priv_class = &hevc_videotoolbox_class,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
@@ -2806,7 +2806,7 @@ const FFCodec ff_prores_videotoolbox_encoder = {
.priv_data_size = sizeof(VTEncContext),
.p.pix_fmts = prores_pix_fmts,
.init = vtenc_init,
- .encode2 = vtenc_frame,
+ FF_CODEC_ENCODE_CB(vtenc_frame),
.close = vtenc_close,
.p.priv_class = &prores_videotoolbox_class,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
@@ -213,7 +213,7 @@ const FFCodec ff_adpcm_vima_decoder = {
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_ADPCM_VIMA,
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -235,7 +235,7 @@ const FFCodec ff_vmdaudio_decoder = {
.p.id = AV_CODEC_ID_VMDAUDIO,
.priv_data_size = sizeof(VmdAudioContext),
.init = vmdaudio_decode_init,
- .decode = vmdaudio_decode_frame,
+ FF_CODEC_DECODE_CB(vmdaudio_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -472,7 +472,7 @@ const FFCodec ff_vmdvideo_decoder = {
.priv_data_size = sizeof(VmdVideoContext),
.init = vmdvideo_decode_init,
.close = vmdvideo_decode_end,
- .decode = vmdvideo_decode_frame,
+ FF_CODEC_DECODE_CB(vmdvideo_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -581,7 +581,7 @@ const FFCodec ff_vmnc_decoder = {
.priv_data_size = sizeof(VmncContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -1892,7 +1892,7 @@ const FFCodec ff_vorbis_decoder = {
.priv_data_size = sizeof(vorbis_context),
.init = vorbis_decode_init,
.close = vorbis_decode_close,
- .decode = vorbis_decode_frame,
+ FF_CODEC_DECODE_CB(vorbis_decode_frame),
.flush = vorbis_decode_flush,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -1302,7 +1302,7 @@ const FFCodec ff_vorbis_encoder = {
.p.id = AV_CODEC_ID_VORBIS,
.priv_data_size = sizeof(vorbis_enc_context),
.init = vorbis_encode_init,
- .encode2 = vorbis_encode_frame,
+ FF_CODEC_ENCODE_CB(vorbis_encode_frame),
.close = vorbis_encode_close,
.p.capabilities = AV_CODEC_CAP_DELAY | AV_CODEC_CAP_EXPERIMENTAL,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_FLTP,
@@ -3176,7 +3176,7 @@ const FFCodec ff_theora_decoder = {
.priv_data_size = sizeof(Vp3DecodeContext),
.init = theora_decode_init,
.close = vp3_decode_end,
- .decode = vp3_decode_frame,
+ FF_CODEC_DECODE_CB(vp3_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
AV_CODEC_CAP_FRAME_THREADS,
.flush = vp3_decode_flush,
@@ -3194,7 +3194,7 @@ const FFCodec ff_vp3_decoder = {
.priv_data_size = sizeof(Vp3DecodeContext),
.init = vp3_decode_init,
.close = vp3_decode_end,
- .decode = vp3_decode_frame,
+ FF_CODEC_DECODE_CB(vp3_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
AV_CODEC_CAP_FRAME_THREADS,
.flush = vp3_decode_flush,
@@ -3212,7 +3212,7 @@ const FFCodec ff_vp4_decoder = {
.priv_data_size = sizeof(Vp3DecodeContext),
.init = vp3_decode_init,
.close = vp3_decode_end,
- .decode = vp3_decode_frame,
+ FF_CODEC_DECODE_CB(vp3_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DRAW_HORIZ_BAND |
AV_CODEC_CAP_FRAME_THREADS,
.flush = vp3_decode_flush,
@@ -311,7 +311,7 @@ const FFCodec ff_vp5_decoder = {
.priv_data_size = sizeof(VP56Context),
.init = vp5_decode_init,
.close = vp56_free,
- .decode = ff_vp56_decode_frame,
+ FF_CODEC_DECODE_CB(ff_vp56_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -720,7 +720,7 @@ const FFCodec ff_vp6_decoder = {
.priv_data_size = sizeof(VP56Context),
.init = vp6_decode_init,
.close = vp6_decode_free,
- .decode = ff_vp56_decode_frame,
+ FF_CODEC_DECODE_CB(ff_vp56_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -734,7 +734,7 @@ const FFCodec ff_vp6f_decoder = {
.priv_data_size = sizeof(VP56Context),
.init = vp6_decode_init,
.close = vp6_decode_free,
- .decode = ff_vp56_decode_frame,
+ FF_CODEC_DECODE_CB(ff_vp56_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -748,7 +748,7 @@ const FFCodec ff_vp6a_decoder = {
.priv_data_size = 2 /* Main context + alpha context */ * sizeof(VP56Context),
.init = vp6_decode_init,
.close = vp6_decode_free,
- .decode = ff_vp56_decode_frame,
+ FF_CODEC_DECODE_CB(ff_vp56_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_SLICE_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -2947,7 +2947,7 @@ const FFCodec ff_vp7_decoder = {
.priv_data_size = sizeof(VP8Context),
.init = vp7_decode_init,
.close = ff_vp8_decode_free,
- .decode = vp7_decode_frame,
+ FF_CODEC_DECODE_CB(vp7_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
.flush = vp8_decode_flush,
@@ -2963,7 +2963,7 @@ const FFCodec ff_vp8_decoder = {
.priv_data_size = sizeof(VP8Context),
.init = ff_vp8_decode_init,
.close = ff_vp8_decode_free,
- .decode = ff_vp8_decode_frame,
+ FF_CODEC_DECODE_CB(ff_vp8_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
AV_CODEC_CAP_SLICE_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
@@ -1874,7 +1874,7 @@ const FFCodec ff_vp9_decoder = {
.priv_data_size = sizeof(VP9Context),
.init = vp9_decode_init,
.close = vp9_decode_free,
- .decode = vp9_decode_frame,
+ FF_CODEC_DECODE_CB(vp9_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS | AV_CODEC_CAP_SLICE_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP |
FF_CODEC_CAP_SLICE_THREAD_HAS_MF |
@@ -854,7 +854,7 @@ const FFCodec ff_vqa_decoder = {
.priv_data_size = sizeof(VqaContext),
.init = vqa_decode_init,
.close = vqa_decode_end,
- .decode = vqa_decode_frame,
+ FF_CODEC_DECODE_CB(vqa_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.defaults = vqa_defaults,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -1710,7 +1710,7 @@ const FFCodec ff_wavpack_decoder = {
.priv_data_size = sizeof(WavpackContext),
.init = wavpack_decode_init,
.close = wavpack_decode_end,
- .decode = wavpack_decode_frame,
+ FF_CODEC_DECODE_CB(wavpack_decode_frame),
.flush = wavpack_decode_flush,
.update_thread_context = ONLY_IF_THREADS_ENABLED(update_thread_context),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS |
@@ -2969,7 +2969,7 @@ const FFCodec ff_wavpack_encoder = {
.priv_data_size = sizeof(WavPackEncodeContext),
.p.priv_class = &wavpack_encoder_class,
.init = wavpack_encode_init,
- .encode2 = wavpack_encode_frame,
+ FF_CODEC_ENCODE_CB(wavpack_encode_frame),
.close = wavpack_encode_close,
.p.capabilities = AV_CODEC_CAP_SMALL_LAST_FRAME,
.p.sample_fmts = (const enum AVSampleFormat[]){ AV_SAMPLE_FMT_U8P,
@@ -248,7 +248,7 @@ const FFCodec ff_wcmv_decoder = {
.priv_data_size = sizeof(WCMVContext),
.init = decode_init,
.close = decode_close,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE |
FF_CODEC_CAP_INIT_CLEANUP,
@@ -1562,7 +1562,7 @@ const FFCodec ff_webp_decoder = {
.p.id = AV_CODEC_ID_WEBP,
.priv_data_size = sizeof(WebPContext),
.init = webp_decode_init,
- .decode = webp_decode_frame,
+ FF_CODEC_DECODE_CB(webp_decode_frame),
.close = webp_decode_close,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -103,7 +103,7 @@ const FFCodec ff_webvtt_decoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("WebVTT subtitle"),
.p.type = AVMEDIA_TYPE_SUBTITLE,
.p.id = AV_CODEC_ID_WEBVTT,
- .decode_sub = webvtt_decode_frame,
+ FF_CODEC_DECODE_SUB_CB(webvtt_decode_frame),
.init = ff_ass_subtitle_header_default,
.flush = ff_ass_decoder_flush,
.priv_data_size = sizeof(FFASSDecoderContext),
@@ -218,7 +218,7 @@ const FFCodec ff_webvtt_encoder = {
.p.id = AV_CODEC_ID_WEBVTT,
.priv_data_size = sizeof(WebVTTContext),
.init = webvtt_encode_init,
- .encode_sub = webvtt_encode_frame,
+ FF_CODEC_ENCODE_SUB_CB(webvtt_encode_frame),
.close = webvtt_encode_close,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -1007,7 +1007,7 @@ const FFCodec ff_wmav1_decoder = {
.priv_data_size = sizeof(WMACodecContext),
.init = wma_decode_init,
.close = ff_wma_end,
- .decode = wma_decode_superframe,
+ FF_CODEC_DECODE_CB(wma_decode_superframe),
.flush = flush,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
@@ -1024,7 +1024,7 @@ const FFCodec ff_wmav2_decoder = {
.priv_data_size = sizeof(WMACodecContext),
.init = wma_decode_init,
.close = ff_wma_end,
- .decode = wma_decode_superframe,
+ FF_CODEC_DECODE_CB(wma_decode_superframe),
.flush = flush,
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
@@ -440,7 +440,7 @@ const FFCodec ff_wmav1_encoder = {
.p.id = AV_CODEC_ID_WMAV1,
.priv_data_size = sizeof(WMACodecContext),
.init = encode_init,
- .encode2 = encode_superframe,
+ FF_CODEC_ENCODE_CB(encode_superframe),
.close = ff_wma_end,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
@@ -455,7 +455,7 @@ const FFCodec ff_wmav2_encoder = {
.p.id = AV_CODEC_ID_WMAV2,
.priv_data_size = sizeof(WMACodecContext),
.init = encode_init,
- .encode2 = encode_superframe,
+ FF_CODEC_ENCODE_CB(encode_superframe),
.close = ff_wma_end,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
@@ -1336,7 +1336,7 @@ const FFCodec ff_wmalossless_decoder = {
.priv_data_size = sizeof(WmallDecodeCtx),
.init = decode_init,
.close = decode_close,
- .decode = decode_packet,
+ FF_CODEC_DECODE_CB(decode_packet),
.flush = flush,
.p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
@@ -2088,7 +2088,7 @@ const FFCodec ff_wmapro_decoder = {
.priv_data_size = sizeof(WMAProDecodeCtx),
.init = wmapro_decode_init,
.close = wmapro_decode_end,
- .decode = wmapro_decode_packet,
+ FF_CODEC_DECODE_CB(wmapro_decode_packet),
.p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1,
.flush = wmapro_flush,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
@@ -2104,7 +2104,7 @@ const FFCodec ff_xma1_decoder = {
.priv_data_size = sizeof(XMADecodeCtx),
.init = xma_decode_init,
.close = xma_decode_end,
- .decode = xma_decode_packet,
+ FF_CODEC_DECODE_CB(xma_decode_packet),
.p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
AV_SAMPLE_FMT_NONE },
@@ -2119,7 +2119,7 @@ const FFCodec ff_xma2_decoder = {
.priv_data_size = sizeof(XMADecodeCtx),
.init = xma_decode_init,
.close = xma_decode_end,
- .decode = xma_decode_packet,
+ FF_CODEC_DECODE_CB(xma_decode_packet),
.flush = xma_flush,
.p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
.p.sample_fmts = (const enum AVSampleFormat[]) { AV_SAMPLE_FMT_FLTP,
@@ -2006,7 +2006,7 @@ const FFCodec ff_wmavoice_decoder = {
.priv_data_size = sizeof(WMAVoiceContext),
.init = wmavoice_decode_init,
.close = wmavoice_decode_end,
- .decode = wmavoice_decode_packet,
+ FF_CODEC_DECODE_CB(wmavoice_decode_packet),
.p.capabilities = AV_CODEC_CAP_SUBFRAMES | AV_CODEC_CAP_DR1 | AV_CODEC_CAP_DELAY,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.flush = wmavoice_flush,
@@ -599,7 +599,7 @@ const FFCodec ff_wmv2_decoder = {
.priv_data_size = sizeof(WMV2DecContext),
.init = wmv2_decode_init,
.close = wmv2_decode_end,
- .decode = ff_h263_decode_frame,
+ FF_CODEC_DECODE_CB(ff_h263_decode_frame),
.p.capabilities = AV_CODEC_CAP_DRAW_HORIZ_BAND | AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
@@ -241,7 +241,7 @@ const FFCodec ff_wmv2_encoder = {
.p.priv_class = &ff_mpv_enc_class,
.priv_data_size = sizeof(WMV2EncContext),
.init = wmv2_encode_init,
- .encode2 = ff_mpv_encode_picture,
+ FF_CODEC_ENCODE_CB(ff_mpv_encode_picture),
.close = ff_mpv_encode_end,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV420P,
@@ -138,7 +138,7 @@ const FFCodec ff_wnv1_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_WNV1,
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -113,7 +113,7 @@ const FFCodec ff_wrapped_avframe_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("AVFrame to AVPacket passthrough"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_WRAPPED_AVFRAME,
- .encode2 = wrapped_avframe_encode,
+ FF_CODEC_ENCODE_CB(wrapped_avframe_encode),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -122,6 +122,6 @@ const FFCodec ff_wrapped_avframe_decoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("AVPacket to AVFrame passthrough"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_WRAPPED_AVFRAME,
- .decode = wrapped_avframe_decode,
+ FF_CODEC_DECODE_CB(wrapped_avframe_decode),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -176,7 +176,7 @@ const FFCodec ff_ws_snd1_decoder = {
.p.type = AVMEDIA_TYPE_AUDIO,
.p.id = AV_CODEC_ID_WESTWOOD_SND1,
.init = ws_snd_decode_init,
- .decode = ws_snd_decode_frame,
+ FF_CODEC_DECODE_CB(ws_snd_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_CHANNEL_CONF,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -642,7 +642,7 @@ const FFCodec ff_xan_wc3_decoder = {
.priv_data_size = sizeof(XanContext),
.init = xan_decode_init,
.close = xan_decode_end,
- .decode = xan_decode_frame,
+ FF_CODEC_DECODE_CB(xan_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -143,5 +143,5 @@ const FFCodec ff_xbm_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_XBM,
.p.capabilities = AV_CODEC_CAP_DR1,
- .decode = xbm_decode_frame,
+ FF_CODEC_DECODE_CB(xbm_decode_frame),
};
@@ -81,7 +81,7 @@ const FFCodec ff_xbm_encoder = {
.p.long_name = NULL_IF_CONFIG_SMALL("XBM (X BitMap) image"),
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_XBM,
- .encode2 = xbm_encode_frame,
+ FF_CODEC_ENCODE_CB(xbm_encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_MONOWHITE,
AV_PIX_FMT_NONE },
};
@@ -182,7 +182,7 @@ const FFCodec ff_xface_decoder = {
.p.id = AV_CODEC_ID_XFACE,
.priv_data_size = sizeof(XFaceContext),
.init = xface_decode_init,
- .decode = xface_decode_frame,
+ FF_CODEC_DECODE_CB(xface_decode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_MONOWHITE, AV_PIX_FMT_NONE },
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -219,5 +219,5 @@ const FFCodec ff_xface_encoder = {
.p.capabilities = AV_CODEC_CAP_DR1,
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_MONOWHITE, AV_PIX_FMT_NONE },
.priv_data_size = sizeof(XFaceContext),
- .encode2 = xface_encode_frame,
+ FF_CODEC_ENCODE_CB(xface_encode_frame),
};
@@ -132,7 +132,7 @@ const FFCodec ff_xl_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_VIXL,
.init = decode_init,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -444,5 +444,5 @@ const FFCodec ff_xpm_decoder = {
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(XPMDecContext),
.close = xpm_decode_close,
- .decode = xpm_decode_frame,
+ FF_CODEC_DECODE_CB(xpm_decode_frame),
};
@@ -160,6 +160,6 @@ const FFCodec ff_xsub_decoder = {
.p.type = AVMEDIA_TYPE_SUBTITLE,
.p.id = AV_CODEC_ID_XSUB,
.init = decode_init,
- .decode_sub = decode_frame,
+ FF_CODEC_DECODE_SUB_CB(decode_frame),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -217,6 +217,6 @@ const FFCodec ff_xsub_encoder = {
.p.type = AVMEDIA_TYPE_SUBTITLE,
.p.id = AV_CODEC_ID_XSUB,
.init = xsub_encoder_init,
- .encode_sub = xsub_encode,
+ FF_CODEC_ENCODE_SUB_CB(xsub_encode),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -253,5 +253,5 @@ const FFCodec ff_xwd_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_XWD,
.p.capabilities = AV_CODEC_CAP_DR1,
- .decode = xwd_decode_frame,
+ FF_CODEC_DECODE_CB(xwd_decode_frame),
};
@@ -220,7 +220,7 @@ const FFCodec ff_xwd_encoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_XWD,
.p.capabilities = AV_CODEC_CAP_DR1,
- .encode2 = xwd_encode_frame,
+ FF_CODEC_ENCODE_CB(xwd_encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_BGRA,
AV_PIX_FMT_RGBA,
AV_PIX_FMT_ARGB,
@@ -441,7 +441,7 @@ const FFCodec ff_xan_wc4_decoder = {
.priv_data_size = sizeof(XanContext),
.init = xan_decode_init,
.close = xan_decode_end,
- .decode = xan_decode_frame,
+ FF_CODEC_DECODE_CB(xan_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_CLEANUP | FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -87,7 +87,7 @@ const FFCodec ff_y41p_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_Y41P,
.init = y41p_decode_init,
- .decode = y41p_decode_frame,
+ FF_CODEC_DECODE_CB(y41p_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -84,7 +84,7 @@ const FFCodec ff_y41p_encoder = {
.p.id = AV_CODEC_ID_Y41P,
.p.capabilities = AV_CODEC_CAP_DR1,
.init = y41p_encode_init,
- .encode2 = y41p_encode_frame,
+ FF_CODEC_ENCODE_CB(y41p_encode_frame),
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_YUV411P,
AV_PIX_FMT_NONE },
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
@@ -457,7 +457,7 @@ const FFCodec ff_ylc_decoder = {
.priv_data_size = sizeof(YLCContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -274,6 +274,6 @@ const FFCodec ff_yop_decoder = {
.priv_data_size = sizeof(YopDecContext),
.init = yop_decode_init,
.close = yop_decode_close,
- .decode = yop_decode_frame,
+ FF_CODEC_DECODE_CB(yop_decode_frame),
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -79,7 +79,7 @@ const FFCodec ff_yuv4_decoder = {
.p.type = AVMEDIA_TYPE_VIDEO,
.p.id = AV_CODEC_ID_YUV4,
.init = yuv4_decode_init,
- .decode = yuv4_decode_frame,
+ FF_CODEC_DECODE_CB(yuv4_decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE,
};
@@ -66,5 +66,5 @@ const FFCodec ff_yuv4_encoder = {
.p.id = AV_CODEC_ID_YUV4,
.p.capabilities = AV_CODEC_CAP_DR1,
.p.pix_fmts = (const enum AVPixelFormat[]){ AV_PIX_FMT_YUV420P, AV_PIX_FMT_NONE },
- .encode2 = yuv4_encode_frame,
+ FF_CODEC_ENCODE_CB(yuv4_encode_frame),
};
@@ -140,7 +140,7 @@ const FFCodec ff_zerocodec_decoder = {
.p.id = AV_CODEC_ID_ZEROCODEC,
.priv_data_size = sizeof(ZeroCodecContext),
.init = zerocodec_decode_init,
- .decode = zerocodec_decode_frame,
+ FF_CODEC_DECODE_CB(zerocodec_decode_frame),
.flush = zerocodec_decode_flush,
.close = zerocodec_decode_close,
.p.capabilities = AV_CODEC_CAP_DR1,
@@ -653,7 +653,7 @@ const FFCodec ff_zmbv_decoder = {
.priv_data_size = sizeof(ZmbvContext),
.init = decode_init,
.close = decode_end,
- .decode = decode_frame,
+ FF_CODEC_DECODE_CB(decode_frame),
.p.capabilities = AV_CODEC_CAP_DR1,
.caps_internal = FF_CODEC_CAP_INIT_THREADSAFE | FF_CODEC_CAP_INIT_CLEANUP,
};
@@ -418,7 +418,7 @@ const FFCodec ff_zmbv_encoder = {
.p.capabilities = AV_CODEC_CAP_DR1,
.priv_data_size = sizeof(ZmbvEncContext),
.init = encode_init,
- .encode2 = encode_frame,
+ FF_CODEC_ENCODE_CB(encode_frame),
.close = encode_end,
.p.pix_fmts = (const enum AVPixelFormat[]) { AV_PIX_FMT_PAL8,
AV_PIX_FMT_RGB555LE,
This is possible, because every given FFCodec has to implement exactly one of these. Doing so decreases sizeof(FFCodec) and therefore decreases the size of the binary. Notice that in case of position-independent code the decrease is in .data.rel.ro, so that this translates to decreased memory consumption. Signed-off-by: Andreas Rheinhardt <andreas.rheinhardt@outlook.com> --- libavcodec/012v.c | 2 +- libavcodec/4xm.c | 2 +- libavcodec/8bps.c | 2 +- libavcodec/8svx.c | 4 +- libavcodec/a64multienc.c | 4 +- libavcodec/aacdec.c | 4 +- libavcodec/aacdec_fixed.c | 2 +- libavcodec/aacenc.c | 2 +- libavcodec/aasc.c | 2 +- libavcodec/ac3dec_fixed.c | 2 +- libavcodec/ac3dec_float.c | 4 +- libavcodec/ac3enc_fixed.c | 2 +- libavcodec/ac3enc_float.c | 2 +- libavcodec/adpcm.c | 2 +- libavcodec/adpcmenc.c | 2 +- libavcodec/adxdec.c | 2 +- libavcodec/adxenc.c | 2 +- libavcodec/agm.c | 2 +- libavcodec/aic.c | 2 +- libavcodec/alac.c | 2 +- libavcodec/alacenc.c | 2 +- libavcodec/aliaspixdec.c | 2 +- libavcodec/aliaspixenc.c | 2 +- libavcodec/alsdec.c | 2 +- libavcodec/amfenc_h264.c | 2 +- libavcodec/amfenc_hevc.c | 2 +- libavcodec/amrnbdec.c | 2 +- libavcodec/amrwbdec.c | 2 +- libavcodec/anm.c | 2 +- libavcodec/ansi.c | 2 +- libavcodec/apedec.c | 2 +- libavcodec/aptxdec.c | 4 +- libavcodec/aptxenc.c | 4 +- libavcodec/arbc.c | 2 +- libavcodec/argo.c | 2 +- libavcodec/assdec.c | 4 +- libavcodec/assenc.c | 4 +- libavcodec/asvdec.c | 4 +- libavcodec/asvenc.c | 4 +- libavcodec/atrac1.c | 2 +- libavcodec/atrac3.c | 4 +- libavcodec/atrac3plusdec.c | 4 +- libavcodec/atrac9dec.c | 2 +- libavcodec/audiotoolboxdec.c | 2 +- libavcodec/audiotoolboxenc.c | 2 +- libavcodec/aura.c | 2 +- libavcodec/av1dec.c | 2 +- libavcodec/avrndec.c | 2 +- libavcodec/avs.c | 2 +- libavcodec/avuidec.c | 2 +- libavcodec/avuienc.c | 2 +- libavcodec/bethsoftvideo.c | 2 +- libavcodec/bfi.c | 2 +- libavcodec/bink.c | 2 +- libavcodec/binkaudio.c | 4 +- libavcodec/bintext.c | 6 +- libavcodec/bitpacked_dec.c | 2 +- libavcodec/bitpacked_enc.c | 2 +- libavcodec/bmp.c | 2 +- libavcodec/bmpenc.c | 2 +- libavcodec/bmvaudio.c | 2 +- libavcodec/bmvvideo.c | 2 +- libavcodec/brenderpix.c | 2 +- libavcodec/c93.c | 2 +- libavcodec/cavsdec.c | 2 +- libavcodec/ccaption_dec.c | 2 +- libavcodec/cdgraphics.c | 2 +- libavcodec/cdtoons.c | 2 +- libavcodec/cdxl.c | 2 +- libavcodec/cfhd.c | 2 +- libavcodec/cfhdenc.c | 2 +- libavcodec/cinepak.c | 2 +- libavcodec/cinepakenc.c | 2 +- libavcodec/clearvideo.c | 2 +- libavcodec/cljrdec.c | 2 +- libavcodec/cljrenc.c | 2 +- libavcodec/cllc.c | 2 +- libavcodec/cngdec.c | 2 +- libavcodec/cngenc.c | 2 +- libavcodec/codec_internal.h | 153 ++++++++++++++++++++-------- libavcodec/cook.c | 2 +- libavcodec/cpia.c | 2 +- libavcodec/cri.c | 2 +- libavcodec/crystalhd.c | 2 +- libavcodec/cscd.c | 2 +- libavcodec/cuviddec.c | 2 +- libavcodec/cyuv.c | 4 +- libavcodec/dcadec.c | 2 +- libavcodec/dcaenc.c | 2 +- libavcodec/dds.c | 2 +- libavcodec/decode.c | 8 +- libavcodec/dfa.c | 2 +- libavcodec/dfpwmdec.c | 2 +- libavcodec/dfpwmenc.c | 2 +- libavcodec/diracdec.c | 2 +- libavcodec/dnxhddec.c | 2 +- libavcodec/dnxhdenc.c | 2 +- libavcodec/dolby_e.c | 2 +- libavcodec/dpcm.c | 2 +- libavcodec/dpx.c | 2 +- libavcodec/dpxenc.c | 2 +- libavcodec/dsddec.c | 2 +- libavcodec/dsicinaudio.c | 2 +- libavcodec/dsicinvideo.c | 2 +- libavcodec/dss_sp.c | 2 +- libavcodec/dstdec.c | 2 +- libavcodec/dvaudiodec.c | 2 +- libavcodec/dvbsubdec.c | 2 +- libavcodec/dvbsubenc.c | 2 +- libavcodec/dvdec.c | 2 +- libavcodec/dvdsubdec.c | 2 +- libavcodec/dvdsubenc.c | 2 
+- libavcodec/dvenc.c | 2 +- libavcodec/dxa.c | 2 +- libavcodec/dxtory.c | 2 +- libavcodec/dxv.c | 2 +- libavcodec/eac3enc.c | 2 +- libavcodec/eacmv.c | 2 +- libavcodec/eamad.c | 2 +- libavcodec/eatgq.c | 2 +- libavcodec/eatgv.c | 2 +- libavcodec/eatqi.c | 2 +- libavcodec/encode.c | 10 +- libavcodec/escape124.c | 2 +- libavcodec/escape130.c | 2 +- libavcodec/evrcdec.c | 2 +- libavcodec/exr.c | 2 +- libavcodec/exrenc.c | 2 +- libavcodec/fastaudio.c | 2 +- libavcodec/ffv1dec.c | 2 +- libavcodec/ffv1enc.c | 2 +- libavcodec/ffwavesynth.c | 2 +- libavcodec/fic.c | 2 +- libavcodec/fitsdec.c | 2 +- libavcodec/fitsenc.c | 2 +- libavcodec/flacdec.c | 2 +- libavcodec/flacenc.c | 2 +- libavcodec/flashsv.c | 4 +- libavcodec/flashsv2enc.c | 2 +- libavcodec/flashsvenc.c | 2 +- libavcodec/flicvideo.c | 2 +- libavcodec/flvdec.c | 2 +- libavcodec/flvenc.c | 2 +- libavcodec/fmvc.c | 2 +- libavcodec/frame_thread_encoder.c | 2 +- libavcodec/fraps.c | 2 +- libavcodec/frwu.c | 2 +- libavcodec/g2meet.c | 2 +- libavcodec/g722dec.c | 2 +- libavcodec/g722enc.c | 2 +- libavcodec/g723_1dec.c | 2 +- libavcodec/g723_1enc.c | 2 +- libavcodec/g726.c | 8 +- libavcodec/g729dec.c | 4 +- libavcodec/gdv.c | 2 +- libavcodec/gemdec.c | 2 +- libavcodec/gif.c | 2 +- libavcodec/gifdec.c | 2 +- libavcodec/gsmdec.c | 4 +- libavcodec/h261dec.c | 2 +- libavcodec/h261enc.c | 2 +- libavcodec/h263dec.c | 4 +- libavcodec/h264dec.c | 2 +- libavcodec/hapdec.c | 2 +- libavcodec/hapenc.c | 2 +- libavcodec/hcadec.c | 2 +- libavcodec/hcom.c | 2 +- libavcodec/hevcdec.c | 2 +- libavcodec/hnm4video.c | 2 +- libavcodec/hq_hqa.c | 2 +- libavcodec/hqx.c | 2 +- libavcodec/huffyuvdec.c | 6 +- libavcodec/huffyuvenc.c | 4 +- libavcodec/idcinvideo.c | 2 +- libavcodec/iff.c | 2 +- libavcodec/ilbcdec.c | 2 +- libavcodec/imc.c | 4 +- libavcodec/imm4.c | 2 +- libavcodec/imm5.c | 2 +- libavcodec/imx.c | 2 +- libavcodec/indeo2.c | 2 +- libavcodec/indeo3.c | 2 +- libavcodec/indeo4.c | 2 +- libavcodec/indeo5.c | 2 +- libavcodec/intelh263dec.c | 2 +- libavcodec/interplayacm.c | 2 +- libavcodec/interplayvideo.c | 2 +- libavcodec/ituh263enc.c | 4 +- libavcodec/j2kenc.c | 2 +- libavcodec/jacosubdec.c | 2 +- libavcodec/jpeg2000dec.c | 2 +- libavcodec/jpeglsdec.c | 2 +- libavcodec/jpeglsenc.c | 2 +- libavcodec/jvdec.c | 2 +- libavcodec/kgv1dec.c | 2 +- libavcodec/kmvc.c | 2 +- libavcodec/lagarith.c | 2 +- libavcodec/lcldec.c | 4 +- libavcodec/lclenc.c | 2 +- libavcodec/libaomdec.c | 2 +- libavcodec/libaomenc.c | 2 +- libavcodec/libaribb24.c | 2 +- libavcodec/libcelt_dec.c | 2 +- libavcodec/libcodec2.c | 4 +- libavcodec/libdav1d.c | 2 +- libavcodec/libdavs2.c | 2 +- libavcodec/libfdk-aacdec.c | 2 +- libavcodec/libfdk-aacenc.c | 2 +- libavcodec/libgsmdec.c | 4 +- libavcodec/libgsmenc.c | 4 +- libavcodec/libilbc.c | 4 +- libavcodec/libkvazaar.c | 2 +- libavcodec/libmp3lame.c | 2 +- libavcodec/libopencore-amr.c | 6 +- libavcodec/libopenh264dec.c | 2 +- libavcodec/libopenh264enc.c | 2 +- libavcodec/libopenjpegdec.c | 2 +- libavcodec/libopenjpegenc.c | 2 +- libavcodec/libopusdec.c | 2 +- libavcodec/libopusenc.c | 2 +- libavcodec/librav1e.c | 2 +- libavcodec/librsvgdec.c | 2 +- libavcodec/libshine.c | 2 +- libavcodec/libspeexdec.c | 2 +- libavcodec/libspeexenc.c | 2 +- libavcodec/libsvtav1.c | 2 +- libavcodec/libtheoraenc.c | 2 +- libavcodec/libtwolame.c | 2 +- libavcodec/libuavs3d.c | 2 +- libavcodec/libvo-amrwbenc.c | 2 +- libavcodec/libvorbisdec.c | 2 +- libavcodec/libvorbisenc.c | 2 +- libavcodec/libvpxdec.c | 4 +- libavcodec/libvpxenc.c | 4 +- libavcodec/libwebpenc.c | 2 
+- libavcodec/libwebpenc_animencoder.c | 2 +- libavcodec/libx264.c | 6 +- libavcodec/libx265.c | 2 +- libavcodec/libxavs.c | 2 +- libavcodec/libxavs2.c | 2 +- libavcodec/libxvid.c | 2 +- libavcodec/libzvbi-teletextdec.c | 2 +- libavcodec/ljpegenc.c | 2 +- libavcodec/loco.c | 2 +- libavcodec/lscrdec.c | 2 +- libavcodec/m101.c | 2 +- libavcodec/mace.c | 4 +- libavcodec/magicyuv.c | 2 +- libavcodec/magicyuvenc.c | 2 +- libavcodec/mdec.c | 2 +- libavcodec/mediacodecdec.c | 2 +- libavcodec/metasound.c | 2 +- libavcodec/mfenc.c | 2 +- libavcodec/microdvddec.c | 2 +- libavcodec/midivid.c | 2 +- libavcodec/mimic.c | 2 +- libavcodec/mjpegbdec.c | 2 +- libavcodec/mjpegdec.c | 6 +- libavcodec/mjpegenc.c | 4 +- libavcodec/mlpdec.c | 4 +- libavcodec/mlpenc.c | 4 +- libavcodec/mmaldec.c | 2 +- libavcodec/mmvideo.c | 2 +- libavcodec/mobiclip.c | 2 +- libavcodec/motionpixels.c | 2 +- libavcodec/movtextdec.c | 2 +- libavcodec/movtextenc.c | 2 +- libavcodec/mpc7.c | 2 +- libavcodec/mpc8.c | 2 +- libavcodec/mpeg12dec.c | 8 +- libavcodec/mpeg12enc.c | 4 +- libavcodec/mpeg4videodec.c | 2 +- libavcodec/mpeg4videoenc.c | 2 +- libavcodec/mpegaudiodec_fixed.c | 10 +- libavcodec/mpegaudiodec_float.c | 10 +- libavcodec/mpegaudioenc_fixed.c | 2 +- libavcodec/mpegaudioenc_float.c | 2 +- libavcodec/mpl2dec.c | 2 +- libavcodec/mscc.c | 4 +- libavcodec/msmpeg4dec.c | 8 +- libavcodec/msmpeg4enc.c | 6 +- libavcodec/msp2dec.c | 2 +- libavcodec/msrle.c | 2 +- libavcodec/mss1.c | 2 +- libavcodec/mss2.c | 2 +- libavcodec/mss3.c | 2 +- libavcodec/mss4.c | 2 +- libavcodec/msvideo1.c | 2 +- libavcodec/msvideo1enc.c | 2 +- libavcodec/mv30.c | 2 +- libavcodec/mvcdec.c | 4 +- libavcodec/mvha.c | 2 +- libavcodec/mwsc.c | 2 +- libavcodec/mxpegdec.c | 2 +- libavcodec/nellymoserdec.c | 2 +- libavcodec/nellymoserenc.c | 2 +- libavcodec/notchlc.c | 2 +- libavcodec/nuv.c | 2 +- libavcodec/nvenc_h264.c | 2 +- libavcodec/nvenc_hevc.c | 2 +- libavcodec/omx.c | 4 +- libavcodec/on2avc.c | 2 +- libavcodec/opusdec.c | 2 +- libavcodec/opusenc.c | 2 +- libavcodec/pafaudio.c | 2 +- libavcodec/pafvideo.c | 2 +- libavcodec/pamenc.c | 2 +- libavcodec/pcm-bluray.c | 2 +- libavcodec/pcm-blurayenc.c | 2 +- libavcodec/pcm-dvd.c | 2 +- libavcodec/pcm-dvdenc.c | 2 +- libavcodec/pcm.c | 4 +- libavcodec/pcx.c | 2 +- libavcodec/pcxenc.c | 2 +- libavcodec/pgssubdec.c | 2 +- libavcodec/pgxdec.c | 2 +- libavcodec/photocd.c | 2 +- libavcodec/pictordec.c | 2 +- libavcodec/pixlet.c | 2 +- libavcodec/pngdec.c | 4 +- libavcodec/pngenc.c | 4 +- libavcodec/pnmdec.c | 12 +-- libavcodec/pnmenc.c | 10 +- libavcodec/proresdec2.c | 2 +- libavcodec/proresenc_anatoliy.c | 4 +- libavcodec/proresenc_kostya.c | 2 +- libavcodec/prosumer.c | 2 +- libavcodec/psd.c | 2 +- libavcodec/pthread_frame.c | 2 +- libavcodec/ptx.c | 2 +- libavcodec/qcelpdec.c | 2 +- libavcodec/qdm2.c | 2 +- libavcodec/qdmc.c | 2 +- libavcodec/qdrw.c | 2 +- libavcodec/qpeg.c | 2 +- libavcodec/qsvdec.c | 2 +- libavcodec/qsvenc_h264.c | 2 +- libavcodec/qsvenc_hevc.c | 2 +- libavcodec/qsvenc_jpeg.c | 2 +- libavcodec/qsvenc_mpeg2.c | 2 +- libavcodec/qsvenc_vp9.c | 2 +- libavcodec/qtrle.c | 2 +- libavcodec/qtrleenc.c | 2 +- libavcodec/r210dec.c | 6 +- libavcodec/r210enc.c | 6 +- libavcodec/ra144dec.c | 2 +- libavcodec/ra144enc.c | 2 +- libavcodec/ra288.c | 2 +- libavcodec/ralf.c | 2 +- libavcodec/rasc.c | 2 +- libavcodec/rawdec.c | 2 +- libavcodec/rawenc.c | 2 +- libavcodec/realtextdec.c | 2 +- libavcodec/rkmppdec.c | 2 +- libavcodec/rl2.c | 2 +- libavcodec/roqaudioenc.c | 2 +- libavcodec/roqvideodec.c | 2 +- 
libavcodec/roqvideoenc.c | 2 +- libavcodec/rpza.c | 2 +- libavcodec/rpzaenc.c | 2 +- libavcodec/rscc.c | 2 +- libavcodec/rv10.c | 4 +- libavcodec/rv10enc.c | 2 +- libavcodec/rv20enc.c | 2 +- libavcodec/rv30.c | 2 +- libavcodec/rv40.c | 2 +- libavcodec/s302m.c | 2 +- libavcodec/s302menc.c | 2 +- libavcodec/samidec.c | 2 +- libavcodec/sanm.c | 2 +- libavcodec/sbcdec.c | 2 +- libavcodec/sbcenc.c | 2 +- libavcodec/scpr.c | 2 +- libavcodec/screenpresso.c | 2 +- libavcodec/sga.c | 2 +- libavcodec/sgidec.c | 2 +- libavcodec/sgienc.c | 2 +- libavcodec/sgirledec.c | 2 +- libavcodec/sheervideo.c | 2 +- libavcodec/shorten.c | 2 +- libavcodec/sipr.c | 2 +- libavcodec/siren.c | 4 +- libavcodec/smacker.c | 4 +- libavcodec/smc.c | 2 +- libavcodec/smcenc.c | 2 +- libavcodec/snowdec.c | 2 +- libavcodec/snowenc.c | 2 +- libavcodec/sonic.c | 6 +- libavcodec/sp5xdec.c | 4 +- libavcodec/speedhq.c | 2 +- libavcodec/speedhqenc.c | 2 +- libavcodec/speexdec.c | 2 +- libavcodec/srtdec.c | 4 +- libavcodec/srtenc.c | 6 +- libavcodec/subviewerdec.c | 2 +- libavcodec/sunrast.c | 2 +- libavcodec/sunrastenc.c | 2 +- libavcodec/svq1dec.c | 2 +- libavcodec/svq1enc.c | 2 +- libavcodec/svq3.c | 2 +- libavcodec/takdec.c | 2 +- libavcodec/targa.c | 2 +- libavcodec/targa_y216dec.c | 2 +- libavcodec/targaenc.c | 2 +- libavcodec/tdsc.c | 2 +- libavcodec/tests/avcodec.c | 42 +++++--- libavcodec/textdec.c | 10 +- libavcodec/tiertexseqv.c | 2 +- libavcodec/tiff.c | 2 +- libavcodec/tiffenc.c | 2 +- libavcodec/tmv.c | 2 +- libavcodec/truemotion1.c | 2 +- libavcodec/truemotion2.c | 2 +- libavcodec/truemotion2rt.c | 2 +- libavcodec/truespeech.c | 2 +- libavcodec/tscc.c | 2 +- libavcodec/tscc2.c | 2 +- libavcodec/tta.c | 2 +- libavcodec/ttaenc.c | 2 +- libavcodec/ttmlenc.c | 2 +- libavcodec/twinvqdec.c | 2 +- libavcodec/txd.c | 2 +- libavcodec/ulti.c | 2 +- libavcodec/utils.c | 8 +- libavcodec/utvideodec.c | 2 +- libavcodec/utvideoenc.c | 2 +- libavcodec/v210dec.c | 2 +- libavcodec/v210enc.c | 2 +- libavcodec/v210x.c | 2 +- libavcodec/v308dec.c | 2 +- libavcodec/v308enc.c | 2 +- libavcodec/v408dec.c | 4 +- libavcodec/v408enc.c | 4 +- libavcodec/v410dec.c | 2 +- libavcodec/v410enc.c | 2 +- libavcodec/v4l2_m2m_dec.c | 2 +- libavcodec/v4l2_m2m_enc.c | 2 +- libavcodec/vaapi_encode_h264.c | 2 +- libavcodec/vaapi_encode_h265.c | 2 +- libavcodec/vaapi_encode_mjpeg.c | 2 +- libavcodec/vaapi_encode_mpeg2.c | 2 +- libavcodec/vaapi_encode_vp8.c | 2 +- libavcodec/vaapi_encode_vp9.c | 2 +- libavcodec/vb.c | 2 +- libavcodec/vble.c | 2 +- libavcodec/vc1dec.c | 8 +- libavcodec/vc2enc.c | 2 +- libavcodec/vcr1.c | 2 +- libavcodec/videotoolboxenc.c | 6 +- libavcodec/vima.c | 2 +- libavcodec/vmdaudio.c | 2 +- libavcodec/vmdvideo.c | 2 +- libavcodec/vmnc.c | 2 +- libavcodec/vorbisdec.c | 2 +- libavcodec/vorbisenc.c | 2 +- libavcodec/vp3.c | 6 +- libavcodec/vp5.c | 2 +- libavcodec/vp6.c | 6 +- libavcodec/vp8.c | 4 +- libavcodec/vp9.c | 2 +- libavcodec/vqavideo.c | 2 +- libavcodec/wavpack.c | 2 +- libavcodec/wavpackenc.c | 2 +- libavcodec/wcmv.c | 2 +- libavcodec/webp.c | 2 +- libavcodec/webvttdec.c | 2 +- libavcodec/webvttenc.c | 2 +- libavcodec/wmadec.c | 4 +- libavcodec/wmaenc.c | 4 +- libavcodec/wmalosslessdec.c | 2 +- libavcodec/wmaprodec.c | 6 +- libavcodec/wmavoice.c | 2 +- libavcodec/wmv2dec.c | 2 +- libavcodec/wmv2enc.c | 2 +- libavcodec/wnv1.c | 2 +- libavcodec/wrapped_avframe.c | 4 +- libavcodec/ws-snd1.c | 2 +- libavcodec/xan.c | 2 +- libavcodec/xbmdec.c | 2 +- libavcodec/xbmenc.c | 2 +- libavcodec/xfacedec.c | 2 +- libavcodec/xfaceenc.c | 2 +- 
libavcodec/xl.c | 2 +- libavcodec/xpmdec.c | 2 +- libavcodec/xsubdec.c | 2 +- libavcodec/xsubenc.c | 2 +- libavcodec/xwddec.c | 2 +- libavcodec/xwdenc.c | 2 +- libavcodec/xxan.c | 2 +- libavcodec/y41pdec.c | 2 +- libavcodec/y41penc.c | 2 +- libavcodec/ylc.c | 2 +- libavcodec/yop.c | 2 +- libavcodec/yuv4dec.c | 2 +- libavcodec/yuv4enc.c | 2 +- libavcodec/zerocodec.c | 2 +- libavcodec/zmbv.c | 2 +- libavcodec/zmbvenc.c | 2 +- 498 files changed, 758 insertions(+), 673 deletions(-)
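
For reference, the pattern the new FF_CODEC_*_CB() macros rely on can be sketched in isolation as follows. This is a minimal standalone sketch, not the actual codec_internal.h definitions: the AVFrame/AVPacket stand-ins and callback signatures are deliberately reduced, the dummy codec is purely illustrative, and the macro bodies are a plausible expansion consistent with how the macros are used in the hunks above.

#include <stdio.h>

typedef struct AVPacket AVPacket;  /* opaque stand-ins, not the real libavcodec types */
typedef struct AVFrame  AVFrame;

enum FFCodecCbType {
    FF_CODEC_CB_TYPE_DECODE,
    FF_CODEC_CB_TYPE_ENCODE,
    /* the real header also has DECODE_SUB, RECEIVE_FRAME, ENCODE_SUB, RECEIVE_PACKET */
};

typedef struct FFCodec {
    const char *name;
    unsigned cb_type;                 /* discriminator: which union member is set */
    union {
        int (*decode)(AVFrame *frame, const AVPacket *pkt);
        int (*encode)(AVPacket *pkt, const AVFrame *frame);
        /* one member per callback type */
    } cb;
} FFCodec;

/* Each macro sets the discriminator together with the matching union member,
 * so every codec definition supplies exactly one callback. */
#define FF_CODEC_DECODE_CB(func) \
    .cb_type = FF_CODEC_CB_TYPE_DECODE, .cb.decode = (func)
#define FF_CODEC_ENCODE_CB(func) \
    .cb_type = FF_CODEC_CB_TYPE_ENCODE, .cb.encode = (func)

static int dummy_decode(AVFrame *frame, const AVPacket *pkt)
{
    (void)frame; (void)pkt;           /* no-op decoder, sketch only */
    return 0;
}

static const FFCodec ff_dummy_decoder = {
    .name = "dummy",
    FF_CODEC_DECODE_CB(dummy_decode),
};

int main(void)
{
    /* Classification dispatches on cb_type instead of testing several
     * function pointers for NULL, as in the av_codec_is_decoder() hunk. */
    if (ff_dummy_decoder.cb_type == FF_CODEC_CB_TYPE_DECODE)
        printf("%s is a decoder\n", ff_dummy_decoder.name);
    return 0;
}

Because the formerly separate callback members now share storage in the union, each FFCodec needs roughly one pointer-sized slot plus the small discriminator instead of one slot per callback type; that is the sizeof(FFCodec) reduction (and, for PIC builds, the .data.rel.ro saving) referred to in the commit message above.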