[FFmpeg-devel,6/6] lavfi: make AVFilterLink opaque in two major bumps.

Submitted by Nicolas George on Dec. 18, 2016, 12:22 p.m.

Details

Message ID 20161218122221.23688-6-george@nsup.org
State New
Headers show

Commit Message

Nicolas George Dec. 18, 2016, 12:22 p.m.
API-Change: libavfilter
Signed-off-by: Nicolas George <george@nsup.org>
---
 libavfilter/avfilter.h |   2 +
 libavfilter/internal.h | 199 +++++++++++++++++++++++++++++++++++++++++++++++++
 libavfilter/version.h  |   3 +
 3 files changed, 204 insertions(+)


Not sure what the preferred delay would be. I suspect not many programs use
libavfilter yet. In the meantime, all new fields must be added at both
places and tested.

Comments

wm4 Dec. 18, 2016, 12:33 p.m.
On Sun, 18 Dec 2016 13:22:21 +0100
Nicolas George <george@nsup.org> wrote:

> API-Change: libavfilter
> Signed-off-by: Nicolas George <george@nsup.org>
> ---
>  libavfilter/avfilter.h |   2 +
>  libavfilter/internal.h | 199 +++++++++++++++++++++++++++++++++++++++++++++++++
>  libavfilter/version.h  |   3 +
>  3 files changed, 204 insertions(+)
> 
> 
> Not sure what the preferred delay would be. I suspect not many programs use
> libavfilter yet. In the meantime, all new fields must be added at both
> places and tested.
> 
> 
> diff --git a/libavfilter/avfilter.h b/libavfilter/avfilter.h
> index 828b270..6109e58 100644
> --- a/libavfilter/avfilter.h
> +++ b/libavfilter/avfilter.h
> @@ -377,6 +377,7 @@ struct AVFilterContext {
>      unsigned ready;
>  };
>  
> +#if FF_API_AVFILTERLINK_PUBLIC
>  /**
>   * A link between two filters. This contains pointers to the source and
>   * destination filters between which this link exists, and the indexes of
> @@ -593,6 +594,7 @@ struct AVFilterLink {
>  #endif /* FF_INTERNAL_FIELDS */
>  
>  };
> +#endif /* FF_API_AVFILTERLINK_PUBLIC */
>  
>  /**
>   * Link two filters together.
> diff --git a/libavfilter/internal.h b/libavfilter/internal.h
> index a8b69fd..599be24 100644
> --- a/libavfilter/internal.h
> +++ b/libavfilter/internal.h
> @@ -145,6 +145,205 @@ struct AVFilterPad {
>      int needs_writable;
>  };
>  
> +#if !FF_API_AVFILTERLINK_PUBLIC
> +/**
> + * A link between two filters. This contains pointers to the source and
> + * destination filters between which this link exists, and the indexes of
> + * the pads involved. In addition, this link also contains the parameters
> + * which have been negotiated and agreed upon between the filter, such as
> + * image dimensions, format, etc.
> + */
> +struct AVFilterLink {
> +    AVFilterContext *src;       ///< source filter
> +    AVFilterPad *srcpad;        ///< output pad on the source filter
> +
> +    AVFilterContext *dst;       ///< dest filter
> +    AVFilterPad *dstpad;        ///< input pad on the dest filter
> +
> +    enum AVMediaType type;      ///< filter media type
> +
> +    /* These parameters apply only to video */
> +    int w;                      ///< agreed upon image width
> +    int h;                      ///< agreed upon image height
> +    AVRational sample_aspect_ratio; ///< agreed upon sample aspect ratio
> +    /* These parameters apply only to audio */
> +    uint64_t channel_layout;    ///< channel layout of current buffer (see libavutil/channel_layout.h)
> +    int sample_rate;            ///< samples per second
> +
> +    int format;                 ///< agreed upon media format
> +
> +    /**
> +     * Define the time base used by the PTS of the frames/samples
> +     * which will pass through this link.
> +     * During the configuration stage, each filter is supposed to
> +     * change only the output timebase, while the timebase of the
> +     * input link is assumed to be an unchangeable property.
> +     */
> +    AVRational time_base;
> +
> +    /**
> +     * Lists of formats and channel layouts supported by the input and output
> +     * filters respectively. These lists are used for negotiating the format
> +     * to actually be used, which will be loaded into the format and
> +     * channel_layout members, above, when chosen.
> +     *
> +     */
> +    AVFilterFormats *in_formats;
> +    AVFilterFormats *out_formats;
> +
> +    /**
> +     * Lists of channel layouts and sample rates used for automatic
> +     * negotiation.
> +     */
> +    AVFilterFormats  *in_samplerates;
> +    AVFilterFormats *out_samplerates;
> +    struct AVFilterChannelLayouts  *in_channel_layouts;
> +    struct AVFilterChannelLayouts *out_channel_layouts;
> +
> +    /**
> +     * Audio only, the destination filter sets this to a non-zero value to
> +     * request that buffers with the given number of samples should be sent to
> +     * it. AVFilterPad.needs_fifo must also be set on the corresponding input
> +     * pad.
> +     * Last buffer before EOF will be padded with silence.
> +     */
> +    int request_samples;
> +
> +    /** stage of the initialization of the link properties (dimensions, etc) */
> +    enum {
> +        AVLINK_UNINIT = 0,      ///< not started
> +        AVLINK_STARTINIT,       ///< started, but incomplete
> +        AVLINK_INIT             ///< complete
> +    } init_state;
> +
> +    /**
> +     * Graph the filter belongs to.
> +     */
> +    struct AVFilterGraph *graph;
> +
> +    /**
> +     * Current timestamp of the link, as defined by the most recent
> +     * frame(s), in link time_base units.
> +     */
> +    int64_t current_pts;
> +
> +    /**
> +     * Current timestamp of the link, as defined by the most recent
> +     * frame(s), in AV_TIME_BASE units.
> +     */
> +    int64_t current_pts_us;
> +
> +    /**
> +     * Index in the age array.
> +     */
> +    int age_index;
> +
> +    /**
> +     * Frame rate of the stream on the link, or 1/0 if unknown or variable;
> +     * if left to 0/0, will be automatically copied from the first input
> +     * of the source filter if it exists.
> +     *
> +     * Sources should set it to the best estimation of the real frame rate.
> +     * If the source frame rate is unknown or variable, set this to 1/0.
> +     * Filters should update it if necessary depending on their function.
> +     * Sinks can use it to set a default output frame rate.
> +     * It is similar to the r_frame_rate field in AVStream.
> +     */
> +    AVRational frame_rate;
> +
> +    /**
> +     * Buffer partially filled with samples to achieve a fixed/minimum size.
> +     */
> +    AVFrame *partial_buf;
> +
> +    /**
> +     * Size of the partial buffer to allocate.
> +     * Must be between min_samples and max_samples.
> +     */
> +    int partial_buf_size;
> +
> +    /**
> +     * Minimum number of samples to filter at once. If filter_frame() is
> +     * called with fewer samples, it will accumulate them in partial_buf.
> +     * This field and the related ones must not be changed after filtering
> +     * has started.
> +     * If 0, all related fields are ignored.
> +     */
> +    int min_samples;
> +
> +    /**
> +     * Maximum number of samples to filter at once. If filter_frame() is
> +     * called with more samples, it will split them.
> +     */
> +    int max_samples;
> +
> +    /**
> +     * Number of channels.
> +     */
> +    int channels;
> +
> +    /**
> +     * Link processing flags.
> +     */
> +    unsigned flags;
> +
> +    /**
> +     * Number of past frames sent through the link.
> +     */
> +    int64_t frame_count_in, frame_count_out;
> +
> +    /**
> +     * A pointer to a FFVideoFramePool struct.
> +     */
> +    void *video_frame_pool;
> +
> +    /**
> +     * True if a frame is currently wanted on the output of this filter.
> +     * Set when ff_request_frame() is called by the output,
> +     * cleared when a frame is filtered.
> +     */
> +    int frame_wanted_out;
> +
> +    /**
> +     * For hwaccel pixel formats, this should be a reference to the
> +     * AVHWFramesContext describing the frames.
> +     */
> +    AVBufferRef *hw_frames_ctx;
> +
> +    /**
> +     * Queue of frames waiting to be filtered.
> +     */
> +    FFFrameQueue fifo;
> +
> +    /**
> +     * If set, the source filter can not generate a frame as is.
> +     * The goal is to avoid repeatedly calling the request_frame() method on
> +     * the same link.
> +     */
> +    int frame_blocked_in;
> +
> +    /**
> +     * Link input status.
> +     * If not zero, all attempts of filter_frame will fail with the
> +     * corresponding code.
> +     */
> +    int status_in;
> +
> +    /**
> +     * Timestamp of the input status change.
> +     */
> +    int64_t status_in_pts;
> +
> +    /**
> +     * Link output status.
> +     * If not zero, all attempts of request_frame will fail with the
> +     * corresponding code.
> +     */
> +    int status_out;
> +
> +};
> +#endif /* !FF_API_AVFILTERLINK_PUBLIC */
> +
>  struct AVFilterGraphInternal {
>      void *thread;
>      avfilter_execute_func *thread_execute;
> diff --git a/libavfilter/version.h b/libavfilter/version.h
> index e3bd8d0..8256781 100644
> --- a/libavfilter/version.h
> +++ b/libavfilter/version.h
> @@ -67,5 +67,8 @@
>  #ifndef FF_API_NOCONST_GET_NAME
>  #define FF_API_NOCONST_GET_NAME             (LIBAVFILTER_VERSION_MAJOR < 7)
>  #endif
> +#ifndef FF_API_AVFILTERLINK_PUBLIC
> +#define FF_API_AVFILTERLINK_PUBLIC          (LIBAVFILTER_VERSION_MAJOR < 8)
> +#endif
>  
>  #endif /* AVFILTER_VERSION_H */

Did you send the same patches to Libav? This makes the API incompatible
with Libav.
Nicolas George Dec. 18, 2016, 12:37 p.m.
L'octidi 28 frimaire, an CCXXV, wm4 a écrit :
> Did you send the same patches to Libav? This makes the API incompatible
> with Libav.

Their API has been non-working for a long time, even if technically
compatible with ours. Their problem.

Regards,
wm4 Dec. 18, 2016, 12:48 p.m.
On Sun, 18 Dec 2016 13:37:44 +0100
Nicolas George <george@nsup.org> wrote:

> L'octidi 28 frimaire, an CCXXV, wm4 a écrit :
> > Did you send the same patches to Libav? This makes the API incompatible
> > with Libav.  
> 
> Their API has been non-working for a long time, even if technically
> compatible with ours. Their problem.

Can't confirm that assessment.
Michael Niedermayer Dec. 18, 2016, 5:26 p.m.
On Sun, Dec 18, 2016 at 01:22:21PM +0100, Nicolas George wrote:
> API-Change: libavfilter
> Signed-off-by: Nicolas George <george@nsup.org>
> ---
>  libavfilter/avfilter.h |   2 +
>  libavfilter/internal.h | 199 +++++++++++++++++++++++++++++++++++++++++++++++++
>  libavfilter/version.h  |   3 +
>  3 files changed, 204 insertions(+)
> 
> 
> Not sure what the preferred delay would be. I suspect not many programs use
> libavfilter yet. In the meantime, all new fields must be added at both
> places and tested.

How does this patchset relate to the openness of the API?
You aren't saying anything about the plans, goals, or intent of this (or
I missed it or failed to associate it with the patchset)

I am asking as it seems like this is moving libavfilter further away
from being "open" and centralizing control over filters more.
I most likely misinterpret this, as I just see the change in the code
and don't know the whole story, but IMO we should move toward a clean and
stable API that everyone can use.
That also implies allowing filters to use only the public API,
while this patchset seems to make filters use more private API by
making more needed API private. I think an open API and external
filter support would drive developers and users towards libavfilter,
while locking it down would likely do the opposite

[...]
Nicolas George Dec. 18, 2016, 6:41 p.m.
L'octidi 28 frimaire, an CCXXV, Michael Niedermayer a écrit :
> How does this patchset relate to the open-ness of the API ?
> you arent saying anything about the plans, goals, intend of this (or
> i missed it or fail to associate it with the patchset)

I am doing this to accommodate people who object to having a different
view of AVFilterLink for the public API and the internal implementation,
mostly Hendrik, Andreas and Clément.

As for me, I am pretty happy with the current code that gives a
different view of AVFilterLink to the public than the one used for
implementation. Something like that is needed because some fields have a
type that is not itself public.

> Iam asking as it seems like this is moving libavfilter further away
> from being "open" and centralizing control over filters more.
> I most likely misinterpret this as i just see the change in the code
> and dont know the whole story but IMO we should move toward a clean and
> stable API that everyone can use.
> That also implies to allow filters to only use public API.
> while this patchset seems to make filters use more private api by
> making more needed API private. I think a open API and external
> filter support would drive developers and users towards libavfilter
> while locking it down would likely do the opposit

I am not sure I understand what you mean by openness. Do you mean
applications writing their own filter and inserting it in a filter
graph? If so, I can tell you it is not currently possible, and has not
been since at least 2012-06-12 (9d0bfc5 lavfi: make AVFilterPad opaque
after two major bumps.).

Regards,
Michael Niedermayer Dec. 18, 2016, 9:33 p.m.
On Sun, Dec 18, 2016 at 07:41:14PM +0100, Nicolas George wrote:
> L'octidi 28 frimaire, an CCXXV, Michael Niedermayer a écrit :
> > How does this patchset relate to the open-ness of the API ?
> > you arent saying anything about the plans, goals, intend of this (or
> > i missed it or fail to associate it with the patchset)
> 
> I am doing this to accommodate people who object to having a different
> view of AVFilterLink for the public API and the internal implementation,
> mostly Hendrik, Andreas and Clément.
> 
> As for me, I am pretty happy with the current code that gives a
> different view of AVFilterLink to the public than the one used for
> implementation. Something like that is needed because some fields have a
> type that is not itself public.
> 
> > Iam asking as it seems like this is moving libavfilter further away
> > from being "open" and centralizing control over filters more.
> > I most likely misinterpret this as i just see the change in the code
> > and dont know the whole story but IMO we should move toward a clean and
> > stable API that everyone can use.
> > That also implies to allow filters to only use public API.
> > while this patchset seems to make filters use more private api by
> > making more needed API private. I think a open API and external
> > filter support would drive developers and users towards libavfilter
> > while locking it down would likely do the opposit
>

> I am not sure I understand what you mean by openness. Do you mean
> applications writing their own filter and inserting it in a filter
> graph?

yes, and ideally also installable filters like plugins.
But really, plugins are IMHO not hard; having an API that one can
access is the important part. Even without a true plugin API, other
apps or users could link in filters built from a separate package
quite easily


> If so, I can tell you it is not currently possible, and has not
> been since at least 2012-06-12 (9d0bfc5 lavfi: make AVFilterPad opaque
> after two major bumps.).

I know, but at the time all this closing down of the API happened, it
was said that this was temporary (not by you — I don't remember who
said so — and not limited to libavfilter), and now, over 4 years later,
the temporary seems to be becoming permanent.

IMHO, if you want libavfilter to be a success in the long term, it needs
to be an open system with a clean API that anyone can use and add filters
to.

As it is, libavfilter is limited to what ffmpeg developers want INSIDE
ffmpeg. If a filter doesn't fit into that, it will likely be rejected.
Which makes sense if there are external filters / plugins. But if the
only way to use a filter in libavfilter is through it being accepted
into main ffmpeg git, that just makes libavfilter unfit as a _general_
filter framework.


[...]
Nicolas George Dec. 19, 2016, 8:25 a.m.
L'octidi 28 frimaire, an CCXXV, Michael Niedermayer a écrit :
> i know, but at the time all this closing down of the API happened it
> was said that this was temporary (not by you and i dont remember who
> said so and not limited to libavfilter) and now over 4 years later
> temporary seems to be changing into the permanent end.
> 
> IMHO if you want libavfilter to be a success in the long term it needs
> to be a open system with clean API that anyone can use and add filters
> to.
> 
> As it is libavfilter is limited to what ffmpeg developers want INSIDE
> ffmpeg. If a filter doesnt fit into that it will likely be rejected.
> Which makes sense if there are external filters / plugins. But if the
> only way to use a filter in libavfilter is through it being accepted
> into main ffmpeg git that just makes libavfilter unfit as a _general_
> filter framework.

Well, all you write here is entirely true. Making it possible to use
application-provided and plugin-provided filters in libavfilter is a
worthy goal.

But you have to consider the other side of the coin: all the genericness
of libavfilter requires a very complex API for filters, and keeping this
API stable would put a tremendous constraint on its evolution.

Just consider Marton's recent patch to make support of unknown channel
layouts the default. He could do it because he could review all the
existing filters, check they were ready and make the small adjustments
necessary. If there were foreign filters to accommodate, it would have
been impossible.

Removing the recursiveness in request_frame() (last year's patch
series), threading, the move to real frames, etc., would have been
equally impossible.

The short of it is that libavfilter is still too young to allow that.
The age must not be measured in years, but in manpower.

Regards,
Michael Niedermayer Dec. 19, 2016, 10:19 a.m.
On Mon, Dec 19, 2016 at 09:25:14AM +0100, Nicolas George wrote:
> L'octidi 28 frimaire, an CCXXV, Michael Niedermayer a écrit :
> > i know, but at the time all this closing down of the API happened it
> > was said that this was temporary (not by you and i dont remember who
> > said so and not limited to libavfilter) and now over 4 years later
> > temporary seems to be changing into the permanent end.
> > 
> > IMHO if you want libavfilter to be a success in the long term it needs
> > to be a open system with clean API that anyone can use and add filters
> > to.
> > 
> > As it is libavfilter is limited to what ffmpeg developers want INSIDE
> > ffmpeg. If a filter doesnt fit into that it will likely be rejected.
> > Which makes sense if there are external filters / plugins. But if the
> > only way to use a filter in libavfilter is through it being accepted
> > into main ffmpeg git that just makes libavfilter unfit as a _general_
> > filter framework.
> 
> Well, all you write here is entirely true. Making it possible to use
> application-provided and plugin-provided filters in libavfilter is a
> worthy goal.
> 
> But you have to consider the other side of the coin: all the genericness
> of libavfilter requires a very complex API for filters, and keeping this
> API stable would put a tremendous constraint on its evolution.
> 

> Just consider Marton's recent patch to make support of unknown channel
> layouts the default. He could do it because he could review all the
> existing filters, check they were ready and make the small adjustments
> necessary. If there were foreign filters to accommodate, it would have
> been impossible.

Changing a default is not a needed change, and this one was even
rolled back and forth on the ML for a while


> 
> Removing the recursiveness in request_frame() (last year's patch
> series), threading, the move to real frames, etc., would have been
> equally impossible.

Changing from AVFilterFrame to AVFrame could have been done using
a new set of API functions and deprecating the old.

Things like that were not done as they weren't needed without a public
API. I don't think there's anything we really needed to do that we
would not have been able to do with a public API, if that was the
constraint under which we worked.
Most projects work under the constraint of a stable public API.


> 
> The short of it is that libavfilter is still too young to allow that.
> The age must not be measured in years, but in manpower.

There's a big difference between the "wish to redesign" and the "need to
redesign".
More so, the "wish to redesign" might not end before the loss of
interest in the code as such, and the next developer might have a new
and different "wish to redesign".
With this you might never reach the point of having a stable API, no
matter what manpower you have, as with more people might come more
wishes.

I really think we should draw a line at some point and commit ourselves
to a stable API.

But there's another issue we should keep in mind. I think if someone
forked libavfilter, documented it well, added a plugin interface,
committed himself to a stable API and had a bit of PR success, I am not sure
which side would win.

[...]
Nicolas George Dec. 19, 2016, 4:39 p.m.
Le nonidi 29 frimaire, an CCXXV, Michael Niedermayer a écrit :
> Changing from AVFilterFrame to AVFrame could have been done using
> a new set of API functions and deprecating the old.
> 
> Things like that were not done as it wasnt needed without a public
> API, i dont think theres anything we really needed to do that we
> would not have been able to with a public API if that was the
> constraint under which we worked.
> Most projects work under the constraint of a stable public API.

> Theres a big difference between the "wish to redesign" and the "need to
> redesign"
> more so the "wish to redesign" might not end before the loss of
> interrest in the code as such, And the next developer might have a new
> and different "wish to redesign".
> With this you might never reach the point of having a stable API no
> matter what man power you have as with more man might come more
> wishes.

All this is certainly true, but it requires manpower. We do not have it;
mostly, we only have me.

> I really think we should draw a line at some point and commit ourselfs
> to some stable API.

I agree, but it should be done only when the API and design are good
enough. I do not think they are right now. A clue to prove that: when
something breaks around libavfilter in ffmpeg.c, you have to ask me,
showing that the code is complex, more than it should be and I think I
can make it simpler.

And even larger projects with plenty of manpower and API stability, from
time to time, decide to start fresh.

> But theres another issue we should keep in mind. I think if someone
> forked libavfilter, documented it well, added a plugin interface and
> commited himself to a stable API and had a bit PR success iam not sure
> which side would win.

That would be great! That would mean a version of libavfilter with good
documentation, stable API and a plugin interface. We could merge it and
I could start working on something else entirely, possibly asynchronous
I/O.

But before that would happen, I would expect the people interested to
make contact, on the mailing-list or with me personally, and we could
discuss how to do it together, without forking. That would be, of
course, even better.

Alas, the state of affairs now is that I cannot even find someone to
discuss fine points of API evolution.

More to the point: is this discussion meant to be an objection to the
patch itself?

Right now, because a lot of necessary structures and functions are
internal, plugins are not possible, so this patch is not making things
any worse. I am all in favour of allowing plugins eventually, but I
think the only reasonable course of action is: first a good API, then
make it stable, then make it public.

Regards,
Michael Niedermayer Dec. 19, 2016, 6:03 p.m.
On Mon, Dec 19, 2016 at 05:39:53PM +0100, Nicolas George wrote:
> Le nonidi 29 frimaire, an CCXXV, Michael Niedermayer a écrit :
> > Changing from AVFilterFrame to AVFrame could have been done using
> > a new set of API functions and deprecating the old.
> > 
> > Things like that were not done as it wasnt needed without a public
> > API, i dont think theres anything we really needed to do that we
> > would not have been able to with a public API if that was the
> > constraint under which we worked.
> > Most projects work under the constraint of a stable public API.
> 
> > Theres a big difference between the "wish to redesign" and the "need to
> > redesign"
> > more so the "wish to redesign" might not end before the loss of
> > interrest in the code as such, And the next developer might have a new
> > and different "wish to redesign".
> > With this you might never reach the point of having a stable API no
> > matter what man power you have as with more man might come more
> > wishes.
> 
> All this is certainly true, but it requires manpower. We do not have it;
> mostly, we only have me.

> 
> > I really think we should draw a line at some point and commit ourselfs
> > to some stable API.
> 
> I agree, but it should be done only when the API and design are good
> enough. I do not think they are right now. A clue to prove that: when
> something breaks around libavfilter in ffmpeg.c, you have to ask me,
> showing that the code is complex, more than it should be and I think I
> can make it simpler.

I would very much be in favor of this,
though I wonder what mistake we made to end up here; the original design
was intended to be very simple ...


> 
> And even larger projects with plenty of manpower and API stability, from
> time to time, decide to start fresh.
> 
> > But theres another issue we should keep in mind. I think if someone
> > forked libavfilter, documented it well, added a plugin interface and
> > commited himself to a stable API and had a bit PR success iam not sure
> > which side would win.
> 
> That would be great! That would mean a version of libavfilter with good
> documentation, stable API and a plugin interface. We could merge it and
> I could start working on something else entirely, possibly asynchronous
> I/O.

>
> But before that would happen, I would expect the people interested to
> make contact, on the mailing-list or with me personally, and we could
> discuss how to do it together, without forking. That would be, of
> course, even better.
> 
> Alas, the state of affairs now is that I cannot even find someone to
> discuss fine points of API evolution.

My interest is having a stable public API and a plugin interface,
and I am interested in discussing that.
I am also interested in discussing clearly defined problems,
reproducible issues and solutions.
About fine details I don't really care,
and your libavfilter patches are sometimes complex, making it not a
trivial, quick thing to discuss them

> 
> More to the point: is this discussion meant to be an objection to the
> patch itself?

its not an objection to the patch


> 
> Right now, because a not of necessary structures and functions are
> internal, plugins are not possible, so this patch is not making things
> any worse. I am all in favour of allowing plugins eventually, but I
> think the only reasonable course of action is: first a good API, then
> make it stable, then make it public.

You complain that no one else is working on libavfilter;
how many people know what _NEEDS_ to be done to make the API good
enough for a first public and stable API?

While I have a long todo list and maybe no time for this, as it is I can't
even work on making libavfilter public+stable. It's not a technical
issue; it's one of people's requirements on what they require to be
changed first.


[...]
Nicolas George Dec. 20, 2016, 6:24 p.m.
Le nonidi 29 frimaire, an CCXXV, Michael Niedermayer a écrit :
> though i wonder what mistake we did to end here, the original design
> was intended to be very simple ...

That was the mistake: trying for a simple design.

Ha ha! Only serious. Linear filter chains are simple, and could have
worked easily with a simple design. But non-linear filter graphs are
inherently more complex: the system needs to decide what filter to
activate first from several parallel branches.

Graphs with several inputs and/or outputs are even more difficult,
because the application needs to decide which input needs a frame most
urgently.

Both lavfi's original design (API and implementation) and its use in
ffmpeg (actually, avconv) for complex filter graphs neglect partially or
completely this issue.

I tried to address this, and it more or less works, but that was with
localized changes, not a real design. The changes became less localized
in ffmpeg, but still.

(Also, the code in ffmpeg is made more complex because of the "-c copy"
and subtitles paths, because they do not transit through lavfi, and for
copy not through lavc. I have ideas to make this simpler, I can share
them, but this is only remotely related to the current discussion and I
do not want to clutter it.)

> my interrest is having a stable public API and a plugin interface
> and iam interrested discussing that.
> Iam also interrested in discussing clearly defined problems,
> reproducable issues and solutions.
> about fine details i dont really care
> and your libavfilter patches are sometimes complex making it not a
> trivial quick thing to discuss them

"make filter_frame non-recursive" was the Beast. The next patches will
be much simpler and straightforward, mostly isolating existing code in
cleaner functions.

> You complain that noone else is working on libavfilter
> how many people know what _NEEDS_ to be done to make the API good
> enough for a first public and stable API ?
> 
> while i have a long todo and maybe no time for this, as it is i cant
> even work on making libavfilter public+stable. Its not a technical
> issue, its one of peoples requirements on what they require to be
> changed first.

This is true, but there is another side to the human problem: explaining
the plans and requirements of the API takes time; if nobody will read it
carefully, that time is better spent actually coding (I am writing this
mail by snippets while FATE is running).

Here is an outline, in approximate order (there are dependencies, but
not everything):

- Add a callback AVFilter.activate() to replace filter_frame() on all
  inputs and request_frame() on all outputs. Most non-trivial filters
  are written that way in the first place.

- Change buffersink to implement that callback and peek directly in the
  FIFO.

- Write a generic activate callback for 1-to-1 filters and use it.

- Rewrite framesync (the utility system for filters with several video
  inputs that need synchronized frames) to implement activate and use the
  FIFO directly.

- Allow to give buffersrc a timestamp on EOF, make sure the timestamp is
  forwarded by most filters and allow to retrieve it from buffersink.

  (If somebody has a suggestion of a good data structure for that...)

- Allow to set a callback on buffersinks and buffersrcs to be notified
  when a frame arrives or is needed. It is much more convenient than
  walking the buffers to check one by one.

- Allow to merge several filter graphs into one. This may be useful when
  applications have several graphs to run simultaneously, so that they
  do not need to decide which one to activate. Another option would be
  to have the functions in the next step work on possibly several
  graphs.

- Keep a priority queue of ready filters and use it instead of walking
  the graph.

- Add a function to run the graph until "something" happens; "something"
  meaning a stop instruction called by the callbacks.

- Add utility functions to easily run the graph. The most useful IMHO:
  run until a frame comes out from any sink (we get the frame and
  something useful identifying the sink) or one is needed from a source
  (we get something identifying the source) or EOF.

I think that covers it. Please feel free to ask for details, give
suggestions, etc. Please.

Regards,
Michael Niedermayer Dec. 20, 2016, 10:56 p.m.
On Tue, Dec 20, 2016 at 07:24:44PM +0100, Nicolas George wrote:
> Le nonidi 29 frimaire, an CCXXV, Michael Niedermayer a écrit :
> > though i wonder what mistake we did to end here, the original design
> > was intended to be very simple ...
> 
> That was the mistake: trying for a simple design.
> 
> Ha ha! Only serious. Linear filter chains are simple, and could have
> worked easily with a simple design. But non-linear filter graphs are
> inherently more complex: the system needs to decide what filter to
> activate first from several parallel branches.

it shouldn't really be complex, not in concept, maybe in (efficient)
implementation.
For example, as a concept, one could imagine each filter runs as its
own thread and waits on its inputs' availability and output space.
If it gets input and has output space, it is woken up, works,
produces output, and then wakes its surroundings up.
No difference between a linear chain or a complex graph here.

And it's simple, as we use and need just a local view centered on a
filter and its neighbors.

IIUC you already implemented the gory details of filters handing stuff
around, and that working and not getting stuck nor bloating up too badly.
But as a concept, ignoring the implementation details, I would call it
simple.

I am not sure it's useful, but
Another view from a different direction would be to see the filter
graph as a network of pipes with flowing water and interconnected
pumps. This too is a simple concept and intuition would suggest that
this would not easily end up with water being stuck nor too much
accumulating. It does not 1:1 match real filters though which work
on discrete frames; I wonder if this view is useful.
It would allow answering global questions about a graph, like what
inputs are useless when some output is welded shut, or vice versa.


> 
> Graphs with several inputs and/or outputs are even more difficult,
> because the application needs to decide which input needs a frame most
> urgently.
> 

> Both lavfi's original design (API and implementation) and its use in
> ffmpeg (actually, avconv) for complex filter graphs neglect partially or
> completely this issue.

I think the original lavfi design didn't really have any issue with graphs with
multiple inputs or outputs. A user app could decide where to put input and
output, but FFmpeg didn't support this at the time IIRC, so the people working
on the original lavfi had nothing to implement.
The problems came when this support was added much later.


[...]
> > You complain that noone else is working on libavfilter
> > how many people know what _NEEDS_ to be done to make the API good
> > enough for a first public and stable API ?
> > 
> > while i have a long todo and maybe no time for this, as it is i cant
> > even work on making libavfilter public+stable. Its not a technical
> > issue, its one of peoples requirements on what they require to be
> > changed first.
> 
> This is true, but there is another side to the human problem: explaining
> the plans and requirements of the API takes time; if nobody will read it
> carefully, that time is better spent actually coding (I am writing this
> mail by snippets while FATE is running).
> 
> Here is an outline, in approximate order (there are dependencies, but
> not everything):
> 

> - Add a callback AVFilter.activate() to replace filter_frame() on all
>   inputs and request_frame() on all outputs. Most non-trivial filters
>   are written that way in the first place.

ok thats mostly cosmetic id say.
I think we possibly even had something somewhat in that direction in
the original lavfi discussions MANY years ago
this makes more sense with non recursive stuff


> 
> - Change buffersink to implement that callback and peek directly in the
>   FIFO.

ok, "cosmetic"


> 
> - Write a generic activate callback for 1-to-1 filters and use it.

there were plans for simplifying generic filters
1 in 1 out frame filters
apply something to every pixel filters
some more cases i forgot

while different now with activate() the idea is old and IIRC was
supposed to be done long long ago. I think its a good idea if it does
happen


> 
> - Rewrite framesync (the utility system for filters with several video
>   inputs that need synchroized frames) to implement activate and use the
>   FIFO directly.

cosmetic :)


> 
> - Allow to give buffersrc a timestamp on EOF, make sure the timestamp is
>   forwarded by most filters and allow to retrieve it from buffersink.
> 
>   (If somebody has a suggestion of a good data structure for that...)

AVFrame.duration
This possibly is a big subject for discussion on its own, but maybe
not i dont know.


> 
> - Allow to set a callback on buffersinks and buffersrcs to be notified
>   when a frame arrives or is needed. It is much more convenient than
>   walking the buffers to check one by one.

agree that walking is bad.
cannot ATM argue on what is better as i dont feel that i have a clear
enough view of this and the surroundings.


> 
> - Allow to merge several filter graphs into one. This may be useful when
>   applications have several graphs to run simultaneously, so that they
>   do not need to decide which one to activate. Another option would be
>   to have the functions in the next step work on possibly several
>   graphs.

This may be orthogonal, but I think a filter graph should be a filter
(maybe not at struct level literally, but it should be possible to have an
 AVFilter that is backed by an arbitrary user-specified filter graph)


> 
> - Keep a priority queue of ready filters and use it instead of walking
>   the graph.

something better than O(n) yes, agree


> 
> - Add a function to run the graph until "something" happens; "something"
>   meaning a stop instruction called by the callbacks.

dont understand


> 
> - Add utility functions to easily run the graph. The most useful IMHO:
>   run until a frame comes out from any sink (we get the frame and
>   something useful identifying the sink) or one is needed from a source
>   (we get something identifying the source) or EOF.

if the API itself is simpler with utility functions then this sounds
like a good idea

thx

[...]
Nicolas George Dec. 21, 2016, 9:27 a.m.
Le decadi 30 frimaire, an CCXXV, Michael Niedermayer a écrit :
> it shouldnt really be complex, not in concept, maybe in (efficient)
> implementation

I think it is.

> For example as a concept one could imagine each filter runs as its
> own thread and waits on its inputs availability and output space.
> if it gets input and has output space its woken up and works and
> produces output and then wakes its surroundings up.
> no difference between a linear chain or a complex graph here

Sorry, but it does not work, at least not just like that:

If you make the pipes between filters asynchronous and unlimited, then
you could have movie from a fast codec flooding its output while overlay
is waiting on something slower on its other input. OOM.

If you make the pipes synchronous or limited, then you have problems
when there are cycles in the undirected graph, i.e. if there are
split+merge situations (a situation supported from the beginning and
present in many of the examples): merge starves on one input while split
is blocked trying to feed its other input. Deadlock.

Maybe it could be made to work with some kind of "elastic" pipes: the
fuller they are, the lower the priority. But my point is already proven:
it is complicated.

Note that what I ended up doing is based on exactly that. But it does
not only track where frames are possible but also where they are needed.

> iam not sure its usfull but
> Another view from a different direction would be to see the filter
> graph as a network of pipes with flowing water and interconnected
> pumps. This too is a simple concept and intuition would suggest that
> this would not easily end up with water being stuck nor too much
> accumulating. It does not 1:1 match real filters though which work
> on discrete frames, i wonder if this view is usefull
> It would allow awnsering global questions about a graph, like what
> inputs are useless when some output is welded shut or vice versa

Basically, you are suggesting to apply graph theory to the filter graph.
That would be very smart. Alas to do that, it is necessary to actually
know the graph. We do not: we do not know how inputs and outputs are
connected within filters. For example, the connection is very different
for overlay and concat, but from the outside they are indistinguishable.
And select is even worse, of course.

> i think the original lavfi design didnt really had any issue with graphs with
> multiple inputs or outputs. A user app could decide were to in and out
> put. but FFmpeg didnt support this at the time IIRC so the people working
> on the original lavfi had nothing to implement.
> the problems came when this support was added much later

Not only that: before I added it (i.e. not in the original design),
lavfi did not give the application enough information to decide what
input to feed. It is still missing in the fork's implementation.

> > - Add a callback AVFilter.activate() to replace filter_frame() on all
> >   inputs and request_frame() on all outputs. Most non-trivial filters
> >   are written that way in the first place.
> ok thats mostly cosmetic id say.

I think the difference between a mediocre API and a good one is partly
cosmetic.

But this change is not cosmetic at all. Right now, if you want to notify
a filter that EOF arrived on one input, you have to request a frame on
one of its output and hope that it will in turn cause a read on that
input. But you could end up pumping on another input instead.

> > - Change buffersink to implement that callback and peek directly in the
> >   FIFO.
> ok, "cosmetic"

Except for the EOF thingie, which is the biggest glitch at this time
AFAIK.

> > - Rewrite framesync (the utility system for filters with several video
> >   inputs that need synchroized frames) to implement activate and use the
> >   FIFO directly.
> cosmetic :)

Ditto.

> > - Allow to give buffersrc a timestamp on EOF, make sure the timestamp is
> >   forwarded by most filters and allow to retrieve it from buffersink.
> > 
> >   (If somebody has a suggestion of a good data structure for that...)

Actually, the question in parentheses was about the priority queue, they
got separated when I reorganized my paragraphs. Sorry.

> AVFrame.duration
> This possibly is a big subject for discussion on its own, but maybe
> not i dont know.

Indeed, there are pros and cons. I found that an actual timestamp was
slightly better, mainly because we often do not know the duration
immediately.

> > - Allow to set a callback on buffersinks and buffersrcs to be notified
> >   when a frame arrives or is needed. It is much more convenient than
> >   walking the buffers to check one by one.
> agree that walking is bad.
> cannot ATM argue on what is better as i dont feel that i have a clear
> enough view of this and the surroundings.

You could have said it: cosmetic :)

> > - Allow to merge several filter graphs into one. This may be useful when
> >   applications have several graphs to run simultaneously, so that they
> >   do not need to decide which one to activate. Another option would be
> >   to have the functions in the next step work on possibly several
> >   graphs.
> This may be orthogonal but i think a filter graph should be a filter
> (maybe not at struct level litterally but it should be possible to have a
>  AVFilter that is backed by a arbitrary user specified filter graph)

That would be nice, but I was referring to joining independent graphs,
just to spare the application the task of scheduling between the graphs
themselves.

> > - Add a function to run the graph until "something" happens; "something"
> >   meaning a stop instruction called by the callbacks.
> dont understand

Just the obvious use: feed input, "run" the graph, reap outputs, repeat.
What we have now, but not bound to the "oldest sink" thing I introduced
long ago (that made sense with the recursive design).

And with threading, we want the graph to continue running while we reap
the outputs.

Regards,
Michael Niedermayer Dec. 21, 2016, 1:39 p.m.
On Wed, Dec 21, 2016 at 10:27:13AM +0100, Nicolas George wrote:
> Le decadi 30 frimaire, an CCXXV, Michael Niedermayer a écrit :
[...]
> 
> > iam not sure its usfull but
> > Another view from a different direction would be to see the filter
> > graph as a network of pipes with flowing water and interconnected
> > pumps. This too is a simple concept and intuition would suggest that
> > this would not easily end up with water being stuck nor too much
> > accumulating. It does not 1:1 match real filters though which work
> > on discrete frames, i wonder if this view is usefull
> > It would allow awnsering global questions about a graph, like what
> > inputs are useless when some output is welded shut or vice versa
> 
> Basically, you are suggesting to apply graph theory to the filter graph.
> That would be very smart. Alas to do that, it is necessary to actually
> know the graph. We do not: we do not know how inputs and outputs are
> connected within filters. For example, the connection is very different
> for overlay and concat, but from the outside they are indistinguishable.
> And select is even worse, of course.

The framework could monitor filters to determine their apparent
behavior. This would of course not give exact prediction of future
behavior.
I cant say how useful it would be to use this of course ...


> 
> > i think the original lavfi design didnt really had any issue with graphs with
> > multiple inputs or outputs. A user app could decide were to in and out
> > put. but FFmpeg didnt support this at the time IIRC so the people working
> > on the original lavfi had nothing to implement.
> > the problems came when this support was added much later
> 
> Not only that: before I added it (i.e. not in the original design),
> lavfi did not give the application enough information to decide what
> input to feed. It is still missing in lithe fork's implementation.

well, in the original design a filter graph can either be used in a
pull based application in which primarly data is requested from its
outputs and the requests recursivly move to its inputs triggering
data read through callbacks from some source filter. [applications
could implement their own source filter as there was a public API]

Or in a push based application each source would have a fifo,
if its empty the application needs to push data into the fifo, data
again is returned by requesting from the sink(s).

Which sink to pull data from could be determined by first pulling
from ones that had data when polled, and then it would be up to the
application to decide; your lowest-timestamp choice would have been
a possibility, and keeping track of apparent in-out relations would
be another. (This was either way the application's choice and not
lavfi's.)

So I stand by my opinion that the original lavfi design didn't really
have an issue with graphs with multiple inputs or outputs.

No question it wasnt perfect and considering it wasnt used at the time
at all that shouldnt be surprising

but it really doesnt matter now, we moved forward from there and need
to move more forward


> 
> > > - Add a callback AVFilter.activate() to replace filter_frame() on all
> > >   inputs and request_frame() on all outputs. Most non-trivial filters
> > >   are written that way in the first place.
> > ok thats mostly cosmetic id say.
> 
> I think the difference between a mediocre API and a good one is partly
> cosmetic.
> 
> But this change is not cosmetic at all. Right now, if you want to notify
> a filter that EOF arrived on one input, you have to request a frame on
> one of its output and hope that it will in turn cause a read on that
> input. But you could end up pumping on another input instead.
> 
> > > - Change buffersink to implement that callback and peek directly in the
> > >   FIFO.
> > ok, "cosmetic"
> 
> Except for the EOF thingie, which is the biggest glitch at this time
> AFAIK.
> 
> > > - Rewrite framesync (the utility system for filters with several video
> > >   inputs that need synchroized frames) to implement activate and use the
> > >   FIFO directly.
> > cosmetic :)
> 
> Ditto.

differences in corner cases yes, i didnt mean to imply that its
purely and 100% cosmetic. More that its basically a cosmetic change
replacing how the more or less same code is triggered and that maybe
some of this could be done by some gsoc student or other volunteer.
Aka at least part of this seems time consuming but not highly complex
work.


> 
> > > - Allow to give buffersrc a timestamp on EOF, make sure the timestamp is
> > >   forwarded by most filters and allow to retrieve it from buffersink.
> > > 
> > >   (If somebody has a suggestion of a good data structure for that...)
> 
> Actually, the question in parentheses was about the priority queue, they
> got separated when I reorganized my paragraphs. Sorry.
> 
> > AVFrame.duration
> > This possibly is a big subject for discussion on its own, but maybe
> > not i dont know.
> 
> Indeed, there are pros and cons. I found that an actual timestamp was
> slightly better, mainly because we often do not know the duration
> immediately.

I don't think not knowing the duration is a problem.
You need replicated frames possibly elsewhere already. Like for
subtitles, it's not much different to duplicating the last frame with
the remaining-to-EOF duration to be added to the last 1 or 0 duration,
but I didn't think deeply about this now, so I might miss details.
The issue is also not specific to subtitles; audio tracks with "holes"
in them exist too, as do video slideshows. At least in some use cases,
limiting the distance between frames is needed. (For example, to
ensure random access as in keyframes. The issue can to some extent
be pushed into the container format, I guess, but for truly streamed
formats, if you don't repeat your video frame and the subtitles which
are currently displayed, they just won't get displayed if you start viewing
around that point.)
So to me it seems there are a lot of issues that all can be dealt with
by some support to replicate frames in long stretches of no frames,
and later drop them if they aren't needed; the last EOF-duration
containing frame could then be just another such case.
But again, I didn't think deeply about this.


> 
> > > - Allow to set a callback on buffersinks and buffersrcs to be notified
> > >   when a frame arrives or is needed. It is much more convenient than
> > >   walking the buffers to check one by one.
> > agree that walking is bad.
> > cannot ATM argue on what is better as i dont feel that i have a clear
> > enough view of this and the surroundings.
> 
> You could have said it: cosmetic :)

I should be more verbose; I said cosmetic, but I meant more than just
what cosmetic means literally :)


[...]
Nicolas George Dec. 21, 2016, 4:40 p.m.
Le primidi 1er nivôse, an CCXXV, Michael Niedermayer a écrit :
> The framework could monitor filters to determine their apparent
> behavior. This would of course not give exact prediction of future
> behavior.
> I cant say how useful it would be to use this of course ...

I do not see how to integrate that in something reliable, but somebody
may.

> well, in the original design a filter graph can either be used in a
> pull based application in which primarly data is requested from its
> outputs and the requests recursivly move to its inputs triggering
> data read through callbacks from some source filter. [applications
> could implement their own source filter as there was a public API]
> 
> Or in a push based application each source would have a fifo,
> if its empty the application needs to push data into the fifo, data
> again is returned by requesting from the sink(s).
> 
> Which sink to pull data from could be determied by first pulling
> from ones that had data when polled and then it would be up to the
> application to decide, your lowest timestamp choice would have been
> a possibility, keeping track of apparent in-out relations would
> be another. (this was either way application side and not lavfis
> choice)

I am not sure I can easily keep up the discussion: we are going back to
the basics of the scheduling, I worked on it in spring-summer 2012.
Since then, I remember a lot of "that does not work" but not all the
"because" that come after. I can give a few examples.

I think pull-only mode may work if the application is aware of all the
sinks and makes assumptions about their synchronization (i.e.
split -> normal speed + slow motion would not work), which seems
reasonable. Unfortunately, pull-only is the least useful way of using
that kind of library.

Push-only mode does not work with several inputs (or sources), because
you can not know which one needs a frame (the actual need may be far
downstream), and assumptions about synchronization are really not
acceptable in input.

Local-mixed pull-push (i.e. filters can issue a pull in reaction to a
push and reciprocally) solves these issues, but can result in infinite
loops: split pushes, first on out0 connected to overlay in0, overlay
pulls on in1 connected to split out1, split pushes, Redo From Start,
stack overflow. That was what we had before 2012.

Global-mixed pull-push (i.e. the application pushes on inputs and drives
by pulling on outputs, but filters are not allowed to make U-turns)
works, with the same caveats as pull-only. That is what we have since
2012.

And on top of that, you have to consider the case of inputs versus
sources that are always ready like testsrc. They bring their own
complications.

Of course, any of these issues can be solved using flags or something,
but the hard part is to solve all of them at the same time. We no longer
are at the simple original design, but a complicated set of workarounds
on top of the original design.

And the new non-recursive design is not more complex than the recursive
one, the one that works. It is the same, plus the code for the FIFO. If
it was done like that in the first place, it would have worked fine. The
complex part in it is the compatibility layer: use filters designed for
the recursive version unchanged in the non-recursive version. Hopefully,
some of that complexity will go away as difficult filters are adapted.

Plus, unlike the recursive design that mandates a depth-first order of
processing, the non-recursive design can work in any order. I suspect it
will give us the local-mixed pull-push mode for almost free. But I have
yet to test.

> differences in corner cases yes, i didnt mean to imply that its
> purely and 100% cosmetic. More that its basically a cosmetic change
> replacing how the more or less same code is triggered and that maybe
> some of this could be done by some gsoc student or other volunteer.
> Aka at least part of this seems time consuming but not highly complex
> work.

Indeed. And I must say it makes a nice change after a year spent
understanding why corner cases in the non-recursive design did not work.

> i dont think not knowing the duration is a problem.
> you need replicated frames possibly elsewere already. Like for
> subtitles, its not much different to duplicating the last frame with
> the remainining to EOF duration to be added to the last 1 or 0 duration
> but i didnt think deeply about this now so i might miss details
> the issue is also not specific to subtitles, audio tracks with "holes"
> in them exist too so do video slidshows. At least in some usecases
> limiting the distance between frames is needed. (for example to
> ensure random access as in keyframes. The issue can to some extend
> be pushed into the container format i guess but for truely streamed
> formats if you dont repeat your video frame and subtitles which
> are currently disaplyed it just wont get displayed if you start viewing
> around that point)
> so to me it seems there are a lot of issues that all can be dealt with
> by some support to replicate frames in long stretches of no frames
> and later drop them if they arent needed, the last EOF duration
> containing frame could then be just another such case
> but again i didnt think deeply about this

That may be true, but I really think that relying only on timestamps
instead of mixing duration (optional) and timestamps (almost mandatory)
will be simpler.

> > > > - Allow to set a callback on buffersinks and buffersrcs to be notified
> > > >   when a frame arrives or is needed. It is much more convenient than
> > > >   walking the buffers to check one by one.
> > You could have said it: cosmetic :)
> i should be more verbose, i said cosmetic but i meant more than just
> what cosmetic means litteraly :)

No, no, I meant: you really could have said it, this change is really
just syntactic sugar.

Regards,

Patch hide | download patch | download mbox

diff --git a/libavfilter/avfilter.h b/libavfilter/avfilter.h
index 828b270..6109e58 100644
--- a/libavfilter/avfilter.h
+++ b/libavfilter/avfilter.h
@@ -377,6 +377,7 @@  struct AVFilterContext {
     unsigned ready;
 };
 
+#if FF_API_AVFILTERLINK_PUBLIC
 /**
  * A link between two filters. This contains pointers to the source and
  * destination filters between which this link exists, and the indexes of
@@ -593,6 +594,7 @@  struct AVFilterLink {
 #endif /* FF_INTERNAL_FIELDS */
 
 };
+#endif /* FF_API_AVFILTERLINK_PUBLIC */
 
 /**
  * Link two filters together.
diff --git a/libavfilter/internal.h b/libavfilter/internal.h
index a8b69fd..599be24 100644
--- a/libavfilter/internal.h
+++ b/libavfilter/internal.h
@@ -145,6 +145,205 @@  struct AVFilterPad {
     int needs_writable;
 };
 
+#if !FF_API_AVFILTERLINK_PUBLIC
+/**
+ * A link between two filters. This contains pointers to the source and
+ * destination filters between which this link exists, and the indexes of
+ * the pads involved. In addition, this link also contains the parameters
+ * which have been negotiated and agreed upon between the filter, such as
+ * image dimensions, format, etc.
+ */
+struct AVFilterLink {
+    AVFilterContext *src;       ///< source filter
+    AVFilterPad *srcpad;        ///< output pad on the source filter
+
+    AVFilterContext *dst;       ///< dest filter
+    AVFilterPad *dstpad;        ///< input pad on the dest filter
+
+    enum AVMediaType type;      ///< filter media type
+
+    /* These parameters apply only to video */
+    int w;                      ///< agreed upon image width
+    int h;                      ///< agreed upon image height
+    AVRational sample_aspect_ratio; ///< agreed upon sample aspect ratio
+    /* These parameters apply only to audio */
+    uint64_t channel_layout;    ///< channel layout of current buffer (see libavutil/channel_layout.h)
+    int sample_rate;            ///< samples per second
+
+    int format;                 ///< agreed upon media format
+
+    /**
+     * Define the time base used by the PTS of the frames/samples
+     * which will pass through this link.
+     * During the configuration stage, each filter is supposed to
+     * change only the output timebase, while the timebase of the
+     * input link is assumed to be an unchangeable property.
+     */
+    AVRational time_base;
+
+    /**
+     * Lists of formats and channel layouts supported by the input and output
+     * filters respectively. These lists are used for negotiating the format
+     * to actually be used, which will be loaded into the format and
+     * channel_layout members, above, when chosen.
+     *
+     */
+    AVFilterFormats *in_formats;
+    AVFilterFormats *out_formats;
+
+    /**
+     * Lists of channel layouts and sample rates used for automatic
+     * negotiation.
+     */
+    AVFilterFormats  *in_samplerates;
+    AVFilterFormats *out_samplerates;
+    struct AVFilterChannelLayouts  *in_channel_layouts;
+    struct AVFilterChannelLayouts *out_channel_layouts;
+
+    /**
+     * Audio only, the destination filter sets this to a non-zero value to
+     * request that buffers with the given number of samples should be sent to
+     * it. AVFilterPad.needs_fifo must also be set on the corresponding input
+     * pad.
+     * Last buffer before EOF will be padded with silence.
+     */
+    int request_samples;
+
+    /** stage of the initialization of the link properties (dimensions, etc) */
+    enum {
+        AVLINK_UNINIT = 0,      ///< not started
+        AVLINK_STARTINIT,       ///< started, but incomplete
+        AVLINK_INIT             ///< complete
+    } init_state;
+
+    /**
+     * Graph the filter belongs to.
+     */
+    struct AVFilterGraph *graph;
+
+    /**
+     * Current timestamp of the link, as defined by the most recent
+     * frame(s), in link time_base units.
+     */
+    int64_t current_pts;
+
+    /**
+     * Current timestamp of the link, as defined by the most recent
+     * frame(s), in AV_TIME_BASE units.
+     */
+    int64_t current_pts_us;
+
+    /**
+     * Index in the age array.
+     */
+    int age_index;
+
+    /**
+     * Frame rate of the stream on the link, or 1/0 if unknown or variable;
+     * if left to 0/0, will be automatically copied from the first input
+     * of the source filter if it exists.
+     *
+     * Sources should set it to the best estimation of the real frame rate.
+     * If the source frame rate is unknown or variable, set this to 1/0.
+     * Filters should update it if necessary depending on their function.
+     * Sinks can use it to set a default output frame rate.
+     * It is similar to the r_frame_rate field in AVStream.
+     */
+    AVRational frame_rate;
+
+    /**
+     * Buffer partially filled with samples to achieve a fixed/minimum size.
+     */
+    AVFrame *partial_buf;
+
+    /**
+     * Size of the partial buffer to allocate.
+     * Must be between min_samples and max_samples.
+     */
+    int partial_buf_size;
+
+    /**
+     * Minimum number of samples to filter at once. If filter_frame() is
+     * called with fewer samples, it will accumulate them in partial_buf.
+     * This field and the related ones must not be changed after filtering
+     * has started.
+     * If 0, all related fields are ignored.
+     */
+    int min_samples;
+
+    /**
+     * Maximum number of samples to filter at once. If filter_frame() is
+     * called with more samples, it will split them.
+     */
+    int max_samples;
+
+    /**
+     * Number of channels.
+     */
+    int channels;
+
+    /**
+     * Link processing flags.
+     */
+    unsigned flags;
+
+    /**
+     * Number of past frames sent through the link.
+     */
+    int64_t frame_count_in, frame_count_out;
+
+    /**
+     * A pointer to a FFVideoFramePool struct.
+     */
+    void *video_frame_pool;
+
+    /**
+     * True if a frame is currently wanted on the output of this filter.
+     * Set when ff_request_frame() is called by the output,
+     * cleared when a frame is filtered.
+     */
+    int frame_wanted_out;
+
+    /**
+     * For hwaccel pixel formats, this should be a reference to the
+     * AVHWFramesContext describing the frames.
+     */
+    AVBufferRef *hw_frames_ctx;
+
+    /**
+     * Queue of frames waiting to be filtered.
+     */
+    FFFrameQueue fifo;
+
+    /**
+     * If set, the source filter can not generate a frame as is.
+     * The goal is to avoid repeatedly calling the request_frame() method on
+     * the same link.
+     */
+    int frame_blocked_in;
+
+    /**
+     * Link input status.
+     * If not zero, all attempts of filter_frame will fail with the
+     * corresponding code.
+     */
+    int status_in;
+
+    /**
+     * Timestamp of the input status change.
+     */
+    int64_t status_in_pts;
+
+    /**
+     * Link output status.
+     * If not zero, all attempts of request_frame will fail with the
+     * corresponding code.
+     */
+    int status_out;
+
+};
+#endif /* !FF_API_AVFILTERLINK_PUBLIC */
+
 struct AVFilterGraphInternal {
     void *thread;
     avfilter_execute_func *thread_execute;
diff --git a/libavfilter/version.h b/libavfilter/version.h
index e3bd8d0..8256781 100644
--- a/libavfilter/version.h
+++ b/libavfilter/version.h
@@ -67,5 +67,8 @@ 
 #ifndef FF_API_NOCONST_GET_NAME
 #define FF_API_NOCONST_GET_NAME             (LIBAVFILTER_VERSION_MAJOR < 7)
 #endif
+#ifndef FF_API_AVFILTERLINK_PUBLIC
+#define FF_API_AVFILTERLINK_PUBLIC          (LIBAVFILTER_VERSION_MAJOR < 8)
+#endif
 
 #endif /* AVFILTER_VERSION_H */