diff mbox series

[FFmpeg-devel,4/4] avfilter/vf_v360: refactor (i)flat_range for fisheye

Message ID 20210321124414.1223626-4-daniel.playfair.cal@gmail.com
State New
Headers show
Series [FFmpeg-devel,1/4] avfilter/vf_v360: fix fov_from_hfov for fisheye | expand

Checks

Context Check Description
andriy/x86_make success Make finished
andriy/x86_make_fate success Make fate finished
andriy/PPC64_make success Make finished
andriy/PPC64_make_fate success Make fate finished

Commit Message

Daniel Playfair Cal March 21, 2021, 12:44 p.m. UTC
This changes the iflat_range and flat_range values for the fisheye
projection to match their meaning for the flat/rectilinear projection.
That is, the range is between the two x or two y coordinates of the
outermost points above/below or left/right of the center, in the
flat/rectilinear projection.

Signed-off-by: Daniel Playfair Cal <daniel.playfair.cal@gmail.com>
---
 libavfilter/vf_v360.c | 19 +++++++++----------
 1 file changed, 9 insertions(+), 10 deletions(-)
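
In concrete terms (a rough reading of the hunks quoted below, using h_fov = 180
as an example):

    s->flat_range[0] = s->h_fov / 180.f;                  /* before: 180 / 180          = 1.0  */
    s->flat_range[0] = 0.5f * s->h_fov * M_PI / 180.f;    /* after:  0.5 * 180 * pi/180 = pi/2 */

so the stored value changes from the FoV normalized to 180 degrees to the
half-FoV expressed in radians, and the factors in fisheye_to_xyz and
xyz_to_fisheye are rescaled so that, taken on its own, this patch leaves the
computed mapping unchanged.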

Comments

Paul B Mahol March 21, 2021, 4:30 p.m. UTC | #1
Sorry, but I cannot apply this set as is; it introduces at least one serious
regression.

For example try this filtergraph:

v360=input=e:output=fisheye:h_fov=180:v_fov=180,v360=input=fisheye:output=e:ih_fov=180:iv_fov=180
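
(For completeness, one way to run that round trip, assuming a hypothetical
input file in.jpg:

    ffmpeg -i in.jpg -vf "v360=input=e:output=fisheye:h_fov=180:v_fov=180,v360=input=fisheye:output=e:ih_fov=180:iv_fov=180" out.jpg

and then comparing out.jpg with the original.)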

On Sun, Mar 21, 2021 at 1:45 PM Daniel Playfair Cal <
daniel.playfair.cal@gmail.com> wrote:

> This changes the iflat_range and flat_range values for the fisheye
> projection to match their meaning for the flat/rectilinear projection.
> That is, the range is between the two x or two y coordinates of the
> outermost points above/below or left/right of the center, in the
> flat/rectilinear projection.
>
> Signed-off-by: Daniel Playfair Cal <daniel.playfair.cal@gmail.com>
> ---
>  libavfilter/vf_v360.c | 19 +++++++++----------
>  1 file changed, 9 insertions(+), 10 deletions(-)
>
> diff --git a/libavfilter/vf_v360.c b/libavfilter/vf_v360.c
> index 68bb2f7b0f..3158451963 100644
> --- a/libavfilter/vf_v360.c
> +++ b/libavfilter/vf_v360.c
> @@ -2807,9 +2807,8 @@ static int prepare_fisheye_out(AVFilterContext *ctx)
>  {
>      V360Context *s = ctx->priv;
>
> -    s->flat_range[0] = s->h_fov / 180.f;
> -    s->flat_range[1] = s->v_fov / 180.f;
> -
> +    s->flat_range[0] = 0.5f * s->h_fov * M_PI / 180.f;
> +    s->flat_range[1] = 0.5f * s->v_fov * M_PI / 180.f;
>      return 0;
>  }
>
> @@ -2827,8 +2826,8 @@ static int fisheye_to_xyz(const V360Context *s,
>                            int i, int j, int width, int height,
>                            float *vec)
>  {
> -    const float uf = s->flat_range[0] * ((2.f * i) / width  - 1.f);
> -    const float vf = s->flat_range[1] * ((2.f * j + 1.f) / height - 1.f);
> +    const float uf = 2.f * s->flat_range[0] / M_PI * ((2.f * i) / width  - 1.f);
> +    const float vf = 2.f * s->flat_range[1] / M_PI * ((2.f * j + 1.f) / height - 1.f);
>
>      const float phi   = atan2f(vf, uf);
>      const float theta = M_PI_2 * (1.f - hypotf(uf, vf));
> @@ -2858,8 +2857,8 @@ static int prepare_fisheye_in(AVFilterContext *ctx)
>  {
>      V360Context *s = ctx->priv;
>
> -    s->iflat_range[0] = s->ih_fov / 180.f;
> -    s->iflat_range[1] = s->iv_fov / 180.f;
> +    s->iflat_range[0] = 0.5f * s->ih_fov * M_PI / 180.f;
> +    s->iflat_range[1] = 0.5f * s->iv_fov * M_PI / 180.f;
>
>      return 0;
>  }
> @@ -2882,10 +2881,10 @@ static int xyz_to_fisheye(const V360Context *s,
>  {
>      const float h   = hypotf(vec[0], vec[1]);
>      const float lh  = h > 0.f ? h : 1.f;
> -    const float phi = atan2f(h, vec[2]) / M_PI;
> +    const float phi = atan2f(h, vec[2]);
>
> -    float uf = vec[0] / lh * phi / s->iflat_range[0];
> -    float vf = vec[1] / lh * phi / s->iflat_range[1];
> +    float uf = 0.5f * vec[0] / lh * phi / s->iflat_range[0];
> +    float vf = 0.5f * vec[1] / lh * phi / s->iflat_range[1];
>
>      const int visible = -0.5f < uf && uf < 0.5f && -0.5f < vf && vf < 0.5f;
>      int ui, vi;
> --
> 2.31.0
Daniel Playfair Cal March 22, 2021, 4:09 a.m. UTC | #2
I've tried that filtergraph and a few other similar ones and I'm not sure
what you mean - what exactly is the regression?

I tried it on this image with an equirectangular projection:
https://wiki.panotools.org/images/0/01/Big_ben_equirectangular.jpg

The only difference I can see is that there are fewer unmapped areas in the
output with the patches, because the final mapping from the output
equirectangular image to the intermediate fisheye image no longer fails to
map some areas that are present in the fisheye image. I would describe
this as an improvement?

On Mon, Mar 22, 2021 at 3:30 AM Paul B Mahol <onemda@gmail.com> wrote:

> Sorry, but I cannot apply this set as is; it introduces at least one serious
> regression.
>
> For example try this filtergraph:
>
>
> v360=input=e:output=fisheye:h_fov=180:v_fov=180,v360=input=fisheye:output=e:ih_fov=180:iv_fov=180
Paul B Mahol March 22, 2021, 6:58 a.m. UTC | #3
On Mon, Mar 22, 2021 at 5:09 AM Daniel Playfair Cal <
daniel.playfair.cal@gmail.com> wrote:

> I've tried that filtergraph and a few other similar ones and I'm not sure
> what you mean - what exactly is the regression?
>
> I tried it on this image with an equirectangular projection:
> https://wiki.panotools.org/images/0/01/Big_ben_equirectangular.jpg
>
> The only difference I can see is that there are fewer unmapped areas in the
> output with the patches, because the final mapping from the output
> equirectangular image to the intermediate fisheye image no longer fails to
> map some areas that are present in the fisheye image. I would describe
> this as an improvement?
>

I disagree; if I use 180 hfov and 180 vfov it should not have extra areas,
but only half of the previous input.


Daniel Playfair Cal March 22, 2021, 12:34 p.m. UTC | #4
> I disagree; if I use 180 hfov and 180 vfov it should not have extra areas,
> but only half of the previous input.

Not sure I follow - the ih_fov and iv_fov refer to the input (i.e. the
fisheye image). If you wanted to restrict the FoV of the output, surely the
way to do that would be to implement and use the FoV settings for the
equirectangular projection? It doesn't seem right that the code for the
input projection is responsible for deciding what appears in the output. My
understanding was that the FoV settings simply describe the focal length of
the input or output camera so that points in the images can be mapped
to/from 3D coordinates.

To give you an idea of what I am trying to fix, here is an example input:
https://photos.app.goo.gl/o51NfY6aqWn3unPG6
This is a 1920x1440 image taken on a GoPro Hero 5 black with the 4:3 Wide
FoV setting and stabilisation disabled.

The following filtergraph demonstrates the issues:
'v360=input=fisheye:ih_fov=116.66:iv_fov=87.50:output=flat:d_fov=145.8'
 1. the dfov_from_hfov issue is worked around by the use of ih_fov and
iv_fov instead of id_fov, although you can try with id_fov=145.8 to see
that problem too
 2. by default the output has double the aspect ratio of the input, even
though the fisheye -> rectilinear transformation doesn't change the aspect
ratio (assuming the entire input image is included as it is in this example)
 3. much of the input is not visible in the output even though there is a
mapping between the chosen projections (changed in the visibility test
patch)

Issue 3 in particular I don't think can be solved by changing the settings -
the input field of view needs to match the FoV of the input camera, otherwise
the mapping is wrong. But it seems there is no other way to include the
entire input from a fisheye image.
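
(As a quick sanity check on those FoV numbers, assuming the equidistant fisheye
model r = f * theta: on a rectangular image the corner radius is the hypotenuse
of the half-width and half-height radii, so the diagonal FoV follows as

    sqrt(116.66^2 + 87.50^2) which is approximately 145.8

matching the d_fov used above.)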

Paul B Mahol March 22, 2021, 4:46 p.m. UTC | #5
On Mon, Mar 22, 2021 at 1:35 PM Daniel Playfair Cal <
daniel.playfair.cal@gmail.com> wrote:

> > I disagree; if I use 180 hfov and 180 vfov it should not have extra areas,
> > but only half of the previous input.
>
> Not sure I follow - the ih_fov and iv_fov refer to the input (i.e. the
> fisheye image). If you wanted to restrict the FoV of the output, surely the
> way to do that would be to implement and use the FoV settings for the
> equirectangular projection? It doesn't seem right that the code for the
> input projection is responsible for deciding what appears in the output. My
> understanding was that the FoV settings simply describe the focal length of
> the input or output camera so that points in the images can be mapped
> to/from 3D coordinates.
>
>
Take any equirectangular input and convert it to fisheye and then back to
equirectangular.
Or just take pure fisheye input with 180 h & v fov and convert it to
equirectangular. There is plenty of such video content on the ESA website.

> To give you an idea of what I am trying to fix, here is an example input:
> https://photos.app.goo.gl/o51NfY6aqWn3unPG6
> This is a 1920x1440 image taken on a GoPro Hero 5 black with the 4:3 Wide
> FoV setting and stabilisation disabled.
>
>
That is a flat take of something else, not real fisheye input.


Daniel Playfair Cal March 23, 2021, 4 a.m. UTC | #6
What exactly is your definition of fisheye?

The definition I'm working with is the equidistant fisheye projection as
described here: https://wiki.panotools.org/Fisheye_Projection, i.e.
r = f * theta.

That mapping works for any theta, and you can have a circular image with a
field of view of up to 360 degrees before anything is repeated and the
inverse mapping becomes ambiguous. Hence my assumption that a rectangular
output image with a 180 degree horizontal/vertical field of view should still
contain areas near the corners where theta > 90 (because the diagonal FoV is >
180), and these should still be mapped from such an image to an
equirectangular projection.

Do you prefer for some reason to limit the fisheye projection to 180
degrees on any axis, i.e. have the constraint that theta <= 90? If that's
the case I could patch xyz_to_fisheye and fisheye_to_xyz so that such areas
are marked as invisible; that causes your example filtergraph to work as
before.
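
A minimal sketch of that model (not vf_v360's actual code or its coordinate
conventions; the function name and the choice of +z as the optical axis are
assumptions made for illustration):

    #include <math.h>

    /* Map a normalized fisheye image position (x, y in [-1, 1]) to a unit view
     * direction under the equidistant model r = f * theta, given half the
     * horizontal/vertical FoV in radians. Nothing here clamps theta, so corner
     * samples with theta > pi/2 (diagonal FoV > 180 degrees) still get mapped;
     * a theta <= pi/2 test is where a visibility limit would go. */
    static void equidistant_to_dir(float x, float y,
                                   float half_h_fov, float half_v_fov,
                                   float dir[3])
    {
        const float u     = x * half_h_fov;   /* angular offsets, in radians */
        const float v     = y * half_v_fov;
        const float theta = hypotf(u, v);     /* angle from the optical axis */
        const float phi   = atan2f(v, u);     /* angle around the axis */

        dir[0] = sinf(theta) * cosf(phi);
        dir[1] = sinf(theta) * sinf(phi);
        dir[2] = cosf(theta);                 /* optical axis along +z */
    }

Inverting it gives expressions of the same shape as the patched xyz_to_fisheye
hunk, with the angle from the axis scaled by the half-FoV stored in iflat_range.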

Paul B Mahol March 23, 2021, 6:14 a.m. UTC | #7
On Tue, Mar 23, 2021 at 5:00 AM Daniel Playfair Cal <
daniel.playfair.cal@gmail.com> wrote:

> What exactly is your definition of fisheye?
>

Take a look at the source code. I do not see how your definition matches the
one in the source code.


Daniel Playfair Cal March 23, 2021, 6:31 a.m. UTC | #8
Do you agree with my definition or not? And which code are you referring to
- the master branch or my patches?

I'd like to get these patches to a point where they can be applied, but
it's going to be difficult if we can't agree on the goal.

Paul B Mahol March 23, 2021, 5:59 p.m. UTC | #9
On Tue, Mar 23, 2021 at 7:32 AM Daniel Playfair Cal <
daniel.playfair.cal@gmail.com> wrote:

> Do you agree with my definition or not? And which code are you referring
> to - the master branch or my patches?
>

Just do not introduce regressions into existing code.

>>>>>>>>> ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
>>>>>>>>
>>>>>>>>
Paul B Mahol April 10, 2021, 11:37 p.m. UTC | #10
On Mon, Mar 22, 2021 at 1:35 PM Daniel Playfair Cal <
daniel.playfair.cal@gmail.com> wrote:

> > I disagree, if I use 180 hfov and 180 vfov it should not have extra
> areas but only half of previous input.
>
> Not sure I follow - the ih_fov and vh_fov refer to the input (i.e. the
> fisheye image). If you wanted to restrict the FoV of the output, surely the
> way to do that would be to implement and use the FoV settings for the
> equirectangular projection?. It doesn't seem right that the code for the
> input projection is responsible for deciding what appears in the output. My
> understanding was that the FoV settings simply describe the focal length of
> the input or output camera so that points in the images can me mapped
> to/from 3d coordinates.
>
> To give you an idea of what I am trying to fix, here is an example input:
> https://photos.app.goo.gl/o51NfY6aqWn3unPG6
> This is a 1920x1440 image taken on a GoPro Hero 5 black with the 4:3 Wide
> FoV setting and stabilisation disabled.
>
> The following filtergraph demonstrates the issues:
> 'v360=input=fisheye:ih_fov=116.66:iv_fov=87.50:output=flat:d_fov=145.8'
>  1. the dfov_from_hfov issue is worked around by the use of ih_fov and
> iv_fov instead of id_fov, although you can try with id_fov=145.8 to see
> that problem too
>

AFAIK, the h/v/d fov works fine with fisheye in/out. I used synthetic
fisheye images from Paul Bourke's site.
And the diagonal fov derived from w/h either works with both in and out, or not at all.


>  2. by default the output has double the aspect ratio of the input, even
> though the fisheye -> rectilinear transformation doesn't change the aspect
> ratio (assuming the entire input image is included as it is in this example)
>  3. much of the input is not visible in the output even though there is a
> mapping between the chosen projections (changed in the visibility test
> patch)
>
> 3 in particular I don't think can be solved by changing the settings - the
> input field of view needs to match the FoV of the input camera, otherwise
> the mapping is wrong. But it seems there is no other way to include the
> entire input from a fisheye image.
>

So you want to not discard pixels outside the circle defined by the fov?
That generally does not make sense to me, as those areas may not have
actual pixels that belong in the output.

Make sure that your files have the correct projection; fisheye in v360 is
a strict equidistant mapping, which may not be what your input actually is.



>
> On Mon, Mar 22, 2021 at 5:59 PM Paul B Mahol <onemda@gmail.com> wrote:
>
>>
>>
>> On Mon, Mar 22, 2021 at 5:09 AM Daniel Playfair Cal <
>> daniel.playfair.cal@gmail.com> wrote:
>>
>>> I've tried that filtergraph and a few other similar ones and I'm not
>>> sure what you mean - what exactly is the regression?
>>>
>>> I tried it on this image with an equirectangular projection:
>>> https://wiki.panotools.org/images/0/01/Big_ben_equirectangular.jpg
>>>
>>> The only difference I can see is that there are less unmapped areas in
>>> the output with the patches, because the final mapping from the output
>>> equirectangular image to the intermediate fisheye image no longer fails to
>>> map some areas which are present in the fisheye image. I would describe
>>> this as an improvement?
>>>
>>
>> I disagree, if I use 180 hfov and 180 vfov it should not have extra areas
>> but only half of previous input.
>>
>>
>>>
>>> On Mon, Mar 22, 2021 at 3:30 AM Paul B Mahol <onemda@gmail.com> wrote:
>>>
>>>> Sorry, but I cannot apply this set as is, It makes at least one serious
>>>> regression.
>>>>
>>>> For example try this filtergraph:
>>>>
>>>>
>>>> v360=input=e:output=fisheye:h_fov=180:v_fov=180,v360=input=fisheye:output=e:ih_fov=180:iv_fov=180
>>>>
>>>> On Sun, Mar 21, 2021 at 1:45 PM Daniel Playfair Cal <
>>>> daniel.playfair.cal@gmail.com> wrote:
>>>>
>>>>> This changes the iflat_range and flat_range values for the fisheye
>>>>> projection to match their meaning for the flat/rectilinear projection.
>>>>> That is, the range is between the two x or two y coordinates of the
>>>>> outermost points above/below or left/right of the center, in the
>>>>> flat/rectilinear projection.
>>>>>
>>>>> Signed-off-by: Daniel Playfair Cal <daniel.playfair.cal@gmail.com>
>>>>> ---
>>>>>  libavfilter/vf_v360.c | 19 +++++++++----------
>>>>>  1 file changed, 9 insertions(+), 10 deletions(-)
>>>>>
>>>>> diff --git a/libavfilter/vf_v360.c b/libavfilter/vf_v360.c
>>>>> index 68bb2f7b0f..3158451963 100644
>>>>> --- a/libavfilter/vf_v360.c
>>>>> +++ b/libavfilter/vf_v360.c
>>>>> @@ -2807,9 +2807,8 @@ static int prepare_fisheye_out(AVFilterContext
>>>>> *ctx)
>>>>>  {
>>>>>      V360Context *s = ctx->priv;
>>>>>
>>>>> -    s->flat_range[0] = s->h_fov / 180.f;
>>>>> -    s->flat_range[1] = s->v_fov / 180.f;
>>>>> -
>>>>> +    s->flat_range[0] = 0.5f * s->h_fov * M_PI / 180.f;
>>>>> +    s->flat_range[1] = 0.5f * s->v_fov * M_PI / 180.f;
>>>>>      return 0;
>>>>>  }
>>>>>
>>>>> @@ -2827,8 +2826,8 @@ static int fisheye_to_xyz(const V360Context *s,
>>>>>                            int i, int j, int width, int height,
>>>>>                            float *vec)
>>>>>  {
>>>>> -    const float uf = s->flat_range[0] * ((2.f * i) / width  - 1.f);
>>>>> -    const float vf = s->flat_range[1] * ((2.f * j + 1.f) / height -
>>>>> 1.f);
>>>>> +    const float uf = 2.f * s->flat_range[0] / M_PI * ((2.f * i) /
>>>>> width  - 1.f);
>>>>> +    const float vf = 2.f * s->flat_range[1] / M_PI * ((2.f * j + 1.f)
>>>>> / height - 1.f);
>>>>>
>>>>>      const float phi   = atan2f(vf, uf);
>>>>>      const float theta = M_PI_2 * (1.f - hypotf(uf, vf));
>>>>> @@ -2858,8 +2857,8 @@ static int prepare_fisheye_in(AVFilterContext
>>>>> *ctx)
>>>>>  {
>>>>>      V360Context *s = ctx->priv;
>>>>>
>>>>> -    s->iflat_range[0] = s->ih_fov / 180.f;
>>>>> -    s->iflat_range[1] = s->iv_fov / 180.f;
>>>>> +    s->iflat_range[0] = 0.5f * s->ih_fov * M_PI / 180.f;
>>>>> +    s->iflat_range[1] = 0.5f * s->iv_fov * M_PI / 180.f;
>>>>>
>>>>>      return 0;
>>>>>  }
>>>>> @@ -2882,10 +2881,10 @@ static int xyz_to_fisheye(const V360Context *s,
>>>>>  {
>>>>>      const float h   = hypotf(vec[0], vec[1]);
>>>>>      const float lh  = h > 0.f ? h : 1.f;
>>>>> -    const float phi = atan2f(h, vec[2]) / M_PI;
>>>>> +    const float phi = atan2f(h, vec[2]);
>>>>>
>>>>> -    float uf = vec[0] / lh * phi / s->iflat_range[0];
>>>>> -    float vf = vec[1] / lh * phi / s->iflat_range[1];
>>>>> +    float uf = 0.5f * vec[0] / lh * phi / s->iflat_range[0];
>>>>> +    float vf = 0.5f * vec[1] / lh * phi / s->iflat_range[1];
>>>>>
>>>>>      const int visible = -0.5f < uf && uf < 0.5f && -0.5f < vf && vf <
>>>>> 0.5f;
>>>>>      int ui, vi;
>>>>> --
>>>>> 2.31.0
>>>>>
>>>>> _______________________________________________
>>>>> ffmpeg-devel mailing list
>>>>> ffmpeg-devel@ffmpeg.org
>>>>> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>>>>>
>>>>> To unsubscribe, visit link above, or email
>>>>> ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
>>>>
>>>>
Daniel Playfair Cal April 11, 2021, 4:47 a.m. UTC | #11
> AFAIK, the h/v/d fov works fine with fisheye in/out. I used synthetic fisheye images from paul bourke site.

> And diagonal fov from w/h either works with both in and out or not at all.

That doesn't seem correct. If an image with an equidistant projection
is of dimensions WxH and has the focal point at the center, then the
fields of view are related as follows for some value of f:
horizontal: f*W
vertical: f*H
diagonal: f*sqrt( W^2 + H^2 )

In the example I gave, v360 chooses horizontal/vertical fields of view
that are incompatible with the diagonal field of view provided.
Therefore (assuming 1:1 pixel aspect ratio), since
sqrt(116.66^2+87.5^2) = 145.83, `input=fisheye:id_fov=145.83` should
behave the same as `input=fisheye:ih_fov=116.66:iv_fov=87.50` in terms
of the focal length. It doesn't, so I think this is a bug.
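
To make that concrete, here is a minimal standalone sketch of the
relationship (a hypothetical helper, not code from vf_v360.c; it assumes a
strict equidistant mapping, the focal point at the image center, and square
pixels):

#include <math.h>
#include <stdio.h>

/* For an equidistant fisheye, the FoV along any direction is proportional
 * to the pixel distance it spans, so the diagonal FoV follows directly
 * from the horizontal and vertical ones. */
static float dfov_from_hv_fov(float h_fov, float v_fov)
{
    return hypotf(h_fov, v_fov);
}

int main(void)
{
    /* GoPro example from this thread: 116.66 x 87.50 degrees. */
    printf("d_fov = %.2f\n", dfov_from_hv_fov(116.66f, 87.50f)); /* ~145.83 */
    return 0;
}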

> So you want to not discard pixels out of circle defined by fov? That generally does not make sense to me as that may not gonna have actual pixels that belong to output.

What circle/FoV are you referring to? The FoV depends on which points
on the image you choose to compare (for example the
horizontal/vertical/diagonal FoVs are all different).
Sometimes/usually there is no circle which describes which points in
the image can be correctly mapped.

If a pixel in an image lies at a point where the projection of that
image is not defined, then yes it should be discarded. I think this is
correct in my patches.

> Make sure that your files have correct projection, fisheye in v360 is strict equidistant mapping, and may not be what your input is actually.

Yes, I agree we shouldn't assume anything based on the
content/appearance of the images from my GoPro. But it's possible to
make a rectangular image of 1920x1440 pixels with an equidistant
fisheye projection where the horizontal, vertical, and diagonal FoVs are
116.66, 87.5, and 145.83 degrees. All the pixels in that image are defined in
the equidistant projection, so I think it should be possible for v360
to map all the pixels to an appropriate output. If my image is not a
real equidistant fisheye projection, or if my FoV measurements are
wrong, then the chessboard won't have the right shape in the output.
This is not the problem I am trying to solve here.

On Sun, Apr 11, 2021 at 9:37 AM Paul B Mahol <onemda@gmail.com> wrote:
>
>
>
> On Mon, Mar 22, 2021 at 1:35 PM Daniel Playfair Cal <daniel.playfair.cal@gmail.com> wrote:
>>
>> > I disagree, if I use 180 hfov and 180 vfov it should not have extra areas but only half of previous input.
>>
>> Not sure I follow - the ih_fov and vh_fov refer to the input (i.e. the fisheye image). If you wanted to restrict the FoV of the output, surely the way to do that would be to implement and use the FoV settings for the equirectangular projection?. It doesn't seem right that the code for the input projection is responsible for deciding what appears in the output. My understanding was that the FoV settings simply describe the focal length of the input or output camera so that points in the images can me mapped to/from 3d coordinates.
>>
>> To give you an idea of what I am trying to fix, here is an example input: https://photos.app.goo.gl/o51NfY6aqWn3unPG6
>> This is a 1920x1440 image taken on a GoPro Hero 5 black with the 4:3 Wide FoV setting and stabilisation disabled.
>>
>> The following filtergraph demonstrates the issues: 'v360=input=fisheye:ih_fov=116.66:iv_fov=87.50:output=flat:d_fov=145.8'
>>  1. the dfov_from_hfov issue is worked around by the use of ih_fov and iv_fov instead of id_fov, although you can try with id_fov=145.8 to see that problem too
>
>
> AFAIK, the h/v/d fov works fine with fisheye in/out. I used synthetic fisheye images from paul bourke site.
> And diagonal fov from w/h either works with both in and out or not at all.
>
>>
>>  2. by default the output has double the aspect ratio of the input, even though the fisheye -> rectilinear transformation doesn't change the aspect ratio (assuming the entire input image is included as it is in this example)
>>  3. much of the input is not visible in the output even though there is a mapping between the chosen projections (changed in the visibility test patch)
>>
>> 3 in particular I don't think can be solved by changing the settings - the input field of view needs to match the FoV of the input camera, otherwise the mapping is wrong. But it seems there is no other way to include the entire input from a fisheye image.
>
>
> So you want to not discard pixels out of circle defined by fov? That generally does not make sense to me as that may not gonna have actual pixels that belong to output.
>
> Make sure that your files have correct projection, fisheye in v360 is strict equidistant mapping, and may not be what your input is actually.
>
>
>>
>>
>> On Mon, Mar 22, 2021 at 5:59 PM Paul B Mahol <onemda@gmail.com> wrote:
>>>
>>>
>>>
>>> On Mon, Mar 22, 2021 at 5:09 AM Daniel Playfair Cal <daniel.playfair.cal@gmail.com> wrote:
>>>>
>>>> I've tried that filtergraph and a few other similar ones and I'm not sure what you mean - what exactly is the regression?
>>>>
>>>> I tried it on this image with an equirectangular projection: https://wiki.panotools.org/images/0/01/Big_ben_equirectangular.jpg
>>>>
>>>> The only difference I can see is that there are less unmapped areas in the output with the patches, because the final mapping from the output equirectangular image to the intermediate fisheye image no longer fails to map some areas which are present in the fisheye image. I would describe this as an improvement?
>>>
>>>
>>> I disagree, if I use 180 hfov and 180 vfov it should not have extra areas but only half of previous input.
>>>
>>>>
>>>>
>>>> On Mon, Mar 22, 2021 at 3:30 AM Paul B Mahol <onemda@gmail.com> wrote:
>>>>>
>>>>> Sorry, but I cannot apply this set as is, It makes at least one serious regression.
>>>>>
>>>>> For example try this filtergraph:
>>>>>
>>>>> v360=input=e:output=fisheye:h_fov=180:v_fov=180,v360=input=fisheye:output=e:ih_fov=180:iv_fov=180
>>>>>
>>>>> On Sun, Mar 21, 2021 at 1:45 PM Daniel Playfair Cal <daniel.playfair.cal@gmail.com> wrote:
>>>>>>
>>>>>> This changes the iflat_range and flat_range values for the fisheye
>>>>>> projection to match their meaning for the flat/rectilinear projection.
>>>>>> That is, the range is between the two x or two y coordinates of the
>>>>>> outermost points above/below or left/right of the center, in the
>>>>>> flat/rectilinear projection.
>>>>>>
>>>>>> Signed-off-by: Daniel Playfair Cal <daniel.playfair.cal@gmail.com>
>>>>>> ---
>>>>>>  libavfilter/vf_v360.c | 19 +++++++++----------
>>>>>>  1 file changed, 9 insertions(+), 10 deletions(-)
>>>>>>
>>>>>> diff --git a/libavfilter/vf_v360.c b/libavfilter/vf_v360.c
>>>>>> index 68bb2f7b0f..3158451963 100644
>>>>>> --- a/libavfilter/vf_v360.c
>>>>>> +++ b/libavfilter/vf_v360.c
>>>>>> @@ -2807,9 +2807,8 @@ static int prepare_fisheye_out(AVFilterContext *ctx)
>>>>>>  {
>>>>>>      V360Context *s = ctx->priv;
>>>>>>
>>>>>> -    s->flat_range[0] = s->h_fov / 180.f;
>>>>>> -    s->flat_range[1] = s->v_fov / 180.f;
>>>>>> -
>>>>>> +    s->flat_range[0] = 0.5f * s->h_fov * M_PI / 180.f;
>>>>>> +    s->flat_range[1] = 0.5f * s->v_fov * M_PI / 180.f;
>>>>>>      return 0;
>>>>>>  }
>>>>>>
>>>>>> @@ -2827,8 +2826,8 @@ static int fisheye_to_xyz(const V360Context *s,
>>>>>>                            int i, int j, int width, int height,
>>>>>>                            float *vec)
>>>>>>  {
>>>>>> -    const float uf = s->flat_range[0] * ((2.f * i) / width  - 1.f);
>>>>>> -    const float vf = s->flat_range[1] * ((2.f * j + 1.f) / height - 1.f);
>>>>>> +    const float uf = 2.f * s->flat_range[0] / M_PI * ((2.f * i) / width  - 1.f);
>>>>>> +    const float vf = 2.f * s->flat_range[1] / M_PI * ((2.f * j + 1.f) / height - 1.f);
>>>>>>
>>>>>>      const float phi   = atan2f(vf, uf);
>>>>>>      const float theta = M_PI_2 * (1.f - hypotf(uf, vf));
>>>>>> @@ -2858,8 +2857,8 @@ static int prepare_fisheye_in(AVFilterContext *ctx)
>>>>>>  {
>>>>>>      V360Context *s = ctx->priv;
>>>>>>
>>>>>> -    s->iflat_range[0] = s->ih_fov / 180.f;
>>>>>> -    s->iflat_range[1] = s->iv_fov / 180.f;
>>>>>> +    s->iflat_range[0] = 0.5f * s->ih_fov * M_PI / 180.f;
>>>>>> +    s->iflat_range[1] = 0.5f * s->iv_fov * M_PI / 180.f;
>>>>>>
>>>>>>      return 0;
>>>>>>  }
>>>>>> @@ -2882,10 +2881,10 @@ static int xyz_to_fisheye(const V360Context *s,
>>>>>>  {
>>>>>>      const float h   = hypotf(vec[0], vec[1]);
>>>>>>      const float lh  = h > 0.f ? h : 1.f;
>>>>>> -    const float phi = atan2f(h, vec[2]) / M_PI;
>>>>>> +    const float phi = atan2f(h, vec[2]);
>>>>>>
>>>>>> -    float uf = vec[0] / lh * phi / s->iflat_range[0];
>>>>>> -    float vf = vec[1] / lh * phi / s->iflat_range[1];
>>>>>> +    float uf = 0.5f * vec[0] / lh * phi / s->iflat_range[0];
>>>>>> +    float vf = 0.5f * vec[1] / lh * phi / s->iflat_range[1];
>>>>>>
>>>>>>      const int visible = -0.5f < uf && uf < 0.5f && -0.5f < vf && vf < 0.5f;
>>>>>>      int ui, vi;
>>>>>> --
>>>>>> 2.31.0
>>>>>>
>>>>>> _______________________________________________
>>>>>> ffmpeg-devel mailing list
>>>>>> ffmpeg-devel@ffmpeg.org
>>>>>> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>>>>>>
>>>>>> To unsubscribe, visit link above, or email
>>>>>> ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
Paul B Mahol April 11, 2021, 8:23 a.m. UTC | #12
Ok, dfov for width != height in (d)fisheye has been fixed.

Dunno what to do with the rest of the patches.

The aspect ratio one breaks handling when the input is in equirectangular
format.

On Sun, Apr 11, 2021 at 6:48 AM Daniel Playfair Cal <
daniel.playfair.cal@gmail.com> wrote:

> > AFAIK, the h/v/d fov works fine with fisheye in/out. I used synthetic
> fisheye images from paul bourke site.
>
> > And diagonal fov from w/h either works with both in and out or not at
> all.
>
> That doesn't seem correct. If an image with an equidistant projection
> is of dimensions WxH and has the focal point at the center, then the
> fields of view are related as follows for some value of f:
> horizontal: f*W
> vertical: f*H
> diagonal: f*sqrt( W^2 + H^2 )
>
> In the example I gave, v360 chooses horizontal/vertical fields of view
> that are incompatible with the diagonal field of view provided.
> Therefore (assuming 1:1 pixel aspect ratio), since
> sqrt(116.66^2+87.5^2) = 145.83, `input=fisheye:id_fov=145.83` should
> behave the same as `input=fisheye:ih_fov=116.66:iv_fov=87.50` in terms
> of the focal length. It doesn't, so I think this is a bug.
>
> > So you want to not discard pixels out of circle defined by fov? That
> generally does not make sense to me as that may not gonna have actual
> pixels that belong to output.
>
> What circle/FoV are you referring to? The FoV depends on which points
> on the image you choose to compare (for example the
> horizontal/vertical/diagonal FoVs are all different).
> Sometimes/usually there is no circle which describes which points in
> the image can be correctly mapped.
>
> If a pixel in an image lies at a point where the projection of that
> image is not defined, then yes it should be discarded. I think this is
> correct in my patches.
>
> > Make sure that your files have correct projection, fisheye in v360 is
> strict equidistant mapping, and may not be what your input is actually.
>
> Yes, I agree we shouldn't assume anything based on the
> content/appearance of the images from my GoPro. But it's possible to
> make a rectangular image of 1920x1440 pixels with an equidistant
> fisheye projection where the horizontal, vertical, and diagonal FoV is
> 116.66, 87.5, and 145.83. All the pixels in that image are defined in
> the equidistant projection, so I think it should be possible for v360
> to map all the pixels to an appropriate output. If my image is not a
> real equidistant fisheye projection, or if my FoV measurements are
> wrong, then the chessboard won't have the right shape in the output.
> This is not the problem I am trying to solve here.
>
> On Sun, Apr 11, 2021 at 9:37 AM Paul B Mahol <onemda@gmail.com> wrote:
> >
> >
> >
> > On Mon, Mar 22, 2021 at 1:35 PM Daniel Playfair Cal <
> daniel.playfair.cal@gmail.com> wrote:
> >>
> >> > I disagree, if I use 180 hfov and 180 vfov it should not have extra
> areas but only half of previous input.
> >>
> >> Not sure I follow - the ih_fov and vh_fov refer to the input (i.e. the
> fisheye image). If you wanted to restrict the FoV of the output, surely the
> way to do that would be to implement and use the FoV settings for the
> equirectangular projection?. It doesn't seem right that the code for the
> input projection is responsible for deciding what appears in the output. My
> understanding was that the FoV settings simply describe the focal length of
> the input or output camera so that points in the images can me mapped
> to/from 3d coordinates.
> >>
> >> To give you an idea of what I am trying to fix, here is an example
> input: https://photos.app.goo.gl/o51NfY6aqWn3unPG6
> >> This is a 1920x1440 image taken on a GoPro Hero 5 black with the 4:3
> Wide FoV setting and stabilisation disabled.
> >>
> >> The following filtergraph demonstrates the issues:
> 'v360=input=fisheye:ih_fov=116.66:iv_fov=87.50:output=flat:d_fov=145.8'
> >>  1. the dfov_from_hfov issue is worked around by the use of ih_fov and
> iv_fov instead of id_fov, although you can try with id_fov=145.8 to see
> that problem too
> >
> >
> > AFAIK, the h/v/d fov works fine with fisheye in/out. I used synthetic
> fisheye images from paul bourke site.
> > And diagonal fov from w/h either works with both in and out or not at
> all.
> >
> >>
> >>  2. by default the output has double the aspect ratio of the input,
> even though the fisheye -> rectilinear transformation doesn't change the
> aspect ratio (assuming the entire input image is included as it is in this
> example)
> >>  3. much of the input is not visible in the output even though there is
> a mapping between the chosen projections (changed in the visibility test
> patch)
> >>
> >> 3 in particular I don't think can be solved by changing the settings -
> the input field of view needs to match the FoV of the input camera,
> otherwise the mapping is wrong. But it seems there is no other way to
> include the entire input from a fisheye image.
> >
> >
> > So you want to not discard pixels out of circle defined by fov? That
> generally does not make sense to me as that may not gonna have actual
> pixels that belong to output.
> >
> > Make sure that your files have correct projection, fisheye in v360 is
> strict equidistant mapping, and may not be what your input is actually.
> >
> >
> >>
> >>
> >> On Mon, Mar 22, 2021 at 5:59 PM Paul B Mahol <onemda@gmail.com> wrote:
> >>>
> >>>
> >>>
> >>> On Mon, Mar 22, 2021 at 5:09 AM Daniel Playfair Cal <
> daniel.playfair.cal@gmail.com> wrote:
> >>>>
> >>>> I've tried that filtergraph and a few other similar ones and I'm not
> sure what you mean - what exactly is the regression?
> >>>>
> >>>> I tried it on this image with an equirectangular projection:
> https://wiki.panotools.org/images/0/01/Big_ben_equirectangular.jpg
> >>>>
> >>>> The only difference I can see is that there are less unmapped areas
> in the output with the patches, because the final mapping from the output
> equirectangular image to the intermediate fisheye image no longer fails to
> map some areas which are present in the fisheye image. I would describe
> this as an improvement?
> >>>
> >>>
> >>> I disagree, if I use 180 hfov and 180 vfov it should not have extra
> areas but only half of previous input.
> >>>
> >>>>
> >>>>
> >>>> On Mon, Mar 22, 2021 at 3:30 AM Paul B Mahol <onemda@gmail.com>
> wrote:
> >>>>>
> >>>>> Sorry, but I cannot apply this set as is, It makes at least one
> serious regression.
> >>>>>
> >>>>> For example try this filtergraph:
> >>>>>
> >>>>>
> v360=input=e:output=fisheye:h_fov=180:v_fov=180,v360=input=fisheye:output=e:ih_fov=180:iv_fov=180
> >>>>>
> >>>>> On Sun, Mar 21, 2021 at 1:45 PM Daniel Playfair Cal <
> daniel.playfair.cal@gmail.com> wrote:
> >>>>>>
> >>>>>> This changes the iflat_range and flat_range values for the fisheye
> >>>>>> projection to match their meaning for the flat/rectilinear
> projection.
> >>>>>> That is, the range is between the two x or two y coordinates of the
> >>>>>> outermost points above/below or left/right of the center, in the
> >>>>>> flat/rectilinear projection.
> >>>>>>
> >>>>>> Signed-off-by: Daniel Playfair Cal <daniel.playfair.cal@gmail.com>
> >>>>>> ---
> >>>>>>  libavfilter/vf_v360.c | 19 +++++++++----------
> >>>>>>  1 file changed, 9 insertions(+), 10 deletions(-)
> >>>>>>
> >>>>>> diff --git a/libavfilter/vf_v360.c b/libavfilter/vf_v360.c
> >>>>>> index 68bb2f7b0f..3158451963 100644
> >>>>>> --- a/libavfilter/vf_v360.c
> >>>>>> +++ b/libavfilter/vf_v360.c
> >>>>>> @@ -2807,9 +2807,8 @@ static int
> prepare_fisheye_out(AVFilterContext *ctx)
> >>>>>>  {
> >>>>>>      V360Context *s = ctx->priv;
> >>>>>>
> >>>>>> -    s->flat_range[0] = s->h_fov / 180.f;
> >>>>>> -    s->flat_range[1] = s->v_fov / 180.f;
> >>>>>> -
> >>>>>> +    s->flat_range[0] = 0.5f * s->h_fov * M_PI / 180.f;
> >>>>>> +    s->flat_range[1] = 0.5f * s->v_fov * M_PI / 180.f;
> >>>>>>      return 0;
> >>>>>>  }
> >>>>>>
> >>>>>> @@ -2827,8 +2826,8 @@ static int fisheye_to_xyz(const V360Context
> *s,
> >>>>>>                            int i, int j, int width, int height,
> >>>>>>                            float *vec)
> >>>>>>  {
> >>>>>> -    const float uf = s->flat_range[0] * ((2.f * i) / width  - 1.f);
> >>>>>> -    const float vf = s->flat_range[1] * ((2.f * j + 1.f) / height
> - 1.f);
> >>>>>> +    const float uf = 2.f * s->flat_range[0] / M_PI * ((2.f * i) /
> width  - 1.f);
> >>>>>> +    const float vf = 2.f * s->flat_range[1] / M_PI * ((2.f * j +
> 1.f) / height - 1.f);
> >>>>>>
> >>>>>>      const float phi   = atan2f(vf, uf);
> >>>>>>      const float theta = M_PI_2 * (1.f - hypotf(uf, vf));
> >>>>>> @@ -2858,8 +2857,8 @@ static int prepare_fisheye_in(AVFilterContext
> *ctx)
> >>>>>>  {
> >>>>>>      V360Context *s = ctx->priv;
> >>>>>>
> >>>>>> -    s->iflat_range[0] = s->ih_fov / 180.f;
> >>>>>> -    s->iflat_range[1] = s->iv_fov / 180.f;
> >>>>>> +    s->iflat_range[0] = 0.5f * s->ih_fov * M_PI / 180.f;
> >>>>>> +    s->iflat_range[1] = 0.5f * s->iv_fov * M_PI / 180.f;
> >>>>>>
> >>>>>>      return 0;
> >>>>>>  }
> >>>>>> @@ -2882,10 +2881,10 @@ static int xyz_to_fisheye(const V360Context
> *s,
> >>>>>>  {
> >>>>>>      const float h   = hypotf(vec[0], vec[1]);
> >>>>>>      const float lh  = h > 0.f ? h : 1.f;
> >>>>>> -    const float phi = atan2f(h, vec[2]) / M_PI;
> >>>>>> +    const float phi = atan2f(h, vec[2]);
> >>>>>>
> >>>>>> -    float uf = vec[0] / lh * phi / s->iflat_range[0];
> >>>>>> -    float vf = vec[1] / lh * phi / s->iflat_range[1];
> >>>>>> +    float uf = 0.5f * vec[0] / lh * phi / s->iflat_range[0];
> >>>>>> +    float vf = 0.5f * vec[1] / lh * phi / s->iflat_range[1];
> >>>>>>
> >>>>>>      const int visible = -0.5f < uf && uf < 0.5f && -0.5f < vf &&
> vf < 0.5f;
> >>>>>>      int ui, vi;
> >>>>>> --
> >>>>>> 2.31.0
> >>>>>>
> >>>>>> _______________________________________________
> >>>>>> ffmpeg-devel mailing list
> >>>>>> ffmpeg-devel@ffmpeg.org
> >>>>>> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
> >>>>>>
> >>>>>> To unsubscribe, visit link above, or email
> >>>>>> ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
>
Daniel Playfair Cal April 12, 2021, 5:43 a.m. UTC | #13
> Ok, dfov for width != height in (d)fisheye have been fixed.

Great, thanks :)

> The aspect ratio one breaks handling in case input is in equirectangular format.

That's not obvious to me - can you provide an example filtergraph
where it breaks, and explain why it's wrong?

> Dunno what to do with rest of patches.

The most important one I think is the visibility test for fisheye
input - that is the only one that can't be worked around.

Going back to your original example:
`v360=input=e:output=fisheye:h_fov=180:v_fov=180,v360=input=fisheye:output=e:ih_fov=180:iv_fov=180`

Now that you've added the ability to specify fov settings for the
equirectangular projection, you can apply my patch and still have the
output you expect, for example:

v360=input=e:output=fisheye:h_fov=180:v_fov=180,v360=input=fisheye:output=e:ih_fov=180:iv_fov=180:h_fov=180

The use of h_fov on the output prevents anything outside of a 180 degree
horizontal field of view from being mapped (because it's outside the
bounds of the output image).

At the same time, it's possible to map an entire fisheye image,
including the corners, for example in my use case of converting
fisheye -> flat.
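
For reference, here is a condensed standalone sketch of the patched fisheye
input mapping (simplified from the prepare_fisheye_in/xyz_to_fisheye hunks
at the bottom of this page; the function name and signature are mine, not
the filter's):

#include <math.h>

/* Map a 3D direction to normalized fisheye coordinates in (-0.5, 0.5)
 * and report whether it lands inside the input frame, mirroring the
 * patched visibility test. */
static int dir_to_fisheye(const float vec[3],
                          float ih_fov_deg, float iv_fov_deg,
                          float *uf, float *vf)
{
    const float iflat_range_u = 0.5f * ih_fov_deg * M_PI / 180.f;
    const float iflat_range_v = 0.5f * iv_fov_deg * M_PI / 180.f;

    const float h   = hypotf(vec[0], vec[1]);
    const float lh  = h > 0.f ? h : 1.f;
    const float phi = atan2f(h, vec[2]);   /* angle from the optical axis */

    *uf = 0.5f * vec[0] / lh * phi / iflat_range_u;
    *vf = 0.5f * vec[1] / lh * phi / iflat_range_v;

    return -0.5f < *uf && *uf < 0.5f && -0.5f < *vf && *vf < 0.5f;
}

If I read the patched code correctly, with ih_fov=iv_fov=180 this accepts
directions up to 90 degrees off axis along the horizontal/vertical axes but
up to about 127 degrees along the diagonals, which is where the extra areas
in your round-trip example come from.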


On Sun, Apr 11, 2021 at 6:23 PM Paul B Mahol <onemda@gmail.com> wrote:
>
> Ok, dfov for width != height in (d)fisheye have been fixed.
>
> Dunno what to do with rest of patches.
>
> The aspect ratio one breaks handling in case input is in equirectangular format.
>
> On Sun, Apr 11, 2021 at 6:48 AM Daniel Playfair Cal <daniel.playfair.cal@gmail.com> wrote:
>>
>> > AFAIK, the h/v/d fov works fine with fisheye in/out. I used synthetic fisheye images from paul bourke site.
>>
>> > And diagonal fov from w/h either works with both in and out or not at all.
>>
>> That doesn't seem correct. If an image with an equidistant projection
>> is of dimensions WxH and has the focal point at the center, then the
>> fields of view are related as follows for some value of f:
>> horizontal: f*W
>> vertical: f*H
>> diagonal: f*sqrt( W^2 + H^2 )
>>
>> In the example I gave, v360 chooses horizontal/vertical fields of view
>> that are incompatible with the diagonal field of view provided.
>> Therefore (assuming 1:1 pixel aspect ratio), since
>> sqrt(116.66^2+87.5^2) = 145.83, `input=fisheye:id_fov=145.83` should
>> behave the same as `input=fisheye:ih_fov=116.66:iv_fov=87.50` in terms
>> of the focal length. It doesn't, so I think this is a bug.
>>
>> > So you want to not discard pixels out of circle defined by fov? That generally does not make sense to me as that may not gonna have actual pixels that belong to output.
>>
>> What circle/FoV are you referring to? The FoV depends on which points
>> on the image you choose to compare (for example the
>> horizontal/vertical/diagonal FoVs are all different).
>> Sometimes/usually there is no circle which describes which points in
>> the image can be correctly mapped.
>>
>> If a pixel in an image lies at a point where the projection of that
>> image is not defined, then yes it should be discarded. I think this is
>> correct in my patches.
>>
>> > Make sure that your files have correct projection, fisheye in v360 is strict equidistant mapping, and may not be what your input is actually.
>>
>> Yes, I agree we shouldn't assume anything based on the
>> content/appearance of the images from my GoPro. But it's possible to
>> make a rectangular image of 1920x1440 pixels with an equidistant
>> fisheye projection where the horizontal, vertical, and diagonal FoV is
>> 116.66, 87.5, and 145.83. All the pixels in that image are defined in
>> the equidistant projection, so I think it should be possible for v360
>> to map all the pixels to an appropriate output. If my image is not a
>> real equidistant fisheye projection, or if my FoV measurements are
>> wrong, then the chessboard won't have the right shape in the output.
>> This is not the problem I am trying to solve here.
>>
>> On Sun, Apr 11, 2021 at 9:37 AM Paul B Mahol <onemda@gmail.com> wrote:
>> >
>> >
>> >
>> > On Mon, Mar 22, 2021 at 1:35 PM Daniel Playfair Cal <daniel.playfair.cal@gmail.com> wrote:
>> >>
>> >> > I disagree, if I use 180 hfov and 180 vfov it should not have extra areas but only half of previous input.
>> >>
>> >> Not sure I follow - the ih_fov and vh_fov refer to the input (i.e. the fisheye image). If you wanted to restrict the FoV of the output, surely the way to do that would be to implement and use the FoV settings for the equirectangular projection?. It doesn't seem right that the code for the input projection is responsible for deciding what appears in the output. My understanding was that the FoV settings simply describe the focal length of the input or output camera so that points in the images can me mapped to/from 3d coordinates.
>> >>
>> >> To give you an idea of what I am trying to fix, here is an example input: https://photos.app.goo.gl/o51NfY6aqWn3unPG6
>> >> This is a 1920x1440 image taken on a GoPro Hero 5 black with the 4:3 Wide FoV setting and stabilisation disabled.
>> >>
>> >> The following filtergraph demonstrates the issues: 'v360=input=fisheye:ih_fov=116.66:iv_fov=87.50:output=flat:d_fov=145.8'
>> >>  1. the dfov_from_hfov issue is worked around by the use of ih_fov and iv_fov instead of id_fov, although you can try with id_fov=145.8 to see that problem too
>> >
>> >
>> > AFAIK, the h/v/d fov works fine with fisheye in/out. I used synthetic fisheye images from paul bourke site.
>> > And diagonal fov from w/h either works with both in and out or not at all.
>> >
>> >>
>> >>  2. by default the output has double the aspect ratio of the input, even though the fisheye -> rectilinear transformation doesn't change the aspect ratio (assuming the entire input image is included as it is in this example)
>> >>  3. much of the input is not visible in the output even though there is a mapping between the chosen projections (changed in the visibility test patch)
>> >>
>> >> 3 in particular I don't think can be solved by changing the settings - the input field of view needs to match the FoV of the input camera, otherwise the mapping is wrong. But it seems there is no other way to include the entire input from a fisheye image.
>> >
>> >
>> > So you want to not discard pixels out of circle defined by fov? That generally does not make sense to me as that may not gonna have actual pixels that belong to output.
>> >
>> > Make sure that your files have correct projection, fisheye in v360 is strict equidistant mapping, and may not be what your input is actually.
>> >
>> >
>> >>
>> >>
>> >> On Mon, Mar 22, 2021 at 5:59 PM Paul B Mahol <onemda@gmail.com> wrote:
>> >>>
>> >>>
>> >>>
>> >>> On Mon, Mar 22, 2021 at 5:09 AM Daniel Playfair Cal <daniel.playfair.cal@gmail.com> wrote:
>> >>>>
>> >>>> I've tried that filtergraph and a few other similar ones and I'm not sure what you mean - what exactly is the regression?
>> >>>>
>> >>>> I tried it on this image with an equirectangular projection: https://wiki.panotools.org/images/0/01/Big_ben_equirectangular.jpg
>> >>>>
>> >>>> The only difference I can see is that there are less unmapped areas in the output with the patches, because the final mapping from the output equirectangular image to the intermediate fisheye image no longer fails to map some areas which are present in the fisheye image. I would describe this as an improvement?
>> >>>
>> >>>
>> >>> I disagree, if I use 180 hfov and 180 vfov it should not have extra areas but only half of previous input.
>> >>>
>> >>>>
>> >>>>
>> >>>> On Mon, Mar 22, 2021 at 3:30 AM Paul B Mahol <onemda@gmail.com> wrote:
>> >>>>>
>> >>>>> Sorry, but I cannot apply this set as is, It makes at least one serious regression.
>> >>>>>
>> >>>>> For example try this filtergraph:
>> >>>>>
>> >>>>> v360=input=e:output=fisheye:h_fov=180:v_fov=180,v360=input=fisheye:output=e:ih_fov=180:iv_fov=180
>> >>>>>
>> >>>>> On Sun, Mar 21, 2021 at 1:45 PM Daniel Playfair Cal <daniel.playfair.cal@gmail.com> wrote:
>> >>>>>>
>> >>>>>> This changes the iflat_range and flat_range values for the fisheye
>> >>>>>> projection to match their meaning for the flat/rectilinear projection.
>> >>>>>> That is, the range is between the two x or two y coordinates of the
>> >>>>>> outermost points above/below or left/right of the center, in the
>> >>>>>> flat/rectilinear projection.
>> >>>>>>
>> >>>>>> Signed-off-by: Daniel Playfair Cal <daniel.playfair.cal@gmail.com>
>> >>>>>> ---
>> >>>>>>  libavfilter/vf_v360.c | 19 +++++++++----------
>> >>>>>>  1 file changed, 9 insertions(+), 10 deletions(-)
>> >>>>>>
>> >>>>>> diff --git a/libavfilter/vf_v360.c b/libavfilter/vf_v360.c
>> >>>>>> index 68bb2f7b0f..3158451963 100644
>> >>>>>> --- a/libavfilter/vf_v360.c
>> >>>>>> +++ b/libavfilter/vf_v360.c
>> >>>>>> @@ -2807,9 +2807,8 @@ static int prepare_fisheye_out(AVFilterContext *ctx)
>> >>>>>>  {
>> >>>>>>      V360Context *s = ctx->priv;
>> >>>>>>
>> >>>>>> -    s->flat_range[0] = s->h_fov / 180.f;
>> >>>>>> -    s->flat_range[1] = s->v_fov / 180.f;
>> >>>>>> -
>> >>>>>> +    s->flat_range[0] = 0.5f * s->h_fov * M_PI / 180.f;
>> >>>>>> +    s->flat_range[1] = 0.5f * s->v_fov * M_PI / 180.f;
>> >>>>>>      return 0;
>> >>>>>>  }
>> >>>>>>
>> >>>>>> @@ -2827,8 +2826,8 @@ static int fisheye_to_xyz(const V360Context *s,
>> >>>>>>                            int i, int j, int width, int height,
>> >>>>>>                            float *vec)
>> >>>>>>  {
>> >>>>>> -    const float uf = s->flat_range[0] * ((2.f * i) / width  - 1.f);
>> >>>>>> -    const float vf = s->flat_range[1] * ((2.f * j + 1.f) / height - 1.f);
>> >>>>>> +    const float uf = 2.f * s->flat_range[0] / M_PI * ((2.f * i) / width  - 1.f);
>> >>>>>> +    const float vf = 2.f * s->flat_range[1] / M_PI * ((2.f * j + 1.f) / height - 1.f);
>> >>>>>>
>> >>>>>>      const float phi   = atan2f(vf, uf);
>> >>>>>>      const float theta = M_PI_2 * (1.f - hypotf(uf, vf));
>> >>>>>> @@ -2858,8 +2857,8 @@ static int prepare_fisheye_in(AVFilterContext *ctx)
>> >>>>>>  {
>> >>>>>>      V360Context *s = ctx->priv;
>> >>>>>>
>> >>>>>> -    s->iflat_range[0] = s->ih_fov / 180.f;
>> >>>>>> -    s->iflat_range[1] = s->iv_fov / 180.f;
>> >>>>>> +    s->iflat_range[0] = 0.5f * s->ih_fov * M_PI / 180.f;
>> >>>>>> +    s->iflat_range[1] = 0.5f * s->iv_fov * M_PI / 180.f;
>> >>>>>>
>> >>>>>>      return 0;
>> >>>>>>  }
>> >>>>>> @@ -2882,10 +2881,10 @@ static int xyz_to_fisheye(const V360Context *s,
>> >>>>>>  {
>> >>>>>>      const float h   = hypotf(vec[0], vec[1]);
>> >>>>>>      const float lh  = h > 0.f ? h : 1.f;
>> >>>>>> -    const float phi = atan2f(h, vec[2]) / M_PI;
>> >>>>>> +    const float phi = atan2f(h, vec[2]);
>> >>>>>>
>> >>>>>> -    float uf = vec[0] / lh * phi / s->iflat_range[0];
>> >>>>>> -    float vf = vec[1] / lh * phi / s->iflat_range[1];
>> >>>>>> +    float uf = 0.5f * vec[0] / lh * phi / s->iflat_range[0];
>> >>>>>> +    float vf = 0.5f * vec[1] / lh * phi / s->iflat_range[1];
>> >>>>>>
>> >>>>>>      const int visible = -0.5f < uf && uf < 0.5f && -0.5f < vf && vf < 0.5f;
>> >>>>>>      int ui, vi;
>> >>>>>> --
>> >>>>>> 2.31.0
>> >>>>>>
>> >>>>>> _______________________________________________
>> >>>>>> ffmpeg-devel mailing list
>> >>>>>> ffmpeg-devel@ffmpeg.org
>> >>>>>> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>> >>>>>>
>> >>>>>> To unsubscribe, visit link above, or email
>> >>>>>> ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
Paul B Mahol April 18, 2021, 7:26 a.m. UTC | #14
On Mon, Apr 12, 2021 at 7:44 AM Daniel Playfair Cal <
daniel.playfair.cal@gmail.com> wrote:

> > Ok, dfov for width != height in (d)fisheye have been fixed.
>
> Great, thanks :)
>
> > The aspect ratio one breaks handling in case input is in equirectangular
> format.
>
> That's not obvious to me - can you provide an example filtergraph
> where it breaks, and explain why it's wrong?
>
> > Dunno what to do with rest of patches.
>
> The most important one I think is the visibility test for fisheye
> input - that is the only one that can't be worked around.
>
> Going back to your original example:
>
> `v360=input=e:output=fisheye:h_fov=180:v_fov=180,v360=input=fisheye:output=e:ih_fov=180:iv_fov=180`
>
> Now that you've added the ability to specify fov settings for the
> equirectangular projection, you can apply my patch and still have the
> output you expect, for example:
>
>
> v360=input=e:output=fisheye:h_fov=180:v_fov=180,v360=input=fisheye:output=e:ih_fov=180:iv_fov=180:h_fov=180
>
> The use of h_fov on the output prevents anything outside of 180
> horizontal field of view from being mapped (because its outside the
> bounds of the output image).
>


That is not the same output; the transparent part is completely missing.
Perhaps another option should be added somehow.



>
> At the same time, its possible to map an entire fisheye image
> including the corners, for example in my use case of converting
> fisheye -> flat.


>
> On Sun, Apr 11, 2021 at 6:23 PM Paul B Mahol <onemda@gmail.com> wrote:
> >
> > Ok, dfov for width != height in (d)fisheye have been fixed.
> >
> > Dunno what to do with rest of patches.
> >
> > The aspect ratio one breaks handling in case input is in equirectangular
> format.
> >
> > On Sun, Apr 11, 2021 at 6:48 AM Daniel Playfair Cal <
> daniel.playfair.cal@gmail.com> wrote:
> >>
> >> > AFAIK, the h/v/d fov works fine with fisheye in/out. I used synthetic
> fisheye images from paul bourke site.
> >>
> >> > And diagonal fov from w/h either works with both in and out or not at
> all.
> >>
> >> That doesn't seem correct. If an image with an equidistant projection
> >> is of dimensions WxH and has the focal point at the center, then the
> >> fields of view are related as follows for some value of f:
> >> horizontal: f*W
> >> vertical: f*H
> >> diagonal: f*sqrt( W^2 + H^2 )
> >>
> >> In the example I gave, v360 chooses horizontal/vertical fields of view
> >> that are incompatible with the diagonal field of view provided.
> >> Therefore (assuming 1:1 pixel aspect ratio), since
> >> sqrt(116.66^2+87.5^2) = 145.83, `input=fisheye:id_fov=145.83` should
> >> behave the same as `input=fisheye:ih_fov=116.66:iv_fov=87.50` in terms
> >> of the focal length. It doesn't, so I think this is a bug.
> >>
> >> > So you want to not discard pixels out of circle defined by fov? That
> generally does not make sense to me as that may not gonna have actual
> pixels that belong to output.
> >>
> >> What circle/FoV are you referring to? The FoV depends on which points
> >> on the image you choose to compare (for example the
> >> horizontal/vertical/diagonal FoVs are all different).
> >> Sometimes/usually there is no circle which describes which points in
> >> the image can be correctly mapped.
> >>
> >> If a pixel in an image lies at a point where the projection of that
> >> image is not defined, then yes it should be discarded. I think this is
> >> correct in my patches.
> >>
> >> > Make sure that your files have correct projection, fisheye in v360 is
> strict equidistant mapping, and may not be what your input is actually.
> >>
> >> Yes, I agree we shouldn't assume anything based on the
> >> content/appearance of the images from my GoPro. But it's possible to
> >> make a rectangular image of 1920x1440 pixels with an equidistant
> >> fisheye projection where the horizontal, vertical, and diagonal FoV is
> >> 116.66, 87.5, and 145.83. All the pixels in that image are defined in
> >> the equidistant projection, so I think it should be possible for v360
> >> to map all the pixels to an appropriate output. If my image is not a
> >> real equidistant fisheye projection, or if my FoV measurements are
> >> wrong, then the chessboard won't have the right shape in the output.
> >> This is not the problem I am trying to solve here.
> >>
> >> On Sun, Apr 11, 2021 at 9:37 AM Paul B Mahol <onemda@gmail.com> wrote:
> >> >
> >> >
> >> >
> >> > On Mon, Mar 22, 2021 at 1:35 PM Daniel Playfair Cal <
> daniel.playfair.cal@gmail.com> wrote:
> >> >>
> >> >> > I disagree, if I use 180 hfov and 180 vfov it should not have
> extra areas but only half of previous input.
> >> >>
> >> >> Not sure I follow - the ih_fov and vh_fov refer to the input (i.e.
> the fisheye image). If you wanted to restrict the FoV of the output, surely
> the way to do that would be to implement and use the FoV settings for the
> equirectangular projection?. It doesn't seem right that the code for the
> input projection is responsible for deciding what appears in the output. My
> understanding was that the FoV settings simply describe the focal length of
> the input or output camera so that points in the images can me mapped
> to/from 3d coordinates.
> >> >>
> >> >> To give you an idea of what I am trying to fix, here is an example
> input: https://photos.app.goo.gl/o51NfY6aqWn3unPG6
> >> >> This is a 1920x1440 image taken on a GoPro Hero 5 black with the 4:3
> Wide FoV setting and stabilisation disabled.
> >> >>
> >> >> The following filtergraph demonstrates the issues:
> 'v360=input=fisheye:ih_fov=116.66:iv_fov=87.50:output=flat:d_fov=145.8'
> >> >>  1. the dfov_from_hfov issue is worked around by the use of ih_fov
> and iv_fov instead of id_fov, although you can try with id_fov=145.8 to see
> that problem too
> >> >
> >> >
> >> > AFAIK, the h/v/d fov works fine with fisheye in/out. I used synthetic
> fisheye images from paul bourke site.
> >> > And diagonal fov from w/h either works with both in and out or not at
> all.
> >> >
> >> >>
> >> >>  2. by default the output has double the aspect ratio of the input,
> even though the fisheye -> rectilinear transformation doesn't change the
> aspect ratio (assuming the entire input image is included as it is in this
> example)
> >> >>  3. much of the input is not visible in the output even though there
> is a mapping between the chosen projections (changed in the visibility test
> patch)
> >> >>
> >> >> 3 in particular I don't think can be solved by changing the settings
> - the input field of view needs to match the FoV of the input camera,
> otherwise the mapping is wrong. But it seems there is no other way to
> include the entire input from a fisheye image.
> >> >
> >> >
> >> > So you want to not discard pixels out of circle defined by fov? That
> generally does not make sense to me as that may not gonna have actual
> pixels that belong to output.
> >> >
> >> > Make sure that your files have correct projection, fisheye in v360 is
> strict equidistant mapping, and may not be what your input is actually.
> >> >
> >> >
> >> >>
> >> >>
> >> >> On Mon, Mar 22, 2021 at 5:59 PM Paul B Mahol <onemda@gmail.com>
> wrote:
> >> >>>
> >> >>>
> >> >>>
> >> >>> On Mon, Mar 22, 2021 at 5:09 AM Daniel Playfair Cal <
> daniel.playfair.cal@gmail.com> wrote:
> >> >>>>
> >> >>>> I've tried that filtergraph and a few other similar ones and I'm
> not sure what you mean - what exactly is the regression?
> >> >>>>
> >> >>>> I tried it on this image with an equirectangular projection:
> https://wiki.panotools.org/images/0/01/Big_ben_equirectangular.jpg
> >> >>>>
> >> >>>> The only difference I can see is that there are less unmapped
> areas in the output with the patches, because the final mapping from the
> output equirectangular image to the intermediate fisheye image no longer
> fails to map some areas which are present in the fisheye image. I would
> describe this as an improvement?
> >> >>>
> >> >>>
> >> >>> I disagree, if I use 180 hfov and 180 vfov it should not have extra
> areas but only half of previous input.
> >> >>>
> >> >>>>
> >> >>>>
> >> >>>> On Mon, Mar 22, 2021 at 3:30 AM Paul B Mahol <onemda@gmail.com>
> wrote:
> >> >>>>>
> >> >>>>> Sorry, but I cannot apply this set as is, It makes at least one
> serious regression.
> >> >>>>>
> >> >>>>> For example try this filtergraph:
> >> >>>>>
> >> >>>>>
> v360=input=e:output=fisheye:h_fov=180:v_fov=180,v360=input=fisheye:output=e:ih_fov=180:iv_fov=180
> >> >>>>>
> >> >>>>> On Sun, Mar 21, 2021 at 1:45 PM Daniel Playfair Cal <
> daniel.playfair.cal@gmail.com> wrote:
> >> >>>>>>
> >> >>>>>> This changes the iflat_range and flat_range values for the
> fisheye
> >> >>>>>> projection to match their meaning for the flat/rectilinear
> projection.
> >> >>>>>> That is, the range is between the two x or two y coordinates of
> the
> >> >>>>>> outermost points above/below or left/right of the center, in the
> >> >>>>>> flat/rectilinear projection.
> >> >>>>>>
> >> >>>>>> Signed-off-by: Daniel Playfair Cal <
> daniel.playfair.cal@gmail.com>
> >> >>>>>> ---
> >> >>>>>>  libavfilter/vf_v360.c | 19 +++++++++----------
> >> >>>>>>  1 file changed, 9 insertions(+), 10 deletions(-)
> >> >>>>>>
> >> >>>>>> diff --git a/libavfilter/vf_v360.c b/libavfilter/vf_v360.c
> >> >>>>>> index 68bb2f7b0f..3158451963 100644
> >> >>>>>> --- a/libavfilter/vf_v360.c
> >> >>>>>> +++ b/libavfilter/vf_v360.c
> >> >>>>>> @@ -2807,9 +2807,8 @@ static int
> prepare_fisheye_out(AVFilterContext *ctx)
> >> >>>>>>  {
> >> >>>>>>      V360Context *s = ctx->priv;
> >> >>>>>>
> >> >>>>>> -    s->flat_range[0] = s->h_fov / 180.f;
> >> >>>>>> -    s->flat_range[1] = s->v_fov / 180.f;
> >> >>>>>> -
> >> >>>>>> +    s->flat_range[0] = 0.5f * s->h_fov * M_PI / 180.f;
> >> >>>>>> +    s->flat_range[1] = 0.5f * s->v_fov * M_PI / 180.f;
> >> >>>>>>      return 0;
> >> >>>>>>  }
> >> >>>>>>
> >> >>>>>> @@ -2827,8 +2826,8 @@ static int fisheye_to_xyz(const
> V360Context *s,
> >> >>>>>>                            int i, int j, int width, int height,
> >> >>>>>>                            float *vec)
> >> >>>>>>  {
> >> >>>>>> -    const float uf = s->flat_range[0] * ((2.f * i) / width  -
> 1.f);
> >> >>>>>> -    const float vf = s->flat_range[1] * ((2.f * j + 1.f) /
> height - 1.f);
> >> >>>>>> +    const float uf = 2.f * s->flat_range[0] / M_PI * ((2.f * i)
> / width  - 1.f);
> >> >>>>>> +    const float vf = 2.f * s->flat_range[1] / M_PI * ((2.f * j
> + 1.f) / height - 1.f);
> >> >>>>>>
> >> >>>>>>      const float phi   = atan2f(vf, uf);
> >> >>>>>>      const float theta = M_PI_2 * (1.f - hypotf(uf, vf));
> >> >>>>>> @@ -2858,8 +2857,8 @@ static int
> prepare_fisheye_in(AVFilterContext *ctx)
> >> >>>>>>  {
> >> >>>>>>      V360Context *s = ctx->priv;
> >> >>>>>>
> >> >>>>>> -    s->iflat_range[0] = s->ih_fov / 180.f;
> >> >>>>>> -    s->iflat_range[1] = s->iv_fov / 180.f;
> >> >>>>>> +    s->iflat_range[0] = 0.5f * s->ih_fov * M_PI / 180.f;
> >> >>>>>> +    s->iflat_range[1] = 0.5f * s->iv_fov * M_PI / 180.f;
> >> >>>>>>
> >> >>>>>>      return 0;
> >> >>>>>>  }
> >> >>>>>> @@ -2882,10 +2881,10 @@ static int xyz_to_fisheye(const
> V360Context *s,
> >> >>>>>>  {
> >> >>>>>>      const float h   = hypotf(vec[0], vec[1]);
> >> >>>>>>      const float lh  = h > 0.f ? h : 1.f;
> >> >>>>>> -    const float phi = atan2f(h, vec[2]) / M_PI;
> >> >>>>>> +    const float phi = atan2f(h, vec[2]);
> >> >>>>>>
> >> >>>>>> -    float uf = vec[0] / lh * phi / s->iflat_range[0];
> >> >>>>>> -    float vf = vec[1] / lh * phi / s->iflat_range[1];
> >> >>>>>> +    float uf = 0.5f * vec[0] / lh * phi / s->iflat_range[0];
> >> >>>>>> +    float vf = 0.5f * vec[1] / lh * phi / s->iflat_range[1];
> >> >>>>>>
> >> >>>>>>      const int visible = -0.5f < uf && uf < 0.5f && -0.5f < vf
> && vf < 0.5f;
> >> >>>>>>      int ui, vi;
> >> >>>>>> --
> >> >>>>>> 2.31.0
> >> >>>>>>
> >> >>>>>> _______________________________________________
> >> >>>>>> ffmpeg-devel mailing list
> >> >>>>>> ffmpeg-devel@ffmpeg.org
> >> >>>>>> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
> >> >>>>>>
> >> >>>>>> To unsubscribe, visit link above, or email
> >> >>>>>> ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
>
diff mbox series

Patch

diff --git a/libavfilter/vf_v360.c b/libavfilter/vf_v360.c
index 68bb2f7b0f..3158451963 100644
--- a/libavfilter/vf_v360.c
+++ b/libavfilter/vf_v360.c
@@ -2807,9 +2807,8 @@  static int prepare_fisheye_out(AVFilterContext *ctx)
 {
     V360Context *s = ctx->priv;
 
-    s->flat_range[0] = s->h_fov / 180.f;
-    s->flat_range[1] = s->v_fov / 180.f;
-
+    s->flat_range[0] = 0.5f * s->h_fov * M_PI / 180.f;
+    s->flat_range[1] = 0.5f * s->v_fov * M_PI / 180.f;
     return 0;
 }
 
@@ -2827,8 +2826,8 @@  static int fisheye_to_xyz(const V360Context *s,
                           int i, int j, int width, int height,
                           float *vec)
 {
-    const float uf = s->flat_range[0] * ((2.f * i) / width  - 1.f);
-    const float vf = s->flat_range[1] * ((2.f * j + 1.f) / height - 1.f);
+    const float uf = 2.f * s->flat_range[0] / M_PI * ((2.f * i) / width  - 1.f);
+    const float vf = 2.f * s->flat_range[1] / M_PI * ((2.f * j + 1.f) / height - 1.f);
 
     const float phi   = atan2f(vf, uf);
     const float theta = M_PI_2 * (1.f - hypotf(uf, vf));
@@ -2858,8 +2857,8 @@  static int prepare_fisheye_in(AVFilterContext *ctx)
 {
     V360Context *s = ctx->priv;
 
-    s->iflat_range[0] = s->ih_fov / 180.f;
-    s->iflat_range[1] = s->iv_fov / 180.f;
+    s->iflat_range[0] = 0.5f * s->ih_fov * M_PI / 180.f;
+    s->iflat_range[1] = 0.5f * s->iv_fov * M_PI / 180.f;
 
     return 0;
 }
@@ -2882,10 +2881,10 @@  static int xyz_to_fisheye(const V360Context *s,
 {
     const float h   = hypotf(vec[0], vec[1]);
     const float lh  = h > 0.f ? h : 1.f;
-    const float phi = atan2f(h, vec[2]) / M_PI;
+    const float phi = atan2f(h, vec[2]);
 
-    float uf = vec[0] / lh * phi / s->iflat_range[0];
-    float vf = vec[1] / lh * phi / s->iflat_range[1];
+    float uf = 0.5f * vec[0] / lh * phi / s->iflat_range[0];
+    float vf = 0.5f * vec[1] / lh * phi / s->iflat_range[1];
 
     const int visible = -0.5f < uf && uf < 0.5f && -0.5f < vf && vf < 0.5f;
     int ui, vi;