diff mbox series

[FFmpeg-devel] avcodec: add Radiance HDR image format support

Message ID CAPYw7P6+=NoWs1jLxeN9PcBkC_N+413bJZQPCrFdj98hd3T7pQ@mail.gmail.com
State New
Headers show
Series [FFmpeg-devel] avcodec: add Radiance HDR image format support | expand

Checks

Context Check Description
yinshiyou/configure_loongarch64 warning Failed to apply patch
andriy/make_x86 success Make finished
andriy/make_fate_x86 success Make fate finished
andriy/make_armv7_RPi4 success Make finished
andriy/make_fate_armv7_RPi4 success Make fate finished

Commit Message

Paul B Mahol July 5, 2022, 3:45 p.m. UTC
Hello,

Patch attached.

Comments

Anton Khirnov July 5, 2022, 5:12 p.m. UTC | #1
Quoting Paul B Mahol (2022-07-05 17:45:20)
> Hello,
> 
> Patch attached.
> 
> From 62fce2bfd811eb4fb86b5907d62e67f0a2d033ff Mon Sep 17 00:00:00 2001
> From: Paul B Mahol <onemda@gmail.com>
> Date: Sun, 3 Jul 2022 23:50:05 +0200
> Subject: [PATCH] avcodec: add Radiance HDR image format support
> 
> Signed-off-by: Paul B Mahol <onemda@gmail.com>
> ---
>  doc/general_contents.texi |   2 +
>  libavcodec/Makefile       |   2 +
>  libavcodec/allcodecs.c    |   2 +
>  libavcodec/codec_desc.c   |   7 ++
>  libavcodec/codec_id.h     |   1 +
>  libavcodec/hdrdec.c       | 212 ++++++++++++++++++++++++++++++++++++++
>  libavcodec/hdrenc.c       | 188 +++++++++++++++++++++++++++++++++
>  libavformat/Makefile      |   1 +
>  libavformat/allformats.c  |   1 +
>  libavformat/img2.c        |   1 +
>  libavformat/img2dec.c     |   8 ++
>  libavformat/img2enc.c     |   2 +-
>  12 files changed, 426 insertions(+), 1 deletion(-)
>  create mode 100644 libavcodec/hdrdec.c
>  create mode 100644 libavcodec/hdrenc.c

without this codec having tests the time cube cannot be completed
Paul B Mahol July 6, 2022, 6:47 p.m. UTC | #2
Added parser.
The test will be added after this is merged.
James Almer July 6, 2022, 6:50 p.m. UTC | #3
On 7/6/2022 3:47 PM, Paul B Mahol wrote:
> Added parser.
> The test will be added after this is merged.

[...]

> diff --git a/libavformat/img2.c b/libavformat/img2.c
> index 870d2ebbc5..b03075f3b0 100644
> --- a/libavformat/img2.c
> +++ b/libavformat/img2.c
> @@ -91,6 +91,7 @@ const IdStrMap ff_img_tags[] = {
>      { AV_CODEC_ID_VBN,        "vbn"      },
>      { AV_CODEC_ID_JPEGXL,     "jxl"      },
>      { AV_CODEC_ID_QOI,        "qoi"      },
> +    { AV_CODEC_ID_HDR,        "hdr"      },

This could end up confusing people, so preferably use RHDR or some other 
combination for the codec id.

>      { AV_CODEC_ID_NONE,       NULL       }
>  };
James Almer July 6, 2022, 7:03 p.m. UTC | #4
On 7/6/2022 3:47 PM, Paul B Mahol wrote:
> Added parser.
> The test will be added after this is merged.

[...]

> +static int hdr_parse(AVCodecParserContext *s, AVCodecContext *avctx,
> +                     const uint8_t **poutbuf, int *poutbuf_size,
> +                     const uint8_t *buf, int buf_size)
> +{
> +    HDRParseContext *ipc = s->priv_data;
> +    uint64_t state = ipc->pc.state64;
> +    int next = END_NOT_FOUND, i = 0;
> +
> +    s->pict_type = AV_PICTURE_TYPE_NONE;

pict_type I, and key frame?

[...]

> +static int hdr_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
> +                            const AVFrame *frame, int *got_packet)
> +{
> +    HDREncContext *s = avctx->priv_data;
> +    int64_t packet_size;
> +    uint8_t *buf;
> +    int ret;
> +
> +    packet_size = avctx->width * avctx->height * 4LL + 1024LL;
> +    if ((ret = ff_get_encode_buffer(avctx, pkt, packet_size, 0)) < 0)

Won't using a worst-case scenario like this result in massive packets? 
av_shrink_packet() only changes the size field, it doesn't realloc the 
buffer to shrink it.

You could allocate a buffer in HDREncContext with 
av_fast_padded_malloc() here, use the bytestream2 API below, then 
allocate the packet with ff_get_encode_buffer() using 
bytestream2_tell_p() as size and do a memcpy at the end.

> +        return ret;
> +
> +    buf = pkt->data;
> +    bytestream_put_str(&buf, "#?RADIANCE\n");
> +    bytestream_put_str(&buf, "SOFTWARE=lavc\n");
> +    ret = snprintf(buf, 32, "PIXASPECT=%f\n", av_q2d(av_inv_q(avctx->sample_aspect_ratio)));
> +    if (ret > 0)
> +        buf += ret;
> +    bytestream_put_str(&buf, "FORMAT=32-bit_rle_rgbe\n\n");
> +    ret = snprintf(buf, 32, "-Y %d +X %d\n", avctx->height, avctx->width);
> +    if (ret > 0)
> +        buf += ret;
> +
> +    for (int y = 0; y < avctx->height; y++) {
> +        const float *red   = (const float *)(frame->data[2] + y * frame->linesize[2]);
> +        const float *green = (const float *)(frame->data[0] + y * frame->linesize[0]);
> +        const float *blue  = (const float *)(frame->data[1] + y * frame->linesize[1]);
> +
> +        if (avctx->width < 8 || avctx->width > 0x7fff) {
> +            for (int x = 0; x < avctx->width; x++) {
> +                float2rgbe(buf, red[x], green[x], blue[x]);
> +                buf += 4;
> +            }
> +        } else {
> +            bytestream_put_byte(&buf, 2);
> +            bytestream_put_byte(&buf, 2);
> +            bytestream_put_byte(&buf, avctx->width >> 8);
> +            bytestream_put_byte(&buf, avctx->width & 0xFF);
> +
> +            for (int x = 0; x < avctx->width; x++)
> +                float2rgbe(s->scanline + 4 * x, red[x], green[x], blue[x]);
> +            for (int p = 0; p < 4; p++)
> +                rle(&buf, s->scanline + p, avctx->width);
> +        }
> +    }
> +
> +    pkt->flags |= AV_PKT_FLAG_KEY;
> +
> +    av_shrink_packet(pkt, buf - pkt->data);
> +
> +    *got_packet = 1;
> +
> +    return 0;
> +}
Andreas Rheinhardt July 6, 2022, 7:09 p.m. UTC | #5
James Almer:
> On 7/6/2022 3:47 PM, Paul B Mahol wrote:
>> Added parser.
>> The test will be added after this is merged.
> 
> [...]
> 
>> +static int hdr_parse(AVCodecParserContext *s, AVCodecContext *avctx,
>> +                     const uint8_t **poutbuf, int *poutbuf_size,
>> +                     const uint8_t *buf, int buf_size)
>> +{
>> +    HDRParseContext *ipc = s->priv_data;
>> +    uint64_t state = ipc->pc.state64;
>> +    int next = END_NOT_FOUND, i = 0;
>> +
>> +    s->pict_type = AV_PICTURE_TYPE_NONE;
> 
> pict_type I, and key frame?
> 
> [...]
> 
>> +static int hdr_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
>> +                            const AVFrame *frame, int *got_packet)
>> +{
>> +    HDREncContext *s = avctx->priv_data;
>> +    int64_t packet_size;
>> +    uint8_t *buf;
>> +    int ret;
>> +
>> +    packet_size = avctx->width * avctx->height * 4LL + 1024LL;
>> +    if ((ret = ff_get_encode_buffer(avctx, pkt, packet_size, 0)) < 0)
> 
> Won't using a worst-case scenario like this result in massive packets?
> av_shrink_packet() only changes the size field, it doesn't realloc the
> buffer to shrink it.
> 
> You could allocate a buffer in HDREncContext with
> av_fast_padded_malloc() here, use the bytestream2 API below, then
> allocate the packet with ff_get_encode_buffer() using
> bytestream2_tell_p() as size and do a memcpy at the end.
> 

If that is the aim, then there is no reason to do the memcpy ourselves
(or do the av_fast_padded_malloc directly): Just use ff_alloc_packet()
and the generic code will ensure that the packet data will be made
refcounted. And the temp buffer will be freed generically, too.

>> +        return ret;
>> +
>> +    buf = pkt->data;
>> +    bytestream_put_str(&buf, "#?RADIANCE\n");
>> +    bytestream_put_str(&buf, "SOFTWARE=lavc\n");
>> +    ret = snprintf(buf, 32, "PIXASPECT=%f\n",
>> av_q2d(av_inv_q(avctx->sample_aspect_ratio)));
>> +    if (ret > 0)
>> +        buf += ret;
>> +    bytestream_put_str(&buf, "FORMAT=32-bit_rle_rgbe\n\n");
>> +    ret = snprintf(buf, 32, "-Y %d +X %d\n", avctx->height,
>> avctx->width);
>> +    if (ret > 0)
>> +        buf += ret;
>> +
>> +    for (int y = 0; y < avctx->height; y++) {
>> +        const float *red   = (const float *)(frame->data[2] + y *
>> frame->linesize[2]);
>> +        const float *green = (const float *)(frame->data[0] + y *
>> frame->linesize[0]);
>> +        const float *blue  = (const float *)(frame->data[1] + y *
>> frame->linesize[1]);
>> +
>> +        if (avctx->width < 8 || avctx->width > 0x7fff) {
>> +            for (int x = 0; x < avctx->width; x++) {
>> +                float2rgbe(buf, red[x], green[x], blue[x]);
>> +                buf += 4;
>> +            }
>> +        } else {
>> +            bytestream_put_byte(&buf, 2);
>> +            bytestream_put_byte(&buf, 2);
>> +            bytestream_put_byte(&buf, avctx->width >> 8);
>> +            bytestream_put_byte(&buf, avctx->width & 0xFF);
>> +
>> +            for (int x = 0; x < avctx->width; x++)
>> +                float2rgbe(s->scanline + 4 * x, red[x], green[x],
>> blue[x]);
>> +            for (int p = 0; p < 4; p++)
>> +                rle(&buf, s->scanline + p, avctx->width);
>> +        }
>> +    }
>> +
>> +    pkt->flags |= AV_PKT_FLAG_KEY;
>> +
>> +    av_shrink_packet(pkt, buf - pkt->data);
>> +
>> +    *got_packet = 1;
>> +
>> +    return 0;
>> +}
> 
> _______________________________________________
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
> 
> To unsubscribe, visit link above, or email
> ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
James Almer July 6, 2022, 7:12 p.m. UTC | #6
On 7/6/2022 4:09 PM, Andreas Rheinhardt wrote:
> James Almer:
>> On 7/6/2022 3:47 PM, Paul B Mahol wrote:
>>> Added parser.
>>> The test will be added after this is merged.
>>
>> [...]
>>
>>> +static int hdr_parse(AVCodecParserContext *s, AVCodecContext *avctx,
>>> +                     const uint8_t **poutbuf, int *poutbuf_size,
>>> +                     const uint8_t *buf, int buf_size)
>>> +{
>>> +    HDRParseContext *ipc = s->priv_data;
>>> +    uint64_t state = ipc->pc.state64;
>>> +    int next = END_NOT_FOUND, i = 0;
>>> +
>>> +    s->pict_type = AV_PICTURE_TYPE_NONE;
>>
>> pict_type I, and key frame?
>>
>> [...]
>>
>>> +static int hdr_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
>>> +                            const AVFrame *frame, int *got_packet)
>>> +{
>>> +    HDREncContext *s = avctx->priv_data;
>>> +    int64_t packet_size;
>>> +    uint8_t *buf;
>>> +    int ret;
>>> +
>>> +    packet_size = avctx->width * avctx->height * 4LL + 1024LL;
>>> +    if ((ret = ff_get_encode_buffer(avctx, pkt, packet_size, 0)) < 0)
>>
>> Won't using a worst-case scenario like this result in massive packets?
>> av_shrink_packet() only changes the size field, it doesn't realloc the
>> buffer to shrink it.
>>
>> You could allocate a buffer in HDREncContext with
>> av_fast_padded_malloc() here, use the bytestream2 API below, then
>> allocate the packet with ff_get_encode_buffer() using
>> bytestream2_tell_p() as size and do a memcpy at the end.
>>
> 
> If that is the aim, then there is no reason to do the memcpy ourselves
> (or do the av_fast_padded_malloc directly): Just use ff_alloc_packet()
> and the generic code will ensure that the packet data will be made
> refcounted. And the temp buffer will be freed generically, too.

The idea is to keep the encoder working with user-provided buffers, 
while not allocating worst-case-scenario packets.

> 
>>> +        return ret;
>>> +
>>> +    buf = pkt->data;
>>> +    bytestream_put_str(&buf, "#?RADIANCE\n");
>>> +    bytestream_put_str(&buf, "SOFTWARE=lavc\n");
>>> +    ret = snprintf(buf, 32, "PIXASPECT=%f\n",
>>> av_q2d(av_inv_q(avctx->sample_aspect_ratio)));
>>> +    if (ret > 0)
>>> +        buf += ret;
>>> +    bytestream_put_str(&buf, "FORMAT=32-bit_rle_rgbe\n\n");
>>> +    ret = snprintf(buf, 32, "-Y %d +X %d\n", avctx->height,
>>> avctx->width);
>>> +    if (ret > 0)
>>> +        buf += ret;
>>> +
>>> +    for (int y = 0; y < avctx->height; y++) {
>>> +        const float *red   = (const float *)(frame->data[2] + y *
>>> frame->linesize[2]);
>>> +        const float *green = (const float *)(frame->data[0] + y *
>>> frame->linesize[0]);
>>> +        const float *blue  = (const float *)(frame->data[1] + y *
>>> frame->linesize[1]);
>>> +
>>> +        if (avctx->width < 8 || avctx->width > 0x7fff) {
>>> +            for (int x = 0; x < avctx->width; x++) {
>>> +                float2rgbe(buf, red[x], green[x], blue[x]);
>>> +                buf += 4;
>>> +            }
>>> +        } else {
>>> +            bytestream_put_byte(&buf, 2);
>>> +            bytestream_put_byte(&buf, 2);
>>> +            bytestream_put_byte(&buf, avctx->width >> 8);
>>> +            bytestream_put_byte(&buf, avctx->width & 0xFF);
>>> +
>>> +            for (int x = 0; x < avctx->width; x++)
>>> +                float2rgbe(s->scanline + 4 * x, red[x], green[x],
>>> blue[x]);
>>> +            for (int p = 0; p < 4; p++)
>>> +                rle(&buf, s->scanline + p, avctx->width);
>>> +        }
>>> +    }
>>> +
>>> +    pkt->flags |= AV_PKT_FLAG_KEY;
>>> +
>>> +    av_shrink_packet(pkt, buf - pkt->data);
>>> +
>>> +    *got_packet = 1;
>>> +
>>> +    return 0;
>>> +}
>>
>> _______________________________________________
>> ffmpeg-devel mailing list
>> ffmpeg-devel@ffmpeg.org
>> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>>
>> To unsubscribe, visit link above, or email
>> ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
> 
> _______________________________________________
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
> 
> To unsubscribe, visit link above, or email
> ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
Andreas Rheinhardt July 6, 2022, 7:21 p.m. UTC | #7
James Almer:
> 
> 
> On 7/6/2022 4:09 PM, Andreas Rheinhardt wrote:
>> James Almer:
>>> On 7/6/2022 3:47 PM, Paul B Mahol wrote:
>>>> Added parser.
>>>> The test will be added after this is merged.
>>>
>>> [...]
>>>
>>>> +static int hdr_parse(AVCodecParserContext *s, AVCodecContext *avctx,
>>>> +                     const uint8_t **poutbuf, int *poutbuf_size,
>>>> +                     const uint8_t *buf, int buf_size)
>>>> +{
>>>> +    HDRParseContext *ipc = s->priv_data;
>>>> +    uint64_t state = ipc->pc.state64;
>>>> +    int next = END_NOT_FOUND, i = 0;
>>>> +
>>>> +    s->pict_type = AV_PICTURE_TYPE_NONE;
>>>
>>> pict_type I, and key frame?
>>>
>>> [...]
>>>
>>>> +static int hdr_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
>>>> +                            const AVFrame *frame, int *got_packet)
>>>> +{
>>>> +    HDREncContext *s = avctx->priv_data;
>>>> +    int64_t packet_size;
>>>> +    uint8_t *buf;
>>>> +    int ret;
>>>> +
>>>> +    packet_size = avctx->width * avctx->height * 4LL + 1024LL;
>>>> +    if ((ret = ff_get_encode_buffer(avctx, pkt, packet_size, 0)) < 0)
>>>
>>> Won't using a worst-case scenario like this result in massive packets?
>>> av_shrink_packet() only changes the size field, it doesn't realloc the
>>> buffer to shrink it.
>>>
>>> You could allocate a buffer in HDREncContext with
>>> av_fast_padded_malloc() here, use the bytestream2 API below, then
>>> allocate the packet with ff_get_encode_buffer() using
>>> bytestream2_tell_p() as size and do a memcpy at the end.
>>>
>>
>> If that is the aim, then there is no reason to do the memcpy ourselves
>> (or do the av_fast_padded_malloc directly): Just use ff_alloc_packet()
>> and the generic code will ensure that the packet data will be made
>> refcounted. And the temp buffer will be freed generically, too.
> 
> The idea is to keep the encoder working with user provided buffers,
> while not allocating worst case scenario packets.
> 

This could also be done generically:
https://github.com/mkver/FFmpeg/commit/4c06a1e457fd00f2ad14c1328587d29964c9fa11
You were against it because you wanted some kind of checks (I don't
remember the details); checks the current code does not have.

- Andreas
Paul B Mahol July 6, 2022, 7:29 p.m. UTC | #8
On Wed, Jul 6, 2022 at 8:50 PM James Almer <jamrial@gmail.com> wrote:

> On 7/6/2022 3:47 PM, Paul B Mahol wrote:
> > Added parser.
> > The test will be added after this is merged.
>
> [...]
>
> > diff --git a/libavformat/img2.c b/libavformat/img2.c
> > index 870d2ebbc5..b03075f3b0 100644
> > --- a/libavformat/img2.c
> > +++ b/libavformat/img2.c
> > @@ -91,6 +91,7 @@ const IdStrMap ff_img_tags[] = {
> >      { AV_CODEC_ID_VBN,        "vbn"      },
> >      { AV_CODEC_ID_JPEGXL,     "jxl"      },
> >      { AV_CODEC_ID_QOI,        "qoi"      },
> > +    { AV_CODEC_ID_HDR,        "hdr"      },
>
> This could end up confusing people, so preferably use RHDR or some other
> combination for the codec id.
>

Take it or leave it.

It is HDR; not going to change.

Confusion cannot happen at all; it is a codec.

>
> >      { AV_CODEC_ID_NONE,       NULL       }
> >  };
>
> _______________________________________________
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>
> To unsubscribe, visit link above, or email
> ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
>
Paul B Mahol July 9, 2022, 8:16 a.m. UTC | #9
On Wed, Jul 6, 2022 at 8:47 PM Paul B Mahol <onemda@gmail.com> wrote:

>
> Added parser.
> The test will be added after this is merged.
>


Will apply soon with test.
diff mbox series

Patch

From 62fce2bfd811eb4fb86b5907d62e67f0a2d033ff Mon Sep 17 00:00:00 2001
From: Paul B Mahol <onemda@gmail.com>
Date: Sun, 3 Jul 2022 23:50:05 +0200
Subject: [PATCH] avcodec: add Radiance HDR image format support

Signed-off-by: Paul B Mahol <onemda@gmail.com>
---
 doc/general_contents.texi |   2 +
 libavcodec/Makefile       |   2 +
 libavcodec/allcodecs.c    |   2 +
 libavcodec/codec_desc.c   |   7 ++
 libavcodec/codec_id.h     |   1 +
 libavcodec/hdrdec.c       | 212 ++++++++++++++++++++++++++++++++++++++
 libavcodec/hdrenc.c       | 188 +++++++++++++++++++++++++++++++++
 libavformat/Makefile      |   1 +
 libavformat/allformats.c  |   1 +
 libavformat/img2.c        |   1 +
 libavformat/img2dec.c     |   8 ++
 libavformat/img2enc.c     |   2 +-
 12 files changed, 426 insertions(+), 1 deletion(-)
 create mode 100644 libavcodec/hdrdec.c
 create mode 100644 libavcodec/hdrenc.c

diff --git a/doc/general_contents.texi b/doc/general_contents.texi
index b1d3e3aa05..f25c784d3b 100644
--- a/doc/general_contents.texi
+++ b/doc/general_contents.texi
@@ -749,6 +749,8 @@  following image formats are supported:
     @tab OpenEXR
 @item FITS         @tab X @tab X
     @tab Flexible Image Transport System
+@item HDR          @tab X @tab X
+    @tab Radiance HDR RGBE Image format
 @item IMG          @tab   @tab X
     @tab GEM Raster image
 @item JPEG         @tab X @tab X
diff --git a/libavcodec/Makefile b/libavcodec/Makefile
index 457ec58377..df7e227a7f 100644
--- a/libavcodec/Makefile
+++ b/libavcodec/Makefile
@@ -403,6 +403,8 @@  OBJS-$(CONFIG_HAP_DECODER)             += hapdec.o hap.o
 OBJS-$(CONFIG_HAP_ENCODER)             += hapenc.o hap.o
 OBJS-$(CONFIG_HCA_DECODER)             += hcadec.o
 OBJS-$(CONFIG_HCOM_DECODER)            += hcom.o
+OBJS-$(CONFIG_HDR_DECODER)             += hdrdec.o
+OBJS-$(CONFIG_HDR_ENCODER)             += hdrenc.o
 OBJS-$(CONFIG_HEVC_DECODER)            += hevcdec.o hevc_mvs.o \
                                           hevc_cabac.o hevc_refs.o hevcpred.o    \
                                           hevcdsp.o hevc_filter.o hevc_data.o \
diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
index bdfc2f6f45..31d2c5979c 100644
--- a/libavcodec/allcodecs.c
+++ b/libavcodec/allcodecs.c
@@ -469,6 +469,8 @@  extern const FFCodec ff_gsm_decoder;
 extern const FFCodec ff_gsm_ms_decoder;
 extern const FFCodec ff_hca_decoder;
 extern const FFCodec ff_hcom_decoder;
+extern const FFCodec ff_hdr_encoder;
+extern const FFCodec ff_hdr_decoder;
 extern const FFCodec ff_iac_decoder;
 extern const FFCodec ff_ilbc_decoder;
 extern const FFCodec ff_imc_decoder;
diff --git a/libavcodec/codec_desc.c b/libavcodec/codec_desc.c
index 44ad2d1fe8..eeea15b1ef 100644
--- a/libavcodec/codec_desc.c
+++ b/libavcodec/codec_desc.c
@@ -1893,6 +1893,13 @@  static const AVCodecDescriptor codec_descriptors[] = {
         .long_name = NULL_IF_CONFIG_SMALL("PHM (Portable HalfFloatMap) image"),
         .props     = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSLESS,
     },
+    {
+        .id        = AV_CODEC_ID_HDR,
+        .type      = AVMEDIA_TYPE_VIDEO,
+        .name      = "hdr",
+        .long_name = NULL_IF_CONFIG_SMALL("HDR (Radiance RGBE format) image"),
+        .props     = AV_CODEC_PROP_INTRA_ONLY | AV_CODEC_PROP_LOSSY,
+    },
 
     /* various PCM "codecs" */
     {
diff --git a/libavcodec/codec_id.h b/libavcodec/codec_id.h
index 81fb316cff..005efa6334 100644
--- a/libavcodec/codec_id.h
+++ b/libavcodec/codec_id.h
@@ -312,6 +312,7 @@  enum AVCodecID {
     AV_CODEC_ID_JPEGXL,
     AV_CODEC_ID_QOI,
     AV_CODEC_ID_PHM,
+    AV_CODEC_ID_HDR,
 
     /* various PCM "codecs" */
     AV_CODEC_ID_FIRST_AUDIO = 0x10000,     ///< A dummy id pointing at the start of audio codecs
diff --git a/libavcodec/hdrdec.c b/libavcodec/hdrdec.c
new file mode 100644
index 0000000000..2178a824bd
--- /dev/null
+++ b/libavcodec/hdrdec.c
@@ -0,0 +1,212 @@ 
+/*
+ * Radiance HDR image format
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <stdlib.h>
+
+#include "libavutil/imgutils.h"
+#include "avcodec.h"
+#include "internal.h"
+#include "bytestream.h"
+#include "codec_internal.h"
+#include "thread.h"
+
+#define MINELEN 8
+#define MAXELEN 0x7fff
+
+static int hdr_get_line(GetByteContext *gb, uint8_t *buffer, int size)
+{
+    int n = 0, c;
+
+    memset(buffer, 0, size);
+
+    do {
+        c = bytestream2_get_byte(gb);
+        if (n < size - 1)
+            buffer[n++] = c;
+    } while (bytestream2_get_bytes_left(gb) > 0 && c != '\n');
+
+    return 0;
+}
+
+static float convert(int expo, int val)
+{
+    if (expo == -128) {
+        return 0.f;
+    } else {
+        const float v = val / 256.f;
+
+        return ldexpf(v, expo);
+    }
+}
+
+static int decompress(uint8_t *scanline, int w, GetByteContext *gb)
+{
+    int rshift = 0;
+
+    while (w > 0) {
+        scanline[0] = bytestream2_get_byte(gb);
+        scanline[1] = bytestream2_get_byte(gb);
+        scanline[2] = bytestream2_get_byte(gb);
+        scanline[3] = bytestream2_get_byte(gb);
+
+        if (scanline[0] == 1 &&
+            scanline[1] == 1 &&
+            scanline[2] == 1) {
+            for (int i = scanline[3] << rshift; i > 0; i--) {
+                memcpy(scanline, scanline - 4, 4);
+                scanline += 4;
+                w--;
+            }
+            rshift += 8;
+        } else {
+            scanline += 4;
+            w--;
+            rshift = 0;
+        }
+    }
+
+    return 1;
+}
+
+static int hdr_decode_frame(AVCodecContext *avctx, AVFrame *p,
+                            int *got_frame, AVPacket *avpkt)
+{
+    const uint8_t *buf = avpkt->data;
+    int ret, buf_size = avpkt->size;
+    int width = 0, height = 0;
+    GetByteContext gb;
+    uint8_t line[512];
+
+    bytestream2_init(&gb, buf, buf_size);
+    hdr_get_line(&gb, line, sizeof(line));
+    if (memcmp("#?RADIANCE\n", line, 11))
+        return AVERROR_INVALIDDATA;
+
+    do {
+        hdr_get_line(&gb, line, sizeof(line));
+    } while (line[0] != '\n');
+
+    hdr_get_line(&gb, line, sizeof(line));
+    if (sscanf(line, "-Y %d +X %d\n", &height, &width) == 2) {
+        ;
+    } else if (sscanf(line, "+Y %d +X %d\n", &height, &width) == 2) {
+        ;
+    } else if (sscanf(line, "-Y %d -X %d\n", &height, &width) == 2) {
+        ;
+    } else if (sscanf(line, "+Y %d -X %d\n", &height, &width) == 2) {
+        ;
+    } else if (sscanf(line, "-X %d +Y %d\n", &width, &height) == 2) {
+        ;
+    } else if (sscanf(line, "+X %d +Y %d\n", &width, &height) == 2) {
+        ;
+    } else if (sscanf(line, "-X %d -Y %d\n", &width, &height) == 2) {
+        ;
+    } else if (sscanf(line, "+X %d -Y %d\n", &width, &height) == 2) {
+        ;
+    }
+
+    if ((ret = ff_set_dimensions(avctx, width, height)) < 0)
+        return ret;
+
+    avctx->pix_fmt = AV_PIX_FMT_GBRPF32;
+    if ((ret = ff_thread_get_buffer(avctx, p, 0)) < 0)
+        return ret;
+
+    for (int y = 0; y < height; y++) {
+        float *dst_r = (float *)(p->data[2] + y * p->linesize[2]);
+        float *dst_g = (float *)(p->data[0] + y * p->linesize[0]);
+        float *dst_b = (float *)(p->data[1] + y * p->linesize[1]);
+        uint8_t *scanline = p->data[0] + y * p->linesize[0];
+        int i;
+
+        if (width < MINELEN || width > MAXELEN) {
+            decompress(scanline, width, &gb);
+            goto convert;
+        }
+
+        i = bytestream2_peek_byte(&gb);
+        if (i != 2) {
+            decompress(scanline, width, &gb);
+            goto convert;
+        }
+        bytestream2_skip(&gb, 1);
+
+        scanline[1] = bytestream2_get_byte(&gb);
+        scanline[2] = bytestream2_get_byte(&gb);
+        i = bytestream2_get_byte(&gb);
+
+        if (scanline[1] != 2 || scanline[2] & 128) {
+            scanline[0] = 2;
+            scanline[3] = i;
+            decompress(scanline + 4, width - 1, &gb);
+            goto convert;
+        }
+
+        for (int i = 0; i < 4; i++) {
+            uint8_t *scanline = p->data[0] + y * p->linesize[0] + i;
+
+            for (int j = 0; j < width * 4; ) {
+                int run = bytestream2_get_byte(&gb);
+                if (run > 128) {
+                    uint8_t val = bytestream2_get_byte(&gb);
+                    run &= 127;
+                    while (run--) {
+                        scanline[j] = val;
+                        j += 4;
+                    }
+                } else {
+                    while (run--) {
+                        scanline[j] = bytestream2_get_byte(&gb);
+                        j += 4;
+                    }
+                }
+            }
+        }
+
+convert:
+        for (int x = 0; x < width; x++) {
+            uint8_t rgbe[4];
+            int expo;
+
+            memcpy(rgbe, p->data[0] + y * p->linesize[0] + x * 4, 4);
+            expo = rgbe[3] - 128;
+
+            dst_r[x] = convert(expo, rgbe[0]);
+            dst_b[x] = convert(expo, rgbe[2]);
+            dst_g[x] = convert(expo, rgbe[1]);
+        }
+    }
+
+    p->key_frame = 1;
+    p->pict_type = AV_PICTURE_TYPE_I;
+
+    *got_frame   = 1;
+
+    return buf_size;
+}
+
+const FFCodec ff_hdr_decoder = {
+    .p.name         = "hdr",
+    .p.long_name    = NULL_IF_CONFIG_SMALL("HDR (Radiance RGBE format) image"),
+    .p.type         = AVMEDIA_TYPE_VIDEO,
+    .p.id           = AV_CODEC_ID_HDR,
+    .p.capabilities = AV_CODEC_CAP_DR1 | AV_CODEC_CAP_FRAME_THREADS,
+    FF_CODEC_DECODE_CB(hdr_decode_frame),
+};
diff --git a/libavcodec/hdrenc.c b/libavcodec/hdrenc.c
new file mode 100644
index 0000000000..dfacb8d725
--- /dev/null
+++ b/libavcodec/hdrenc.c
@@ -0,0 +1,188 @@ 
+/*
+ * Radiance HDR image format
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/imgutils.h"
+#include "avcodec.h"
+#include "bytestream.h"
+#include "codec_internal.h"
+#include "encode.h"
+
+typedef struct HDREncContext {
+    uint8_t *scanline;
+} HDREncContext;
+
+static av_cold int hdr_encode_init(AVCodecContext *avctx)
+{
+    HDREncContext *s = avctx->priv_data;
+
+    s->scanline = av_calloc(avctx->width * 4, sizeof(*s->scanline));
+    if (!s->scanline)
+        return AVERROR(ENOMEM);
+
+    return 0;
+}
+
+static av_cold int hdr_encode_close(AVCodecContext *avctx)
+{
+    HDREncContext *s = avctx->priv_data;
+
+    av_freep(&s->scanline);
+
+    return 0;
+}
+
+static void bytestream_put_str(uint8_t **buf, const char *const line)
+{
+    bytestream_put_buffer(buf, line, strlen(line));
+}
+
+static void float2rgbe(uint8_t *rgbe, float red, float green, float blue)
+{
+    float v;
+    int e;
+
+    v = FFMAX3(red, green, blue);
+
+    if (v < 1e-32f) {
+        rgbe[0] = rgbe[1] = rgbe[2] = rgbe[3] = 0;
+    } else {
+        v = frexpf(v, &e) * 256.f / v;
+
+        rgbe[0] = av_clip_uint8(red * v);
+        rgbe[1] = av_clip_uint8(green * v);
+        rgbe[2] = av_clip_uint8(blue * v);
+        rgbe[3] = av_clip_uint8(e + 128);
+    }
+}
+
+static void rle(uint8_t **buffer, const uint8_t *data, int width)
+{
+#define MIN_RLE 4
+    int cur = 0;
+
+    while (cur < width) {
+        int run_count = 0, old_run_count = 0;
+        int beg_run = cur;
+        uint8_t buf[2];
+
+        while (run_count < MIN_RLE && beg_run < width) {
+            beg_run += run_count;
+            old_run_count = run_count;
+            run_count = 1;
+            while ((beg_run + run_count < width) && (run_count < 127) &&
+                   (data[beg_run * 4] == data[(beg_run + run_count) * 4]))
+                run_count++;
+        }
+
+        if ((old_run_count > 1) && (old_run_count == beg_run - cur)) {
+            buf[0] = 128 + old_run_count;
+            buf[1] = data[cur * 4];
+            bytestream_put_buffer(buffer, buf, sizeof(buf));
+            cur = beg_run;
+        }
+
+        while (cur < beg_run) {
+            int nonrun_count = FFMIN(128, beg_run - cur);
+            buf[0] = nonrun_count;
+            bytestream_put_byte(buffer, buf[0]);
+            for (int n = 0; n < nonrun_count; n++)
+                bytestream_put_byte(buffer, data[(cur + n) * 4]);
+            cur += nonrun_count;
+        }
+
+        if (run_count >= MIN_RLE) {
+            buf[0] = 128 + run_count;
+            buf[1] = data[beg_run * 4];
+            bytestream_put_buffer(buffer, buf, sizeof(buf));
+            cur += run_count;
+        }
+    }
+}
+
+static int hdr_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+                            const AVFrame *frame, int *got_packet)
+{
+    HDREncContext *s = avctx->priv_data;
+    int64_t packet_size;
+    uint8_t *buf;
+    int ret;
+
+    packet_size = avctx->width * avctx->height * 4LL + 1024LL;
+    if ((ret = ff_alloc_packet(avctx, pkt, packet_size)) < 0)
+        return ret;
+
+    buf = pkt->data;
+    bytestream_put_str(&buf, "#?RADIANCE\n");
+    bytestream_put_str(&buf, "FORMAT=32-bit_rle_rgbe\n\n");
+    bytestream_put_str(&buf, "-Y ");
+    ret = snprintf(buf, 16, "%d ", avctx->height);
+    if (ret > 0)
+        buf += ret;
+    bytestream_put_str(&buf, "+X ");
+    ret = snprintf(buf, 16, "%d\n", avctx->width);
+    if (ret > 0)
+        buf += ret;
+
+    for (int y = 0; y < avctx->height; y++) {
+        const float *red   = (const float *)(frame->data[2] + y * frame->linesize[2]);
+        const float *green = (const float *)(frame->data[0] + y * frame->linesize[0]);
+        const float *blue  = (const float *)(frame->data[1] + y * frame->linesize[1]);
+
+        if (avctx->width < 8 || avctx->width > 0x7fff) {
+            for (int x = 0; x < avctx->width; x++) {
+                float2rgbe(buf, red[x], green[x], blue[x]);
+                buf += 4;
+            }
+        } else {
+            bytestream_put_byte(&buf, 2);
+            bytestream_put_byte(&buf, 2);
+            bytestream_put_byte(&buf, avctx->width >> 8);
+            bytestream_put_byte(&buf, avctx->width & 0xFF);
+
+            for (int x = 0; x < avctx->width; x++)
+                float2rgbe(s->scanline + 4 * x, red[x], green[x], blue[x]);
+            for (int p = 0; p < 4; p++)
+                rle(&buf, s->scanline + p, avctx->width);
+        }
+    }
+
+    pkt->size = buf - pkt->data;
+
+    *got_packet = 1;
+
+    return 0;
+}
+
+const FFCodec ff_hdr_encoder = {
+    .p.name         = "hdr",
+    .p.long_name    = NULL_IF_CONFIG_SMALL("HDR (Radiance RGBE format) image"),
+    .priv_data_size = sizeof(HDREncContext),
+    .p.type         = AVMEDIA_TYPE_VIDEO,
+    .p.id           = AV_CODEC_ID_HDR,
+    .p.capabilities = AV_CODEC_CAP_FRAME_THREADS,
+    .init           = hdr_encode_init,
+    FF_CODEC_ENCODE_CB(hdr_encode_frame),
+    .close          = hdr_encode_close,
+    .p.pix_fmts     = (const enum AVPixelFormat[]){
+        AV_PIX_FMT_GBRPF32,
+        AV_PIX_FMT_NONE
+    },
+    .caps_internal  = FF_CODEC_CAP_INIT_THREADSAFE,
+};
diff --git a/libavformat/Makefile b/libavformat/Makefile
index 6c6b779080..e420384355 100644
--- a/libavformat/Makefile
+++ b/libavformat/Makefile
@@ -273,6 +273,7 @@  OBJS-$(CONFIG_IMAGE_DPX_PIPE_DEMUXER)     += img2dec.o img2.o
 OBJS-$(CONFIG_IMAGE_EXR_PIPE_DEMUXER)     += img2dec.o img2.o
 OBJS-$(CONFIG_IMAGE_GEM_PIPE_DEMUXER)     += img2dec.o img2.o
 OBJS-$(CONFIG_IMAGE_GIF_PIPE_DEMUXER)     += img2dec.o img2.o
+OBJS-$(CONFIG_IMAGE_HDR_PIPE_DEMUXER)     += img2dec.o img2.o
 OBJS-$(CONFIG_IMAGE_J2K_PIPE_DEMUXER)     += img2dec.o img2.o
 OBJS-$(CONFIG_IMAGE_JPEG_PIPE_DEMUXER)    += img2dec.o img2.o
 OBJS-$(CONFIG_IMAGE_JPEGLS_PIPE_DEMUXER)  += img2dec.o img2.o
diff --git a/libavformat/allformats.c b/libavformat/allformats.c
index 32698b857f..ae4479fb7a 100644
--- a/libavformat/allformats.c
+++ b/libavformat/allformats.c
@@ -508,6 +508,7 @@  extern const AVInputFormat  ff_image_dpx_pipe_demuxer;
 extern const AVInputFormat  ff_image_exr_pipe_demuxer;
 extern const AVInputFormat  ff_image_gem_pipe_demuxer;
 extern const AVInputFormat  ff_image_gif_pipe_demuxer;
+extern const AVInputFormat  ff_image_hdr_pipe_demuxer;
 extern const AVInputFormat  ff_image_j2k_pipe_demuxer;
 extern const AVInputFormat  ff_image_jpeg_pipe_demuxer;
 extern const AVInputFormat  ff_image_jpegls_pipe_demuxer;
diff --git a/libavformat/img2.c b/libavformat/img2.c
index 870d2ebbc5..b03075f3b0 100644
--- a/libavformat/img2.c
+++ b/libavformat/img2.c
@@ -91,6 +91,7 @@  const IdStrMap ff_img_tags[] = {
     { AV_CODEC_ID_VBN,        "vbn"      },
     { AV_CODEC_ID_JPEGXL,     "jxl"      },
     { AV_CODEC_ID_QOI,        "qoi"      },
+    { AV_CODEC_ID_HDR,        "hdr"      },
     { AV_CODEC_ID_NONE,       NULL       }
 };
 
diff --git a/libavformat/img2dec.c b/libavformat/img2dec.c
index e7ff26e5dd..4c6c70139c 100644
--- a/libavformat/img2dec.c
+++ b/libavformat/img2dec.c
@@ -1070,6 +1070,13 @@  static int pam_probe(const AVProbeData *p)
     return pnm_magic_check(p, 7) ? pnm_probe(p) : 0;
 }
 
+static int hdr_probe(const AVProbeData *p) /* image2 pipe probe: detect a Radiance HDR stream by its magic line */
+{
+    if (!memcmp(p->buf, "#?RADIANCE\n", 11)) /* NOTE(review): assumes >= 11 readable bytes in p->buf (FFmpeg zero-pads probe buffers) — confirm; "#?RGBE" variants are not matched */
+        return AVPROBE_SCORE_MAX;
+    return 0;
+}
+
 static int xbm_probe(const AVProbeData *p)
 {
     if (!memcmp(p->buf, "/* XBM X10 format */", 20))
@@ -1221,6 +1228,7 @@  IMAGEAUTO_DEMUXER(dpx,       DPX)
 IMAGEAUTO_DEMUXER(exr,       EXR)
 IMAGEAUTO_DEMUXER(gem,       GEM)
 IMAGEAUTO_DEMUXER(gif,       GIF)
+IMAGEAUTO_DEMUXER(hdr,       HDR)
 IMAGEAUTO_DEMUXER_EXT(j2k,   JPEG2000, J2K)
 IMAGEAUTO_DEMUXER_EXT(jpeg,  MJPEG, JPEG)
 IMAGEAUTO_DEMUXER(jpegls,    JPEGLS)
diff --git a/libavformat/img2enc.c b/libavformat/img2enc.c
index 0a11fae34e..734f93c686 100644
--- a/libavformat/img2enc.c
+++ b/libavformat/img2enc.c
@@ -267,7 +267,7 @@  const AVOutputFormat ff_image2_muxer = {
     .long_name      = NULL_IF_CONFIG_SMALL("image2 sequence"),
     .extensions     = "bmp,dpx,exr,jls,jpeg,jpg,jxl,ljpg,pam,pbm,pcx,pfm,pgm,pgmyuv,phm,"
                       "png,ppm,sgi,tga,tif,tiff,jp2,j2c,j2k,xwd,sun,ras,rs,im1,im8,"
-                      "im24,sunras,vbn,xbm,xface,pix,y,avif,qoi",
+                      "im24,sunras,vbn,xbm,xface,pix,y,avif,qoi,hdr",
     .priv_data_size = sizeof(VideoMuxData),
     .video_codec    = AV_CODEC_ID_MJPEG,
     .write_header   = write_header,
-- 
2.36.1