diff mbox series

[FFmpeg-devel,v4,6/6] decklink_enc: add support for playout of 608 captions in MOV files

Message ID 1682699871-22331-7-git-send-email-dheitmueller@ltnglobal.com
State New
Headers show
Series Add support for Closed Caption FIFO | expand

Checks

Context Check Description
andriy/make_x86 success Make finished
andriy/make_fate_x86 success Make fate finished

Commit Message

Devin Heitmueller April 28, 2023, 4:37 p.m. UTC
Unlike other cases where the closed captions are embedded in the
video stream as MPEG-2 userdata or H.264 SEI data, with MOV files
the captions are often found on a separate "e608" subtitle track.

Add support for playout of such files, leveraging the new ccfifo
mechanism to ensure that they are embedded into VANC at the correct
rate (since e608 packets often contain batches of multiple 608 pairs).

Note this patch includes a new file named libavdevice/ccfifo.c, which
allows the ccfifo functionality in libavfilter to be reused even if
doing shared builds.  This is the same approach used for log2_tab.c.

Signed-off-by: Devin Heitmueller <dheitmueller@ltnglobal.com>
---
 libavdevice/Makefile          |  1 +
 libavdevice/ccfifo.c          | 24 ++++++++++++++++
 libavdevice/decklink_common.h |  3 ++
 libavdevice/decklink_enc.cpp  | 66 +++++++++++++++++++++++++++++++++++++++++++
 libavdevice/decklink_enc_c.c  |  2 +-
 5 files changed, 95 insertions(+), 1 deletion(-)
 create mode 100644 libavdevice/ccfifo.c

Comments

Lance Wang April 30, 2023, 11:01 p.m. UTC | #1
On Fri, Apr 28, 2023 at 11:43 PM Devin Heitmueller <
devin.heitmueller@ltnglobal.com> wrote:

> Unlike other cases where the closed captions are embedded in the
> video stream as MPEG-2 userdata or H.264 SEI data, with MOV files
> the captions are often found on a separate "e608" subtitle track.
>
> Add support for playout of such files, leveraging the new ccfifo
> mechanism to ensure that they are embedded into VANC at the correct
> rate (since e608 packets often contain batches of multiple 608 pairs).
>
> Note this patch includes a new file named libavdevice/ccfifo.c, which
> allows the ccfifo functionality in libavfilter to be reused even if
> doing shared builds.  This is the same approach used for log2_tab.c.
>
>
This implementation is limited to DeckLink SDI output only. If possible,
could we implement this functionality at the demuxer layer, and then pass
the captions through as SEI side data? That way, we could also easily
convert such streams to video streams with embedded CC in a streaming setup.


Signed-off-by: Devin Heitmueller <dheitmueller@ltnglobal.com>
> ---
>  libavdevice/Makefile          |  1 +
>  libavdevice/ccfifo.c          | 24 ++++++++++++++++
>  libavdevice/decklink_common.h |  3 ++
>  libavdevice/decklink_enc.cpp  | 66
> +++++++++++++++++++++++++++++++++++++++++++
>  libavdevice/decklink_enc_c.c  |  2 +-
>  5 files changed, 95 insertions(+), 1 deletion(-)
>  create mode 100644 libavdevice/ccfifo.c
>
> diff --git a/libavdevice/Makefile b/libavdevice/Makefile
> index 8a62822..c304492 100644
> --- a/libavdevice/Makefile
> +++ b/libavdevice/Makefile
> @@ -57,6 +57,7 @@ OBJS-$(CONFIG_LIBDC1394_INDEV)           += libdc1394.o
>
>  # Objects duplicated from other libraries for shared builds
>  SHLIBOBJS-$(CONFIG_DECKLINK_INDEV)       += reverse.o
> +SHLIBOBJS-$(CONFIG_DECKLINK_OUTDEV)      += ccfifo.o
>
>  # Windows resource file
>  SHLIBOBJS-$(HAVE_GNU_WINDRES)            += avdeviceres.o
> diff --git a/libavdevice/ccfifo.c b/libavdevice/ccfifo.c
> new file mode 100644
> index 0000000..9007094
> --- /dev/null
> +++ b/libavdevice/ccfifo.c
> @@ -0,0 +1,24 @@
> +/*
> + * CEA-708 Closed Captioning FIFO
> + * Copyright (c) 2023 LTN Global Communications
> + *
> + * Author: Devin Heitmueller <dheitmueller@ltnglobal.com>
> + *
> + * This file is part of FFmpeg.
> + *
> + * FFmpeg is free software; you can redistribute it and/or
> + * modify it under the terms of the GNU Lesser General Public
> + * License as published by the Free Software Foundation; either
> + * version 2.1 of the License, or (at your option) any later version.
> + *
> + * FFmpeg is distributed in the hope that it will be useful,
> + * but WITHOUT ANY WARRANTY; without even the implied warranty of
> + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
> + * Lesser General Public License for more details.
> + *
> + * You should have received a copy of the GNU Lesser General Public
> + * License along with FFmpeg; if not, write to the Free Software
> + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
> 02110-1301 USA
> + */
> +
> +#include "libavfilter/ccfifo.c"
> diff --git a/libavdevice/decklink_common.h b/libavdevice/decklink_common.h
> index 088e165..0d33f94 100644
> --- a/libavdevice/decklink_common.h
> +++ b/libavdevice/decklink_common.h
> @@ -31,6 +31,7 @@
>
>  extern "C" {
>  #include "libavcodec/packet_internal.h"
> +#include "libavfilter/ccfifo.h"
>  }
>  #include "libavutil/thread.h"
>  #include "decklink_common_c.h"
> @@ -112,6 +113,8 @@ struct decklink_ctx {
>      /* Capture buffer queue */
>      AVPacketQueue queue;
>
> +    AVCCFifo *cc_fifo;      ///< closed captions
> +
>      /* Streams present */
>      int audio;
>      int video;
> diff --git a/libavdevice/decklink_enc.cpp b/libavdevice/decklink_enc.cpp
> index 9f1a8df..616e9c7 100644
> --- a/libavdevice/decklink_enc.cpp
> +++ b/libavdevice/decklink_enc.cpp
> @@ -326,6 +326,25 @@ static int create_s337_payload(AVPacket *pkt, uint8_t
> **outbuf, int *outsize)
>      return 0;
>  }
>
> +static int decklink_setup_subtitle(AVFormatContext *avctx, AVStream *st)
> +{
> +    int ret = -1;
> +
> +    switch(st->codecpar->codec_id) {
> +#if CONFIG_LIBKLVANC
> +    case AV_CODEC_ID_EIA_608:
> +        /* No special setup required */
> +        ret = 0;
> +        break;
> +#endif
> +    default:
> +        av_log(avctx, AV_LOG_ERROR, "Unsupported subtitle codec
> specified\n");
> +        break;
> +    }
> +
> +    return ret;
> +}
> +
>  av_cold int ff_decklink_write_trailer(AVFormatContext *avctx)
>  {
>      struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
> @@ -352,6 +371,7 @@ av_cold int ff_decklink_write_trailer(AVFormatContext
> *avctx)
>      klvanc_context_destroy(ctx->vanc_ctx);
>  #endif
>
> +    ff_ccfifo_freep(&ctx->cc_fifo);
>      av_freep(&cctx->ctx);
>
>      return 0;
> @@ -503,6 +523,23 @@ out:
>          free(afd_words);
>  }
>
> +/* Parse any EIA-608 subtitles sitting on the queue, and write packet
> side data
> +   that will later be handled by construct_cc... */
> +static void parse_608subs(AVFormatContext *avctx, struct decklink_ctx
> *ctx, AVPacket *pkt)
> +{
> +    uint8_t *cc_data;
> +    size_t cc_size;
> +    int ret;
> +
> +    ret = ff_ccfifo_injectbytes(ctx->cc_fifo, &cc_data, &cc_size);
> +    if (ret == 0) {
> +        uint8_t *cc_buf = av_packet_new_side_data(pkt,
> AV_PKT_DATA_A53_CC, cc_size);
> +        if (cc_buf)
> +            memcpy(cc_buf, cc_data, cc_size);
> +        av_freep(&cc_data);
> +    }
> +}
> +
>  static int decklink_construct_vanc(AVFormatContext *avctx, struct
> decklink_ctx *ctx,
>                                     AVPacket *pkt, decklink_frame *frame,
>                                     AVStream *st)
> @@ -513,6 +550,7 @@ static int decklink_construct_vanc(AVFormatContext
> *avctx, struct decklink_ctx *
>      if (!ctx->supports_vanc)
>          return 0;
>
> +    parse_608subs(avctx, ctx, pkt);
>      construct_cc(avctx, ctx, pkt, &vanc_lines);
>      construct_afd(avctx, ctx, pkt, &vanc_lines, st);
>
> @@ -704,12 +742,23 @@ static int
> decklink_write_audio_packet(AVFormatContext *avctx, AVPacket *pkt)
>      return ret;
>  }
>
> +static int decklink_write_subtitle_packet(AVFormatContext *avctx,
> AVPacket *pkt)
> +{
> +    struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
> +    struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
> +
> +    ff_ccfifo_extractbytes(ctx->cc_fifo, pkt->data, pkt->size);
> +
> +    return 0;
> +}
> +
>  extern "C" {
>
>  av_cold int ff_decklink_write_header(AVFormatContext *avctx)
>  {
>      struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
>      struct decklink_ctx *ctx;
> +    AVRational frame_rate;
>      unsigned int n;
>      int ret;
>
> @@ -768,12 +817,27 @@ av_cold int ff_decklink_write_header(AVFormatContext
> *avctx)
>          } else if (c->codec_type == AVMEDIA_TYPE_VIDEO) {
>              if (decklink_setup_video(avctx, st))
>                  goto error;
> +        } else if (c->codec_type == AVMEDIA_TYPE_SUBTITLE) {
> +            if (decklink_setup_subtitle(avctx, st))
> +                goto error;
>          } else {
>              av_log(avctx, AV_LOG_ERROR, "Unsupported stream type.\n");
>              goto error;
>          }
>      }
>
> +    for (n = 0; n < avctx->nb_streams; n++) {
> +        AVStream *st = avctx->streams[n];
> +        AVCodecParameters *c = st->codecpar;
> +
> +        if(c->codec_type == AVMEDIA_TYPE_SUBTITLE)
> +            avpriv_set_pts_info(st, 64, ctx->bmd_tb_num, ctx->bmd_tb_den);
> +    }
> +
> +    frame_rate = av_make_q(ctx->bmd_tb_den, ctx->bmd_tb_num);
> +    if (!(ctx->cc_fifo = ff_ccfifo_alloc(&frame_rate, ctx)))
> +        av_log(ctx, AV_LOG_VERBOSE, "Failure to setup CC FIFO queue\n");
> +
>      return 0;
>
>  error:
> @@ -789,6 +853,8 @@ int ff_decklink_write_packet(AVFormatContext *avctx,
> AVPacket *pkt)
>          return decklink_write_video_packet(avctx, pkt);
>      else if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
>          return decklink_write_audio_packet(avctx, pkt);
> +    else if (st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE)
> +        return decklink_write_subtitle_packet(avctx, pkt);
>
>      return AVERROR(EIO);
>  }
> diff --git a/libavdevice/decklink_enc_c.c b/libavdevice/decklink_enc_c.c
> index f7e3150..0a3984b 100644
> --- a/libavdevice/decklink_enc_c.c
> +++ b/libavdevice/decklink_enc_c.c
> @@ -77,7 +77,7 @@ const FFOutputFormat ff_decklink_muxer = {
>      .p.long_name      = NULL_IF_CONFIG_SMALL("Blackmagic DeckLink
> output"),
>      .p.audio_codec    = AV_CODEC_ID_PCM_S16LE,
>      .p.video_codec    = AV_CODEC_ID_WRAPPED_AVFRAME,
> -    .p.subtitle_codec = AV_CODEC_ID_NONE,
> +    .p.subtitle_codec = AV_CODEC_ID_EIA_608,
>      .p.flags          = AVFMT_NOFILE,
>      .p.priv_class     = &decklink_muxer_class,
>      .get_device_list = ff_decklink_list_output_devices,
> --
> 1.8.3.1
>
> _______________________________________________
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>
> To unsubscribe, visit link above, or email
> ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
>
Devin Heitmueller May 2, 2023, 2:47 p.m. UTC | #2
Hi Lance,

On Sun, Apr 30, 2023 at 7:01 PM Lance Wang <lance.lmwang@gmail.com> wrote:
> This implementation is limited to DeckLink SDI output only. If possible,
> could we implement this functionality at the demuxer layer, and then pass
> the captions through as SEI side data? That way, we could also easily
> convert such streams to video streams with embedded CC in a streaming setup.

I did consider this approach, and it does raise the more fundamental
issue about trying to minimize the number of ways we have to process
CC data depending on whether it originated in SEI metadata or in
separate packets.  There are a number of problems with what you are
proposing though:

1.  There could be multiple CC streams within an MOV file but only a
single CC stream can be embedded into AVFrame side data.  Hence you
would have to specify some sort of argument to the demux to decide
which stream to embed.  This makes it much more difficult to do things
like ingest a stream with multiple CC streams and have separate
outputs with different CC streams.  Performing the work on the output
side allows you to use the standard "-map" mechanism to dictate which
CC streams are routed to which outputs, and to deliver the content to
different outputs with different CC streams.

2.  I have use cases in mind where the captions originate from sources
other than MOV files, where the video framerate is not known (or there
is no video at all in the source).  For example, I want to be able to
consume video from a TS source while simultaneously demuxing an SCC or
MCC file and sending the result in the output.  In such cases the
correct rate control for the captions can only be implemented on the
output side, since in such cases the SCC/MCC demux doesn't have access
to the corresponding video stream (it won't know the video framerate,
nor is it able to embed the captions into the AVFrame side data).

I can indeed imagine there are use cases where doing it further up the
pipeline could be useful.  For example, if you were taking in an MOV
file and wanting to produce a TS where the captions need to be
embedded as SEI metadata (hence you would need the e608 packets
converted to AVFrame side data prior to reaching the encoder).
However I don't see this as a substitute for being able to do it on
the output side when that is the most flexible approach for those
other use cases described above.

Much of this comes down to the fundamental limitations of the ffmpeg
framework related to being able to move data back/forth between data
packets and side data.  You can't feed data packets into
AVFilterGraphs.  You can't easily combine data from data packets into
AVFrames carrying video (or extract side data from AVFrames to
generate data packets), etc.  You can't use BSF filters to combine
data from multiple inputs such as compressed video streams and data
streams after encoding.  I've run across all these limitations over
the years, and at this point I'm trying to take the least invasive
approach possible that doesn't require changes to the fundamental
frameworks for handling data packets.

It's worth noting that nothing you have suggested is an "either/or"
situation.  Because caption processing is inexpensive, there isn't any
significant overhead in having multiple AvCCFifo instances in the
pipeline.  In other words, if you added a feature to the MOV demuxer,
it wouldn't prevent us from running the packets through an AvCCFifo
instance on the output side.  The patch proposed doesn't preclude you
adding such a feature on the demux side in the future.

Devin
Lance Wang May 3, 2023, 7:36 a.m. UTC | #3
On Tue, May 2, 2023 at 10:48 PM Devin Heitmueller <
devin.heitmueller@ltnglobal.com> wrote:

> Hi Lance,
>
> On Sun, Apr 30, 2023 at 7:01 PM Lance Wang <lance.lmwang@gmail.com> wrote:
> > This implementation is limited to DeckLink SDI output only. If
> possible,
> > could we implement this functionality at the demuxer layer, and then pass
> > the captions through as SEI side data? That way, we could also easily
> > convert such streams to video streams with embedded CC in a streaming setup.
>
> I did consider this approach, and it does raise the more fundamental
> issue about trying to minimize the number of ways we have to process
> CC data depending on whether it originated in SEI metadata or in
> separate packets.  There are a number of problems with what you are
> proposing though:
>
> 1.  There could be multiple CC streams within an MOV file but only a
> single CC stream can be embedded into AVFrame side data.  Hence you
> would have to specify some sort of argument to the demux to decide
> which stream to embed.  This makes it much more difficult to do things
> like ingest a stream with multiple CC streams and have separate
> outputs with different CC streams.  Performing the work on the output
> side allows you to use the standard "-map" mechanism to dictate which
> CC streams are routed to which outputs, and to deliver the content to
> different outputs with different CC streams.
>


> 2.  I have use cases in mind where the captions originate from sources
> other than MOV files, where the video framerate is not known (or there
> is no video at all in the source).  For example, I want to be able to
> consume video from a TS source while simultaneously demuxing an SCC or
> MCC file and sending the result in the output.  In such cases the
> correct rate control for the captions can only be implemented on the
> output side, since in such cases the SCC/MCC demux doesn't have access
> to the corresponding video stream (it won't know the video framerate,
> nor is it able to embed the captions into the AVFrame side data).
>
> I can indeed imagine there are use cases where doing it further up the
> pipeline could be useful.  For example, if you were taking in an MOV
> file and wanting to produce a TS where the captions need to be
> embedded as SEI metadata (hence you would need the e608 packets
> converted to AVFrame side data prior to reaching the encoder).
> However I don't see this as a substitute for being able to do it on
> the output side when that is the most flexible approach for those
> other use cases described above.


> Much of this comes down to the fundamental limitations of the ffmpeg
> framework related to being able to move data back/forth between data
> packets and side data.  You can't feed data packets into
> AVFilterGraphs.  You can't easily combine data from data packets into
> AVFrames carrying video (or extract side data from AVFrames to
> generate data packets), etc.  You can't use BSF filters to combine
> data from multiple inputs such as compressed video streams and data
> streams after encoding.  I've run across all these limitations over
> the years, and at this point I'm trying to take the least invasive
> approach possible that doesn't require changes to the fundamental
> frameworks for handling data packets.
>
It's worth noting that nothing you have suggested is an "either/or"
> situation.  Because caption processing is inexpensive, there isn't any
> significant overhead in having multiple AvCCFifo instances in the
> pipeline.  In other words, if you added a feature to the MOV demuxer,
> it wouldn't prevent us from running the packets through an AvCCFifo
> instance on the output side.  The patch proposed doesn't preclude you
> adding such a feature on the demux side in the future.
>
>
Thanks for the explanation.  This approach is acceptable to me.


Devin
>
> --
> Devin Heitmueller, Senior Software Engineer
> LTN Global Communications
> o: +1 (301) 363-1001
> w: https://ltnglobal.com  e: devin.heitmueller@ltnglobal.com
> _______________________________________________
> ffmpeg-devel mailing list
> ffmpeg-devel@ffmpeg.org
> https://ffmpeg.org/mailman/listinfo/ffmpeg-devel
>
> To unsubscribe, visit link above, or email
> ffmpeg-devel-request@ffmpeg.org with subject "unsubscribe".
>
diff mbox series

Patch

diff --git a/libavdevice/Makefile b/libavdevice/Makefile
index 8a62822..c304492 100644
--- a/libavdevice/Makefile
+++ b/libavdevice/Makefile
@@ -57,6 +57,7 @@  OBJS-$(CONFIG_LIBDC1394_INDEV)           += libdc1394.o
 
 # Objects duplicated from other libraries for shared builds
 SHLIBOBJS-$(CONFIG_DECKLINK_INDEV)       += reverse.o
+SHLIBOBJS-$(CONFIG_DECKLINK_OUTDEV)      += ccfifo.o
 
 # Windows resource file
 SHLIBOBJS-$(HAVE_GNU_WINDRES)            += avdeviceres.o
diff --git a/libavdevice/ccfifo.c b/libavdevice/ccfifo.c
new file mode 100644
index 0000000..9007094
--- /dev/null
+++ b/libavdevice/ccfifo.c
@@ -0,0 +1,24 @@ 
+/*
+ * CEA-708 Closed Captioning FIFO
+ * Copyright (c) 2023 LTN Global Communications
+ *
+ * Author: Devin Heitmueller <dheitmueller@ltnglobal.com>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavfilter/ccfifo.c"
diff --git a/libavdevice/decklink_common.h b/libavdevice/decklink_common.h
index 088e165..0d33f94 100644
--- a/libavdevice/decklink_common.h
+++ b/libavdevice/decklink_common.h
@@ -31,6 +31,7 @@ 
 
 extern "C" {
 #include "libavcodec/packet_internal.h"
+#include "libavfilter/ccfifo.h"
 }
 #include "libavutil/thread.h"
 #include "decklink_common_c.h"
@@ -112,6 +113,8 @@  struct decklink_ctx {
     /* Capture buffer queue */
     AVPacketQueue queue;
 
+    AVCCFifo *cc_fifo;      ///< closed captions
+
     /* Streams present */
     int audio;
     int video;
diff --git a/libavdevice/decklink_enc.cpp b/libavdevice/decklink_enc.cpp
index 9f1a8df..616e9c7 100644
--- a/libavdevice/decklink_enc.cpp
+++ b/libavdevice/decklink_enc.cpp
@@ -326,6 +326,25 @@  static int create_s337_payload(AVPacket *pkt, uint8_t **outbuf, int *outsize)
     return 0;
 }
 
+static int decklink_setup_subtitle(AVFormatContext *avctx, AVStream *st)
+{
+    int ret = -1;
+
+    switch(st->codecpar->codec_id) {
+#if CONFIG_LIBKLVANC
+    case AV_CODEC_ID_EIA_608:
+        /* No special setup required */
+        ret = 0;
+        break;
+#endif
+    default:
+        av_log(avctx, AV_LOG_ERROR, "Unsupported subtitle codec specified\n");
+        break;
+    }
+
+    return ret;
+}
+
 av_cold int ff_decklink_write_trailer(AVFormatContext *avctx)
 {
     struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
@@ -352,6 +371,7 @@  av_cold int ff_decklink_write_trailer(AVFormatContext *avctx)
     klvanc_context_destroy(ctx->vanc_ctx);
 #endif
 
+    ff_ccfifo_freep(&ctx->cc_fifo);
     av_freep(&cctx->ctx);
 
     return 0;
@@ -503,6 +523,23 @@  out:
         free(afd_words);
 }
 
+/* Parse any EIA-608 subtitles sitting on the queue, and write packet side data
+   that will later be handled by construct_cc... */
+static void parse_608subs(AVFormatContext *avctx, struct decklink_ctx *ctx, AVPacket *pkt)
+{
+    uint8_t *cc_data;
+    size_t cc_size;
+    int ret;
+
+    ret = ff_ccfifo_injectbytes(ctx->cc_fifo, &cc_data, &cc_size);
+    if (ret == 0) {
+        uint8_t *cc_buf = av_packet_new_side_data(pkt, AV_PKT_DATA_A53_CC, cc_size);
+        if (cc_buf)
+            memcpy(cc_buf, cc_data, cc_size);
+        av_freep(&cc_data);
+    }
+}
+
 static int decklink_construct_vanc(AVFormatContext *avctx, struct decklink_ctx *ctx,
                                    AVPacket *pkt, decklink_frame *frame,
                                    AVStream *st)
@@ -513,6 +550,7 @@  static int decklink_construct_vanc(AVFormatContext *avctx, struct decklink_ctx *
     if (!ctx->supports_vanc)
         return 0;
 
+    parse_608subs(avctx, ctx, pkt);
     construct_cc(avctx, ctx, pkt, &vanc_lines);
     construct_afd(avctx, ctx, pkt, &vanc_lines, st);
 
@@ -704,12 +742,23 @@  static int decklink_write_audio_packet(AVFormatContext *avctx, AVPacket *pkt)
     return ret;
 }
 
+static int decklink_write_subtitle_packet(AVFormatContext *avctx, AVPacket *pkt)
+{
+    struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
+    struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
+
+    ff_ccfifo_extractbytes(ctx->cc_fifo, pkt->data, pkt->size);
+
+    return 0;
+}
+
 extern "C" {
 
 av_cold int ff_decklink_write_header(AVFormatContext *avctx)
 {
     struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
     struct decklink_ctx *ctx;
+    AVRational frame_rate;
     unsigned int n;
     int ret;
 
@@ -768,12 +817,27 @@  av_cold int ff_decklink_write_header(AVFormatContext *avctx)
         } else if (c->codec_type == AVMEDIA_TYPE_VIDEO) {
             if (decklink_setup_video(avctx, st))
                 goto error;
+        } else if (c->codec_type == AVMEDIA_TYPE_SUBTITLE) {
+            if (decklink_setup_subtitle(avctx, st))
+                goto error;
         } else {
             av_log(avctx, AV_LOG_ERROR, "Unsupported stream type.\n");
             goto error;
         }
     }
 
+    for (n = 0; n < avctx->nb_streams; n++) {
+        AVStream *st = avctx->streams[n];
+        AVCodecParameters *c = st->codecpar;
+
+        if(c->codec_type == AVMEDIA_TYPE_SUBTITLE)
+            avpriv_set_pts_info(st, 64, ctx->bmd_tb_num, ctx->bmd_tb_den);
+    }
+
+    frame_rate = av_make_q(ctx->bmd_tb_den, ctx->bmd_tb_num);
+    if (!(ctx->cc_fifo = ff_ccfifo_alloc(&frame_rate, ctx)))
+        av_log(ctx, AV_LOG_VERBOSE, "Failure to setup CC FIFO queue\n");
+
     return 0;
 
 error:
@@ -789,6 +853,8 @@  int ff_decklink_write_packet(AVFormatContext *avctx, AVPacket *pkt)
         return decklink_write_video_packet(avctx, pkt);
     else if (st->codecpar->codec_type == AVMEDIA_TYPE_AUDIO)
         return decklink_write_audio_packet(avctx, pkt);
+    else if (st->codecpar->codec_type == AVMEDIA_TYPE_SUBTITLE)
+        return decklink_write_subtitle_packet(avctx, pkt);
 
     return AVERROR(EIO);
 }
diff --git a/libavdevice/decklink_enc_c.c b/libavdevice/decklink_enc_c.c
index f7e3150..0a3984b 100644
--- a/libavdevice/decklink_enc_c.c
+++ b/libavdevice/decklink_enc_c.c
@@ -77,7 +77,7 @@  const FFOutputFormat ff_decklink_muxer = {
     .p.long_name      = NULL_IF_CONFIG_SMALL("Blackmagic DeckLink output"),
     .p.audio_codec    = AV_CODEC_ID_PCM_S16LE,
     .p.video_codec    = AV_CODEC_ID_WRAPPED_AVFRAME,
-    .p.subtitle_codec = AV_CODEC_ID_NONE,
+    .p.subtitle_codec = AV_CODEC_ID_EIA_608,
     .p.flags          = AVFMT_NOFILE,
     .p.priv_class     = &decklink_muxer_class,
     .get_device_list = ff_decklink_list_output_devices,