FFmpeg 4.3
vf_readvitc.c
/*
 * Copyright (c) 2016 Tobias Rapp
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

/**
 * @file
 * Filter for reading the vertical interval timecode (VITC).
 * See also https://en.wikipedia.org/wiki/Vertical_interval_timecode
 */
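
/*
 * Usage sketch (file name and exact ffprobe options are illustrative only;
 * see the FFmpeg documentation for authoritative syntax):
 *
 *   ffprobe -f lavfi "movie=input.mov,readvitc" \
 *           -show_entries frame_tags=lavfi.readvitc.found,lavfi.readvitc.tc_str
 *
 * filter_frame() below tags every frame with "lavfi.readvitc.found" (0 or 1)
 * and, when a timecode was decoded, with "lavfi.readvitc.tc_str" in the form
 * HH:MM:SS:FF (HH:MM:SS;FF when the drop-frame flag is set).
 */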

#include "libavutil/common.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/timecode.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"

#define LINE_DATA_SIZE 9

typedef struct ReadVitcContext {
    const AVClass *class;

    int scan_max;
    double thr_b;
    double thr_w;

    int threshold_black;                 // thr_b scaled to the 8-bit luma range
    int threshold_white;                 // thr_w scaled to the 8-bit luma range
    int threshold_gray;                  // midpoint between the black and white thresholds
    int grp_width;
    uint8_t line_data[LINE_DATA_SIZE];   // eight decoded data bytes plus the received CRC
    char tcbuf[AV_TIMECODE_STR_SIZE];
} ReadVitcContext;

#define OFFSET(x) offsetof(ReadVitcContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption readvitc_options[] = {
    { "scan_max", "maximum line numbers to scan for VITC data", OFFSET(scan_max), AV_OPT_TYPE_INT, {.i64 = 45 }, -1, INT_MAX, FLAGS },
    { "thr_b", "black color threshold", OFFSET(thr_b), AV_OPT_TYPE_DOUBLE, {.dbl = 0.2 }, 0, 1.0, FLAGS },
    { "thr_w", "white color threshold", OFFSET(thr_w), AV_OPT_TYPE_DOUBLE, {.dbl = 0.6 }, 0, 1.0, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(readvitc);

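/*
 * Note: a VITC line is made of nine ten-bit groups, each opening with the
 * sync bits '1','0' followed by eight data bits, the ninth group carrying
 * the CRC (see read_vitc_line() below).  line[0..7] are the de-interleaved
 * data bytes and line[8] the received CRC.  The XOR chain below appears to
 * rebuild the serial bit stream, sync bits included, fold it byte-wise and
 * rotate the result into place, presumably matching the x^8 + 1 generator
 * polynomial VITC uses.
 */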
static uint8_t get_vitc_crc( uint8_t *line ) {
    uint8_t crc;

    crc = 0x01 | (line[0] << 2);
    crc ^= (line[0] >> 6) | 0x04 | (line[1] << 4);
    crc ^= (line[1] >> 4) | 0x10 | (line[2] << 6);
    crc ^= (line[2] >> 2) | 0x40;
    crc ^= line[3];
    crc ^= 0x01 | (line[4] << 2);
    crc ^= (line[4] >> 6) | 0x04 | (line[5] << 4);
    crc ^= (line[5] >> 4) | 0x10 | (line[6] << 6);
    crc ^= (line[6] >> 2) | 0x40;
    crc ^= line[7];
    crc ^= 0x01;
    crc = (crc >> 2) | (crc << 6);  // rotate byte right by two bits
    return crc;
}

static inline uint8_t get_pit_avg3( uint8_t *line, int i ) {
    return ((line[i-1] + line[i] + line[i+1]) / 3);
}

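/*
 * Scan the luma plane from the top for a VITC line: find each white-to-black
 * sync transition, sample the eight data bits of the group, and accept the
 * line once nine groups have been read and the CRC in the ninth group
 * matches.  Returns 1 (ctx->line_data filled) on success, 0 otherwise.
 */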
static int read_vitc_line( ReadVitcContext *ctx, uint8_t *src, int line_size, int width, int height )
{
    uint8_t *scan_line;
    int grp_index, pit_index;
    int grp_start_pos;
    uint8_t pit_value;
    int x, y, res = 0;

    if (ctx->scan_max >= 0)
        height = FFMIN(height, ctx->scan_max);

    // scan lines for VITC data, starting from the top
    for (y = 0; y < height; y++) {
        scan_line = src;
        memset(ctx->line_data, 0, LINE_DATA_SIZE);
        grp_index = 0;
        x = 0;
        while ((x < width) && (grp_index < 9)) {
            // search next sync pattern
            while ((x < width) && (scan_line[x] < ctx->threshold_white))
                x++;
            while ((x < width) && (scan_line[x] > ctx->threshold_black))
                x++;
            x = FFMAX(x - ((ctx->grp_width+10) / 20), 1); // step back a half pit
            grp_start_pos = x;
            if ((grp_start_pos + ctx->grp_width) > width)
                break;  // not enough pixels for reading a whole pit group
            pit_value = get_pit_avg3(scan_line, x);
            if (pit_value < ctx->threshold_white)
                break;  // first sync bit mismatch
            x = grp_start_pos + ((ctx->grp_width) / 10);
            pit_value = get_pit_avg3(scan_line, x);
            if (pit_value > ctx->threshold_black)
                break;  // second sync bit mismatch
            for (pit_index = 0; pit_index <= 7; pit_index++) {
                x = grp_start_pos + (((pit_index+2)*ctx->grp_width) / 10);
                pit_value = get_pit_avg3(scan_line, x);
                if (pit_value > ctx->threshold_gray)
                    ctx->line_data[grp_index] |= (1 << pit_index);
            }
            grp_index++;
        }
        if ((grp_index == 9) && (get_vitc_crc(ctx->line_data) == ctx->line_data[8])) {
            res = 1;
            break;
        }
        src += line_size;
    }

    return res;
}

static unsigned bcd2uint(uint8_t high, uint8_t low)
{
    if (high > 9 || low > 9)
        return 0;
    return 10*high + low;
}

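/*
 * Format the decoded BCD digits as "HH:MM:SS:FF"; a ';' separates seconds and
 * frames when the drop-frame flag (bit 2 of the second data byte) is set,
 * e.g. "10:30:15;12".
 */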
static char *make_vitc_tc_string(char *buf, uint8_t *line)
{
    unsigned hh   = bcd2uint(line[7] & 0x03, line[6] & 0x0f); // 6-bit hours
    unsigned mm   = bcd2uint(line[5] & 0x07, line[4] & 0x0f); // 7-bit minutes
    unsigned ss   = bcd2uint(line[3] & 0x07, line[2] & 0x0f); // 7-bit seconds
    unsigned ff   = bcd2uint(line[1] & 0x03, line[0] & 0x0f); // 6-bit frames
    unsigned drop = (line[1] & 0x04); // 1-bit drop flag
    snprintf(buf, AV_TIMECODE_STR_SIZE, "%02u:%02u:%02u%c%02u",
             hh, mm, ss, drop ? ';' : ':', ff);
    return buf;
}

static av_cold int init(AVFilterContext *ctx)
{
    ReadVitcContext *s = ctx->priv;

    s->threshold_black = s->thr_b * UINT8_MAX;
    s->threshold_white = s->thr_w * UINT8_MAX;
    if (s->threshold_black > s->threshold_white) {
        av_log(ctx, AV_LOG_WARNING, "Black color threshold is higher than white color threshold (%g > %g)\n",
               s->thr_b, s->thr_w);
        return AVERROR(EINVAL);
    }
    s->threshold_gray = s->threshold_white - ((s->threshold_white - s->threshold_black) / 2);
    av_log(ctx, AV_LOG_DEBUG, "threshold_black:%d threshold_white:%d threshold_gray:%d\n",
           s->threshold_black, s->threshold_white, s->threshold_gray);

    return 0;
}

static int config_props(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ReadVitcContext *s = ctx->priv;

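    /* Expected pixel width of one 10-bit VITC bit group; with this factor the
     * nine groups together span 9 * 5/48 = 93.75% of the frame width. */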
    s->grp_width = inlink->w * 5 / 48;
    av_log(ctx, AV_LOG_DEBUG, "w:%d h:%d grp_width:%d scan_max:%d\n",
           inlink->w, inlink->h, s->grp_width, s->scan_max);
    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    static const enum AVPixelFormat pixel_fmts[] = {
        AV_PIX_FMT_GRAY8,
        AV_PIX_FMT_NV12,
        AV_PIX_FMT_NV16,
        AV_PIX_FMT_NV21,
        AV_PIX_FMT_YUV410P,
        AV_PIX_FMT_YUV411P,
        AV_PIX_FMT_YUV420P,
        AV_PIX_FMT_YUV422P,
        AV_PIX_FMT_YUV440P,
        AV_PIX_FMT_YUV444P,
        AV_PIX_FMT_YUVA420P,
        AV_PIX_FMT_YUVA422P,
        AV_PIX_FMT_YUVA444P,
        AV_PIX_FMT_YUVJ411P,
        AV_PIX_FMT_YUVJ420P,
        AV_PIX_FMT_YUVJ422P,
        AV_PIX_FMT_YUVJ440P,
        AV_PIX_FMT_YUVJ444P,
        AV_PIX_FMT_NONE
    };
    AVFilterFormats *fmts_list = ff_make_format_list(pixel_fmts);
    if (!fmts_list)
        return AVERROR(ENOMEM);
    return ff_set_common_formats(ctx, fmts_list);
}

static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ReadVitcContext *s = ctx->priv;
    int found;

    found = read_vitc_line(s, frame->data[0], frame->linesize[0], inlink->w, inlink->h);
    av_dict_set(&frame->metadata, "lavfi.readvitc.found", (found ? "1" : "0"), 0);
    if (found)
        av_dict_set(&frame->metadata, "lavfi.readvitc.tc_str", make_vitc_tc_string(s->tcbuf, s->line_data), 0);

    return ff_filter_frame(outlink, frame);
}

static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .filter_frame = filter_frame,
        .config_props = config_props,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

AVFilter ff_vf_readvitc = {
    .name          = "readvitc",
    .description   = NULL_IF_CONFIG_SMALL("Read vertical interval timecode and write it to frame metadata."),
    .priv_size     = sizeof(ReadVitcContext),
    .priv_class    = &readvitc_class,
    .inputs        = inputs,
    .outputs       = outputs,
    .init          = init,
    .query_formats = query_formats,
};