FFmpeg  4.3
f_graphmonitor.c
/*
 * Copyright (c) 2018 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
20 
21 #include "float.h"
22 
23 #include "libavutil/pixdesc.h"
24 #include "libavutil/eval.h"
25 #include "libavutil/intreadwrite.h"
26 #include "libavutil/opt.h"
27 #include "libavutil/timestamp.h"
29 #include "avfilter.h"
30 #include "filters.h"
31 #include "formats.h"
32 #include "internal.h"
33 #include "video.h"
34 
typedef struct GraphMonitorContext {
    const AVClass *class;

    int w, h;
    float opacity;
    int mode;
    int flags;
    AVRational frame_rate;

    int64_t pts;
    int64_t next_pts;
    uint8_t white[4];
    uint8_t yellow[4];
    uint8_t red[4];
    uint8_t green[4];
    uint8_t bg[4];
} GraphMonitorContext;

enum {
    MODE_QUEUE = 1 << 0,
    MODE_FCIN  = 1 << 1,
    MODE_FCOUT = 1 << 2,
    MODE_PTS   = 1 << 3,
    MODE_TIME  = 1 << 4,
    MODE_TB    = 1 << 5,
    MODE_FMT   = 1 << 6,
    MODE_SIZE  = 1 << 7,
    MODE_RATE  = 1 << 8,
};

#define OFFSET(x) offsetof(GraphMonitorContext, x)
#define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption graphmonitor_options[] = {
    { "size",    "set monitor size",  OFFSET(w),          AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, VF },
    { "s",       "set monitor size",  OFFSET(w),          AV_OPT_TYPE_IMAGE_SIZE, {.str="hd720"}, 0, 0, VF },
    { "opacity", "set video opacity", OFFSET(opacity),    AV_OPT_TYPE_FLOAT,      {.dbl=.9}, 0, 1, VF },
    { "o",       "set video opacity", OFFSET(opacity),    AV_OPT_TYPE_FLOAT,      {.dbl=.9}, 0, 1, VF },
    { "mode",    "set mode",          OFFSET(mode),       AV_OPT_TYPE_INT,        {.i64=0}, 0, 1, VF, "mode" },
    { "m",       "set mode",          OFFSET(mode),       AV_OPT_TYPE_INT,        {.i64=0}, 0, 1, VF, "mode" },
    { "full",    NULL, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, VF, "mode" },
    { "compact", NULL, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, VF, "mode" },
    { "flags",   "set flags",         OFFSET(flags),      AV_OPT_TYPE_FLAGS,      {.i64=MODE_QUEUE}, 0, INT_MAX, VF, "flags" },
    { "f",       "set flags",         OFFSET(flags),      AV_OPT_TYPE_FLAGS,      {.i64=MODE_QUEUE}, 0, INT_MAX, VF, "flags" },
    { "queue",           NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_QUEUE}, 0, 0, VF, "flags" },
    { "frame_count_in",  NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FCIN},  0, 0, VF, "flags" },
    { "frame_count_out", NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FCOUT}, 0, 0, VF, "flags" },
    { "pts",             NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_PTS},   0, 0, VF, "flags" },
    { "time",            NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_TIME},  0, 0, VF, "flags" },
    { "timebase",        NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_TB},    0, 0, VF, "flags" },
    { "format",          NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_FMT},   0, 0, VF, "flags" },
    { "size",            NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_SIZE},  0, 0, VF, "flags" },
    { "rate",            NULL, 0, AV_OPT_TYPE_CONST, {.i64=MODE_RATE},  0, 0, VF, "flags" },
    { "rate",    "set video rate",    OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, VF },
    { "r",       "set video rate",    OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, VF },
    { NULL }
};
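
/*
 * Illustrative note (not from the original source): in a filtergraph string
 * these options would typically be combined as something like
 *     graphmonitor=size=hd1080:opacity=0.5:mode=compact:flags=queue+frame_count_out
 * where several of the named "flags" constants above are joined with '+'.
 */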

static int query_formats(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_RGBA,
        AV_PIX_FMT_NONE
    };
    int ret;

    AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
    if ((ret = ff_formats_ref(fmts_list, &outlink->in_formats)) < 0)
        return ret;

    return 0;
}

/* Fill the whole output frame with the background colour (alpha taken from the "opacity" option). */
static void clear_image(GraphMonitorContext *s, AVFrame *out, AVFilterLink *outlink)
{
    int bg = AV_RN32(s->bg);

    for (int i = 0; i < out->height; i++)
        for (int j = 0; j < out->width; j++)
            AV_WN32(out->data[0] + i * out->linesize[0] + j * 4, bg);
}

static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint8_t *color)
{
    const uint8_t *font;
    int font_height;
    int i;

    font = avpriv_cga_font, font_height = 8;

    if (y + 8 >= pic->height ||
        x + strlen(txt) * 8 >= pic->width)
        return;

    for (i = 0; txt[i]; i++) {
        int char_y, mask;

        uint8_t *p = pic->data[0] + y*pic->linesize[0] + (x + i*8)*4;
        for (char_y = 0; char_y < font_height; char_y++) {
            for (mask = 0x80; mask; mask >>= 1) {
                if (font[txt[i] * font_height + char_y] & mask) {
                    p[0] = color[0];
                    p[1] = color[1];
                    p[2] = color[2];
                }
                p += 4;
            }
            p += pic->linesize[0] - 8 * 4;
        }
    }
}

/* Return 1 if any of the filter's input or output links has frames queued. */
static int filter_have_queued(AVFilterContext *filter)
{
    for (int j = 0; j < filter->nb_inputs; j++) {
        AVFilterLink *l = filter->inputs[j];
        size_t frames = ff_inlink_queued_frames(l);

        if (frames)
            return 1;
    }

    for (int j = 0; j < filter->nb_outputs; j++) {
        AVFilterLink *l = filter->outputs[j];
        size_t frames = ff_inlink_queued_frames(l);

        if (frames)
            return 1;
    }

    return 0;
}

/* Draw the per-link statistics selected via the "flags" option, starting at (xpos, ypos). */
static void draw_items(AVFilterContext *ctx, AVFrame *out,
                       int xpos, int ypos,
                       AVFilterLink *l,
                       size_t frames)
{
    GraphMonitorContext *s = ctx->priv;
    char buffer[1024] = { 0 };

    if (s->flags & MODE_FMT) {
        if (l->type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buffer, sizeof(buffer)-1, " | format: %s",
                     av_get_pix_fmt_name(l->format));
        } else if (l->type == AVMEDIA_TYPE_AUDIO) {
            snprintf(buffer, sizeof(buffer)-1, " | format: %s",
                     av_get_sample_fmt_name(l->format));
        }
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_SIZE) {
        if (l->type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buffer, sizeof(buffer)-1, " | size: %dx%d", l->w, l->h);
        } else if (l->type == AVMEDIA_TYPE_AUDIO) {
            snprintf(buffer, sizeof(buffer)-1, " | channels: %d", l->channels);
        }
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_RATE) {
        if (l->type == AVMEDIA_TYPE_VIDEO) {
            snprintf(buffer, sizeof(buffer)-1, " | fps: %d/%d", l->frame_rate.num, l->frame_rate.den);
        } else if (l->type == AVMEDIA_TYPE_AUDIO) {
            snprintf(buffer, sizeof(buffer)-1, " | samplerate: %d", l->sample_rate);
        }
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_TB) {
        snprintf(buffer, sizeof(buffer)-1, " | tb: %d/%d", l->time_base.num, l->time_base.den);
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_QUEUE) {
        snprintf(buffer, sizeof(buffer)-1, " | queue: ");
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
        snprintf(buffer, sizeof(buffer)-1, "%"SIZE_SPECIFIER, frames);
        drawtext(out, xpos, ypos, buffer, frames > 0 ? frames >= 10 ? frames >= 50 ? s->red : s->yellow : s->green : s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_FCIN) {
        snprintf(buffer, sizeof(buffer)-1, " | in: %"PRId64, l->frame_count_in);
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_FCOUT) {
        snprintf(buffer, sizeof(buffer)-1, " | out: %"PRId64, l->frame_count_out);
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_PTS) {
        snprintf(buffer, sizeof(buffer)-1, " | pts: %s", av_ts2str(l->current_pts_us));
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
    if (s->flags & MODE_TIME) {
        snprintf(buffer, sizeof(buffer)-1, " | time: %s", av_ts2timestr(l->current_pts_us, &AV_TIME_BASE_Q));
        drawtext(out, xpos, ypos, buffer, s->white);
        xpos += strlen(buffer) * 8;
    }
}

static int create_frame(AVFilterContext *ctx, int64_t pts)
{
    GraphMonitorContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int xpos, ypos = 0;

    out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);

    clear_image(s, out, outlink);

    for (int i = 0; i < ctx->graph->nb_filters; i++) {
        AVFilterContext *filter = ctx->graph->filters[i];
        char buffer[1024] = { 0 };

        if (s->mode && !filter_have_queued(filter))
            continue;

        xpos = 0;
        drawtext(out, xpos, ypos, filter->name, s->white);
        xpos += strlen(filter->name) * 8 + 10;
        drawtext(out, xpos, ypos, filter->filter->name, s->white);
        ypos += 10;
        for (int j = 0; j < filter->nb_inputs; j++) {
            AVFilterLink *l = filter->inputs[j];
            size_t frames = ff_inlink_queued_frames(l);

            if (s->mode && !frames)
                continue;

            xpos = 10;
            snprintf(buffer, sizeof(buffer)-1, "in%d: ", j);
            drawtext(out, xpos, ypos, buffer, s->white);
            xpos += strlen(buffer) * 8;
            drawtext(out, xpos, ypos, l->src->name, s->white);
            xpos += strlen(l->src->name) * 8 + 10;
            draw_items(ctx, out, xpos, ypos, l, frames);
            ypos += 10;
        }

        ypos += 2;
        for (int j = 0; j < filter->nb_outputs; j++) {
            AVFilterLink *l = filter->outputs[j];
            size_t frames = ff_inlink_queued_frames(l);

            if (s->mode && !frames)
                continue;

            xpos = 10;
            snprintf(buffer, sizeof(buffer)-1, "out%d: ", j);
            drawtext(out, xpos, ypos, buffer, s->white);
            xpos += strlen(buffer) * 8;
            drawtext(out, xpos, ypos, l->dst->name, s->white);
            xpos += strlen(l->dst->name) * 8 + 10;
            draw_items(ctx, out, xpos, ypos, l, frames);
            ypos += 10;
        }
        ypos += 5;
    }

    out->pts = pts;
    s->pts = pts + 1;
    return ff_filter_frame(outlink, out);
}

/* Consume queued input frames and emit monitor frames at the configured output frame rate. */
static int activate(AVFilterContext *ctx)
{
    GraphMonitorContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    int64_t pts = AV_NOPTS_VALUE;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    if (ff_inlink_queued_frames(inlink)) {
        AVFrame *frame = NULL;
        int ret;

        ret = ff_inlink_consume_frame(inlink, &frame);
        if (ret < 0)
            return ret;
        if (ret > 0) {
            pts = frame->pts;
            av_frame_free(&frame);
        }
    }

    if (pts != AV_NOPTS_VALUE) {
        pts = av_rescale_q(pts, inlink->time_base, outlink->time_base);
        if (s->pts == AV_NOPTS_VALUE)
            s->pts = pts;
        s->next_pts = pts;
    }

    if (s->pts < s->next_pts && ff_outlink_frame_wanted(outlink))
        return create_frame(ctx, s->pts);

    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}

static int config_output(AVFilterLink *outlink)
{
    GraphMonitorContext *s = outlink->src->priv;

    s->bg[3] = 255 * s->opacity;
    s->white[0] = s->white[1] = s->white[2] = 255;
    s->yellow[0] = s->yellow[1] = 255;
    s->red[0] = 255;
    s->green[1] = 255;
    s->pts = AV_NOPTS_VALUE;
    s->next_pts = AV_NOPTS_VALUE;
    outlink->w = s->w;
    outlink->h = s->h;
    outlink->sample_aspect_ratio = (AVRational){1,1};
    outlink->frame_rate = s->frame_rate;
    outlink->time_base = av_inv_q(s->frame_rate);

    return 0;
}

#if CONFIG_GRAPHMONITOR_FILTER

AVFILTER_DEFINE_CLASS(graphmonitor);

static const AVFilterPad graphmonitor_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_VIDEO,
    },
    { NULL }
};

static const AVFilterPad graphmonitor_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_vf_graphmonitor = {
    .name          = "graphmonitor",
    .description   = NULL_IF_CONFIG_SMALL("Show various filtergraph stats."),
    .priv_size     = sizeof(GraphMonitorContext),
    .priv_class    = &graphmonitor_class,
    .query_formats = query_formats,
    .activate      = activate,
    .inputs        = graphmonitor_inputs,
    .outputs       = graphmonitor_outputs,
};

#endif // CONFIG_GRAPHMONITOR_FILTER

#if CONFIG_AGRAPHMONITOR_FILTER

#define agraphmonitor_options graphmonitor_options
AVFILTER_DEFINE_CLASS(agraphmonitor);

static const AVFilterPad agraphmonitor_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

static const AVFilterPad agraphmonitor_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_VIDEO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_avf_agraphmonitor = {
    .name          = "agraphmonitor",
    .description   = NULL_IF_CONFIG_SMALL("Show various filtergraph stats."),
    .priv_size     = sizeof(GraphMonitorContext),
    .priv_class    = &agraphmonitor_class,
    .query_formats = query_formats,
    .activate      = activate,
    .inputs        = agraphmonitor_inputs,
    .outputs       = agraphmonitor_outputs,
};
#endif // CONFIG_AGRAPHMONITOR_FILTER
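
/*
 * Usage sketch (not part of f_graphmonitor.c): a minimal program that builds a
 * self-contained filtergraph containing graphmonitor through the public
 * libavfilter API. The graph description string, the choice of testsrc/nullsink
 * and the stripped-down error handling are illustrative assumptions, not taken
 * from this file.
 */
#include <libavfilter/avfilter.h>

int main(void)
{
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterInOut *inputs = NULL, *outputs = NULL;
    int ret;

    if (!graph)
        return 1;

    /* Closed graph: synthetic video source -> graphmonitor overlay -> discarding sink. */
    ret = avfilter_graph_parse_ptr(graph,
                                   "testsrc=size=hd720:rate=25,"
                                   "graphmonitor=flags=queue+pts+time:opacity=0.8,"
                                   "nullsink",
                                   &inputs, &outputs, NULL);
    if (ret >= 0)
        ret = avfilter_graph_config(graph, NULL); /* negotiates formats, including the filter's RGBA output */

    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    avfilter_graph_free(&graph);
    return ret < 0 ? 1 : 0;
}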