FFmpeg 4.2.1
avf_showwaves.c
1 /*
2  * Copyright (c) 2012 Stefano Sabatini
3  *
4  * This file is part of FFmpeg.
5  *
6  * FFmpeg is free software; you can redistribute it and/or
7  * modify it under the terms of the GNU Lesser General Public
8  * License as published by the Free Software Foundation; either
9  * version 2.1 of the License, or (at your option) any later version.
10  *
11  * FFmpeg is distributed in the hope that it will be useful,
12  * but WITHOUT ANY WARRANTY; without even the implied warranty of
13  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14  * Lesser General Public License for more details.
15  *
16  * You should have received a copy of the GNU Lesser General Public
17  * License along with FFmpeg; if not, write to the Free Software
18  * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
19  */
20 
21 /**
22  * @file
23  * audio to video multimedia filter
24  */
25 
26 #include "libavutil/avassert.h"
27 #include "libavutil/avstring.h"
29 #include "libavutil/opt.h"
30 #include "libavutil/parseutils.h"
31 #include "avfilter.h"
32 #include "filters.h"
33 #include "formats.h"
34 #include "audio.h"
35 #include "video.h"
36 #include "internal.h"
37 
38 enum ShowWavesMode {
39  MODE_POINT,
40  MODE_LINE,
41  MODE_P2P,
42  MODE_CENTERED_LINE,
43  MODE_NB,
44 };
45 
46 enum ShowWavesScale {
47  SCALE_LIN,
48  SCALE_LOG,
49  SCALE_SQRT,
50  SCALE_CBRT,
51  SCALE_NB,
52 };
53 
54 enum ShowWavesDrawMode {
55  DRAW_SCALE,
56  DRAW_FULL,
57  DRAW_NB,
58 };
59 
60 struct frame_node {
61  AVFrame *frame;
62  struct frame_node *next;
63 };
64 
65 typedef struct ShowWavesContext {
66  const AVClass *class;
67  int w, h;
68  AVRational rate;
69  char *colors;
70  int buf_idx;
71  int16_t *buf_idy; /* y coordinate of previous sample for each channel */
72  AVFrame *outpicref;
73  int n;
74  int pixstep;
75  int sample_count_mod;
76  int mode; ///< ShowWavesMode
77  int scale; ///< ShowWavesScale
78  int draw_mode; ///< ShowWavesDrawMode
79  int split_channels;
80  uint8_t *fg;
81 
82  int (*get_h)(int16_t sample, int height);
83  void (*draw_sample)(uint8_t *buf, int height, int linesize,
84  int16_t *prev_y, const uint8_t color[4], int h);
85 
86  /* single picture */
87  int single_pic;
88  struct frame_node *audio_frames;
89  struct frame_node *last_frame;
90  int64_t total_samples;
91  int64_t *sum; /* abs sum of the samples per channel */
92 } ShowWavesContext;
93 
94 #define OFFSET(x) offsetof(ShowWavesContext, x)
95 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
96 
97 static const AVOption showwaves_options[] = {
98  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
99  { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
100  { "mode", "select display mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=MODE_POINT}, 0, MODE_NB-1, FLAGS, "mode"},
101  { "point", "draw a point for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_POINT}, .flags=FLAGS, .unit="mode"},
102  { "line", "draw a line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_LINE}, .flags=FLAGS, .unit="mode"},
103  { "p2p", "draw a line between samples", 0, AV_OPT_TYPE_CONST, {.i64=MODE_P2P}, .flags=FLAGS, .unit="mode"},
104  { "cline", "draw a centered line for each sample", 0, AV_OPT_TYPE_CONST, {.i64=MODE_CENTERED_LINE}, .flags=FLAGS, .unit="mode"},
105  { "n", "set how many samples to show in the same point", OFFSET(n), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, FLAGS },
106  { "rate", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
107  { "r", "set video rate", OFFSET(rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, FLAGS },
108  { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
109  { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
110  { "scale", "set amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SCALE_NB-1, FLAGS, .unit="scale" },
111  { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LIN}, .flags=FLAGS, .unit="scale"},
112  { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LOG}, .flags=FLAGS, .unit="scale"},
113  { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_SQRT}, .flags=FLAGS, .unit="scale"},
114  { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_CBRT}, .flags=FLAGS, .unit="scale"},
115  { "draw", "set draw mode", OFFSET(draw_mode), AV_OPT_TYPE_INT, {.i64 = DRAW_SCALE}, 0, DRAW_NB-1, FLAGS, .unit="draw" },
116  { "scale", "scale pixel values for each drawn sample", 0, AV_OPT_TYPE_CONST, {.i64=DRAW_SCALE}, .flags=FLAGS, .unit="draw"},
117  { "full", "draw every pixel for sample directly", 0, AV_OPT_TYPE_CONST, {.i64=DRAW_FULL}, .flags=FLAGS, .unit="draw"},
118  { NULL }
119 };
120 
121 AVFILTER_DEFINE_CLASS(showwaves);
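The AVOption table above is exactly what a filtergraph string configures on this filter. As a hedged illustration (these example invocations are not part of the file and only reuse option names and values declared in showwaves_options):

/*
 * Illustrative filtergraph strings (assumed typical usage, not from this file):
 *   showwaves=s=1280x240:mode=line:rate=30
 *   showwaves=s=640x480:mode=p2p:n=2:split_channels=1:colors=white|cyan:scale=log:draw=full
 */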
122 
123 static av_cold void uninit(AVFilterContext *ctx)
124 {
125  ShowWavesContext *showwaves = ctx->priv;
126 
127  av_frame_free(&showwaves->outpicref);
128  av_freep(&showwaves->buf_idy);
129  av_freep(&showwaves->fg);
130 
131  if (showwaves->single_pic) {
132  struct frame_node *node = showwaves->audio_frames;
133  while (node) {
134  struct frame_node *tmp = node;
135 
136  node = node->next;
137  av_frame_free(&tmp->frame);
138  av_freep(&tmp);
139  }
140  av_freep(&showwaves->sum);
141  showwaves->last_frame = NULL;
142  }
143 }
144 
145 static int query_formats(AVFilterContext *ctx)
146 {
147  AVFilterFormats *formats = NULL;
148  AVFilterChannelLayouts *layouts = NULL;
149  AVFilterLink *inlink = ctx->inputs[0];
150  AVFilterLink *outlink = ctx->outputs[0];
151  static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_NONE };
152  static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
153  int ret;
154 
155  /* set input audio formats */
156  formats = ff_make_format_list(sample_fmts);
157  if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0)
158  return ret;
159 
160  layouts = ff_all_channel_layouts();
161  if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
162  return ret;
163 
164  formats = ff_all_samplerates();
165  if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0)
166  return ret;
167 
168  /* set output video format */
169  formats = ff_make_format_list(pix_fmts);
170  if ((ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
171  return ret;
172 
173  return 0;
174 }
175 
176 static int get_lin_h(int16_t sample, int height)
177 {
178  return height/2 - av_rescale(sample, height/2, INT16_MAX);
179 }
180 
181 static int get_lin_h2(int16_t sample, int height)
182 {
183  return av_rescale(FFABS(sample), height, INT16_MAX);
184 }
185 
186 static int get_log_h(int16_t sample, int height)
187 {
188  return height/2 - FFSIGN(sample) * (log10(1 + FFABS(sample)) * (height/2) / log10(1 + INT16_MAX));
189 }
190 
191 static int get_log_h2(int16_t sample, int height)
192 {
193  return log10(1 + FFABS(sample)) * height / log10(1 + INT16_MAX);
194 }
195 
196 static int get_sqrt_h(int16_t sample, int height)
197 {
198  return height/2 - FFSIGN(sample) * (sqrt(FFABS(sample)) * (height/2) / sqrt(INT16_MAX));
199 }
200 
201 static int get_sqrt_h2(int16_t sample, int height)
202 {
203  return sqrt(FFABS(sample)) * height / sqrt(INT16_MAX);
204 }
205 
206 static int get_cbrt_h(int16_t sample, int height)
207 {
208  return height/2 - FFSIGN(sample) * (cbrt(FFABS(sample)) * (height/2) / cbrt(INT16_MAX));
209 }
210 
211 static int get_cbrt_h2(int16_t sample, int height)
212 {
213  return cbrt(FFABS(sample)) * height / cbrt(INT16_MAX);
214 }
215 
216 static void draw_sample_point_rgba_scale(uint8_t *buf, int height, int linesize,
217  int16_t *prev_y,
218  const uint8_t color[4], int h)
219 {
220  if (h >= 0 && h < height) {
221  buf[h * linesize + 0] += color[0];
222  buf[h * linesize + 1] += color[1];
223  buf[h * linesize + 2] += color[2];
224  buf[h * linesize + 3] += color[3];
225  }
226 }
227 
228 static void draw_sample_point_rgba_full(uint8_t *buf, int height, int linesize,
229  int16_t *prev_y,
230  const uint8_t color[4], int h)
231 {
232  if (h >= 0 && h < height) {
233  buf[h * linesize + 0] = color[0];
234  buf[h * linesize + 1] = color[1];
235  buf[h * linesize + 2] = color[2];
236  buf[h * linesize + 3] = color[3];
237  }
238 }
239 
240 static void draw_sample_line_rgba_scale(uint8_t *buf, int height, int linesize,
241  int16_t *prev_y,
242  const uint8_t color[4], int h)
243 {
244  int k;
245  int start = height/2;
246  int end = av_clip(h, 0, height-1);
247  if (start > end)
248  FFSWAP(int16_t, start, end);
249  for (k = start; k < end; k++) {
250  buf[k * linesize + 0] += color[0];
251  buf[k * linesize + 1] += color[1];
252  buf[k * linesize + 2] += color[2];
253  buf[k * linesize + 3] += color[3];
254  }
255 }
256 
257 static void draw_sample_line_rgba_full(uint8_t *buf, int height, int linesize,
258  int16_t *prev_y,
259  const uint8_t color[4], int h)
260 {
261  int k;
262  int start = height/2;
263  int end = av_clip(h, 0, height-1);
264  if (start > end)
265  FFSWAP(int16_t, start, end);
266  for (k = start; k < end; k++) {
267  buf[k * linesize + 0] = color[0];
268  buf[k * linesize + 1] = color[1];
269  buf[k * linesize + 2] = color[2];
270  buf[k * linesize + 3] = color[3];
271  }
272 }
273 
274 static void draw_sample_p2p_rgba_scale(uint8_t *buf, int height, int linesize,
275  int16_t *prev_y,
276  const uint8_t color[4], int h)
277 {
278  int k;
279  if (h >= 0 && h < height) {
280  buf[h * linesize + 0] += color[0];
281  buf[h * linesize + 1] += color[1];
282  buf[h * linesize + 2] += color[2];
283  buf[h * linesize + 3] += color[3];
284  if (*prev_y && h != *prev_y) {
285  int start = *prev_y;
286  int end = av_clip(h, 0, height-1);
287  if (start > end)
288  FFSWAP(int16_t, start, end);
289  for (k = start + 1; k < end; k++) {
290  buf[k * linesize + 0] += color[0];
291  buf[k * linesize + 1] += color[1];
292  buf[k * linesize + 2] += color[2];
293  buf[k * linesize + 3] += color[3];
294  }
295  }
296  }
297  *prev_y = h;
298 }
299 
300 static void draw_sample_p2p_rgba_full(uint8_t *buf, int height, int linesize,
301  int16_t *prev_y,
302  const uint8_t color[4], int h)
303 {
304  int k;
305  if (h >= 0 && h < height) {
306  buf[h * linesize + 0] = color[0];
307  buf[h * linesize + 1] = color[1];
308  buf[h * linesize + 2] = color[2];
309  buf[h * linesize + 3] = color[3];
310  if (*prev_y && h != *prev_y) {
311  int start = *prev_y;
312  int end = av_clip(h, 0, height-1);
313  if (start > end)
314  FFSWAP(int16_t, start, end);
315  for (k = start + 1; k < end; k++) {
316  buf[k * linesize + 0] = color[0];
317  buf[k * linesize + 1] = color[1];
318  buf[k * linesize + 2] = color[2];
319  buf[k * linesize + 3] = color[3];
320  }
321  }
322  }
323  *prev_y = h;
324 }
325 
326 static void draw_sample_cline_rgba_scale(uint8_t *buf, int height, int linesize,
327  int16_t *prev_y,
328  const uint8_t color[4], int h)
329 {
330  int k;
331  const int start = (height - h) / 2;
332  const int end = start + h;
333  for (k = start; k < end; k++) {
334  buf[k * linesize + 0] += color[0];
335  buf[k * linesize + 1] += color[1];
336  buf[k * linesize + 2] += color[2];
337  buf[k * linesize + 3] += color[3];
338  }
339 }
340  static void draw_sample_cline_rgba_full(uint8_t *buf, int height, int linesize,
341  int16_t *prev_y,
342  const uint8_t color[4], int h)
343 {
344  int k;
345  const int start = (height - h) / 2;
346  const int end = start + h;
347  for (k = start; k < end; k++) {
348  buf[k * linesize + 0] = color[0];
349  buf[k * linesize + 1] = color[1];
350  buf[k * linesize + 2] = color[2];
351  buf[k * linesize + 3] = color[3];
352  }
353 }
354 
355 static void draw_sample_point_gray(uint8_t *buf, int height, int linesize,
356  int16_t *prev_y,
357  const uint8_t color[4], int h)
358 {
359  if (h >= 0 && h < height)
360  buf[h * linesize] += color[0];
361 }
362 
363 static void draw_sample_line_gray(uint8_t *buf, int height, int linesize,
364  int16_t *prev_y,
365  const uint8_t color[4], int h)
366 {
367  int k;
368  int start = height/2;
369  int end = av_clip(h, 0, height-1);
370  if (start > end)
371  FFSWAP(int16_t, start, end);
372  for (k = start; k < end; k++)
373  buf[k * linesize] += color[0];
374 }
375 
376 static void draw_sample_p2p_gray(uint8_t *buf, int height, int linesize,
377  int16_t *prev_y,
378  const uint8_t color[4], int h)
379 {
380  int k;
381  if (h >= 0 && h < height) {
382  buf[h * linesize] += color[0];
383  if (*prev_y && h != *prev_y) {
384  int start = *prev_y;
385  int end = av_clip(h, 0, height-1);
386  if (start > end)
387  FFSWAP(int16_t, start, end);
388  for (k = start + 1; k < end; k++)
389  buf[k * linesize] += color[0];
390  }
391  }
392  *prev_y = h;
393 }
394 
395 static void draw_sample_cline_gray(uint8_t *buf, int height, int linesize,
396  int16_t *prev_y,
397  const uint8_t color[4], int h)
398 {
399  int k;
400  const int start = (height - h) / 2;
401  const int end = start + h;
402  for (k = start; k < end; k++)
403  buf[k * linesize] += color[0];
404 }
405 
406 static int config_output(AVFilterLink *outlink)
407 {
408  AVFilterContext *ctx = outlink->src;
409  AVFilterLink *inlink = ctx->inputs[0];
410  ShowWavesContext *showwaves = ctx->priv;
411  int nb_channels = inlink->channels;
412  char *colors, *saveptr = NULL;
413  uint8_t x;
414  int ch;
415 
416  if (showwaves->single_pic)
417  showwaves->n = 1;
418 
419  if (!showwaves->n)
420  showwaves->n = FFMAX(1, av_rescale_q(inlink->sample_rate, av_make_q(1, showwaves->w), showwaves->rate));
421 
422  showwaves->buf_idx = 0;
423  if (!(showwaves->buf_idy = av_mallocz_array(nb_channels, sizeof(*showwaves->buf_idy)))) {
424  av_log(ctx, AV_LOG_ERROR, "Could not allocate showwaves buffer\n");
425  return AVERROR(ENOMEM);
426  }
427  outlink->w = showwaves->w;
428  outlink->h = showwaves->h;
429  outlink->sample_aspect_ratio = (AVRational){1,1};
430 
431  outlink->frame_rate = av_div_q((AVRational){inlink->sample_rate,showwaves->n},
432  (AVRational){showwaves->w,1});
433 
434  av_log(ctx, AV_LOG_VERBOSE, "s:%dx%d r:%f n:%d\n",
435  showwaves->w, showwaves->h, av_q2d(outlink->frame_rate), showwaves->n);
436 
437  switch (outlink->format) {
438  case AV_PIX_FMT_GRAY8:
439  switch (showwaves->mode) {
440  case MODE_POINT: showwaves->draw_sample = draw_sample_point_gray; break;
441  case MODE_LINE: showwaves->draw_sample = draw_sample_line_gray; break;
442  case MODE_P2P: showwaves->draw_sample = draw_sample_p2p_gray; break;
443  case MODE_CENTERED_LINE: showwaves->draw_sample = draw_sample_cline_gray; break;
444  default:
445  return AVERROR_BUG;
446  }
447  showwaves->pixstep = 1;
448  break;
449  case AV_PIX_FMT_RGBA:
450  switch (showwaves->mode) {
451  case MODE_POINT: showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_point_rgba_scale : draw_sample_point_rgba_full; break;
452  case MODE_LINE: showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_line_rgba_scale : draw_sample_line_rgba_full; break;
453  case MODE_P2P: showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_p2p_rgba_scale : draw_sample_p2p_rgba_full; break;
454  case MODE_CENTERED_LINE: showwaves->draw_sample = showwaves->draw_mode == DRAW_SCALE ? draw_sample_cline_rgba_scale : draw_sample_cline_rgba_full; break;
455  default:
456  return AVERROR_BUG;
457  }
458  showwaves->pixstep = 4;
459  break;
460  }
461 
462  switch (showwaves->scale) {
463  case SCALE_LIN:
464  switch (showwaves->mode) {
465  case MODE_POINT:
466  case MODE_LINE:
467  case MODE_P2P: showwaves->get_h = get_lin_h; break;
468  case MODE_CENTERED_LINE: showwaves->get_h = get_lin_h2; break;
469  default:
470  return AVERROR_BUG;
471  }
472  break;
473  case SCALE_LOG:
474  switch (showwaves->mode) {
475  case MODE_POINT:
476  case MODE_LINE:
477  case MODE_P2P: showwaves->get_h = get_log_h; break;
478  case MODE_CENTERED_LINE: showwaves->get_h = get_log_h2; break;
479  default:
480  return AVERROR_BUG;
481  }
482  break;
483  case SCALE_SQRT:
484  switch (showwaves->mode) {
485  case MODE_POINT:
486  case MODE_LINE:
487  case MODE_P2P: showwaves->get_h = get_sqrt_h; break;
488  case MODE_CENTERED_LINE: showwaves->get_h = get_sqrt_h2; break;
489  default:
490  return AVERROR_BUG;
491  }
492  break;
493  case SCALE_CBRT:
494  switch (showwaves->mode) {
495  case MODE_POINT:
496  case MODE_LINE:
497  case MODE_P2P: showwaves->get_h = get_cbrt_h; break;
498  case MODE_CENTERED_LINE: showwaves->get_h = get_cbrt_h2; break;
499  default:
500  return AVERROR_BUG;
501  }
502  break;
503  }
504 
505  showwaves->fg = av_malloc_array(nb_channels, 4 * sizeof(*showwaves->fg));
506  if (!showwaves->fg)
507  return AVERROR(ENOMEM);
508 
509  colors = av_strdup(showwaves->colors);
510  if (!colors)
511  return AVERROR(ENOMEM);
512 
513  if (showwaves->draw_mode == DRAW_SCALE) {
514  /* multiplication factor, pre-computed to avoid in-loop divisions */
515  x = 255 / ((showwaves->split_channels ? 1 : nb_channels) * showwaves->n);
516  } else {
517  x = 255;
518  }
519  if (outlink->format == AV_PIX_FMT_RGBA) {
520  uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
521 
522  for (ch = 0; ch < nb_channels; ch++) {
523  char *color;
524 
525  color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
526  if (color)
527  av_parse_color(fg, color, -1, ctx);
528  showwaves->fg[4*ch + 0] = fg[0] * x / 255.;
529  showwaves->fg[4*ch + 1] = fg[1] * x / 255.;
530  showwaves->fg[4*ch + 2] = fg[2] * x / 255.;
531  showwaves->fg[4*ch + 3] = fg[3] * x / 255.;
532  }
533  } else {
534  for (ch = 0; ch < nb_channels; ch++)
535  showwaves->fg[4 * ch + 0] = x;
536  }
537  av_free(colors);
538 
539  return 0;
540 }
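A hedged worked example of the sizing math in config_output, assuming the defaults s=600x240 and rate=25 with 44100 Hz input (example numbers, not part of the original source):

/*
 * n          = FFMAX(1, av_rescale_q(44100, 1/600, 25/1))
 *            = round(44100 / (600 * 25)) = round(2.94) = 3 samples per column
 * frame_rate = (44100 / 3) / 600 = 24.5 fps, i.e. 49/2
 * so each output frame consumes w * n = 600 * 3 = 1800 input samples per channel.
 */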
541 
542 inline static int push_frame(AVFilterLink *outlink)
543 {
544  AVFilterContext *ctx = outlink->src;
545  AVFilterLink *inlink = ctx->inputs[0];
546  ShowWavesContext *showwaves = outlink->src->priv;
547  int nb_channels = inlink->channels;
548  int ret, i;
549 
550  ret = ff_filter_frame(outlink, showwaves->outpicref);
551  showwaves->outpicref = NULL;
552  showwaves->buf_idx = 0;
553  for (i = 0; i < nb_channels; i++)
554  showwaves->buf_idy[i] = 0;
555  return ret;
556 }
557 
558 static int push_single_pic(AVFilterLink *outlink)
559 {
560  AVFilterContext *ctx = outlink->src;
561  AVFilterLink *inlink = ctx->inputs[0];
562  ShowWavesContext *showwaves = ctx->priv;
563  int64_t n = 0, column_max_samples = showwaves->total_samples / outlink->w;
564  int64_t remaining_samples = showwaves->total_samples - (column_max_samples * outlink->w);
565  int64_t last_column_samples = column_max_samples + remaining_samples;
566  AVFrame *out = showwaves->outpicref;
567  struct frame_node *node;
568  const int nb_channels = inlink->channels;
569  const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;
570  const int linesize = out->linesize[0];
571  const int pixstep = showwaves->pixstep;
572  int col = 0;
573  int64_t *sum = showwaves->sum;
574 
575  if (column_max_samples == 0) {
576  av_log(ctx, AV_LOG_ERROR, "Too few samples\n");
577  return AVERROR(EINVAL);
578  }
579 
580  av_log(ctx, AV_LOG_DEBUG, "Create frame averaging %"PRId64" samples per column\n", column_max_samples);
581 
582  memset(sum, 0, nb_channels * sizeof(*sum));
583 
584  for (node = showwaves->audio_frames; node; node = node->next) {
585  int i;
586  const AVFrame *frame = node->frame;
587  const int16_t *p = (const int16_t *)frame->data[0];
588 
589  for (i = 0; i < frame->nb_samples; i++) {
590  int64_t max_samples = col == outlink->w - 1 ? last_column_samples: column_max_samples;
591  int ch;
592 
593  for (ch = 0; ch < nb_channels; ch++)
594  sum[ch] += abs(p[ch + i*nb_channels]) << 1;
595  n++;
596  if (n == max_samples) {
597  for (ch = 0; ch < nb_channels; ch++) {
598  int16_t sample = sum[ch] / max_samples;
599  uint8_t *buf = out->data[0] + col * pixstep;
600  int h;
601 
602  if (showwaves->split_channels)
603  buf += ch*ch_height*linesize;
604  av_assert0(col < outlink->w);
605  h = showwaves->get_h(sample, ch_height);
606  showwaves->draw_sample(buf, ch_height, linesize, &showwaves->buf_idy[ch], &showwaves->fg[ch * 4], h);
607  sum[ch] = 0;
608  }
609  col++;
610  n = 0;
611  }
612  }
613  }
614 
615  return push_frame(outlink);
616 }
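As a hedged illustration of the single-picture averaging above (example numbers, not from the source): with 1,000,000 queued samples and a 600-pixel-wide output,

/*
 * column_max_samples  = 1000000 / 600 = 1666
 * remaining_samples   = 1000000 - 1666*600 = 400
 * last_column_samples = 1666 + 400 = 2066
 * Each column then draws, per channel, get_h() of the per-column average of the
 * doubled absolute sample values accumulated in sum[].
 */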
617 
618 
619 static int request_frame(AVFilterLink *outlink)
620 {
621  ShowWavesContext *showwaves = outlink->src->priv;
622  AVFilterLink *inlink = outlink->src->inputs[0];
623  int ret;
624 
625  ret = ff_request_frame(inlink);
626  if (ret == AVERROR_EOF && showwaves->outpicref) {
627  if (showwaves->single_pic)
628  push_single_pic(outlink);
629  else
630  push_frame(outlink);
631  }
632 
633  return ret;
634 }
635 
636 static int alloc_out_frame(ShowWavesContext *showwaves, const int16_t *p,
637  const AVFilterLink *inlink, AVFilterLink *outlink,
638  const AVFrame *in)
639 {
640  if (!showwaves->outpicref) {
641  int j;
642  AVFrame *out = showwaves->outpicref =
643  ff_get_video_buffer(outlink, outlink->w, outlink->h);
644  if (!out)
645  return AVERROR(ENOMEM);
646  out->width = outlink->w;
647  out->height = outlink->h;
648  out->pts = in->pts + av_rescale_q((p - (int16_t *)in->data[0]) / inlink->channels,
649  av_make_q(1, inlink->sample_rate),
650  outlink->time_base);
651  for (j = 0; j < outlink->h; j++)
652  memset(out->data[0] + j*out->linesize[0], 0, outlink->w * showwaves->pixstep);
653  }
654  return 0;
655 }
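A short hedged example of the timestamp math above (assumed numbers, not from the source): if the audio frame starts at pts P, the stream is 44100 Hz stereo, and p points 2*22050 int16_t values into in->data[0], then

/* out->pts = P + av_rescale_q(22050, (AVRational){1, 44100}, outlink->time_base),
 * i.e. P plus half a second expressed in the output time base. */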
656 
657 static av_cold int init(AVFilterContext *ctx)
658 {
659  ShowWavesContext *showwaves = ctx->priv;
660 
661  if (!strcmp(ctx->filter->name, "showwavespic")) {
662  showwaves->single_pic = 1;
663  showwaves->mode = MODE_CENTERED_LINE;
664  }
665 
666  return 0;
667 }
668 
669 #if CONFIG_SHOWWAVES_FILTER
670 
671 static int showwaves_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
672 {
673  AVFilterContext *ctx = inlink->dst;
674  AVFilterLink *outlink = ctx->outputs[0];
675  ShowWavesContext *showwaves = ctx->priv;
676  const int nb_samples = insamples->nb_samples;
677  AVFrame *outpicref = showwaves->outpicref;
678  int16_t *p = (int16_t *)insamples->data[0];
679  int nb_channels = inlink->channels;
680  int i, j, ret = 0;
681  const int pixstep = showwaves->pixstep;
682  const int n = showwaves->n;
683  const int ch_height = showwaves->split_channels ? outlink->h / nb_channels : outlink->h;
684 
685  /* draw data in the buffer */
686  for (i = 0; i < nb_samples; i++) {
687 
688  ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
689  if (ret < 0)
690  goto end;
691  outpicref = showwaves->outpicref;
692 
693  for (j = 0; j < nb_channels; j++) {
694  uint8_t *buf = outpicref->data[0] + showwaves->buf_idx * pixstep;
695  const int linesize = outpicref->linesize[0];
696  int h;
697 
698  if (showwaves->split_channels)
699  buf += j*ch_height*linesize;
700  h = showwaves->get_h(*p++, ch_height);
701  showwaves->draw_sample(buf, ch_height, linesize,
702  &showwaves->buf_idy[j], &showwaves->fg[j * 4], h);
703  }
704 
705  showwaves->sample_count_mod++;
706  if (showwaves->sample_count_mod == n) {
707  showwaves->sample_count_mod = 0;
708  showwaves->buf_idx++;
709  }
710  if (showwaves->buf_idx == showwaves->w ||
711  (ff_outlink_get_status(inlink) && i == nb_samples - 1))
712  if ((ret = push_frame(outlink)) < 0)
713  break;
714  outpicref = showwaves->outpicref;
715  }
716 
717 end:
718  av_frame_free(&insamples);
719  return ret;
720 }
721 
722 static int activate(AVFilterContext *ctx)
723 {
724  AVFilterLink *inlink = ctx->inputs[0];
725  AVFilterLink *outlink = ctx->outputs[0];
726  ShowWavesContext *showwaves = ctx->priv;
727  AVFrame *in;
728  const int nb_samples = showwaves->n * outlink->w;
729  int ret;
730 
731  FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
732 
733  ret = ff_inlink_consume_samples(inlink, nb_samples, nb_samples, &in);
734  if (ret < 0)
735  return ret;
736  if (ret > 0)
737  return showwaves_filter_frame(inlink, in);
738 
739  FF_FILTER_FORWARD_STATUS(inlink, outlink);
740  FF_FILTER_FORWARD_WANTED(outlink, inlink);
741 
742  return FFERROR_NOT_READY;
743 }
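In other words, activate() asks the input FIFO for exactly n * w samples (min == max in ff_inlink_consume_samples), so each successful call hands showwaves_filter_frame() precisely one output frame's worth of audio. A hedged trace using the example numbers from earlier:

/* n = 3, w = 600  ->  nb_samples = 1800; each consumed batch advances buf_idx
 * from 0 to 600 (one column per 3 samples) and ends in a push_frame(). */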
744 
745 static const AVFilterPad showwaves_inputs[] = {
746  {
747  .name = "default",
748  .type = AVMEDIA_TYPE_AUDIO,
749  },
750  { NULL }
751 };
752 
753 static const AVFilterPad showwaves_outputs[] = {
754  {
755  .name = "default",
756  .type = AVMEDIA_TYPE_VIDEO,
757  .config_props = config_output,
758  },
759  { NULL }
760 };
761 
763  .name = "showwaves",
764  .description = NULL_IF_CONFIG_SMALL("Convert input audio to a video output."),
765  .init = init,
766  .uninit = uninit,
767  .query_formats = query_formats,
768  .priv_size = sizeof(ShowWavesContext),
769  .inputs = showwaves_inputs,
770  .activate = activate,
771  .outputs = showwaves_outputs,
772  .priv_class = &showwaves_class,
773 };
774 
775 #endif // CONFIG_SHOWWAVES_FILTER
776 
777 #if CONFIG_SHOWWAVESPIC_FILTER
778 
779 #define OFFSET(x) offsetof(ShowWavesContext, x)
780 #define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM
781 
782 static const AVOption showwavespic_options[] = {
783  { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
784  { "s", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "600x240"}, 0, 0, FLAGS },
785  { "split_channels", "draw channels separately", OFFSET(split_channels), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
786  { "colors", "set channels colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, FLAGS },
787  { "scale", "set amplitude scale", OFFSET(scale), AV_OPT_TYPE_INT, {.i64 = 0 }, 0, SCALE_NB-1, FLAGS, .unit="scale" },
788  { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LIN}, .flags=FLAGS, .unit="scale"},
789  { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_LOG}, .flags=FLAGS, .unit="scale"},
790  { "sqrt", "square root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_SQRT}, .flags=FLAGS, .unit="scale"},
791  { "cbrt", "cubic root", 0, AV_OPT_TYPE_CONST, {.i64=SCALE_CBRT}, .flags=FLAGS, .unit="scale"},
792  { "draw", "set draw mode", OFFSET(draw_mode), AV_OPT_TYPE_INT, {.i64 = DRAW_SCALE}, 0, DRAW_NB-1, FLAGS, .unit="draw" },
793  { "scale", "scale pixel values for each drawn sample", 0, AV_OPT_TYPE_CONST, {.i64=DRAW_SCALE}, .flags=FLAGS, .unit="draw"},
794  { "full", "draw every pixel for sample directly", 0, AV_OPT_TYPE_CONST, {.i64=DRAW_FULL}, .flags=FLAGS, .unit="draw"},
795  { NULL }
796 };
797 
798 AVFILTER_DEFINE_CLASS(showwavespic);
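As with showwaves, a hedged illustration of how these options are typically driven from the command line (the command is an assumption, not part of this file, and only uses options declared in showwavespic_options):

/*   ffmpeg -i input.flac -filter_complex "showwavespic=s=1024x200:split_channels=1:colors=white|gray" -frames:v 1 output.png   */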
799 
800 static int showwavespic_config_input(AVFilterLink *inlink)
801 {
802  AVFilterContext *ctx = inlink->dst;
803  ShowWavesContext *showwaves = ctx->priv;
804 
805  if (showwaves->single_pic) {
806  showwaves->sum = av_mallocz_array(inlink->channels, sizeof(*showwaves->sum));
807  if (!showwaves->sum)
808  return AVERROR(ENOMEM);
809  }
810 
811  return 0;
812 }
813 
814 static int showwavespic_filter_frame(AVFilterLink *inlink, AVFrame *insamples)
815 {
816  AVFilterContext *ctx = inlink->dst;
817  AVFilterLink *outlink = ctx->outputs[0];
818  ShowWavesContext *showwaves = ctx->priv;
819  int16_t *p = (int16_t *)insamples->data[0];
820  int ret = 0;
821 
822  if (showwaves->single_pic) {
823  struct frame_node *f;
824 
825  ret = alloc_out_frame(showwaves, p, inlink, outlink, insamples);
826  if (ret < 0)
827  goto end;
828 
829  /* queue the audio frame */
830  f = av_malloc(sizeof(*f));
831  if (!f) {
832  ret = AVERROR(ENOMEM);
833  goto end;
834  }
835  f->frame = insamples;
836  f->next = NULL;
837  if (!showwaves->last_frame) {
838  showwaves->audio_frames =
839  showwaves->last_frame = f;
840  } else {
841  showwaves->last_frame->next = f;
842  showwaves->last_frame = f;
843  }
844  showwaves->total_samples += insamples->nb_samples;
845 
846  return 0;
847  }
848 
849 end:
850  av_frame_free(&insamples);
851  return ret;
852 }
853 
854 static const AVFilterPad showwavespic_inputs[] = {
855  {
856  .name = "default",
857  .type = AVMEDIA_TYPE_AUDIO,
858  .config_props = showwavespic_config_input,
859  .filter_frame = showwavespic_filter_frame,
860  },
861  { NULL }
862 };
863 
864 static const AVFilterPad showwavespic_outputs[] = {
865  {
866  .name = "default",
867  .type = AVMEDIA_TYPE_VIDEO,
868  .config_props = config_output,
869  .request_frame = request_frame,
870  },
871  { NULL }
872 };
873 
875  .name = "showwavespic",
876  .description = NULL_IF_CONFIG_SMALL("Convert input audio to a video output single picture."),
877  .init = init,
878  .uninit = uninit,
879  .query_formats = query_formats,
880  .priv_size = sizeof(ShowWavesContext),
881  .inputs = showwavespic_inputs,
882  .outputs = showwavespic_outputs,
883  .priv_class = &showwavespic_class,
884 };
885 
886 #endif // CONFIG_SHOWWAVESPIC_FILTER