af_anequalizer.c
/*
 * Copyright (c) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
 * Copyright (c) 2015 Paul B Mahol
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/intreadwrite.h"
#include "libavutil/avstring.h"
#include "libavutil/ffmath.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "internal.h"
#include "audio.h"

#define FILTER_ORDER 4

enum FilterType {
    BUTTERWORTH,
    CHEBYSHEV1,
    CHEBYSHEV2,
    NB_TYPES,
};

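/* One fourth-order IIR section in direct form I: a0..a4 are the denominator
 * and b0..b4 the numerator coefficients; num[] and denum[] hold the last
 * four inputs and outputs (see section_process()). */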
typedef struct FoSection {
    double a0, a1, a2, a3, a4;
    double b0, b1, b2, b3, b4;

    double num[4];
    double denum[4];
} FoSection;

typedef struct EqualizatorFilter {
    int ignore;
    int channel;
    int type;

    double freq;
    double gain;
    double width;

    FoSection section[2];
} EqualizatorFilter;

typedef struct AudioNEqualizerContext {
    const AVClass *class;
    char *args;
    char *colors;
    int draw_curves;
    int w, h;

    double mag;
    int fscale;
    int nb_filters;
    int nb_allocated;
    EqualizatorFilter *filters;
    AVFrame *video;
} AudioNEqualizerContext;

#define OFFSET(x) offsetof(AudioNEqualizerContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define V AV_OPT_FLAG_VIDEO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM

static const AVOption anequalizer_options[] = {
    { "params", NULL, OFFSET(args), AV_OPT_TYPE_STRING, {.str=""}, 0, 0, A|F },
    { "curves", "draw frequency response curves", OFFSET(draw_curves), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, V|F },
    { "size", "set video size", OFFSET(w), AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, V|F },
    { "mgain", "set max gain", OFFSET(mag), AV_OPT_TYPE_DOUBLE, {.dbl=60}, -900, 900, V|F },
    { "fscale", "set frequency scale", OFFSET(fscale), AV_OPT_TYPE_INT, {.i64=1}, 0, 1, V|F, "fscale" },
    { "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, V|F, "fscale" },
    { "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, V|F, "fscale" },
    { "colors", "set channels curves colors", OFFSET(colors), AV_OPT_TYPE_STRING, {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, V|F },
    { NULL }
};

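/* Example use (syntax parsed in config_input() below), e.g.
 *   anequalizer=c0 f=200 w=100 g=-10 t=1|c1 f=200 w=100 g=-10 t=1
 * attenuates both channels of a stereo stream by 10 dB around 200 Hz over
 * a 100 Hz band, using the Chebyshev type 1 design (t=1). */
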
AVFILTER_DEFINE_CLASS(anequalizer);

static void draw_curves(AVFilterContext *ctx, AVFilterLink *inlink, AVFrame *out)
{
    AudioNEqualizerContext *s = ctx->priv;
    char *colors, *color, *saveptr = NULL;
    int ch, i, n;

    colors = av_strdup(s->colors);
    if (!colors)
        return;

    memset(out->data[0], 0, s->h * out->linesize[0]);

    for (ch = 0; ch < inlink->channels; ch++) {
        uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
        int prev_v = -1;
        double f;

        color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
        if (color)
            av_parse_color(fg, color, -1, ctx);

        for (f = 0; f < s->w; f++) {
            double zr, zi, zr2, zi2;
            double Hr, Hi;
            double Hmag = 1;
            double w;
            int v, y, x;

            w = M_PI * (s->fscale ? pow(s->w - 1, f / s->w) : f) / (s->w - 1);
            zr = cos(w);
            zr2 = zr * zr;
            zi = -sin(w);
            zi2 = zi * zi;

            for (n = 0; n < s->nb_filters; n++) {
                if (s->filters[n].channel != ch ||
                    s->filters[n].ignore)
                    continue;

                for (i = 0; i < FILTER_ORDER / 2; i++) {
                    FoSection *S = &s->filters[n].section[i];

                    /* H *= (((((S->b4 * z + S->b3) * z + S->b2) * z + S->b1) * z + S->b0) /
                             ((((S->a4 * z + S->a3) * z + S->a2) * z + S->a1) * z + S->a0)); */

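                    /* Expanded real and imaginary parts of the numerator and
                     * denominator polynomials at z = e^{-jw} (zr = cos(w),
                     * zi = -sin(w)); the 1-8*zr2*zi2 term uses the unit-circle
                     * identity zr2 + zi2 == 1 to avoid complex arithmetic. */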
                    Hr = S->b4*(1-8*zr2*zi2) + S->b2*(zr2-zi2) + zr*(S->b1+S->b3*(zr2-3*zi2)) + S->b0;
                    Hi = zi*(S->b3*(3*zr2-zi2) + S->b1 + 2*zr*(2*S->b4*(zr2-zi2) + S->b2));
                    Hmag *= hypot(Hr, Hi);
                    Hr = S->a4*(1-8*zr2*zi2) + S->a2*(zr2-zi2) + zr*(S->a1+S->a3*(zr2-3*zi2)) + S->a0;
                    Hi = zi*(S->a3*(3*zr2-zi2) + S->a1 + 2*zr*(2*S->a4*(zr2-zi2) + S->a2));
                    Hmag /= hypot(Hr, Hi);
                }
            }

            v = av_clip((1. + -20 * log10(Hmag) / s->mag) * s->h / 2, 0, s->h - 1);
            x = lrint(f);
            if (prev_v == -1)
                prev_v = v;
            if (v <= prev_v) {
                for (y = v; y <= prev_v; y++)
                    AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
            } else {
                for (y = prev_v; y <= v; y++)
                    AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
            }

            prev_v = v;
        }
    }

    av_free(colors);
}

static int config_video(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFrame *out;

    outlink->w = s->w;
    outlink->h = s->h;

    av_frame_free(&s->video);
    s->video = out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!out)
        return AVERROR(ENOMEM);
    outlink->sample_aspect_ratio = (AVRational){1,1};

    draw_curves(ctx, inlink, out);

    return 0;
}

static av_cold int init(AVFilterContext *ctx)
{
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterPad pad, vpad;
    int ret;

    pad = (AVFilterPad){
        .name = av_strdup("out0"),
        .type = AVMEDIA_TYPE_AUDIO,
    };

    if (!pad.name)
        return AVERROR(ENOMEM);

    if (s->draw_curves) {
        vpad = (AVFilterPad){
            .name = av_strdup("out1"),
            .type = AVMEDIA_TYPE_VIDEO,
            .config_props = config_video,
        };
        if (!vpad.name) {
            av_freep(&pad.name);
            return AVERROR(ENOMEM);
        }
    }

    ret = ff_insert_outpad(ctx, 0, &pad);
    if (ret < 0) {
        av_freep(&pad.name);
        return ret;
    }

    if (s->draw_curves) {
        ret = ff_insert_outpad(ctx, 1, &vpad);
        if (ret < 0) {
            av_freep(&vpad.name);
            return ret;
        }
    }

    return 0;
}

static int query_formats(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    if (s->draw_curves) {
        AVFilterLink *videolink = ctx->outputs[1];
        formats = ff_make_format_list(pix_fmts);
        if ((ret = ff_formats_ref(formats, &videolink->in_formats)) < 0)
            return ret;
    }

    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0 ||
        (ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
        return ret;

    layouts = ff_all_channel_counts();
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0 ||
        (ret = ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0 ||
        (ret = ff_formats_ref(formats, &outlink->in_samplerates)) < 0)
        return ret;

    return 0;
}

static av_cold void uninit(AVFilterContext *ctx)
{
    AudioNEqualizerContext *s = ctx->priv;

    for (int i = 0; i < ctx->nb_outputs; i++)
        av_freep(&ctx->output_pads[i].name);
    av_frame_free(&s->video);
    av_freep(&s->filters);
    s->nb_filters = 0;
    s->nb_allocated = 0;
}

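/* Coefficients of one fourth-order section of the order-N Butterworth
 * bandpass design below.  c0 = cos(w0) sets the center frequency; the
 * degenerate cases c0 == 1 or c0 == -1 (center at DC or Nyquist) collapse
 * to a second-order section, leaving b3, b4, a3 and a4 zero. */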
static void butterworth_fo_section(FoSection *S, double beta,
                                   double si, double g, double g0,
                                   double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
        S->b1 = 2*c0*(g*g*beta*beta - g0*g0)/D;
        S->b2 = (g*g*beta*beta - 2*g0*g*beta*si + g0*g0)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(beta*beta - 1)/D;
        S->a2 = (beta*beta - 2*beta*si + 1)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
        S->b1 = -4*c0*(g0*g0 + g*g0*si*beta)/D;
        S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - g*g*beta*beta)/D;
        S->b3 = -4*c0*(g0*g0 - g*g0*si*beta)/D;
        S->b4 = (g*g*beta*beta - 2*g*g0*si*beta + g0*g0)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(1 + si*beta)/D;
        S->a2 = 2*(1 + 2*c0*c0 - beta*beta)/D;
        S->a3 = -4*c0*(1 - si*beta)/D;
        S->a4 = (beta*beta - 2*si*beta + 1)/D;
    }
}

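/* Design an order-N Butterworth bandpass peaking filter: G is the peak
 * gain, Gb the gain at the band edges and G0 the reference (out-of-band)
 * gain, all in dB on entry; w0 and wb are the center frequency and
 * bandwidth in radians per sample. */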
static void butterworth_bp_filter(EqualizatorFilter *f,
                                  int N, double w0, double wb,
                                  double G, double Gb, double G0)
{
    double g, c0, g0, beta;
    double epsilon;
    int r = N % 2;
    int L = (N - r) / 2;
    int i;

    if (G == 0 && G0 == 0) {
        f->section[0].a0 = 1;
        f->section[0].b0 = 1;
        f->section[1].a0 = 1;
        f->section[1].b0 = 1;
        return;
    }

    G  = ff_exp10(G/20);
    Gb = ff_exp10(Gb/20);
    G0 = ff_exp10(G0/20);

    epsilon = sqrt((G * G - Gb * Gb) / (Gb * Gb - G0 * G0));
    g  = pow(G, 1.0 / N);
    g0 = pow(G0, 1.0 / N);
    beta = pow(epsilon, -1.0 / N) * tan(wb/2);
    c0 = cos(w0);

    for (i = 1; i <= L; i++) {
        double ui = (2.0 * i - 1) / N;
        double si = sin(M_PI * ui / 2.0);
        double Di = beta * beta + 2 * si * beta + 1;

        butterworth_fo_section(&f->section[i - 1], beta, si, g, g0, Di, c0);
    }
}

static void chebyshev1_fo_section(FoSection *S, double a,
                                  double c, double tetta_b,
                                  double g0, double si, double b,
                                  double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) + 2*g0*b*si*tetta_b*tetta_b + g0*g0)/D;
        S->b1 = 2*c0*(tetta_b*tetta_b*(b*b+g0*g0*c*c) - g0*g0)/D;
        S->b2 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) - 2*g0*b*si*tetta_b + g0*g0)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(tetta_b*tetta_b*(a*a+c*c) - 1)/D;
        S->a2 = (tetta_b*tetta_b*(a*a+c*c) - 2*a*si*tetta_b + 1)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b + 2*g0*b*si*tetta_b + g0*g0)/D;
        S->b1 = -4*c0*(g0*g0 + g0*b*si*tetta_b)/D;
        S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - (b*b + g0*g0*c*c)*tetta_b*tetta_b)/D;
        S->b3 = -4*c0*(g0*g0 - g0*b*si*tetta_b)/D;
        S->b4 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b - 2*g0*b*si*tetta_b + g0*g0)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(1 + a*si*tetta_b)/D;
        S->a2 = 2*(1 + 2*c0*c0 - (a*a + c*c)*tetta_b*tetta_b)/D;
        S->a3 = -4*c0*(1 - a*si*tetta_b)/D;
        S->a4 = ((a*a + c*c)*tetta_b*tetta_b - 2*a*si*tetta_b + 1)/D;
    }
}

static void chebyshev1_bp_filter(EqualizatorFilter *f,
                                 int N, double w0, double wb,
                                 double G, double Gb, double G0)
{
    double a, b, c0, g0, alfa, beta, tetta_b;
    double epsilon;
    int r = N % 2;
    int L = (N - r) / 2;
    int i;

    if (G == 0 && G0 == 0) {
        f->section[0].a0 = 1;
        f->section[0].b0 = 1;
        f->section[1].a0 = 1;
        f->section[1].b0 = 1;
        return;
    }

    G  = ff_exp10(G/20);
    Gb = ff_exp10(Gb/20);
    G0 = ff_exp10(G0/20);

    epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
    g0 = pow(G0, 1.0/N);
    alfa = pow(1.0/epsilon + sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
    beta = pow(G/epsilon + Gb * sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
    a = 0.5 * (alfa - 1.0/alfa);
    b = 0.5 * (beta - g0*g0*(1/beta));
    tetta_b = tan(wb/2);
    c0 = cos(w0);

    for (i = 1; i <= L; i++) {
        double ui = (2.0*i-1.0)/N;
        double ci = cos(M_PI*ui/2.0);
        double si = sin(M_PI*ui/2.0);
        double Di = (a*a + ci*ci)*tetta_b*tetta_b + 2.0*a*si*tetta_b + 1;

        chebyshev1_fo_section(&f->section[i - 1], a, ci, tetta_b, g0, si, b, Di, c0);
    }
}

static void chebyshev2_fo_section(FoSection *S, double a,
                                  double c, double tetta_b,
                                  double g, double si, double b,
                                  double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (g*g*tetta_b*tetta_b + 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
        S->b1 = 2*c0*(g*g*tetta_b*tetta_b - b*b - g*g*c*c)/D;
        S->b2 = (g*g*tetta_b*tetta_b - 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
        S->b3 = 0;
        S->b4 = 0;

        S->a0 = 1;
        S->a1 = 2*c0*(tetta_b*tetta_b - a*a - c*c)/D;
        S->a2 = (tetta_b*tetta_b - 2*tetta_b*a*si + a*a + c*c)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = (g*g*tetta_b*tetta_b + 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;
        S->b1 = -4*c0*(b*b + g*g*c*c + g*b*si*tetta_b)/D;
        S->b2 = 2*((b*b + g*g*c*c)*(1 + 2*c0*c0) - g*g*tetta_b*tetta_b)/D;
        S->b3 = -4*c0*(b*b + g*g*c*c - g*b*si*tetta_b)/D;
        S->b4 = (g*g*tetta_b*tetta_b - 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;

        S->a0 = 1;
        S->a1 = -4*c0*(a*a + c*c + a*si*tetta_b)/D;
        S->a2 = 2*((a*a + c*c)*(1 + 2*c0*c0) - tetta_b*tetta_b)/D;
        S->a3 = -4*c0*(a*a + c*c - a*si*tetta_b)/D;
        S->a4 = (tetta_b*tetta_b - 2*a*si*tetta_b + a*a + c*c)/D;
    }
}

static void chebyshev2_bp_filter(EqualizatorFilter *f,
                                 int N, double w0, double wb,
                                 double G, double Gb, double G0)
{
    double a, b, c0, tetta_b;
    double epsilon, g, eu, ew;
    int r = N % 2;
    int L = (N - r) / 2;
    int i;

    if (G == 0 && G0 == 0) {
        f->section[0].a0 = 1;
        f->section[0].b0 = 1;
        f->section[1].a0 = 1;
        f->section[1].b0 = 1;
        return;
    }

    G  = ff_exp10(G/20);
    Gb = ff_exp10(Gb/20);
    G0 = ff_exp10(G0/20);

    epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
    g  = pow(G, 1.0 / N);
    eu = pow(epsilon + sqrt(1 + epsilon*epsilon), 1.0/N);
    ew = pow(G0*epsilon + Gb*sqrt(1 + epsilon*epsilon), 1.0/N);
    a = (eu - 1.0/eu)/2.0;
    b = (ew - g*g/ew)/2.0;
    tetta_b = tan(wb/2);
    c0 = cos(w0);

    for (i = 1; i <= L; i++) {
        double ui = (2.0 * i - 1.0)/N;
        double ci = cos(M_PI * ui / 2.0);
        double si = sin(M_PI * ui / 2.0);
        double Di = tetta_b*tetta_b + 2*a*si*tetta_b + a*a + ci*ci;

        chebyshev2_fo_section(&f->section[i - 1], a, ci, tetta_b, g, si, b, Di, c0);
    }
}
static double butterworth_compute_bw_gain_db(double gain)
{
    double bw_gain = 0;

    if (gain <= -6)
        bw_gain = gain + 3;
    else if (gain > -6 && gain < 6)
        bw_gain = gain * 0.5;
    else if (gain >= 6)
        bw_gain = gain - 3;

    return bw_gain;
}

static double chebyshev1_compute_bw_gain_db(double gain)
{
    double bw_gain = 0;

    if (gain <= -6)
        bw_gain = gain + 1;
    else if (gain > -6 && gain < 6)
        bw_gain = gain * 0.9;
    else if (gain >= 6)
        bw_gain = gain - 1;

    return bw_gain;
}

static double chebyshev2_compute_bw_gain_db(double gain)
{
    double bw_gain = 0;

    if (gain <= -6)
        bw_gain = -3;
    else if (gain > -6 && gain < 6)
        bw_gain = gain * 0.3;
    else if (gain >= 6)
        bw_gain = 3;

    return bw_gain;
}

static inline double hz_2_rad(double x, double fs)
{
    return 2 * M_PI * x / fs;
}

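/* (Re)compute the two filter sections of one band from its current
 * parameters; the band-edge gain handed to the design routine comes from
 * the compute_bw_gain_db() helper matching the band type. */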
static void equalizer(EqualizatorFilter *f, double sample_rate)
{
    double w0 = hz_2_rad(f->freq, sample_rate);
    double wb = hz_2_rad(f->width, sample_rate);
    double bw_gain;

    switch (f->type) {
    case BUTTERWORTH:
        bw_gain = butterworth_compute_bw_gain_db(f->gain);
        butterworth_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
        break;
    case CHEBYSHEV1:
        bw_gain = chebyshev1_compute_bw_gain_db(f->gain);
        chebyshev1_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
        break;
    case CHEBYSHEV2:
        bw_gain = chebyshev2_compute_bw_gain_db(f->gain);
        chebyshev2_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
        break;
    }
}

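/* Fill the next free band slot and grow the array (doubling it) when only
 * one free slot remains, so the slot written by equalizer() above is
 * always valid. */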
static int add_filter(AudioNEqualizerContext *s, AVFilterLink *inlink)
{
    equalizer(&s->filters[s->nb_filters], inlink->sample_rate);
    if (s->nb_filters >= s->nb_allocated - 1) {
        EqualizatorFilter *filters;

        filters = av_calloc(s->nb_allocated, 2 * sizeof(*s->filters));
        if (!filters)
            return AVERROR(ENOMEM);
        memcpy(filters, s->filters, sizeof(*s->filters) * s->nb_allocated);
        av_free(s->filters);
        s->filters = filters;
        s->nb_allocated *= 2;
    }
    s->nb_filters++;

    return 0;
}

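/* Parse the "params" option: bands are separated by '|', each of the form
 * "c<channel> f=<freq> w=<width> g=<gain>[ t=<type>]".  Bands with an
 * out-of-range frequency or channel are kept but marked as ignored. */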
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AudioNEqualizerContext *s = ctx->priv;
    char *args = av_strdup(s->args);
    char *saveptr = NULL;
    int ret = 0;

    if (!args)
        return AVERROR(ENOMEM);

    s->nb_allocated = 32 * inlink->channels;
    s->filters = av_calloc(inlink->channels, 32 * sizeof(*s->filters));
    if (!s->filters) {
        s->nb_allocated = 0;
        av_free(args);
        return AVERROR(ENOMEM);
    }

    while (1) {
        char *arg = av_strtok(s->nb_filters == 0 ? args : NULL, "|", &saveptr);

        if (!arg)
            break;

        s->filters[s->nb_filters].type = 0;
        if (sscanf(arg, "c%d f=%lf w=%lf g=%lf t=%d", &s->filters[s->nb_filters].channel,
                                                      &s->filters[s->nb_filters].freq,
                                                      &s->filters[s->nb_filters].width,
                                                      &s->filters[s->nb_filters].gain,
                                                      &s->filters[s->nb_filters].type) != 5 &&
            sscanf(arg, "c%d f=%lf w=%lf g=%lf", &s->filters[s->nb_filters].channel,
                                                 &s->filters[s->nb_filters].freq,
                                                 &s->filters[s->nb_filters].width,
                                                 &s->filters[s->nb_filters].gain) != 4) {
            av_free(args);
            return AVERROR(EINVAL);
        }

        if (s->filters[s->nb_filters].freq < 0 ||
            s->filters[s->nb_filters].freq > inlink->sample_rate / 2.0)
            s->filters[s->nb_filters].ignore = 1;

        if (s->filters[s->nb_filters].channel < 0 ||
            s->filters[s->nb_filters].channel >= inlink->channels)
            s->filters[s->nb_filters].ignore = 1;

        s->filters[s->nb_filters].type = av_clip(s->filters[s->nb_filters].type, 0, NB_TYPES - 1);
        ret = add_filter(s, inlink);
        if (ret < 0)
            break;
    }

    av_free(args);

    return ret;
}

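/* Runtime command "change": "<band index>|f=<freq>|w=<width>|g=<gain>"
 * retunes one existing band and redraws the response curves when the
 * video output is enabled. */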
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int ret = AVERROR(ENOSYS);

    if (!strcmp(cmd, "change")) {
        double freq, width, gain;
        int filter;

        if (sscanf(args, "%d|f=%lf|w=%lf|g=%lf", &filter, &freq, &width, &gain) != 4)
            return AVERROR(EINVAL);

        if (filter < 0 || filter >= s->nb_filters)
            return AVERROR(EINVAL);

        if (freq < 0 || freq > inlink->sample_rate / 2.0)
            return AVERROR(EINVAL);

        s->filters[filter].freq  = freq;
        s->filters[filter].width = width;
        s->filters[filter].gain  = gain;
        equalizer(&s->filters[filter], inlink->sample_rate);
        if (s->draw_curves)
            draw_curves(ctx, inlink, s->video);

        ret = 0;
    }

    return ret;
}

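/* Run one sample through a single fourth-order section in direct form I:
 * num[] holds the last four inputs and denum[] the last four outputs. */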
static inline double section_process(FoSection *S, double in)
{
    double out;

    out  = S->b0 * in;
    out += S->b1 * S->num[0] - S->denum[0] * S->a1;
    out += S->b2 * S->num[1] - S->denum[1] * S->a2;
    out += S->b3 * S->num[2] - S->denum[2] * S->a3;
    out += S->b4 * S->num[3] - S->denum[3] * S->a4;

    S->num[3] = S->num[2];
    S->num[2] = S->num[1];
    S->num[1] = S->num[0];
    S->num[0] = in;

    S->denum[3] = S->denum[2];
    S->denum[2] = S->denum[1];
    S->denum[1] = S->denum[0];
    S->denum[0] = out;

    return out;
}

static double process_sample(FoSection *s1, double in)
{
    double p0 = in, p1;
    int i;

    for (i = 0; i < FILTER_ORDER / 2; i++) {
        p1 = section_process(&s1[i], p0);
        p0 = p1;
    }

    return p1;
}

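/* Filter the audio in place, band by band on each band's channel, then
 * send a clone of the current response-curve frame to the video output
 * if curve drawing is enabled. */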
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    double *bptr;
    int i, n;

    for (i = 0; i < s->nb_filters; i++) {
        EqualizatorFilter *f = &s->filters[i];

        if (f->gain == 0. || f->ignore)
            continue;

        bptr = (double *)buf->extended_data[f->channel];
        for (n = 0; n < buf->nb_samples; n++) {
            double sample = bptr[n];

            sample = process_sample(f->section, sample);
            bptr[n] = sample;
        }
    }

    if (s->draw_curves) {
        AVFrame *clone;

        const int64_t pts = buf->pts +
            av_rescale_q(buf->nb_samples, (AVRational){ 1, inlink->sample_rate },
                         outlink->time_base);
        int ret;

        s->video->pts = pts;
        clone = av_frame_clone(s->video);
        if (!clone)
            return AVERROR(ENOMEM);
        ret = ff_filter_frame(ctx->outputs[1], clone);
        if (ret < 0)
            return ret;
    }

    return ff_filter_frame(outlink, buf);
}

static const AVFilterPad inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_AUDIO,
        .config_props   = config_input,
        .filter_frame   = filter_frame,
        .needs_writable = 1,
    },
    { NULL }
};

AVFilter ff_af_anequalizer = {
    .name            = "anequalizer",
    .description     = NULL_IF_CONFIG_SMALL("Apply high-order audio parametric multi band equalizer."),
    .priv_size       = sizeof(AudioNEqualizerContext),
    .priv_class      = &anequalizer_class,
    .init            = init,
    .uninit          = uninit,
    .query_formats   = query_formats,
    .inputs          = inputs,
    .outputs         = NULL,
    .process_command = process_command,
    .flags           = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};